From 857b543543ab5faeef5ba851c3878fe289493ad4 Mon Sep 17 00:00:00 2001 From: Arihant Sethia Date: Wed, 15 Apr 2026 06:12:35 +0000 Subject: [PATCH 001/547] feat: add skill analytics to the dashboard Expose skill usage in analytics so the dashboard and insights output can show which skills the agent loads and manages over time. This adds skill aggregation to the InsightsEngine by extracting `skill_view` and `skill_manage` calls from assistant tool_calls, computing per-skill totals, and including the results in both terminal and gateway insights formatting. It also extends the dashboard analytics API and Analytics page to render a Top Skills table. Terminology is aligned with the skills docs: - Agent Loaded = `skill_view` events - Agent Managed = `skill_manage` actions Architecture: - agent/insights.py collects and aggregates per-skill usage - hermes_cli/web_server.py exposes `skills` on `/api/analytics/usage` - web/src/lib/api.ts adds analytics skill response types - web/src/pages/AnalyticsPage.tsx renders the Top Skills table - web/src/i18n/{en,zh}.ts updates user-facing labels Tests: - tests/agent/test_insights.py covers skill aggregation and formatting - tests/hermes_cli/test_web_server.py covers analytics API contract including the `skills` payload - verified with `cd web && npm run build` Files changed: - agent/insights.py - hermes_cli/web_server.py - tests/agent/test_insights.py - tests/hermes_cli/test_web_server.py - web/src/i18n/en.ts - web/src/i18n/types.ts - web/src/i18n/zh.ts - web/src/lib/api.ts - web/src/pages/AnalyticsPage.tsx --- agent/insights.py | 162 ++++++++++++++++++++++++++++ hermes_cli/web_server.py | 20 +++- tests/agent/test_insights.py | 52 +++++++++ tests/hermes_cli/test_web_server.py | 83 +++++++++++++- web/src/i18n/en.ts | 5 + web/src/i18n/types.ts | 5 + web/src/i18n/zh.ts | 5 + web/src/lib/api.ts | 20 ++++ web/src/pages/AnalyticsPage.tsx | 53 ++++++++- 9 files changed, 399 insertions(+), 6 deletions(-) diff --git a/agent/insights.py 
b/agent/insights.py index a0929c9126..8972f94a83 100644 --- a/agent/insights.py +++ b/agent/insights.py @@ -124,6 +124,7 @@ class InsightsEngine: # Gather raw data sessions = self._get_sessions(cutoff, source) tool_usage = self._get_tool_usage(cutoff, source) + skill_usage = self._get_skill_usage(cutoff, source) message_stats = self._get_message_stats(cutoff, source) if not sessions: @@ -135,6 +136,15 @@ class InsightsEngine: "models": [], "platforms": [], "tools": [], + "skills": { + "summary": { + "total_skill_loads": 0, + "total_skill_edits": 0, + "total_skill_actions": 0, + "distinct_skills_used": 0, + }, + "top_skills": [], + }, "activity": {}, "top_sessions": [], } @@ -144,6 +154,7 @@ class InsightsEngine: models = self._compute_model_breakdown(sessions) platforms = self._compute_platform_breakdown(sessions) tools = self._compute_tool_breakdown(tool_usage) + skills = self._compute_skill_breakdown(skill_usage) activity = self._compute_activity_patterns(sessions) top_sessions = self._compute_top_sessions(sessions) @@ -156,6 +167,7 @@ class InsightsEngine: "models": models, "platforms": platforms, "tools": tools, + "skills": skills, "activity": activity, "top_sessions": top_sessions, } @@ -284,6 +296,82 @@ class InsightsEngine: for name, count in tool_counts.most_common() ] + def _get_skill_usage(self, cutoff: float, source: str = None) -> List[Dict]: + """Extract per-skill usage from assistant tool calls.""" + skill_counts: Dict[str, Dict[str, Any]] = {} + + if source: + cursor = self._conn.execute( + """SELECT m.tool_calls, m.timestamp + FROM messages m + JOIN sessions s ON s.id = m.session_id + WHERE s.started_at >= ? AND s.source = ? + AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""", + (cutoff, source), + ) + else: + cursor = self._conn.execute( + """SELECT m.tool_calls, m.timestamp + FROM messages m + JOIN sessions s ON s.id = m.session_id + WHERE s.started_at >= ? 
+ AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""", + (cutoff,), + ) + + for row in cursor.fetchall(): + try: + calls = row["tool_calls"] + if isinstance(calls, str): + calls = json.loads(calls) + if not isinstance(calls, list): + continue + except (json.JSONDecodeError, TypeError): + continue + + timestamp = row["timestamp"] + for call in calls: + if not isinstance(call, dict): + continue + func = call.get("function", {}) + tool_name = func.get("name") + if tool_name not in {"skill_view", "skill_manage"}: + continue + + args = func.get("arguments") + if isinstance(args, str): + try: + args = json.loads(args) + except (json.JSONDecodeError, TypeError): + continue + if not isinstance(args, dict): + continue + + skill_name = args.get("name") + if not isinstance(skill_name, str) or not skill_name.strip(): + continue + + entry = skill_counts.setdefault( + skill_name, + { + "skill": skill_name, + "view_count": 0, + "manage_count": 0, + "last_used_at": None, + }, + ) + if tool_name == "skill_view": + entry["view_count"] += 1 + else: + entry["manage_count"] += 1 + + if timestamp is not None and ( + entry["last_used_at"] is None or timestamp > entry["last_used_at"] + ): + entry["last_used_at"] = timestamp + + return list(skill_counts.values()) + def _get_message_stats(self, cutoff: float, source: str = None) -> Dict: """Get aggregate message statistics.""" if source: @@ -475,6 +563,46 @@ class InsightsEngine: }) return result + def _compute_skill_breakdown(self, skill_usage: List[Dict]) -> Dict[str, Any]: + """Process per-skill usage into summary + ranked list.""" + total_skill_loads = sum(s["view_count"] for s in skill_usage) if skill_usage else 0 + total_skill_edits = sum(s["manage_count"] for s in skill_usage) if skill_usage else 0 + total_skill_actions = total_skill_loads + total_skill_edits + + top_skills = [] + for skill in skill_usage: + total_count = skill["view_count"] + skill["manage_count"] + percentage = (total_count / total_skill_actions * 100) if 
total_skill_actions else 0 + top_skills.append({ + "skill": skill["skill"], + "view_count": skill["view_count"], + "manage_count": skill["manage_count"], + "total_count": total_count, + "percentage": percentage, + "last_used_at": skill.get("last_used_at"), + }) + + top_skills.sort( + key=lambda s: ( + s["total_count"], + s["view_count"], + s["manage_count"], + s["last_used_at"] or 0, + s["skill"], + ), + reverse=True, + ) + + return { + "summary": { + "total_skill_loads": total_skill_loads, + "total_skill_edits": total_skill_edits, + "total_skill_actions": total_skill_actions, + "distinct_skills_used": len(skill_usage), + }, + "top_skills": top_skills, + } + def _compute_activity_patterns(self, sessions: List[Dict]) -> Dict: """Analyze activity patterns by day of week and hour.""" day_counts = Counter() # 0=Monday ... 6=Sunday @@ -682,6 +810,28 @@ class InsightsEngine: lines.append(f" ... and {len(report['tools']) - 15} more tools") lines.append("") + # Skill usage + skills = report.get("skills", {}) + top_skills = skills.get("top_skills", []) + if top_skills: + lines.append(" 🧠 Top Skills") + lines.append(" " + "─" * 56) + lines.append(f" {'Skill':<28} {'Loads':>7} {'Edits':>7} {'Last used':>11}") + for skill in top_skills[:10]: + last_used = "—" + if skill.get("last_used_at"): + last_used = datetime.fromtimestamp(skill["last_used_at"]).strftime("%b %d") + lines.append( + f" {skill['skill'][:28]:<28} {skill['view_count']:>7,} {skill['manage_count']:>7,} {last_used:>11}" + ) + summary = skills.get("summary", {}) + lines.append( + f" Distinct skills: {summary.get('distinct_skills_used', 0)} " + f"Loads: {summary.get('total_skill_loads', 0):,} " + f"Edits: {summary.get('total_skill_edits', 0):,}" + ) + lines.append("") + # Activity patterns act = report.get("activity", {}) if act.get("by_day"): @@ -774,6 +924,18 @@ class InsightsEngine: lines.append(f" {t['tool']} — {t['count']:,} calls ({t['percentage']:.1f}%)") lines.append("") + skills = report.get("skills", {}) + 
if skills.get("top_skills"): + lines.append("**🧠 Top Skills:**") + for skill in skills["top_skills"][:5]: + suffix = "" + if skill.get("last_used_at"): + suffix = f", last used {datetime.fromtimestamp(skill['last_used_at']).strftime('%b %d')}" + lines.append( + f" {skill['skill']} — {skill['view_count']:,} loads, {skill['manage_count']:,} edits{suffix}" + ) + lines.append("") + # Activity summary act = report.get("activity", {}) if act.get("busiest_day") and act.get("busiest_hour"): diff --git a/hermes_cli/web_server.py b/hermes_cli/web_server.py index 22265faa51..f18afbf866 100644 --- a/hermes_cli/web_server.py +++ b/hermes_cli/web_server.py @@ -1977,6 +1977,8 @@ async def update_config_raw(body: RawConfigUpdate): @app.get("/api/analytics/usage") async def get_usage_analytics(days: int = 30): from hermes_state import SessionDB + from agent.insights import InsightsEngine + db = SessionDB() try: cutoff = time.time() - (days * 86400) @@ -2016,8 +2018,24 @@ async def get_usage_analytics(days: int = 30): FROM sessions WHERE started_at > ? """, (cutoff,)) totals = dict(cur3.fetchone()) + insights_report = InsightsEngine(db).generate(days=days) + skills = insights_report.get("skills", { + "summary": { + "total_skill_loads": 0, + "total_skill_edits": 0, + "total_skill_actions": 0, + "distinct_skills_used": 0, + }, + "top_skills": [], + }) - return {"daily": daily, "by_model": by_model, "totals": totals, "period_days": days} + return { + "daily": daily, + "by_model": by_model, + "totals": totals, + "period_days": days, + "skills": skills, + } finally: db.close() diff --git a/tests/agent/test_insights.py b/tests/agent/test_insights.py index 885e34fec0..7ca8a9792f 100644 --- a/tests/agent/test_insights.py +++ b/tests/agent/test_insights.py @@ -51,6 +51,12 @@ def populated_db(db): db.append_message("s1", role="assistant", content="I found the bug. 
Let me fix it.", tool_calls=[{"function": {"name": "patch"}}]) db.append_message("s1", role="tool", content="patched successfully", tool_name="patch") + db.append_message( + "s1", + role="assistant", + content="Let me load the PR workflow skill.", + tool_calls=[{"function": {"name": "skill_view", "arguments": '{"name":"github-pr-workflow"}'}}], + ) db.append_message("s1", role="user", content="Thanks!") db.append_message("s1", role="assistant", content="You're welcome!") @@ -88,6 +94,12 @@ def populated_db(db): db.append_message("s3", role="assistant", content="And search files", tool_calls=[{"function": {"name": "search_files"}}]) db.append_message("s3", role="tool", content="found stuff", tool_name="search_files") + db.append_message( + "s3", + role="assistant", + content="Load the debugging skill.", + tool_calls=[{"function": {"name": "skill_view", "arguments": '{"name":"systematic-debugging"}'}}], + ) # Session 4: Discord, same model as s1, ended, 1 day ago db.create_session( @@ -100,6 +112,15 @@ def populated_db(db): db.update_token_counts("s4", input_tokens=10000, output_tokens=5000) db.append_message("s4", role="user", content="Quick question") db.append_message("s4", role="assistant", content="Sure, go ahead") + db.append_message( + "s4", + role="assistant", + content="Load and update GitHub skills.", + tool_calls=[ + {"function": {"name": "skill_view", "arguments": '{"name":"github-pr-workflow"}'}}, + {"function": {"name": "skill_manage", "arguments": '{"name":"github-code-review"}'}}, + ], + ) # Session 5: Old session, 45 days ago (should be excluded from 30-day window) db.create_session( @@ -332,6 +353,35 @@ class TestInsightsPopulated: total_pct = sum(t["percentage"] for t in tools) assert total_pct == pytest.approx(100.0, abs=0.1) + def test_skill_breakdown(self, populated_db): + engine = InsightsEngine(populated_db) + report = engine.generate(days=30) + skills = report["skills"] + + assert skills["summary"]["distinct_skills_used"] == 3 + assert 
skills["summary"]["total_skill_loads"] == 3 + assert skills["summary"]["total_skill_edits"] == 1 + assert skills["summary"]["total_skill_actions"] == 4 + + top_skill = skills["top_skills"][0] + assert top_skill["skill"] == "github-pr-workflow" + assert top_skill["view_count"] == 2 + assert top_skill["manage_count"] == 0 + assert top_skill["total_count"] == 2 + assert top_skill["last_used_at"] is not None + + def test_skill_breakdown_respects_days_filter(self, populated_db): + engine = InsightsEngine(populated_db) + report = engine.generate(days=3) + skills = report["skills"] + + assert skills["summary"]["distinct_skills_used"] == 2 + assert skills["summary"]["total_skill_loads"] == 2 + assert skills["summary"]["total_skill_edits"] == 1 + + skill_names = [s["skill"] for s in skills["top_skills"]] + assert "systematic-debugging" not in skill_names + def test_activity_patterns(self, populated_db): engine = InsightsEngine(populated_db) report = engine.generate(days=30) @@ -401,6 +451,7 @@ class TestTerminalFormatting: assert "Overview" in text assert "Models Used" in text assert "Top Tools" in text + assert "Top Skills" in text assert "Activity Patterns" in text assert "Notable Sessions" in text @@ -467,6 +518,7 @@ class TestGatewayFormatting: text = engine.format_gateway(report) assert "$" in text + assert "Top Skills" in text assert "Est. 
cost" in text def test_gateway_format_shows_models(self, populated_db): diff --git a/tests/hermes_cli/test_web_server.py b/tests/hermes_cli/test_web_server.py index 365e3d0fe1..fa7ce62b25 100644 --- a/tests/hermes_cli/test_web_server.py +++ b/tests/hermes_cli/test_web_server.py @@ -101,14 +101,19 @@ class TestWebServerEndpoints: """Test the FastAPI REST endpoints using Starlette TestClient.""" @pytest.fixture(autouse=True) - def _setup_test_client(self): - """Create a TestClient — import is deferred to avoid requiring fastapi.""" + def _setup_test_client(self, monkeypatch, _isolate_hermes_home): + """Create a TestClient and isolate the state DB under the test HERMES_HOME.""" try: from starlette.testclient import TestClient except ImportError: pytest.skip("fastapi/starlette not installed") + import hermes_state + from hermes_constants import get_hermes_home from hermes_cli.web_server import app, _SESSION_TOKEN + + monkeypatch.setattr(hermes_state, "DEFAULT_DB_PATH", get_hermes_home() / "state.db") + self.client = TestClient(app) self.client.headers["Authorization"] = f"Bearer {_SESSION_TOKEN}" @@ -511,12 +516,18 @@ class TestNewEndpoints: """Tests for session detail, logs, cron, skills, tools, raw config, analytics.""" @pytest.fixture(autouse=True) - def _setup(self): + def _setup(self, monkeypatch, _isolate_hermes_home): try: from starlette.testclient import TestClient except ImportError: pytest.skip("fastapi/starlette not installed") + + import hermes_state + from hermes_constants import get_hermes_home from hermes_cli.web_server import app, _SESSION_TOKEN + + monkeypatch.setattr(hermes_state, "DEFAULT_DB_PATH", get_hermes_home() / "state.db") + self.client = TestClient(app) self.client.headers["Authorization"] = f"Bearer {_SESSION_TOKEN}" @@ -692,8 +703,74 @@ class TestNewEndpoints: assert "daily" in data assert "by_model" in data assert "totals" in data + assert "skills" in data assert isinstance(data["daily"], list) assert "total_sessions" in data["totals"] + 
assert data["skills"] == { + "summary": { + "total_skill_loads": 0, + "total_skill_edits": 0, + "total_skill_actions": 0, + "distinct_skills_used": 0, + }, + "top_skills": [], + } + + def test_analytics_usage_includes_skill_breakdown(self): + from hermes_state import SessionDB + + db = SessionDB() + try: + db.create_session( + session_id="skills-analytics-test", + source="cli", + model="anthropic/claude-sonnet-4", + ) + db.update_token_counts( + "skills-analytics-test", + input_tokens=120, + output_tokens=45, + ) + db.append_message( + "skills-analytics-test", + role="assistant", + content="Loading and updating skills.", + tool_calls=[ + { + "function": { + "name": "skill_view", + "arguments": '{"name":"github-pr-workflow"}', + } + }, + { + "function": { + "name": "skill_manage", + "arguments": '{"name":"github-code-review"}', + } + }, + ], + ) + finally: + db.close() + + resp = self.client.get("/api/analytics/usage?days=7") + assert resp.status_code == 200 + + data = resp.json() + assert data["skills"]["summary"] == { + "total_skill_loads": 1, + "total_skill_edits": 1, + "total_skill_actions": 2, + "distinct_skills_used": 2, + } + assert len(data["skills"]["top_skills"]) == 2 + + top_skill = data["skills"]["top_skills"][0] + assert top_skill["skill"] == "github-pr-workflow" + assert top_skill["view_count"] == 1 + assert top_skill["manage_count"] == 0 + assert top_skill["total_count"] == 1 + assert top_skill["last_used_at"] is not None def test_session_token_endpoint_removed(self): """GET /api/auth/session-token no longer exists.""" diff --git a/web/src/i18n/en.ts b/web/src/i18n/en.ts index 3bf693f218..b15be08a4c 100644 --- a/web/src/i18n/en.ts +++ b/web/src/i18n/en.ts @@ -115,6 +115,11 @@ export const en: Translations = { dailyTokenUsage: "Daily Token Usage", dailyBreakdown: "Daily Breakdown", perModelBreakdown: "Per-Model Breakdown", + topSkills: "Top Skills", + skill: "Skill", + loads: "Agent Loaded", + edits: "Agent Managed", + lastUsed: "Last Used", input: 
"Input", output: "Output", total: "Total", diff --git a/web/src/i18n/types.ts b/web/src/i18n/types.ts index 34813c68f3..3996fd1f0b 100644 --- a/web/src/i18n/types.ts +++ b/web/src/i18n/types.ts @@ -120,6 +120,11 @@ export interface Translations { dailyTokenUsage: string; dailyBreakdown: string; perModelBreakdown: string; + topSkills: string; + skill: string; + loads: string; + edits: string; + lastUsed: string; input: string; output: string; total: string; diff --git a/web/src/i18n/zh.ts b/web/src/i18n/zh.ts index 18cb3ee38e..c4e334a885 100644 --- a/web/src/i18n/zh.ts +++ b/web/src/i18n/zh.ts @@ -115,6 +115,11 @@ export const zh: Translations = { dailyTokenUsage: "每日 Token 用量", dailyBreakdown: "每日明细", perModelBreakdown: "模型用量明细", + topSkills: "常用技能", + skill: "技能", + loads: "代理加载", + edits: "代理管理", + lastUsed: "最近使用", input: "输入", output: "输出", total: "总计", diff --git a/web/src/lib/api.ts b/web/src/lib/api.ts index e610439938..b82c7808c1 100644 --- a/web/src/lib/api.ts +++ b/web/src/lib/api.ts @@ -283,6 +283,22 @@ export interface AnalyticsModelEntry { sessions: number; } +export interface AnalyticsSkillEntry { + skill: string; + view_count: number; + manage_count: number; + total_count: number; + percentage: number; + last_used_at: number | null; +} + +export interface AnalyticsSkillsSummary { + total_skill_loads: number; + total_skill_edits: number; + total_skill_actions: number; + distinct_skills_used: number; +} + export interface AnalyticsResponse { daily: AnalyticsDailyEntry[]; by_model: AnalyticsModelEntry[]; @@ -295,6 +311,10 @@ export interface AnalyticsResponse { total_actual_cost: number; total_sessions: number; }; + skills: { + summary: AnalyticsSkillsSummary; + top_skills: AnalyticsSkillEntry[]; + }; } export interface CronJob { diff --git a/web/src/pages/AnalyticsPage.tsx b/web/src/pages/AnalyticsPage.tsx index 2f947cbb6a..c9efd70ac7 100644 --- a/web/src/pages/AnalyticsPage.tsx +++ b/web/src/pages/AnalyticsPage.tsx @@ -1,12 +1,14 @@ import { 
useEffect, useState, useCallback } from "react"; import { BarChart3, + Brain, Cpu, Hash, TrendingUp, } from "lucide-react"; import { api } from "@/lib/api"; -import type { AnalyticsResponse, AnalyticsDailyEntry, AnalyticsModelEntry } from "@/lib/api"; +import type { AnalyticsResponse, AnalyticsDailyEntry, AnalyticsModelEntry, AnalyticsSkillEntry } from "@/lib/api"; +import { timeAgo } from "@/lib/utils"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; import { Button } from "@/components/ui/button"; import { useI18n } from "@/i18n"; @@ -227,6 +229,52 @@ function ModelTable({ models }: { models: AnalyticsModelEntry[] }) { ); } +function SkillTable({ skills }: { skills: AnalyticsSkillEntry[] }) { + const { t } = useI18n(); + if (skills.length === 0) return null; + + return ( + + +
+ + {t.analytics.topSkills} +
+
+ +
+ + + + + + + + + + + + {skills.map((skill) => ( + + + + + + + + ))} + +
{t.analytics.skill}{t.analytics.loads}{t.analytics.edits}{t.analytics.total}{t.analytics.lastUsed}
+ {skill.skill} + {skill.view_count}{skill.manage_count}{skill.total_count} + {skill.last_used_at ? timeAgo(skill.last_used_at) : "—"} +
+
+
+
+ ); +} + export default function AnalyticsPage() { const [days, setDays] = useState(30); const [data, setData] = useState(null); @@ -310,10 +358,11 @@ export default function AnalyticsPage() { {/* Tables */} + )} - {data && data.daily.length === 0 && data.by_model.length === 0 && ( + {data && data.daily.length === 0 && data.by_model.length === 0 && data.skills.top_skills.length === 0 && (
From 9ed6eb0cca9377cd8f080fe0b721c189fe6b8cdf Mon Sep 17 00:00:00 2001 From: Kaio Date: Fri, 17 Apr 2026 18:40:46 -0700 Subject: [PATCH 002/547] fix(tui): resolve runtime provider in _make_agent (#11884) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit _make_agent() was not calling resolve_runtime_provider(), so bare-slug models (e.g. 'claude-opus-4-6' with provider: anthropic) left provider, base_url, and api_key empty in AIAgent — causing HTTP 404 at api.anthropic.com. Now mirrors cli.py: calls resolve_runtime_provider(requested=None) and forwards all 7 resolved fields to AIAgent. Adds regression test. --- tests/tui_gateway/test_make_agent_provider.py | 48 +++++++++++++++++++ tui_gateway/server.py | 9 ++++ 2 files changed, 57 insertions(+) create mode 100644 tests/tui_gateway/test_make_agent_provider.py diff --git a/tests/tui_gateway/test_make_agent_provider.py b/tests/tui_gateway/test_make_agent_provider.py new file mode 100644 index 0000000000..bdc7fecf4d --- /dev/null +++ b/tests/tui_gateway/test_make_agent_provider.py @@ -0,0 +1,48 @@ +"""Regression test for #11884: _make_agent must resolve runtime provider. + +Without resolve_runtime_provider(), bare-slug models in config +(e.g. ``claude-opus-4-6`` with ``model.provider: anthropic``) leave +provider/base_url/api_key empty in AIAgent, causing HTTP 404. 
+""" + +from unittest.mock import MagicMock, patch + + +def test_make_agent_passes_resolved_provider(): + """_make_agent forwards provider/base_url/api_key/api_mode from + resolve_runtime_provider to AIAgent.""" + + fake_runtime = { + "provider": "anthropic", + "base_url": "https://api.anthropic.com", + "api_key": "sk-test-key", + "api_mode": "anthropic_messages", + "command": None, + "args": None, + "credential_pool": None, + } + + fake_cfg = { + "model": {"default": "claude-opus-4-6", "provider": "anthropic"}, + "agent": {"system_prompt": "test"}, + } + + with patch("tui_gateway.server._load_cfg", return_value=fake_cfg), \ + patch("tui_gateway.server._get_db", return_value=MagicMock()), \ + patch("tui_gateway.server._load_tool_progress_mode", return_value="compact"), \ + patch("tui_gateway.server._load_reasoning_config", return_value=None), \ + patch("tui_gateway.server._load_service_tier", return_value=None), \ + patch("tui_gateway.server._load_enabled_toolsets", return_value=None), \ + patch("hermes_cli.runtime_provider.resolve_runtime_provider", return_value=fake_runtime) as mock_resolve, \ + patch("run_agent.AIAgent") as mock_agent: + + from tui_gateway.server import _make_agent + _make_agent("sid-1", "key-1") + + mock_resolve.assert_called_once_with(requested=None) + + call_kwargs = mock_agent.call_args + assert call_kwargs.kwargs["provider"] == "anthropic" + assert call_kwargs.kwargs["base_url"] == "https://api.anthropic.com" + assert call_kwargs.kwargs["api_key"] == "sk-test-key" + assert call_kwargs.kwargs["api_mode"] == "anthropic_messages" diff --git a/tui_gateway/server.py b/tui_gateway/server.py index d86db00066..536136e2d3 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -911,12 +911,21 @@ def _reset_session_agent(sid: str, session: dict) -> dict: def _make_agent(sid: str, key: str, session_id: str | None = None): from run_agent import AIAgent + from hermes_cli.runtime_provider import resolve_runtime_provider cfg = _load_cfg() 
system_prompt = cfg.get("agent", {}).get("system_prompt", "") or "" if not system_prompt: system_prompt = _resolve_personality_prompt(cfg) + runtime = resolve_runtime_provider(requested=None) return AIAgent( model=_resolve_model(), + provider=runtime.get("provider"), + base_url=runtime.get("base_url"), + api_key=runtime.get("api_key"), + api_mode=runtime.get("api_mode"), + acp_command=runtime.get("command"), + acp_args=runtime.get("args"), + credential_pool=runtime.get("credential_pool"), quiet_mode=True, verbose_logging=_load_tool_progress_mode() == "verbose", reasoning_config=_load_reasoning_config(), From 66ee081dc181fc731994f50bb99b0a52a2761310 Mon Sep 17 00:00:00 2001 From: Teknium <127238744+teknium1@users.noreply.github.com> Date: Sun, 19 Apr 2026 05:14:17 -0700 Subject: [PATCH 003/547] skills: move 7 niche mlops/mcp skills to optional (#12474) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Built-in → optional-skills/: mlops/training/peft → optional-skills/mlops/peft mlops/training/pytorch-fsdp → optional-skills/mlops/pytorch-fsdp mlops/models/clip → optional-skills/mlops/clip mlops/models/stable-diffusion → optional-skills/mlops/stable-diffusion mlops/models/whisper → optional-skills/mlops/whisper mlops/cloud/modal → optional-skills/mlops/modal mcp/mcporter → optional-skills/mcp/mcporter Built-in mlops training kept: axolotl, trl-fine-tuning, unsloth. Built-in mlops models kept: audiocraft, segment-anything. Built-in mlops evaluation/research/huggingface-hub/inference all kept. native-mcp stays built-in (documents the native MCP tool); mcporter was a redundant alternative CLI. Also: removed now-empty skills/mlops/cloud/ dir, refreshed skills/mlops/models/DESCRIPTION.md and skills/mcp/DESCRIPTION.md to match what's left, and synchronized both catalog pages (skills-catalog.md, optional-skills-catalog.md). 
--- .../mcp/mcporter/SKILL.md | 0 .../mlops}/clip/SKILL.md | 0 .../mlops}/clip/references/applications.md | 0 .../mlops}/modal/SKILL.md | 0 .../mlops}/modal/references/advanced-usage.md | 0 .../mlops}/modal/references/troubleshooting.md | 0 .../mlops}/peft/SKILL.md | 0 .../mlops}/peft/references/advanced-usage.md | 0 .../mlops}/peft/references/troubleshooting.md | 0 .../mlops}/pytorch-fsdp/SKILL.md | 0 .../mlops}/pytorch-fsdp/references/index.md | 0 .../mlops}/pytorch-fsdp/references/other.md | 0 .../mlops}/stable-diffusion/SKILL.md | 0 .../references/advanced-usage.md | 0 .../references/troubleshooting.md | 0 .../mlops}/whisper/SKILL.md | 0 .../mlops}/whisper/references/languages.md | 0 skills/mcp/DESCRIPTION.md | 2 +- skills/mlops/cloud/DESCRIPTION.md | 3 --- skills/mlops/models/DESCRIPTION.md | 2 +- .../docs/reference/optional-skills-catalog.md | 7 +++++++ website/docs/reference/skills-catalog.md | 16 +--------------- 22 files changed, 10 insertions(+), 20 deletions(-) rename {skills => optional-skills}/mcp/mcporter/SKILL.md (100%) rename {skills/mlops/models => optional-skills/mlops}/clip/SKILL.md (100%) rename {skills/mlops/models => optional-skills/mlops}/clip/references/applications.md (100%) rename {skills/mlops/cloud => optional-skills/mlops}/modal/SKILL.md (100%) rename {skills/mlops/cloud => optional-skills/mlops}/modal/references/advanced-usage.md (100%) rename {skills/mlops/cloud => optional-skills/mlops}/modal/references/troubleshooting.md (100%) rename {skills/mlops/training => optional-skills/mlops}/peft/SKILL.md (100%) rename {skills/mlops/training => optional-skills/mlops}/peft/references/advanced-usage.md (100%) rename {skills/mlops/training => optional-skills/mlops}/peft/references/troubleshooting.md (100%) rename {skills/mlops/training => optional-skills/mlops}/pytorch-fsdp/SKILL.md (100%) rename {skills/mlops/training => optional-skills/mlops}/pytorch-fsdp/references/index.md (100%) rename {skills/mlops/training => 
optional-skills/mlops}/pytorch-fsdp/references/other.md (100%) rename {skills/mlops/models => optional-skills/mlops}/stable-diffusion/SKILL.md (100%) rename {skills/mlops/models => optional-skills/mlops}/stable-diffusion/references/advanced-usage.md (100%) rename {skills/mlops/models => optional-skills/mlops}/stable-diffusion/references/troubleshooting.md (100%) rename {skills/mlops/models => optional-skills/mlops}/whisper/SKILL.md (100%) rename {skills/mlops/models => optional-skills/mlops}/whisper/references/languages.md (100%) delete mode 100644 skills/mlops/cloud/DESCRIPTION.md diff --git a/skills/mcp/mcporter/SKILL.md b/optional-skills/mcp/mcporter/SKILL.md similarity index 100% rename from skills/mcp/mcporter/SKILL.md rename to optional-skills/mcp/mcporter/SKILL.md diff --git a/skills/mlops/models/clip/SKILL.md b/optional-skills/mlops/clip/SKILL.md similarity index 100% rename from skills/mlops/models/clip/SKILL.md rename to optional-skills/mlops/clip/SKILL.md diff --git a/skills/mlops/models/clip/references/applications.md b/optional-skills/mlops/clip/references/applications.md similarity index 100% rename from skills/mlops/models/clip/references/applications.md rename to optional-skills/mlops/clip/references/applications.md diff --git a/skills/mlops/cloud/modal/SKILL.md b/optional-skills/mlops/modal/SKILL.md similarity index 100% rename from skills/mlops/cloud/modal/SKILL.md rename to optional-skills/mlops/modal/SKILL.md diff --git a/skills/mlops/cloud/modal/references/advanced-usage.md b/optional-skills/mlops/modal/references/advanced-usage.md similarity index 100% rename from skills/mlops/cloud/modal/references/advanced-usage.md rename to optional-skills/mlops/modal/references/advanced-usage.md diff --git a/skills/mlops/cloud/modal/references/troubleshooting.md b/optional-skills/mlops/modal/references/troubleshooting.md similarity index 100% rename from skills/mlops/cloud/modal/references/troubleshooting.md rename to 
optional-skills/mlops/modal/references/troubleshooting.md diff --git a/skills/mlops/training/peft/SKILL.md b/optional-skills/mlops/peft/SKILL.md similarity index 100% rename from skills/mlops/training/peft/SKILL.md rename to optional-skills/mlops/peft/SKILL.md diff --git a/skills/mlops/training/peft/references/advanced-usage.md b/optional-skills/mlops/peft/references/advanced-usage.md similarity index 100% rename from skills/mlops/training/peft/references/advanced-usage.md rename to optional-skills/mlops/peft/references/advanced-usage.md diff --git a/skills/mlops/training/peft/references/troubleshooting.md b/optional-skills/mlops/peft/references/troubleshooting.md similarity index 100% rename from skills/mlops/training/peft/references/troubleshooting.md rename to optional-skills/mlops/peft/references/troubleshooting.md diff --git a/skills/mlops/training/pytorch-fsdp/SKILL.md b/optional-skills/mlops/pytorch-fsdp/SKILL.md similarity index 100% rename from skills/mlops/training/pytorch-fsdp/SKILL.md rename to optional-skills/mlops/pytorch-fsdp/SKILL.md diff --git a/skills/mlops/training/pytorch-fsdp/references/index.md b/optional-skills/mlops/pytorch-fsdp/references/index.md similarity index 100% rename from skills/mlops/training/pytorch-fsdp/references/index.md rename to optional-skills/mlops/pytorch-fsdp/references/index.md diff --git a/skills/mlops/training/pytorch-fsdp/references/other.md b/optional-skills/mlops/pytorch-fsdp/references/other.md similarity index 100% rename from skills/mlops/training/pytorch-fsdp/references/other.md rename to optional-skills/mlops/pytorch-fsdp/references/other.md diff --git a/skills/mlops/models/stable-diffusion/SKILL.md b/optional-skills/mlops/stable-diffusion/SKILL.md similarity index 100% rename from skills/mlops/models/stable-diffusion/SKILL.md rename to optional-skills/mlops/stable-diffusion/SKILL.md diff --git a/skills/mlops/models/stable-diffusion/references/advanced-usage.md 
b/optional-skills/mlops/stable-diffusion/references/advanced-usage.md similarity index 100% rename from skills/mlops/models/stable-diffusion/references/advanced-usage.md rename to optional-skills/mlops/stable-diffusion/references/advanced-usage.md diff --git a/skills/mlops/models/stable-diffusion/references/troubleshooting.md b/optional-skills/mlops/stable-diffusion/references/troubleshooting.md similarity index 100% rename from skills/mlops/models/stable-diffusion/references/troubleshooting.md rename to optional-skills/mlops/stable-diffusion/references/troubleshooting.md diff --git a/skills/mlops/models/whisper/SKILL.md b/optional-skills/mlops/whisper/SKILL.md similarity index 100% rename from skills/mlops/models/whisper/SKILL.md rename to optional-skills/mlops/whisper/SKILL.md diff --git a/skills/mlops/models/whisper/references/languages.md b/optional-skills/mlops/whisper/references/languages.md similarity index 100% rename from skills/mlops/models/whisper/references/languages.md rename to optional-skills/mlops/whisper/references/languages.md diff --git a/skills/mcp/DESCRIPTION.md b/skills/mcp/DESCRIPTION.md index 627c20ea1b..30a0660333 100644 --- a/skills/mcp/DESCRIPTION.md +++ b/skills/mcp/DESCRIPTION.md @@ -1,3 +1,3 @@ --- -description: Skills for working with MCP (Model Context Protocol) servers, tools, and integrations. Includes the built-in native MCP client (configure servers in config.yaml for automatic tool discovery) and the mcporter CLI bridge for ad-hoc server interaction. +description: Skills for working with MCP (Model Context Protocol) servers, tools, and integrations. Documents the built-in native MCP client — configure servers in config.yaml for automatic tool discovery. 
--- diff --git a/skills/mlops/cloud/DESCRIPTION.md b/skills/mlops/cloud/DESCRIPTION.md deleted file mode 100644 index 32675823e0..0000000000 --- a/skills/mlops/cloud/DESCRIPTION.md +++ /dev/null @@ -1,3 +0,0 @@ ---- -description: GPU cloud providers and serverless compute platforms for ML workloads. ---- diff --git a/skills/mlops/models/DESCRIPTION.md b/skills/mlops/models/DESCRIPTION.md index 8170b517f5..8f7e669562 100644 --- a/skills/mlops/models/DESCRIPTION.md +++ b/skills/mlops/models/DESCRIPTION.md @@ -1,3 +1,3 @@ --- -description: Specific model architectures and tools — computer vision (CLIP, SAM, Stable Diffusion), speech (Whisper), audio generation (AudioCraft), and multimodal models (LLaVA). +description: Specific model architectures and tools — image segmentation (Segment Anything / SAM) and audio generation (AudioCraft / MusicGen). Additional model skills (CLIP, Stable Diffusion, Whisper, LLaVA) are available as optional skills. --- diff --git a/website/docs/reference/optional-skills-catalog.md b/website/docs/reference/optional-skills-catalog.md index 044060e9dd..f5dd2ac5bf 100644 --- a/website/docs/reference/optional-skills-catalog.md +++ b/website/docs/reference/optional-skills-catalog.md @@ -83,6 +83,7 @@ hermes skills uninstall | Skill | Description | |-------|-------------| | **fastmcp** | Build, test, inspect, install, and deploy MCP servers with FastMCP in Python. Covers wrapping APIs or databases as MCP tools, exposing resources or prompts, and deployment. | +| **mcporter** | The `mcporter` CLI — list, configure, auth, and call MCP servers/tools directly (HTTP or stdio) from the terminal. Useful for ad-hoc MCP interactions; for always-on tool discovery use the built-in `native-mcp` client instead. | ## Migration @@ -98,6 +99,7 @@ The largest optional category — covers the full ML pipeline from data curation |-------|-------------| | **accelerate** | Simplest distributed training API. 4 lines to add distributed support to any PyTorch script. 
Unified API for DeepSpeed/FSDP/Megatron/DDP. | | **chroma** | Open-source embedding database. Store embeddings and metadata, perform vector and full-text search. Simple 4-function API for RAG and semantic search. | +| **clip** | OpenAI's vision-language model connecting images and text. Zero-shot image classification, image-text matching, and cross-modal retrieval. Trained on 400M image-text pairs. Use for image search, content moderation, or vision-language tasks without fine-tuning. | | **faiss** | Facebook's library for efficient similarity search and clustering of dense vectors. Supports billions of vectors, GPU acceleration, and various index types (Flat, IVF, HNSW). | | **flash-attention** | Optimize transformer attention with Flash Attention for 2-4x speedup and 10-20x memory reduction. Supports PyTorch SDPA, flash-attn library, H100 FP8, and sliding window. | | **guidance** | Control LLM output with regex and grammars, guarantee valid JSON/XML/code generation, enforce structured formats, and build multi-step workflows with Guidance — Microsoft Research's constrained generation framework. | @@ -106,15 +108,20 @@ The largest optional category — covers the full ML pipeline from data curation | **instructor** | Extract structured data from LLM responses with Pydantic validation, retry failed extractions automatically, and stream partial results. | | **lambda-labs** | Reserved and on-demand GPU cloud instances for ML training and inference. SSH access, persistent filesystems, and multi-node clusters. | | **llava** | Large Language and Vision Assistant — visual instruction tuning and image-based conversations combining CLIP vision with LLaMA language models. | +| **modal** | Serverless GPU cloud platform for running ML workloads. On-demand GPU access without infrastructure management, ML model deployment as APIs, or batch jobs with automatic scaling. | | **nemo-curator** | GPU-accelerated data curation for LLM training. 
Fuzzy deduplication (16x faster), quality filtering (30+ heuristics), semantic dedup, PII redaction. Scales with RAPIDS. | +| **peft-fine-tuning** | Parameter-efficient fine-tuning for LLMs using LoRA, QLoRA, and 25+ methods. Train <1% of parameters with minimal accuracy loss for 7B–70B models on limited GPU memory. HuggingFace's official PEFT library. | | **pinecone** | Managed vector database for production AI. Auto-scaling, hybrid search (dense + sparse), metadata filtering, and low latency (under 100ms p95). | +| **pytorch-fsdp** | Expert guidance for Fully Sharded Data Parallel training with PyTorch FSDP — parameter sharding, mixed precision, CPU offloading, FSDP2. | | **pytorch-lightning** | High-level PyTorch framework with Trainer class, automatic distributed training (DDP/FSDP/DeepSpeed), callbacks, and minimal boilerplate. | | **qdrant** | High-performance vector similarity search engine. Rust-powered with fast nearest neighbor search, hybrid search with filtering, and scalable vector storage. | | **saelens** | Train and analyze Sparse Autoencoders (SAEs) using SAELens to decompose neural network activations into interpretable features. | | **simpo** | Simple Preference Optimization — reference-free alternative to DPO with better performance (+6.4 pts on AlpacaEval 2.0). No reference model needed. | | **slime** | LLM post-training with RL using Megatron+SGLang framework. Custom data generation workflows and tight Megatron-LM integration for RL scaling. | +| **stable-diffusion-image-generation** | State-of-the-art text-to-image generation with Stable Diffusion via HuggingFace Diffusers. Text-to-image, image-to-image translation, inpainting, and custom diffusion pipelines. | | **tensorrt-llm** | Optimize LLM inference with NVIDIA TensorRT for maximum throughput. 10-100x faster than PyTorch on A100/H100 with quantization (FP8/INT4) and in-flight batching. | | **torchtitan** | PyTorch-native distributed LLM pretraining with 4D parallelism (FSDP2, TP, PP, CP). 
Scale from 8 to 512+ GPUs with Float8 and torch.compile. | +| **whisper** | OpenAI's general-purpose speech recognition. 99 languages, transcription, translation to English, and language ID. Six model sizes from tiny (39M) to large (1550M). Best for robust multilingual ASR. | ## Productivity diff --git a/website/docs/reference/skills-catalog.md b/website/docs/reference/skills-catalog.md index 16be6a6581..ffe489d360 100644 --- a/website/docs/reference/skills-catalog.md +++ b/website/docs/reference/skills-catalog.md @@ -114,7 +114,6 @@ Skills for working with MCP (Model Context Protocol) servers, tools, and integra | Skill | Description | Path | |-------|-------------|------| -| `mcporter` | Use the mcporter CLI to list, configure, auth, and call MCP servers/tools directly (HTTP or stdio), including ad-hoc servers, config edits, and CLI/type generation. | `mcp/mcporter` | | `native-mcp` | Built-in MCP (Model Context Protocol) client that connects to external MCP servers, discovers their tools, and registers them as native Hermes Agent tools. Supports stdio and HTTP transports with automatic reconnection, security filtering, and zero-config tool injection. | `mcp/native-mcp` | ## media @@ -136,14 +135,6 @@ General-purpose ML operations tools — model hub management, dataset operations |-------|-------------|------| | `huggingface-hub` | Hugging Face Hub CLI (hf) — search, download, and upload models and datasets, manage repos, query datasets with SQL, deploy inference endpoints, manage Spaces and buckets. | `mlops/huggingface-hub` | -## mlops/cloud - -GPU cloud providers and serverless compute platforms for ML workloads. - -| Skill | Description | Path | -|-------|-------------|------| -| `modal-serverless-gpu` | Serverless GPU cloud platform for running ML workloads. Use when you need on-demand GPU access without infrastructure management, deploying ML models as APIs, or running batch jobs with automatic scaling. 
| `mlops/cloud/modal` | - ## mlops/evaluation Model evaluation benchmarks, experiment tracking, and interpretability tools. @@ -166,15 +157,12 @@ Model serving, quantization (GGUF/GPTQ), structured output, inference optimizati ## mlops/models -Specific model architectures — computer vision (CLIP, SAM, Stable Diffusion), speech (Whisper), and audio generation (AudioCraft). +Specific model architectures — image segmentation (SAM) and audio generation (AudioCraft / MusicGen). Additional model skills (CLIP, Stable Diffusion, Whisper, LLaVA) are available as optional skills. | Skill | Description | Path | |-------|-------------|------| | `audiocraft-audio-generation` | PyTorch library for audio generation including text-to-music (MusicGen) and text-to-sound (AudioGen). Use when you need to generate music from text descriptions, create sound effects, or perform melody-conditioned music generation. | `mlops/models/audiocraft` | -| `clip` | OpenAI's model connecting vision and language. Enables zero-shot image classification, image-text matching, and cross-modal retrieval. Trained on 400M image-text pairs. Use for image search, content moderation, or vision-language tasks without fine-tuning. Best for general-pur… | `mlops/models/clip` | | `segment-anything-model` | Foundation model for image segmentation with zero-shot transfer. Use when you need to segment any object in images using points, boxes, or masks as prompts, or automatically generate all object masks in an image. | `mlops/models/segment-anything` | -| `stable-diffusion-image-generation` | State-of-the-art text-to-image generation with Stable Diffusion models via HuggingFace Diffusers. Use when generating images from text prompts, performing image-to-image translation, inpainting, or building custom diffusion pipelines. | `mlops/models/stable-diffusion` | -| `whisper` | OpenAI's general-purpose speech recognition model. Supports 99 languages, transcription, translation to English, and language identification. 
Six model sizes from tiny (39M params) to large (1550M params). Use for speech-to-text, podcast transcription, or multilingual audio pr… | `mlops/models/whisper` | ## mlops/research @@ -192,8 +180,6 @@ Fine-tuning, RLHF/DPO/GRPO training, distributed training frameworks, and optimi |-------|-------------|------| | `axolotl` | Expert guidance for fine-tuning LLMs with Axolotl - YAML configs, 100+ models, LoRA/QLoRA, DPO/KTO/ORPO/GRPO, multimodal support | `mlops/training/axolotl` | | `fine-tuning-with-trl` | Fine-tune LLMs using reinforcement learning with TRL - SFT for instruction tuning, DPO for preference alignment, PPO/GRPO for reward optimization, and reward model training. Use when need RLHF, align model with preferences, or train from human feedback. Works with HuggingFace … | `mlops/training/trl-fine-tuning` | -| `peft-fine-tuning` | Parameter-efficient fine-tuning for LLMs using LoRA, QLoRA, and 25+ methods. Use when fine-tuning large models (7B-70B) with limited GPU memory, when you need to train <1% of parameters with minimal accuracy loss, or for multi-adapter serving. 
HuggingFace's official library… | `mlops/training/peft` | -| `pytorch-fsdp` | Expert guidance for Fully Sharded Data Parallel training with PyTorch FSDP - parameter sharding, mixed precision, CPU offloading, FSDP2 | `mlops/training/pytorch-fsdp` | | `unsloth` | Expert guidance for fast fine-tuning with Unsloth - 2-5x faster training, 50-80% less memory, LoRA/QLoRA optimization | `mlops/training/unsloth` | ## note-taking From 206a449b2991bd9e2b943483ae785a96ec5ce6a2 Mon Sep 17 00:00:00 2001 From: Teknium <127238744+teknium1@users.noreply.github.com> Date: Sun, 19 Apr 2026 05:18:19 -0700 Subject: [PATCH 004/547] feat(webhook): direct delivery mode for zero-LLM push notifications (#12473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit External services can now push plain-text notifications to a user's chat via the webhook adapter without invoking the agent. Set deliver_only=true on a route and the rendered prompt template becomes the literal message body — dispatched directly to the configured target (Telegram, Discord, Slack, GitHub PR comment, etc.). Reuses all existing webhook infrastructure: HMAC-SHA256 signature validation, per-route rate limiting, idempotency cache, body-size limits, template rendering with dot-notation, home-channel fallback. No new HTTP server, no new auth scheme, no new port. Use cases: Supabase/Firebase webhooks → user notifications, monitoring alert forwarding, inter-agent pings, background job completion alerts. Changes: - gateway/platforms/webhook.py: new _direct_deliver() helper + early dispatch branch in _handle_webhook when deliver_only=true. Startup validation rejects deliver_only with deliver=log. - hermes_cli/main.py + hermes_cli/webhook.py: --deliver-only flag on subscribe; list/show output marks direct-delivery routes. - website/docs/user-guide/messaging/webhooks.md: new Direct Delivery Mode section with config example, CLI example, response codes.
- skills/devops/webhook-subscriptions/SKILL.md: document --deliver-only with use cases (bumped to v1.1.0). - tests/gateway/test_webhook_deliver_only.py: 14 new tests covering agent bypass, template rendering, status codes, HMAC still enforced, idempotency still applies, rate limit still applies, startup validation, and direct-deliver dispatch. Validation: 78 webhook tests pass (64 existing + 14 new). E2E verified with real aiohttp server + real urllib POST — agent not invoked, target adapter.send() called with rendered template, duplicate delivery_id suppressed. Closes the gap identified in PR #12117 (thanks to @H1an1 / Antenna team) without adding a second HTTP ingress server. --- gateway/platforms/webhook.py | 103 ++++ hermes_cli/main.py | 7 + hermes_cli/webhook.py | 16 +- skills/devops/webhook-subscriptions/SKILL.md | 29 +- tests/gateway/test_webhook_deliver_only.py | 473 ++++++++++++++++++ website/docs/user-guide/messaging/webhooks.md | 75 +++ 6 files changed, 699 insertions(+), 4 deletions(-) create mode 100644 tests/gateway/test_webhook_deliver_only.py diff --git a/gateway/platforms/webhook.py b/gateway/platforms/webhook.py index c37445b17e..9995ac3870 100644 --- a/gateway/platforms/webhook.py +++ b/gateway/platforms/webhook.py @@ -13,6 +13,10 @@ Each route defines: - skills: optional list of skills to load for the agent - deliver: where to send the response (github_comment, telegram, etc.) - deliver_extra: additional delivery config (repo, pr_number, chat_id) + - deliver_only: if true, skip the agent — the rendered prompt IS the + message that gets delivered. Use for external push notifications + (Supabase, monitoring alerts, inter-agent pings) where zero LLM cost + and sub-second delivery matter more than agent reasoning. Security: - HMAC secret is required per route (validated at startup) @@ -122,6 +126,19 @@ class WebhookAdapter(BasePlatformAdapter): f"For testing without auth, set secret to '{_INSECURE_NO_AUTH}'." 
) + # deliver_only routes bypass the agent — the POST body becomes a + # direct push notification via the configured delivery target. + # Validate up-front so misconfiguration surfaces at startup rather + # than on the first webhook POST. + if route.get("deliver_only"): + deliver = route.get("deliver", "log") + if not deliver or deliver == "log": + raise ValueError( + f"[webhook] Route '{name}' has deliver_only=true but " + f"deliver is '{deliver}'. Direct delivery requires a " + f"real target (telegram, discord, slack, github_comment, etc.)." + ) + app = web.Application() app.router.add_get("/health", self._handle_health) app.router.add_post("/webhooks/{route_name}", self._handle_webhook) @@ -419,6 +436,64 @@ class WebhookAdapter(BasePlatformAdapter): ) self._seen_deliveries[delivery_id] = now + # ── Direct delivery mode (deliver_only) ───────────────── + # Skip the agent entirely — the rendered prompt IS the message we + # deliver. Use case: external services (Supabase, monitoring, + # cron jobs, other agents) that need to push a plain notification + # to a user's chat with zero LLM cost. Reuses the same HMAC auth, + # rate limiting, idempotency, and template rendering as agent mode. 
+ if route_config.get("deliver_only"): + delivery = { + "deliver": route_config.get("deliver", "log"), + "deliver_extra": self._render_delivery_extra( + route_config.get("deliver_extra", {}), payload + ), + "payload": payload, + } + logger.info( + "[webhook] direct-deliver event=%s route=%s target=%s msg_len=%d delivery=%s", + event_type, + route_name, + delivery["deliver"], + len(prompt), + delivery_id, + ) + try: + result = await self._direct_deliver(prompt, delivery) + except Exception: + logger.exception( + "[webhook] direct-deliver failed route=%s delivery=%s", + route_name, + delivery_id, + ) + return web.json_response( + {"status": "error", "error": "Delivery failed", "delivery_id": delivery_id}, + status=502, + ) + + if result.success: + return web.json_response( + { + "status": "delivered", + "route": route_name, + "target": delivery["deliver"], + "delivery_id": delivery_id, + }, + status=200, + ) + # Delivery attempted but target rejected it — surface as 502 + # with a generic error (don't leak adapter-level detail). + logger.warning( + "[webhook] direct-deliver target rejected route=%s target=%s error=%s", + route_name, + delivery["deliver"], + result.error, + ) + return web.json_response( + {"status": "error", "error": "Delivery failed", "delivery_id": delivery_id}, + status=502, + ) + # Use delivery_id in session key so concurrent webhooks on the # same route get independent agent runs (not queued/interrupted). session_chat_id = f"webhook:{route_name}:{delivery_id}" @@ -572,6 +647,34 @@ class WebhookAdapter(BasePlatformAdapter): # Response delivery # ------------------------------------------------------------------ + async def _direct_deliver( + self, content: str, delivery: dict + ) -> SendResult: + """Deliver *content* directly without invoking the agent. + + Used by ``deliver_only`` routes: the rendered template becomes the + literal message body, and we dispatch to the same delivery helpers + that the agent-mode ``send()`` flow uses. 
All target types that + work in agent mode work here — Telegram, Discord, Slack, GitHub + PR comments, etc. + """ + deliver_type = delivery.get("deliver", "log") + + if deliver_type == "log": + # Shouldn't reach here — startup validation rejects deliver_only + # with deliver=log — but guard defensively. + logger.info("[webhook] direct-deliver log-only: %s", content[:200]) + return SendResult(success=True) + + if deliver_type == "github_comment": + return await self._deliver_github_comment(content, delivery) + + # Fall through to the cross-platform dispatcher, which validates the + # target name and routes via the gateway runner. + return await self._deliver_cross_platform( + deliver_type, content, delivery + ) + async def _deliver_github_comment( self, content: str, delivery: dict ) -> SendResult: diff --git a/hermes_cli/main.py b/hermes_cli/main.py index 7e0220d918..71fc6ae381 100644 --- a/hermes_cli/main.py +++ b/hermes_cli/main.py @@ -7002,6 +7002,13 @@ For more help on a command: wh_sub.add_argument( "--secret", default="", help="HMAC secret (auto-generated if omitted)" ) + wh_sub.add_argument( + "--deliver-only", + action="store_true", + help="Skip the agent — deliver the rendered prompt directly as the " + "message. Zero LLM cost. Requires --deliver to be a real target " + "(not 'log').", + ) webhook_subparsers.add_parser( "list", aliases=["ls"], help="List all dynamic subscriptions" diff --git a/hermes_cli/webhook.py b/hermes_cli/webhook.py index 8ff135e29e..378f11b4a7 100644 --- a/hermes_cli/webhook.py +++ b/hermes_cli/webhook.py @@ -155,6 +155,15 @@ def _cmd_subscribe(args): "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), } + if getattr(args, "deliver_only", False): + if route["deliver"] == "log": + print( + "Error: --deliver-only requires --deliver to be a real target " + "(telegram, discord, slack, github_comment, etc.) — not 'log'." 
+ ) + return + route["deliver_only"] = True + if args.deliver_chat_id: route["deliver_extra"] = {"chat_id": args.deliver_chat_id} @@ -172,9 +181,12 @@ def _cmd_subscribe(args): else: print(" Events: (all)") print(f" Deliver: {route['deliver']}") + if route.get("deliver_only"): + print(" Mode: direct delivery (no agent, zero LLM cost)") if route.get("prompt"): prompt_preview = route["prompt"][:80] + ("..." if len(route["prompt"]) > 80 else "") - print(f" Prompt: {prompt_preview}") + label = "Message" if route.get("deliver_only") else "Prompt" + print(f" {label}: {prompt_preview}") print(f"\n Configure your service to POST to the URL above.") print(f" Use the secret for HMAC-SHA256 signature validation.") print(f" The gateway must be running to receive events (hermes gateway run).\n") @@ -192,6 +204,8 @@ def _cmd_list(args): for name, route in subs.items(): events = ", ".join(route.get("events", [])) or "(all)" deliver = route.get("deliver", "log") + if route.get("deliver_only"): + deliver = f"{deliver} (direct — no agent)" desc = route.get("description", "") print(f" ◆ {name}") if desc: diff --git a/skills/devops/webhook-subscriptions/SKILL.md b/skills/devops/webhook-subscriptions/SKILL.md index e5ab6d5880..dd20a19b41 100644 --- a/skills/devops/webhook-subscriptions/SKILL.md +++ b/skills/devops/webhook-subscriptions/SKILL.md @@ -1,10 +1,10 @@ --- name: webhook-subscriptions -description: Create and manage webhook subscriptions for event-driven agent activation. Use when the user wants external services to trigger agent runs automatically. -version: 1.0.0 +description: Create and manage webhook subscriptions for event-driven agent activation, or for direct push notifications (zero LLM cost). Use when the user wants external services to trigger agent runs OR push notifications to chats. 
+version: 1.1.0 metadata: hermes: - tags: [webhook, events, automation, integrations] + tags: [webhook, events, automation, integrations, notifications, push] --- # Webhook Subscriptions @@ -154,6 +154,29 @@ hermes webhook subscribe alerts \ --deliver origin ``` +### Direct delivery (no agent, zero LLM cost) + +For use cases where you just want to push a notification through to a user's chat — no reasoning, no agent loop — add `--deliver-only`. The rendered `--prompt` template becomes the literal message body and is dispatched directly to the target adapter. + +Use this for: +- External service push notifications (Supabase/Firebase webhooks → Telegram) +- Monitoring alerts that should forward verbatim +- Inter-agent pings where one agent is telling another agent's user something +- Any webhook where an LLM round trip would be wasted effort + +```bash +hermes webhook subscribe antenna-matches \ + --deliver telegram \ + --deliver-chat-id "123456789" \ + --deliver-only \ + --prompt "🎉 New match: {match.user_name} matched with you!" \ + --description "Antenna match notifications" +``` + +The POST returns `200 OK` on successful delivery, `502` on target failure — so upstream services can retry intelligently. HMAC auth, rate limits, and idempotency still apply. + +Requires `--deliver` to be a real target (telegram, discord, slack, github_comment, etc.) — `--deliver log` is rejected because log-only direct delivery is pointless. + ## Security - Each subscription gets an auto-generated HMAC-SHA256 secret (or provide your own with `--secret`) diff --git a/tests/gateway/test_webhook_deliver_only.py b/tests/gateway/test_webhook_deliver_only.py new file mode 100644 index 0000000000..d73a152015 --- /dev/null +++ b/tests/gateway/test_webhook_deliver_only.py @@ -0,0 +1,473 @@ +"""Tests for the webhook adapter's ``deliver_only`` route mode. 
+ +``deliver_only`` lets external services (Supabase webhooks, monitoring +alerts, background jobs, other agents) push plain-text notifications to +a user's chat via the webhook adapter WITHOUT invoking the agent. The +rendered prompt template becomes the literal message body. + +Covers: +- Agent is NOT invoked (``handle_message`` never called) +- Rendered content is delivered to the target platform adapter +- HTTP returns 200 OK on success, 502 on delivery failure +- Startup validation rejects ``deliver_only`` without a real delivery target +- HMAC auth, rate limiting, and idempotency still apply +""" + +import asyncio +import hashlib +import hmac +import json +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from aiohttp import web +from aiohttp.test_utils import TestClient, TestServer + +from gateway.config import Platform, PlatformConfig +from gateway.platforms.base import MessageEvent, SendResult +from gateway.platforms.webhook import WebhookAdapter, _INSECURE_NO_AUTH + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _make_adapter(routes, **extra_kw) -> WebhookAdapter: + extra = {"host": "0.0.0.0", "port": 0, "routes": routes} + extra.update(extra_kw) + config = PlatformConfig(enabled=True, extra=extra) + return WebhookAdapter(config) + + +def _create_app(adapter: WebhookAdapter) -> web.Application: + app = web.Application() + app.router.add_get("/health", adapter._handle_health) + app.router.add_post("/webhooks/{route_name}", adapter._handle_webhook) + return app + + +def _wire_mock_target(adapter: WebhookAdapter, platform_name: str = "telegram"): + """Attach a gateway_runner with a mocked target adapter.""" + mock_target = AsyncMock() + mock_target.send = AsyncMock(return_value=SendResult(success=True)) + + mock_runner = MagicMock() + mock_runner.adapters = {Platform(platform_name): mock_target} + 
mock_runner.config.get_home_channel.return_value = None + + adapter.gateway_runner = mock_runner + return mock_target + + +# =================================================================== +# Core behaviour: agent bypass +# =================================================================== + +class TestDeliverOnlyBypassesAgent: + """The whole point of the feature — handle_message must not be called.""" + + @pytest.mark.asyncio + async def test_post_delivers_directly_without_agent(self): + routes = { + "match-alert": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "12345"}, + "prompt": "{payload.user} matched with {payload.other}!", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + + # Guard: handle_message must NOT be called in deliver_only mode + handle_message_calls: list[MessageEvent] = [] + + async def _capture(event): + handle_message_calls.append(event) + + adapter.handle_message = _capture + + app = _create_app(adapter) + body = json.dumps( + {"payload": {"user": "alice", "other": "bob"}} + ).encode() + + async with TestClient(TestServer(app)) as cli: + resp = await cli.post( + "/webhooks/match-alert", + data=body, + headers={ + "Content-Type": "application/json", + "X-GitHub-Delivery": "delivery-1", + }, + ) + assert resp.status == 200 + data = await resp.json() + assert data["status"] == "delivered" + assert data["route"] == "match-alert" + assert data["target"] == "telegram" + + # Let any background tasks settle before asserting no agent call + await asyncio.sleep(0.05) + + # Agent was NOT invoked + assert handle_message_calls == [] + + # Target adapter.send() WAS called with the rendered template + mock_target.send.assert_awaited_once() + call_args = mock_target.send.await_args + chat_id_arg, content_arg = call_args.args[0], call_args.args[1] + assert chat_id_arg == "12345" + assert content_arg == "alice matched with bob!" 
+ + @pytest.mark.asyncio + async def test_template_rendering_works(self): + """Dot-notation template variables resolve in deliver_only mode.""" + routes = { + "alert": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "chat-1"}, + "prompt": "Build {build.number} status: {build.status}", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + app = _create_app(adapter) + + async with TestClient(TestServer(app)) as cli: + resp = await cli.post( + "/webhooks/alert", + json={"build": {"number": 77, "status": "FAILED"}}, + headers={"X-GitHub-Delivery": "d-render-1"}, + ) + assert resp.status == 200 + + mock_target.send.assert_awaited_once() + content_arg = mock_target.send.await_args.args[1] + assert content_arg == "Build 77 status: FAILED" + + @pytest.mark.asyncio + async def test_thread_id_passed_through(self): + """deliver_extra.thread_id flows through to the target adapter.""" + routes = { + "r": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1", "thread_id": "topic-42"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + resp = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "d-thread-1"}, + ) + assert resp.status == 200 + + assert mock_target.send.await_args.kwargs["metadata"] == { + "thread_id": "topic-42" + } + + +# =================================================================== +# HTTP status codes +# =================================================================== + +class TestDeliverOnlyStatusCodes: + + @pytest.mark.asyncio + async def test_delivery_failure_returns_502(self): + """If the target adapter returns SendResult(success=False), 502.""" + routes = { + "r": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + 
"deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + mock_target.send = AsyncMock( + return_value=SendResult(success=False, error="rate limited by tg") + ) + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + resp = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "d-fail-1"}, + ) + assert resp.status == 502 + data = await resp.json() + # Generic error — no adapter-level detail leaks + assert data["error"] == "Delivery failed" + assert "rate limited" not in json.dumps(data) + + @pytest.mark.asyncio + async def test_delivery_exception_returns_502(self): + """If adapter.send() raises, we return 502 (not 500).""" + routes = { + "r": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + mock_target.send = AsyncMock(side_effect=RuntimeError("tg exploded")) + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + resp = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "d-exc-1"}, + ) + assert resp.status == 502 + data = await resp.json() + assert data["error"] == "Delivery failed" + # Exception message must not leak + assert "exploded" not in json.dumps(data) + + @pytest.mark.asyncio + async def test_target_platform_not_connected_returns_502(self): + """deliver_only to a platform the gateway doesn't have → 502.""" + routes = { + "r": { + "secret": _INSECURE_NO_AUTH, + "deliver": "discord", # not configured in mock runner + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + _wire_mock_target(adapter, platform_name="telegram") # only TG wired + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as 
cli: + resp = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "d-no-platform-1"}, + ) + assert resp.status == 502 + + +# =================================================================== +# Startup validation +# =================================================================== + +class TestDeliverOnlyStartupValidation: + + @pytest.mark.asyncio + async def test_deliver_only_with_log_deliver_rejected(self): + """deliver_only=true + deliver=log is nonsense — reject at connect().""" + routes = { + "bad": { + "secret": _INSECURE_NO_AUTH, + "deliver": "log", + "deliver_only": True, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + with pytest.raises(ValueError, match="deliver_only=true but deliver is 'log'"): + await adapter.connect() + + @pytest.mark.asyncio + async def test_deliver_only_with_missing_deliver_rejected(self): + """deliver_only=true with no deliver field defaults to 'log' → reject.""" + routes = { + "bad": { + "secret": _INSECURE_NO_AUTH, + # no deliver field + "deliver_only": True, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + with pytest.raises(ValueError, match="deliver_only=true"): + await adapter.connect() + + @pytest.mark.asyncio + async def test_deliver_only_with_real_target_accepted(self): + """Sanity check — a valid deliver_only config passes validation.""" + routes = { + "good": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + # connect() does more than validation (binds a socket) — we just + # want to verify the validation doesn't raise. Call it and tear + # down immediately. 
+ try: + started = await adapter.connect() + if started: + await adapter.disconnect() + except ValueError: + pytest.fail("valid deliver_only config should not raise ValueError") + + +# =================================================================== +# Security + reliability invariants still hold +# =================================================================== + +class TestDeliverOnlySecurityInvariants: + + @pytest.mark.asyncio + async def test_hmac_still_enforced(self): + """deliver_only does NOT bypass HMAC validation.""" + secret = "real-secret-123" + routes = { + "r": { + "secret": secret, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + # No signature header → reject + resp = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "d-noauth-1"}, + ) + assert resp.status == 401 + + # Target never called + mock_target.send.assert_not_awaited() + + @pytest.mark.asyncio + async def test_idempotency_still_applies(self): + """Same delivery_id posted twice → second is suppressed.""" + routes = { + "r": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes) + mock_target = _wire_mock_target(adapter) + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + r1 = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "dup-1"}, + ) + assert r1.status == 200 + + r2 = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "dup-1"}, + ) + # Existing webhook adapter treats duplicates as 200 + status=duplicate + assert r2.status == 200 + data = await r2.json() + assert data["status"] == "duplicate" + + # Target was called exactly once + 
assert mock_target.send.await_count == 1 + + @pytest.mark.asyncio + async def test_rate_limit_still_applies(self): + """Route-level rate limit caps deliver_only POSTs too.""" + routes = { + "r": { + "secret": _INSECURE_NO_AUTH, + "deliver": "telegram", + "deliver_only": True, + "deliver_extra": {"chat_id": "c-1"}, + "prompt": "hi", + } + } + adapter = _make_adapter(routes, rate_limit=2) + _wire_mock_target(adapter) + + app = _create_app(adapter) + async with TestClient(TestServer(app)) as cli: + for i in range(2): + r = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": f"rl-{i}"}, + ) + assert r.status == 200 + + # Third within the window → 429 + r3 = await cli.post( + "/webhooks/r", + json={}, + headers={"X-GitHub-Delivery": "rl-3"}, + ) + assert r3.status == 429 + + +# =================================================================== +# Unit: _direct_deliver dispatch +# =================================================================== + +class TestDirectDeliverUnit: + + @pytest.mark.asyncio + async def test_dispatches_to_cross_platform_for_messaging_targets(self): + adapter = _make_adapter({}) + mock_target = _wire_mock_target(adapter, "telegram") + + result = await adapter._direct_deliver( + "hello", + {"deliver": "telegram", "deliver_extra": {"chat_id": "c-1"}}, + ) + assert result.success is True + mock_target.send.assert_awaited_once_with( + "c-1", "hello", metadata=None + ) + + @pytest.mark.asyncio + async def test_dispatches_to_github_comment(self): + adapter = _make_adapter({}) + with patch.object( + adapter, "_deliver_github_comment", + new=AsyncMock(return_value=SendResult(success=True)), + ) as mock_gh: + result = await adapter._direct_deliver( + "review body", + { + "deliver": "github_comment", + "deliver_extra": {"repo": "org/r", "pr_number": "1"}, + }, + ) + assert result.success is True + mock_gh.assert_awaited_once() diff --git a/website/docs/user-guide/messaging/webhooks.md 
b/website/docs/user-guide/messaging/webhooks.md index bbf04bcb4f..2c60624fb6 100644 --- a/website/docs/user-guide/messaging/webhooks.md +++ b/website/docs/user-guide/messaging/webhooks.md @@ -72,6 +72,7 @@ Routes define how different webhook sources are handled. Each route is a named e | `skills` | No | List of skill names to load for the agent run. | | `deliver` | No | Where to send the response: `github_comment`, `telegram`, `discord`, `slack`, `signal`, `sms`, `whatsapp`, `matrix`, `mattermost`, `homeassistant`, `email`, `dingtalk`, `feishu`, `wecom`, `weixin`, `bluebubbles`, `qqbot`, or `log` (default). | | `deliver_extra` | No | Additional delivery config — keys depend on `deliver` type (e.g. `repo`, `pr_number`, `chat_id`). Values support the same `{dot.notation}` templates as `prompt`. | +| `deliver_only` | No | If `true`, skip the agent entirely — the rendered `prompt` template becomes the literal message that gets delivered. Zero LLM cost, sub-second delivery. See [Direct Delivery Mode](#direct-delivery-mode) for use cases. Requires `deliver` to be a real target (not `log`). | ### Full example @@ -240,6 +241,80 @@ For cross-platform delivery, the target platform must also be enabled and connec --- +## Direct Delivery Mode {#direct-delivery-mode} + +By default, every webhook POST triggers an agent run — the payload becomes a prompt, the agent processes it, and the agent's response is delivered. This costs LLM tokens on every event. + +For use cases where you just want to **push a plain notification** — no reasoning, no agent loop, just deliver the message — set `deliver_only: true` on the route. The rendered `prompt` template becomes the literal message body, and the adapter dispatches it directly to the configured delivery target. 
+ +### When to use direct delivery + +- **External service push** — Supabase/Firebase webhook fires on a database change → notify a user in Telegram instantly +- **Monitoring alerts** — Datadog/Grafana alert webhook → push to a Discord channel +- **Inter-agent pings** — Agent A notifies Agent B's user that a long-running task finished +- **Background job completion** — Cron job finishes → post result to Slack + +Benefits: + +- **Zero LLM tokens** — the agent is never invoked +- **Sub-second delivery** — a single adapter call, no reasoning loop +- **Same security as agent mode** — HMAC auth, rate limits, idempotency, and body-size limits all still apply +- **Synchronous response** — the POST returns `200 OK` once delivery succeeds, or `502` if the target rejects it, so your upstream service can retry intelligently + +### Example: Telegram push from Supabase + +```yaml +platforms: + webhook: + enabled: true + extra: + port: 8644 + secret: "global-secret" + routes: + antenna-matches: + secret: "antenna-webhook-secret" + deliver: "telegram" + deliver_only: true + prompt: "🎉 New match: {match.user_name} matched with you!" + deliver_extra: + chat_id: "{match.telegram_chat_id}" +``` + +Your Supabase edge function signs the payload with HMAC-SHA256 and POSTs to `https://your-server:8644/webhooks/antenna-matches`. The webhook adapter validates the signature, renders the template from the payload, delivers to Telegram, and returns `200 OK`. + +### Example: Dynamic subscription via CLI + +```bash +hermes webhook subscribe antenna-matches \ + --deliver telegram \ + --deliver-chat-id "123456789" \ + --deliver-only \ + --prompt "🎉 New match: {match.user_name} matched with you!" \ + --description "Antenna match notifications" +``` + +### Response codes + +| Status | Meaning | +|--------|---------| +| `200 OK` | Delivered successfully. 
Body: `{"status": "delivered", "route": "...", "target": "...", "delivery_id": "..."}` | +| `200 OK` (status=duplicate) | Duplicate `X-GitHub-Delivery` ID within the idempotency TTL (1 hour). Not re-delivered. | +| `401 Unauthorized` | HMAC signature invalid or missing. | +| `400 Bad Request` | Malformed JSON body. | +| `404 Not Found` | Unknown route name. | +| `413 Payload Too Large` | Body exceeded `max_body_bytes`. | +| `429 Too Many Requests` | Route rate limit exceeded. | +| `502 Bad Gateway` | Target adapter rejected the message or raised. The error is logged server-side; the response body is a generic `Delivery failed` to avoid leaking adapter internals. | + +### Configuration gotchas + +- `deliver_only: true` requires `deliver` to be a real target. `deliver: log` (or omitting `deliver`) is rejected at startup — the adapter refuses to start if it finds a misconfigured route. +- The `skills` field is ignored in direct delivery mode (no agent runs, so there's nothing to inject skills into). +- Template rendering uses the same `{dot.notation}` syntax as agent mode, including the `{__raw__}` token. +- Idempotency uses the same `X-GitHub-Delivery` / `X-Request-ID` header — retries with the same ID return `status=duplicate` and do NOT re-deliver. + +--- + ## Dynamic Subscriptions (CLI) {#dynamic-subscriptions} In addition to static routes in `config.yaml`, you can create webhook subscriptions dynamically using the `hermes webhook` CLI command. This is especially useful when the agent itself needs to set up event-driven triggers. From 7fa01fafa557f4cba59eb95a61a7343559bc2b44 Mon Sep 17 00:00:00 2001 From: Mibayy Date: Sun, 29 Mar 2026 22:48:28 -0700 Subject: [PATCH 005/547] feat: add maps skill (OpenStreetMap + Overpass + OSRM, no API key) Adds a maps optional skill with 8 commands, 44 POI categories, and zero external dependencies. Uses free open data: Nominatim, Overpass API, OSRM, and TimeAPI.io. 
Commands: search, reverse, nearby, distance, directions, timezone, area, bbox. Improvements over original PR #2015: - Fixed directory structure (optional-skills/productivity/maps/) - Fixed distance argparse (--to flag instead of broken dual nargs=+) - Fixed timezone (TimeAPI.io instead of broken worldtimeapi heuristic) - Expanded POI categories from 12 to 44 - Added directions command with turn-by-turn OSRM steps - Added area command (bounding box + dimensions for a named place) - Added bbox command (POI search within a geographic rectangle) - Added 23 unit tests - Improved haversine (atan2 for numerical stability) - Comprehensive SKILL.md with workflow examples Co-authored-by: Mibayy --- optional-skills/productivity/maps/SKILL.md | 153 +++ .../productivity/maps/scripts/maps_client.py | 1143 +++++++++++++++++ .../maps/tests/test_maps_client.py | 177 +++ 3 files changed, 1473 insertions(+) create mode 100644 optional-skills/productivity/maps/SKILL.md create mode 100644 optional-skills/productivity/maps/scripts/maps_client.py create mode 100644 optional-skills/productivity/maps/tests/test_maps_client.py diff --git a/optional-skills/productivity/maps/SKILL.md b/optional-skills/productivity/maps/SKILL.md new file mode 100644 index 0000000000..59e0359d56 --- /dev/null +++ b/optional-skills/productivity/maps/SKILL.md @@ -0,0 +1,153 @@ +--- +name: maps +description: > + Geocoding, reverse geocoding, nearby POI search (44 categories), + distance/routing, turn-by-turn directions, timezone lookup, bounding box + search, and area info. Uses OpenStreetMap + Overpass + OSRM. Free, no API key. +version: 1.1.0 +author: Mibayy +license: MIT +metadata: + hermes: + tags: [maps, geocoding, places, routing, distance, directions, openstreetmap, nominatim, overpass, osrm] + category: productivity + requires_toolsets: [terminal] +--- + +# Maps Skill + +Location intelligence using free, open data sources. 
8 commands, 44 POI +categories, zero dependencies (Python stdlib only), no API key required. + +Data sources: OpenStreetMap/Nominatim, Overpass API, OSRM, TimeAPI.io. + +## When to Use + +- User wants coordinates for a place name +- User has coordinates and wants the address +- User asks for nearby restaurants, hospitals, pharmacies, hotels, etc. +- User wants driving/walking/cycling distance or travel time +- User wants turn-by-turn directions between two places +- User wants timezone information for a location +- User wants to search for POIs within a geographic area + +## Prerequisites + +Python 3.8+ (stdlib only — no pip installs needed). + +Script path after install: `~/.hermes/skills/maps/scripts/maps_client.py` + +## Commands + +```bash +MAPS=~/.hermes/skills/maps/scripts/maps_client.py +``` + +### search — Geocode a place name + +```bash +python3 $MAPS search "Eiffel Tower" +python3 $MAPS search "1600 Pennsylvania Ave, Washington DC" +``` + +Returns: lat, lon, display name, type, bounding box, importance score. + +### reverse — Coordinates to address + +```bash +python3 $MAPS reverse 48.8584 2.2945 +``` + +Returns: full address breakdown (street, city, state, country, postcode). + +### nearby — Find places by category + +```bash +python3 $MAPS nearby 48.8584 2.2945 restaurant --limit 10 +python3 $MAPS nearby 40.7128 -74.0060 hospital --radius 2000 +python3 $MAPS nearby 51.5074 -0.1278 cafe --limit 5 --radius 300 +``` + +44 categories: restaurant, cafe, bar, hospital, pharmacy, hotel, supermarket, +atm, gas_station, parking, museum, park, school, university, bank, police, +fire_station, library, airport, train_station, bus_stop, church, mosque, +synagogue, dentist, doctor, cinema, theatre, gym, swimming_pool, post_office, +convenience_store, bakery, bookshop, laundry, car_wash, car_rental, +bicycle_rental, taxi, veterinary, zoo, playground, stadium, nightclub. 
+ +### distance — Travel distance and time + +```bash +python3 $MAPS distance "Paris" --to "Lyon" +python3 $MAPS distance "New York" --to "Boston" --mode driving +python3 $MAPS distance "Big Ben" --to "Tower Bridge" --mode walking +``` + +Modes: driving (default), walking, cycling. Returns road distance, duration, +and straight-line distance for comparison. + +### directions — Turn-by-turn navigation + +```bash +python3 $MAPS directions "Eiffel Tower" --to "Louvre Museum" --mode walking +python3 $MAPS directions "JFK Airport" --to "Times Square" --mode driving +``` + +Returns numbered steps with instruction, distance, duration, road name, and +maneuver type (turn, depart, arrive, etc.). + +### timezone — Timezone for coordinates + +```bash +python3 $MAPS timezone 48.8584 2.2945 +python3 $MAPS timezone 35.6762 139.6503 +``` + +Returns timezone name, UTC offset, and current local time. + +### area — Bounding box and area for a place + +```bash +python3 $MAPS area "Manhattan, New York" +python3 $MAPS area "London" +``` + +Returns bounding box coordinates, width/height in km, and approximate area. +Useful as input for the bbox command. + +### bbox — Search within a bounding box + +```bash +python3 $MAPS bbox 40.75 -74.00 40.77 -73.98 restaurant --limit 20 +``` + +Finds POIs within a geographic rectangle. Use `area` first to get the +bounding box coordinates for a named place. + +## Workflow Examples + +**"Find Italian restaurants near the Colosseum":** +1. `search "Colosseum Rome"` → get lat/lon +2. `nearby LAT LON restaurant --radius 500` + +**"How do I walk from hotel to conference center?":** +1. `directions "Hotel Name" --to "Conference Center" --mode walking` + +**"What restaurants are in downtown Seattle?":** +1. `area "Downtown Seattle"` → get bounding box +2. 
`bbox S W N E restaurant --limit 30` + +## Pitfalls + +- Nominatim ToS: max 1 req/s (handled automatically by the script) +- `nearby` requires lat/lon — use `search` first to get coordinates +- OSRM routing coverage is best for Europe and North America +- Overpass API can be slow during peak hours (script retries automatically) +- `distance` and `directions` use `--to` flag for the destination (not positional) + +## Verification + +```bash +python3 ~/.hermes/skills/maps/scripts/maps_client.py search "Statue of Liberty" +# Should return lat ~40.689, lon ~-74.044 +``` diff --git a/optional-skills/productivity/maps/scripts/maps_client.py b/optional-skills/productivity/maps/scripts/maps_client.py new file mode 100644 index 0000000000..c271570f99 --- /dev/null +++ b/optional-skills/productivity/maps/scripts/maps_client.py @@ -0,0 +1,1143 @@ +#!/usr/bin/env python3 +""" +maps_client.py - CLI tool for maps, geocoding, routing, POI search, and more. +Uses only Python stdlib. Data from OpenStreetMap/Nominatim, Overpass API, OSRM, +and TimeAPI.io. 
+ +Commands: + search - Geocode a place name to coordinates + reverse - Reverse geocode coordinates to an address + nearby - Find nearby POIs by category + distance - Road distance and travel time between two places + directions - Turn-by-turn directions between two places + timezone - Timezone info for coordinates + bbox - Find POIs within a bounding box + area - Get bounding box and area info for a named place +""" + +import argparse +import json +import math +import os +import sys +import time +import urllib.error +import urllib.parse +import urllib.request + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +USER_AGENT = "HermesAgent/1.0 (contact: hermes@agent.ai)" +DATA_SOURCE = "OpenStreetMap/Nominatim" + +NOMINATIM_SEARCH = "https://nominatim.openstreetmap.org/search" +NOMINATIM_REVERSE = "https://nominatim.openstreetmap.org/reverse" +OVERPASS_API = "https://overpass-api.de/api/interpreter" +OSRM_BASE = "https://router.project-osrm.org/route/v1" +TIMEAPI_BASE = "https://timeapi.io/api/timezone/coordinate" + +# Seconds to sleep between Nominatim requests (ToS requirement) +NOMINATIM_RATE_LIMIT = 1.0 + +# Maximum retries for HTTP errors +MAX_RETRIES = 3 +RETRY_DELAY = 2.0 # seconds + +# Category -> (OSM tag key, OSM tag value) +CATEGORY_TAGS = { + # Food & Drink + "restaurant": ("amenity", "restaurant"), + "cafe": ("amenity", "cafe"), + "bar": ("amenity", "bar"), + "bakery": ("shop", "bakery"), + "convenience_store": ("shop", "convenience"), + # Health + "hospital": ("amenity", "hospital"), + "pharmacy": ("amenity", "pharmacy"), + "dentist": ("amenity", "dentist"), + "doctor": ("amenity", "doctors"), + "veterinary": ("amenity", "veterinary"), + # Accommodation + "hotel": ("tourism", "hotel"), + # Shopping & Services + "supermarket": ("shop", "supermarket"), + "bookshop": ("shop", "books"), + "laundry": ("shop", "laundry"), + # Finance + 
"atm": ("amenity", "atm"), + "bank": ("amenity", "bank"), + # Transport + "gas_station": ("amenity", "fuel"), + "parking": ("amenity", "parking"), + "airport": ("aeroway", "aerodrome"), + "train_station": ("railway", "station"), + "bus_stop": ("highway", "bus_stop"), + "taxi": ("amenity", "taxi"), + "car_wash": ("amenity", "car_wash"), + "car_rental": ("amenity", "car_rental"), + "bicycle_rental": ("amenity", "bicycle_rental"), + # Culture & Entertainment + "museum": ("tourism", "museum"), + "cinema": ("amenity", "cinema"), + "theatre": ("amenity", "theatre"), + "nightclub": ("amenity", "nightclub"), + "zoo": ("tourism", "zoo"), + # Education + "school": ("amenity", "school"), + "university": ("amenity", "university"), + "library": ("amenity", "library"), + # Public Services + "police": ("amenity", "police"), + "fire_station": ("amenity", "fire_station"), + "post_office": ("amenity", "post_office"), + # Religion + "church": ("amenity", "place_of_worship"), # refined by religion tag + "mosque": ("amenity", "place_of_worship"), + "synagogue": ("amenity", "place_of_worship"), + # Recreation + "park": ("leisure", "park"), + "gym": ("leisure", "fitness_centre"), + "swimming_pool": ("leisure", "swimming_pool"), + "playground": ("leisure", "playground"), + "stadium": ("leisure", "stadium"), +} + +# Religion-specific overrides for place_of_worship categories +RELIGION_FILTER = { + "church": "christian", + "mosque": "muslim", + "synagogue": "jewish", +} + +VALID_CATEGORIES = sorted(CATEGORY_TAGS.keys()) + +OSRM_PROFILES = { + "driving": "driving", + "walking": "foot", + "cycling": "bike", +} + +# --------------------------------------------------------------------------- +# Output helpers +# --------------------------------------------------------------------------- + +def print_json(data): + """Print data as pretty-printed JSON to stdout.""" + print(json.dumps(data, indent=2, ensure_ascii=False)) + + +def error_exit(message, code=1): + """Print an error result as JSON and 
exit.""" + print_json({"error": message, "status": "error"}) + sys.exit(code) + + +# --------------------------------------------------------------------------- +# HTTP helpers +# --------------------------------------------------------------------------- + +def http_get(url, params=None, retries=MAX_RETRIES, silent=False): + """ + Perform an HTTP GET request, returning parsed JSON. + Adds the required User-Agent header. Retries on transient errors. + If silent=True, raises RuntimeError instead of calling error_exit. + """ + if params: + url = url + "?" + urllib.parse.urlencode(params) + + req = urllib.request.Request(url, headers={"User-Agent": USER_AGENT}) + + last_error = None + for attempt in range(1, retries + 1): + try: + with urllib.request.urlopen(req, timeout=15) as resp: + raw = resp.read().decode("utf-8") + return json.loads(raw) + except urllib.error.HTTPError as exc: + last_error = f"HTTP {exc.code}: {exc.reason} for {url}" + if exc.code in (429, 503, 502, 504): + time.sleep(RETRY_DELAY * attempt) + else: + if silent: + raise RuntimeError(last_error) + error_exit(last_error) + except urllib.error.URLError as exc: + last_error = f"URL error: {exc.reason}" + time.sleep(RETRY_DELAY * attempt) + except json.JSONDecodeError as exc: + last_error = f"JSON parse error: {exc}" + time.sleep(RETRY_DELAY * attempt) + + msg = f"Request failed after {retries} attempts. Last error: {last_error}" + if silent: + raise RuntimeError(msg) + error_exit(msg) + + +def http_get_text(url, params=None, retries=MAX_RETRIES, silent=False): + """ + Like http_get but returns raw text instead of parsed JSON. + Useful for APIs that may return non-JSON responses. + """ + if params: + url = url + "?" 
+ urllib.parse.urlencode(params) + + req = urllib.request.Request(url, headers={"User-Agent": USER_AGENT}) + + last_error = None + for attempt in range(1, retries + 1): + try: + with urllib.request.urlopen(req, timeout=15) as resp: + return resp.read().decode("utf-8") + except urllib.error.HTTPError as exc: + last_error = f"HTTP {exc.code}: {exc.reason} for {url}" + if exc.code in (429, 503, 502, 504): + time.sleep(RETRY_DELAY * attempt) + else: + if silent: + raise RuntimeError(last_error) + error_exit(last_error) + except urllib.error.URLError as exc: + last_error = f"URL error: {exc.reason}" + time.sleep(RETRY_DELAY * attempt) + + msg = f"Request failed after {retries} attempts. Last error: {last_error}" + if silent: + raise RuntimeError(msg) + error_exit(msg) + + +def http_post(url, data_str, retries=MAX_RETRIES): + """ + Perform an HTTP POST with a plain-text body (for Overpass QL). + Returns parsed JSON. + """ + encoded = data_str.encode("utf-8") + req = urllib.request.Request( + url, + data=encoded, + headers={ + "User-Agent": USER_AGENT, + "Content-Type": "application/x-www-form-urlencoded", + }, + ) + + last_error = None + for attempt in range(1, retries + 1): + try: + with urllib.request.urlopen(req, timeout=30) as resp: + raw = resp.read().decode("utf-8") + return json.loads(raw) + except urllib.error.HTTPError as exc: + last_error = f"HTTP {exc.code}: {exc.reason}" + if exc.code in (429, 503, 502, 504): + time.sleep(RETRY_DELAY * attempt) + else: + error_exit(last_error) + except urllib.error.URLError as exc: + last_error = f"URL error: {exc.reason}" + time.sleep(RETRY_DELAY * attempt) + except json.JSONDecodeError as exc: + last_error = f"JSON parse error: {exc}" + time.sleep(RETRY_DELAY * attempt) + + error_exit(f"POST failed after {retries} attempts. 
Last error: {last_error}") + + +# --------------------------------------------------------------------------- +# Geo math +# --------------------------------------------------------------------------- + +def haversine_m(lat1, lon1, lat2, lon2): + """Return distance in metres between two lat/lon points (Haversine).""" + R = 6_371_000 # Earth mean radius in metres + phi1 = math.radians(lat1) + phi2 = math.radians(lat2) + dphi = math.radians(lat2 - lat1) + dlam = math.radians(lon2 - lon1) + a = (math.sin(dphi / 2) ** 2 + + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2) + return 2 * R * math.atan2(math.sqrt(a), math.sqrt(1 - a)) + + +# --------------------------------------------------------------------------- +# Nominatim helpers +# --------------------------------------------------------------------------- + +def nominatim_search(query, limit=5): + """Geocode a free-text query. Returns list of result dicts.""" + params = { + "q": query, + "format": "json", + "limit": limit, + "addressdetails": 1, + } + time.sleep(NOMINATIM_RATE_LIMIT) + return http_get(NOMINATIM_SEARCH, params=params) + + +def nominatim_reverse(lat, lon): + """Reverse geocode lat/lon. Returns a single result dict.""" + params = { + "lat": lat, + "lon": lon, + "format": "json", + "addressdetails": 1, + } + time.sleep(NOMINATIM_RATE_LIMIT) + return http_get(NOMINATIM_REVERSE, params=params) + + +def geocode_single(query): + """ + Geocode a query and return (lat, lon, display_name). + Exits with error if nothing found. 
+ """ + results = nominatim_search(query, limit=1) + if not results: + error_exit(f"Could not geocode: {query}") + r = results[0] + return float(r["lat"]), float(r["lon"]), r.get("display_name", query) + + +# --------------------------------------------------------------------------- +# Overpass helpers +# --------------------------------------------------------------------------- + +def build_overpass_nearby(tag_key, tag_val, lat, lon, radius, limit, + religion=None): + """Build an Overpass QL query for nearby POIs around a point.""" + religion_filter = "" + if religion: + religion_filter = f'["religion"="{religion}"]' + return ( + f'[out:json][timeout:25];\n' + f'(\n' + f' node["{tag_key}"="{tag_val}"]{religion_filter}' + f'(around:{radius},{lat},{lon});\n' + f' way["{tag_key}"="{tag_val}"]{religion_filter}' + f'(around:{radius},{lat},{lon});\n' + f');\n' + f'out center {limit};\n' + ) + + +def build_overpass_bbox(tag_key, tag_val, south, west, north, east, limit, + religion=None): + """Build an Overpass QL query for POIs within a bounding box.""" + religion_filter = "" + if religion: + religion_filter = f'["religion"="{religion}"]' + return ( + f'[out:json][timeout:25];\n' + f'(\n' + f' node["{tag_key}"="{tag_val}"]{religion_filter}' + f'({south},{west},{north},{east});\n' + f' way["{tag_key}"="{tag_val}"]{religion_filter}' + f'({south},{west},{north},{east});\n' + f');\n' + f'out center {limit};\n' + ) + + +def parse_overpass_elements(elements, ref_lat=None, ref_lon=None): + """ + Parse Overpass elements into a clean list of POI dicts. + If ref_lat/ref_lon are provided, computes distance and sorts by it. 
+ """ + places = [] + for el in elements: + # Ways have a "center" sub-dict; nodes have lat/lon directly + if el["type"] == "way": + center = el.get("center", {}) + el_lat = center.get("lat") + el_lon = center.get("lon") + else: + el_lat = el.get("lat") + el_lon = el.get("lon") + + if el_lat is None or el_lon is None: + continue + + tags = el.get("tags", {}) + name = tags.get("name") or tags.get("name:en") or "" + + # Build a short address from available tags + addr_parts = [] + for part_key in ("addr:housenumber", "addr:street", "addr:city"): + val = tags.get(part_key) + if val: + addr_parts.append(val) + address_str = ", ".join(addr_parts) if addr_parts else "" + + place = { + "name": name, + "address": address_str, + "lat": el_lat, + "lon": el_lon, + "osm_type": el.get("type", ""), + "osm_id": el.get("id", ""), + "tags": { + k: v for k, v in tags.items() + if k not in ("name", "name:en", + "addr:housenumber", "addr:street", "addr:city") + }, + } + + if ref_lat is not None and ref_lon is not None: + dist_m = haversine_m(ref_lat, ref_lon, el_lat, el_lon) + place["distance_m"] = round(dist_m, 1) + + places.append(place) + + # Sort by distance if available + if places and "distance_m" in places[0]: + places.sort(key=lambda p: p["distance_m"]) + + return places + + +# --------------------------------------------------------------------------- +# Command: search +# --------------------------------------------------------------------------- + +def cmd_search(args): + """Geocode a place name and return top results.""" + query = " ".join(args.query) + raw = nominatim_search(query, limit=5) + + if not raw: + print_json({ + "query": query, + "results": [], + "count": 0, + "data_source": DATA_SOURCE, + }) + return + + results = [] + for item in raw: + bb = item.get("boundingbox", []) + results.append({ + "name": item.get("name") or item.get("display_name", ""), + "display_name": item.get("display_name", ""), + "lat": float(item["lat"]), + "lon": float(item["lon"]), + 
"type": item.get("type", ""), + "category": item.get("category", ""), + "osm_type": item.get("osm_type", ""), + "osm_id": item.get("osm_id", ""), + "bounding_box": { + "min_lat": float(bb[0]) if len(bb) > 0 else None, + "max_lat": float(bb[1]) if len(bb) > 1 else None, + "min_lon": float(bb[2]) if len(bb) > 2 else None, + "max_lon": float(bb[3]) if len(bb) > 3 else None, + }, + "importance": item.get("importance"), + }) + + print_json({ + "query": query, + "results": results, + "count": len(results), + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# Command: reverse +# --------------------------------------------------------------------------- + +def cmd_reverse(args): + """Reverse geocode coordinates to a human-readable address.""" + try: + lat = float(args.lat) + lon = float(args.lon) + except ValueError: + error_exit("LAT and LON must be numeric values.") + + if not (-90 <= lat <= 90): + error_exit("Latitude must be between -90 and 90.") + if not (-180 <= lon <= 180): + error_exit("Longitude must be between -180 and 180.") + + data = nominatim_reverse(lat, lon) + + if "error" in data: + error_exit(f"Reverse geocode failed: {data['error']}") + + address = data.get("address", {}) + + print_json({ + "lat": lat, + "lon": lon, + "display_name": data.get("display_name", ""), + "address": { + "house_number": address.get("house_number", ""), + "road": address.get("road", ""), + "neighbourhood": address.get("neighbourhood", ""), + "suburb": address.get("suburb", ""), + "city": (address.get("city") + or address.get("town") + or address.get("village", "")), + "county": address.get("county", ""), + "state": address.get("state", ""), + "postcode": address.get("postcode", ""), + "country": address.get("country", ""), + "country_code": address.get("country_code", ""), + }, + "osm_type": data.get("osm_type", ""), + "osm_id": data.get("osm_id", ""), + "data_source": DATA_SOURCE, + }) + + +# 
--------------------------------------------------------------------------- +# Command: nearby +# --------------------------------------------------------------------------- + +def cmd_nearby(args): + """Find nearby POIs using the Overpass API.""" + try: + lat = float(args.lat) + lon = float(args.lon) + except ValueError: + error_exit("LAT and LON must be numeric values.") + + category = args.category.lower() + if category not in CATEGORY_TAGS: + error_exit( + f"Unknown category '{category}'. " + f"Valid categories: {', '.join(VALID_CATEGORIES)}" + ) + + radius = int(args.radius) + limit = int(args.limit) + + if radius <= 0: + error_exit("Radius must be a positive integer (metres).") + if limit <= 0: + error_exit("Limit must be a positive integer.") + + tag_key, tag_val = CATEGORY_TAGS[category] + religion = RELIGION_FILTER.get(category) + query = build_overpass_nearby(tag_key, tag_val, lat, lon, radius, limit, + religion=religion) + + post_data = "data=" + urllib.parse.quote(query) + raw = http_post(OVERPASS_API, post_data) + + elements = raw.get("elements", []) + places = parse_overpass_elements(elements, ref_lat=lat, ref_lon=lon) + + # Add category to each result + for p in places: + p["category"] = category + + print_json({ + "center_lat": lat, + "center_lon": lon, + "category": category, + "radius_m": radius, + "count": len(places), + "results": places, + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# Command: distance +# --------------------------------------------------------------------------- + +def cmd_distance(args): + """Calculate road distance and travel time between two places.""" + origin_query = " ".join(args.origin) + destination_query = " ".join(args.to) + mode = args.mode.lower() + + if mode not in OSRM_PROFILES: + error_exit(f"Invalid mode '{mode}'. 
Choose from: {', '.join(OSRM_PROFILES)}") + + # Geocode origin and destination + o_lat, o_lon, o_name = geocode_single(origin_query) + d_lat, d_lon, d_name = geocode_single(destination_query) + + profile = OSRM_PROFILES[mode] + url = ( + f"{OSRM_BASE}/{profile}/" + f"{o_lon},{o_lat};{d_lon},{d_lat}" + f"?overview=false&steps=false" + ) + + osrm_data = http_get(url) + + if osrm_data.get("code") != "Ok": + error_exit( + f"OSRM routing failed: " + f"{osrm_data.get('message', osrm_data.get('code', 'unknown error'))}" + ) + + routes = osrm_data.get("routes", []) + if not routes: + error_exit("No route found between the two locations.") + + route = routes[0] + distance_m = route.get("distance", 0) + duration_s = route.get("duration", 0) + distance_km = round(distance_m / 1000, 3) + duration_min = round(duration_s / 60, 2) + + # Straight-line distance for reference + straight_m = haversine_m(o_lat, o_lon, d_lat, d_lon) + + print_json({ + "origin": { + "query": origin_query, + "display_name": o_name, + "lat": o_lat, + "lon": o_lon, + }, + "destination": { + "query": destination_query, + "display_name": d_name, + "lat": d_lat, + "lon": d_lon, + }, + "mode": mode, + "distance_km": distance_km, + "distance_m": round(distance_m, 1), + "duration_minutes": duration_min, + "duration_seconds": round(duration_s, 1), + "straight_line_km": round(straight_m / 1000, 3), + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# Command: directions +# --------------------------------------------------------------------------- + +def _format_duration(seconds): + """Format seconds into a human-readable string.""" + if seconds < 60: + return f"{round(seconds)}s" + minutes = seconds / 60 + if minutes < 60: + return f"{round(minutes, 1)} min" + hours = int(minutes // 60) + remaining = round(minutes % 60) + return f"{hours}h {remaining}min" + + +def _format_distance(metres): + """Format metres into a human-readable string.""" + if 
metres < 1000: + return f"{round(metres)} m" + return f"{round(metres / 1000, 2)} km" + + +def cmd_directions(args): + """Get turn-by-turn directions between two places via OSRM.""" + origin_query = " ".join(args.origin) + destination_query = " ".join(args.to) + mode = args.mode.lower() + + if mode not in OSRM_PROFILES: + error_exit(f"Invalid mode '{mode}'. Choose from: {', '.join(OSRM_PROFILES)}") + + # Geocode origin and destination + o_lat, o_lon, o_name = geocode_single(origin_query) + d_lat, d_lon, d_name = geocode_single(destination_query) + + profile = OSRM_PROFILES[mode] + url = ( + f"{OSRM_BASE}/{profile}/" + f"{o_lon},{o_lat};{d_lon},{d_lat}" + f"?overview=false&steps=true" + ) + + osrm_data = http_get(url) + + if osrm_data.get("code") != "Ok": + error_exit( + f"OSRM routing failed: " + f"{osrm_data.get('message', osrm_data.get('code', 'unknown error'))}" + ) + + routes = osrm_data.get("routes", []) + if not routes: + error_exit("No route found between the two locations.") + + route = routes[0] + distance_m = route.get("distance", 0) + duration_s = route.get("duration", 0) + + # Extract steps from all legs + steps = [] + step_num = 0 + for leg in route.get("legs", []): + for step in leg.get("steps", []): + maneuver = step.get("maneuver", {}) + step_dist = step.get("distance", 0) + step_dur = step.get("duration", 0) + step_name = step.get("name", "") + modifier = maneuver.get("modifier", "") + m_type = maneuver.get("type", "") + + # Build instruction text + if m_type == "depart": + instruction = f"Depart on {step_name}" if step_name else "Depart" + elif m_type == "arrive": + instruction = "Arrive at destination" + elif m_type == "turn": + instruction = f"Turn {modifier} onto {step_name}" if step_name else f"Turn {modifier}" + elif m_type == "new name": + instruction = f"Continue onto {step_name}" if step_name else "Continue" + elif m_type == "merge": + instruction = f"Merge {modifier} onto {step_name}" if step_name else f"Merge {modifier}" + elif m_type == 
"fork": + instruction = f"Take the {modifier} fork onto {step_name}" if step_name else f"Take the {modifier} fork" + elif m_type == "roundabout": + instruction = f"Enter roundabout, exit onto {step_name}" if step_name else "Enter roundabout" + elif m_type == "rotary": + instruction = f"Enter rotary, exit onto {step_name}" if step_name else "Enter rotary" + elif m_type == "end of road": + instruction = f"At end of road, turn {modifier} onto {step_name}" if step_name else f"At end of road, turn {modifier}" + elif m_type == "continue": + instruction = f"Continue {modifier} on {step_name}" if step_name else f"Continue {modifier}" + elif m_type == "on ramp": + instruction = f"Take ramp onto {step_name}" if step_name else "Take ramp" + elif m_type == "off ramp": + instruction = f"Take exit onto {step_name}" if step_name else "Take exit" + else: + instruction = f"{m_type} {modifier} {step_name}".strip() + + step_num += 1 + steps.append({ + "step": step_num, + "instruction": instruction, + "distance": _format_distance(step_dist), + "distance_m": round(step_dist, 1), + "duration": _format_duration(step_dur), + "duration_s": round(step_dur, 1), + "road_name": step_name, + "maneuver": m_type, + }) + + print_json({ + "origin": { + "query": origin_query, + "display_name": o_name, + "lat": o_lat, + "lon": o_lon, + }, + "destination": { + "query": destination_query, + "display_name": d_name, + "lat": d_lat, + "lon": d_lon, + }, + "mode": mode, + "total_distance": _format_distance(distance_m), + "total_distance_m": round(distance_m, 1), + "total_duration": _format_duration(duration_s), + "total_duration_s": round(duration_s, 1), + "steps": steps, + "step_count": len(steps), + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# Command: timezone +# --------------------------------------------------------------------------- + +def cmd_timezone(args): + """ + Get timezone information for a lat/lon coordinate. 
+ + Strategy: + 1. Try TimeAPI.io (free, no key, supports coordinate-based lookup). + 2. Fallback: derive UTC offset approximation from longitude. + """ + try: + lat = float(args.lat) + lon = float(args.lon) + except ValueError: + error_exit("LAT and LON must be numeric values.") + + if not (-90 <= lat <= 90): + error_exit("Latitude must be between -90 and 90.") + if not (-180 <= lon <= 180): + error_exit("Longitude must be between -180 and 180.") + + timezone_str = None + timezone_src = None + current_time = None + utc_offset = None + + # --- Strategy 1: TimeAPI.io coordinate lookup --- + try: + params = {"latitude": lat, "longitude": lon} + tz_data = http_get(TIMEAPI_BASE, params=params, silent=True) + if isinstance(tz_data, dict): + timezone_str = tz_data.get("timeZone") + current_time = tz_data.get("currentLocalTime") + # Build utc_offset from currentUtcOffset if available + offset_info = tz_data.get("currentUtcOffset", {}) + if isinstance(offset_info, dict): + oh = offset_info.get("hours", 0) + om = abs(offset_info.get("minutes", 0)) + os_ = offset_info.get("seconds", 0) + sign = "+" if oh >= 0 else "-" + utc_offset = f"{sign}{abs(oh):02d}:{om:02d}" + elif tz_data.get("standardUtcOffset"): + offset_info2 = tz_data["standardUtcOffset"] + if isinstance(offset_info2, dict): + oh = offset_info2.get("hours", 0) + om = abs(offset_info2.get("minutes", 0)) + sign = "+" if oh >= 0 else "-" + utc_offset = f"{sign}{abs(oh):02d}:{om:02d}" + timezone_src = "timeapi.io" + except (RuntimeError, KeyError, TypeError): + pass # API may be down; continue to fallback + + # --- Strategy 2: longitude-based UTC offset approximation --- + if not timezone_str: + approx_offset_h = round(lon / 15) + if approx_offset_h >= 0: + utc_offset = f"+{approx_offset_h:02d}:00" + else: + utc_offset = f"-{abs(approx_offset_h):02d}:00" + timezone_str = f"UTC{utc_offset}" + timezone_src = "longitude approximation (longitude/15)" + + print_json({ + "lat": lat, + "lon": lon, + "timezone": timezone_str, 
+ "utc_offset": utc_offset, + "current_time": current_time, + "source": timezone_src, + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# Command: bbox +# --------------------------------------------------------------------------- + +def cmd_bbox(args): + """Find POIs within a bounding box using the Overpass API.""" + try: + lat1 = float(args.lat1) + lon1 = float(args.lon1) + lat2 = float(args.lat2) + lon2 = float(args.lon2) + except ValueError: + error_exit("All coordinate arguments must be numeric values.") + + # Normalize: south/west < north/east + south = min(lat1, lat2) + north = max(lat1, lat2) + west = min(lon1, lon2) + east = max(lon1, lon2) + + category = args.category.lower() + if category not in CATEGORY_TAGS: + error_exit( + f"Unknown category '{category}'. " + f"Valid categories: {', '.join(VALID_CATEGORIES)}" + ) + + limit = int(args.limit) + if limit <= 0: + error_exit("Limit must be a positive integer.") + + tag_key, tag_val = CATEGORY_TAGS[category] + religion = RELIGION_FILTER.get(category) + query = build_overpass_bbox(tag_key, tag_val, south, west, north, east, + limit, religion=religion) + + post_data = "data=" + urllib.parse.quote(query) + raw = http_post(OVERPASS_API, post_data) + + elements = raw.get("elements", []) + + # Use center of bbox as reference for distance sorting + center_lat = (south + north) / 2 + center_lon = (west + east) / 2 + places = parse_overpass_elements(elements, ref_lat=center_lat, + ref_lon=center_lon) + + for p in places: + p["category"] = category + + print_json({ + "bounding_box": { + "south": south, + "west": west, + "north": north, + "east": east, + }, + "category": category, + "count": len(places), + "results": places, + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# Command: area +# --------------------------------------------------------------------------- + +def 
cmd_area(args): + """Get bounding box and area info for a named place.""" + query = " ".join(args.place) + raw = nominatim_search(query, limit=1) + + if not raw: + error_exit(f"Could not find place: {query}") + + item = raw[0] + bb = item.get("boundingbox", []) + + if len(bb) < 4: + error_exit(f"No bounding box data available for: {query}") + + min_lat = float(bb[0]) + max_lat = float(bb[1]) + min_lon = float(bb[2]) + max_lon = float(bb[3]) + + # Approximate area in km² using the bounding box + # Width in km at the average latitude + avg_lat = (min_lat + max_lat) / 2 + height_km = haversine_m(min_lat, min_lon, max_lat, min_lon) / 1000 + width_km = haversine_m(avg_lat, min_lon, avg_lat, max_lon) / 1000 + approx_area_km2 = round(height_km * width_km, 3) + + print_json({ + "query": query, + "display_name": item.get("display_name", ""), + "lat": float(item["lat"]), + "lon": float(item["lon"]), + "type": item.get("type", ""), + "category": item.get("category", ""), + "bounding_box": { + "south": min_lat, + "north": max_lat, + "west": min_lon, + "east": max_lon, + }, + "dimensions": { + "width_km": round(width_km, 3), + "height_km": round(height_km, 3), + }, + "approx_area_km2": approx_area_km2, + "osm_type": item.get("osm_type", ""), + "osm_id": item.get("osm_id", ""), + "data_source": DATA_SOURCE, + }) + + +# --------------------------------------------------------------------------- +# CLI setup +# --------------------------------------------------------------------------- + +def build_parser(): + parser = argparse.ArgumentParser( + prog="maps_client.py", + description=( + "CLI maps tool: geocoding, reverse geocoding, POI search, " + "routing, directions, timezone, and area lookup. " + "Powered by OpenStreetMap, OSRM, Overpass, and TimeAPI.io. " + "No API keys required." 
+ ), + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=( + "Examples:\n" + " maps_client.py search Times Square\n" + " maps_client.py reverse 40.758 -73.985\n" + " maps_client.py nearby 40.758 -73.985 restaurant --radius 800\n" + " maps_client.py distance New York --to Los Angeles --mode driving\n" + " maps_client.py directions Paris --to Berlin --mode driving\n" + " maps_client.py timezone 48.8566 2.3522\n" + " maps_client.py bbox 40.70 -74.02 40.78 -73.95 restaurant\n" + " maps_client.py area Manhattan" + ), + ) + sub = parser.add_subparsers(dest="command", required=True, + metavar="COMMAND") + + # -- search -- + p_search = sub.add_parser( + "search", + help="Geocode a place name to coordinates.", + description="Search for a place by name and return coordinates and details.", + ) + p_search.add_argument( + "query", nargs="+", + help="Place name or address to search.", + ) + + # -- reverse -- + p_reverse = sub.add_parser( + "reverse", + help="Reverse geocode coordinates to an address.", + description="Convert latitude/longitude coordinates to a human-readable address.", + ) + p_reverse.add_argument("lat", help="Latitude (decimal degrees).") + p_reverse.add_argument("lon", help="Longitude (decimal degrees).") + + # -- nearby -- + p_nearby = sub.add_parser( + "nearby", + help="Find nearby places of a given category.", + description=( + "Find points of interest near a location using the Overpass API.\n" + f"Categories: {', '.join(VALID_CATEGORIES)}" + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p_nearby.add_argument("lat", help="Center latitude (decimal degrees).") + p_nearby.add_argument("lon", help="Center longitude (decimal degrees).") + p_nearby.add_argument( + "category", + help="POI category (use --help to see full list).", + ) + p_nearby.add_argument( + "--radius", "-r", + default=500, type=int, metavar="METRES", + help="Search radius in metres (default: 500).", + ) + p_nearby.add_argument( + "--limit", "-n", + default=10, 
type=int, metavar="N", + help="Maximum number of results (default: 10).", + ) + + # -- distance -- + p_dist = sub.add_parser( + "distance", + help="Calculate road distance and travel time.", + description=( + "Calculate road distance and estimated travel time between two places.\n" + "Example: maps_client.py distance New York --to Los Angeles" + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p_dist.add_argument( + "origin", nargs="+", + help="Origin address or place name.", + ) + p_dist.add_argument( + "--to", nargs="+", required=True, metavar="DEST", + help="Destination address or place name (required).", + ) + p_dist.add_argument( + "--mode", "-m", + default="driving", + choices=list(OSRM_PROFILES.keys()), + help="Travel mode (default: driving).", + ) + + # -- directions -- + p_dir = sub.add_parser( + "directions", + help="Get turn-by-turn directions between two places.", + description=( + "Get step-by-step navigation directions between two places.\n" + "Example: maps_client.py directions Paris --to Berlin --mode driving" + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p_dir.add_argument( + "origin", nargs="+", + help="Origin address or place name.", + ) + p_dir.add_argument( + "--to", nargs="+", required=True, metavar="DEST", + help="Destination address or place name (required).", + ) + p_dir.add_argument( + "--mode", "-m", + default="driving", + choices=list(OSRM_PROFILES.keys()), + help="Travel mode (default: driving).", + ) + + # -- timezone -- + p_tz = sub.add_parser( + "timezone", + help="Get timezone information for coordinates.", + description="Look up timezone and current local time for a lat/lon coordinate.", + ) + p_tz.add_argument("lat", help="Latitude (decimal degrees).") + p_tz.add_argument("lon", help="Longitude (decimal degrees).") + + # -- bbox -- + p_bbox = sub.add_parser( + "bbox", + help="Find POIs within a bounding box.", + description=( + "Search for points of interest within a geographic bounding box.\n" 
+ "Tip: use the 'area' command to find bounding boxes for named places.\n" + f"Categories: {', '.join(VALID_CATEGORIES)}" + ), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p_bbox.add_argument("lat1", help="First corner latitude.") + p_bbox.add_argument("lon1", help="First corner longitude.") + p_bbox.add_argument("lat2", help="Second corner latitude.") + p_bbox.add_argument("lon2", help="Second corner longitude.") + p_bbox.add_argument("category", help="POI category to search for.") + p_bbox.add_argument( + "--limit", "-n", + default=20, type=int, metavar="N", + help="Maximum number of results (default: 20).", + ) + + # -- area -- + p_area = sub.add_parser( + "area", + help="Get bounding box and area info for a named place.", + description=( + "Look up a place by name and return its bounding box, dimensions, " + "and approximate area. Useful as input to the 'bbox' command." + ), + ) + p_area.add_argument( + "place", nargs="+", + help="Place name to look up (e.g., 'Manhattan' or 'downtown Seattle').", + ) + + return parser + + +def main(): + parser = build_parser() + args = parser.parse_args() + + dispatch = { + "search": cmd_search, + "reverse": cmd_reverse, + "nearby": cmd_nearby, + "distance": cmd_distance, + "directions": cmd_directions, + "timezone": cmd_timezone, + "bbox": cmd_bbox, + "area": cmd_area, + } + + handler = dispatch.get(args.command) + if handler is None: + error_exit(f"Unknown command: {args.command}") + + handler(args) + + +if __name__ == "__main__": + main() diff --git a/optional-skills/productivity/maps/tests/test_maps_client.py b/optional-skills/productivity/maps/tests/test_maps_client.py new file mode 100644 index 0000000000..0400d51b7d --- /dev/null +++ b/optional-skills/productivity/maps/tests/test_maps_client.py @@ -0,0 +1,177 @@ +"""Unit tests for maps_client.py pure functions.""" + +import json +import math +import sys +from pathlib import Path +from unittest.mock import patch, MagicMock + +import pytest + +# Add the 
scripts directory to the path so we can import maps_client +SCRIPTS_DIR = str(Path(__file__).resolve().parent.parent / "scripts") +sys.path.insert(0, SCRIPTS_DIR) + +import maps_client as mc + + +# ── Haversine ──────────────────────────────────────────────────────────── + + +class TestHaversine: + def test_same_point_is_zero(self): + assert mc.haversine_m(48.8584, 2.2945, 48.8584, 2.2945) == 0.0 + + def test_known_distance_paris_lyon(self): + # Paris to Lyon is ~393 km straight line + dist = mc.haversine_m(48.8566, 2.3522, 45.7640, 4.8357) + assert 390_000 < dist < 400_000 + + def test_antipodal_points(self): + # North pole to south pole ~20,000 km + dist = mc.haversine_m(90, 0, -90, 0) + assert 20_000_000 < dist < 20_100_000 + + def test_equator_quarter(self): + # 0,0 to 0,90 is ~10,000 km + dist = mc.haversine_m(0, 0, 0, 90) + assert 10_000_000 < dist < 10_100_000 + + def test_symmetry(self): + d1 = mc.haversine_m(40.7128, -74.0060, 51.5074, -0.1278) + d2 = mc.haversine_m(51.5074, -0.1278, 40.7128, -74.0060) + assert d1 == pytest.approx(d2) + + +# ── Overpass query builder ─────────────────────────────────────────────── + + +class TestBuildOverpassQuery: + def test_basic_query_structure(self): + q = mc.build_overpass_nearby("amenity", "restaurant", 48.85, 2.29, 500, 10) + assert "[out:json]" in q + assert '"amenity"="restaurant"' in q + assert "around:500,48.85,2.29" in q + assert "out center 10" in q + + def test_contains_node_and_way(self): + q = mc.build_overpass_nearby("tourism", "hotel", 40.0, -74.0, 1000, 5) + assert "node[" in q + assert "way[" in q + + def test_bbox_query_structure(self): + q = mc.build_overpass_bbox("amenity", "cafe", 40.75, -74.00, 40.77, -73.98, 20) + assert "[out:json]" in q + assert '"amenity"="cafe"' in q + assert "40.75,-74.0,40.77,-73.98" in q + + +# ── Category validation ────────────────────────────────────────────────── + + +class TestCategories: + def test_original_12_categories_exist(self): + original = [ + "restaurant", 
"cafe", "bar", "hospital", "pharmacy", "hotel", + "supermarket", "atm", "gas_station", "parking", "museum", "park", + ] + for cat in original: + assert cat in mc.CATEGORY_TAGS, f"Missing original category: {cat}" + + def test_new_categories_exist(self): + new_cats = [ + "school", "university", "bank", "police", "fire_station", + "library", "airport", "train_station", "bus_stop", "dentist", + "doctor", "cinema", "theatre", "gym", "post_office", + "convenience_store", "bakery", "nightclub", "zoo", "playground", + ] + for cat in new_cats: + assert cat in mc.CATEGORY_TAGS, f"Missing new category: {cat}" + + def test_all_categories_have_valid_tags(self): + for cat, tag in mc.CATEGORY_TAGS.items(): + assert isinstance(tag, tuple), f"{cat}: tag should be tuple" + assert len(tag) == 2, f"{cat}: tag should be (key, value)" + assert isinstance(tag[0], str) and isinstance(tag[1], str) + + def test_at_least_40_categories(self): + assert len(mc.CATEGORY_TAGS) >= 40 + + +# ── OSRM profiles ──────────────────────────────────────────────────────── + + +class TestOSRMProfiles: + def test_driving_walking_cycling(self): + assert "driving" in mc.OSRM_PROFILES + assert "walking" in mc.OSRM_PROFILES + assert "cycling" in mc.OSRM_PROFILES + + def test_profile_mappings(self): + assert mc.OSRM_PROFILES["driving"] == "driving" + assert mc.OSRM_PROFILES["walking"] == "foot" + assert mc.OSRM_PROFILES["cycling"] == "bike" + + +# ── Argparse ───────────────────────────────────────────────────────────── + + +class TestArgparse: + def test_distance_uses_to_flag(self): + """The distance command should use --to, not two positional nargs='+'.""" + parser = mc.build_parser() + args = parser.parse_args(["distance", "Paris", "--to", "Lyon"]) + assert args.command == "distance" + assert args.origin == ["Paris"] + assert args.to == ["Lyon"] + + def test_distance_multiword_origin(self): + parser = mc.build_parser() + args = parser.parse_args(["distance", "New", "York", "--to", "Boston"]) + assert 
args.origin == ["New", "York"] + assert args.to == ["Boston"] + + def test_directions_uses_to_flag(self): + parser = mc.build_parser() + args = parser.parse_args(["directions", "Big Ben", "--to", "Tower Bridge"]) + assert args.command == "directions" + + def test_search_accepts_query(self): + parser = mc.build_parser() + args = parser.parse_args(["search", "Eiffel", "Tower"]) + assert args.command == "search" + assert args.query == ["Eiffel", "Tower"] + + def test_nearby_accepts_category(self): + parser = mc.build_parser() + args = parser.parse_args(["nearby", "48.85", "2.29", "restaurant"]) + assert args.command == "nearby" + assert args.category == "restaurant" + + def test_bbox_accepts_coordinates(self): + parser = mc.build_parser() + args = parser.parse_args(["bbox", "40.75", "-74.00", "40.77", "-73.98", "cafe"]) + assert args.command == "bbox" + assert args.category == "cafe" + + def test_area_accepts_query(self): + parser = mc.build_parser() + args = parser.parse_args(["area", "Manhattan"]) + assert args.command == "area" + + +# ── Output helpers ─────────────────────────────────────────────────────── + + +class TestOutputHelpers: + def test_print_json_outputs_valid_json(self, capsys): + mc.print_json({"key": "value", "num": 42}) + captured = capsys.readouterr() + data = json.loads(captured.out) + assert data["key"] == "value" + assert data["num"] == 42 + + def test_error_exit_outputs_error_json(self): + with pytest.raises(SystemExit) as exc_info: + mc.error_exit("something went wrong") + assert exc_info.value.code == 1 From de491fdf0e4a35a91b447f8f077af4961a59b7b3 Mon Sep 17 00:00:00 2001 From: Teknium Date: Mon, 30 Mar 2026 00:10:04 -0700 Subject: [PATCH 006/547] chore: remove unit tests from maps skill MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Skills are self-contained scripts — they don't need test suites in the repo. 
--- .../maps/tests/test_maps_client.py | 177 ------------------ 1 file changed, 177 deletions(-) delete mode 100644 optional-skills/productivity/maps/tests/test_maps_client.py diff --git a/optional-skills/productivity/maps/tests/test_maps_client.py b/optional-skills/productivity/maps/tests/test_maps_client.py deleted file mode 100644 index 0400d51b7d..0000000000 --- a/optional-skills/productivity/maps/tests/test_maps_client.py +++ /dev/null @@ -1,177 +0,0 @@ -"""Unit tests for maps_client.py pure functions.""" - -import json -import math -import sys -from pathlib import Path -from unittest.mock import patch, MagicMock - -import pytest - -# Add the scripts directory to the path so we can import maps_client -SCRIPTS_DIR = str(Path(__file__).resolve().parent.parent / "scripts") -sys.path.insert(0, SCRIPTS_DIR) - -import maps_client as mc - - -# ── Haversine ──────────────────────────────────────────────────────────── - - -class TestHaversine: - def test_same_point_is_zero(self): - assert mc.haversine_m(48.8584, 2.2945, 48.8584, 2.2945) == 0.0 - - def test_known_distance_paris_lyon(self): - # Paris to Lyon is ~393 km straight line - dist = mc.haversine_m(48.8566, 2.3522, 45.7640, 4.8357) - assert 390_000 < dist < 400_000 - - def test_antipodal_points(self): - # North pole to south pole ~20,000 km - dist = mc.haversine_m(90, 0, -90, 0) - assert 20_000_000 < dist < 20_100_000 - - def test_equator_quarter(self): - # 0,0 to 0,90 is ~10,000 km - dist = mc.haversine_m(0, 0, 0, 90) - assert 10_000_000 < dist < 10_100_000 - - def test_symmetry(self): - d1 = mc.haversine_m(40.7128, -74.0060, 51.5074, -0.1278) - d2 = mc.haversine_m(51.5074, -0.1278, 40.7128, -74.0060) - assert d1 == pytest.approx(d2) - - -# ── Overpass query builder ─────────────────────────────────────────────── - - -class TestBuildOverpassQuery: - def test_basic_query_structure(self): - q = mc.build_overpass_nearby("amenity", "restaurant", 48.85, 2.29, 500, 10) - assert "[out:json]" in q - assert 
'"amenity"="restaurant"' in q - assert "around:500,48.85,2.29" in q - assert "out center 10" in q - - def test_contains_node_and_way(self): - q = mc.build_overpass_nearby("tourism", "hotel", 40.0, -74.0, 1000, 5) - assert "node[" in q - assert "way[" in q - - def test_bbox_query_structure(self): - q = mc.build_overpass_bbox("amenity", "cafe", 40.75, -74.00, 40.77, -73.98, 20) - assert "[out:json]" in q - assert '"amenity"="cafe"' in q - assert "40.75,-74.0,40.77,-73.98" in q - - -# ── Category validation ────────────────────────────────────────────────── - - -class TestCategories: - def test_original_12_categories_exist(self): - original = [ - "restaurant", "cafe", "bar", "hospital", "pharmacy", "hotel", - "supermarket", "atm", "gas_station", "parking", "museum", "park", - ] - for cat in original: - assert cat in mc.CATEGORY_TAGS, f"Missing original category: {cat}" - - def test_new_categories_exist(self): - new_cats = [ - "school", "university", "bank", "police", "fire_station", - "library", "airport", "train_station", "bus_stop", "dentist", - "doctor", "cinema", "theatre", "gym", "post_office", - "convenience_store", "bakery", "nightclub", "zoo", "playground", - ] - for cat in new_cats: - assert cat in mc.CATEGORY_TAGS, f"Missing new category: {cat}" - - def test_all_categories_have_valid_tags(self): - for cat, tag in mc.CATEGORY_TAGS.items(): - assert isinstance(tag, tuple), f"{cat}: tag should be tuple" - assert len(tag) == 2, f"{cat}: tag should be (key, value)" - assert isinstance(tag[0], str) and isinstance(tag[1], str) - - def test_at_least_40_categories(self): - assert len(mc.CATEGORY_TAGS) >= 40 - - -# ── OSRM profiles ──────────────────────────────────────────────────────── - - -class TestOSRMProfiles: - def test_driving_walking_cycling(self): - assert "driving" in mc.OSRM_PROFILES - assert "walking" in mc.OSRM_PROFILES - assert "cycling" in mc.OSRM_PROFILES - - def test_profile_mappings(self): - assert mc.OSRM_PROFILES["driving"] == "driving" - assert 
mc.OSRM_PROFILES["walking"] == "foot" - assert mc.OSRM_PROFILES["cycling"] == "bike" - - -# ── Argparse ───────────────────────────────────────────────────────────── - - -class TestArgparse: - def test_distance_uses_to_flag(self): - """The distance command should use --to, not two positional nargs='+'.""" - parser = mc.build_parser() - args = parser.parse_args(["distance", "Paris", "--to", "Lyon"]) - assert args.command == "distance" - assert args.origin == ["Paris"] - assert args.to == ["Lyon"] - - def test_distance_multiword_origin(self): - parser = mc.build_parser() - args = parser.parse_args(["distance", "New", "York", "--to", "Boston"]) - assert args.origin == ["New", "York"] - assert args.to == ["Boston"] - - def test_directions_uses_to_flag(self): - parser = mc.build_parser() - args = parser.parse_args(["directions", "Big Ben", "--to", "Tower Bridge"]) - assert args.command == "directions" - - def test_search_accepts_query(self): - parser = mc.build_parser() - args = parser.parse_args(["search", "Eiffel", "Tower"]) - assert args.command == "search" - assert args.query == ["Eiffel", "Tower"] - - def test_nearby_accepts_category(self): - parser = mc.build_parser() - args = parser.parse_args(["nearby", "48.85", "2.29", "restaurant"]) - assert args.command == "nearby" - assert args.category == "restaurant" - - def test_bbox_accepts_coordinates(self): - parser = mc.build_parser() - args = parser.parse_args(["bbox", "40.75", "-74.00", "40.77", "-73.98", "cafe"]) - assert args.command == "bbox" - assert args.category == "cafe" - - def test_area_accepts_query(self): - parser = mc.build_parser() - args = parser.parse_args(["area", "Manhattan"]) - assert args.command == "area" - - -# ── Output helpers ─────────────────────────────────────────────────────── - - -class TestOutputHelpers: - def test_print_json_outputs_valid_json(self, capsys): - mc.print_json({"key": "value", "num": 42}) - captured = capsys.readouterr() - data = json.loads(captured.out) - assert 
data["key"] == "value" - assert data["num"] == 42 - - def test_error_exit_outputs_error_json(self): - with pytest.raises(SystemExit) as exc_info: - mc.error_exit("something went wrong") - assert exc_info.value.code == 1 From ea0bd81b84e460368c35432472ef6e8cbdf6c541 Mon Sep 17 00:00:00 2001 From: Teknium Date: Sun, 19 Apr 2026 05:17:39 -0700 Subject: [PATCH 007/547] feat(skills): consolidate find-nearby into maps as a single location skill MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit find-nearby and the (new) maps optional skill both used OpenStreetMap's Overpass + Nominatim to answer the same question — 'what's near this location?' — so shipping both would be duplicate code for overlapping capability. Consolidate into one active-by-default skill at skills/productivity/maps/ that is a strict superset of find-nearby. Moves + deletions: - optional-skills/productivity/maps/ → skills/productivity/maps/ (active, no install step needed) - skills/leisure/find-nearby/ → DELETED (fully superseded) Upgrades to maps_client.py so it covers everything find-nearby did: - Overpass server failover — tries overpass-api.de then overpass.kumi.systems so a single-mirror outage doesn't break the skill (new overpass_query helper, used by both nearby and bbox) - nearby now accepts --near "
" as a shortcut that auto-geocodes, so one command replaces the old 'search → copy coords → nearby' chain - nearby now accepts --category (repeatable) for multi-type queries in one call (e.g. --category restaurant --category bar), results merged and deduped by (osm_type, osm_id), sorted by distance, capped at --limit - Each nearby result now includes maps_url (clickable Google Maps search link) and directions_url (Google Maps directions from the search point — only when a ref point is known) - Promoted commonly-useful OSM tags to top-level fields on each result: cuisine, hours (opening_hours), phone, website — instead of forcing callers to dig into the raw tags dict SKILL.md: - Version bumped 1.1.0 → 1.2.0, description rewritten to lead with capability surface - New 'Working With Telegram Location Pins' section replacing find-nearby's equivalent workflow - metadata.hermes.supersedes: [find-nearby] so tooling can flag any lingering references to the old skill External references updated: - optional-skills/productivity/telephony/SKILL.md — related_skills find-nearby → maps - website/docs/reference/skills-catalog.md — removed the (now-empty) 'leisure' section, added 'maps' row under productivity - website/docs/user-guide/features/cron.md — find-nearby example usages swapped to maps - tests/tools/test_cronjob_tools.py, tests/hermes_cli/test_cron.py, tests/cron/test_scheduler.py — fixture string values swapped - cli.py:5290 — /cron help-hint example swapped Not touched: - RELEASE_v0.2.0.md — historical record, left intact E2E-verified live (Nominatim + Overpass, one query each): - nearby --near "Times Square" --category restaurant --category bar → 3 results, sorted by distance, all with maps_url, directions_url, cuisine, phone, website where OSM had the tags All 111 targeted tests pass across tests/cron/, tests/tools/, tests/hermes_cli/. 
--- cli.py | 2 +- .../productivity/telephony/SKILL.md | 2 +- skills/leisure/find-nearby/SKILL.md | 69 ------- .../find-nearby/scripts/find_nearby.py | 184 ------------------ .../productivity/maps/SKILL.md | 81 ++++++-- .../productivity/maps/scripts/maps_client.py | 168 +++++++++++++--- tests/cron/test_scheduler.py | 8 +- tests/hermes_cli/test_cron.py | 8 +- tests/tools/test_cronjob_tools.py | 10 +- website/docs/reference/skills-catalog.md | 9 +- website/docs/user-guide/features/cron.md | 12 +- 11 files changed, 222 insertions(+), 331 deletions(-) delete mode 100644 skills/leisure/find-nearby/SKILL.md delete mode 100644 skills/leisure/find-nearby/scripts/find_nearby.py rename {optional-skills => skills}/productivity/maps/SKILL.md (53%) rename {optional-skills => skills}/productivity/maps/scripts/maps_client.py (86%) diff --git a/cli.py b/cli.py index e814e35b12..0e5e9ff660 100644 --- a/cli.py +++ b/cli.py @@ -5287,7 +5287,7 @@ class HermesCLI: print(" /cron list") print(' /cron add "every 2h" "Check server status" [--skill blogwatcher]') print(' /cron edit --schedule "every 4h" --prompt "New task"') - print(" /cron edit --skill blogwatcher --skill find-nearby") + print(" /cron edit --skill blogwatcher --skill maps") print(" /cron edit --remove-skill blogwatcher") print(" /cron edit --clear-skills") print(" /cron pause ") diff --git a/optional-skills/productivity/telephony/SKILL.md b/optional-skills/productivity/telephony/SKILL.md index c74a369209..6c457592a9 100644 --- a/optional-skills/productivity/telephony/SKILL.md +++ b/optional-skills/productivity/telephony/SKILL.md @@ -7,7 +7,7 @@ license: MIT metadata: hermes: tags: [telephony, phone, sms, mms, voice, twilio, bland.ai, vapi, calling, texting] - related_skills: [find-nearby, google-workspace, agentmail] + related_skills: [maps, google-workspace, agentmail] category: productivity --- diff --git a/skills/leisure/find-nearby/SKILL.md b/skills/leisure/find-nearby/SKILL.md deleted file mode 100644 index 
f0ecdbf531..0000000000 --- a/skills/leisure/find-nearby/SKILL.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -name: find-nearby -description: Find nearby places (restaurants, cafes, bars, pharmacies, etc.) using OpenStreetMap. Works with coordinates, addresses, cities, zip codes, or Telegram location pins. No API keys needed. -version: 1.0.0 -metadata: - hermes: - tags: [location, maps, nearby, places, restaurants, local] - related_skills: [] ---- - -# Find Nearby — Local Place Discovery - -Find restaurants, cafes, bars, pharmacies, and other places near any location. Uses OpenStreetMap (free, no API keys). Works with: - -- **Coordinates** from Telegram location pins (latitude/longitude in conversation) -- **Addresses** ("near 123 Main St, Springfield") -- **Cities** ("restaurants in downtown Austin") -- **Zip codes** ("pharmacies near 90210") -- **Landmarks** ("cafes near Times Square") - -## Quick Reference - -```bash -# By coordinates (from Telegram location pin or user-provided) -python3 SKILL_DIR/scripts/find_nearby.py --lat --lon --type restaurant --radius 1500 - -# By address, city, or landmark (auto-geocoded) -python3 SKILL_DIR/scripts/find_nearby.py --near "Times Square, New York" --type cafe - -# Multiple place types -python3 SKILL_DIR/scripts/find_nearby.py --near "downtown austin" --type restaurant --type bar --limit 10 - -# JSON output -python3 SKILL_DIR/scripts/find_nearby.py --near "90210" --type pharmacy --json -``` - -### Parameters - -| Flag | Description | Default | -|------|-------------|---------| -| `--lat`, `--lon` | Exact coordinates | — | -| `--near` | Address, city, zip, or landmark (geocoded) | — | -| `--type` | Place type (repeatable for multiple) | restaurant | -| `--radius` | Search radius in meters | 1500 | -| `--limit` | Max results | 15 | -| `--json` | Machine-readable JSON output | off | - -### Common Place Types - -`restaurant`, `cafe`, `bar`, `pub`, `fast_food`, `pharmacy`, `hospital`, `bank`, `atm`, `fuel`, `parking`, `supermarket`, 
`convenience`, `hotel` - -## Workflow - -1. **Get the location.** Look for coordinates (`latitude: ... / longitude: ...`) from a Telegram pin, or ask the user for an address/city/zip. - -2. **Ask for preferences** (only if not already stated): place type, how far they're willing to go, any specifics (cuisine, "open now", etc.). - -3. **Run the script** with appropriate flags. Use `--json` if you need to process results programmatically. - -4. **Present results** with names, distances, and Google Maps links. If the user asked about hours or "open now," check the `hours` field in results — if missing or unclear, verify with `web_search`. - -5. **For directions**, use the `directions_url` from results, or construct: `https://www.google.com/maps/dir/?api=1&origin=,&destination=,` - -## Tips - -- If results are sparse, widen the radius (1500 → 3000m) -- For "open now" requests: check the `hours` field in results, cross-reference with `web_search` for accuracy since OSM hours aren't always complete -- Zip codes alone can be ambiguous globally — prompt the user for country/state if results look wrong -- The script uses OpenStreetMap data which is community-maintained; coverage varies by region diff --git a/skills/leisure/find-nearby/scripts/find_nearby.py b/skills/leisure/find-nearby/scripts/find_nearby.py deleted file mode 100644 index 9d7fed78f4..0000000000 --- a/skills/leisure/find-nearby/scripts/find_nearby.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python3 -"""Find nearby places using OpenStreetMap (Overpass + Nominatim). No API keys needed. 
- -Usage: - # By coordinates - python find_nearby.py --lat 36.17 --lon -115.14 --type restaurant --radius 1500 - - # By address/city/zip (auto-geocoded) - python find_nearby.py --near "Times Square, New York" --type cafe --radius 1000 - python find_nearby.py --near "90210" --type pharmacy - - # Multiple types - python find_nearby.py --lat 36.17 --lon -115.14 --type restaurant --type bar - - # JSON output for programmatic use - python find_nearby.py --near "downtown las vegas" --type restaurant --json -""" - -import argparse -import json -import math -import sys -import urllib.parse -import urllib.request -from typing import Any - -OVERPASS_URLS = [ - "https://overpass-api.de/api/interpreter", - "https://overpass.kumi.systems/api/interpreter", -] -NOMINATIM_URL = "https://nominatim.openstreetmap.org/search" -USER_AGENT = "HermesAgent/1.0 (find-nearby skill)" -TIMEOUT = 15 - - -def _http_get(url: str) -> Any: - req = urllib.request.Request(url, headers={"User-Agent": USER_AGENT}) - with urllib.request.urlopen(req, timeout=TIMEOUT) as r: - return json.loads(r.read()) - - -def _http_post(url: str, data: str) -> Any: - req = urllib.request.Request( - url, data=data.encode(), headers={"User-Agent": USER_AGENT} - ) - with urllib.request.urlopen(req, timeout=TIMEOUT) as r: - return json.loads(r.read()) - - -def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float: - """Distance in meters between two coordinates.""" - R = 6_371_000 - rlat1, rlat2 = math.radians(lat1), math.radians(lat2) - dlat = math.radians(lat2 - lat1) - dlon = math.radians(lon2 - lon1) - a = math.sin(dlat / 2) ** 2 + math.cos(rlat1) * math.cos(rlat2) * math.sin(dlon / 2) ** 2 - return R * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) - - -def geocode(query: str) -> tuple[float, float]: - """Convert address/city/zip to coordinates via Nominatim.""" - params = urllib.parse.urlencode({"q": query, "format": "json", "limit": 1}) - results = _http_get(f"{NOMINATIM_URL}?{params}") - if not 
results: - print(f"Error: Could not geocode '{query}'. Try a more specific address.", file=sys.stderr) - sys.exit(1) - return float(results[0]["lat"]), float(results[0]["lon"]) - - -def find_nearby(lat: float, lon: float, types: list[str], radius: int = 1500, limit: int = 15) -> list[dict]: - """Query Overpass for nearby amenities.""" - # Build Overpass QL query - type_filters = "".join( - f'nwr["amenity"="{t}"](around:{radius},{lat},{lon});' for t in types - ) - query = f"[out:json][timeout:{TIMEOUT}];({type_filters});out center tags;" - - # Try each Overpass server - data = None - for url in OVERPASS_URLS: - try: - data = _http_post(url, f"data={urllib.parse.quote(query)}") - break - except Exception: - continue - - if not data: - return [] - - # Parse results - places = [] - for el in data.get("elements", []): - tags = el.get("tags", {}) - name = tags.get("name") - if not name: - continue - - # Get coordinates (nodes have lat/lon directly, ways/relations use center) - plat = el.get("lat") or (el.get("center", {}) or {}).get("lat") - plon = el.get("lon") or (el.get("center", {}) or {}).get("lon") - if plat is None or plon is None: - continue - - dist = haversine(lat, lon, plat, plon) - - place = { - "name": name, - "type": tags.get("amenity", ""), - "distance_m": round(dist), - "lat": plat, - "lon": plon, - "maps_url": f"https://www.google.com/maps/search/?api=1&query={plat},{plon}", - "directions_url": f"https://www.google.com/maps/dir/?api=1&origin={lat},{lon}&destination={plat},{plon}", - } - - # Add useful optional fields - if tags.get("cuisine"): - place["cuisine"] = tags["cuisine"] - if tags.get("opening_hours"): - place["hours"] = tags["opening_hours"] - if tags.get("phone"): - place["phone"] = tags["phone"] - if tags.get("website"): - place["website"] = tags["website"] - if tags.get("addr:street"): - addr_parts = [tags.get("addr:housenumber", ""), tags.get("addr:street", "")] - if tags.get("addr:city"): - addr_parts.append(tags["addr:city"]) - 
place["address"] = " ".join(p for p in addr_parts if p) - - places.append(place) - - # Sort by distance, limit results - places.sort(key=lambda p: p["distance_m"]) - return places[:limit] - - -def main(): - parser = argparse.ArgumentParser(description="Find nearby places via OpenStreetMap") - parser.add_argument("--lat", type=float, help="Latitude") - parser.add_argument("--lon", type=float, help="Longitude") - parser.add_argument("--near", type=str, help="Address, city, or zip code (geocoded automatically)") - parser.add_argument("--type", action="append", dest="types", default=[], help="Place type (restaurant, cafe, bar, pharmacy, etc.)") - parser.add_argument("--radius", type=int, default=1500, help="Search radius in meters (default: 1500)") - parser.add_argument("--limit", type=int, default=15, help="Max results (default: 15)") - parser.add_argument("--json", action="store_true", dest="json_output", help="Output as JSON") - args = parser.parse_args() - - # Resolve coordinates - if args.near: - lat, lon = geocode(args.near) - elif args.lat is not None and args.lon is not None: - lat, lon = args.lat, args.lon - else: - print("Error: Provide --lat/--lon or --near", file=sys.stderr) - sys.exit(1) - - if not args.types: - args.types = ["restaurant"] - - places = find_nearby(lat, lon, args.types, args.radius, args.limit) - - if args.json_output: - print(json.dumps({"origin": {"lat": lat, "lon": lon}, "results": places, "count": len(places)}, indent=2)) - else: - if not places: - print(f"No {'/'.join(args.types)} found within {args.radius}m") - return - print(f"Found {len(places)} places within {args.radius}m:\n") - for i, p in enumerate(places, 1): - dist_str = f"{p['distance_m']}m" if p["distance_m"] < 1000 else f"{p['distance_m']/1000:.1f}km" - print(f" {i}. 
{p['name']} ({p['type']}) — {dist_str}") - if p.get("cuisine"): - print(f" Cuisine: {p['cuisine']}") - if p.get("hours"): - print(f" Hours: {p['hours']}") - if p.get("address"): - print(f" Address: {p['address']}") - print(f" Map: {p['maps_url']}") - print() - - -if __name__ == "__main__": - main() diff --git a/optional-skills/productivity/maps/SKILL.md b/skills/productivity/maps/SKILL.md similarity index 53% rename from optional-skills/productivity/maps/SKILL.md rename to skills/productivity/maps/SKILL.md index 59e0359d56..9eded20866 100644 --- a/optional-skills/productivity/maps/SKILL.md +++ b/skills/productivity/maps/SKILL.md @@ -1,17 +1,20 @@ --- name: maps description: > - Geocoding, reverse geocoding, nearby POI search (44 categories), - distance/routing, turn-by-turn directions, timezone lookup, bounding box - search, and area info. Uses OpenStreetMap + Overpass + OSRM. Free, no API key. -version: 1.1.0 + Location intelligence — geocode a place, reverse-geocode coordinates, + find nearby places (44 POI categories), driving/walking/cycling + distance + time, turn-by-turn directions, timezone lookup, bounding + box + area for a named place, and POI search within a rectangle. + Uses OpenStreetMap + Overpass + OSRM. Free, no API key. +version: 1.2.0 author: Mibayy license: MIT metadata: hermes: - tags: [maps, geocoding, places, routing, distance, directions, openstreetmap, nominatim, overpass, osrm] + tags: [maps, geocoding, places, routing, distance, directions, nearby, location, openstreetmap, nominatim, overpass, osrm] category: productivity requires_toolsets: [terminal] + supersedes: [find-nearby] --- # Maps Skill @@ -21,21 +24,26 @@ categories, zero dependencies (Python stdlib only), no API key required. Data sources: OpenStreetMap/Nominatim, Overpass API, OSRM, TimeAPI.io. 
+This skill supersedes the old `find-nearby` skill — all of find-nearby's +functionality is covered by the `nearby` command below, with the same +`--near ""` shortcut and multi-category support. + ## When to Use -- User wants coordinates for a place name -- User has coordinates and wants the address -- User asks for nearby restaurants, hospitals, pharmacies, hotels, etc. -- User wants driving/walking/cycling distance or travel time -- User wants turn-by-turn directions between two places -- User wants timezone information for a location -- User wants to search for POIs within a geographic area +- User sends a Telegram location pin (latitude/longitude in the message) → `nearby` +- User wants coordinates for a place name → `search` +- User has coordinates and wants the address → `reverse` +- User asks for nearby restaurants, hospitals, pharmacies, hotels, etc. → `nearby` +- User wants driving/walking/cycling distance or travel time → `distance` +- User wants turn-by-turn directions between two places → `directions` +- User wants timezone information for a location → `timezone` +- User wants to search for POIs within a geographic area → `area` + `bbox` ## Prerequisites Python 3.8+ (stdlib only — no pip installs needed). -Script path after install: `~/.hermes/skills/maps/scripts/maps_client.py` +Script path: `~/.hermes/skills/maps/scripts/maps_client.py` ## Commands @@ -63,9 +71,16 @@ Returns: full address breakdown (street, city, state, country, postcode). 
### nearby — Find places by category ```bash +# By coordinates (from a Telegram location pin, for example) python3 $MAPS nearby 48.8584 2.2945 restaurant --limit 10 python3 $MAPS nearby 40.7128 -74.0060 hospital --radius 2000 -python3 $MAPS nearby 51.5074 -0.1278 cafe --limit 5 --radius 300 + +# By address / city / zip / landmark — --near auto-geocodes +python3 $MAPS nearby --near "Times Square, New York" --category cafe +python3 $MAPS nearby --near "90210" --category pharmacy + +# Multiple categories merged into one query +python3 $MAPS nearby --near "downtown austin" --category restaurant --category bar --limit 10 ``` 44 categories: restaurant, cafe, bar, hospital, pharmacy, hotel, supermarket, @@ -75,6 +90,11 @@ synagogue, dentist, doctor, cinema, theatre, gym, swimming_pool, post_office, convenience_store, bakery, bookshop, laundry, car_wash, car_rental, bicycle_rental, taxi, veterinary, zoo, playground, stadium, nightclub. +Each result includes: `name`, `address`, `lat`/`lon`, `distance_m`, +`maps_url` (clickable Google Maps link), `directions_url` (Google Maps +directions from the search point), and promoted tags when available — +`cuisine`, `hours` (opening_hours), `phone`, `website`. + ### distance — Travel distance and time ```bash @@ -124,11 +144,31 @@ python3 $MAPS bbox 40.75 -74.00 40.77 -73.98 restaurant --limit 20 Finds POIs within a geographic rectangle. Use `area` first to get the bounding box coordinates for a named place. +## Working With Telegram Location Pins + +When a user sends a location pin, the message contains `latitude:` and +`longitude:` fields. Extract those and pass them straight to `nearby`: + +```bash +# User sent a pin at 36.17, -115.14 and asked "find cafes nearby" +python3 $MAPS nearby 36.17 -115.14 cafe --radius 1500 +``` + +Present results as a numbered list with names, distances, and the +`maps_url` field so the user gets a tap-to-open link in chat. For "open +now?" 
questions, check the `hours` field; if missing or unclear, verify +with `web_search` since OSM hours are community-maintained and not always +current. + ## Workflow Examples **"Find Italian restaurants near the Colosseum":** -1. `search "Colosseum Rome"` → get lat/lon -2. `nearby LAT LON restaurant --radius 500` +1. `nearby --near "Colosseum Rome" --category restaurant --radius 500` + — one command, auto-geocoded + +**"What's near this location pin they sent?":** +1. Extract lat/lon from the Telegram message +2. `nearby LAT LON cafe --radius 1500` **"How do I walk from hotel to conference center?":** 1. `directions "Hotel Name" --to "Conference Center" --mode walking` @@ -140,14 +180,19 @@ bounding box coordinates for a named place. ## Pitfalls - Nominatim ToS: max 1 req/s (handled automatically by the script) -- `nearby` requires lat/lon — use `search` first to get coordinates +- `nearby` requires lat/lon OR `--near "
"` — one of the two is needed - OSRM routing coverage is best for Europe and North America -- Overpass API can be slow during peak hours (script retries automatically) +- Overpass API can be slow during peak hours; the script automatically + falls back between mirrors (overpass-api.de → overpass.kumi.systems) - `distance` and `directions` use `--to` flag for the destination (not positional) +- If a zip code alone gives ambiguous results globally, include country/state ## Verification ```bash python3 ~/.hermes/skills/maps/scripts/maps_client.py search "Statue of Liberty" # Should return lat ~40.689, lon ~-74.044 + +python3 ~/.hermes/skills/maps/scripts/maps_client.py nearby --near "Times Square" --category restaurant --limit 3 +# Should return a list of restaurants within ~500m of Times Square ``` diff --git a/optional-skills/productivity/maps/scripts/maps_client.py b/skills/productivity/maps/scripts/maps_client.py similarity index 86% rename from optional-skills/productivity/maps/scripts/maps_client.py rename to skills/productivity/maps/scripts/maps_client.py index c271570f99..db0de82d6d 100644 --- a/optional-skills/productivity/maps/scripts/maps_client.py +++ b/skills/productivity/maps/scripts/maps_client.py @@ -34,7 +34,14 @@ DATA_SOURCE = "OpenStreetMap/Nominatim" NOMINATIM_SEARCH = "https://nominatim.openstreetmap.org/search" NOMINATIM_REVERSE = "https://nominatim.openstreetmap.org/reverse" -OVERPASS_API = "https://overpass-api.de/api/interpreter" +# Public Overpass endpoints. We try them in order so a single server +# outage doesn't break the skill — kumi.systems is a well-known mirror. +OVERPASS_URLS = [ + "https://overpass-api.de/api/interpreter", + "https://overpass.kumi.systems/api/interpreter", +] +# Backward-compat alias for any caller that imports OVERPASS_API directly. 
+OVERPASS_API = OVERPASS_URLS[0] OSRM_BASE = "https://router.project-osrm.org/route/v1" TIMEAPI_BASE = "https://timeapi.io/api/timezone/coordinate" @@ -246,6 +253,30 @@ def http_post(url, data_str, retries=MAX_RETRIES): error_exit(f"POST failed after {retries} attempts. Last error: {last_error}") +def overpass_query(query): + """POST an Overpass QL query, trying each URL in OVERPASS_URLS in turn. + + A single public Overpass mirror can be rate-limited or down; trying the + next mirror before giving up turns a flaky outage into a retry. Returns + parsed JSON. Falls through to error_exit if every mirror fails. + """ + post_data = "data=" + urllib.parse.quote(query) + last_error = None + for url in OVERPASS_URLS: + try: + return http_post(url, post_data, retries=1) + except SystemExit: + # error_exit inside http_post — keep trying the next mirror. + last_error = f"mirror {url} exhausted retries" + continue + except Exception as exc: + last_error = f"{url}: {exc}" + continue + error_exit( + f"All Overpass mirrors failed. Last error: {last_error or 'unknown'}" + ) + + # --------------------------------------------------------------------------- # Geo math # --------------------------------------------------------------------------- @@ -379,6 +410,9 @@ def parse_overpass_elements(elements, ref_lat=None, ref_lon=None): "lon": el_lon, "osm_type": el.get("type", ""), "osm_id": el.get("id", ""), + # Clickable Google Maps link so the agent can render a tap-to-open + # URL in chat without composing one downstream. + "maps_url": f"https://www.google.com/maps/search/?api=1&query={el_lat},{el_lon}", "tags": { k: v for k, v in tags.items() if k not in ("name", "name:en", @@ -386,9 +420,27 @@ def parse_overpass_elements(elements, ref_lat=None, ref_lon=None): }, } + # Promote commonly-useful tags to top-level fields so agents can + # reference them without digging into the raw ``tags`` dict. 
+ for src_key, dst_key in ( + ("cuisine", "cuisine"), + ("opening_hours", "hours"), + ("phone", "phone"), + ("website", "website"), + ): + val = tags.get(src_key) + if val: + place[dst_key] = val + if ref_lat is not None and ref_lon is not None: dist_m = haversine_m(ref_lat, ref_lon, el_lat, el_lon) place["distance_m"] = round(dist_m, 1) + # With a reference point we can also hand back a directions URL. + place["directions_url"] = ( + f"https://www.google.com/maps/dir/?api=1" + f"&origin={ref_lat},{ref_lon}" + f"&destination={el_lat},{el_lon}" + ) places.append(place) @@ -499,47 +551,84 @@ def cmd_reverse(args): # --------------------------------------------------------------------------- def cmd_nearby(args): - """Find nearby POIs using the Overpass API.""" - try: - lat = float(args.lat) - lon = float(args.lon) - except ValueError: - error_exit("LAT and LON must be numeric values.") + """Find nearby POIs using the Overpass API. - category = args.category.lower() - if category not in CATEGORY_TAGS: + Accepts either explicit coordinates (``lat``/``lon``) or a free-form + address via ``--near`` (auto-geocoded through Nominatim). Supports + multiple categories in one call — results are merged, deduplicated + by ``osm_type+osm_id``, sorted by distance. + """ + # Resolve the center point. --near takes precedence if provided so the + # agent can ask "cafes near Times Square" in one command without having + # to geocode first. + if getattr(args, "near", None): + near_query = " ".join(args.near).strip() if isinstance(args.near, list) else str(args.near).strip() + if not near_query: + error_exit("--near must be a non-empty address or place name.") + lat, lon, _ = geocode_single(near_query) + else: + try: + lat = float(args.lat) + lon = float(args.lon) + except (TypeError, ValueError): + error_exit("Provide numeric LAT and LON, or use --near \"
\".") + + # Categories: support both legacy single positional ``category`` and the + # new repeatable ``--category`` flag. Users can ask for multiple place + # types in one query. + categories = [] + if getattr(args, "category_list", None): + categories.extend(args.category_list) + if getattr(args, "category", None): + categories.append(args.category) + # Deduplicate, preserve order, lower-case. + categories = list(dict.fromkeys(c.lower() for c in categories if c)) + if not categories: + error_exit("Provide at least one category (positional or --category).") + unknown = [c for c in categories if c not in CATEGORY_TAGS] + if unknown: error_exit( - f"Unknown category '{category}'. " + f"Unknown categor{'ies' if len(unknown) > 1 else 'y'} " + f"{', '.join(repr(c) for c in unknown)}. " f"Valid categories: {', '.join(VALID_CATEGORIES)}" ) radius = int(args.radius) limit = int(args.limit) - if radius <= 0: error_exit("Radius must be a positive integer (metres).") if limit <= 0: error_exit("Limit must be a positive integer.") - tag_key, tag_val = CATEGORY_TAGS[category] - religion = RELIGION_FILTER.get(category) - query = build_overpass_nearby(tag_key, tag_val, lat, lon, radius, limit, - religion=religion) + # Query each category against the Overpass fallback chain, merge results, + # dedupe by OSM identity so POIs tagged under multiple categories don't + # appear twice. + merged = {} + for category in categories: + tag_key, tag_val = CATEGORY_TAGS[category] + religion = RELIGION_FILTER.get(category) + query = build_overpass_nearby(tag_key, tag_val, lat, lon, radius, limit, + religion=religion) + raw = overpass_query(query) + elements = raw.get("elements", []) + for place in parse_overpass_elements(elements, ref_lat=lat, ref_lon=lon): + place["category"] = category + key = (place.get("osm_type", ""), place.get("osm_id", "")) + # Prefer the entry that actually has a distance_m attached (first + # pass through the ref_lat/ref_lon branch), then first-seen wins. 
+ if key not in merged: + merged[key] = place - post_data = "data=" + urllib.parse.quote(query) - raw = http_post(OVERPASS_API, post_data) - - elements = raw.get("elements", []) - places = parse_overpass_elements(elements, ref_lat=lat, ref_lon=lon) - - # Add category to each result - for p in places: - p["category"] = category + # Sort merged by distance when we have ref lat/lon, then cap at ``limit``. + places = sorted( + merged.values(), + key=lambda p: p.get("distance_m", float("inf")), + )[:limit] print_json({ "center_lat": lat, "center_lon": lon, - "category": category, + "categories": categories, "radius_m": radius, "count": len(places), "results": places, @@ -861,8 +950,7 @@ def cmd_bbox(args): query = build_overpass_bbox(tag_key, tag_val, south, west, north, east, limit, religion=religion) - post_data = "data=" + urllib.parse.quote(query) - raw = http_post(OVERPASS_API, post_data) + raw = overpass_query(query) elements = raw.get("elements", []) @@ -998,15 +1086,33 @@ def build_parser(): help="Find nearby places of a given category.", description=( "Find points of interest near a location using the Overpass API.\n" + "Provide either LAT/LON, or use --near \"
\" to auto-geocode.\n" + "Categories can be specified positionally OR repeated via --category\n" + "to merge multiple types in one query (e.g. --category bar --category cafe).\n" f"Categories: {', '.join(VALID_CATEGORIES)}" ), formatter_class=argparse.RawDescriptionHelpFormatter, ) - p_nearby.add_argument("lat", help="Center latitude (decimal degrees).") - p_nearby.add_argument("lon", help="Center longitude (decimal degrees).") p_nearby.add_argument( - "category", - help="POI category (use --help to see full list).", + "lat", nargs="?", default=None, + help="Center latitude (decimal degrees). Omit if using --near.", + ) + p_nearby.add_argument( + "lon", nargs="?", default=None, + help="Center longitude (decimal degrees). Omit if using --near.", + ) + p_nearby.add_argument( + "category", nargs="?", default=None, + help="POI category (use --help for full list). Omit if using --category flags.", + ) + p_nearby.add_argument( + "--near", nargs="+", metavar="PLACE", + help="Address, city, or landmark to search around (geocoded via Nominatim).", + ) + p_nearby.add_argument( + "--category", action="append", dest="category_list", default=[], + metavar="CAT", + help="POI category (repeatable — adds a type to the search).", ) p_nearby.add_argument( "--radius", "-r", diff --git a/tests/cron/test_scheduler.py b/tests/cron/test_scheduler.py index b889ede372..c083a4a80e 100644 --- a/tests/cron/test_scheduler.py +++ b/tests/cron/test_scheduler.py @@ -1024,7 +1024,7 @@ class TestRunJobSkillBacked: "id": "multi-skill-job", "name": "multi skill test", "prompt": "Combine the results.", - "skills": ["blogwatcher", "find-nearby"], + "skills": ["blogwatcher", "maps"], } fake_db = MagicMock() @@ -1057,12 +1057,12 @@ class TestRunJobSkillBacked: assert error is None assert final_response == "ok" assert skill_view_mock.call_count == 2 - assert [call.args[0] for call in skill_view_mock.call_args_list] == ["blogwatcher", "find-nearby"] + assert [call.args[0] for call in 
skill_view_mock.call_args_list] == ["blogwatcher", "maps"] prompt_arg = mock_agent.run_conversation.call_args.args[0] - assert prompt_arg.index("blogwatcher") < prompt_arg.index("find-nearby") + assert prompt_arg.index("blogwatcher") < prompt_arg.index("maps") assert "Instructions for blogwatcher." in prompt_arg - assert "Instructions for find-nearby." in prompt_arg + assert "Instructions for maps." in prompt_arg assert "Combine the results." in prompt_arg diff --git a/tests/hermes_cli/test_cron.py b/tests/hermes_cli/test_cron.py index 9ae9204827..8593195a1b 100644 --- a/tests/hermes_cli/test_cron.py +++ b/tests/hermes_cli/test_cron.py @@ -54,12 +54,12 @@ class TestCronCommandLifecycle: deliver=None, repeat=None, skill=None, - skills=["find-nearby", "blogwatcher"], + skills=["maps", "blogwatcher"], clear_skills=False, ) ) updated = get_job(job["id"]) - assert updated["skills"] == ["find-nearby", "blogwatcher"] + assert updated["skills"] == ["maps", "blogwatcher"] assert updated["name"] == "Edited Job" assert updated["prompt"] == "Revised prompt" assert updated["schedule_display"] == "every 120m" @@ -95,7 +95,7 @@ class TestCronCommandLifecycle: deliver=None, repeat=None, skill=None, - skills=["blogwatcher", "find-nearby"], + skills=["blogwatcher", "maps"], ) ) out = capsys.readouterr().out @@ -103,5 +103,5 @@ class TestCronCommandLifecycle: jobs = list_jobs() assert len(jobs) == 1 - assert jobs[0]["skills"] == ["blogwatcher", "find-nearby"] + assert jobs[0]["skills"] == ["blogwatcher", "maps"] assert jobs[0]["name"] == "Skill combo" diff --git a/tests/tools/test_cronjob_tools.py b/tests/tools/test_cronjob_tools.py index dd6b0101b1..38fc12cc8c 100644 --- a/tests/tools/test_cronjob_tools.py +++ b/tests/tools/test_cronjob_tools.py @@ -192,23 +192,23 @@ class TestUnifiedCronjobTool: result = json.loads( cronjob( action="create", - skills=["blogwatcher", "find-nearby"], + skills=["blogwatcher", "maps"], prompt="Use both skills and combine the result.", schedule="every 
1h", name="Combo job", ) ) assert result["success"] is True - assert result["skills"] == ["blogwatcher", "find-nearby"] + assert result["skills"] == ["blogwatcher", "maps"] listing = json.loads(cronjob(action="list")) - assert listing["jobs"][0]["skills"] == ["blogwatcher", "find-nearby"] + assert listing["jobs"][0]["skills"] == ["blogwatcher", "maps"] def test_multi_skill_default_name_prefers_prompt_when_present(self): result = json.loads( cronjob( action="create", - skills=["blogwatcher", "find-nearby"], + skills=["blogwatcher", "maps"], prompt="Use both skills and combine the result.", schedule="every 1h", ) @@ -220,7 +220,7 @@ class TestUnifiedCronjobTool: created = json.loads( cronjob( action="create", - skills=["blogwatcher", "find-nearby"], + skills=["blogwatcher", "maps"], prompt="Use both skills and combine the result.", schedule="every 1h", ) diff --git a/website/docs/reference/skills-catalog.md b/website/docs/reference/skills-catalog.md index ffe489d360..46c29929f9 100644 --- a/website/docs/reference/skills-catalog.md +++ b/website/docs/reference/skills-catalog.md @@ -100,14 +100,6 @@ GitHub workflow skills for managing repositories, pull requests, code reviews, i | `github-pr-workflow` | Full pull request lifecycle — create branches, commit changes, open PRs, monitor CI status, auto-fix failures, and merge. Works with gh CLI or falls back to git + GitHub REST API via curl. | `github/github-pr-workflow` | | `github-repo-management` | Clone, create, fork, configure, and manage GitHub repositories. Manage remotes, secrets, releases, and workflows. Works with gh CLI or falls back to git + GitHub REST API via curl. | `github/github-repo-management` | -## leisure - -Skills for discovery and everyday tasks. - -| Skill | Description | Path | -|-------|-------------|------| -| `find-nearby` | Find nearby places (restaurants, cafes, bars, pharmacies, etc.) using OpenStreetMap. Works with coordinates, addresses, cities, zip codes, or Telegram location pins. 
No API keys needed. | `leisure/find-nearby` | - ## mcp Skills for working with MCP (Model Context Protocol) servers, tools, and integrations. @@ -198,6 +190,7 @@ Skills for document creation, presentations, spreadsheets, and other productivit |-------|-------------|------| | `google-workspace` | Gmail, Calendar, Drive, Contacts, Sheets, and Docs integration for Hermes. Uses Hermes-managed OAuth2 setup, prefers the Google Workspace CLI (`gws`) when available for broader API coverage, and falls back to the Python client libraries otherwise. | `productivity/google-workspace` | | `linear` | Manage Linear issues, projects, and teams via the GraphQL API. Create, update, search, and organize issues. Uses API key auth (no OAuth needed). All operations via curl — no dependencies. | `productivity/linear` | +| `maps` | Location intelligence — geocode, reverse-geocode, nearby POI search (44 categories, coordinates or address via `--near`), driving/walking/cycling distance + time, turn-by-turn directions, timezone, bounding box + area, POI search in a rectangle. Uses OpenStreetMap + Overpass + OSRM. No API key needed. Telegram location-pin friendly. | `productivity/maps` | | `nano-pdf` | Edit PDFs with natural-language instructions using the nano-pdf CLI. Modify text, fix typos, update titles, and make content changes to specific pages without manual editing. | `productivity/nano-pdf` | | `notion` | Notion API for creating and managing pages, databases, and blocks via curl. Search, create, update, and query Notion workspaces directly from the terminal. | `productivity/notion` | | `ocr-and-documents` | Extract text from PDFs and scanned documents. Use web_extract for remote URLs, pymupdf for local text-based PDFs, marker-pdf for OCR/scanned docs. For DOCX use python-docx, for PPTX see the powerpoint skill. 
| `productivity/ocr-and-documents` | diff --git a/website/docs/user-guide/features/cron.md b/website/docs/user-guide/features/cron.md index 222c00827c..4628fcc639 100644 --- a/website/docs/user-guide/features/cron.md +++ b/website/docs/user-guide/features/cron.md @@ -30,7 +30,7 @@ Cron-run sessions cannot recursively create more cron jobs. Hermes disables cron /cron add 30m "Remind me to check the build" /cron add "every 2h" "Check server status" /cron add "every 1h" "Summarize new feed items" --skill blogwatcher -/cron add "every 1h" "Use both skills and combine the result" --skill blogwatcher --skill find-nearby +/cron add "every 1h" "Use both skills and combine the result" --skill blogwatcher --skill maps ``` ### From the standalone CLI @@ -40,7 +40,7 @@ hermes cron create "every 2h" "Check server status" hermes cron create "every 1h" "Summarize new feed items" --skill blogwatcher hermes cron create "every 1h" "Use both skills and combine the result" \ --skill blogwatcher \ - --skill find-nearby \ + --skill maps \ --name "Skill combo" ``` @@ -77,7 +77,7 @@ Skills are loaded in order. The prompt becomes the task instruction layered on t ```python cronjob( action="create", - skills=["blogwatcher", "find-nearby"], + skills=["blogwatcher", "maps"], prompt="Look for new local events and interesting nearby places, then combine them into one short brief.", schedule="every 6h", name="Local brief", @@ -95,7 +95,7 @@ You do not need to delete and recreate jobs just to change them. ```bash /cron edit --schedule "every 4h" /cron edit --prompt "Use the revised task" -/cron edit --skill blogwatcher --skill find-nearby +/cron edit --skill blogwatcher --skill maps /cron edit --remove-skill blogwatcher /cron edit --clear-skills ``` @@ -105,8 +105,8 @@ You do not need to delete and recreate jobs just to change them. 
```bash hermes cron edit --schedule "every 4h" hermes cron edit --prompt "Use the revised task" -hermes cron edit --skill blogwatcher --skill find-nearby -hermes cron edit --add-skill find-nearby +hermes cron edit --skill blogwatcher --skill maps +hermes cron edit --add-skill maps hermes cron edit --remove-skill blogwatcher hermes cron edit --clear-skills ``` From a3b76ae36d37124638b3e547b608b266f230c679 Mon Sep 17 00:00:00 2001 From: Teknium Date: Sun, 19 Apr 2026 05:19:51 -0700 Subject: [PATCH 008/547] chore(attribution): add AUTHOR_MAP entry for Mibayy Adds the Mibayy noreply email to the AUTHOR_MAP so CI attribution checks pass for the #3884 maps skill feat commit (7fa01faf). --- scripts/release.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/release.py b/scripts/release.py index 9c04c1c6b3..a20c3c134f 100755 --- a/scripts/release.py +++ b/scripts/release.py @@ -77,6 +77,7 @@ AUTHOR_MAP = { "Asunfly@users.noreply.github.com": "Asunfly", "2500400+honghua@users.noreply.github.com": "honghua", "nish3451@users.noreply.github.com": "nish3451", + "Mibayy@users.noreply.github.com": "Mibayy", "135070653+sgaofen@users.noreply.github.com": "sgaofen", # contributors (manual mapping from git names) "ahmedsherif95@gmail.com": "asheriif", From d5fc8a5e00dfd396cd188f605ff2abc76fce3c2e Mon Sep 17 00:00:00 2001 From: Teknium <127238744+teknium1@users.noreply.github.com> Date: Sun, 19 Apr 2026 05:19:57 -0700 Subject: [PATCH 009/547] fix(tui): reject /model and agent-mutating slash passthroughs while running (#12548) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit agent.switch_model() mutates self.model, self.provider, self.base_url, self.api_key, self.api_mode, and rebuilds self.client / self._anthropic_client in place. The worker thread running agent.run_conversation reads those fields on every iteration. 
A concurrent config.set key=model or slash- worker-mirrored /model / /personality / /prompt / /compress can send an HTTP request with mismatched model + base_url (or the old client keeps running against a new endpoint) — 400/404s the user never asked for. Fix: same pattern as the session.undo / session.compress guards (PR #12416) and the gateway runner's running-agent /model guard (PR #12334). Reject with 4009 'session busy' when session.running is True. Two call sites guarded: - config.set with key=model: primary /model entry point from Ink - _mirror_slash_side_effects for model / personality / prompt / compress: slash-worker passthrough path that applies live-agent side effects Idle sessions still switch models normally — regression guard test verifies this. Tests (tests/test_tui_gateway_server.py): 4 new cases. - test_config_set_model_rejects_while_running - test_config_set_model_allowed_when_idle (regression guard) - test_mirror_slash_side_effects_rejects_mutating_commands_while_running - test_mirror_slash_side_effects_allowed_when_idle (regression guard) Validated: against unpatched server.py, the two 'rejects_while_running' tests fail with the exact race they assert against. With the fix all 4 pass. Live E2E against the live Python environment confirmed both guards enforce 4009 / 'session busy' exactly as designed. 
--- tests/test_tui_gateway_server.py | 121 +++++++++++++++++++++++++++++++ tui_gateway/server.py | 24 ++++++ 2 files changed, 145 insertions(+) diff --git a/tests/test_tui_gateway_server.py b/tests/test_tui_gateway_server.py index 07a68ac9e9..c0f5239035 100644 --- a/tests/test_tui_gateway_server.py +++ b/tests/test_tui_gateway_server.py @@ -828,3 +828,124 @@ def test_respond_unpacks_sid_tuple_correctly(): server._pending.pop("rid-x", None) server._answers.pop("rid-x", None) + + +# --------------------------------------------------------------------------- +# /model switch and other agent-mutating commands must reject while the +# session is running. agent.switch_model() mutates self.model, self.provider, +# self.base_url, self.client etc. in place — the worker thread running +# agent.run_conversation is reading those on every iteration. Same class of +# bug as the session.undo / session.compress mid-run silent-drop; same fix +# pattern: reject with 4009 while running. +# --------------------------------------------------------------------------- + + +def test_config_set_model_rejects_while_running(monkeypatch): + """/model via config.set must reject during an in-flight turn.""" + seen = {"called": False} + + def _fake_apply(sid, session, raw): + seen["called"] = True + return {"value": raw, "warning": ""} + + monkeypatch.setattr(server, "_apply_model_switch", _fake_apply) + + server._sessions["sid"] = _session(running=True) + try: + resp = server.handle_request({ + "id": "1", "method": "config.set", + "params": {"session_id": "sid", "key": "model", "value": "anthropic/claude-sonnet-4.6"}, + }) + assert resp.get("error") + assert resp["error"]["code"] == 4009 + assert "session busy" in resp["error"]["message"] + assert not seen["called"], ( + "_apply_model_switch was called mid-turn — would race with " + "the worker thread reading agent.model / agent.client" + ) + finally: + server._sessions.pop("sid", None) + + +def 
test_config_set_model_allowed_when_idle(monkeypatch): + """Regression guard: idle sessions can still switch models.""" + seen = {"called": False} + + def _fake_apply(sid, session, raw): + seen["called"] = True + return {"value": "newmodel", "warning": ""} + + monkeypatch.setattr(server, "_apply_model_switch", _fake_apply) + + server._sessions["sid"] = _session(running=False) + try: + resp = server.handle_request({ + "id": "1", "method": "config.set", + "params": {"session_id": "sid", "key": "model", "value": "newmodel"}, + }) + assert resp.get("result") + assert resp["result"]["value"] == "newmodel" + assert seen["called"] + finally: + server._sessions.pop("sid", None) + + +def test_mirror_slash_side_effects_rejects_mutating_commands_while_running(monkeypatch): + """Slash worker passthrough (e.g. /model, /personality, /prompt, + /compress) must reject during an in-flight turn. Same race as + config.set — mutates live agent state while run_conversation is + reading it.""" + import types + + applied = {"model": False, "compress": False} + + def _fake_apply_model(sid, session, arg): + applied["model"] = True + return {"value": arg, "warning": ""} + + def _fake_compress(session, focus): + applied["compress"] = True + return (0, {}) + + monkeypatch.setattr(server, "_apply_model_switch", _fake_apply_model) + monkeypatch.setattr(server, "_compress_session_history", _fake_compress) + + session = _session(running=True) + session["agent"] = types.SimpleNamespace(model="x") + + for cmd, expected_name in [ + ("/model new/model", "model"), + ("/personality default", "personality"), + ("/prompt", "prompt"), + ("/compress", "compress"), + ]: + warning = server._mirror_slash_side_effects("sid", session, cmd) + assert "session busy" in warning, ( + f"{cmd} should have returned busy warning, got: {warning!r}" + ) + assert f"/{expected_name}" in warning + + # None of the mutating side-effect helpers should have fired. 
+ assert not applied["model"], "model switch fired despite running session" + assert not applied["compress"], "compress fired despite running session" + + +def test_mirror_slash_side_effects_allowed_when_idle(monkeypatch): + """Regression guard: idle session still runs the side effects.""" + import types + + applied = {"model": False} + + def _fake_apply_model(sid, session, arg): + applied["model"] = True + return {"value": arg, "warning": ""} + + monkeypatch.setattr(server, "_apply_model_switch", _fake_apply_model) + + session = _session(running=False) + session["agent"] = types.SimpleNamespace(model="x") + + warning = server._mirror_slash_side_effects("sid", session, "/model foo") + # Should NOT contain "session busy" — the switch went through. + assert "session busy" not in warning + assert applied["model"] diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 921f868a3c..00f8346191 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -1743,6 +1743,19 @@ def _(rid, params: dict) -> dict: if not value: return _err(rid, 4002, "model value required") if session: + # Reject during an in-flight turn. agent.switch_model() + # mutates self.model / self.provider / self.base_url / + # self.client in place; the worker thread running + # agent.run_conversation is reading those on every + # iteration. A mid-turn swap can send an HTTP request + # with the new base_url but old model (or vice versa), + # producing 400/404s the user never asked for. Parity + # with the gateway's running-agent /model guard. 
+ if session.get("running"): + return _err( + rid, 4009, + "session busy — /interrupt the current turn before switching models", + ) result = _apply_model_switch(params.get("session_id", ""), session, value) else: result = _apply_model_switch("", {"agent": None}, value) @@ -2446,6 +2459,17 @@ def _mirror_slash_side_effects(sid: str, session: dict, command: str) -> str: return "" name, arg, agent = parts[0], (parts[1].strip() if len(parts) > 1 else ""), session.get("agent") + # Reject agent-mutating commands during an in-flight turn. These + # all do read-then-mutate on live agent/session state that the + # worker thread running agent.run_conversation is using. Parity + # with the session.compress / session.undo guards and the gateway + # runner's running-agent /model guard. + _MUTATES_WHILE_RUNNING = {"model", "personality", "prompt", "compress"} + if name in _MUTATES_WHILE_RUNNING and session.get("running"): + return ( + f"session busy — /interrupt the current turn before running /{name}" + ) + try: if name == "model" and arg and agent: result = _apply_model_switch(sid, session, arg) From 37524a574ec94adcd40e65d4cbb847e84153aa92 Mon Sep 17 00:00:00 2001 From: Teknium Date: Thu, 9 Apr 2026 03:16:04 -0700 Subject: [PATCH 010/547] docs: add PR review guides, rework quickstart, slim down installation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds two complementary GitHub PR review guides from contest submissions: - Cron-based PR review agent (from PR #5836 by @dieutx) — polls on a schedule, no server needed, teaches skills + memory authoring - Webhook-based PR review (from PR #6503 by @gaijinkush) — real-time via GitHub webhooks, documents previously undocumented webhook feature Both guides are cross-linked so users can pick the approach that fits. 
Reworks quickstart.md by integrating the best content from PR #5744 by @aidil2105: - Opinionated decision table ('The fastest path') - Common failure modes table with causes and fixes - Recovery toolkit sequence - Session lifecycle verification step - Better first-chat guidance with example prompts Slims down installation.md: - Removes 10-step manual/dev install section (already covered in developer-guide/contributing.md) - Links to Contributing guide for dev setup - Keeps focused on the automated installer + prerequisites + troubleshooting --- website/docs/getting-started/installation.md | 199 +---------- website/docs/getting-started/quickstart.md | 255 ++++++++------ website/docs/guides/github-pr-review-agent.md | 300 ++++++++++++++++ .../docs/guides/webhook-github-pr-review.md | 329 ++++++++++++++++++ website/sidebars.ts | 2 + 5 files changed, 784 insertions(+), 301 deletions(-) create mode 100644 website/docs/guides/github-pr-review-agent.md create mode 100644 website/docs/guides/webhook-github-pr-review.md diff --git a/website/docs/getting-started/installation.md b/website/docs/getting-started/installation.md index a28b1256e6..219c1e7d55 100644 --- a/website/docs/getting-started/installation.md +++ b/website/docs/getting-started/installation.md @@ -6,7 +6,7 @@ description: "Install Hermes Agent on Linux, macOS, WSL2, or Android via Termux" # Installation -Get Hermes Agent up and running in under two minutes with the one-line installer, or follow the manual steps for full control. +Get Hermes Agent up and running in under two minutes with the one-line installer. ## Quick Install @@ -82,202 +82,9 @@ If you use Nix (on NixOS, macOS, or Linux), there's a dedicated setup path with --- -## Manual Installation +## Manual / Developer Installation -If you prefer full control over the installation process, follow these steps. 
- -### Step 1: Clone the Repository - -Clone with `--recurse-submodules` to pull the required submodules: - -```bash -git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git -cd hermes-agent -``` - -If you already cloned without `--recurse-submodules`: -```bash -git submodule update --init --recursive -``` - -### Step 2: Install uv & Create Virtual Environment - -```bash -# Install uv (if not already installed) -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Create venv with Python 3.11 (uv downloads it if not present — no sudo needed) -uv venv venv --python 3.11 -``` - -:::tip -You do **not** need to activate the venv to use `hermes`. The entry point has a hardcoded shebang pointing to the venv Python, so it works globally once symlinked. -::: - -### Step 3: Install Python Dependencies - -```bash -# Tell uv which venv to install into -export VIRTUAL_ENV="$(pwd)/venv" - -# Install with all extras -uv pip install -e ".[all]" -``` - -If you only want the core agent (no Telegram/Discord/cron support): -```bash -uv pip install -e "." -``` - -
-Optional extras breakdown - -| Extra | What it adds | Install command | -|-------|-------------|-----------------| -| `all` | Everything below | `uv pip install -e ".[all]"` | -| `messaging` | Telegram, Discord & Slack gateway | `uv pip install -e ".[messaging]"` | -| `cron` | Cron expression parsing for scheduled tasks | `uv pip install -e ".[cron]"` | -| `cli` | Terminal menu UI for setup wizard | `uv pip install -e ".[cli]"` | -| `modal` | Modal cloud execution backend | `uv pip install -e ".[modal]"` | -| `tts-premium` | ElevenLabs premium voices | `uv pip install -e ".[tts-premium]"` | -| `voice` | CLI microphone input + audio playback | `uv pip install -e ".[voice]"` | -| `pty` | PTY terminal support | `uv pip install -e ".[pty]"` | -| `termux` | Tested Android / Termux bundle (`cron`, `cli`, `pty`, `mcp`, `honcho`, `acp`) | `python -m pip install -e ".[termux]" -c constraints-termux.txt` | -| `honcho` | AI-native memory (Honcho integration) | `uv pip install -e ".[honcho]"` | -| `mcp` | Model Context Protocol support | `uv pip install -e ".[mcp]"` | -| `homeassistant` | Home Assistant integration | `uv pip install -e ".[homeassistant]"` | -| `acp` | ACP editor integration support | `uv pip install -e ".[acp]"` | -| `slack` | Slack messaging | `uv pip install -e ".[slack]"` | -| `dev` | pytest & test utilities | `uv pip install -e ".[dev]"` | - -You can combine extras: `uv pip install -e ".[messaging,cron]"` - -:::tip Termux users -`.[all]` is not currently available on Android because the `voice` extra pulls `faster-whisper`, which depends on `ctranslate2` wheels that are not published for Android. Use `.[termux]` for the tested mobile install path, then add individual extras only as needed. -::: - -
- -### Step 4: Install Optional Submodules (if needed) - -```bash -# RL training backend (optional) -uv pip install -e "./tinker-atropos" -``` - -Both are optional — if you skip them, the corresponding toolsets simply won't be available. - -### Step 5: Install Node.js Dependencies (Optional) - -Only needed for **browser automation** (Browserbase-powered) and **WhatsApp bridge**: - -```bash -npm install -``` - -### Step 6: Create the Configuration Directory - -```bash -# Create the directory structure -mkdir -p ~/.hermes/{cron,sessions,logs,memories,skills,pairing,hooks,image_cache,audio_cache,whatsapp/session} - -# Copy the example config file -cp cli-config.yaml.example ~/.hermes/config.yaml - -# Create an empty .env file for API keys -touch ~/.hermes/.env -``` - -### Step 7: Add Your API Keys - -Open `~/.hermes/.env` and add at minimum an LLM provider key: - -```bash -# Required — at least one LLM provider: -OPENROUTER_API_KEY=sk-or-v1-your-key-here - -# Optional — enable additional tools: -FIRECRAWL_API_KEY=fc-your-key # Web search & scraping (or self-host, see docs) -FAL_KEY=your-fal-key # Image generation (FLUX) -``` - -Or set them via the CLI: -```bash -hermes config set OPENROUTER_API_KEY sk-or-v1-your-key-here -``` - -### Step 8: Add `hermes` to Your PATH - -```bash -mkdir -p ~/.local/bin -ln -sf "$(pwd)/venv/bin/hermes" ~/.local/bin/hermes -``` - -If `~/.local/bin` isn't on your PATH, add it to your shell config: - -```bash -# Bash -echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc && source ~/.bashrc - -# Zsh -echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.zshrc && source ~/.zshrc - -# Fish -fish_add_path $HOME/.local/bin -``` - -### Step 9: Configure Your Provider - -```bash -hermes model # Select your LLM provider and model -``` - -### Step 10: Verify the Installation - -```bash -hermes version # Check that the command is available -hermes doctor # Run diagnostics to verify everything is working -hermes status # Check your configuration 
-hermes chat -q "Hello! What tools do you have available?" -``` - ---- - -## Quick-Reference: Manual Install (Condensed) - -For those who just want the commands: - -```bash -# Install uv -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Clone & enter -git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git -cd hermes-agent - -# Create venv with Python 3.11 -uv venv venv --python 3.11 -export VIRTUAL_ENV="$(pwd)/venv" - -# Install everything -uv pip install -e ".[all]" -uv pip install -e "./tinker-atropos" -npm install # optional, for browser tools and WhatsApp - -# Configure -mkdir -p ~/.hermes/{cron,sessions,logs,memories,skills,pairing,hooks,image_cache,audio_cache,whatsapp/session} -cp cli-config.yaml.example ~/.hermes/config.yaml -touch ~/.hermes/.env -echo 'OPENROUTER_API_KEY=sk-or-v1-your-key' >> ~/.hermes/.env - -# Make hermes available globally -mkdir -p ~/.local/bin -ln -sf "$(pwd)/venv/bin/hermes" ~/.local/bin/hermes - -# Verify -hermes doctor -hermes -``` +If you want to clone the repo and install from source — for contributing, running from a specific branch, or having full control over the virtual environment — see the [Development Setup](../developer-guide/contributing.md#development-setup) section in the Contributing guide. --- diff --git a/website/docs/getting-started/quickstart.md b/website/docs/getting-started/quickstart.md index 8a39c49f1e..b67f63ae36 100644 --- a/website/docs/getting-started/quickstart.md +++ b/website/docs/getting-started/quickstart.md @@ -1,12 +1,35 @@ --- sidebar_position: 1 title: "Quickstart" -description: "Your first conversation with Hermes Agent — from install to chatting in 2 minutes" +description: "Your first conversation with Hermes Agent — from install to chatting in under 5 minutes" --- # Quickstart -This guide walks you through installing Hermes Agent, setting up a provider, and having your first conversation. By the end, you'll know the key features and how to explore further. 
+This guide gets you from zero to a working Hermes setup that survives real use. Install, choose a provider, verify a working chat, and know exactly what to do when something breaks. + +## Who this is for + +- Brand new and want the shortest path to a working setup +- Switching providers and don't want to lose time to config mistakes +- Setting up Hermes for a team, bot, or always-on workflow +- Tired of "it installed, but it still does nothing" + +## The fastest path + +Pick the row that matches your goal: + +| Goal | Do this first | Then do this | +|---|---|---| +| I just want Hermes working on my machine | `hermes setup` | Run a real chat and verify it responds | +| I already know my provider | `hermes model` | Save the config, then start chatting | +| I want a bot or always-on setup | `hermes gateway setup` after CLI works | Connect Telegram, Discord, Slack, or another platform | +| I want a local or self-hosted model | `hermes model` → custom endpoint | Verify the endpoint, model name, and context length | +| I want multi-provider fallback | `hermes model` first | Add routing and fallback only after the base chat works | + +**Rule of thumb:** if Hermes cannot complete a normal chat, do not add more features yet. Get one clean conversation working first, then layer on gateway, cron, skills, voice, or routing. + +--- ## 1. Install Hermes Agent @@ -31,86 +54,109 @@ After it finishes, reload your shell: source ~/.bashrc # or source ~/.zshrc ``` -## 2. Set Up a Provider +For detailed installation options, prerequisites, and troubleshooting, see the [Installation guide](./installation.md). -The installer configures your LLM provider automatically. To change it later, use one of these commands: +## 2. Choose a Provider + +The single most important setup step. 
Use `hermes model` to walk through the choice interactively: ```bash -hermes model # Choose your LLM provider and model -hermes tools # Configure which tools are enabled -hermes setup # Or configure everything at once +hermes model ``` -`hermes model` walks you through selecting an inference provider: +Good defaults: -| Provider | What it is | How to set up | -|----------|-----------|---------------| -| **Nous Portal** | Subscription-based, zero-config | OAuth login via `hermes model` | -| **OpenAI Codex** | ChatGPT OAuth, uses Codex models | Device code auth via `hermes model` | -| **Anthropic** | Claude models directly (Pro/Max or API key) | `hermes model` with Claude Code auth, or an Anthropic API key | -| **OpenRouter** | Multi-provider routing across many models | Enter your API key | -| **Z.AI** | GLM / Zhipu-hosted models | Set `GLM_API_KEY` / `ZAI_API_KEY` | -| **Kimi / Moonshot** | Moonshot-hosted coding and chat models | Set `KIMI_API_KEY` | -| **Kimi / Moonshot China** | China-region Moonshot endpoint | Set `KIMI_CN_API_KEY` | -| **Arcee AI** | Trinity models | Set `ARCEEAI_API_KEY` | -| **Xiaomi MiMo** | Xiaomi MiMo models via [platform.xiaomimimo.com](https://platform.xiaomimimo.com) | Set `XIAOMI_API_KEY` | -| **AWS Bedrock** | Anthropic Claude, Amazon Nova, DeepSeek v3.2, and Meta Llama via AWS | Standard boto3 auth (`AWS_PROFILE` or `AWS_ACCESS_KEY_ID` + `AWS_REGION`) | -| **Qwen Portal (OAuth)** | Qwen 3.5 / Qwen-Coder models via Alibaba's consumer Qwen Portal | OAuth via `hermes model` (optional: `HERMES_QWEN_BASE_URL`) | -| **MiniMax** | International MiniMax endpoint | Set `MINIMAX_API_KEY` | -| **MiniMax China** | China-region MiniMax endpoint | Set `MINIMAX_CN_API_KEY` | -| **Alibaba Cloud** | Qwen models via DashScope | Set `DASHSCOPE_API_KEY` | -| **Hugging Face** | 20+ open models via unified router (Qwen, DeepSeek, Kimi, etc.) 
| Set `HF_TOKEN` | -| **Kilo Code** | KiloCode-hosted models | Set `KILOCODE_API_KEY` | -| **OpenCode Zen** | Pay-as-you-go access to curated models | Set `OPENCODE_ZEN_API_KEY` | -| **OpenCode Go** | $10/month subscription for open models | Set `OPENCODE_GO_API_KEY` | -| **DeepSeek** | Direct DeepSeek API access | Set `DEEPSEEK_API_KEY` | -| **NVIDIA NIM** | Nemotron models via build.nvidia.com or local NIM | Set `NVIDIA_API_KEY` (optional: `NVIDIA_BASE_URL`) | -| **Ollama Cloud** | Managed Ollama catalog without local GPU | Set `OLLAMA_API_KEY` (or pick **Ollama Cloud** in `hermes model`) | -| **Google Gemini (OAuth)** | Gemini via Cloud Code Assist — free and paid tiers | OAuth via `hermes model` (optional: `HERMES_GEMINI_PROJECT_ID` for paid tiers) | -| **xAI (Grok)** | Grok 4 models via Responses API + prompt caching | Set `XAI_API_KEY` (alias: `grok`) | -| **GitHub Copilot** | GitHub Copilot subscription (GPT-5.x, Claude, Gemini, etc.) | OAuth via `hermes model`, or `COPILOT_GITHUB_TOKEN` / `GH_TOKEN` | -| **GitHub Copilot ACP** | Copilot ACP agent backend (spawns local `copilot` CLI) | `hermes model` (requires `copilot` CLI + `copilot login`) | -| **Vercel AI Gateway** | Vercel AI Gateway routing | Set `AI_GATEWAY_API_KEY` | -| **Custom Endpoint** | VLLM, SGLang, Ollama, or any OpenAI-compatible API | Set base URL + API key | +| Situation | Recommended path | +|---|---| +| Least friction | Nous Portal or OpenRouter | +| You already have Claude or Codex auth | Anthropic or OpenAI Codex | +| You want local/private inference | Ollama or any custom OpenAI-compatible endpoint | +| You want multi-provider routing | OpenRouter | +| You have a custom GPU server | vLLM, SGLang, LiteLLM, or any OpenAI-compatible endpoint | + +For most first-time users: choose a provider, accept the defaults unless you know why you're changing them. The full provider catalog with env vars and setup steps lives on the [Providers](../integrations/providers.md) page. 
:::caution Minimum context: 64K tokens Hermes Agent requires a model with at least **64,000 tokens** of context. Models with smaller windows cannot maintain enough working memory for multi-step tool-calling workflows and will be rejected at startup. Most hosted models (Claude, GPT, Gemini, Qwen, DeepSeek) meet this easily. If you're running a local model, set its context size to at least 64K (e.g. `--ctx-size 65536` for llama.cpp or `-c 65536` for Ollama). ::: :::tip -You can switch providers at any time with `hermes model` — no code changes, no lock-in. When configuring a custom endpoint, Hermes will prompt for the context window size and auto-detect it when possible. See [Context Length Detection](../integrations/providers.md#context-length-detection) for details. +You can switch providers at any time with `hermes model` — no lock-in. For a full list of all supported providers and setup details, see [AI Providers](../integrations/providers.md). ::: -## 3. Start Chatting +### How settings are stored + +Hermes separates secrets from normal config: + +- **Secrets and tokens** → `~/.hermes/.env` +- **Non-secret settings** → `~/.hermes/config.yaml` + +The easiest way to set values correctly is through the CLI: + +```bash +hermes config set model anthropic/claude-opus-4.6 +hermes config set terminal.backend docker +hermes config set OPENROUTER_API_KEY sk-or-... +``` + +The right value goes to the right file automatically. + +## 3. Run Your First Chat ```bash hermes # classic CLI hermes --tui # modern TUI (recommended) ``` -That's it! You'll see a welcome banner with your model, available tools, and skills. Type a message and press Enter. +You'll see a welcome banner with your model, available tools, and skills. Use a prompt that's specific and easy to verify: :::tip Pick your interface Hermes ships with two terminal interfaces: the classic `prompt_toolkit` CLI and a newer [TUI](../user-guide/tui.md) with modal overlays, mouse selection, and non-blocking input. 
Both share the same sessions, slash commands, and config — try each with `hermes` vs `hermes --tui`. ::: ``` -❯ What can you help me with? +Summarize this repo in 5 bullets and tell me what the main entrypoint is. ``` -The agent has access to tools for web search, file operations, terminal commands, and more — all out of the box. +``` +Check my current directory and tell me what looks like the main project file. +``` -## 4. Try Key Features +``` +Help me set up a clean GitHub PR workflow for this codebase. +``` -### Ask it to use the terminal +**What success looks like:** + +- The banner shows your chosen model/provider +- Hermes replies without error +- It can use a tool if needed (terminal, file read, web search) +- The conversation continues normally for more than one turn + +If that works, you're past the hardest part. + +## 4. Verify Sessions Work + +Before moving on, make sure resume works: + +```bash +hermes --continue # Resume the most recent session +hermes -c # Short form +``` + +That should bring you back to the session you just had. If it doesn't, check whether you're in the same profile and whether the session actually saved. This matters later when you're juggling multiple setups or machines. + +## 5. Try Key Features + +### Use the terminal ``` ❯ What's my disk usage? Show the top 5 largest directories. ``` -The agent will run terminal commands on your behalf and show you the results. +The agent runs terminal commands on your behalf and shows results. -### Use slash commands +### Slash commands Type `/` to see an autocomplete dropdown of all commands: @@ -128,22 +174,27 @@ Press `Alt+Enter` or `Ctrl+J` to add a new line. Great for pasting code or writi ### Interrupt the agent -If the agent is taking too long, just type a new message and press Enter — it interrupts the current task and switches to your new instructions. `Ctrl+C` also works. 
+If the agent is taking too long, type a new message and press Enter — it interrupts the current task and switches to your new instructions. `Ctrl+C` also works. -### Resume a session +## 6. Add the Next Layer -When you exit, hermes prints a resume command: +Only after the base chat works. Pick what you need: + +### Bot or shared assistant ```bash -hermes --continue # Resume the most recent session -hermes -c # Short form +hermes gateway setup # Interactive platform configuration ``` -## 5. Explore Further +Connect [Telegram](/docs/user-guide/messaging/telegram), [Discord](/docs/user-guide/messaging/discord), [Slack](/docs/user-guide/messaging/slack), [WhatsApp](/docs/user-guide/messaging/whatsapp), [Signal](/docs/user-guide/messaging/signal), [Email](/docs/user-guide/messaging/email), or [Home Assistant](/docs/user-guide/messaging/homeassistant). -Here are some things to try next: +### Automation and tools -### Set up a sandboxed terminal +- `hermes tools` — tune tool access per platform +- `hermes skills` — browse and install reusable workflows +- Cron — only after your bot or CLI setup is stable + +### Sandboxed terminal For safety, run the agent in a Docker container or on a remote server: @@ -152,71 +203,25 @@ hermes config set terminal.backend docker # Docker isolation hermes config set terminal.backend ssh # Remote server ``` -### Connect messaging platforms - -Chat with Hermes from your phone or other surfaces via Telegram, Discord, Slack, WhatsApp, Signal, Email, or Home Assistant: - -```bash -hermes gateway setup # Interactive platform configuration -``` - -### Add voice mode - -Want microphone input in the CLI or spoken replies in messaging? +### Voice mode ```bash pip install "hermes-agent[voice]" # Includes faster-whisper for free local speech-to-text ``` -Then start Hermes and enable it inside the CLI: +Then in the CLI: `/voice on`. Press `Ctrl+B` to record. See [Voice Mode](../user-guide/features/voice-mode.md). 
-```text -/voice on -``` - -Press `Ctrl+B` to record, or use `/voice tts` to have Hermes speak its replies. See [Voice Mode](../user-guide/features/voice-mode.md) for the full setup across CLI, Telegram, Discord, and Discord voice channels. - -### Schedule automated tasks - -``` -❯ Every morning at 9am, check Hacker News for AI news and send me a summary on Telegram. -``` - -The agent will set up a cron job that runs automatically via the gateway. - -### Browse and install skills +### Skills ```bash hermes skills search kubernetes -hermes skills search react --source skills-sh -hermes skills search https://mintlify.com/docs --source well-known hermes skills install openai/skills/k8s -hermes skills install official/security/1password -hermes skills install skills-sh/vercel-labs/json-render/json-render-react --force ``` -Tips: -- Use `--source skills-sh` to search the public `skills.sh` directory. -- Use `--source well-known` with a docs/site URL to discover skills from `/.well-known/skills/index.json`. -- Use `--force` only after reviewing a third-party skill. It can override non-dangerous policy blocks, but not a `dangerous` scan verdict. +Or use `/skills` inside a chat session. -Or use the `/skills` slash command inside chat. - -### Use Hermes inside an editor via ACP - -Hermes can also run as an ACP server for ACP-compatible editors like VS Code, Zed, and JetBrains: - -```bash -pip install -e '.[acp]' -hermes acp -``` - -See [ACP Editor Integration](../user-guide/features/acp.md) for setup details. - -### Try MCP servers - -Connect to external tools via the Model Context Protocol: +### MCP servers ```yaml # Add to ~/.hermes/config.yaml @@ -228,6 +233,43 @@ mcp_servers: GITHUB_PERSONAL_ACCESS_TOKEN: "ghp_xxx" ``` +### Editor integration (ACP) + +```bash +pip install -e '.[acp]' +hermes acp +``` + +See [ACP Editor Integration](../user-guide/features/acp.md). 
+ +--- + +## Common Failure Modes + +These are the problems that waste the most time: + +| Symptom | Likely cause | Fix | +|---|---|---| +| Hermes opens but gives empty or broken replies | Provider auth or model selection is wrong | Run `hermes model` again and confirm provider, model, and auth | +| Custom endpoint "works" but returns garbage | Wrong base URL, model name, or not actually OpenAI-compatible | Verify the endpoint in a separate client first | +| Gateway starts but nobody can message it | Bot token, allowlist, or platform setup is incomplete | Re-run `hermes gateway setup` and check `hermes gateway status` | +| `hermes --continue` can't find old session | Switched profiles or session never saved | Check `hermes sessions list` and confirm you're in the right profile | +| Model unavailable or odd fallback behavior | Provider routing or fallback settings are too aggressive | Keep routing off until the base provider is stable | +| `hermes doctor` flags config problems | Config values are missing or stale | Fix the config, retest a plain chat before adding features | + +## Recovery Toolkit + +When something feels off, use this order: + +1. `hermes doctor` +2. `hermes model` +3. `hermes setup` +4. `hermes sessions list` +5. `hermes --continue` +6. `hermes gateway status` + +That sequence gets you from "broken vibes" back to a known state fast. 
+ --- ## Quick Reference @@ -249,3 +291,6 @@ mcp_servers: - **[Configuration](../user-guide/configuration.md)** — Customize your setup - **[Messaging Gateway](../user-guide/messaging/index.md)** — Connect Telegram, Discord, Slack, WhatsApp, Signal, Email, or Home Assistant - **[Tools & Toolsets](../user-guide/features/tools.md)** — Explore available capabilities +- **[AI Providers](../integrations/providers.md)** — Full provider list and setup details +- **[Skills System](../user-guide/features/skills.md)** — Reusable workflows and knowledge +- **[Tips & Best Practices](../guides/tips.md)** — Power user tips diff --git a/website/docs/guides/github-pr-review-agent.md b/website/docs/guides/github-pr-review-agent.md new file mode 100644 index 0000000000..530d8d6df0 --- /dev/null +++ b/website/docs/guides/github-pr-review-agent.md @@ -0,0 +1,300 @@ +--- +sidebar_position: 10 +title: "Tutorial: GitHub PR Review Agent" +description: "Build an automated AI code reviewer that monitors your repos, reviews pull requests, and delivers feedback — hands-free" +--- + +# Tutorial: Build a GitHub PR Review Agent + +**The problem:** Your team opens PRs faster than you can review them. PRs sit for days waiting for eyeballs. Junior devs merge bugs because nobody had time to check. You spend your mornings catching up on diffs instead of building. + +**The solution:** An AI agent that watches your repos around the clock, reviews every new PR for bugs, security issues, and code quality, and sends you a summary — so you only spend time on PRs that actually need human judgment. 
+ +**What you'll build:** + +``` +┌──────────────┐ ┌───────────────┐ ┌──────────────┐ ┌──────────────┐ +│ Cron Timer │────▶│ Hermes Agent │────▶│ GitHub API │────▶│ Review to │ +│ (every 2h) │ │ + gh CLI │ │ (PR diffs) │ │ Telegram/ │ +│ │ │ + skill │ │ │ │ Discord/ │ +│ │ │ + memory │ │ │ │ local file │ +└──────────────┘ └───────────────┘ └──────────────┘ └──────────────┘ +``` + +This guide uses **cron jobs** to poll for PRs on a schedule — no server or public endpoint needed. Works behind NAT and firewalls. + +:::tip Want real-time reviews instead? +If you have a public endpoint available, check out [Automated GitHub PR Comments with Webhooks](./webhook-github-pr-review.md) — GitHub pushes events to Hermes instantly when PRs are opened or updated. +::: + +--- + +## Prerequisites + +- **Hermes Agent installed** — see the [Installation guide](/docs/getting-started/installation) +- **Gateway running** for cron jobs: + ```bash + hermes gateway install # Install as a service + # or + hermes gateway # Run in foreground + ``` +- **GitHub CLI (`gh`) installed and authenticated**: + ```bash + # Install + brew install gh # macOS + sudo apt install gh # Ubuntu/Debian + + # Authenticate + gh auth login + ``` +- **Messaging configured** (optional) — [Telegram](/docs/user-guide/messaging/telegram) or [Discord](/docs/user-guide/messaging/discord) + +:::tip No messaging? No problem +Use `deliver: "local"` to save reviews to `~/.hermes/cron/output/`. Great for testing before wiring up notifications. +::: + +--- + +## Step 1: Verify the Setup + +Make sure Hermes can access GitHub. Start a chat: + +```bash +hermes +``` + +Test with a simple command: + +``` +Run: gh pr list --repo NousResearch/hermes-agent --state open --limit 3 +``` + +You should see a list of open PRs. If this works, you're ready. + +--- + +## Step 2: Try a Manual Review + +Still in the chat, ask Hermes to review a real PR: + +``` +Review this pull request. 
Read the diff, check for bugs, security issues, +and code quality. Be specific about line numbers and quote problematic code. + +Run: gh pr diff 3888 --repo NousResearch/hermes-agent +``` + +Hermes will: +1. Execute `gh pr diff` to fetch the code changes +2. Read through the entire diff +3. Produce a structured review with specific findings + +If you're happy with the quality, time to automate it. + +--- + +## Step 3: Create a Review Skill + +A skill gives Hermes consistent review guidelines that persist across sessions and cron runs. Without one, review quality varies. + +```bash +mkdir -p ~/.hermes/skills/code-review +``` + +Create `~/.hermes/skills/code-review/SKILL.md`: + +```markdown +--- +name: code-review +description: Review pull requests for bugs, security issues, and code quality +--- + +# Code Review Guidelines + +When reviewing a pull request: + +## What to Check +1. **Bugs** — Logic errors, off-by-one, null/undefined handling +2. **Security** — Injection, auth bypass, secrets in code, SSRF +3. **Performance** — N+1 queries, unbounded loops, memory leaks +4. **Style** — Naming conventions, dead code, missing error handling +5. **Tests** — Are changes tested? Do tests cover edge cases? + +## Output Format +For each finding: +- **File:Line** — exact location +- **Severity** — Critical / Warning / Suggestion +- **What's wrong** — one sentence +- **Fix** — how to fix it + +## Rules +- Be specific. Quote the problematic code. +- Don't flag style nitpicks unless they affect readability. +- If the PR looks good, say so. Don't invent problems. +- End with: APPROVE / REQUEST_CHANGES / COMMENT +``` + +Verify it loaded — start `hermes` and you should see `code-review` in the skills list at startup. + +--- + +## Step 4: Teach It Your Conventions + +This is what makes the reviewer actually useful. Start a session and teach Hermes your team's standards: + +``` +Remember: In our backend repo, we use Python with FastAPI. 
+All endpoints must have type annotations and Pydantic models. +We don't allow raw SQL — only SQLAlchemy ORM. +Test files go in tests/ and must use pytest fixtures. +``` + +``` +Remember: In our frontend repo, we use TypeScript with React. +No `any` types allowed. All components must have props interfaces. +We use React Query for data fetching, never useEffect for API calls. +``` + +These memories persist forever — the reviewer will enforce your conventions without being told each time. + +--- + +## Step 5: Create the Automated Cron Job + +Now wire it all together. Create a cron job that runs every 2 hours: + +```bash +hermes cron create "0 */2 * * *" \ + "Check for new open PRs and review them. + +Repos to monitor: +- myorg/backend-api +- myorg/frontend-app + +Steps: +1. Run: gh pr list --repo REPO --state open --limit 5 --json number,title,author,createdAt +2. For each PR created or updated in the last 4 hours: + - Run: gh pr diff NUMBER --repo REPO + - Review the diff using the code-review guidelines +3. Format output as: + +## PR Reviews — today + +### [repo] #[number]: [title] +**Author:** [name] | **Verdict:** APPROVE/REQUEST_CHANGES/COMMENT +[findings] + +If no new PRs found, say: No new PRs to review." \ + --name "pr-review" \ + --deliver telegram \ + --skill code-review +``` + +Verify it's scheduled: + +```bash +hermes cron list +``` + +### Other useful schedules + +| Schedule | When | +|----------|------| +| `0 */2 * * *` | Every 2 hours | +| `0 9,13,17 * * 1-5` | Three times a day, weekdays only | +| `0 9 * * 1` | Weekly Monday morning roundup | +| `30m` | Every 30 minutes (high-traffic repos) | + +--- + +## Step 6: Run It On Demand + +Don't want to wait for the schedule? 
Trigger it manually: + +```bash +hermes cron run pr-review +``` + +Or from within a chat session: + +``` +/cron run pr-review +``` + +--- + +## Going Further + +### Post Reviews Directly to GitHub + +Instead of delivering to Telegram, have the agent comment on the PR itself: + +Add this to your cron prompt: + +``` +After reviewing, post your review: +- For issues: gh pr review NUMBER --repo REPO --comment --body "YOUR_REVIEW" +- For critical issues: gh pr review NUMBER --repo REPO --request-changes --body "YOUR_REVIEW" +- For clean PRs: gh pr review NUMBER --repo REPO --approve --body "Looks good" +``` + +:::caution +Make sure `gh` has a token with `repo` scope. Reviews are posted as whoever `gh` is authenticated as. +::: + +### Weekly PR Dashboard + +Create a Monday morning overview of all your repos: + +```bash +hermes cron create "0 9 * * 1" \ + "Generate a weekly PR dashboard: +- myorg/backend-api +- myorg/frontend-app +- myorg/infra + +For each repo show: +1. Open PR count and oldest PR age +2. PRs merged this week +3. Stale PRs (older than 5 days) +4. PRs with no reviewer assigned + +Format as a clean summary." \ + --name "weekly-dashboard" \ + --deliver telegram +``` + +### Multi-Repo Monitoring + +Scale up by adding more repos to the prompt. The agent processes them sequentially — no extra setup needed. + +--- + +## Troubleshooting + +### "gh: command not found" +The gateway runs in a minimal environment. Ensure `gh` is in the system PATH and restart the gateway. + +### Reviews are too generic +1. Add the `code-review` skill (Step 3) +2. Teach Hermes your conventions via memory (Step 4) +3. The more context it has about your stack, the better the reviews + +### Cron job doesn't run +```bash +hermes gateway status # Is the gateway running? +hermes cron list # Is the job enabled? +``` + +### Rate limits +GitHub allows 5,000 API requests/hour for authenticated users. Each PR review uses ~3-5 requests (list + diff + optional comments). 
Even reviewing 100 PRs/day stays well within limits. + +--- + +## What's Next? + +- **[Webhook-Based PR Reviews](./webhook-github-pr-review.md)** — get instant reviews when PRs are opened (requires a public endpoint) +- **[Daily Briefing Bot](/docs/guides/daily-briefing-bot)** — combine PR reviews with your morning news digest +- **[Build a Plugin](/docs/guides/build-a-hermes-plugin)** — wrap the review logic into a shareable plugin +- **[Profiles](/docs/user-guide/profiles)** — run a dedicated reviewer profile with its own memory and config +- **[Fallback Providers](/docs/user-guide/features/fallback-providers)** — ensure reviews run even when one provider is down diff --git a/website/docs/guides/webhook-github-pr-review.md b/website/docs/guides/webhook-github-pr-review.md new file mode 100644 index 0000000000..b0dd15ecea --- /dev/null +++ b/website/docs/guides/webhook-github-pr-review.md @@ -0,0 +1,329 @@ +--- +sidebar_position: 11 +sidebar_label: "GitHub PR Reviews via Webhook" +title: "Automated GitHub PR Comments with Webhooks" +description: "Connect Hermes to GitHub so it automatically fetches PR diffs, reviews code changes, and posts comments — triggered by webhooks with no manual prompting" +--- + +# Automated GitHub PR Comments with Webhooks + +This guide walks you through connecting Hermes Agent to GitHub so it automatically fetches a pull request's diff, analyzes the code changes, and posts a comment — triggered by a webhook event with no manual prompting. + +When a PR is opened or updated, GitHub sends a webhook POST to your Hermes instance. Hermes runs the agent with a prompt that instructs it to retrieve the diff via the `gh` CLI, and the response is posted back to the PR thread. + +:::tip Want a simpler setup without a public endpoint? 
+If you don't have a public URL or just want to get started quickly, check out [Build a GitHub PR Review Agent](./github-pr-review-agent.md) — uses cron jobs to poll for PRs on a schedule, works behind NAT and firewalls. +::: + +:::info Reference docs +For the full webhook platform reference (all config options, delivery types, dynamic subscriptions, security model) see [Webhooks](/docs/user-guide/messaging/webhooks). +::: + +:::warning Prompt injection risk +Webhook payloads contain attacker-controlled data — PR titles, commit messages, and descriptions can contain malicious instructions. When your webhook endpoint is exposed to the internet, run the gateway in a sandboxed environment (Docker, SSH backend). See the [security section](#security-notes) below. +::: + +--- + +## Prerequisites + +- Hermes Agent installed and running (`hermes gateway`) +- [`gh` CLI](https://cli.github.com/) installed and authenticated on the gateway host (`gh auth login`) +- A publicly reachable URL for your Hermes instance (see [Local testing with ngrok](#local-testing-with-ngrok) if running locally) +- Admin access to the GitHub repository (required to manage webhooks) + +--- + +## Step 1 — Enable the webhook platform + +Add the following to your `~/.hermes/config.yaml`: + +```yaml +platforms: + webhook: + enabled: true + extra: + port: 8644 # default; change if another service occupies this port + rate_limit: 30 # max requests per minute per route (not a global cap) + + routes: + github-pr-review: + secret: "your-webhook-secret-here" # must match the GitHub webhook secret exactly + events: + - pull_request + + # The agent is instructed to fetch the actual diff before reviewing. + # {number} and {repository.full_name} are resolved from the GitHub payload. + prompt: | + A pull request event was received (action: {action}). 
+ + PR #{number}: {pull_request.title} + Author: {pull_request.user.login} + Branch: {pull_request.head.ref} → {pull_request.base.ref} + Description: {pull_request.body} + URL: {pull_request.html_url} + + If the action is "closed" or "labeled", stop here and do not post a comment. + + Otherwise: + 1. Run: gh pr diff {number} --repo {repository.full_name} + 2. Review the code changes for correctness, security issues, and clarity. + 3. Write a concise, actionable review comment and post it. + + deliver: github_comment + deliver_extra: + repo: "{repository.full_name}" + pr_number: "{number}" +``` + +**Key fields:** + +| Field | Description | +|---|---| +| `secret` (route-level) | HMAC secret for this route. Falls back to `extra.secret` global if omitted. | +| `events` | List of `X-GitHub-Event` header values to accept. Empty list = accept all. | +| `prompt` | Template; `{field}` and `{nested.field}` resolve from the GitHub payload. | +| `deliver` | `github_comment` posts via `gh pr comment`. `log` just writes to the gateway log. | +| `deliver_extra.repo` | Resolves to e.g. `org/repo` from the payload. | +| `deliver_extra.pr_number` | Resolves to the PR number from the payload. | + +:::note The payload does not contain code +The GitHub webhook payload includes PR metadata (title, description, branch names, URLs) but **not the diff**. The prompt above instructs the agent to run `gh pr diff` to fetch the actual changes. The `terminal` tool is included in the default `hermes-webhook` toolset, so no extra configuration is needed. +::: + +--- + +## Step 2 — Start the gateway + +```bash +hermes gateway +``` + +You should see: + +``` +[webhook] Listening on 0.0.0.0:8644 — routes: github-pr-review +``` + +Verify it's running: + +```bash +curl http://localhost:8644/health +# {"status": "ok", "platform": "webhook"} +``` + +--- + +## Step 3 — Register the webhook on GitHub + +1. Go to your repository → **Settings** → **Webhooks** → **Add webhook** +2. 
Fill in: + - **Payload URL:** `https://your-public-url.example.com/webhooks/github-pr-review` + - **Content type:** `application/json` + - **Secret:** the same value you set for `secret` in the route config + - **Which events?** → Select individual events → check **Pull requests** +3. Click **Add webhook** + +GitHub will immediately send a `ping` event to confirm the connection. It is safely ignored — `ping` is not in your `events` list — and returns `{"status": "ignored", "event": "ping"}`. It is only logged at DEBUG level, so it won't appear in the console at the default log level. + +--- + +## Step 4 — Open a test PR + +Create a branch, push a change, and open a PR. Within 30–90 seconds (depending on PR size and model), Hermes should post a review comment. + +To follow the agent's progress in real time: + +```bash +tail -f "${HERMES_HOME:-$HOME/.hermes}/logs/gateway.log" +``` + +--- + +## Local testing with ngrok + +If Hermes is running on your laptop, use [ngrok](https://ngrok.com/) to expose it: + +```bash +ngrok http 8644 +``` + +Copy the `https://...ngrok-free.app` URL and use it as your GitHub Payload URL. On the free ngrok tier the URL changes each time ngrok restarts — update your GitHub webhook each session. Paid ngrok accounts get a static domain. + +You can smoke-test a static route directly with `curl` — no GitHub account or real PR needed. + +:::tip Use `deliver: log` when testing locally +Change `deliver: github_comment` to `deliver: log` in your config while testing. Otherwise the agent will attempt to post a comment to the fake `org/repo#99` repo in the test payload, which will fail. Switch back to `deliver: github_comment` once you're satisfied with the prompt output. 
+::: + +```bash +SECRET="your-webhook-secret-here" +BODY='{"action":"opened","number":99,"pull_request":{"title":"Test PR","body":"Adds a feature.","user":{"login":"testuser"},"head":{"ref":"feat/x"},"base":{"ref":"main"},"html_url":"https://github.com/org/repo/pull/99"},"repository":{"full_name":"org/repo"}}' +SIG=$(printf '%s' "$BODY" | openssl dgst -sha256 -hmac "$SECRET" -hex | awk '{print "sha256="$2}') + +curl -s -X POST http://localhost:8644/webhooks/github-pr-review \ + -H "Content-Type: application/json" \ + -H "X-GitHub-Event: pull_request" \ + -H "X-Hub-Signature-256: $SIG" \ + -d "$BODY" +# Expected: {"status":"accepted","route":"github-pr-review","event":"pull_request","delivery_id":"..."} +``` + +Then watch the agent run: +```bash +tail -f "${HERMES_HOME:-$HOME/.hermes}/logs/gateway.log" +``` + +:::note +`hermes webhook test <name>` only works for **dynamic subscriptions** created with `hermes webhook subscribe`. It does not read routes from `config.yaml`. +::: + +--- + +## Filtering to specific actions + +GitHub sends `pull_request` events for many actions: `opened`, `synchronize`, `reopened`, `closed`, `labeled`, etc. The `events` list filters only by the `X-GitHub-Event` header value — it cannot filter by action sub-type at the routing level. + +The prompt in Step 1 already handles this by instructing the agent to stop early for `closed` and `labeled` events. + +:::warning The agent still runs and consumes tokens +The "stop here" instruction prevents a meaningful review, but the agent still runs to completion for every `pull_request` event regardless of action. GitHub webhooks can only filter by event type (`pull_request`, `push`, `issues`, etc.) — not by action sub-type (`opened`, `closed`, `labeled`). There is no routing-level filter for sub-actions. For high-volume repos, accept this cost or filter upstream with a GitHub Actions workflow that calls your webhook URL conditionally. +::: + +> There is no Jinja2 or conditional template syntax. 
`{field}` and `{nested.field}` are the only substitutions supported. Anything else is passed verbatim to the agent. + +--- + +## Using a skill for consistent review style + +Load a [Hermes skill](/docs/user-guide/features/skills) to give the agent a consistent review persona. Add `skills` to your route inside `platforms.webhook.extra.routes` in `config.yaml`: + +```yaml +platforms: + webhook: + enabled: true + extra: + routes: + github-pr-review: + secret: "your-webhook-secret-here" + events: [pull_request] + prompt: | + A pull request event was received (action: {action}). + PR #{number}: {pull_request.title} by {pull_request.user.login} + URL: {pull_request.html_url} + + If the action is "closed" or "labeled", stop here and do not post a comment. + + Otherwise: + 1. Run: gh pr diff {number} --repo {repository.full_name} + 2. Review the diff using your review guidelines. + 3. Write a concise, actionable review comment and post it. + skills: + - review + deliver: github_comment + deliver_extra: + repo: "{repository.full_name}" + pr_number: "{number}" +``` + +> **Note:** Only the first skill in the list that is found is loaded. Hermes does not stack multiple skills — subsequent entries are ignored. + +--- + +## Sending responses to Slack or Discord instead + +Replace the `deliver` and `deliver_extra` fields inside your route with your target platform: + +```yaml +# Inside platforms.webhook.extra.routes.<route-name>: + +# Slack +deliver: slack +deliver_extra: + chat_id: "C0123456789" # Slack channel ID (omit to use the configured home channel) + +# Discord +deliver: discord +deliver_extra: + chat_id: "987654321012345678" # Discord channel ID (omit to use home channel) +``` + +The target platform must also be enabled and connected in the gateway. If `chat_id` is omitted, the response is sent to that platform's configured home channel. 
+ +Valid `deliver` values: `log` · `github_comment` · `telegram` · `discord` · `slack` · `signal` · `sms` + +--- + +## GitLab support + +The same adapter works with GitLab. GitLab uses `X-Gitlab-Token` for authentication (plain string match, not HMAC) — Hermes handles both automatically. + +For event filtering, GitLab sets `X-GitLab-Event` to values like `Merge Request Hook`, `Push Hook`, `Pipeline Hook`. Use the exact header value in `events`: + +```yaml +events: + - Merge Request Hook +``` + +GitLab payload fields differ from GitHub's — e.g. `{object_attributes.title}` for the MR title and `{object_attributes.iid}` for the MR number. The easiest way to discover the full payload structure is GitLab's **Test** button in your webhook settings, combined with the **Recent Deliveries** log. Alternatively, omit `prompt` from your route config — Hermes will then pass the full payload as formatted JSON directly to the agent, and the agent's response (visible in the gateway log with `deliver: log`) will describe its structure. + +--- + +## Security notes + +- **Never use `INSECURE_NO_AUTH`** in production — it disables signature validation entirely. It is only for local development. +- **Rotate your webhook secret** periodically and update it in both GitHub (webhook settings) and your `config.yaml`. +- **Rate limiting** is 30 req/min per route by default (configurable via `extra.rate_limit`). Exceeding it returns `429`. +- **Duplicate deliveries** (webhook retries) are deduplicated via a 1-hour idempotency cache. The cache key is `X-GitHub-Delivery` if present, then `X-Request-ID`, then a millisecond timestamp. When neither delivery ID header is set, retries are **not** deduplicated. +- **Prompt injection:** PR titles, descriptions, and commit messages are attacker-controlled. Malicious PRs could attempt to manipulate the agent's actions. Run the gateway in a sandboxed environment (Docker, VM) when exposed to the public internet. 
+ +--- + +## Troubleshooting + +| Symptom | Check | +|---|---| +| `401 Invalid signature` | Secret in config.yaml doesn't match GitHub webhook secret | +| `404 Unknown route` | Route name in the URL doesn't match the key in `routes:` | +| `429 Rate limit exceeded` | 30 req/min per route exceeded — common when re-delivering test events from GitHub's UI; wait a minute or raise `extra.rate_limit` | +| No comment posted | `gh` not installed, not on PATH, or not authenticated (`gh auth login`) | +| Agent runs but no comment | Check the gateway log — if the agent output was empty or just "SKIP", delivery is still attempted | +| Port already in use | Change `extra.port` in config.yaml | +| Agent runs but reviews only the PR description | The prompt isn't including the `gh pr diff` instruction — the diff is not in the webhook payload | +| Can't see the ping event | Ignored events return `{"status":"ignored","event":"ping"}` at DEBUG log level only — check GitHub's delivery log (repo → Settings → Webhooks → your webhook → Recent Deliveries) | + +**GitHub's Recent Deliveries tab** (repo → Settings → Webhooks → your webhook) shows the exact request headers, payload, HTTP status, and response body for every delivery. It is the fastest way to diagnose failures without touching your server logs. 
+ +--- + +## Full config reference + +```yaml +platforms: + webhook: + enabled: true + extra: + host: "0.0.0.0" # bind address (default: 0.0.0.0) + port: 8644 # listen port (default: 8644) + secret: "" # optional global fallback secret + rate_limit: 30 # requests per minute per route + max_body_bytes: 1048576 # payload size limit in bytes (default: 1 MB) + + routes: + <route-name>: + secret: "required-per-route" + events: [] # [] = accept all; otherwise list X-GitHub-Event values + prompt: "" # {field} / {nested.field} resolved from payload + skills: [] # first matching skill is loaded (only one) + deliver: "log" # log | github_comment | telegram | discord | slack | signal | sms + deliver_extra: {} # repo + pr_number for github_comment; chat_id for others +``` + +--- + +## What's Next? + +- **[Cron-Based PR Reviews](./github-pr-review-agent.md)** — poll for PRs on a schedule, no public endpoint needed +- **[Webhook Reference](/docs/user-guide/messaging/webhooks)** — full config reference for the webhook platform +- **[Build a Plugin](/docs/guides/build-a-hermes-plugin)** — package review logic into a shareable plugin +- **[Profiles](/docs/user-guide/profiles)** — run a dedicated reviewer profile with its own memory and config diff --git a/website/sidebars.ts b/website/sidebars.ts index c84184c4e6..d57a71dcc2 100644 --- a/website/sidebars.ts +++ b/website/sidebars.ts @@ -162,6 +162,8 @@ const sidebars: SidebarsConfig = { 'guides/cron-troubleshooting', 'guides/work-with-skills', 'guides/delegation-patterns', + 'guides/github-pr-review-agent', + 'guides/webhook-github-pr-review', 'guides/migrate-from-openclaw', 'guides/aws-bedrock', ], From c567adb58abbaa0fd1f775ec27d1754efacca83c Mon Sep 17 00:00:00 2001 From: Teknium <127238744+teknium1@users.noreply.github.com> Date: Sun, 19 Apr 2026 05:35:45 -0700 Subject: [PATCH 011/547] fix(tui): session.create build thread must clean up if session.close races (#12555) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit When a user hits /new or /resume before the previous session finishes initializing, session.close runs while the previous session.create's _build thread is still constructing the agent. session.close pops _sessions[sid] and closes whatever slash_worker it finds (None at that point — _build hasn't installed it yet), then returns. _build keeps running in the background, installs the slash_worker subprocess and registers an approval-notify callback on a session dict that's now unreachable via _sessions. The subprocess leaks until process exit; the notify callback lingers in the global registry. Fix: _build now tracks what it allocates (worker, notify_registered) and checks in its finally block whether _sessions[sid] still points to the session it's building for. If not, the build was orphaned by a racing close, so clean up the subprocess and unregister the notify ourselves. tui_gateway/server.py: - _build reads _sessions.get(sid) safely (returns early if already gone) - tracks allocated worker + notify registration - finally checks orphan status and cleans up Tests (tests/test_tui_gateway_server.py): 2 new cases. - test_session_create_close_race_does_not_orphan_worker: slow _make_agent, close mid-build, verify worker.close() and unregister_gateway_notify both fire from the build thread's cleanup path. - test_session_create_no_race_keeps_worker_alive: regression guard — happy path does NOT over-eagerly clean up a live worker. Validated: against the unpatched code, the race test fails with 'orphan worker was not cleaned up — closed_workers=[]'. Live E2E against the live Python environment confirmed the cleanup fires exactly when the race happens. 
--- tests/test_tui_gateway_server.py | 159 +++++++++++++++++++++++++++++++ tui_gateway/server.py | 39 +++++++- 2 files changed, 196 insertions(+), 2 deletions(-) diff --git a/tests/test_tui_gateway_server.py b/tests/test_tui_gateway_server.py index c0f5239035..533516b95d 100644 --- a/tests/test_tui_gateway_server.py +++ b/tests/test_tui_gateway_server.py @@ -949,3 +949,162 @@ def test_mirror_slash_side_effects_allowed_when_idle(monkeypatch): # Should NOT contain "session busy" — the switch went through. assert "session busy" not in warning assert applied["model"] + + +# --------------------------------------------------------------------------- +# session.create / session.close race: fast /new churn must not orphan the +# slash_worker subprocess or the global approval-notify registration. +# --------------------------------------------------------------------------- + + +def test_session_create_close_race_does_not_orphan_worker(monkeypatch): + """Regression guard: if session.close runs while session.create's + _build thread is still constructing the agent, the build thread + must detect the orphan and clean up the slash_worker + notify + registration it's about to install. 
Without the cleanup those + resources leak — the subprocess stays alive until atexit and the + notify callback lingers in the global registry.""" + import threading + + closed_workers: list[str] = [] + unregistered_keys: list[str] = [] + + class _FakeWorker: + def __init__(self, key, model): + self.key = key + self._closed = False + + def close(self): + self._closed = True + closed_workers.append(self.key) + + class _FakeAgent: + def __init__(self): + self.model = "x" + self.provider = "openrouter" + self.base_url = "" + self.api_key = "" + + # Make _build block until we release it — simulates slow agent init + release_build = threading.Event() + + def _slow_make_agent(sid, key): + release_build.wait(timeout=3.0) + return _FakeAgent() + + # Stub everything _build touches + monkeypatch.setattr(server, "_make_agent", _slow_make_agent) + monkeypatch.setattr(server, "_SlashWorker", _FakeWorker) + monkeypatch.setattr(server, "_get_db", lambda: types.SimpleNamespace(create_session=lambda *a, **kw: None)) + monkeypatch.setattr(server, "_session_info", lambda _a: {"model": "x"}) + monkeypatch.setattr(server, "_probe_credentials", lambda _a: None) + monkeypatch.setattr(server, "_wire_callbacks", lambda _sid: None) + monkeypatch.setattr(server, "_emit", lambda *a, **kw: None) + + # Shim register/unregister to observe leaks + import tools.approval as _approval + monkeypatch.setattr(_approval, "register_gateway_notify", + lambda key, cb: None) + monkeypatch.setattr(_approval, "unregister_gateway_notify", + lambda key: unregistered_keys.append(key)) + monkeypatch.setattr(_approval, "load_permanent_allowlist", lambda: None) + + # Start: session.create spawns _build thread, returns synchronously + resp = server.handle_request({ + "id": "1", "method": "session.create", "params": {"cols": 80}, + }) + assert resp.get("result"), f"got error: {resp.get('error')}" + sid = resp["result"]["session_id"] + + # Build thread is blocked in _slow_make_agent. 
Close the session + # NOW — this pops _sessions[sid] before _build can install the + # worker/notify. + close_resp = server.handle_request({ + "id": "2", "method": "session.close", "params": {"session_id": sid}, + }) + assert close_resp.get("result", {}).get("closed") is True + + # At this point session.close saw slash_worker=None (not yet + # installed) so it didn't close anything. Release the build thread + # and let it finish — it should detect the orphan and clean up the + # worker it just allocated + unregister the notify. + release_build.set() + + # Give the build thread a moment to run through its finally. + for _ in range(100): + if closed_workers: + break + import time + time.sleep(0.02) + + assert len(closed_workers) == 1, ( + f"orphan worker was not cleaned up — closed_workers={closed_workers}" + ) + # Notify may be unregistered by both session.close (unconditional) + # and the orphan-cleanup path; the key guarantee is that the build + # thread does at least one unregister call (any prior close + # already popped the callback; the duplicate is a no-op). 
+ assert len(unregistered_keys) >= 1, ( + f"orphan notify registration was not unregistered — " + f"unregistered_keys={unregistered_keys}" + ) + + +def test_session_create_no_race_keeps_worker_alive(monkeypatch): + """Regression guard: when session.close does NOT race, the build + thread must install the worker + notify normally and leave them + alone (no over-eager cleanup).""" + closed_workers: list[str] = [] + unregistered_keys: list[str] = [] + + class _FakeWorker: + def __init__(self, key, model): + self.key = key + + def close(self): + closed_workers.append(self.key) + + class _FakeAgent: + def __init__(self): + self.model = "x" + self.provider = "openrouter" + self.base_url = "" + self.api_key = "" + + monkeypatch.setattr(server, "_make_agent", lambda sid, key: _FakeAgent()) + monkeypatch.setattr(server, "_SlashWorker", _FakeWorker) + monkeypatch.setattr(server, "_get_db", lambda: types.SimpleNamespace(create_session=lambda *a, **kw: None)) + monkeypatch.setattr(server, "_session_info", lambda _a: {"model": "x"}) + monkeypatch.setattr(server, "_probe_credentials", lambda _a: None) + monkeypatch.setattr(server, "_wire_callbacks", lambda _sid: None) + monkeypatch.setattr(server, "_emit", lambda *a, **kw: None) + + import tools.approval as _approval + monkeypatch.setattr(_approval, "register_gateway_notify", lambda key, cb: None) + monkeypatch.setattr(_approval, "unregister_gateway_notify", + lambda key: unregistered_keys.append(key)) + monkeypatch.setattr(_approval, "load_permanent_allowlist", lambda: None) + + resp = server.handle_request({ + "id": "1", "method": "session.create", "params": {"cols": 80}, + }) + sid = resp["result"]["session_id"] + + # Wait for the build to finish (ready event inside session dict). + session = server._sessions[sid] + session["agent_ready"].wait(timeout=2.0) + + # Build finished without a close race — nothing should have been + # cleaned up by the orphan check. 
+ assert closed_workers == [], ( + f"build thread closed its own worker despite no race: {closed_workers}" + ) + assert unregistered_keys == [], ( + f"build thread unregistered its own notify despite no race: {unregistered_keys}" + ) + + # Session should have the live worker installed. + assert session.get("slash_worker") is not None + + # Cleanup + server._sessions.pop(sid, None) diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 00f8346191..70dff3b17b 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -1088,7 +1088,23 @@ def _(rid, params: dict) -> dict: } def _build() -> None: - session = _sessions[sid] + session = _sessions.get(sid) + if session is None: + # session.close ran before the build thread got scheduled. + ready.set() + return + + # Track what we allocate so we can clean up if session.close + # races us to the finish line. session.close pops _sessions[sid] + # unconditionally and tries to close the slash_worker it finds; + # if _build is still mid-construction when close runs, close + # finds slash_worker=None / notify unregistered and returns + # cleanly — leaving us, the build thread, to later install the + # worker + notify on an orphaned session dict. The finally + # block below detects the orphan and cleans up instead of + # leaking a subprocess and a global notify registration. 
+ worker = None + notify_registered = False try: tokens = _set_session_context(key) try: @@ -1100,13 +1116,15 @@ def _(rid, params: dict) -> dict: session["agent"] = agent try: - session["slash_worker"] = _SlashWorker(key, getattr(agent, "model", _resolve_model())) + worker = _SlashWorker(key, getattr(agent, "model", _resolve_model())) + session["slash_worker"] = worker except Exception: pass try: from tools.approval import register_gateway_notify, load_permanent_allowlist register_gateway_notify(key, lambda data: _emit("approval.request", sid, data)) + notify_registered = True load_permanent_allowlist() except Exception: pass @@ -1122,6 +1140,23 @@ def _(rid, params: dict) -> dict: session["agent_error"] = str(e) _emit("error", sid, {"message": f"agent init failed: {e}"}) finally: + # Orphan check: if session.close raced us and popped + # _sessions[sid] while we were building, the dict we just + # populated is unreachable. Clean up the subprocess and + # the global notify registration ourselves — session.close + # couldn't see them at the time it ran. + if _sessions.get(sid) is not session: + if worker is not None: + try: + worker.close() + except Exception: + pass + if notify_registered: + try: + from tools.approval import unregister_gateway_notify + unregister_gateway_notify(key) + except Exception: + pass ready.set() threading.Thread(target=_build, daemon=True).start() From a521005fe5e5885b23c878a5c5fdc2e1b361a4da Mon Sep 17 00:00:00 2001 From: Teknium <127238744+teknium1@users.noreply.github.com> Date: Sun, 19 Apr 2026 05:45:59 -0700 Subject: [PATCH 012/547] fix(discord): close two low-severity adapter races (#12558) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two small races in gateway/platforms/discord.py, bundled together since they're adjacent in the adapter and both narrow in impact. 1. on_message vs _resolve_allowed_usernames (startup window) DISCORD_ALLOWED_USERS accepts both numeric IDs and raw usernames. 
At connect-time, _resolve_allowed_usernames walks the bot's guilds (fetch_members can take multiple seconds) to swap usernames for IDs. on_message can fire during that window; _is_allowed_user compares the numeric author.id against a set that may still contain raw usernames — legitimate users get silently rejected for a few seconds after every reconnect. Fix: on_message awaits _ready_event (with a 30s timeout) when it isn't already set. on_ready sets the event after the resolve completes. In steady state this is a no-op (event already set); only the startup / reconnect window ever blocks. 2. join_voice_channel check-and-connect The existing-connection check at _voice_clients.get() and the channel.connect() call straddled an await boundary with no lock. Two concurrent /voice channel invocations could both see None and both call connect(); discord.py raises ClientException ("Already connected") on the loser. Same race class for leave running concurrently with _voice_timeout_handler. Fix: per-guild asyncio.Lock (_voice_locks dict with lazy alloc via _voice_lock_for). join_voice_channel and leave_voice_channel both run their body under the lock. Sequential within a guild, still fully concurrent across guilds. Both: LOW severity. The first only affects username-based allowlists on fast-follow-up messages at startup; the second is a narrow exception on simultaneous voice commands. Bundled so the adapter gets a single coherent polish pass. Tests (tests/gateway/test_discord_race_polish.py): 2 regression cases. - test_concurrent_joins_do_not_double_connect: two concurrent join_voice_channel calls on the same guild result in exactly one channel.connect() invocation. - test_on_message_blocks_until_ready_event_set: asserts the expected wait pattern is present in on_message (source inspection, since full discord.py client setup isn't practical here). Regression-guard validated: against unpatched gateway/platforms/discord.py both tests fail. With the fix they pass. 
Full Discord suite (118 tests) green. --- gateway/platforms/discord.py | 116 +++++++++++++------- tests/gateway/test_discord_race_polish.py | 122 ++++++++++++++++++++++ 2 files changed, 201 insertions(+), 37 deletions(-) create mode 100644 tests/gateway/test_discord_race_polish.py diff --git a/gateway/platforms/discord.py b/gateway/platforms/discord.py index 1ec831b66d..fce7ece414 100644 --- a/gateway/platforms/discord.py +++ b/gateway/platforms/discord.py @@ -498,6 +498,7 @@ class DiscordAdapter(BasePlatformAdapter): self._allowed_role_ids: set = set() # For DISCORD_ALLOWED_ROLES filtering # Voice channel state (per-guild) self._voice_clients: Dict[int, Any] = {} # guild_id -> VoiceClient + self._voice_locks: Dict[int, asyncio.Lock] = {} # guild_id -> serialize join/leave # Text batching: merge rapid successive messages (Telegram-style) self._text_batch_delay_seconds = float(os.getenv("HERMES_DISCORD_TEXT_BATCH_DELAY_SECONDS", "0.6")) self._text_batch_split_delay_seconds = float(os.getenv("HERMES_DISCORD_TEXT_BATCH_SPLIT_DELAY_SECONDS", "2.0")) @@ -636,6 +637,30 @@ class DiscordAdapter(BasePlatformAdapter): @self._client.event async def on_message(message: DiscordMessage): + # Wait for on_ready to finish resolving username-based + # allowlist entries. Without this block, messages + # arriving between Discord's READY event and the end + # of _resolve_allowed_usernames compare author IDs + # (numeric) against a set that may still contain raw + # usernames (strings) from DISCORD_ALLOWED_USERS — + # legitimate users get silently rejected for the first + # few seconds after every reconnect. The wait is a + # near-instant no-op in steady state (_ready_event is + # already set); only the startup / reconnect window + # ever blocks. 
+ if not adapter_self._ready_event.is_set(): + try: + await asyncio.wait_for( + adapter_self._ready_event.wait(), + timeout=30.0, + ) + except asyncio.TimeoutError: + logger.warning( + "[%s] on_message timed out waiting for _ready_event; " + "allowlist check may use pre-resolved entries", + adapter_self.name, + ) + # Dedup: Discord RESUME replays events after reconnects (#4777) if adapter_self._dedup.is_duplicate(str(message.id)): return @@ -1231,57 +1256,74 @@ class DiscordAdapter(BasePlatformAdapter): # Voice channel methods (join / leave / play) # ------------------------------------------------------------------ + def _voice_lock_for(self, guild_id: int) -> "asyncio.Lock": + """Return the per-guild lock, creating it on first use. + + Voice join/leave/move must be serialized per guild — without + this, two concurrent /voice channel invocations both see + _voice_clients.get(guild_id) return None, both call + channel.connect(), and discord.py raises ClientException + ('Already connected') on the loser. + """ + lock = self._voice_locks.get(guild_id) + if lock is None: + lock = asyncio.Lock() + self._voice_locks[guild_id] = lock + return lock + async def join_voice_channel(self, channel) -> bool: """Join a Discord voice channel. Returns True on success.""" if not self._client or not DISCORD_AVAILABLE: return False guild_id = channel.guild.id - # Already connected in this guild? - existing = self._voice_clients.get(guild_id) - if existing and existing.is_connected(): - if existing.channel.id == channel.id: + async with self._voice_lock_for(guild_id): + # Already connected in this guild? 
+ existing = self._voice_clients.get(guild_id) + if existing and existing.is_connected(): + if existing.channel.id == channel.id: + self._reset_voice_timeout(guild_id) + return True + await existing.move_to(channel) self._reset_voice_timeout(guild_id) return True - await existing.move_to(channel) + + vc = await channel.connect() + self._voice_clients[guild_id] = vc self._reset_voice_timeout(guild_id) + + # Start voice receiver (Phase 2: listen to users) + try: + receiver = VoiceReceiver(vc, allowed_user_ids=self._allowed_user_ids) + receiver.start() + self._voice_receivers[guild_id] = receiver + self._voice_listen_tasks[guild_id] = asyncio.ensure_future( + self._voice_listen_loop(guild_id) + ) + except Exception as e: + logger.warning("Voice receiver failed to start: %s", e) + return True - vc = await channel.connect() - self._voice_clients[guild_id] = vc - self._reset_voice_timeout(guild_id) - - # Start voice receiver (Phase 2: listen to users) - try: - receiver = VoiceReceiver(vc, allowed_user_ids=self._allowed_user_ids) - receiver.start() - self._voice_receivers[guild_id] = receiver - self._voice_listen_tasks[guild_id] = asyncio.ensure_future( - self._voice_listen_loop(guild_id) - ) - except Exception as e: - logger.warning("Voice receiver failed to start: %s", e) - - return True - async def leave_voice_channel(self, guild_id: int) -> None: """Disconnect from the voice channel in a guild.""" - # Stop voice receiver first - receiver = self._voice_receivers.pop(guild_id, None) - if receiver: - receiver.stop() - listen_task = self._voice_listen_tasks.pop(guild_id, None) - if listen_task: - listen_task.cancel() + async with self._voice_lock_for(guild_id): + # Stop voice receiver first + receiver = self._voice_receivers.pop(guild_id, None) + if receiver: + receiver.stop() + listen_task = self._voice_listen_tasks.pop(guild_id, None) + if listen_task: + listen_task.cancel() - vc = self._voice_clients.pop(guild_id, None) - if vc and vc.is_connected(): - await 
vc.disconnect() - task = self._voice_timeout_tasks.pop(guild_id, None) - if task: - task.cancel() - self._voice_text_channels.pop(guild_id, None) - self._voice_sources.pop(guild_id, None) + vc = self._voice_clients.pop(guild_id, None) + if vc and vc.is_connected(): + await vc.disconnect() + task = self._voice_timeout_tasks.pop(guild_id, None) + if task: + task.cancel() + self._voice_text_channels.pop(guild_id, None) + self._voice_sources.pop(guild_id, None) # Maximum seconds to wait for voice playback before giving up PLAYBACK_TIMEOUT = 120 diff --git a/tests/gateway/test_discord_race_polish.py b/tests/gateway/test_discord_race_polish.py new file mode 100644 index 0000000000..a0f900aea6 --- /dev/null +++ b/tests/gateway/test_discord_race_polish.py @@ -0,0 +1,122 @@ +"""Regression tests for the Discord adapter race-polish fix. + +Two races are addressed: +1. on_message allowlist check racing on_ready's _resolve_allowed_usernames + resolution window. Username-based entries in DISCORD_ALLOWED_USERS + appear in the set as raw strings for several seconds after + connect/reconnect; author.id is always numeric, so legitimate users + are silently rejected until resolution finishes. +2. join_voice_channel check-and-connect: concurrent /voice channel + invocations both see _voice_clients.get(guild_id) is None, both call + channel.connect(), second raises ClientException ('Already connected'). 
+""" + +import asyncio +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from gateway.config import Platform, PlatformConfig + + +def _make_adapter(): + """Bare DiscordAdapter for testing — object.__new__ pattern per AGENTS.md.""" + from gateway.platforms.discord import DiscordAdapter + + adapter = object.__new__(DiscordAdapter) + adapter._platform = Platform.DISCORD + adapter.config = PlatformConfig(enabled=True, token="t") + adapter._ready_event = asyncio.Event() + adapter._allowed_user_ids = set() + adapter._allowed_role_ids = set() + adapter._voice_clients = {} + adapter._voice_locks = {} + adapter._voice_receivers = {} + adapter._voice_listen_tasks = {} + adapter._voice_timeout_tasks = {} + adapter._voice_text_channels = {} + adapter._voice_sources = {} + adapter._client = MagicMock() + return adapter + + +class TestJoinVoiceSerialization: + @pytest.mark.asyncio + async def test_concurrent_joins_do_not_double_connect(self): + """Two concurrent join_voice_channel calls on the same guild + must serialize through the per-guild lock — only ONE + channel.connect() actually fires; the second sees the + _voice_clients entry the first just installed.""" + adapter = _make_adapter() + + connect_count = [0] + connect_event = asyncio.Event() + + class FakeVC: + def __init__(self, channel): + self.channel = channel + + def is_connected(self): + return True + + async def move_to(self, _channel): + return None + + async def disconnect(self): + return None + + async def slow_connect(self): + connect_count[0] += 1 + # Widen the race window + await connect_event.wait() + return FakeVC(self) + + channel = MagicMock() + channel.id = 111 + channel.guild.id = 42 + channel.connect = lambda: slow_connect(channel) + + # Swap out VoiceReceiver so it doesn't try to set up real audio + from gateway.platforms import discord as discord_mod + with patch.object(discord_mod, "VoiceReceiver", MagicMock(return_value=MagicMock(start=lambda: None))): + with 
patch.object(discord_mod.asyncio, "ensure_future", lambda _c: asyncio.create_task(asyncio.sleep(0))): + # Fire two joins concurrently + t1 = asyncio.create_task(adapter.join_voice_channel(channel)) + t2 = asyncio.create_task(adapter.join_voice_channel(channel)) + # Let them run until they're blocked on our event + await asyncio.sleep(0.05) + # Release connect so both can finish + connect_event.set() + r1, r2 = await asyncio.gather(t1, t2) + + assert connect_count[0] == 1, ( + f"Expected exactly 1 channel.connect() call, got {connect_count[0]} — " + "per-guild voice lock is not serializing join_voice_channel" + ) + assert r1 is True and r2 is True + assert 42 in adapter._voice_clients + + +class TestOnMessageWaitsForReadyEvent: + @pytest.mark.asyncio + async def test_on_message_blocks_until_ready_event_set(self): + """A message arriving before on_ready finishes + _resolve_allowed_usernames must wait, not proceed with a + half-resolved allowlist.""" + # This is an integration-style check — we pull out the + # on_message handler by asserting the source contains the + # expected wait pattern. A full end-to-end test would require + # setting up the discord.py client machinery, which is not + # practical here. + import inspect + from gateway.platforms import discord as discord_mod + + src = inspect.getsource(discord_mod.DiscordAdapter.connect) + assert "_ready_event.is_set()" in src, ( + "on_message must gate on _ready_event so username-based " + "allowlist entries are resolved before the allowlist check" + ) + assert "await asyncio.wait_for(" in src and "_ready_event.wait()" in src, ( + "Expected asyncio.wait_for(_ready_event.wait(), timeout=...) 
" + "pattern in on_message" + ) From a6fe5d08727c9bb2486709ba3357137fbb49a321 Mon Sep 17 00:00:00 2001 From: Brooklyn Nicholson Date: Sun, 19 Apr 2026 07:47:15 -0500 Subject: [PATCH 013/547] fix(tui-gateway): dispatch slow RPC handlers on a thread pool (#12546) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The stdin-read loop in entry.py calls handle_request() inline, so the five handlers that can block for seconds to minutes (slash.exec, cli.exec, shell.exec, session.resume, session.branch) freeze the dispatcher. While one is running, any inbound RPC — notably approval.respond and session.interrupt — sits unread in the pipe buffer and lands only after the slow handler returns. Route only those five onto a small ThreadPoolExecutor; every other handler stays on the main thread so the fast-path ordering is unchanged and the audit surface stays small. write_json is already _stdout_lock-guarded, so concurrent response writes are safe. Pool size defaults to 4 (overridable via HERMES_TUI_RPC_POOL_WORKERS). - add _LONG_HANDLERS set + ThreadPoolExecutor + atexit shutdown - new dispatch(req) function: pool for long handlers, inline for rest - _run_and_emit wraps pool work in a try/except so a misbehaving handler still surfaces as a JSON-RPC error instead of silently dying in a worker - entry.py swaps handle_request → dispatch - 5 new tests: sync path still inline, long handlers emit via stdout, fast handler not blocked behind slow one, handler exceptions map to error responses, non-long methods always take the sync path Manual repro confirms the fix: shell.exec(sleep 3) + terminal.resize sent back-to-back now returns the resize response at t=0s while the sleep finishes independently at t=3s. Before, both landed together at t=3s. Fixes #12546. 
--- tests/tui_gateway/test_protocol.py | 79 ++++++++++++++++++++++++++++++ tui_gateway/entry.py | 4 +- tui_gateway/server.py | 51 +++++++++++++++++++ 3 files changed, 132 insertions(+), 2 deletions(-) diff --git a/tests/tui_gateway/test_protocol.py b/tests/tui_gateway/test_protocol.py index 926dfadf17..da154cc168 100644 --- a/tests/tui_gateway/test_protocol.py +++ b/tests/tui_gateway/test_protocol.py @@ -4,6 +4,7 @@ import io import json import sys import threading +import time from unittest.mock import MagicMock, patch import pytest @@ -432,3 +433,81 @@ def test_command_dispatch_returns_skill_payload(server): assert result["type"] == "skill" assert result["message"] == fake_msg assert result["name"] == "hermes-agent-dev" + + +# ── dispatch(): pool routing for long handlers (#12546) ────────────── + + +def test_dispatch_runs_short_handlers_inline(server): + """Non-long handlers return their response synchronously from dispatch().""" + server._methods["fast.ping"] = lambda rid, params: server._ok(rid, {"pong": True}) + + resp = server.dispatch({"id": "r1", "method": "fast.ping", "params": {}}) + + assert resp == {"jsonrpc": "2.0", "id": "r1", "result": {"pong": True}} + + +def test_dispatch_offloads_long_handlers_and_emits_via_stdout(capture): + """Long handlers run on the pool and write their response via write_json.""" + server, buf = capture + server._methods["slash.exec"] = lambda rid, params: server._ok(rid, {"output": "hi"}) + + resp = server.dispatch({"id": "r2", "method": "slash.exec", "params": {}}) + assert resp is None + + for _ in range(50): + if buf.getvalue(): + break + time.sleep(0.01) + + written = json.loads(buf.getvalue()) + assert written == {"jsonrpc": "2.0", "id": "r2", "result": {"output": "hi"}} + + +def test_dispatch_long_handler_does_not_block_fast_handler(server): + """A slow long handler must not prevent a concurrent fast handler from completing.""" + released = threading.Event() + server._methods["slash.exec"] = lambda rid, params: 
(released.wait(timeout=5), server._ok(rid, {"done": True}))[1] + server._methods["fast.ping"] = lambda rid, params: server._ok(rid, {"pong": True}) + + t0 = time.monotonic() + assert server.dispatch({"id": "slow", "method": "slash.exec", "params": {}}) is None + + fast_resp = server.dispatch({"id": "fast", "method": "fast.ping", "params": {}}) + fast_elapsed = time.monotonic() - t0 + + assert fast_resp["result"] == {"pong": True} + assert fast_elapsed < 0.5, f"fast handler blocked for {fast_elapsed:.2f}s behind slow handler" + + released.set() + + +def test_dispatch_long_handler_exception_produces_error_response(capture): + """An exception inside a pool-dispatched handler still yields a JSON-RPC error.""" + server, buf = capture + + def boom(rid, params): + raise RuntimeError("kaboom") + + server._methods["slash.exec"] = boom + + server.dispatch({"id": "r3", "method": "slash.exec", "params": {}}) + + for _ in range(50): + if buf.getvalue(): + break + time.sleep(0.01) + + written = json.loads(buf.getvalue()) + assert written["id"] == "r3" + assert written["error"]["code"] == -32000 + assert "kaboom" in written["error"]["message"] + + +def test_dispatch_unknown_long_method_still_goes_inline(server): + """Method name not in _LONG_HANDLERS takes the sync path even if handler is slow.""" + server._methods["some.method"] = lambda rid, params: server._ok(rid, {"ok": True}) + + resp = server.dispatch({"id": "r4", "method": "some.method", "params": {}}) + + assert resp["result"] == {"ok": True} diff --git a/tui_gateway/entry.py b/tui_gateway/entry.py index a9667528de..d2b82b9dab 100644 --- a/tui_gateway/entry.py +++ b/tui_gateway/entry.py @@ -2,7 +2,7 @@ import json import signal import sys -from tui_gateway.server import handle_request, resolve_skin, write_json +from tui_gateway.server import dispatch, resolve_skin, write_json signal.signal(signal.SIGPIPE, signal.SIG_DFL) signal.signal(signal.SIGINT, signal.SIG_IGN) @@ -28,7 +28,7 @@ def main(): sys.exit(0) continue - resp 
= handle_request(req) + resp = dispatch(req) if resp is not None: if not write_json(resp): sys.exit(0) diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 70dff3b17b..6d0dbea659 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -1,4 +1,5 @@ import atexit +import concurrent.futures import copy import json import os @@ -36,6 +37,29 @@ _cfg_cache: dict | None = None _cfg_mtime: float | None = None _SLASH_WORKER_TIMEOUT_S = max(5.0, float(os.environ.get("HERMES_TUI_SLASH_TIMEOUT_S", "45") or 45)) +# ── Async RPC dispatch (#12546) ────────────────────────────────────── +# A handful of handlers block the dispatcher loop in entry.py for seconds +# to minutes (slash.exec, cli.exec, shell.exec, session.resume, +# session.branch). While they're running, inbound RPCs — notably +# approval.respond and session.interrupt — sit unread in the stdin pipe. +# We route only those slow handlers onto a small thread pool; everything +# else stays on the main thread so ordering stays sane for the fast path. +# write_json is already _stdout_lock-guarded, so concurrent response +# writes are safe. +_LONG_HANDLERS = frozenset({ + "cli.exec", + "session.branch", + "session.resume", + "shell.exec", + "slash.exec", +}) +_RPC_POOL_WORKERS = max(2, int(os.environ.get("HERMES_TUI_RPC_POOL_WORKERS", "4") or 4)) +_pool = concurrent.futures.ThreadPoolExecutor( + max_workers=_RPC_POOL_WORKERS, + thread_name_prefix="tui-rpc", +) +atexit.register(lambda: _pool.shutdown(wait=False, cancel_futures=True)) + # Reserve real stdout for JSON-RPC only; redirect Python's stdout to stderr # so stray print() from libraries/tools becomes harmless gateway.stderr instead # of corrupting the JSON protocol. @@ -200,6 +224,33 @@ def handle_request(req: dict) -> dict | None: return fn(req.get("id"), req.get("params", {})) +def _run_and_emit(req: dict) -> None: + """Run a handler on the RPC pool and write its response directly. 
+ + Catches any unexpected exception so a misbehaving handler can't kill + the worker thread silently — the caller still sees a JSON-RPC error. + """ + try: + resp = handle_request(req) + except Exception as exc: + resp = _err(req.get("id"), -32000, f"handler error: {exc}") + if resp is not None: + write_json(resp) + + +def dispatch(req: dict) -> dict | None: + """Route an inbound RPC — long handlers to the pool, everything else inline. + + Returns the response for sync-dispatched requests so the caller + (entry.py) can write it. Returns None when the request has been + scheduled on the pool; the worker writes the response itself. + """ + if req.get("method", "") in _LONG_HANDLERS: + _pool.submit(_run_and_emit, req) + return None + return handle_request(req) + + def _wait_agent(session: dict, rid: str, timeout: float = 30.0) -> dict | None: ready = session.get("agent_ready") if ready is not None and not ready.wait(timeout=timeout): From ab6eaaff2610ec236edbbe4d7729c103b816e573 Mon Sep 17 00:00:00 2001 From: Brooklyn Nicholson Date: Sun, 19 Apr 2026 07:53:01 -0500 Subject: [PATCH 014/547] chore(tui-gateway): inline one-off RPC_POOL_WORKERS, compact _LONG_HANDLERS --- tui_gateway/server.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 6d0dbea659..41d93db442 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -46,16 +46,10 @@ _SLASH_WORKER_TIMEOUT_S = max(5.0, float(os.environ.get("HERMES_TUI_SLASH_TIMEOU # else stays on the main thread so ordering stays sane for the fast path. # write_json is already _stdout_lock-guarded, so concurrent response # writes are safe. 
-_LONG_HANDLERS = frozenset({ - "cli.exec", - "session.branch", - "session.resume", - "shell.exec", - "slash.exec", -}) -_RPC_POOL_WORKERS = max(2, int(os.environ.get("HERMES_TUI_RPC_POOL_WORKERS", "4") or 4)) +_LONG_HANDLERS = frozenset({"cli.exec", "session.branch", "session.resume", "shell.exec", "slash.exec"}) + _pool = concurrent.futures.ThreadPoolExecutor( - max_workers=_RPC_POOL_WORKERS, + max_workers=max(2, int(os.environ.get("HERMES_TUI_RPC_POOL_WORKERS", "4") or 4)), thread_name_prefix="tui-rpc", ) atexit.register(lambda: _pool.shutdown(wait=False, cancel_futures=True)) From 596280a40bc2807641a42625d172d97af30a841c Mon Sep 17 00:00:00 2001 From: Brooklyn Nicholson Date: Sun, 19 Apr 2026 07:54:16 -0500 Subject: [PATCH 015/547] =?UTF-8?q?chore(tui):=20/clean=20pass=20=E2=80=94?= =?UTF-8?q?=20inline=20one-off=20locals,=20tighten=20ConfirmPrompt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - providers.ts: drop the `dup` intermediate, fold the ternary inline - paths.ts (fmtCwdBranch): inline `b` into the `tag` template - prompts.tsx (ConfirmPrompt): hoist a single `lower = ch.toLowerCase()`, collapse the three early-return branches into two, drop the redundant bounds checks on arrow-key handlers (setSel is idempotent at 0/1), inline the `confirmLabel`/`cancelLabel` defaults at the use site - modelPicker.tsx / config/env.ts / providers.test.ts: auto-formatter reflows picked up by `npm run fix` - useInputHandlers.ts: drop the stray blank line that was tripping perfectionist/sort-imports (pre-existing lint error) --- ui-tui/src/__tests__/providers.test.ts | 9 +++++--- ui-tui/src/app/useInputHandlers.ts | 1 - ui-tui/src/components/modelPicker.tsx | 10 +++++++-- ui-tui/src/components/prompts.tsx | 30 ++++++++------------------ ui-tui/src/config/env.ts | 4 +--- ui-tui/src/domain/paths.ts | 3 +-- ui-tui/src/domain/providers.ts | 12 +++-------- 7 files changed, 28 insertions(+), 41 deletions(-) diff --git 
a/ui-tui/src/__tests__/providers.test.ts b/ui-tui/src/__tests__/providers.test.ts index a46102e893..2dfd76d022 100644 --- a/ui-tui/src/__tests__/providers.test.ts +++ b/ui-tui/src/__tests__/providers.test.ts @@ -4,9 +4,12 @@ import { providerDisplayNames } from '../domain/providers.js' describe('providerDisplayNames', () => { it('returns bare names when all are unique', () => { - expect(providerDisplayNames([{ name: 'Anthropic', slug: 'anthropic' }, { name: 'OpenAI', slug: 'openai' }])).toEqual( - ['Anthropic', 'OpenAI'] - ) + expect( + providerDisplayNames([ + { name: 'Anthropic', slug: 'anthropic' }, + { name: 'OpenAI', slug: 'openai' } + ]) + ).toEqual(['Anthropic', 'OpenAI']) }) it('appends slug to every collision so the disambiguation is symmetric', () => { diff --git a/ui-tui/src/app/useInputHandlers.ts b/ui-tui/src/app/useInputHandlers.ts index b71a1dc392..258cf7cee3 100644 --- a/ui-tui/src/app/useInputHandlers.ts +++ b/ui-tui/src/app/useInputHandlers.ts @@ -7,7 +7,6 @@ import type { SudoRespondResponse, VoiceRecordResponse } from '../gatewayTypes.js' - import { writeOsc52Clipboard } from '../lib/osc52.js' import { getInputSelection } from './inputSelectionStore.js' diff --git a/ui-tui/src/components/modelPicker.tsx b/ui-tui/src/components/modelPicker.tsx index 406047bc11..5ee19e407c 100644 --- a/ui-tui/src/components/modelPicker.tsx +++ b/ui-tui/src/components/modelPicker.tsx @@ -181,7 +181,10 @@ export function ModelPicker({ gw, onCancel, onSelect, sessionId, t }: ModelPicke const idx = off + i return ( - + {providerIdx === idx ? '▸ ' : ' '} {i + 1}. {row} @@ -212,7 +215,10 @@ export function ModelPicker({ gw, onCancel, onSelect, sessionId, t }: ModelPicke const idx = off + i return ( - + {modelIdx === idx ? '▸ ' : ' '} {i + 1}. 
{row} diff --git a/ui-tui/src/components/prompts.tsx b/ui-tui/src/components/prompts.tsx index cd9c3a2d1d..f9d00dbfe3 100644 --- a/ui-tui/src/components/prompts.tsx +++ b/ui-tui/src/components/prompts.tsx @@ -155,31 +155,21 @@ export function ConfirmPrompt({ onCancel, onConfirm, req, t }: ConfirmPromptProp const [sel, setSel] = useState(0) useInput((ch, key) => { - if (key.escape || (key.ctrl && ch.toLowerCase() === 'c')) { - onCancel() - - return - } - const lower = ch.toLowerCase() + if (key.escape || (key.ctrl && lower === 'c') || lower === 'n') { + return onCancel() + } + if (lower === 'y') { - onConfirm() - - return + return onConfirm() } - if (lower === 'n') { - onCancel() - - return - } - - if (key.upArrow && sel > 0) { + if (key.upArrow) { setSel(0) } - if (key.downArrow && sel < 1) { + if (key.downArrow) { setSel(1) } @@ -189,12 +179,10 @@ export function ConfirmPrompt({ onCancel, onConfirm, req, t }: ConfirmPromptProp }) const accent = req.danger ? t.color.error : t.color.warn - const confirmLabel = req.confirmLabel ?? 'Yes' - const cancelLabel = req.cancelLabel ?? 'No' const rows = [ - { color: t.color.cornsilk, label: cancelLabel }, - { color: req.danger ? t.color.error : t.color.cornsilk, label: confirmLabel } + { color: t.color.cornsilk, label: req.cancelLabel ?? 'No' }, + { color: req.danger ? t.color.error : t.color.cornsilk, label: req.confirmLabel ?? 'Yes' } ] return ( diff --git a/ui-tui/src/config/env.ts b/ui-tui/src/config/env.ts index 999607dacf..60f1e80c53 100644 --- a/ui-tui/src/config/env.ts +++ b/ui-tui/src/config/env.ts @@ -1,5 +1,3 @@ export const STARTUP_RESUME_ID = (process.env.HERMES_TUI_RESUME ?? '').trim() export const MOUSE_TRACKING = !/^(?:1|true|yes|on)$/i.test((process.env.HERMES_TUI_DISABLE_MOUSE ?? '').trim()) -export const NO_CONFIRM_DESTRUCTIVE = /^(?:1|true|yes|on)$/i.test( - (process.env.HERMES_TUI_NO_CONFIRM ?? 
'').trim() -) +export const NO_CONFIRM_DESTRUCTIVE = /^(?:1|true|yes|on)$/i.test((process.env.HERMES_TUI_NO_CONFIRM ?? '').trim()) diff --git a/ui-tui/src/domain/paths.ts b/ui-tui/src/domain/paths.ts index 6b95dcbac1..43c023b6ba 100644 --- a/ui-tui/src/domain/paths.ts +++ b/ui-tui/src/domain/paths.ts @@ -10,8 +10,7 @@ export const fmtCwdBranch = (cwd: string, branch: null | string, max = 40) => { return shortCwd(cwd, max) } - const b = branch.length > 16 ? `…${branch.slice(-15)}` : branch - const tag = ` (${b})` + const tag = ` (${branch.length > 16 ? `…${branch.slice(-15)}` : branch})` return `${shortCwd(cwd, Math.max(8, max - tag.length))}${tag}` } diff --git a/ui-tui/src/domain/providers.ts b/ui-tui/src/domain/providers.ts index 02cc99b922..83ac016ff1 100644 --- a/ui-tui/src/domain/providers.ts +++ b/ui-tui/src/domain/providers.ts @@ -5,13 +5,7 @@ export const providerDisplayNames = (providers: readonly { name: string; slug: s counts.set(p.name, (counts.get(p.name) ?? 0) + 1) } - return providers.map(p => { - const dup = (counts.get(p.name) ?? 0) > 1 - - if (!dup || !p.slug || p.slug === p.name) { - return p.name - } - - return `${p.name} (${p.slug})` - }) + return providers.map(p => + (counts.get(p.name) ?? 0) > 1 && p.slug && p.slug !== p.name ? 
`${p.name} (${p.slug})` : p.name + ) } From 393175e60ce119f654d15dad489a8e282a532d24 Mon Sep 17 00:00:00 2001 From: Brooklyn Nicholson Date: Sun, 19 Apr 2026 07:58:33 -0500 Subject: [PATCH 016/547] =?UTF-8?q?chore(tui-gateway):=20inline=20=5Frun?= =?UTF-8?q?=5Fand=5Femit=20=E2=80=94=20one-off=20wrapper,=20belongs=20insi?= =?UTF-8?q?de=20dispatch?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tui_gateway/server.py | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/tui_gateway/server.py b/tui_gateway/server.py index 41d93db442..3a48e381e8 100644 --- a/tui_gateway/server.py +++ b/tui_gateway/server.py @@ -218,31 +218,27 @@ def handle_request(req: dict) -> dict | None: return fn(req.get("id"), req.get("params", {})) -def _run_and_emit(req: dict) -> None: - """Run a handler on the RPC pool and write its response directly. - - Catches any unexpected exception so a misbehaving handler can't kill - the worker thread silently — the caller still sees a JSON-RPC error. - """ - try: - resp = handle_request(req) - except Exception as exc: - resp = _err(req.get("id"), -32000, f"handler error: {exc}") - if resp is not None: - write_json(resp) - - def dispatch(req: dict) -> dict | None: - """Route an inbound RPC — long handlers to the pool, everything else inline. + """Route inbound RPCs — long handlers to the pool, everything else inline. - Returns the response for sync-dispatched requests so the caller - (entry.py) can write it. Returns None when the request has been - scheduled on the pool; the worker writes the response itself. + Returns a response dict when handled inline. Returns None when the + handler was scheduled on the pool; the worker writes its own + response via write_json when done. 
""" - if req.get("method", "") in _LONG_HANDLERS: - _pool.submit(_run_and_emit, req) - return None - return handle_request(req) + if req.get("method") not in _LONG_HANDLERS: + return handle_request(req) + + def run(): + try: + resp = handle_request(req) + except Exception as exc: + resp = _err(req.get("id"), -32000, f"handler error: {exc}") + if resp is not None: + write_json(resp) + + _pool.submit(run) + + return None def _wait_agent(session: dict, rid: str, timeout: float = 30.0) -> dict | None: From d32e8d2ace98a24ce22d014ddf8da44812aee37a Mon Sep 17 00:00:00 2001 From: Brooklyn Nicholson Date: Sun, 19 Apr 2026 08:56:29 -0500 Subject: [PATCH 017/547] =?UTF-8?q?fix(tui):=20drain=20message=20queue=20o?= =?UTF-8?q?n=20every=20busy=20=E2=86=92=20false=20transition?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously the queue only drained inside the message.complete event handler, so anything enqueued while a shell.exec (!sleep, !cmd) or a failed agent turn was running would stay stuck forever — neither of those paths emits message.complete. After Ctrl+C an interrupted session would also orphan the queue because idle() flips busy=false locally without going through message.complete. Single source of truth: a useEffect that watches ui.busy. When the session is settled (sid present, busy false, not editing a queue item), pull one message and send it. Covers agent turn end, interrupt, shell.exec completion, error recovery, and the original startup hydration (first-sid case) all at once. Dropped the now-redundant dequeue/sendQueued from createGatewayEventHandler.message.complete and the accompanying GatewayEventHandlerContext.composer field — the effect handles it. 
--- ui-tui/src/app/createGatewayEventHandler.ts | 11 ----------- ui-tui/src/app/interfaces.ts | 5 ----- ui-tui/src/app/useMainApp.ts | 15 ++++++--------- 3 files changed, 6 insertions(+), 25 deletions(-) diff --git a/ui-tui/src/app/createGatewayEventHandler.ts b/ui-tui/src/app/createGatewayEventHandler.ts index 699a3794de..8f45bb3d7e 100644 --- a/ui-tui/src/app/createGatewayEventHandler.ts +++ b/ui-tui/src/app/createGatewayEventHandler.ts @@ -46,7 +46,6 @@ const pushNote = pushUnique(6) const pushTool = pushUnique(8) export function createGatewayEventHandler(ctx: GatewayEventHandlerContext): (ev: GatewayEvent) => void { - const { dequeue, queueEditRef, sendQueued } = ctx.composer const { rpc } = ctx.gateway const { STARTUP_RESUME_ID, newSession, resumeById, setCatalog } = ctx.session const { bellOnComplete, stdout, sys } = ctx.system @@ -394,16 +393,6 @@ export function createGatewayEventHandler(ctx: GatewayEventHandlerContext): (ev: patchUiState(state => ({ ...state, usage: { ...state.usage, ...ev.payload!.usage } })) } - if (queueEditRef.current !== null) { - return - } - - const next = dequeue() - - if (next) { - sendQueued(next) - } - return } diff --git a/ui-tui/src/app/interfaces.ts b/ui-tui/src/app/interfaces.ts index 353c56535b..af13e047c7 100644 --- a/ui-tui/src/app/interfaces.ts +++ b/ui-tui/src/app/interfaces.ts @@ -193,11 +193,6 @@ export interface InputHandlerResult { } export interface GatewayEventHandlerContext { - composer: { - dequeue: () => string | undefined - queueEditRef: MutableRefObject - sendQueued: (text: string) => void - } gateway: GatewayServices session: { STARTUP_RESUME_ID: string diff --git a/ui-tui/src/app/useMainApp.ts b/ui-tui/src/app/useMainApp.ts index fb48badea9..e0c18dec64 100644 --- a/ui-tui/src/app/useMainApp.ts +++ b/ui-tui/src/app/useMainApp.ts @@ -380,12 +380,13 @@ export function useMainApp(gw: GatewayClient) { sys }) - const prevSidRef = useRef(null) + // Drain one queued message whenever the session settles (busy → 
false): + // agent turn ends, interrupt, shell.exec finishes, error recovered, or the + // session first comes up with pre-queued messages. Without this, shell.exec + // and error paths never emit message.complete, so anything enqueued while + // `!sleep` / a failed turn was running would stay stuck forever. useEffect(() => { - const prev = prevSidRef.current - prevSidRef.current = ui.sid - - if (prev !== null || !ui.sid || ui.busy || composerRefs.queueEditRef.current !== null) { + if (!ui.sid || ui.busy || composerRefs.queueEditRef.current !== null) { return } @@ -416,7 +417,6 @@ export function useMainApp(gw: GatewayClient) { const onEvent = useMemo( () => createGatewayEventHandler({ - composer: { dequeue: composerActions.dequeue, queueEditRef: composerRefs.queueEditRef, sendQueued }, gateway, session: { STARTUP_RESUME_ID, @@ -432,11 +432,8 @@ export function useMainApp(gw: GatewayClient) { [ appendMessage, bellOnComplete, - composerActions, - composerRefs, gateway, panel, - sendQueued, session.newSession, session.resetSession, session.resumeById, From 923539a46b801a1ba993fae13f3a02eb91d51c7b Mon Sep 17 00:00:00 2001 From: Austin Pickett Date: Sun, 19 Apr 2026 10:48:56 -0400 Subject: [PATCH 018/547] fix: add nous-research/ui package --- .gitignore | 5 + ui-tui/package-lock.json | 38 ++- web/package-lock.json | 236 ++++++++++++++++++- web/package.json | 4 + web/public/fonts/CourierPrime-Bold.woff2 | Bin 11588 -> 0 bytes web/public/fonts/CourierPrime-Regular.woff2 | Bin 11192 -> 0 bytes web/src/App.tsx | 107 +++++---- web/src/components/Backdrop.tsx | 77 +++++++ web/src/components/LanguageSwitcher.tsx | 2 +- web/src/components/OAuthLoginModal.tsx | 2 +- web/src/components/OAuthProvidersCard.tsx | 8 +- web/src/components/ThemeSwitcher.tsx | 124 ++++++---- web/src/components/ui/button.tsx | 2 +- web/src/components/ui/card.tsx | 2 +- web/src/components/ui/label.tsx | 2 +- web/src/components/ui/tabs.tsx | 2 +- web/src/index.css | 242 +++++++------------- 
web/src/lib/api.ts | 24 +- web/src/main.tsx | 2 +- web/src/pages/StatusPage.tsx | 2 +- web/src/plugins/registry.ts | 2 - web/src/themes/context.tsx | 213 +++++++---------- web/src/themes/index.ts | 4 +- web/src/themes/presets.ts | 215 ++++------------- web/src/themes/types.ts | 66 +++--- web/vite.config.ts | 54 ++++- 26 files changed, 798 insertions(+), 637 deletions(-) delete mode 100644 web/public/fonts/CourierPrime-Bold.woff2 delete mode 100644 web/public/fonts/CourierPrime-Regular.woff2 create mode 100644 web/src/components/Backdrop.tsx diff --git a/.gitignore b/.gitignore index e516d154f3..8b455cf506 100644 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,11 @@ environments/benchmarks/evals/ # Web UI build output hermes_cli/web_dist/ +# Web UI assets — synced from @nous-research/ui at build time via +# `npm run sync-assets` (see web/package.json). +web/public/fonts/ +web/public/ds-assets/ + # Release script temp files .release_notes.md mini-swe-agent/ diff --git a/ui-tui/package-lock.json b/ui-tui/package-lock.json index 0b33e6e334..1e8e5cfa4f 100644 --- a/ui-tui/package-lock.json +++ b/ui-tui/package-lock.json @@ -89,6 +89,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -318,29 +319,6 @@ "node": ">=6.9.0" } }, - "node_modules/@emnapi/core": { - "version": "1.9.2", - "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.2.tgz", - "integrity": "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "@emnapi/wasi-threads": "1.2.1", - "tslib": "^2.4.0" - } - }, - "node_modules/@emnapi/runtime": { - "version": "1.9.2", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz", - "integrity": 
"sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, "node_modules/@emnapi/wasi-threads": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", @@ -1484,6 +1462,7 @@ "integrity": "sha512-+qIYRKdNYJwY3vRCZMdJbPLJAtGjQBudzZzdzwQYkEPQd+PJGixUL5QfvCLDaULoLv+RhT3LDkwEfKaAkgSmNQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.19.0" } @@ -1494,6 +1473,7 @@ "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "csstype": "^3.2.2" } @@ -1504,6 +1484,7 @@ "integrity": "sha512-eSkwoemjo76bdXl2MYqtxg51HNwUSkWfODUOQ3PaTLZGh9uIWWFZIjyjaJnex7wXDu+TRx+ATsnSxdN9YWfRTQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/regexpp": "^4.12.2", "@typescript-eslint/scope-manager": "8.58.1", @@ -1533,6 +1514,7 @@ "integrity": "sha512-gGkiNMPqerb2cJSVcruigx9eHBlLG14fSdPdqMoOcBfh+vvn4iCq2C8MzUB89PrxOXk0y3GZ1yIWb9aOzL93bw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.58.1", "@typescript-eslint/types": "8.58.1", @@ -1850,6 +1832,7 @@ "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -2185,6 +2168,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.10.12", "caniuse-lite": "^1.0.30001782", @@ -2870,6 +2854,7 @@ "integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", 
"@eslint-community/regexpp": "^4.12.1", @@ -3765,6 +3750,7 @@ "resolved": "https://registry.npmjs.org/ink-text-input/-/ink-text-input-6.0.0.tgz", "integrity": "sha512-Fw64n7Yha5deb1rHY137zHTAbSTNelUKuB5Kkk2HACXEtwIHBCf9OH2tP/LQ9fRYTl1F0dZgbW0zPnZk6FA9Lw==", "license": "MIT", + "peer": true, "dependencies": { "chalk": "^5.3.0", "type-fest": "^4.18.2" @@ -5121,6 +5107,7 @@ "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", "dev": true, "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -5220,6 +5207,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz", "integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==", "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -5992,6 +5980,7 @@ "integrity": "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" @@ -6118,6 +6107,7 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -6227,6 +6217,7 @@ "integrity": "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "lightningcss": "^1.32.0", "picomatch": "^4.0.4", @@ -6635,6 +6626,7 @@ "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", "dev": true, "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/web/package-lock.json b/web/package-lock.json index 71ca2c7a7e..47c6595ab6 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -8,6 +8,7 @@ "name": "web", 
"version": "0.0.0", "dependencies": { + "@nous-research/ui": "^0.3.0", "@tailwindcss/vite": "^4.2.1", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", @@ -64,6 +65,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -985,6 +987,66 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@nanostores/react": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@nanostores/react/-/react-1.1.0.tgz", + "integrity": "sha512-MbH35fjhcf7LAubYX5vhOChYUfTLzNLqH/mBGLVsHkcvjy0F8crO1WQwdmQ2xKbAmtpalDa2zBt3Hlg5kqr8iw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "engines": { + "node": "^20.0.0 || >=22.0.0" + }, + "peerDependencies": { + "nanostores": "^1.2.0", + "react": ">=18.0.0" + } + }, + "node_modules/@nous-research/ui": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@nous-research/ui/-/ui-0.3.0.tgz", + "integrity": "sha512-konGgtV9lkzqYkWuoUGnROqavq1svTnGbERLKItvEXmsRz4xRtbAMHI8rK6sjGpHDpwvOUN3olcOhRLTGuVfcA==", + "license": "MIT", + "dependencies": { + "@nanostores/react": "^1.0.0", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "nanostores": "^1.0.1", + "sanitize-html": "^2.16.0", + "tailwind-merge": "^3.3.1", + "tw-animate-css": "^1.4.0" + }, + "peerDependencies": { + "@observablehq/plot": "^0.6.17", + "@react-three/fiber": "^9.4.0", + "gsap": "^3.13.0", + "leva": "^0.10.1", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "three": "^0.180.0" + }, + "peerDependenciesMeta": { + "@observablehq/plot": { + "optional": true + }, + "@react-three/fiber": { + "optional": true + }, + "gsap": { + "optional": true + }, + "leva": { + "optional": true + }, + "three": { + "optional": true + } + } + }, "node_modules/@rolldown/pluginutils": { "version": 
"1.0.0-rc.3", "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.3.tgz", @@ -1638,6 +1700,7 @@ "integrity": "sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.16.0" } @@ -1648,6 +1711,7 @@ "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "csstype": "^3.2.2" } @@ -1707,6 +1771,7 @@ "integrity": "sha512-XZzOmihLIr8AD1b9hL9ccNMzEMWt/dE2u7NyTY9jJG6YNiNthaD5XtUHVF2uCXZ15ng+z2hT3MVuxnUYhq6k1g==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.57.0", "@typescript-eslint/types": "8.57.0", @@ -1984,6 +2049,7 @@ "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", "dev": true, "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -2092,6 +2158,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.9.0", "caniuse-lite": "^1.0.30001759", @@ -2269,6 +2336,15 @@ "dev": true, "license": "MIT" }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/detect-libc": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", @@ -2278,6 +2354,73 @@ "node": ">=8" } }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", 
+ "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/dom-serializer/node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "BSD-2-Clause" + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "license": "BSD-2-Clause", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/domutils": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz", + "integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==", + "license": "BSD-2-Clause", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, "node_modules/electron-to-chromium": { "version": "1.5.313", "resolved": 
"https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.313.tgz", @@ -2298,6 +2441,18 @@ "node": ">=10.13.0" } }, + "node_modules/entities": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.1.tgz", + "integrity": "sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/esbuild": { "version": "0.27.4", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.4.tgz", @@ -2353,7 +2508,6 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, "license": "MIT", "engines": { "node": ">=10" @@ -2368,6 +2522,7 @@ "integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -2718,6 +2873,25 @@ "hermes-estree": "0.25.1" } }, + "node_modules/htmlparser2": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-10.1.0.tgz", + "integrity": "sha512-VTZkM9GWRAtEpveh7MSF6SjjrpNVNNVJfFup7xTY3UpFtm67foy9HDVXneLtFVt4pMz5kZtgNcvCniNFb1hlEQ==", + "funding": [ + "https://github.com/fb55/htmlparser2?sponsor=1", + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ], + "license": "MIT", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3", + "domutils": "^3.2.2", + "entities": "^7.0.1" + } + }, "node_modules/ignore": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", @@ -2778,6 +2952,15 @@ "node": ">=0.10.0" } }, + 
"node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", @@ -3223,6 +3406,22 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/nanostores": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/nanostores/-/nanostores-1.3.0.tgz", + "integrity": "sha512-XPUa/jz+P1oJvN9VBxw4L9MtdFfaH3DAryqPssqhb2kXjmb9npz0dly6rCsgFWOPr4Yg9mTfM3MDZgZZ+7A3lA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "peer": true, + "engines": { + "node": "^20.0.0 || >=22.0.0" + } + }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", @@ -3300,6 +3499,12 @@ "node": ">=6" } }, + "node_modules/parse-srcset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/parse-srcset/-/parse-srcset-1.0.2.tgz", + "integrity": "sha512-/2qh0lav6CmI15FzA3i/2Bzk2zCgQhGMkvhOhKNcBVQ1ldgpbfiNTVslmooUmWJcADi1f1kIeynbDRVzNlfR6Q==", + "license": "MIT" + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -3331,6 +3536,7 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", + "peer": true, "engines": { "node": ">=12" }, @@ -3391,6 +3597,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", 
"license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -3400,6 +3607,7 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", "license": "MIT", + "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -3509,6 +3717,20 @@ "fsevents": "~2.3.2" } }, + "node_modules/sanitize-html": { + "version": "2.17.3", + "resolved": "https://registry.npmjs.org/sanitize-html/-/sanitize-html-2.17.3.tgz", + "integrity": "sha512-Kn4srCAo2+wZyvCNKCSyB2g8RQ8IkX/gQs2uqoSRNu5t9I2qvUyAVvRDiFUVAiX3N3PNuwStY0eNr+ooBHVWEg==", + "license": "MIT", + "dependencies": { + "deepmerge": "^4.2.2", + "escape-string-regexp": "^4.0.0", + "htmlparser2": "^10.1.0", + "is-plain-object": "^5.0.0", + "parse-srcset": "^1.0.2", + "postcss": "^8.3.11" + } + }, "node_modules/scheduler": { "version": "0.27.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", @@ -3647,6 +3869,15 @@ "typescript": ">=4.8.4" } }, + "node_modules/tw-animate-css": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.4.0.tgz", + "integrity": "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Wombosvideo" + } + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3666,6 +3897,7 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -3751,6 +3983,7 @@ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", "license": "MIT", + 
"peer": true, "dependencies": { "esbuild": "^0.27.0", "fdir": "^6.5.0", @@ -3872,6 +4105,7 @@ "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", "dev": true, "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } diff --git a/web/package.json b/web/package.json index 09675d283f..e10a10127e 100644 --- a/web/package.json +++ b/web/package.json @@ -4,12 +4,16 @@ "version": "0.0.0", "type": "module", "scripts": { + "sync-assets": "rm -rf public/fonts public/ds-assets && cp -r node_modules/@nous-research/ui/dist/fonts public/fonts && cp -r node_modules/@nous-research/ui/dist/assets public/ds-assets", + "predev": "npm run sync-assets", + "prebuild": "npm run sync-assets", "dev": "vite", "build": "tsc -b && vite build", "lint": "eslint .", "preview": "vite preview" }, "dependencies": { + "@nous-research/ui": "^0.3.0", "@tailwindcss/vite": "^4.2.1", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", diff --git a/web/public/fonts/CourierPrime-Bold.woff2 b/web/public/fonts/CourierPrime-Bold.woff2 deleted file mode 100644 index 4f6d5e9c863cad49d54112e119f708ed9f644d74..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11588 zcmV-KExXcpPew8T0RR9104+oS3;+NC0BSG*04(hQ0RR9100000000000000000000 z0000Sf;0wT0D=Y)2nvC`RDqWO3xh%c0X7081A{^YAO(aR2ZT%;#ZzfLfR%=rSX2Ms|wyPJk4C4O2{mVuN(2{xSU>0i9&{5DX8&w$EXZ>=F^OwlT(Ih*8^NNL9K+ zdR24^uK%{vBtF1FleBwnR(NDj^5oe8m@8AQbcf4b0`fdd-Q63O>YWKf@}0#gi7vg5 z20Z{~J%|OcPb@$XG-oIS+NU6o#geK87y5z(stQNF@n$JYo}r9`-Z+yPYg3oH^z-zC z?9G;1B#K*Ea*D$+GFEur{FvH5vO3P(Sf4CfZf=Yi8sHhLu?2cB`p%IkT)FWmkEZ62~ORHKHt#pxj zd>8r{l!#;}Hz9~swST$Xs?6C6K=+ayJp8K8vj(EU*C2ph`LXpXO~VBMX&Ij%F=3Gz z=cquimtm)-dh9&tuC3be9h!FTWP4o&phMR(`MM@_kXBv3e)$Z5r%MBdthLvKIp2jz zv|iSy`n-b$-WqRhD}H#FdP8$!3wRkR@fuZA3!|r=yp1|c4WW-xfKiA3o9R}BBMp0BOO{miE0@0 z6p;^%VRy0!wJG@&6m@2;A|Ol^L>O*>9hR`*V_~AmU`K#Ps#VikDMB##MG|_mV5S-l 
z`4I}?y_@(+cZzYmwA2JmRf++%TIwFBGFQ1x&l*!3KYK0fSdq9l>kZ+)*|_+rUBAO z$V{<9WB5Sg_)OTO>@=k)Ol>q}bf_Xsa7nY->*E7>L_QNd(Klr0LB&R8o(8vO;sSJD9Efq= zuZ$b$)pa`bT$k~a!mMQjB|<}r(wg*J2h+$&0lknC$%>v1u;B{=BSPNhZ-A`p&)^I5 zKwJL}6$doG5`421B))*pTym-06>JjZ`rin?%2|TWp?jnqJyP)$#3AIZKMEeI06!20 zLvh4iKmdswS)}2VAX+F2vrh!x^e;=~^FoN=8FgQSa*Vt=5w^M)@*i~KS(a^;Nyqqz zA6n?rh8>8HSn{7KCurrm{nQ_v0<0BvJ2HE$F&qPSp)n=!fEI$~5+-d4tu=3S#~hHi zNxEb-ZRA$)XyP!;OiPs<>ob=S?gsRr$|}RIq}zhjE+*3l=tvS-8olC=5b0flg(24s%aC5T~OaETL5a+I|@Y zRoA;AU15-Y*QOZ;zWsauc6zM)oJ)^f)XkjPJ9?^YZ*}i96`B|;|H{gb64UHFfpEiu zY(v2N|IMSFS22QG-}1_NT}v(4l6;E(mU2EIInzG@f znPX-tUnAENp_!kY6S~H;bbeqaQ;i+%^LCYE8DzGBjyi}KhZpA&QnjKG><9n&lF~@3 z_)fAMnHZ)K*`8*TA~O?Ko(HYT9Kc0MQ!R7KwvR&?v2XZ4R%?twSwSWlDb$w+7n-;;C-T!hu677S=Xb9 zR7G=d(vl8FvP79{UdvS+35r_eIleeEBPfzgX%#a>Jaq<6HF~TuXVH#Zxg(lKMEQ+- zB!xW#MISj@F=~27@91gLbQ>est!8IXTp#RNWbKQaaT~K(N~q7l0*ZFYX_4?s*4?ri z-<&GlzOkiGO+8H!U+=DCrh4kGfGELU1o1JdCbG04Imhs3vb|GjU6XQ;=yk_Cy$5a> zw>#|_ZJ14=c*|E`Iw&4EEFNKzP%z-kPN?ivv>#nUjZfx-1M>Ehrm7d%jTqKDsvN>J z`DDzk<99x4$eJ=V2cSm6kBOL5*^2rJw%SRJz6d^E5kEwnfZLpd6l%^?mZ(+YWqVLA#bEwz|I+;v~8S<4)c<6lMWf-~O)Q4j3x@YE1)wFn| z6v-92#-@*Hh$rxGx@Q}v=m^X_=4Nq1>?<1A9p&L3coVNe84^&NN23)^pJ+(nI^sbk z0x3^Oy%2QYC=LQuBH#c|in=3<)XFauga!dtr$e11`Goq=2ql7k=+)qIe@xhi*ej~V zxJj7tZC5x0vb5rUQ(=FyD9Z}ab%{e7%e*k~CBg~;!hp<44T2K_jSTuizQ8jw+%f2= zP+=Q0n+>Kj`^s!k1QQwBxz-IS&IC=zup216U^p-ZSb-i`MgPM?Oa~xSm=2V0Wx0cl zfX#6~nL9f=sgk~mV8E2bA+rd<@SYxE$%d%sMJZ{^Y*J}V5FBGq1r*&9v{@mF6!kNNK?zJnk}Hq zz~e*}Bz^g$PaiEMVEHn=ku^>xwAOhPl{uVA_hw%!< zX9|3T;$y zXeuo$X8%V;@jLWYm|$nFrV zDeGEiHM{k>z7-i#Cgos^f>2(+97%%G$RJLm3=m!McvQ?#55@tC%CQdeZU2kXDc7nAjSr-GhI~(`{%|oMPeUx0ZxlDMmi<6lPGcp&w-Fs=s6uS@Gf*U6J`E zi2@L+^sKYSUc3%N8CnD_D*qUSTj1HZcFf#Kb?21EQL=LegH`^9tUqqB3mwv=5w2E5^{=Y9XIpIDsZiYG?lqe-F5n<1FbN&iq(kTm^}?v z>6?3_#E^QvatZ+96dF~jkSQgb^ih;F-Iis&@^AgCVHOU0??5KHiXSlV7SnDq>$-?c z6^Uh{&jiVA7}V`7B^AuRUjH@B8fZABzZ_XF*P>|c9!(%vlc&OwS9arUp!l(9tw$!v zrp3EeouC-XV4BciGUZRcIpoz?b9b!Ib;{_oPNquT5(7v1a+=X&oLGkKfqjcL^#z%T 
z7Aw%9Eai$GS(nB45J)6dzhTI6tvJ-I1Ai^{mvB5YU+K*9mzAp8VvWySvSrnJYgV?g z`KP~Tv7ZB9pmM>S(FN(${r}3WW$E-YPlNFWG_dI;so2Udm10&)1F4{kSJhdm4mE)NH{ z5XzpBbWFul9U^enAYiMEP#N))!7F~%l{2Kf3hC1gDCU=w8X*}FlKEIrVn7e7>ARJF zUQ{0a5|s!M;5)=%Y==)1qYo86)063kg_Yf6n$NNCtd!xT;pUAA;D<`Y=wVO8Od+*s zL5Rdfly=U5_t3Zmla_9&$@I%^KMw2yS8{?-&MTK&MBsUfcnk7`One!=df`V9uU^JU zZT&(u)Fm!#TNpB1Yh-F^))l%hJQjp@nOT||tu@oIK9;C!NVRlmdR(B%0Z??mcwxMi zp(!b$Z%>RD>gUzwlOb5>A><9Q_;iP0>OQsQHidFqiQeh=x&aOSu$q7`0efvpA$Hb! zs~PtfAIwa&@|sCo(0hDeH*R|8{oZD`^tiUP?M~l+&GxZa^m`4`<#-8vX4R}dWDvlX zLieF5nx{tqHyOLN+A!QM0-so(CW&ny6_)87g(Y5I-Mwn5h6rdU`(l11yr=m~n1XEV zJ~lEJH`W<85|`AlJ2-Qd$~V$+cr6Z37kKnKIR+E@=UTS`^h-^BXlT7a04z8bLks?_j8T`kl?$SbZjM zS6uA&%+$oKiM}zFbs3T!lkw#S?mBpmvriz`OZ4vib-+Bnq=o#*-byBKWy{IxARJu& z1?84GMkrmDr?ZXCmAV_8hYx`XXA3!dvUE{KkGe!Zzuzppd6O|?ekKqPHaV;G5#IX< zq(5ukQA}Cn8|s2j5LEEA#)35T^0wKo4o)G8kt=b;^X`hz9v*jjct=St169b;an1AP z5XqwEX#oU%QWv--Sf-SBKEOF<^;aa?z)+wB!^?;K>ide(47f%xAOWMi8{q#dQ)6TK zJ9I#t!z2c&lYyg;SbhSn>Jv4!e~}CS1kl$+MEi(^E|JtflZX7JDc35n6)vX7?f3q*pD*pQeUc%H>N1&1fb1@eNTgT-M&tBAIioFlCk*-gOFfa_}ByWyw! zDmyjWThQp^FegT5z=t=`ae=ds2f3$LKQo4b??u%q7Hb}!HC#%ym(|T=CLXQ0_V2C5 z)LF97ImVTgujFmv&C=r3*r+5)bV?e$$B8X8!xPPJLNd@fI(va{f{kxFf%rYFWwZHu zNvn5!@pMYsxV)tF(2ews8_s31Q+wQ|v+mDQize10d&UwSX?!x-O&CXF#^of031xAy zpl_j4-f*F{S(`r})Pr!xul3j(q@bD$%yTFD1TFd zT)y{8Y)cEUc*+a&dQzcqLTtS>*?>)6HM}R$;Q;|rVWFYGf-^6E>|;s1Kp=^Yi{l_q zijM<%4^8hT@Z=e-M#_;qfyWRJ1$mo{%91fl#5$QH=m$noLOlwi7&+}fPV$EBRW&Dn zYAivJ3kI% zROHjz7Mg*QArzke7XWU-_|V<-_wEMK$u3z!a8j62_~xpZtoqWN_*+|Aq9yjF_@EH9 zdGO!^KfYIkrTu4&!zrjVydv@Nc$%X1PH)+lzOtBuDw(3A+Obs6h`BM})b#LC-d?O& z^`T|Mr_rTZ&G{! 
zha2OU-23J1`EEf9FXJp2T+}>+H$qxI#mxtuSy-9Xz4^-g?C|xT_&wxg%3nqlTk(Re z`@%xr!pJ`Z9aeL*tyb>_1`o#62S7X7fLf^j>ulAZpT~{=bsc=vQ8o>kwmG{;YcFVX zckDVlJG<-5j$AN!sak}|7H8wI^8e4U6#uwB6SiN6rnsE{U>URok2La(N)Tj^cUc>a znx+2rgc_Ro;}s3ahMPwK`=!RLCXDXRaXHEP`Qmik*tt*I(H-8-Z|0QQ`KOlc+G}DQs?lXIZ`#Q2JGZ+4DWsyH91e z*m=cPAc;zz!^?%Zo#-(q~eKg&gO54#|o{@p#J`@i(es}j%=%|HDeR&#S` z`)6QeBn)}>k94hUzA`_*`D%AJ1?5Cnx`B6)>fNmi7vA0edGN3}^4VAPA^VO94+twO zt0Xd6RZ`^*MsDiRnV#;@<{pu`b~GK=qU6 z@GpP)#{4aCDQvh%Ey0+Rc0OqlX#q|G`n*5PNqrMW^kjpp3vQR^ywG_3+dy_&NLUy? zgKV0HMgo zYYH+s_Wdzw{KVkn@5dTke8B_od#Ft|3MoEaMHg;l_VfVD++98t^&2T9DvC-{Qn*e* z6w2YM#pmM(Ssj_Yzti7;(|OAmlhvBK_7;f`Ghb%%mEnmXOzbb*TQ7RjNn%6hOsn@i z-1ks@ic4M>=A>snsgPP%6F*S72Y<_Cb|rB|Y2k>5d1@_JQxTyI*>rETig0UVAS9gy#<#PU$O z2AItM-eTSFB-mZCOV~#bp;0M3DmS=|agg0?y#aVy@mgrqsnt)^e4!!dSw{!36f~#q zNN|scf%NDyM@HLRT_JWcGRN1yEHV;U3fL?Oi>eTb2-8k15YVT71_v5eZx<(iq>E2T z0gG8C6f))yNQiJREzA{TOd^P^ZAoKNcdbv(@T=OoXFWaj&mY&c?dOy+?AR9FV7wNx z@H@VVH;@*JagW%MklMpxQLCR5N^jMR9z_!2WE1zL2j~U$LQ~=ELFfk5WEN2uBinJ5 zam2vtuiO3B)`i0v_}j$H;b*m_PdD{cKKrS5==h4uwv#3y^2j()A0-ZXcWlI3_stQ} z2D%#@QX3EWd0F-=yyh|d^~{O+6HAKoIT1R0{M-z1_@v$u;V$4oTIkKS@2mJ*7vDU5 z6L$kI#eQR;Z+-~H7^+S)%c=`9bFv*;eMOBIs`4JRw*$+_Mvs-E4GIN4P%=B0;a4aU zVfs~O1Melg7W%pJ_=Kb*B6MrU9--U$5mGWmo)H1e92sk$R7JUHoyjqPA9+*b4|biT zpZG7iGdD6G`N8g90MbJDbnesPV+^lj@a{jK7h_wezps7__XlpHETc5x?dwe2e)V@o za$t4dz_y?egX;J#v(B^8hQip=6LS_-`XboM?Za_B)1B*jZVwi2sFlxoUye84m&$dlO^y*9JdfYg*)FeyR?M zPK5!hR+lef25F%u0_FLgnV_XoCzQ@)X2>Jcm*Wh(gqr3hBc%E@mRYH#H=Zi(QVTY8 zSMZLD+wSm5-W!d%scA_v$Xuti-8*g@pWHYSlrvNolBn7d5P~#rK4@!&LYOdp8 z#wp4^!O)Wa7Zz(3F1Yz{!p!k=v+y~&I_^1ZNXrMEBRjQ_(&?$o3@oV(B0_b3A^Iu{#?cME%D%tan}^sY6D zt&o_JKAXAsWz^nyG|Gj<>H~s`y(3ix=YWiVL~#rIz9ogrA{5 zYJBo^1vVT)(v@baliNeVO*vszmn6m)sT)Li2LzuV!A4Y&h3Bh88j&Og( zqrud#P#QBmar;C<1(?YGH8UPtyjg8*7Sz&`L@q1@79mJkTYYnJY@VhjT2hgmCTVPZ z4GcYBtU<~pkcx^xz}lL{=R>%P4PEjgBQ_Ttupr0AQoOu?rJBKjES_LSy2;1Y462iR zzrNJ6t?>;LHMWjv@T;`<$mXeJN;Y|mguB$@1Ig#o)p>ocgSN-7=u!DEOUpUe2XJt# 
zGo(y(OYKYRUZ3)g5fxnB#+7wJ@2|OAPl@zLk0TKnSjYc-5Y2+umrnCvst5j8(LK-z z6wc$tXNG-w-(gQcFKYDn4x*>02Qd;G8=DaZp$j$+U#QH#*W8S@3ssGcVQ})5jUg2> z%YO7)Q_C}_3rzK!BW##_abi>yb@7?x)vRw?aQd{B=~{HZrGO$E6vu4O&u1)OhB83= zHG^9^mh85m(Ke}6iU=E*O^m$NPUkoGHLWigs_4upx3Z#h=~2;jPO5d5caQ8aw|5K1 zY_|@qJ5*tBpXKhE<8pzXX`TiK7le!OV*zf2vkk4hNg`>LXfY_RVJx-Q|Eg5hzgcSQ z?qdtRaWhBfL$rBE8te}9~56X)YW3g`PKhz~eMms0CGE$VhK@xbW_X8QjpnLp+ zp1=fQk}-$ey^iv}R=abE)Q4VBkbyR#>Pui6yZpyqNC;>vW3;)VqMG?vaB6V<@rEA+ zyKwC|C)D=CA59Rn>>4biKh9O+nT)O^lPl2z)*{K7OA6!!W0{MM>X4oBK{JWD5bs?5qe(p3cjJ5`BU$b$@<{*SkA?3L}Er z!ZTiH!RV+o4sw78-6u8+b-&|X_xe|D9Xm=K+{nqts|HdT^Hl~@mKHhFH?W?s|9M;O^pd_>`m-TP4H zHgc@|7$6>=dYZXomU~g&0QBh%pE&b6@6G>a0LfUKnGf^0MRWp9R3gmUhY0GqR{$W`s}PaDI<} z3;L6mVA{T8+DIpAB^%{d7Xm+U6Iuj1I0Rbw=Y~VA4DWuL8|>!>mFnp9)6&rVevJF}0dGXL2%A8De#~09W#a!*&+oiTJ&zuT z!UJ5iNHhmpfJheo!K`58^s`9l_JsAJDYmdd-QMsV3cxYx^y|~zB zV($KI2M39BF1O9y+$0xVV;C@PT=ngN(M#XC> zT$>Yz6<9(zcU+Vb9SfZeV4Z<_4EuvcF%eO*MfuSZ>R_sXz(^U4s7dGrWFKCj|+?Go1}*Ult$@WNW9s;B)wI9cei`J52fXluk@ zMsD^ZE1Zivn>g$ku{SbVoV>|*g(c7zebx_>xvp`H^ zCZ-67Bbi2~&rJ)gjMt#q5C{6r7Lv4Wu`= z9|>ey_*0M)n&NPF`|scEfc9mkS052f`?gt|@lw+hY9^Mp-yyroIhi>;OY8C3(usH7 zu5iLv7^~jc0^@tdM^8)SXu6BwULfi}RA`nyI#8ajb}0DneeBwxKR=Jt{?Ix4w&%x< zr`1)yqxo^cYrcSX9rdu$ClP8KHPS?rqN`pRpq(FGz@kO@+j$L<^Vbrv>A<1Uvx0-Lc?enS-uSgHz^c=pB`xQvE( z9w4h{S;^&A3IxD{V=9yfJy`S_Or;1?u;sAIIVXn!t$(FDi$vbWep4{L}Cc6xIP z@9-N#^o;N7?*cs+@0T9-*1gZC>8`c-b>zfH`-4~Nekm;|xt~-?V}>)6(cXTV08oS} zXhz5&tMndxG9YcP#=vcCR~NH8cmBEa?;SIj=+OpaR;-G;jf^|rBSB@qqp?xXGk;DZ`I{jhCygA>v9!!NE*U{QHuEQuo}1(5%#ulj zubaEScNd%g6N-?hDU@mZ>ID0*RO(mzglhX~3gtAsm3G$qN8Is;D7>H^ilS1ZXf$BK zu^2J|i5tDJi<7Ndy|7sc{O>t-&!2wA(}0_KH}nV@c~vQVNgz7#)M9ADQ#`?i{sh`x z!)We~+DVa;?~ooT$9=&wWBTnZbuxKTl;gs%eF|+q2vW-UkMp}?>)5p{XeS%At%QlL z@EU)YO80*`+$1+>t?%v=14lT;+okQnSTm?(x&a(8tj8$Oho4`{5f7aS!xi=x5y*qn%^j5ngv?^mTp&&*b zCunuJ2XIQqvn>_vF=saGl#o`?F$`9RR{*EB&I+a{vnQaP@(UEO70xIESaDT$0IS10 zfKw>L2tVy`&`F}LnmH8Tgi9cvua96pD@-@I1Dednh 
z;(^PnO17bE?uhqn&+M;ozHq5t{Ad!`JZoS-REq>};_hU8Sl$^XdZEXVMx4v1inp3u>F(G!M5`a_A zmj=CWmYG;ifUIv*tmc%QJcKN6ua?+wN=)refFD5expqV&fNSDp&CzRPJk9Hih(@?3 zHJVy8jR39*JLEHTERs=d7L%z&&NW4V98Ij)PbG4$2~)e%%<_>CXe3&8l%5(#sAP;w z&Wvnm9(z2=ztF7uJ!$cBz}G~0Qq(VDrlygQ{DXON);ZpGa$xLsu^(j>V7{lO_3>D$ zvEL6wO#ygxYkrlV{eSw9ufOCk>!=f$RU{|?0)N3-mtU>t{jqBm;$0y~ts7 zBj^fpMpj8*PMJs=KDg)Dqt&e4PN;IoUh^gm=A4vMWx`f>5c(&c9DavXD7u=;K+llA z7(npt<^k#Gwu|mNww!bYVo%7N>(f{uXQM%yW@8JY-0L1bcJ<>>F`~4ZI6`CTocazA zdmf|tm7g`{YGr6pHmBl!u_uPK@k*8Ypm}a0C_&3GCuQZFqzaK`#$B#|`WeLsb%k^Q z+=S$mQE<110?@D9Nfj&dyWurx=EkHNKtcV4Xl#*)lM7&DPk_7;881w9CAG4Hbu!vG zOmav}y0j8Y%lZ9`DR)(hMj-xbDV>ysX*l-=h8=16j3L7s)~nnj=btr zxpWpYY(qsT))!(iiCN;5!(v(+JXA&*ZI3guk$BZ9=FB3;FQ{EZ%KzgM^2o%aOH-CjDm`W&YcHO3|_qX zVDjb1A4>qXKpb2_g7NT$2qh3EoRCO_NKwS1#fTLrUV=nPB&3q1NF|d-rA4b#2EEp% zSGQe8ba|>-Uv$LToHeX-!BX_t?+c&#ll`V$bH$8w8I&^JkmaiDZqvHymS1JN?~c0; z%JDl}JoLbVTyMSCDPNufMGDF7RcyTyr4-7QsZi;SDmALrs#oWveHv`AQKNT0n03hE ztmUES7CoVM#BnEFbks4Iw3~F=8S{Gl76cE1lv*4$k#&MVMvD;}twsiq{ZN*N)EFk~kZCe&CHMq}FE5{~AAV6d{Q4>-&X z$FXaQV*17}GH*-OyCcc6IK&Cx12?lj_=c{#&iWA=`y`k6ujT*x+?n0|DY~DDlJY58 zFb?NYNoh1ff_uWDLY^VmW~VZzOc9|K6e(i{f)$iefMOyRAc`WWV8zfGwN31;T{>4T zovZvgUG~pKf4KcO5C81v?o9I1()zrMqLq-vA|*p762(H{{0bp0!5UIY_5$E&P79Q@ zyH1ckZ$-j2w-lt zt2L|FPTLTnx+fUW_CN>sD^Ul;OAo9%*S_rw)1kt%gyT|C+)QEDQ2l$GW~*=Q8dw+7 zg>?i)4_IZCD^pY{$|tb2U;9@S$+!y2z%uW#yhm#tAap9@YHhDscRtW@h`3_zQ#9$j zE86_;)THvQ4G+K)cw;kGrhI)_o^qm$*XOunEL>Yj-m`U1hcDN$;9|W3ykK8nvh;Yw z@4=OJX%)nod2-+=TuwWXpoFa?lMTw$3Xxak~A`# zJ&#WYA-m=I#ei{?r`G@!6qD(rWD+PahGGVCqX|A(~UG&y`Pv6Yy>J2XS3Q8VCsMTfVY?_EjE-J;ANJ;!<36| zUhO1uz!}z!V{`Vu*@^b3kuGB1%+fCUIN%~<7T5!im@&~n{QBa#g9HN3TBA)S%=kau zi0e{4swWpHk{!u`L?+QmEK(4OPr9m;$p|v~f7nK{$!5Eqh#065(1*Qs6@$ z-*G7@4vdd39sT|M-~XK{`|P#bPLqc8@_K%^v(GLA6HnK`ZTH`2YWJ1U&?;4?oX%4f zDpjdg;~^6!!7iXx^zsDw3Y?q*aQy^;HUp{-)KkV#SUogY(ptLb$kfJH8X=~FK^&Mt zujfhAt(qWDB%_tt0eOKqTnvuBEyG{}mliBGTB{9faR891QH%2q%2+B1{4hYyctP*^f&d2~ zzA6*y<-_{OSk|~_+ZiG@m?;{eqeO(kvd85Txe6q-2#3@S)`NYDx?Q^Ih|+N6ofGK` 
zqou~_pmPwLF`r|60hkGd(`giqO(Is9-)U<=^#rNct9f>!>LnM^+hYO2@!cCKfnyk% znNEV?M?YtVZ+Q_Fh7U&SIlzd}y5jMpHyY$}iB=ZcSb~oP`6qFL*I1#NvT;*(RBd0a zvMoxh+oecy+c4-(w>mOxf{8d~up#Ae5sl#Z>Cwhg)8R8@GrbP+f~YIWl-G#U@I^Ew zfaA(ZAyb?ZhYK&&tYB`Ib$mrtPDLVG!wBoxk>Yuo<;Js#>|H0k82MNQibAuuXx159 zlaOv6!9k?*@ueO@8LVaN@TiF5dNl!;zsFoCgXpF;OWl|hLRu^}mL-c}Pa4$(+d z8?FIlD2gGTZ8|dc^sh{|#T3t2{>OZ)nD5o}{I9@JsAP|PGz2D+xX=LbZOYsUdA_!`=M0x^0R<%p8czv2jl~3okX)#Al4$ zWO#JpS*AH3jfhTR?RbMVelzqAY7I=ZuynO*&&IWT>nuX}o#|pihk;D=+Ju!Ovtvxg z87q!=WzA-0YkKk;x5XkqzGIFd3`04sqRA-bjYQPea`&<=@RzXT`tXOcYg8vY|NdY6 zP>NBFNQ>EWI9N0i5fRXAS@UA4p~l?#Eyra zxCjjKXCs43169KSxR;nqRu`4vX(B;uItw#*T4Y~9^-j*Vp; zsXcGwidFkl9eQi!jYm}AzPCQ__vn^uiVj?Is@uQOL>}+ z6OO9ieM*v!rttLn2qt(I`TCbxpVVPT)&h_$IDxX~xuj}RTvNgE$;^u}Q| zaqzgwmFxQmxAu%)PaQ!T?x~@BSU^q09gcNw+Ff5v42b}ZU%W6m9sa(qBCu(d$*TPr z5x&kB)>zBT@OHe|HM$I2Eunb7o_Y~?)w)_-_gE7BnCWZS+%pUY>Gk)b`alEqLd*K0 z4f25VO~Ls#5N>lj?oaETpybYiOg#dc7*5L{+D$dsH9}j5m(JGWsUJI%%yec(x8ggV zMA!S%YJOh&w>3V)PnPBAhomI*Xc6fJnCccdtkWzb_Y#l!IA8{m?zo{|*=a`5_?ULh zuoqTxR|=9JhB1(Jut<`zghS|PI;T)o3&*NFt&Y{Tb{Dd7f%`m1u*ufX`g;*J(|bkL zdQ?eOg~(3TR*Xzl-hI1q+wh~4-OJCL8eMMAzedi;Hb#7%O~T-QqX&HZcuhpwn=-Cn z-ins;J|4%V6f?_?XsoCgXHpI#HNH6v7m=(P842D0Y0B0*EGkyZlq*gmh>e}ighsq< zQuM=r6%o4ZXqdm`oOhF2a+2aZ0r;$;W4>%@tD1Gib9urXQi8Q zE(T`*DQBh`C^uXhi}Qh0h&ZN|rU5N@B+zkD4Wh-}2u1)XazcdRiHZcb@mf}NXz0B! zzU7_4{O}You|Cr<6i!cK7%WP}xl#EU{twShS=|{IsOxVyw3J!foWH@o{k4SO&?<|( zI91a$#!uxP?QtRp%|@>Zv~*!EP1NahB#nfCjG4_QPv}Do&guL7^ThT>se^XXqy$|W zE)s>ntZU-3+YBeibNXR;TjZcLu2+wiQ|ZzkoP(pF)>Y}mW}p@JOha7@GJqhVxMciBN=mtCyYBP~utOavmI4rZO<<-Gy!5EP z-A2~HEZ74RqIf%L0?}p@6BG?thTQs_xcp{bx{_Yqqv)QgAE5O}!8^sUy%7Lh=jX1x z_rUA?n-n{5?MMBW`{(l0X}e1fKvqMZ_@adX{Zu7AS$%G^QYq9*W@wbUYCp1Nv{8*;c>`oD}kv&6>#w&~1~qGDAnnOvFwoMrhZW zi+U@E1q0ph0F?!EGj|@a1hHjI5u_S~%2a=*T@i0n4<5b^Znr1raQo<8Xs3ej>5$bD!Z}Y@bkW$Py|{IrFGoPPq?AJSU{DOZFB_? 
zq?|^Hh!dAq0J3WFkqDhXxgG6$1P8_`55bIY7|tG)hSXYHFPS7_)e(BuB*k!>ApmzG zQPA$hB7ohA$G|#{#(B^K-9!ea6Pq}po+O~$`$NwimOdeWU(WLcpMSK=>n#~;s@aNQ zuoqM9(45FWdkqvnv`HWkLP5~r6PDf3;_XqJn)Kj1(n9foZvW9#!4hv@Dd4g3P3;W4 z#Bn!xLyxnCjXL4p>`guBS{2*8I;f6WM;3FF5;!%NAXCJOVDTCki&w`346As*!n-Jp ztom<_m7KCT;pF2H(9jiq`rNxrdY2OnypvK~ME2mL$G(=VS0uGi_R;l8UfHEX+n;i_S;RqdNr zZ|`$H(w`#W)Obw#n6A>Z;ovwWYUcg+JHYb}@CEM_}< zj`)v2?t8*u@q*DMs&q&ju$~p z7XobyTZ(YNtxkhjGthLy)g3m_f&6;WAI~_4xngJc&fM!9-XCiK$+wf_G4dprOL_qt zR^AzB=QHGU;lH0CUw?{MN$l_!NWw{+89vs*{F-cvWG>#LPG9_5V}N`45by?(h6`t`R> zB}rWJOz){=OB;ZDJ>3u!(-0dQ(_kRMwCxj3Si9OT47Sp5JS=6CfU6K(M{?+Nj2--^b?7UEX9k2nJyalYZYsb7BZ>1Y@fUfxCV2U{q;H_cIHx(Pr(M>Q=V-J! zau%h=J6m0o%sg`Cj2o%BhT*-ckqpsLW`6#9DO;xN$w*o=ELPmU6H>(U@en6^`BqA_ z5E>x*#YDfeuFD;W!L9~JkHA(y7xJO-i<_^&tdfR?qkFeLx?#_A|vE%GK(q|D(KqPHo<7`y~qzr*lF1rEYkm)CK zU|<;yrurnt9zXb_A#7lp+dt#@(M*Va)=TDA^f_QBbwzvrB9m;eXZe$|W8_cK(eO!t zK`Rf%TDZ8G7i%e47q@Fb6w_x$3Ngoy6{1g%tXOe6AvA+2h0OwEwT5@AzhAZ0B|>2a zs-Y1A3w)tKSBrFtc|5{x7#q?@h{CU!qh@6sd*DQ;ltm z&)vpHoAPG=V)4N4k1Bz5(eKb`;;z z!G9<6GwU^t0HrKcK~BIzM)9 zcw|^fd1!omnSZE~9}!Ye8Xy2>>-ZcEmt(ipu}x`DoyEao@w%TM^_S0fuRNy#l-JWc zQ*(wh6EmA)oTaYXL0N%n>vWc0uVBssS?+UZOV5wH-6%qpG zWCZ|}g0Senw7y}vVN+3_)-NUo!3>GXEe{d{6#aBw7kg8&mX9;4E{%EE5R|ojbH%8d zUD2pjb;R^{S$zO$>m)gfaBA7eS-Wp>LL8Mi*#YBEcR9E>I3DSn{R~Q=aQG|DEq{OB4*BEk6?N`=xJQ=7r+1Q+MVo!cuL@SbAW=5Fh zK<#VNhU^nJE*NTm+i`9~5c=Sr;PK@2fUYR7PzUc+39FOqSxyB~>p1KH2Kgwo%tQvY z+Y>)8Bz>;gAp$51KpiyB+}&|4wf#7uW10Z8f$?!+((1f;zW#TWQ|XXal`*_KvDSYd zzSdal+_A;if6nVFa?+)`tcrP~WnHT(Z|-jx;6SE{l-O!{boe3K25y#mdztihUtij- z&1Gu&!N4d#Ym|eMh;>+yvLA8u>)wqtqK_S2f%z~xoV_O8F6e)PCleT6x*Mx(a0PgQjs%s1~PQe1t~7|kn%V$a$lw_KYEMLefdoeJ6hqAb?3)uGm`2SkpB}7auZ>w=0nW+QV{AnVEEER1C zJFCGbO*>JRbGxrka%+2;W?x;v&p$4xJ!e5$=FLjP9>pT;abijN zkzFgCQq?qZZdo+N(=CDc)PR;blx(9cb!1ynIUC6*57mmAkN6Drv6 zmWw7%d~Dw6qC=9x=y92jQA7?k+W1iH7Ti&1*o5kz7@pw2-ea%9F6Xuv*53t2>ok$_ zhC+0uKlGC9$<}JB^HW`iPthWCrg(#LtDp(7Y2Q#F3yWyVkWAR6TV 
zWL)MOjB7H7q|F1v`fonEe&vetH~YhGD1KmR=gQkApxA}M;LSg)93z2K0=iTYWLf8+JIBq7$;-KmXV@2cC6vaU*$i2s{wWA3=J zCcpm~fI>@Ofd1-}r$e?4Cv6D1&F z;m9@sfjWJ&pC!mxW_vspgGJ6}etN#%fy`MX7L$71c3B2X(0`KvPB;G9<>t1_>Yf#O z06_PU28pjt$HDLwkEC|QdlqCQA@hG-`P9sgZGA#6rFhRgHb=^SmKb#^p*VkxpVpGO z;pNc5jjNd{d1^^b+-BEkss+p`X9t|Z6)!*InRoi{RzYfvLX>%VQof`) z+I;iY6ZPly+D8Ke3f5k|Y+G7bqKGy}-Z)co@U=;J5@*sS6bu&@5|AJxqbZp?ESCd7 zWd^WxQ_IRd8ycR~$jZnaPDvTf%@d9Er{qc}10sEe2x7-OEakf}_#v-+)uV>0$6Z}D zkD*#`#sE7wWIUvsDI|AtQcC8O!-&EXx24I{%0JP(wR^%@|Ib$@nM*U2x<>&uo0`>^ z%q?vrUnAc!AJ{T7C_U)Oqg%iz$*s07hvVrty^4nK>a-7sR>6aEzq-42^`?59Mvf{U zsy2XWKCOlM3uBq*1M0F1jSS6C4>gi5$AviL2=ZMDxdhFv+ji{O4j>|@s_h~}i~ySfeKSL@cEtv|2P&iC{vn6O@8Y!YyLGBW`{kde_9 zBk0M^1%NL$17y&oIohx6+zF*TbEP24D1V(afIg-4Zbo8(D5XCyZFiA_G}xO2XAd|C z)|)zR!bkt*dU#`3%;4d*@D_v~?TD)vMscPOrkM&mf`CM8BZ@=dgb#$LAL;vMiIHQkf)wh05Kssb`I)xq>4$oQr9pnx=?-C_ zN13XClpdvlx~%5p?=MyY^4rwoy80*G-3qpRXD1cp8vB!lBQhBRE}xf&b_P1esgBrm z@`z#ON_fv*9hq^w%(bhm2v^3nvdQjYxmp5acaPSTPw||q=!o`cn$2ZsfaGw!e!k*d zBBEOP^*v1vo|O)N@=9xO8I6hq`uLPa=Q!OG$GMGpYJM20y&jsnjBlO`tweB*JFqmP zZEsAidOUbmtMfTia(1Nk=Z3;AcjOG?5$t;0!I!rvVZvXz8YuC1G3+l~4A77U_Zd7UBby`i+i3|a> zONwuKs;_hbY;Lc+e%ImLcQ-ZO48hCrcVVm1D9!M0WOtfB2po+2Mv1%d&Zwp@haDI> zFm~X8=W@EV<;*(-5c*xDx>XU|=M@=_;rF-QGe+ct9|2asNr&aQ1hA7>OIq06uO?qi zgvWScnfzp$UzETo@wXT5EciPZs{bg$@Wx1XYu)fzq{fVt?8V{cKuUeH4g9W>>)~Y? 
zg7>!a&PcFWb%-zWHZM%JnBfS2xTxjuOt$6n-~)PCItz5Pbh)~tMb&|fS~Iz66r@VC zu8{?8pqBP&FwmvdR`@1^u5V3?aP`^2Llj$Rt-V#*%F@*++U}MHJ<1XiL$scLa_kxq z7R!kg5D+JCc5VjMl~P|YZB2y4Sfcb%(k&HX^~%yRD|=|IE#=U&gIE1`w9lYx!doq^ zhGI0OeGOD-BTKUmQe_%tCTG-E2NvD@8G`?33fCWNq8y!5BE-L9Fzg=$SdnL3yF?Hq zcjY*j#*pZN8-ix9>2%{uTHfkcm7SYWSju;f_rCCK%Om>Ch({W%Pprtl(bQBhf5K2_ zy)C5n4mPwTq%ov;giKmX)Tj%rhNCQ6eQQ^{&+XgR@E*M+!O9qKqt9e_ih;tP9{1xj z%!-Ff@T~9`doCAwyAcu|*5S_)o;1ud?Ec=K2ppfJs(wa|jrU&(h25U|j|J)+aV+VDdWOM*bYUYAIipcX)~4cIP6bXk2SJIZ}xwuw_!$xTQ=z~$rOXwVTFzv@lx#y?-Y$N73?%rD1m zo-a(yIG;3&n z)5nYqpm(Otw;1sg6RKT0zJ0ryLMgYSwQ9bp{6$}1`Ac)HT#4u(Bkx!~} zW-6;xJ6WXF0wq*cJ}xS)urJe-A@&OkOEISBq@fUS8zjceFce!#tR@l)iTep7gmo%J zdO+iH*94y&FuwC@2z#JpA_Hh!r<-GAo8#jN3?!Ha(A&6lIyV3c1`jifp%Zw*Br*vB_s&y#SvdFas_sOP^s_ihU~_0!CukM@8ht|K0@yx zAwSkF8D(3*d7&8X1>1++Vt}0Qw=y`gII>0?%t1%XW2?5bZHP6J_GQ}J>Kz=REw1H6 zKS>@tl$-6F;o}}g;9{MVQv$<-(n1769w9uYCs{*`U$KfD1v-oENV3H}qMfwj5gvw1 zuf-$(&itQoaJx0dlk(Kr)cGl8RaoAkntTPcP)C?@Ys4<2fJTxJmcn?@&Y~YVsUV zQPYz-vRy^RvYqBCE>60g#YM7RT}HI1Lxa^35y2(ukjSXwkfe%ylGMeaQ2@^epijE_ z`nXNZ_Qc$?!hXD9gDU!~vDMnf+ctO$I>0_I6otGWDD#$EV&iTj5Dzy0P)mOHClCTq znWHG9gTpv#Gzis9vPr59sebhmP5cR^&zyx=KCz5@{My&7IYl{uZt!q@>Jma4Lcjar z#&h-N1cXr|;B%>%0SAWxOf3!y8Y!XLMxBh!QAGq?+G@=~4>*AIMV-bwSQ4bjV#-32 z!sP9xt)X@HP*;q#?4JEn!(a~1BQDX8=`V1Pb*K8^2neUhC@){{7;g@*$`>8)XN{KH zS^QH)2xe>{T4LjFs^+$qm2ELmWzYU^%Y$Y^AUbT=-TIcTHU{GDz;a+*8_7d7XU2~I z%zF5OpK~?T9msqc^*Q~(sEldbVwAhzY$lNQal$boSc;Be9Tqm1&e>!sK&NZQrZ)KH zjm1lMnOT?E)lfSx+SufgzRW(IQFa4!h&|?XTF35AjT{GK)_D|WVgiGzf-Fl~MI(R+ z>NR`BoAf5GDDqjGd9B+mwMp?BEe>%m+TQj@S_8+8S*aRrqb4yytHmRy{7&iJ6c0yN zeY}_)RJs(oqfGneIvP`WfvSy(;z9k}iY>{B13B4={aeHpZ#y5TNub2H&bW?!&RDlV zZu`e1= z*eP%EjUE@R^5U+JPZU&yhgXPp{4seSlkcAw=aJ%c%M(kej4}FNUjJx%?gkN_$a~^z_~GlrF@U@InGur?Iq!B;71Iy>kVcEfcZ8= z$bzL=aCckn_4{S*?FvTP=)+S}5g%REUi3A?9EPYIEk`)*8}PpxPv;Pk59`UcxcZPB z{-^YCxlO@K#;^3n2|A@x0$M&h+foAJYey?`A=xS*fULM-U=>=C@5@mHo0bs_roFf@ zxpvE0=qE}YWs+i$+=DAZKh0UHt6lzm`DKB> 
zcCPxJxu3QAfPdg}@pkEW=6^PjSsZ)Y*8n|wv|*XajJ?n3SzDC+)2dYpuqB0N{XIxt zP{ViT$9Aq8TmRSl_jr3_>tef|BWfZ9oh888G3S`U(g#MeX6^>9c3n z)j0|vx?n+QBUl6k;KjYd$OF}E3Pt$)0}lRt5w`w>pTlt<{wAPKd8c+++jMxRfL}>NIC{nxIODM+!Q%k+IqtStjWyj zUbj_#npti6latd`@``%ONeDPesb4|9ns59ka$`L$kk4JRlZ9-_X4B~k1`mLJlHNTsI-DBR`%69>Bv%L0sbzuAleb#p{lc~Fmp#h zl>ZkDm9p5^IqKa{Z@3)KVf_g>@*g4IDr0eIqZbVSIPC$~Kx04y^#*X|E})$22%S+i zs8!Mzwj<>c8FeTxmVxQ(BGErPp7bw_aZ^<|eO~RdJ`V}>b@8fymIJL)yX*6Sv2|mg zrB!Ng`huFN&x6s|#fSb`z$()?xuyYXj2$p`z?cEp`5Hkrra}M^UsaL;P17ZM&KmQ= zn5x9{i!?e3Dq8XZQ=C0oPE~GmFK-FfEa|&JpjJr}vnNS%Tn$;Xp^^v?6*VV8(&Z#o zD^2&W=j``H)t6eDrei`~FRlQ8yA+oP12jvp2swR)zQSGwRWpU%dPTUB)SXM=|LO(v zBfMbVH|~a|zL8h-As0LwYqn<@db=n0mgwTlZ~y8~J)JM3I(^aq_1v!aMKA;OjY;g( zf2CQweP(%{UG=%1g91I72W{Fw2UB_d7YfnTw&y|Gz~-rfvz`c|4K3w~jE~+n2TQl9 z>E}V(Km}VxLk$9LfGb@H(9dGpS2nc+t*1RGasL^uAM;2AXgw#ct5EGg>osBt#pjcU zR>IL&B8bvkK_eE8szrd-1L&ClQOKAXj$E9>|NkbG&>g665X&X@&eTsG*B1@E1RK}A z5$Mf{WYm%F)uyZm#JWEp{VzBB$85WMwW}lhBojc(uKh32*5%lQe7j_00RX;y=*@2H zPfXOmrGJ0r-3z4$v!+pefS)_%>@QvX9~`5v@e>o*YtTK~cmKQVASPoL;+kToO+>Hv z?{k-&@WhPMj83>}!JIRBo(>Hr?DHQcD3MAcjAf8_g^t#A5GkDq9={E0MS@0{LhMMa zOHq0^t`evnO_L`5W;97K;}o6)<5z;#@PA{YgDOO0Mj|myIZ*b#7|MH1uF(of+dCLv zRKM!c8nW*uLW*ZLd!!}LlM^2w2L?i=%mMVy0Tef4Gh&%wfx+*ki@g328ZO$|I4}b} z!}fp$22V&94I`%Mk%ftLD$cdkq%<%vF;>~0!>Dz@nd#D~B-NSTGW}_pATq<~iBanT zFP--&8{A_8SSe*LAEb4sEoOi)sZWw%CSCaJn697u-56VfES@i@p!ZQ4iS4QSpiA+EX&UmO=sbMJE&!>Sc9FNSu&SP|?sibK%O(a_&5M^1|Sa={!EiL5OlP`bS~iEv<3~hBMaRU( z#U}`aqQoR|a!P7idWIxZnw6cCn>Vi2N+%6@r%j)3>uuHLkru{Cqc=x~^PaUfSp5C_ z7JFQE*(Em_?DXi`E$v} W$IF>aZ(pA4YyQ(diTN+Q1ONc)ajHWA diff --git a/web/src/App.tsx b/web/src/App.tsx index b07608c311..74d225b497 100644 --- a/web/src/App.tsx +++ b/web/src/App.tsx @@ -6,6 +6,9 @@ import { Sparkles, Terminal, Globe, Database, Shield, Wrench, Zap, Heart, Star, Code, Eye, } from "lucide-react"; +import { SelectionSwitcher } from "@nous-research/ui/ui/components/selection-switcher"; +import { cn } from "@/lib/utils"; +import { Backdrop } from 
"@/components/Backdrop"; import StatusPage from "@/pages/StatusPage"; import ConfigPage from "@/pages/ConfigPage"; import EnvPage from "@/pages/EnvPage"; @@ -20,17 +23,6 @@ import { useI18n } from "@/i18n"; import { usePlugins } from "@/plugins"; import type { RegisteredPlugin } from "@/plugins"; -// --------------------------------------------------------------------------- -// Built-in nav items -// --------------------------------------------------------------------------- - -interface NavItem { - path: string; - label: string; - labelKey?: string; - icon: React.ComponentType<{ className?: string }>; -} - const BUILTIN_NAV: NavItem[] = [ { path: "/", labelKey: "status", label: "Status", icon: Activity }, { path: "/sessions", labelKey: "sessions", label: "Sessions", icon: MessageSquare }, @@ -42,11 +34,8 @@ const BUILTIN_NAV: NavItem[] = [ { path: "/env", labelKey: "keys", label: "Keys", icon: KeyRound }, ]; -// --------------------------------------------------------------------------- -// Helpers -// --------------------------------------------------------------------------- - -/** Map of icon names plugins can use. Covers common choices without importing all of lucide. */ +// Plugins can reference any of these by name in their manifest — keeps bundle +// size sane vs. importing the full lucide-react set. const ICON_MAP: Record> = { Activity, BarChart3, Clock, FileText, KeyRound, MessageSquare, Package, Settings, Puzzle, @@ -54,12 +43,10 @@ const ICON_MAP: Record> = { Wrench, Zap, Heart, Star, Code, Eye, }; -/** Resolve a Lucide icon name to a component, fallback to Puzzle. */ function resolveIcon(name: string): React.ComponentType<{ className?: string }> { return ICON_MAP[name] ?? Puzzle; } -/** Insert plugin nav items at the position specified in their manifest. 
*/ function buildNavItems(builtIn: NavItem[], plugins: RegisteredPlugin[]): NavItem[] { const items = [...builtIn]; @@ -89,10 +76,6 @@ function buildNavItems(builtIn: NavItem[], plugins: RegisteredPlugin[]): NavItem return items; } -// --------------------------------------------------------------------------- -// App -// --------------------------------------------------------------------------- - export default function App() { const { t } = useI18n(); const { plugins } = usePlugins(); @@ -103,15 +86,26 @@ export default function App() { ); return ( -
-
-
+
+ + -
-
-
- - Hermes Agent +
+
+
+ + Hermes +
+ Agent
@@ -122,22 +116,36 @@ export default function App() { to={path} end={path === "/"} className={({ isActive }) => - `group relative inline-flex items-center gap-1 sm:gap-1.5 border-r border-border px-2.5 sm:px-4 py-2 font-display text-[0.65rem] sm:text-[0.8rem] tracking-[0.12em] uppercase whitespace-nowrap transition-colors cursor-pointer shrink-0 focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring ${ + cn( + "group relative inline-flex items-center gap-1.5 shrink-0", + "border-r border-current/20 px-2.5 sm:px-4 py-2", + "font-mondwest text-[0.65rem] sm:text-[0.8rem] tracking-[0.12em]", + "whitespace-nowrap transition-colors cursor-pointer", + "focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-midground", isActive - ? "text-foreground" - : "text-muted-foreground hover:text-foreground" - }` + ? "text-midground" + : "opacity-60 hover:opacity-100", + ) } > {({ isActive }) => ( <> - + {labelKey ? (t.app.nav as Record)[labelKey] ?? label : label} - + + + {isActive && ( - + )} )} @@ -145,17 +153,17 @@ export default function App() { ))} -
+
- + {t.app.webUi}
-
+
} /> } /> @@ -166,7 +174,6 @@ export default function App() { } /> } /> - {/* Plugin routes */} {plugins.map(({ manifest, component: PluginComponent }) => (
-