Merge branch 'main' into fix/tui-provider-resolution

This commit is contained in:
Kaio 2026-04-22 11:47:49 -07:00 committed by GitHub
commit ec374c0599
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
625 changed files with 68938 additions and 11055 deletions

8
.github/actions/nix-setup/action.yml vendored Normal file
View file

@ -0,0 +1,8 @@
name: 'Setup Nix'
description: 'Install Nix with DeterminateSystems and enable magic-nix-cache'
runs:
using: composite
steps:
- uses: DeterminateSystems/nix-installer-action@ef8a148080ab6020fd15196c2084a2eea5ff2d25 # v22
- uses: DeterminateSystems/magic-nix-cache-action@565684385bcd71bad329742eefe8d12f2e765b39 # v13

View file

@ -3,8 +3,13 @@ name: Docker Build and Publish
on:
push:
branches: [main]
pull_request:
branches: [main]
paths:
- '**/*.py'
- 'pyproject.toml'
- 'uv.lock'
- 'Dockerfile'
- 'docker/**'
- '.github/workflows/docker-publish.yml'
release:
types: [published]
@ -49,6 +54,14 @@ jobs:
- name: Test image starts
run: |
# The image runs as the hermes user (UID 10000). GitHub Actions
# creates /tmp/hermes-test root-owned by default, which hermes
# can't write to — chown it to match the in-container UID before
# bind-mounting. Real users doing `docker run -v ~/.hermes:...`
# with their own UID hit the same issue and have their own
# remediations (HERMES_UID env var, or chown locally).
mkdir -p /tmp/hermes-test
sudo chown -R 10000:10000 /tmp/hermes-test
docker run --rm \
-v /tmp/hermes-test:/opt/data \
--entrypoint /opt/hermes/docker/entrypoint.sh \

View file

@ -0,0 +1,68 @@
name: Nix Lockfile Check
on:
pull_request:
workflow_dispatch:
permissions:
contents: read
pull-requests: write
concurrency:
group: nix-lockfile-check-${{ github.ref }}
cancel-in-progress: true
jobs:
check:
runs-on: ubuntu-latest
timeout-minutes: 20
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: ./.github/actions/nix-setup
- name: Resolve head SHA
id: sha
shell: bash
run: |
FULL="${{ github.event.pull_request.head.sha || github.sha }}"
echo "full=$FULL" >> "$GITHUB_OUTPUT"
echo "short=${FULL:0:7}" >> "$GITHUB_OUTPUT"
- name: Check lockfile hashes
id: check
continue-on-error: true
env:
LINK_SHA: ${{ steps.sha.outputs.full }}
run: nix run .#fix-lockfiles -- --check
- name: Post sticky PR comment (stale)
if: steps.check.outputs.stale == 'true' && github.event_name == 'pull_request'
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
with:
header: nix-lockfile-check
message: |
### ⚠️ npm lockfile hash out of date
Checked against commit [`${{ steps.sha.outputs.short }}`](${{ github.server_url }}/${{ github.repository }}/commit/${{ steps.sha.outputs.full }}) (PR head at check time).
The `hash = "sha256-..."` line in these nix files no longer matches the committed `package-lock.json`:
${{ steps.check.outputs.report }}
#### Apply the fix
- [ ] **Apply lockfile fix** — tick to push a commit with the correct hashes to this PR branch
- Or [run the Nix Lockfile Fix workflow](${{ github.server_url }}/${{ github.repository }}/actions/workflows/nix-lockfile-fix.yml) manually (pass PR `#${{ github.event.pull_request.number }}`)
- Or locally: `nix run .#fix-lockfiles -- --apply` and commit the diff
- name: Clear sticky PR comment (resolved)
if: steps.check.outputs.stale == 'false' && github.event_name == 'pull_request'
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
with:
header: nix-lockfile-check
delete: true
- name: Fail if stale
if: steps.check.outputs.stale == 'true'
run: exit 1

149
.github/workflows/nix-lockfile-fix.yml vendored Normal file
View file

@ -0,0 +1,149 @@
name: Nix Lockfile Fix
on:
workflow_dispatch:
inputs:
pr_number:
description: 'PR number to fix (leave empty to run on the selected branch)'
required: false
type: string
issue_comment:
types: [edited]
permissions:
contents: write
pull-requests: write
concurrency:
group: nix-lockfile-fix-${{ github.event.issue.number || github.event.inputs.pr_number || github.ref }}
cancel-in-progress: false
jobs:
fix:
# Run on manual dispatch OR when a task-list checkbox in the sticky
# lockfile-check comment flips from `[ ]` to `[x]`.
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'issue_comment'
&& github.event.issue.pull_request != null
&& contains(github.event.comment.body, '[x] **Apply lockfile fix**')
&& !contains(github.event.changes.body.from, '[x] **Apply lockfile fix**'))
runs-on: ubuntu-latest
timeout-minutes: 25
steps:
- name: Authorize & resolve PR
id: resolve
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
with:
script: |
// 1. Verify the actor has write access — applies to both checkbox
// clicks and manual dispatch.
const { data: perm } =
await github.rest.repos.getCollaboratorPermissionLevel({
owner: context.repo.owner,
repo: context.repo.repo,
username: context.actor,
});
if (!['admin', 'write', 'maintain'].includes(perm.permission)) {
core.setFailed(
`${context.actor} lacks write access (has: ${perm.permission})`
);
return;
}
// 2. Resolve which ref to check out.
let prNumber = '';
if (context.eventName === 'issue_comment') {
prNumber = String(context.payload.issue.number);
} else if (context.eventName === 'workflow_dispatch') {
prNumber = context.payload.inputs.pr_number || '';
}
if (!prNumber) {
core.setOutput('ref', context.ref.replace(/^refs\/heads\//, ''));
core.setOutput('repo', context.repo.repo);
core.setOutput('owner', context.repo.owner);
core.setOutput('pr', '');
return;
}
const { data: pr } = await github.rest.pulls.get({
owner: context.repo.owner,
repo: context.repo.repo,
pull_number: Number(prNumber),
});
core.setOutput('ref', pr.head.ref);
core.setOutput('repo', pr.head.repo.name);
core.setOutput('owner', pr.head.repo.owner.login);
core.setOutput('pr', String(pr.number));
# Wipe the sticky lockfile-check comment to a "running" state as soon
# as the job is authorized, so the user sees their click was picked up
# before the ~minute of nix build work.
- name: Mark sticky as running
if: steps.resolve.outputs.pr != ''
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
with:
header: nix-lockfile-check
number: ${{ steps.resolve.outputs.pr }}
message: |
### 🔄 Applying lockfile fix…
Triggered by @${{ github.actor }} — [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}).
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
repository: ${{ steps.resolve.outputs.owner }}/${{ steps.resolve.outputs.repo }}
ref: ${{ steps.resolve.outputs.ref }}
token: ${{ secrets.GITHUB_TOKEN }}
fetch-depth: 0
- uses: ./.github/actions/nix-setup
- name: Apply lockfile hashes
id: apply
run: nix run .#fix-lockfiles -- --apply
- name: Commit & push
if: steps.apply.outputs.changed == 'true'
shell: bash
run: |
set -euo pipefail
git config user.name 'github-actions[bot]'
git config user.email '41898282+github-actions[bot]@users.noreply.github.com'
git add nix/tui.nix nix/web.nix
git commit -m "fix(nix): refresh npm lockfile hashes"
git push
- name: Update sticky (applied)
if: steps.apply.outputs.changed == 'true' && steps.resolve.outputs.pr != ''
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
with:
header: nix-lockfile-check
number: ${{ steps.resolve.outputs.pr }}
message: |
### ✅ Lockfile fix applied
Pushed a commit refreshing the npm lockfile hashes — [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}).
- name: Update sticky (already current)
if: steps.apply.outputs.changed == 'false' && steps.resolve.outputs.pr != ''
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
with:
header: nix-lockfile-check
number: ${{ steps.resolve.outputs.pr }}
message: |
### ✅ Lockfile hashes already current
Nothing to commit — [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}).
- name: Update sticky (failed)
if: failure() && steps.resolve.outputs.pr != ''
uses: marocchino/sticky-pull-request-comment@52423e01640425a022ef5fd42c6fb5f633a02728 # v2.9.1
with:
header: nix-lockfile-check
number: ${{ steps.resolve.outputs.pr }}
message: |
### ❌ Lockfile fix failed
See the [workflow run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for logs.

View file

@ -4,15 +4,6 @@ on:
push:
branches: [main]
pull_request:
paths:
- 'flake.nix'
- 'flake.lock'
- 'nix/**'
- 'pyproject.toml'
- 'uv.lock'
- 'hermes_cli/**'
- 'run_agent.py'
- 'acp_adapter/**'
permissions:
contents: read
@ -29,9 +20,8 @@ jobs:
runs-on: ${{ matrix.os }}
timeout-minutes: 30
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: DeterminateSystems/nix-installer-action@ef8a148080ab6020fd15196c2084a2eea5ff2d25 # v22
- uses: DeterminateSystems/magic-nix-cache-action@565684385bcd71bad329742eefe8d12f2e765b39 # v13
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
- uses: ./.github/actions/nix-setup
- name: Check flake
if: runner.os == 'Linux'
run: nix flake check --print-build-logs

View file

@ -3,14 +3,31 @@ name: Supply Chain Audit
on:
pull_request:
types: [opened, synchronize, reopened]
paths:
- '**/*.py'
- '**/*.pth'
- '**/setup.py'
- '**/setup.cfg'
- '**/sitecustomize.py'
- '**/usercustomize.py'
- '**/__init__.pth'
permissions:
pull-requests: write
contents: read
# Narrow, high-signal scanner. Only fires on critical indicators of supply
# chain attacks (e.g. the litellm-style payloads). Low-signal heuristics
# (plain base64, plain exec/eval, dependency/Dockerfile/workflow edits,
# Actions version unpinning, outbound POST/PUT) were intentionally
# removed — they fired on nearly every PR and trained reviewers to ignore
# the scanner. Keep this file's checks ruthlessly narrow: if you find
# yourself adding WARNING-tier patterns here again, make a separate
# advisory-only workflow instead.
jobs:
scan:
name: Scan PR for supply chain risks
name: Scan PR for critical supply chain risks
runs-on: ubuntu-latest
steps:
- name: Checkout
@ -18,7 +35,7 @@ jobs:
with:
fetch-depth: 0
- name: Scan diff for suspicious patterns
- name: Scan diff for critical patterns
id: scan
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@ -28,19 +45,19 @@ jobs:
BASE="${{ github.event.pull_request.base.sha }}"
HEAD="${{ github.event.pull_request.head.sha }}"
# Get the full diff (added lines only)
# Added lines only, excluding lockfiles.
DIFF=$(git diff "$BASE".."$HEAD" -- . ':!uv.lock' ':!*.lock' ':!package-lock.json' ':!yarn.lock' || true)
FINDINGS=""
CRITICAL=false
# --- .pth files (auto-execute on Python startup) ---
# The exact mechanism used in the litellm supply chain attack:
# https://github.com/BerriAI/litellm/issues/24512
PTH_FILES=$(git diff --name-only "$BASE".."$HEAD" | grep '\.pth$' || true)
if [ -n "$PTH_FILES" ]; then
CRITICAL=true
FINDINGS="${FINDINGS}
### 🚨 CRITICAL: .pth file added or modified
Python \`.pth\` files in \`site-packages/\` execute automatically when the interpreter starts — no import required. This is the exact mechanism used in the [litellm supply chain attack](https://github.com/BerriAI/litellm/issues/24512).
Python \`.pth\` files in \`site-packages/\` execute automatically when the interpreter starts — no import required.
**Files:**
\`\`\`
@ -49,13 +66,12 @@ jobs:
"
fi
# --- base64 + exec/eval combo (the litellm attack pattern) ---
# --- base64 decode + exec/eval on the same line (the litellm attack pattern) ---
B64_EXEC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'base64\.(b64decode|decodebytes|urlsafe_b64decode)' | grep -iE 'exec\(|eval\(' | head -10 || true)
if [ -n "$B64_EXEC_HITS" ]; then
CRITICAL=true
FINDINGS="${FINDINGS}
### 🚨 CRITICAL: base64 decode + exec/eval combo
This is the exact pattern used in the [litellm supply chain attack](https://github.com/BerriAI/litellm/issues/24512) — base64-decoded strings passed to exec/eval to hide credential-stealing payloads.
Base64-decoded strings passed directly to exec/eval — the signature of hidden credential-stealing payloads.
**Matches:**
\`\`\`
@ -64,41 +80,12 @@ jobs:
"
fi
# --- base64 decode/encode (alone — legitimate uses exist) ---
B64_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'base64\.(b64decode|b64encode|decodebytes|encodebytes|urlsafe_b64decode)|atob\(|btoa\(|Buffer\.from\(.*base64' | head -20 || true)
if [ -n "$B64_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: base64 encoding/decoding detected
Base64 has legitimate uses (images, JWT, etc.) but is also commonly used to obfuscate malicious payloads. Verify the usage is appropriate.
**Matches (first 20):**
\`\`\`
${B64_HITS}
\`\`\`
"
fi
# --- exec/eval with string arguments ---
EXEC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E '(exec|eval)\s*\(' | grep -v '^\+\s*#' | grep -v 'test_\|mock\|assert\|# ' | head -20 || true)
if [ -n "$EXEC_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: exec() or eval() usage
Dynamic code execution can hide malicious behavior, especially when combined with base64 or network fetches.
**Matches (first 20):**
\`\`\`
${EXEC_HITS}
\`\`\`
"
fi
# --- subprocess with encoded/obfuscated commands ---
PROC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E 'subprocess\.(Popen|call|run)\s*\(' | grep -iE 'base64|decode|encode|\\x|chr\(' | head -10 || true)
# --- subprocess with encoded/obfuscated command argument ---
PROC_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -E 'subprocess\.(Popen|call|run)\s*\(' | grep -iE 'base64|\\x[0-9a-f]{2}|chr\(' | head -10 || true)
if [ -n "$PROC_HITS" ]; then
CRITICAL=true
FINDINGS="${FINDINGS}
### 🚨 CRITICAL: subprocess with encoded/obfuscated command
Subprocess calls with encoded arguments are a strong indicator of payload execution.
Subprocess calls whose command strings are base64- or hex-encoded are a strong indicator of payload execution.
**Matches:**
\`\`\`
@ -107,25 +94,12 @@ jobs:
"
fi
# --- Network calls to non-standard domains ---
EXFIL_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'requests\.(post|put)\(|httpx\.(post|put)\(|urllib\.request\.urlopen' | grep -v '^\+\s*#' | grep -v 'test_\|mock\|assert' | head -10 || true)
if [ -n "$EXFIL_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: Outbound network calls (POST/PUT)
Outbound POST/PUT requests in new code could be data exfiltration. Verify the destination URLs are legitimate.
**Matches (first 10):**
\`\`\`
${EXFIL_HITS}
\`\`\`
"
fi
# --- setup.py / setup.cfg install hooks ---
SETUP_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '(setup\.py|setup\.cfg|__init__\.pth|sitecustomize\.py|usercustomize\.py)$' || true)
# --- Install-hook files (setup.py/sitecustomize/usercustomize/__init__.pth) ---
# These execute during pip install or interpreter startup.
SETUP_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '(^|/)(setup\.py|setup\.cfg|sitecustomize\.py|usercustomize\.py|__init__\.pth)$' || true)
if [ -n "$SETUP_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: Install hook files modified
### 🚨 CRITICAL: Install-hook file added or modified
These files can execute code during package installation or interpreter startup.
**Files:**
@ -135,114 +109,31 @@ jobs:
"
fi
# --- Compile/marshal/pickle (code object injection) ---
MARSHAL_HITS=$(echo "$DIFF" | grep -n '^\+' | grep -iE 'marshal\.loads|pickle\.loads|compile\(' | grep -v '^\+\s*#' | grep -v 'test_\|re\.compile\|ast\.compile' | head -10 || true)
if [ -n "$MARSHAL_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: marshal/pickle/compile usage
These can deserialize or construct executable code objects.
**Matches:**
\`\`\`
${MARSHAL_HITS}
\`\`\`
"
fi
# --- CI/CD workflow files modified ---
WORKFLOW_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '\.github/workflows/.*\.ya?ml$' || true)
if [ -n "$WORKFLOW_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: CI/CD workflow files modified
Changes to workflow files can alter build pipelines, inject steps, or modify permissions. Verify no unauthorized actions or secrets access were added.
**Files:**
\`\`\`
${WORKFLOW_HITS}
\`\`\`
"
fi
# --- Dockerfile / container build files modified ---
DOCKER_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -iE '(Dockerfile|\.dockerignore|docker-compose)' || true)
if [ -n "$DOCKER_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: Container build files modified
Changes to Dockerfiles or compose files can alter base images, add build steps, or expose ports. Verify base image pins and build commands.
**Files:**
\`\`\`
${DOCKER_HITS}
\`\`\`
"
fi
# --- Dependency manifest files modified ---
DEP_HITS=$(git diff --name-only "$BASE".."$HEAD" | grep -E '(pyproject\.toml|requirements.*\.txt|package\.json|Gemfile|go\.mod|Cargo\.toml)$' || true)
if [ -n "$DEP_HITS" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: Dependency manifest files modified
Changes to dependency files can introduce new packages or change version pins. Verify all dependency changes are intentional and from trusted sources.
**Files:**
\`\`\`
${DEP_HITS}
\`\`\`
"
fi
# --- GitHub Actions version unpinning (mutable tags instead of SHAs) ---
ACTIONS_UNPIN=$(echo "$DIFF" | grep -n '^\+' | grep 'uses:' | grep -v '#' | grep -E '@v[0-9]' | head -10 || true)
if [ -n "$ACTIONS_UNPIN" ]; then
FINDINGS="${FINDINGS}
### ⚠️ WARNING: GitHub Actions with mutable version tags
Actions should be pinned to full commit SHAs (not \`@v4\`, \`@v5\`). Mutable tags can be retargeted silently if a maintainer account is compromised.
**Matches:**
\`\`\`
${ACTIONS_UNPIN}
\`\`\`
"
fi
# --- Output results ---
if [ -n "$FINDINGS" ]; then
echo "found=true" >> "$GITHUB_OUTPUT"
if [ "$CRITICAL" = true ]; then
echo "critical=true" >> "$GITHUB_OUTPUT"
else
echo "critical=false" >> "$GITHUB_OUTPUT"
fi
# Write findings to a file (multiline env vars are fragile)
echo "$FINDINGS" > /tmp/findings.md
else
echo "found=false" >> "$GITHUB_OUTPUT"
echo "critical=false" >> "$GITHUB_OUTPUT"
fi
- name: Post warning comment
- name: Post critical finding comment
if: steps.scan.outputs.found == 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
SEVERITY="⚠️ Supply Chain Risk Detected"
if [ "${{ steps.scan.outputs.critical }}" = "true" ]; then
SEVERITY="🚨 CRITICAL Supply Chain Risk Detected"
fi
BODY="## 🚨 CRITICAL Supply Chain Risk Detected
BODY="## ${SEVERITY}
This PR contains patterns commonly associated with supply chain attacks. This does **not** mean the PR is malicious — but these patterns require careful human review before merging.
This PR contains a pattern that has been used in real supply chain attacks. A maintainer must review the flagged code carefully before merging.
$(cat /tmp/findings.md)
---
*Automated scan triggered by [supply-chain-audit](/.github/workflows/supply-chain-audit.yml). If this is a false positive, a maintainer can approve after manual review.*"
*Scanner only fires on high-signal indicators: .pth files, base64+exec/eval combos, subprocess with encoded commands, or install-hook files. Low-signal warnings were removed intentionally — if you're seeing this comment, the finding is worth inspecting.*"
gh pr comment "${{ github.event.pull_request.number }}" --body "$BODY" || echo "::warning::Could not post PR comment (expected for fork PRs — GITHUB_TOKEN is read-only)"
- name: Fail on critical findings
if: steps.scan.outputs.critical == 'true'
if: steps.scan.outputs.found == 'true'
run: |
echo "::error::CRITICAL supply chain risk patterns detected in this PR. See the PR comment for details."
exit 1

View file

@ -3,8 +3,14 @@ name: Tests
on:
push:
branches: [main]
paths-ignore:
- '**/*.md'
- 'docs/**'
pull_request:
branches: [main]
paths-ignore:
- '**/*.md'
- 'docs/**'
permissions:
contents: read
@ -17,7 +23,7 @@ concurrency:
jobs:
test:
runs-on: ubuntu-latest
timeout-minutes: 10
timeout-minutes: 20
steps:
- name: Checkout code
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4

5
.gitignore vendored
View file

@ -54,6 +54,11 @@ environments/benchmarks/evals/
# Web UI build output
hermes_cli/web_dist/
# Web UI assets — synced from @nous-research/ui at build time via
# `npm run sync-assets` (see web/package.json).
web/public/fonts/
web/public/ds-assets/
# Release script temp files
.release_notes.md
mini-swe-agent/

View file

@ -566,3 +566,52 @@ python -m pytest tests/ -q -n 4
Worker count above 4 will surface test-ordering flakes that CI never sees.
Always run the full suite before pushing changes.
### Don't write change-detector tests
A test is a **change-detector** if it fails whenever data that is **expected
to change** gets updated — model catalogs, config version numbers,
enumeration counts, hardcoded lists of provider models. These tests add no
behavioral coverage; they just guarantee that routine source updates break
CI and cost engineering time to "fix."
**Do not write:**
```python
# catalog snapshot — breaks every model release
assert "gemini-2.5-pro" in _PROVIDER_MODELS["gemini"]
assert "MiniMax-M2.7" in models
# config version literal — breaks every schema bump
assert DEFAULT_CONFIG["_config_version"] == 21
# enumeration count — breaks every time a skill/provider is added
assert len(_PROVIDER_MODELS["huggingface"]) == 8
```
**Do write:**
```python
# behavior: does the catalog plumbing work at all?
assert "gemini" in _PROVIDER_MODELS
assert len(_PROVIDER_MODELS["gemini"]) >= 1
# behavior: does migration bump the user's version to current latest?
assert raw["_config_version"] == DEFAULT_CONFIG["_config_version"]
# invariant: no plan-only model leaks into the legacy list
assert not (set(moonshot_models) & coding_plan_only_models)
# invariant: every model in the catalog has a context-length entry
for m in _PROVIDER_MODELS["huggingface"]:
assert m.lower() in DEFAULT_CONTEXT_LENGTHS_LOWER
```
The rule: if the test reads like a snapshot of current data, delete it. If
it reads like a contract about how two pieces of data must relate, keep it.
When a PR adds a new provider/model and you want a test, make the test
assert the relationship (e.g. "catalog entries all have context lengths"),
not the specific names.
Reviewers should reject new change-detector tests; authors should convert
them into invariants before re-requesting review.

View file

@ -27,12 +27,10 @@ WORKDIR /opt/hermes
# Copy only package manifests first so npm install + Playwright are cached
# unless the lockfiles themselves change.
COPY package.json package-lock.json ./
COPY scripts/whatsapp-bridge/package.json scripts/whatsapp-bridge/package-lock.json scripts/whatsapp-bridge/
COPY web/package.json web/package-lock.json web/
RUN npm install --prefer-offline --no-audit && \
npx playwright install --with-deps chromium --only-shell && \
(cd scripts/whatsapp-bridge && npm install --prefer-offline --no-audit) && \
(cd web && npm install --prefer-offline --no-audit) && \
npm cache clean --force

View file

@ -20,6 +20,46 @@ from pathlib import Path
from hermes_constants import get_hermes_home
# Methods clients send as periodic liveness probes. They are not part of the
# ACP schema, so the acp router correctly returns JSON-RPC -32601 to the
# caller — but the supervisor task that dispatches the request then surfaces
# the raised RequestError via ``logging.exception("Background task failed")``,
# which dumps a traceback to stderr every probe interval. Clients like
# acp-bridge already treat the -32601 response as "agent alive", so the
# traceback is pure noise. We keep the protocol response intact and only
# silence the stderr noise for this specific benign case.
_BENIGN_PROBE_METHODS = frozenset({"ping", "health", "healthcheck"})
class _BenignProbeMethodFilter(logging.Filter):
"""Suppress acp 'Background task failed' tracebacks caused by unknown
liveness-probe methods (e.g. ``ping``) while leaving every other
background-task error including method_not_found for any non-probe
method visible in stderr.
"""
def filter(self, record: logging.LogRecord) -> bool:
if record.getMessage() != "Background task failed":
return True
exc_info = record.exc_info
if not exc_info:
return True
exc = exc_info[1]
# Imported lazily so this module stays importable when the optional
# ``agent-client-protocol`` dependency is not installed.
try:
from acp.exceptions import RequestError
except ImportError:
return True
if not isinstance(exc, RequestError):
return True
if getattr(exc, "code", None) != -32601:
return True
data = getattr(exc, "data", None)
method = data.get("method") if isinstance(data, dict) else None
return method not in _BENIGN_PROBE_METHODS
def _setup_logging() -> None:
"""Route all logging to stderr so stdout stays clean for ACP stdio."""
handler = logging.StreamHandler(sys.stderr)
@ -29,6 +69,7 @@ def _setup_logging() -> None:
datefmt="%Y-%m-%d %H:%M:%S",
)
)
handler.addFilter(_BenignProbeMethodFilter())
root = logging.getLogger()
root.handlers.clear()
root.addHandler(handler)

View file

@ -63,6 +63,9 @@ def make_approval_callback(
logger.warning("Permission request timed out or failed: %s", exc)
return "deny"
if response is None:
return "deny"
outcome = response.outcome
if isinstance(outcome, AllowedOutcome):
option_id = outcome.option_id

View file

@ -4,6 +4,7 @@ from __future__ import annotations
import asyncio
import logging
import os
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Deque, Optional
@ -51,7 +52,7 @@ try:
except ImportError:
from acp.schema import AuthMethod as AuthMethodAgent # type: ignore[attr-defined]
from acp_adapter.auth import detect_provider, has_provider
from acp_adapter.auth import detect_provider
from acp_adapter.events import (
make_message_cb,
make_step_cb,
@ -71,6 +72,11 @@ except Exception:
# Thread pool for running AIAgent (synchronous) in parallel.
_executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="acp-agent")
# Server-side page size for list_sessions. The ACP ListSessionsRequest schema
# does not expose a client-side limit, so this is a fixed cap that clients
# paginate against using `cursor` / `next_cursor`.
_LIST_SESSIONS_PAGE_SIZE = 50
def _extract_text(
prompt: list[
@ -351,9 +357,18 @@ class HermesACPAgent(acp.Agent):
)
async def authenticate(self, method_id: str, **kwargs: Any) -> AuthenticateResponse | None:
if has_provider():
return AuthenticateResponse()
return None
# Only accept authenticate() calls whose method_id matches the
# provider we advertised in initialize(). Without this check,
# authenticate() would acknowledge any method_id as long as the
# server has provider credentials configured — harmless under
# Hermes' threat model (ACP is stdio-only, local-trust), but poor
# API hygiene and confusing if ACP ever grows multi-method auth.
provider = detect_provider()
if not provider:
return None
if not isinstance(method_id, str) or method_id.strip().lower() != provider:
return None
return AuthenticateResponse()
# ---- Session management -------------------------------------------------
@ -437,7 +452,28 @@ class HermesACPAgent(acp.Agent):
cwd: str | None = None,
**kwargs: Any,
) -> ListSessionsResponse:
"""List ACP sessions with optional ``cwd`` filtering and cursor pagination.
``cwd`` is passed through to ``SessionManager.list_sessions`` which already
normalizes and filters by working directory. ``cursor`` is a ``session_id``
previously returned as ``next_cursor``; results resume after that entry.
Server-side page size is capped at ``_LIST_SESSIONS_PAGE_SIZE``; when more
results remain, ``next_cursor`` is set to the last returned ``session_id``.
"""
infos = self.session_manager.list_sessions(cwd=cwd)
if cursor:
for idx, s in enumerate(infos):
if s["session_id"] == cursor:
infos = infos[idx + 1:]
break
else:
# Unknown cursor -> empty page (do not fall back to full list).
infos = []
has_more = len(infos) > _LIST_SESSIONS_PAGE_SIZE
infos = infos[:_LIST_SESSIONS_PAGE_SIZE]
sessions = []
for s in infos:
updated_at = s.get("updated_at")
@ -451,7 +487,9 @@ class HermesACPAgent(acp.Agent):
updated_at=updated_at,
)
)
return ListSessionsResponse(sessions=sessions)
next_cursor = sessions[-1].session_id if has_more and sessions else None
return ListSessionsResponse(sessions=sessions, next_cursor=next_cursor)
# ---- Prompt (core) ------------------------------------------------------
@ -517,15 +555,32 @@ class HermesACPAgent(acp.Agent):
agent.step_callback = step_cb
agent.message_callback = message_cb
if approval_cb:
try:
from tools import terminal_tool as _terminal_tool
previous_approval_cb = getattr(_terminal_tool, "_approval_callback", None)
_terminal_tool.set_approval_callback(approval_cb)
except Exception:
logger.debug("Could not set ACP approval callback", exc_info=True)
# Approval callback is per-thread (thread-local, GHSA-qg5c-hvr5-hjgr).
# Set it INSIDE _run_agent so the TLS write happens in the executor
# thread — setting it here would write to the event-loop thread's TLS,
# not the executor's. Also set HERMES_INTERACTIVE so approval.py
# takes the CLI-interactive path (which calls the registered
# callback via prompt_dangerous_approval) instead of the
# non-interactive auto-approve branch (GHSA-96vc-wcxf-jjff).
# ACP's conn.request_permission maps cleanly to the interactive
# callback shape — not the gateway-queue HERMES_EXEC_ASK path,
# which requires a notify_cb registered in _gateway_notify_cbs.
previous_approval_cb = None
previous_interactive = None
def _run_agent() -> dict:
nonlocal previous_approval_cb, previous_interactive
if approval_cb:
try:
from tools import terminal_tool as _terminal_tool
previous_approval_cb = _terminal_tool._get_approval_callback()
_terminal_tool.set_approval_callback(approval_cb)
except Exception:
logger.debug("Could not set ACP approval callback", exc_info=True)
# Signal to tools.approval that we have an interactive callback
# and the non-interactive auto-approve path must not fire.
previous_interactive = os.environ.get("HERMES_INTERACTIVE")
os.environ["HERMES_INTERACTIVE"] = "1"
try:
result = agent.run_conversation(
user_message=user_text,
@ -537,6 +592,11 @@ class HermesACPAgent(acp.Agent):
logger.exception("Agent error in session %s", session_id)
return {"final_response": f"Error: {e}", "messages": state.history}
finally:
# Restore HERMES_INTERACTIVE.
if previous_interactive is None:
os.environ.pop("HERMES_INTERACTIVE", None)
else:
os.environ["HERMES_INTERACTIVE"] = previous_interactive
if approval_cb:
try:
from tools import terminal_tool as _terminal_tool
@ -613,8 +673,8 @@ class HermesACPAgent(acp.Agent):
await self._conn.session_update(
session_id=session_id,
update=AvailableCommandsUpdate(
sessionUpdate="available_commands_update",
availableCommands=self._available_commands(),
session_update="available_commands_update",
available_commands=self._available_commands(),
),
)
except Exception:

326
agent/account_usage.py Normal file
View file

@ -0,0 +1,326 @@
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any, Optional
import httpx
from agent.anthropic_adapter import _is_oauth_token, resolve_anthropic_token
from hermes_cli.auth import _read_codex_tokens, resolve_codex_runtime_credentials
from hermes_cli.runtime_provider import resolve_runtime_provider
def _utc_now() -> datetime:
return datetime.now(timezone.utc)
@dataclass(frozen=True)
class AccountUsageWindow:
    # One rate-limit window (e.g. a rolling 5-hour or weekly quota) as
    # reported by a provider's usage endpoint.
    label: str
    used_percent: Optional[float] = None  # 0–100 scale; None when the provider omits it
    reset_at: Optional[datetime] = None  # when the window resets, if known
    detail: Optional[str] = None  # freeform suffix rendered when no reset_at is set
@dataclass(frozen=True)
class AccountUsageSnapshot:
    """Point-in-time view of one provider account's usage limits."""

    provider: str  # provider identifier — presumably e.g. "codex"/"anthropic"; confirm against callers
    source: str  # where the data came from (endpoint/credential source) — TODO confirm
    fetched_at: datetime  # when this snapshot was retrieved
    title: str = "Account limits"  # heading used by render_account_usage_lines
    plan: Optional[str] = None  # subscription plan name, if reported
    windows: tuple[AccountUsageWindow, ...] = ()
    details: tuple[str, ...] = ()  # extra freeform lines to render verbatim
    unavailable_reason: Optional[str] = None  # set when usage could not be fetched

    @property
    def available(self) -> bool:
        # Renderable only when the snapshot carries some data AND no
        # provider-level failure reason was recorded.
        return bool(self.windows or self.details) and not self.unavailable_reason
def _title_case_slug(value: Optional[str]) -> Optional[str]:
cleaned = str(value or "").strip()
if not cleaned:
return None
return cleaned.replace("_", " ").replace("-", " ").title()
def _parse_dt(value: Any) -> Optional[datetime]:
if value in (None, ""):
return None
if isinstance(value, (int, float)):
return datetime.fromtimestamp(float(value), tz=timezone.utc)
if isinstance(value, str):
text = value.strip()
if not text:
return None
if text.endswith("Z"):
text = text[:-1] + "+00:00"
try:
dt = datetime.fromisoformat(text)
return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
except ValueError:
return None
return None
def _format_reset(dt: Optional[datetime]) -> str:
    """Render a reset timestamp as relative text plus the local wall-clock time.

    Examples: ``in 2h 5m (2026-01-01 12:00 PST)``; past/now timestamps render
    as ``now (...)``; a missing timestamp renders as ``unknown``.
    """
    if not dt:
        return "unknown"
    stamp = dt.astimezone().strftime("%Y-%m-%d %H:%M %Z")
    remaining = int((dt - _utc_now()).total_seconds())
    if remaining <= 0:
        return f"now ({stamp})"
    hours, leftover = divmod(remaining, 3600)
    minutes = leftover // 60
    if hours >= 24:
        days, hours = divmod(hours, 24)
        relative = f"in {days}d {hours}h"
    elif hours:
        relative = f"in {hours}h {minutes}m"
    else:
        relative = f"in {minutes}m"
    return f"{relative} ({stamp})"
def render_account_usage_lines(snapshot: Optional[AccountUsageSnapshot], *, markdown: bool = False) -> list[str]:
    """Format an :class:`AccountUsageSnapshot` as display lines.

    Returns ``[]`` for a missing snapshot.  When *markdown* is True the title
    is bold-wrapped; all other lines are plain text.
    """
    if not snapshot:
        return []
    bold = "**" if markdown else ""
    lines = [f"📈 {bold}{snapshot.title}{bold}"]
    if snapshot.plan:
        lines.append(f"Provider: {snapshot.provider} ({snapshot.plan})")
    else:
        lines.append(f"Provider: {snapshot.provider}")
    for window in snapshot.windows:
        if window.used_percent is None:
            base = f"{window.label}: unavailable"
        else:
            # Clamp at 0 so over-quota (>100% used) never renders a
            # negative "remaining" figure.
            remaining = max(0, round(100 - float(window.used_percent)))
            used = max(0, round(float(window.used_percent)))
            base = f"{window.label}: {remaining}% remaining ({used}% used)"
        if window.reset_at:
            base += f" • resets {_format_reset(window.reset_at)}"
        elif window.detail:
            # Bug fix: the detail used to be appended with no separator,
            # producing e.g. "(20% used)$8.00 of $10.00 remaining".
            # Use the same bullet separator as the reset branch.
            base += f" • {window.detail}"
        lines.append(base)
    lines.extend(snapshot.details)
    if snapshot.unavailable_reason:
        lines.append(f"Unavailable: {snapshot.unavailable_reason}")
    return lines
def _resolve_codex_usage_url(base_url: str) -> str:
normalized = (base_url or "").strip().rstrip("/")
if not normalized:
normalized = "https://chatgpt.com/backend-api/codex"
if normalized.endswith("/codex"):
normalized = normalized[: -len("/codex")]
if "/backend-api" in normalized:
return normalized + "/wham/usage"
return normalized + "/api/codex/usage"
def _fetch_codex_account_usage() -> Optional[AccountUsageSnapshot]:
    """Query the ChatGPT/Codex usage API and normalise it into a snapshot.

    Raises on HTTP/auth failure — the public ``fetch_account_usage`` wrapper
    swallows exceptions and maps them to ``None``.
    """
    creds = resolve_codex_runtime_credentials(refresh_if_expiring=True)
    stored = _read_codex_tokens()
    account_id = str((stored.get("tokens") or {}).get("account_id", "") or "").strip() or None
    headers = {
        "Authorization": f"Bearer {creds['api_key']}",
        "Accept": "application/json",
        "User-Agent": "codex-cli",
    }
    if account_id:
        headers["ChatGPT-Account-Id"] = account_id
    with httpx.Client(timeout=15.0) as client:
        resp = client.get(_resolve_codex_usage_url(creds.get("base_url", "")), headers=headers)
        resp.raise_for_status()
        payload = resp.json() or {}
    rate_limit = payload.get("rate_limit") or {}
    windows: list[AccountUsageWindow] = []
    for key, label in (("primary_window", "Session"), ("secondary_window", "Weekly")):
        info = rate_limit.get(key) or {}
        used = info.get("used_percent")
        if used is not None:
            windows.append(
                AccountUsageWindow(
                    label=label,
                    used_percent=float(used),
                    reset_at=_parse_dt(info.get("reset_at")),
                )
            )
    details: list[str] = []
    credits = payload.get("credits") or {}
    if credits.get("has_credits"):
        balance = credits.get("balance")
        if isinstance(balance, (int, float)):
            details.append(f"Credits balance: ${float(balance):.2f}")
        elif credits.get("unlimited"):
            details.append("Credits balance: unlimited")
    return AccountUsageSnapshot(
        provider="openai-codex",
        source="usage_api",
        fetched_at=_utc_now(),
        plan=_title_case_slug(payload.get("plan_type")),
        windows=tuple(windows),
        details=tuple(details),
    )
def _fetch_anthropic_account_usage() -> Optional[AccountUsageSnapshot]:
    """Query Anthropic's OAuth usage API and normalise it into a snapshot.

    Returns ``None`` when no token is configured, and an "unavailable"
    snapshot for plain API keys (only OAuth accounts expose usage limits).
    """
    token = (resolve_anthropic_token() or "").strip()
    if not token:
        return None
    if not _is_oauth_token(token):
        # Plain API keys have no account-level usage endpoint.
        return AccountUsageSnapshot(
            provider="anthropic",
            source="oauth_usage_api",
            fetched_at=_utc_now(),
            unavailable_reason="Anthropic account limits are only available for OAuth-backed Claude accounts.",
        )
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
        "Content-Type": "application/json",
        "anthropic-beta": "oauth-2025-04-20",
        "User-Agent": "claude-code/2.1.0",
    }
    with httpx.Client(timeout=15.0) as client:
        resp = client.get("https://api.anthropic.com/api/oauth/usage", headers=headers)
        resp.raise_for_status()
        payload = resp.json() or {}
    windows: list[AccountUsageWindow] = []
    for key, label in (
        ("five_hour", "Current session"),
        ("seven_day", "Current week"),
        ("seven_day_opus", "Opus week"),
        ("seven_day_sonnet", "Sonnet week"),
    ):
        info = payload.get(key) or {}
        util = info.get("utilization")
        if util is None:
            continue
        # The API may report either a 0-1 fraction or a 0-100 percentage;
        # normalise both to percent.
        percent = float(util) * 100 if float(util) <= 1 else float(util)
        windows.append(
            AccountUsageWindow(
                label=label,
                used_percent=percent,
                reset_at=_parse_dt(info.get("resets_at")),
            )
        )
    details: list[str] = []
    extra = payload.get("extra_usage") or {}
    if extra.get("is_enabled"):
        used_credits = extra.get("used_credits")
        monthly_limit = extra.get("monthly_limit")
        currency = extra.get("currency") or "USD"
        if isinstance(used_credits, (int, float)) and isinstance(monthly_limit, (int, float)):
            details.append(
                f"Extra usage: {used_credits:.2f} / {monthly_limit:.2f} {currency}"
            )
    return AccountUsageSnapshot(
        provider="anthropic",
        source="oauth_usage_api",
        fetched_at=_utc_now(),
        windows=tuple(windows),
        details=tuple(details),
    )
def _fetch_openrouter_account_usage(base_url: Optional[str], api_key: Optional[str]) -> Optional[AccountUsageSnapshot]:
    """Query OpenRouter's /credits and /key endpoints and build a snapshot.

    *base_url* / *api_key* override the resolved runtime-provider config.
    Returns ``None`` when no API key can be resolved.  The /key lookup is
    best-effort: the credits balance alone is still worth showing.
    """
    runtime = resolve_runtime_provider(
        requested="openrouter",
        explicit_base_url=base_url,
        explicit_api_key=api_key,
    )
    token = str(runtime.get("api_key", "") or "").strip()
    if not token:
        return None
    normalized = str(runtime.get("base_url", "") or "").rstrip("/")
    headers = {
        "Authorization": f"Bearer {token}",
        "Accept": "application/json",
    }
    with httpx.Client(timeout=10.0) as client:
        credits_resp = client.get(f"{normalized}/credits", headers=headers)
        credits_resp.raise_for_status()
        credits = (credits_resp.json() or {}).get("data") or {}
        try:
            key_resp = client.get(f"{normalized}/key", headers=headers)
            key_resp.raise_for_status()
            key_data = (key_resp.json() or {}).get("data") or {}
        except Exception:
            # Best-effort: per-key limits are optional extra detail.
            key_data = {}
    total_credits = float(credits.get("total_credits") or 0.0)
    total_usage = float(credits.get("total_usage") or 0.0)
    details = [f"Credits balance: ${max(0.0, total_credits - total_usage):.2f}"]
    windows: list[AccountUsageWindow] = []
    limit = key_data.get("limit")
    limit_remaining = key_data.get("limit_remaining")
    limit_reset = str(key_data.get("limit_reset") or "").strip()
    usage = key_data.get("usage")
    if (
        isinstance(limit, (int, float))
        and float(limit) > 0
        and isinstance(limit_remaining, (int, float))
        and 0 <= float(limit_remaining) <= float(limit)
    ):
        limit_value = float(limit)
        remaining_value = float(limit_remaining)
        used_percent = ((limit_value - remaining_value) / limit_value) * 100
        detail_parts = [f"${remaining_value:.2f} of ${limit_value:.2f} remaining"]
        if limit_reset:
            detail_parts.append(f"resets {limit_reset}")
        windows.append(
            AccountUsageWindow(
                label="API key quota",
                used_percent=used_percent,
                # Bug fix: was "".join(detail_parts), which fused the parts
                # together ("...remainingresets monthly"). Join with the
                # bullet separator used throughout usage rendering.
                detail=" • ".join(detail_parts),
            )
        )
    if isinstance(usage, (int, float)):
        usage_parts = [f"API key usage: ${float(usage):.2f} total"]
        for value, label in (
            (key_data.get("usage_daily"), "today"),
            (key_data.get("usage_weekly"), "this week"),
            (key_data.get("usage_monthly"), "this month"),
        ):
            if isinstance(value, (int, float)) and float(value) > 0:
                usage_parts.append(f"${float(value):.2f} {label}")
        # Bug fix: was "".join(usage_parts) — "$5.00 total$1.00 today...".
        details.append(" • ".join(usage_parts))
    return AccountUsageSnapshot(
        provider="openrouter",
        source="credits_api",
        fetched_at=_utc_now(),
        windows=tuple(windows),
        details=tuple(details),
    )
def fetch_account_usage(
    provider: Optional[str],
    *,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
) -> Optional[AccountUsageSnapshot]:
    """Fetch account usage for *provider*, returning ``None`` on any failure.

    Usage display is best-effort: unknown providers, missing credentials,
    and network/auth errors all map to ``None`` rather than raising.
    """
    name = str(provider or "").strip().lower()
    if name in {"", "auto", "custom"}:
        return None
    try:
        dispatch = {
            "openai-codex": _fetch_codex_account_usage,
            "anthropic": _fetch_anthropic_account_usage,
            "openrouter": lambda: _fetch_openrouter_account_usage(base_url, api_key),
        }
        handler = dispatch.get(name)
        return handler() if handler else None
    except Exception:
        return None

View file

@ -19,6 +19,7 @@ from pathlib import Path
from hermes_constants import get_hermes_home
from types import SimpleNamespace
from typing import Any, Dict, List, Optional, Tuple
from utils import normalize_proxy_env_vars
try:
import anthropic as _anthropic_sdk
@ -265,6 +266,14 @@ def _is_third_party_anthropic_endpoint(base_url: str | None) -> bool:
return True # Any other endpoint is a third-party proxy
def _is_kimi_coding_endpoint(base_url: str | None) -> bool:
"""Return True for Kimi's /coding endpoint that requires claude-code UA."""
normalized = _normalize_base_url_text(base_url)
if not normalized:
return False
return normalized.rstrip("/").lower().startswith("https://api.kimi.com/coding")
def _requires_bearer_auth(base_url: str | None) -> bool:
"""Return True for Anthropic-compatible providers that require Bearer auth.
@ -292,9 +301,15 @@ def _common_betas_for_base_url(base_url: str | None) -> list[str]:
return _COMMON_BETAS
def build_anthropic_client(api_key: str, base_url: str = None):
def build_anthropic_client(api_key: str, base_url: str = None, timeout: float = None):
"""Create an Anthropic client, auto-detecting setup-tokens vs API keys.
If *timeout* is provided it overrides the default 900s read timeout. The
connect timeout stays at 10s. Callers pass this from the per-provider /
per-model ``request_timeout_seconds`` config so Anthropic-native and
Anthropic-compatible providers respect the same knob as OpenAI-wire
providers.
Returns an anthropic.Anthropic instance.
"""
if _anthropic_sdk is None:
@ -302,19 +317,32 @@ def build_anthropic_client(api_key: str, base_url: str = None):
"The 'anthropic' package is required for the Anthropic provider. "
"Install it with: pip install 'anthropic>=0.39.0'"
)
normalize_proxy_env_vars()
from httpx import Timeout
normalized_base_url = _normalize_base_url_text(base_url)
_read_timeout = timeout if (isinstance(timeout, (int, float)) and timeout > 0) else 900.0
kwargs = {
"timeout": Timeout(timeout=900.0, connect=10.0),
"timeout": Timeout(timeout=float(_read_timeout), connect=10.0),
}
if normalized_base_url:
kwargs["base_url"] = normalized_base_url
common_betas = _common_betas_for_base_url(normalized_base_url)
if _requires_bearer_auth(normalized_base_url):
if _is_kimi_coding_endpoint(base_url):
# Kimi's /coding endpoint requires User-Agent: claude-code/0.1.0
# to be recognized as a valid Coding Agent. Without it, returns 403.
# Check this BEFORE _requires_bearer_auth since both match api.kimi.com/coding.
kwargs["api_key"] = api_key
kwargs["default_headers"] = {
"User-Agent": "claude-code/0.1.0",
**( {"anthropic-beta": ",".join(common_betas)} if common_betas else {} )
}
elif _requires_bearer_auth(normalized_base_url):
# Some Anthropic-compatible providers (e.g. MiniMax) expect the API key in
# Authorization: Bearer even for regular API keys. Route those endpoints
# Authorization: Bearer *** for regular API keys. Route those endpoints
# through auth_token so the SDK sends Bearer auth instead of x-api-key.
# Check this before OAuth token shape detection because MiniMax secrets do
# not use Anthropic's sk-ant-api prefix and would otherwise be misread as
@ -1055,6 +1083,31 @@ def convert_messages_to_anthropic(
"name": fn.get("name", ""),
"input": parsed_args,
})
# Kimi's /coding endpoint (Anthropic protocol) requires assistant
# tool-call messages to carry reasoning_content when thinking is
# enabled server-side. Preserve it as a thinking block so Kimi
# can validate the message history. See hermes-agent#13848.
#
# Accept empty string "" — _copy_reasoning_content_for_api()
# injects "" as a tier-3 fallback for Kimi tool-call messages
# that had no reasoning. Kimi requires the field to exist, even
# if empty.
#
# Prepend (not append): Anthropic protocol requires thinking
# blocks before text and tool_use blocks.
#
# Guard: only add when reasoning_details didn't already contribute
# thinking blocks. On native Anthropic, reasoning_details produces
# signed thinking blocks — adding another unsigned one from
# reasoning_content would create a duplicate (same text) that gets
# downgraded to a spurious text block on the last assistant message.
reasoning_content = m.get("reasoning_content")
_already_has_thinking = any(
isinstance(b, dict) and b.get("type") in ("thinking", "redacted_thinking")
for b in blocks
)
if isinstance(reasoning_content, str) and not _already_has_thinking:
blocks.insert(0, {"type": "thinking", "thinking": reasoning_content})
# Anthropic rejects empty assistant content
effective = blocks or content
if not effective or effective == "":
@ -1210,6 +1263,7 @@ def convert_messages_to_anthropic(
# cache markers can interfere with signature validation.
_THINKING_TYPES = frozenset(("thinking", "redacted_thinking"))
_is_third_party = _is_third_party_anthropic_endpoint(base_url)
_is_kimi = _is_kimi_coding_endpoint(base_url)
last_assistant_idx = None
for i in range(len(result) - 1, -1, -1):
@ -1221,7 +1275,25 @@ def convert_messages_to_anthropic(
if m.get("role") != "assistant" or not isinstance(m.get("content"), list):
continue
if _is_third_party or idx != last_assistant_idx:
if _is_kimi:
# Kimi's /coding endpoint enables thinking server-side and
# requires unsigned thinking blocks on replayed assistant
# tool-call messages. Strip signed Anthropic blocks (Kimi
# can't validate signatures) but preserve the unsigned ones
# we synthesised from reasoning_content above.
new_content = []
for b in m["content"]:
if not isinstance(b, dict) or b.get("type") not in _THINKING_TYPES:
new_content.append(b)
continue
if b.get("signature") or b.get("data"):
# Anthropic-signed block — Kimi can't validate, strip
continue
# Unsigned thinking (synthesised from reasoning_content) —
# keep it: Kimi needs it for message-history validation.
new_content.append(b)
m["content"] = new_content or [{"type": "text", "text": "(empty)"}]
elif _is_third_party or idx != last_assistant_idx:
# Third-party endpoint: strip ALL thinking blocks from every
# assistant message — signatures are Anthropic-proprietary.
# Direct Anthropic: strip from non-latest assistant messages only.
@ -1398,11 +1470,25 @@ def build_anthropic_kwargs(
# MiniMax Anthropic-compat endpoints support thinking (manual mode only,
# not adaptive). Haiku does NOT support extended thinking — skip entirely.
#
# Kimi's /coding endpoint speaks the Anthropic Messages protocol but has
# its own thinking semantics: when ``thinking.enabled`` is sent, Kimi
# validates the message history and requires every prior assistant
# tool-call message to carry OpenAI-style ``reasoning_content``. The
# Anthropic path never populates that field, and
# ``convert_messages_to_anthropic`` strips all Anthropic thinking blocks
# on third-party endpoints — so the request fails with HTTP 400
# "thinking is enabled but reasoning_content is missing in assistant
# tool call message at index N". Kimi's reasoning is driven server-side
# on the /coding route, so skip Anthropic's thinking parameter entirely
# for that host. (Kimi on chat_completions enables thinking via
# extra_body in the ChatCompletionsTransport — see #13503.)
#
# On 4.7+ the `thinking.display` field defaults to "omitted", which
# silently hides reasoning text that Hermes surfaces in its CLI. We
# request "summarized" so the reasoning blocks stay populated — matching
# 4.6 behavior and preserving the activity-feed UX during long tool runs.
if reasoning_config and isinstance(reasoning_config, dict):
_is_kimi_coding = _is_kimi_coding_endpoint(base_url)
if reasoning_config and isinstance(reasoning_config, dict) and not _is_kimi_coding:
if reasoning_config.get("enabled") is not False and "haiku" not in model.lower():
effort = str(reasoning_config.get("effort", "medium")).lower()
budget = THINKING_BUDGET.get(effort, 8000)
@ -1518,3 +1604,42 @@ def normalize_anthropic_response(
),
finish_reason,
)
def normalize_anthropic_response_v2(
    response,
    strip_tool_prefix: bool = False,
) -> "NormalizedResponse":
    """Normalize an Anthropic response into the shared NormalizedResponse type.

    Thin shim over normalize_anthropic_response() that maps its output onto
    the transport-layer types, letting call sites migrate one at a time
    without touching the original function.
    """
    from agent.transports.types import NormalizedResponse, build_tool_call

    assistant_msg, finish_reason = normalize_anthropic_response(response, strip_tool_prefix)

    converted_calls = None
    if assistant_msg.tool_calls:
        converted_calls = []
        for call in assistant_msg.tool_calls:
            converted_calls.append(
                build_tool_call(
                    id=call.id,
                    name=call.function.name,
                    arguments=call.function.arguments,
                )
            )

    extras = {}
    reasoning_details = getattr(assistant_msg, "reasoning_details", None)
    if reasoning_details:
        extras["reasoning_details"] = reasoning_details

    return NormalizedResponse(
        content=assistant_msg.content,
        tool_calls=converted_calls,
        finish_reason=finish_reason,
        reasoning=getattr(assistant_msg, "reasoning", None),
        # Anthropic usage lives on the raw response, not the normaliser output.
        usage=None,
        provider_data=extras or None,
    )

View file

@ -48,6 +48,7 @@ from openai import OpenAI
from agent.credential_pool import load_pool
from hermes_cli.config import get_hermes_home
from hermes_constants import OPENROUTER_BASE_URL
from utils import base_url_host_matches, base_url_hostname, normalize_proxy_env_vars
logger = logging.getLogger(__name__)
@ -95,51 +96,37 @@ def _normalize_aux_provider(provider: Optional[str]) -> str:
return _PROVIDER_ALIASES.get(normalized, normalized)
_FIXED_TEMPERATURE_MODELS: Dict[str, float] = {
"kimi-for-coding": 0.6,
}
# Moonshot's kimi-for-coding endpoint (api.kimi.com/coding) documents:
# "k2.5 model will use a fixed value 1.0, non-thinking mode will use a fixed
# value 0.6. Any other value will result in an error." The same lock applies
# to the other k2.* models served on that endpoint. Enumerated explicitly so
# non-coding siblings like `kimi-k2-instruct` (variable temperature, served on
# the standard chat API and third parties) are NOT clamped.
# Source: https://platform.kimi.ai/docs/guide/kimi-k2-5-quickstart
_KIMI_INSTANT_MODELS: frozenset = frozenset({
"kimi-k2.5",
"kimi-k2-turbo-preview",
"kimi-k2-0905-preview",
})
_KIMI_THINKING_MODELS: frozenset = frozenset({
"kimi-k2-thinking",
"kimi-k2-thinking-turbo",
})
# Sentinel: when returned by _fixed_temperature_for_model(), callers must
# strip the ``temperature`` key from API kwargs entirely so the provider's
# server-side default applies. Kimi/Moonshot models manage temperature
# internally — sending *any* value (even the "correct" one) can conflict
# with gateway-side mode selection (thinking → 1.0, non-thinking → 0.6).
OMIT_TEMPERATURE: object = object()
def _fixed_temperature_for_model(model: Optional[str]) -> Optional[float]:
"""Return a required temperature override for models with strict contracts.
def _is_kimi_model(model: Optional[str]) -> bool:
"""True for any Kimi / Moonshot model that manages temperature server-side."""
bare = (model or "").strip().lower().rsplit("/", 1)[-1]
return bare.startswith("kimi-") or bare == "kimi"
Moonshot's kimi-for-coding endpoint rejects any non-approved temperature on
the k2.5 family. Non-thinking variants require exactly 0.6; thinking
variants require 1.0. An optional ``vendor/`` prefix (e.g.
``moonshotai/kimi-k2.5``) is tolerated for aggregator routings.
Returns ``None`` for every other model, including ``kimi-k2-instruct*``
which is the separate non-coding K2 family with variable temperature.
def _fixed_temperature_for_model(
model: Optional[str],
base_url: Optional[str] = None,
) -> "Optional[float] | object":
"""Return a temperature directive for models with strict contracts.
Returns:
``OMIT_TEMPERATURE`` caller must remove the ``temperature`` key so the
provider chooses its own default. Used for all Kimi / Moonshot
models whose gateway selects temperature server-side.
``float`` a specific value the caller must use (reserved for future
models with fixed-temperature contracts).
``None`` no override; caller should use its own default.
"""
normalized = (model or "").strip().lower()
fixed = _FIXED_TEMPERATURE_MODELS.get(normalized)
if fixed is not None:
logger.debug("Forcing temperature=%s for model %r (fixed map)", fixed, model)
return fixed
bare = normalized.rsplit("/", 1)[-1]
if bare in _KIMI_THINKING_MODELS:
logger.debug("Forcing temperature=1.0 for kimi thinking model %r", model)
return 1.0
if bare in _KIMI_INSTANT_MODELS:
logger.debug("Forcing temperature=0.6 for kimi instant model %r", model)
return 0.6
if _is_kimi_model(model):
logger.debug("Omitting temperature for Kimi model %r (server-managed)", model)
return OMIT_TEMPERATURE
return None
# Default auxiliary models for direct API-key providers (cheap/fast for side tasks)
@ -147,6 +134,7 @@ _API_KEY_PROVIDER_AUX_MODELS: Dict[str, str] = {
"gemini": "gemini-3-flash-preview",
"zai": "glm-4.5-flash",
"kimi-coding": "kimi-k2-turbo-preview",
"stepfun": "step-3.5-flash",
"kimi-coding-cn": "kimi-k2-turbo-preview",
"minimax": "MiniMax-M2.7",
"minimax-cn": "MiniMax-M2.7",
@ -174,6 +162,16 @@ _OR_HEADERS = {
"X-OpenRouter-Categories": "productivity,cli-agent",
}
# Vercel AI Gateway app attribution headers. HTTP-Referer maps to
# referrerUrl and X-Title maps to appName in the gateway's analytics.
from hermes_cli import __version__ as _HERMES_VERSION
_AI_GATEWAY_HEADERS = {
"HTTP-Referer": "https://hermes-agent.nousresearch.com",
"X-Title": "Hermes Agent",
"User-Agent": f"HermesAgent/{_HERMES_VERSION}",
}
# Nous Portal extra_body for product attribution.
# Callers should pass this as extra_body in chat.completions.create()
# when the auxiliary client is backed by Nous Portal.
@ -185,8 +183,6 @@ auxiliary_is_nous: bool = False
# Default auxiliary models per provider
_OPENROUTER_MODEL = "google/gemini-3-flash-preview"
_NOUS_MODEL = "google/gemini-3-flash-preview"
_NOUS_FREE_TIER_VISION_MODEL = "xiaomi/mimo-v2-omni"
_NOUS_FREE_TIER_AUX_MODEL = "xiaomi/mimo-v2-pro"
_NOUS_DEFAULT_BASE_URL = "https://inference-api.nousresearch.com/v1"
_ANTHROPIC_DEFAULT_BASE_URL = "https://api.anthropic.com"
_AUTH_JSON_PATH = get_hermes_home() / "auth.json"
@ -200,6 +196,45 @@ _CODEX_AUX_MODEL = "gpt-5.2-codex"
_CODEX_AUX_BASE_URL = "https://chatgpt.com/backend-api/codex"
def _codex_cloudflare_headers(access_token: str) -> Dict[str, str]:
"""Headers required to avoid Cloudflare 403s on chatgpt.com/backend-api/codex.
The Cloudflare layer in front of the Codex endpoint whitelists a small set of
first-party originators (``codex_cli_rs``, ``codex_vscode``, ``codex_sdk_ts``,
anything starting with ``Codex``). Requests from non-residential IPs (VPS,
server-hosted agents) that don't advertise an allowed originator are served
a 403 with ``cf-mitigated: challenge`` regardless of auth correctness.
We pin ``originator: codex_cli_rs`` to match the upstream codex-rs CLI, set
``User-Agent`` to a codex_cli_rs-shaped string (beats SDK fingerprinting),
and extract ``ChatGPT-Account-ID`` (canonical casing, from codex-rs
``auth.rs``) out of the OAuth JWT's ``chatgpt_account_id`` claim.
Malformed tokens are tolerated we drop the account-ID header rather than
raise, so a bad token still surfaces as an auth error (401) instead of a
crash at client construction.
"""
headers = {
"User-Agent": "codex_cli_rs/0.0.0 (Hermes Agent)",
"originator": "codex_cli_rs",
}
if not isinstance(access_token, str) or not access_token.strip():
return headers
try:
import base64
parts = access_token.split(".")
if len(parts) < 2:
return headers
payload_b64 = parts[1] + "=" * (-len(parts[1]) % 4)
claims = json.loads(base64.urlsafe_b64decode(payload_b64))
acct_id = claims.get("https://api.openai.com/auth", {}).get("chatgpt_account_id")
if isinstance(acct_id, str) and acct_id:
headers["ChatGPT-Account-ID"] = acct_id
except Exception:
pass
return headers
def _to_openai_base_url(base_url: str) -> str:
"""Normalize an Anthropic-style base URL to OpenAI-compatible format.
@ -692,6 +727,33 @@ def _nous_base_url() -> str:
return os.getenv("NOUS_INFERENCE_BASE_URL", _NOUS_DEFAULT_BASE_URL)
def _resolve_nous_runtime_api(*, force_refresh: bool = False) -> Optional[tuple[str, str]]:
    """Return fresh Nous runtime credentials when available.

    Mirrors the main agent's 401 recovery path so auxiliary clients stay
    aligned with the singleton auth store + mint flow, instead of trusting
    whatever raw tokens happen to sit in auth.json or the credential pool.
    Returns ``(api_key, base_url)`` or ``None`` when resolution fails.
    """
    try:
        from hermes_cli.auth import resolve_nous_runtime_credentials

        creds = resolve_nous_runtime_credentials(
            min_key_ttl_seconds=max(60, int(os.getenv("HERMES_NOUS_MIN_KEY_TTL_SECONDS", "1800"))),
            timeout_seconds=float(os.getenv("HERMES_NOUS_TIMEOUT_SECONDS", "15")),
            force_mint=force_refresh,
        )
    except Exception as exc:
        logger.debug("Auxiliary Nous runtime credential resolution failed: %s", exc)
        return None
    key = str(creds.get("api_key") or "").strip()
    url = str(creds.get("base_url") or "").strip().rstrip("/")
    if key and url:
        return key, url
    return None
def _read_codex_access_token() -> Optional[str]:
"""Read a valid, non-expired Codex OAuth access token from Hermes auth store.
@ -775,10 +837,15 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
if model is None:
continue # skip provider if we don't know a valid aux model
logger.debug("Auxiliary text client: %s (%s) via pool", pconfig.name, model)
if provider_id == "gemini":
from agent.gemini_native_adapter import GeminiNativeClient, is_native_gemini_base_url
if is_native_gemini_base_url(base_url):
return GeminiNativeClient(api_key=api_key, base_url=base_url), model
extra = {}
if "api.kimi.com" in base_url.lower():
extra["default_headers"] = {"User-Agent": "KimiCLI/1.30.0"}
elif "api.githubcopilot.com" in base_url.lower():
if base_url_host_matches(base_url, "api.kimi.com"):
extra["default_headers"] = {"User-Agent": "claude-code/0.1.0"}
elif base_url_host_matches(base_url, "api.githubcopilot.com"):
from hermes_cli.models import copilot_default_headers
extra["default_headers"] = copilot_default_headers()
@ -796,10 +863,15 @@ def _resolve_api_key_provider() -> Tuple[Optional[OpenAI], Optional[str]]:
if model is None:
continue # skip provider if we don't know a valid aux model
logger.debug("Auxiliary text client: %s (%s)", pconfig.name, model)
if provider_id == "gemini":
from agent.gemini_native_adapter import GeminiNativeClient, is_native_gemini_base_url
if is_native_gemini_base_url(base_url):
return GeminiNativeClient(api_key=api_key, base_url=base_url), model
extra = {}
if "api.kimi.com" in base_url.lower():
extra["default_headers"] = {"User-Agent": "KimiCLI/1.30.0"}
elif "api.githubcopilot.com" in base_url.lower():
if base_url_host_matches(base_url, "api.kimi.com"):
extra["default_headers"] = {"User-Agent": "claude-code/0.1.0"}
elif base_url_host_matches(base_url, "api.githubcopilot.com"):
from hermes_cli.models import copilot_default_headers
extra["default_headers"] = copilot_default_headers()
@ -848,29 +920,50 @@ def _try_nous(vision: bool = False) -> Tuple[Optional[OpenAI], Optional[str]]:
pass
nous = _read_nous_auth()
if not nous:
runtime = _resolve_nous_runtime_api(force_refresh=False)
if runtime is None and not nous:
return None, None
global auxiliary_is_nous
auxiliary_is_nous = True
logger.debug("Auxiliary client: Nous Portal")
if nous.get("source") == "pool":
model = "gemini-3-flash"
else:
model = _NOUS_MODEL
# Free-tier users can't use paid auxiliary models — use the free
# models instead: mimo-v2-omni for vision, mimo-v2-pro for text tasks.
# Ask the Portal which model it currently recommends for this task type.
# The /api/nous/recommended-models endpoint is the authoritative source:
# it distinguishes paid vs free tier recommendations, and get_nous_recommended_aux_model
# auto-detects the caller's tier via check_nous_free_tier(). Fall back to
# _NOUS_MODEL (google/gemini-3-flash-preview) when the Portal is unreachable
# or returns a null recommendation for this task type.
model = _NOUS_MODEL
try:
from hermes_cli.models import check_nous_free_tier
if check_nous_free_tier():
model = _NOUS_FREE_TIER_VISION_MODEL if vision else _NOUS_FREE_TIER_AUX_MODEL
logger.debug("Free-tier Nous account — using %s for auxiliary/%s",
model, "vision" if vision else "text")
except Exception:
pass
from hermes_cli.models import get_nous_recommended_aux_model
recommended = get_nous_recommended_aux_model(vision=vision)
if recommended:
model = recommended
logger.debug(
"Auxiliary/%s: using Portal-recommended model %s",
"vision" if vision else "text", model,
)
else:
logger.debug(
"Auxiliary/%s: no Portal recommendation, falling back to %s",
"vision" if vision else "text", model,
)
except Exception as exc:
logger.debug(
"Auxiliary/%s: recommended-models lookup failed (%s); "
"falling back to %s",
"vision" if vision else "text", exc, model,
)
if runtime is not None:
api_key, base_url = runtime
else:
api_key = _nous_api_key(nous or {})
base_url = str((nous or {}).get("inference_base_url") or _nous_base_url()).rstrip("/")
return (
OpenAI(
api_key=_nous_api_key(nous),
base_url=str(nous.get("inference_base_url") or _nous_base_url()).rstrip("/"),
api_key=api_key,
base_url=base_url,
),
model,
)
@ -948,7 +1041,7 @@ def _resolve_custom_runtime() -> Tuple[Optional[str], Optional[str], Optional[st
return None, None, None
custom_base = custom_base.strip().rstrip("/")
if "openrouter.ai" in custom_base.lower():
if base_url_host_matches(custom_base, "openrouter.ai"):
# requested='custom' falls back to OpenRouter when no custom endpoint is
# configured. Treat that as "no custom endpoint" for auxiliary routing.
return None, None, None
@ -982,6 +1075,8 @@ def _validate_proxy_env_urls() -> None:
"""
from urllib.parse import urlparse
normalize_proxy_env_vars()
for key in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY",
"https_proxy", "http_proxy", "all_proxy"):
value = str(os.environ.get(key) or "").strip()
@ -1016,7 +1111,7 @@ def _validate_base_url(base_url: str) -> None:
) from exc
def _try_custom_endpoint() -> Tuple[Optional[OpenAI], Optional[str]]:
def _try_custom_endpoint() -> Tuple[Optional[Any], Optional[str]]:
runtime = _resolve_custom_runtime()
if len(runtime) == 2:
custom_base, custom_key = runtime
@ -1032,6 +1127,23 @@ def _try_custom_endpoint() -> Tuple[Optional[OpenAI], Optional[str]]:
if custom_mode == "codex_responses":
real_client = OpenAI(api_key=custom_key, base_url=custom_base)
return CodexAuxiliaryClient(real_client, model), model
if custom_mode == "anthropic_messages":
# Third-party Anthropic-compatible gateway (MiniMax, Zhipu GLM,
# LiteLLM proxies, etc.). Must NEVER be treated as OAuth —
# Anthropic OAuth claims only apply to api.anthropic.com.
try:
from agent.anthropic_adapter import build_anthropic_client
real_client = build_anthropic_client(custom_key, custom_base)
except ImportError:
logger.warning(
"Custom endpoint declares api_mode=anthropic_messages but the "
"anthropic SDK is not installed — falling back to OpenAI-wire."
)
return OpenAI(api_key=custom_key, base_url=custom_base), model
return (
AnthropicAuxiliaryClient(real_client, model, custom_key, custom_base, is_oauth=False),
model,
)
return OpenAI(api_key=custom_key, base_url=custom_base), model
@ -1052,7 +1164,11 @@ def _try_codex() -> Tuple[Optional[Any], Optional[str]]:
return None, None
base_url = _CODEX_AUX_BASE_URL
logger.debug("Auxiliary client: Codex OAuth (%s via Responses API)", _CODEX_AUX_MODEL)
real_client = OpenAI(api_key=codex_token, base_url=base_url)
real_client = OpenAI(
api_key=codex_token,
base_url=base_url,
default_headers=_codex_cloudflare_headers(codex_token),
)
return CodexAuxiliaryClient(real_client, _CODEX_AUX_MODEL), _CODEX_AUX_MODEL
@ -1191,6 +1307,15 @@ def _is_connection_error(exc: Exception) -> bool:
return False
def _is_auth_error(exc: Exception) -> bool:
"""Detect auth failures that should trigger provider-specific refresh."""
status = getattr(exc, "status_code", None)
if status == 401:
return True
err_lower = str(exc).lower()
return "error code: 401" in err_lower or "authenticationerror" in type(exc).__name__.lower()
def _try_payment_fallback(
failed_provider: str,
task: str = None,
@ -1348,6 +1473,13 @@ def _to_async_client(sync_client, model: str):
return AsyncCodexAuxiliaryClient(sync_client), model
if isinstance(sync_client, AnthropicAuxiliaryClient):
return AsyncAnthropicAuxiliaryClient(sync_client), model
try:
from agent.gemini_native_adapter import GeminiNativeClient, AsyncGeminiNativeClient
if isinstance(sync_client, GeminiNativeClient):
return AsyncGeminiNativeClient(sync_client), model
except ImportError:
pass
try:
from agent.copilot_acp_client import CopilotACPClient
if isinstance(sync_client, CopilotACPClient):
@ -1359,15 +1491,15 @@ def _to_async_client(sync_client, model: str):
"api_key": sync_client.api_key,
"base_url": str(sync_client.base_url),
}
base_lower = str(sync_client.base_url).lower()
if "openrouter" in base_lower:
sync_base_url = str(sync_client.base_url)
if base_url_host_matches(sync_base_url, "openrouter.ai"):
async_kwargs["default_headers"] = dict(_OR_HEADERS)
elif "api.githubcopilot.com" in base_lower:
elif base_url_host_matches(sync_base_url, "api.githubcopilot.com"):
from hermes_cli.models import copilot_default_headers
async_kwargs["default_headers"] = copilot_default_headers()
elif "api.kimi.com" in base_lower:
async_kwargs["default_headers"] = {"User-Agent": "KimiCLI/1.30.0"}
elif base_url_host_matches(sync_base_url, "api.kimi.com"):
async_kwargs["default_headers"] = {"User-Agent": "claude-code/0.1.0"}
return AsyncOpenAI(**async_kwargs), model
@ -1443,8 +1575,7 @@ def resolve_provider_client(
# Auto-detect: api.openai.com + codex model name pattern
if api_mode and api_mode != "codex_responses":
return False # explicit non-codex mode
normalized_base = (base_url_str or "").strip().lower()
if "api.openai.com" in normalized_base and "openrouter" not in normalized_base:
if base_url_hostname(base_url_str) == "api.openai.com":
model_lower = (model_str or "").lower()
if "codex" in model_lower:
return True
@ -1492,7 +1623,13 @@ def resolve_provider_client(
# ── Nous Portal (OAuth) ──────────────────────────────────────────
if provider == "nous":
client, default = _try_nous()
# Detect vision tasks: either explicit model override from
# _PROVIDER_VISION_MODELS, or caller passed a known vision model.
_is_vision = (
model in _PROVIDER_VISION_MODELS.values()
or (model or "").strip().lower() == "mimo-v2-omni"
)
client, default = _try_nous(vision=_is_vision)
if client is None:
logger.warning("resolve_provider_client: nous requested "
"but Nous Portal not configured (run: hermes auth)")
@ -1512,7 +1649,11 @@ def resolve_provider_client(
"but no Codex OAuth token found (run: hermes model)")
return None, None
final_model = _normalize_resolved_model(model or _CODEX_AUX_MODEL, provider)
raw_client = OpenAI(api_key=codex_token, base_url=_CODEX_AUX_BASE_URL)
raw_client = OpenAI(
api_key=codex_token,
base_url=_CODEX_AUX_BASE_URL,
default_headers=_codex_cloudflare_headers(codex_token),
)
return (raw_client, final_model)
# Standard path: wrap in CodexAuxiliaryClient adapter
client, default = _try_codex()
@ -1544,9 +1685,9 @@ def resolve_provider_client(
provider,
)
extra = {}
if "api.kimi.com" in custom_base.lower():
extra["default_headers"] = {"User-Agent": "KimiCLI/1.30.0"}
elif "api.githubcopilot.com" in custom_base.lower():
if base_url_host_matches(custom_base, "api.kimi.com"):
extra["default_headers"] = {"User-Agent": "claude-code/0.1.0"}
elif base_url_host_matches(custom_base, "api.githubcopilot.com"):
from hermes_cli.models import copilot_default_headers
extra["default_headers"] = copilot_default_headers()
client = OpenAI(api_key=custom_key, base_url=custom_base, **extra)
@ -1640,11 +1781,20 @@ def resolve_provider_client(
default_model = _API_KEY_PROVIDER_AUX_MODELS.get(provider, "")
final_model = _normalize_resolved_model(model or default_model, provider)
if provider == "gemini":
from agent.gemini_native_adapter import GeminiNativeClient, is_native_gemini_base_url
if is_native_gemini_base_url(base_url):
client = GeminiNativeClient(api_key=api_key, base_url=base_url)
logger.debug("resolve_provider_client: %s (%s)", provider, final_model)
return (_to_async_client(client, final_model) if async_mode
else (client, final_model))
# Provider-specific headers
headers = {}
if "api.kimi.com" in base_url.lower():
headers["User-Agent"] = "KimiCLI/1.30.0"
elif "api.githubcopilot.com" in base_url.lower():
if base_url_host_matches(base_url, "api.kimi.com"):
headers["User-Agent"] = "claude-code/0.1.0"
elif base_url_host_matches(base_url, "api.githubcopilot.com"):
from hermes_cli.models import copilot_default_headers
headers.update(copilot_default_headers())
@ -1875,24 +2025,35 @@ def resolve_vision_provider_client(
# _PROVIDER_VISION_MODELS provides per-provider vision model
# overrides when the provider has a dedicated multimodal model
# that differs from the chat model (e.g. xiaomi → mimo-v2-omni,
# zai → glm-5v-turbo).
# zai → glm-5v-turbo). Nous is the exception: it has a dedicated
# strict vision backend with tier-aware defaults, so it must not
# fall through to the user's text chat model here.
# 2. OpenRouter (vision-capable aggregator fallback)
# 3. Nous Portal (vision-capable aggregator fallback)
# 4. Stop
main_provider = _read_main_provider()
main_model = _read_main_model()
if main_provider and main_provider not in ("auto", ""):
vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model)
rpc_client, rpc_model = resolve_provider_client(
main_provider, vision_model,
api_mode=resolved_api_mode)
if rpc_client is not None:
logger.info(
"Vision auto-detect: using main provider %s (%s)",
main_provider, rpc_model or vision_model,
)
return _finalize(
main_provider, rpc_client, rpc_model or vision_model)
if main_provider == "nous":
sync_client, default_model = _resolve_strict_vision_backend(main_provider)
if sync_client is not None:
logger.info(
"Vision auto-detect: using main provider %s (%s)",
main_provider, default_model or resolved_model or main_model,
)
return _finalize(main_provider, sync_client, default_model)
else:
vision_model = _PROVIDER_VISION_MODELS.get(main_provider, main_model)
rpc_client, rpc_model = resolve_provider_client(
main_provider, vision_model,
api_mode=resolved_api_mode)
if rpc_client is not None:
logger.info(
"Vision auto-detect: using main provider %s (%s)",
main_provider, rpc_model or vision_model,
)
return _finalize(
main_provider, rpc_client, rpc_model or vision_model)
# Fall back through aggregators (uses their dedicated vision model,
# not the user's main model) when main provider has no client.
@ -1939,7 +2100,7 @@ def auxiliary_max_tokens_param(value: int) -> dict:
# Only use max_completion_tokens for direct OpenAI custom endpoints
if (not or_key
and _read_nous_auth() is None
and "api.openai.com" in custom_base.lower()):
and base_url_hostname(custom_base) == "api.openai.com"):
return {"max_completion_tokens": value}
return {"max_tokens": value}
@ -1967,6 +2128,76 @@ _client_cache_lock = threading.Lock()
_CLIENT_CACHE_MAX_SIZE = 64 # safety belt — evict oldest when exceeded
def _client_cache_key(
    provider: str,
    *,
    async_mode: bool,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
    api_mode: Optional[str] = None,
    main_runtime: Optional[Dict[str, Any]] = None,
) -> tuple:
    """Build the tuple key used to index the auxiliary client cache.

    The normalized main-runtime fields participate in the key only for
    provider "auto", where the resolved client depends on the caller's
    runtime configuration; for explicit providers they are irrelevant.
    """
    runtime = _normalize_main_runtime(main_runtime)
    if provider == "auto":
        runtime_key = tuple(runtime.get(field, "") for field in _MAIN_RUNTIME_FIELDS)
    else:
        runtime_key = ()
    return (provider, async_mode, base_url or "", api_key or "", api_mode or "", runtime_key)
def _store_cached_client(cache_key: tuple, client: Any, default_model: Optional[str], *, bound_loop: Any = None) -> None:
    """Insert *client* into the cache, closing any different client it evicts.

    Holding the lock for the whole replace keeps the close-then-store pair
    atomic with respect to concurrent lookups.
    """
    with _client_cache_lock:
        previous = _client_cache.get(cache_key)
        if previous is not None and previous[0] is not client:
            stale = previous[0]
            _force_close_async_httpx(stale)
            try:
                closer = getattr(stale, "close", None)
                if callable(closer):
                    closer()
            except Exception:
                # Best-effort cleanup — a failing close() must not prevent
                # the fresh client from being cached.
                pass
        _client_cache[cache_key] = (client, default_model, bound_loop)
def _refresh_nous_auxiliary_client(
    *,
    cache_provider: str,
    model: Optional[str],
    async_mode: bool,
    base_url: Optional[str] = None,
    api_key: Optional[str] = None,
    api_mode: Optional[str] = None,
    main_runtime: Optional[Dict[str, Any]] = None,
) -> Tuple[Optional[Any], Optional[str]]:
    """Refresh Nous runtime creds, rebuild the client, and replace the cache entry.

    Invoked after an auxiliary request fails with an auth error (401) so the
    retry uses fresh credentials. Returns ``(client, model)`` on success or
    ``(None, model)`` when the runtime could not be re-resolved.
    """
    # force_refresh bypasses any cached credentials so we actually obtain a
    # new token instead of replaying the one that just failed.
    runtime = _resolve_nous_runtime_api(force_refresh=True)
    if runtime is None:
        return None, model
    fresh_key, fresh_base_url = runtime
    sync_client = OpenAI(api_key=fresh_key, base_url=fresh_base_url)
    final_model = model
    current_loop = None
    if async_mode:
        # Capture the current event loop (when one exists) so the cache entry
        # records which loop the async client is bound to — presumably used by
        # the cache lookup path to detect cross-loop reuse; confirm against
        # _get_cached_client.
        try:
            import asyncio as _aio
            current_loop = _aio.get_event_loop()
        except RuntimeError:
            pass
        client, final_model = _to_async_client(sync_client, final_model or "")
    else:
        client = sync_client
    cache_key = _client_cache_key(
        cache_provider,
        async_mode=async_mode,
        base_url=base_url,
        api_key=api_key,
        api_mode=api_mode,
        main_runtime=main_runtime,
    )
    # Atomically replace (and close) any stale cached client under this key.
    _store_cached_client(cache_key, client, final_model, bound_loop=current_loop)
    return client, final_model
def neuter_async_httpx_del() -> None:
"""Monkey-patch ``AsyncHttpxClientWrapper.__del__`` to be a no-op.
@ -2068,7 +2299,7 @@ def cleanup_stale_async_clients() -> None:
def _is_openrouter_client(client: Any) -> bool:
for obj in (client, getattr(client, "_client", None), getattr(client, "client", None)):
if obj and "openrouter" in str(getattr(obj, "base_url", "") or "").lower():
if obj and base_url_host_matches(str(getattr(obj, "base_url", "") or ""), "openrouter.ai"):
return True
return False
@ -2120,8 +2351,14 @@ def _get_cached_client(
except RuntimeError:
pass
runtime = _normalize_main_runtime(main_runtime)
runtime_key = tuple(runtime.get(field, "") for field in _MAIN_RUNTIME_FIELDS) if provider == "auto" else ()
cache_key = (provider, async_mode, base_url or "", api_key or "", api_mode or "", runtime_key)
cache_key = _client_cache_key(
provider,
async_mode=async_mode,
base_url=base_url,
api_key=api_key,
api_mode=api_mode,
main_runtime=main_runtime,
)
with _client_cache_lock:
if cache_key in _client_cache:
cached_client, cached_default, cached_loop = _client_cache[cache_key]
@ -2190,7 +2427,6 @@ def _resolve_task_provider_model(
to "custom" and the task uses that direct endpoint. api_mode is one of
"chat_completions", "codex_responses", or None (auto-detect).
"""
config = {}
cfg_provider = None
cfg_model = None
cfg_base_url = None
@ -2198,16 +2434,7 @@ def _resolve_task_provider_model(
cfg_api_mode = None
if task:
try:
from hermes_cli.config import load_config
config = load_config()
except ImportError:
config = {}
aux = config.get("auxiliary", {}) if isinstance(config, dict) else {}
task_config = aux.get(task, {}) if isinstance(aux, dict) else {}
if not isinstance(task_config, dict):
task_config = {}
task_config = _get_auxiliary_task_config(task)
cfg_provider = str(task_config.get("provider", "")).strip() or None
cfg_model = str(task_config.get("model", "")).strip() or None
cfg_base_url = str(task_config.get("base_url", "")).strip() or None
@ -2237,17 +2464,25 @@ def _resolve_task_provider_model(
_DEFAULT_AUX_TIMEOUT = 30.0
def _get_task_timeout(task: str, default: float = _DEFAULT_AUX_TIMEOUT) -> float:
"""Read timeout from auxiliary.{task}.timeout in config, falling back to *default*."""
def _get_auxiliary_task_config(task: str) -> Dict[str, Any]:
"""Return the config dict for auxiliary.<task>, or {} when unavailable."""
if not task:
return default
return {}
try:
from hermes_cli.config import load_config
config = load_config()
except ImportError:
return default
return {}
aux = config.get("auxiliary", {}) if isinstance(config, dict) else {}
task_config = aux.get(task, {}) if isinstance(aux, dict) else {}
return task_config if isinstance(task_config, dict) else {}
def _get_task_timeout(task: str, default: float = _DEFAULT_AUX_TIMEOUT) -> float:
"""Read timeout from auxiliary.{task}.timeout in config, falling back to *default*."""
if not task:
return default
task_config = _get_auxiliary_task_config(task)
raw = task_config.get("timeout")
if raw is not None:
try:
@ -2257,6 +2492,15 @@ def _get_task_timeout(task: str, default: float = _DEFAULT_AUX_TIMEOUT) -> float
return default
def _get_task_extra_body(task: str) -> Dict[str, Any]:
    """Return a shallow copy of auxiliary.<task>.extra_body, or {} when unset/invalid."""
    raw = _get_auxiliary_task_config(task).get("extra_body")
    # Copy so callers can merge per-call overrides without mutating config.
    return dict(raw) if isinstance(raw, dict) else {}
# ---------------------------------------------------------------------------
# Anthropic-compatible endpoint detection + image block conversion
# ---------------------------------------------------------------------------
@ -2344,8 +2588,10 @@ def _build_call_kwargs(
"timeout": timeout,
}
fixed_temperature = _fixed_temperature_for_model(model)
if fixed_temperature is not None:
fixed_temperature = _fixed_temperature_for_model(model, base_url)
if fixed_temperature is OMIT_TEMPERATURE:
temperature = None # strip — let server choose
elif fixed_temperature is not None:
temperature = fixed_temperature
# Opus 4.7+ rejects any non-default temperature/top_p/top_k — silently
@ -2365,7 +2611,7 @@ def _build_call_kwargs(
# Direct OpenAI api.openai.com with newer models needs max_completion_tokens.
if provider == "custom":
custom_base = base_url or _current_custom_base_url()
if "api.openai.com" in custom_base.lower():
if base_url_hostname(custom_base) == "api.openai.com":
kwargs["max_completion_tokens"] = max_tokens
else:
kwargs["max_tokens"] = max_tokens
@ -2457,6 +2703,8 @@ def call_llm(
"""
resolved_provider, resolved_model, resolved_base_url, resolved_api_key, resolved_api_mode = _resolve_task_provider_model(
task, provider, model, base_url, api_key)
effective_extra_body = _get_task_extra_body(task)
effective_extra_body.update(extra_body or {})
if task == "vision":
effective_provider, client, final_model = resolve_vision_provider_client(
@ -2525,11 +2773,14 @@ def call_llm(
task, resolved_provider or "auto", final_model or "default",
f" at {_base_info}" if _base_info and "openrouter" not in _base_info else "")
# Pass the client's actual base_url (not just resolved_base_url) so
# endpoint-specific temperature overrides can distinguish
# api.moonshot.ai vs api.kimi.com/coding even on auto-detected routes.
kwargs = _build_call_kwargs(
resolved_provider, final_model, messages,
temperature=temperature, max_tokens=max_tokens,
tools=tools, timeout=effective_timeout, extra_body=extra_body,
base_url=resolved_base_url)
tools=tools, timeout=effective_timeout, extra_body=effective_extra_body,
base_url=_base_info or resolved_base_url)
# Convert image blocks for Anthropic-compatible endpoints (e.g. MiniMax)
_client_base = str(getattr(client, "base_url", "") or "")
@ -2555,6 +2806,29 @@ def call_llm(
raise
first_err = retry_err
# ── Nous auth refresh parity with main agent ──────────────────
client_is_nous = (
resolved_provider == "nous"
or base_url_host_matches(_base_info, "inference-api.nousresearch.com")
)
if _is_auth_error(first_err) and client_is_nous:
refreshed_client, refreshed_model = _refresh_nous_auxiliary_client(
cache_provider=resolved_provider or "nous",
model=final_model,
async_mode=False,
base_url=resolved_base_url,
api_key=resolved_api_key,
api_mode=resolved_api_mode,
main_runtime=main_runtime,
)
if refreshed_client is not None:
logger.info("Auxiliary %s: refreshed Nous runtime credentials after 401, retrying",
task or "call")
if refreshed_model and refreshed_model != kwargs.get("model"):
kwargs["model"] = refreshed_model
return _validate_llm_response(
refreshed_client.chat.completions.create(**kwargs), task)
# ── Payment / credit exhaustion fallback ──────────────────────
# When the resolved provider returns 402 or a credit-related error,
# try alternative providers instead of giving up. This handles the
@ -2583,7 +2857,8 @@ def call_llm(
fb_label, fb_model, messages,
temperature=temperature, max_tokens=max_tokens,
tools=tools, timeout=effective_timeout,
extra_body=extra_body)
extra_body=effective_extra_body,
base_url=str(getattr(fb_client, "base_url", "") or ""))
return _validate_llm_response(
fb_client.chat.completions.create(**fb_kwargs), task)
raise
@ -2665,6 +2940,8 @@ async def async_call_llm(
"""
resolved_provider, resolved_model, resolved_base_url, resolved_api_key, resolved_api_mode = _resolve_task_provider_model(
task, provider, model, base_url, api_key)
effective_extra_body = _get_task_extra_body(task)
effective_extra_body.update(extra_body or {})
if task == "vision":
effective_provider, client, final_model = resolve_vision_provider_client(
@ -2718,14 +2995,17 @@ async def async_call_llm(
effective_timeout = timeout if timeout is not None else _get_task_timeout(task)
# Pass the client's actual base_url (not just resolved_base_url) so
# endpoint-specific temperature overrides can distinguish
# api.moonshot.ai vs api.kimi.com/coding even on auto-detected routes.
_client_base = str(getattr(client, "base_url", "") or "")
kwargs = _build_call_kwargs(
resolved_provider, final_model, messages,
temperature=temperature, max_tokens=max_tokens,
tools=tools, timeout=effective_timeout, extra_body=extra_body,
base_url=resolved_base_url)
tools=tools, timeout=effective_timeout, extra_body=effective_extra_body,
base_url=_client_base or resolved_base_url)
# Convert image blocks for Anthropic-compatible endpoints (e.g. MiniMax)
_client_base = str(getattr(client, "base_url", "") or "")
if _is_anthropic_compat_endpoint(resolved_provider, _client_base):
kwargs["messages"] = _convert_openai_images_to_anthropic(kwargs["messages"])
@ -2747,6 +3027,28 @@ async def async_call_llm(
raise
first_err = retry_err
# ── Nous auth refresh parity with main agent ──────────────────
client_is_nous = (
resolved_provider == "nous"
or base_url_host_matches(_client_base, "inference-api.nousresearch.com")
)
if _is_auth_error(first_err) and client_is_nous:
refreshed_client, refreshed_model = _refresh_nous_auxiliary_client(
cache_provider=resolved_provider or "nous",
model=final_model,
async_mode=True,
base_url=resolved_base_url,
api_key=resolved_api_key,
api_mode=resolved_api_mode,
)
if refreshed_client is not None:
logger.info("Auxiliary %s (async): refreshed Nous runtime credentials after 401, retrying",
task or "call")
if refreshed_model and refreshed_model != kwargs.get("model"):
kwargs["model"] = refreshed_model
return _validate_llm_response(
await refreshed_client.chat.completions.create(**kwargs), task)
# ── Payment / connection fallback (mirrors sync call_llm) ─────
should_fallback = _is_payment_error(first_err) or _is_connection_error(first_err)
is_auto = resolved_provider in ("auto", "", None)
@ -2761,7 +3063,8 @@ async def async_call_llm(
fb_label, fb_model, messages,
temperature=temperature, max_tokens=max_tokens,
tools=tools, timeout=effective_timeout,
extra_body=extra_body)
extra_body=effective_extra_body,
base_url=str(getattr(fb_client, "base_url", "") or ""))
# Convert sync fallback client to async
async_fb, async_fb_model = _to_async_client(fb_client, fb_model or "")
if async_fb_model and async_fb_model != fb_kwargs.get("model"):

View file

@ -0,0 +1,813 @@
"""Codex Responses API adapter.
Pure format-conversion and normalization logic for the OpenAI Responses API
(used by OpenAI Codex, xAI, GitHub Models, and other Responses-compatible endpoints).
Extracted from run_agent.py to isolate Responses API-specific logic from the
core agent loop. All functions are stateless they operate on the data passed
in and return transformed results.
"""
from __future__ import annotations
import hashlib
import json
import logging
import re
import uuid
from types import SimpleNamespace
from typing import Any, Dict, List, Optional
from agent.prompt_builder import DEFAULT_AGENT_IDENTITY
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Multimodal content helpers
# ---------------------------------------------------------------------------
def _chat_content_to_responses_parts(content: Any) -> List[Dict[str, Any]]:
"""Convert chat-style multimodal content to Responses API input parts.
Input: ``[{"type":"text"|"image_url", ...}]`` (native OpenAI Chat format)
Output: ``[{"type":"input_text"|"input_image", ...}]`` (Responses format)
Returns an empty list when ``content`` is not a list or contains no
recognized parts callers fall back to the string path.
"""
if not isinstance(content, list):
return []
converted: List[Dict[str, Any]] = []
for part in content:
if isinstance(part, str):
if part:
converted.append({"type": "input_text", "text": part})
continue
if not isinstance(part, dict):
continue
ptype = str(part.get("type") or "").strip().lower()
if ptype in {"text", "input_text", "output_text"}:
text = part.get("text")
if isinstance(text, str) and text:
converted.append({"type": "input_text", "text": text})
continue
if ptype in {"image_url", "input_image"}:
image_ref = part.get("image_url")
detail = part.get("detail")
if isinstance(image_ref, dict):
url = image_ref.get("url")
detail = image_ref.get("detail", detail)
else:
url = image_ref
if not isinstance(url, str) or not url:
continue
image_part: Dict[str, Any] = {"type": "input_image", "image_url": url}
if isinstance(detail, str) and detail.strip():
image_part["detail"] = detail.strip()
converted.append(image_part)
return converted
def _summarize_user_message_for_log(content: Any) -> str:
"""Return a short text summary of a user message for logging/trajectory.
Multimodal messages arrive as a list of ``{type:"text"|"image_url", ...}``
parts from the API server. Logging, spinner previews, and trajectory
files all want a plain string this helper extracts the first chunk of
text and notes any attached images. Returns an empty string for empty
lists and ``str(content)`` for unexpected scalar types.
"""
if content is None:
return ""
if isinstance(content, str):
return content
if isinstance(content, list):
text_bits: List[str] = []
image_count = 0
for part in content:
if isinstance(part, str):
if part:
text_bits.append(part)
continue
if not isinstance(part, dict):
continue
ptype = str(part.get("type") or "").strip().lower()
if ptype in {"text", "input_text", "output_text"}:
text = part.get("text")
if isinstance(text, str) and text:
text_bits.append(text)
elif ptype in {"image_url", "input_image"}:
image_count += 1
summary = " ".join(text_bits).strip()
if image_count:
note = f"[{image_count} image{'s' if image_count != 1 else ''}]"
summary = f"{note} {summary}" if summary else note
return summary
try:
return str(content)
except Exception:
return ""
# ---------------------------------------------------------------------------
# ID helpers
# ---------------------------------------------------------------------------
def _deterministic_call_id(fn_name: str, arguments: str, index: int = 0) -> str:
"""Generate a deterministic call_id from tool call content.
Used as a fallback when the API doesn't provide a call_id.
Deterministic IDs prevent cache invalidation random UUIDs would
make every API call's prefix unique, breaking OpenAI's prompt cache.
"""
seed = f"{fn_name}:{arguments}:{index}"
digest = hashlib.sha256(seed.encode("utf-8", errors="replace")).hexdigest()[:12]
return f"call_{digest}"
def _split_responses_tool_id(raw_id: Any) -> tuple[Optional[str], Optional[str]]:
"""Split a stored tool id into (call_id, response_item_id)."""
if not isinstance(raw_id, str):
return None, None
value = raw_id.strip()
if not value:
return None, None
if "|" in value:
call_id, response_item_id = value.split("|", 1)
call_id = call_id.strip() or None
response_item_id = response_item_id.strip() or None
return call_id, response_item_id
if value.startswith("fc_"):
return None, value
return value, None
def _derive_responses_function_call_id(
call_id: str,
response_item_id: Optional[str] = None,
) -> str:
"""Build a valid Responses `function_call.id` (must start with `fc_`)."""
if isinstance(response_item_id, str):
candidate = response_item_id.strip()
if candidate.startswith("fc_"):
return candidate
source = (call_id or "").strip()
if source.startswith("fc_"):
return source
if source.startswith("call_") and len(source) > len("call_"):
return f"fc_{source[len('call_'):]}"
sanitized = re.sub(r"[^A-Za-z0-9_-]", "", source)
if sanitized.startswith("fc_"):
return sanitized
if sanitized.startswith("call_") and len(sanitized) > len("call_"):
return f"fc_{sanitized[len('call_'):]}"
if sanitized:
return f"fc_{sanitized[:48]}"
seed = source or str(response_item_id or "") or uuid.uuid4().hex
digest = hashlib.sha1(seed.encode("utf-8")).hexdigest()[:24]
return f"fc_{digest}"
# ---------------------------------------------------------------------------
# Schema conversion
# ---------------------------------------------------------------------------
def _responses_tools(tools: Optional[List[Dict[str, Any]]] = None) -> Optional[List[Dict[str, Any]]]:
"""Convert chat-completions tool schemas to Responses function-tool schemas."""
if not tools:
return None
converted: List[Dict[str, Any]] = []
for item in tools:
fn = item.get("function", {}) if isinstance(item, dict) else {}
name = fn.get("name")
if not isinstance(name, str) or not name.strip():
continue
converted.append({
"type": "function",
"name": name,
"description": fn.get("description", ""),
"strict": False,
"parameters": fn.get("parameters", {"type": "object", "properties": {}}),
})
return converted or None
# ---------------------------------------------------------------------------
# Message format conversion
# ---------------------------------------------------------------------------
def _chat_messages_to_responses_input(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Convert internal chat-style messages to Responses input items.

    Mapping:
      * system messages are dropped (instructions travel separately in the
        Responses request);
      * user/assistant messages become role items, with multimodal list
        content converted via ``_chat_content_to_responses_parts``;
      * assistant ``codex_reasoning_items`` with encrypted content are
        replayed (minus their ids) so reasoning chains stay coherent;
      * assistant ``tool_calls`` become ``function_call`` items;
      * tool messages become ``function_call_output`` items.
    """
    items: List[Dict[str, Any]] = []
    seen_item_ids: set = set()  # dedupes replayed reasoning items across messages
    for msg in messages:
        if not isinstance(msg, dict):
            continue
        role = msg.get("role")
        if role == "system":
            # System prompt is carried by the request's instructions field,
            # not by the input list.
            continue
        if role in {"user", "assistant"}:
            content = msg.get("content", "")
            if isinstance(content, list):
                content_parts = _chat_content_to_responses_parts(content)
                # Plain-text fallback: concatenation of just the text parts.
                content_text = "".join(
                    p.get("text", "") for p in content_parts if p.get("type") == "input_text"
                )
            else:
                content_parts = []
                content_text = str(content) if content is not None else ""
            if role == "assistant":
                # Replay encrypted reasoning items from previous turns
                # so the API can maintain coherent reasoning chains.
                codex_reasoning = msg.get("codex_reasoning_items")
                has_codex_reasoning = False
                if isinstance(codex_reasoning, list):
                    for ri in codex_reasoning:
                        if isinstance(ri, dict) and ri.get("encrypted_content"):
                            item_id = ri.get("id")
                            if item_id and item_id in seen_item_ids:
                                continue
                            # Strip the "id" field — with store=False the
                            # Responses API cannot look up items by ID and
                            # returns 404. The encrypted_content blob is
                            # self-contained for reasoning chain continuity.
                            replay_item = {k: v for k, v in ri.items() if k != "id"}
                            items.append(replay_item)
                            if item_id:
                                seen_item_ids.add(item_id)
                            has_codex_reasoning = True
                if content_parts:
                    items.append({"role": "assistant", "content": content_parts})
                elif content_text.strip():
                    items.append({"role": "assistant", "content": content_text})
                elif has_codex_reasoning:
                    # The Responses API requires a following item after each
                    # reasoning item (otherwise: missing_following_item error).
                    # When the assistant produced only reasoning with no visible
                    # content, emit an empty assistant message as the required
                    # following item.
                    items.append({"role": "assistant", "content": ""})
                tool_calls = msg.get("tool_calls")
                if isinstance(tool_calls, list):
                    for tc in tool_calls:
                        if not isinstance(tc, dict):
                            continue
                        fn = tc.get("function", {})
                        fn_name = fn.get("name")
                        if not isinstance(fn_name, str) or not fn_name.strip():
                            continue
                        # Stored ids may encode "call_id|response_item_id".
                        embedded_call_id, embedded_response_item_id = _split_responses_tool_id(
                            tc.get("id")
                        )
                        call_id = tc.get("call_id")
                        if not isinstance(call_id, str) or not call_id.strip():
                            call_id = embedded_call_id
                        if not isinstance(call_id, str) or not call_id.strip():
                            if (
                                isinstance(embedded_response_item_id, str)
                                and embedded_response_item_id.startswith("fc_")
                                and len(embedded_response_item_id) > len("fc_")
                            ):
                                # Recover a call_* id from the fc_* item id.
                                call_id = f"call_{embedded_response_item_id[len('fc_'):]}"
                            else:
                                # Last resort: deterministic content hash keeps
                                # the request prefix stable for prompt caching.
                                _raw_args = str(fn.get("arguments", "{}"))
                                call_id = _deterministic_call_id(fn_name, _raw_args, len(items))
                        call_id = call_id.strip()
                        # Arguments must be a JSON string on the wire.
                        arguments = fn.get("arguments", "{}")
                        if isinstance(arguments, dict):
                            arguments = json.dumps(arguments, ensure_ascii=False)
                        elif not isinstance(arguments, str):
                            arguments = str(arguments)
                        arguments = arguments.strip() or "{}"
                        items.append({
                            "type": "function_call",
                            "call_id": call_id,
                            "name": fn_name,
                            "arguments": arguments,
                        })
                continue
            # Non-assistant (user) role: emit multimodal parts when present,
            # otherwise fall back to the text payload.
            if content_parts:
                items.append({"role": role, "content": content_parts})
            else:
                items.append({"role": role, "content": content_text})
            continue
        if role == "tool":
            raw_tool_call_id = msg.get("tool_call_id")
            call_id, _ = _split_responses_tool_id(raw_tool_call_id)
            if not isinstance(call_id, str) or not call_id.strip():
                # Fall back to the raw id when the split yielded nothing usable.
                if isinstance(raw_tool_call_id, str) and raw_tool_call_id.strip():
                    call_id = raw_tool_call_id.strip()
            if not isinstance(call_id, str) or not call_id.strip():
                # No call_id at all — the output cannot be correlated; drop it.
                continue
            items.append({
                "type": "function_call_output",
                "call_id": call_id,
                "output": str(msg.get("content", "") or ""),
            })
    return items
# ---------------------------------------------------------------------------
# Input preflight / validation
# ---------------------------------------------------------------------------
def _preflight_codex_input_items(raw_items: Any) -> List[Dict[str, Any]]:
    """Validate and normalize Responses API input items before sending.

    Takes the item list produced by ``_chat_messages_to_responses_input``
    and returns a normalized copy, raising ``ValueError`` with an
    index-tagged message for any malformed item. Recognized shapes:
    ``function_call``, ``function_call_output``, ``reasoning`` (with
    encrypted content), and plain ``user``/``assistant`` role messages
    (string or multimodal list content).
    """
    if not isinstance(raw_items, list):
        raise ValueError("Codex Responses input must be a list of input items.")
    normalized: List[Dict[str, Any]] = []
    seen_ids: set = set()  # dedupes reasoning items by their original id
    for idx, item in enumerate(raw_items):
        if not isinstance(item, dict):
            raise ValueError(f"Codex Responses input[{idx}] must be an object.")
        item_type = item.get("type")
        if item_type == "function_call":
            call_id = item.get("call_id")
            name = item.get("name")
            if not isinstance(call_id, str) or not call_id.strip():
                raise ValueError(f"Codex Responses input[{idx}] function_call is missing call_id.")
            if not isinstance(name, str) or not name.strip():
                raise ValueError(f"Codex Responses input[{idx}] function_call is missing name.")
            # Arguments must be a JSON string on the wire; coerce dicts and
            # other scalars, defaulting to an empty JSON object.
            arguments = item.get("arguments", "{}")
            if isinstance(arguments, dict):
                arguments = json.dumps(arguments, ensure_ascii=False)
            elif not isinstance(arguments, str):
                arguments = str(arguments)
            arguments = arguments.strip() or "{}"
            normalized.append(
                {
                    "type": "function_call",
                    "call_id": call_id.strip(),
                    "name": name.strip(),
                    "arguments": arguments,
                }
            )
            continue
        if item_type == "function_call_output":
            call_id = item.get("call_id")
            if not isinstance(call_id, str) or not call_id.strip():
                raise ValueError(f"Codex Responses input[{idx}] function_call_output is missing call_id.")
            # Output is free text — coerce None and non-strings to str.
            output = item.get("output", "")
            if output is None:
                output = ""
            if not isinstance(output, str):
                output = str(output)
            normalized.append(
                {
                    "type": "function_call_output",
                    "call_id": call_id.strip(),
                    "output": output,
                }
            )
            continue
        if item_type == "reasoning":
            # Only reasoning items carrying an encrypted blob are forwarded;
            # a reasoning item without one falls through to the role check
            # below (and, lacking a role, to the unsupported-shape error).
            encrypted = item.get("encrypted_content")
            if isinstance(encrypted, str) and encrypted:
                item_id = item.get("id")
                if isinstance(item_id, str) and item_id:
                    if item_id in seen_ids:
                        continue
                    seen_ids.add(item_id)
                reasoning_item = {"type": "reasoning", "encrypted_content": encrypted}
                # Do NOT include the "id" in the outgoing item — with
                # store=False (our default) the API tries to resolve the
                # id server-side and returns 404. The id is still used
                # above for local deduplication via seen_ids.
                summary = item.get("summary")
                if isinstance(summary, list):
                    reasoning_item["summary"] = summary
                else:
                    reasoning_item["summary"] = []
                normalized.append(reasoning_item)
                continue
        role = item.get("role")
        if role in {"user", "assistant"}:
            content = item.get("content", "")
            if content is None:
                content = ""
            if isinstance(content, list):
                # Multimodal content from ``_chat_messages_to_responses_input``
                # is already in Responses format (``input_text`` / ``input_image``).
                # Validate each part and pass through.
                validated: List[Dict[str, Any]] = []
                for part_idx, part in enumerate(content):
                    if isinstance(part, str):
                        if part:
                            validated.append({"type": "input_text", "text": part})
                        continue
                    if not isinstance(part, dict):
                        raise ValueError(
                            f"Codex Responses input[{idx}].content[{part_idx}] must be an object or string."
                        )
                    ptype = str(part.get("type") or "").strip().lower()
                    if ptype in {"input_text", "text", "output_text"}:
                        text = part.get("text", "")
                        if not isinstance(text, str):
                            text = str(text or "")
                        validated.append({"type": "input_text", "text": text})
                    elif ptype in {"input_image", "image_url"}:
                        image_ref = part.get("image_url", "")
                        detail = part.get("detail")
                        if isinstance(image_ref, dict):
                            # Nested chat form: {"image_url": {"url": ...}}
                            url = image_ref.get("url", "")
                            detail = image_ref.get("detail", detail)
                        else:
                            url = image_ref
                        if not isinstance(url, str):
                            url = str(url or "")
                        image_part: Dict[str, Any] = {"type": "input_image", "image_url": url}
                        if isinstance(detail, str) and detail.strip():
                            image_part["detail"] = detail.strip()
                        validated.append(image_part)
                    else:
                        raise ValueError(
                            f"Codex Responses input[{idx}].content[{part_idx}] has unsupported type {part.get('type')!r}."
                        )
                normalized.append({"role": role, "content": validated})
                continue
            if not isinstance(content, str):
                content = str(content)
            normalized.append({"role": role, "content": content})
            continue
        raise ValueError(
            f"Codex Responses input[{idx}] has unsupported item shape (type={item_type!r}, role={role!r})."
        )
    return normalized
def _preflight_codex_api_kwargs(
    api_kwargs: Any,
    *,
    allow_stream: bool = False,
) -> Dict[str, Any]:
    """Validate and normalize a Codex Responses API request payload.

    Builds a fresh dict containing only the fields the Codex Responses
    contract supports, coercing lenient values (non-string instructions,
    numeric ``max_output_tokens``, etc.) into canonical shapes and
    raising ``ValueError`` for anything malformed or unsupported.

    Args:
        api_kwargs: Candidate request payload; must be a dict containing
            at least ``model``, ``instructions``, and ``input``.
        allow_stream: When True, a ``stream: true`` flag is permitted
            (used by the fallback streaming path); otherwise any
            ``stream`` key is rejected.

    Returns:
        A normalized request dict safe to send to the Responses API.

    Raises:
        ValueError: If a required field is missing, a field has the
            wrong shape, or an unsupported key is present.
    """
    if not isinstance(api_kwargs, dict):
        raise ValueError("Codex Responses request must be a dict.")
    required = {"model", "instructions", "input"}
    missing = [key for key in required if key not in api_kwargs]
    if missing:
        raise ValueError(f"Codex Responses request missing required field(s): {', '.join(sorted(missing))}.")
    model = api_kwargs.get("model")
    if not isinstance(model, str) or not model.strip():
        raise ValueError("Codex Responses request 'model' must be a non-empty string.")
    model = model.strip()
    instructions = api_kwargs.get("instructions")
    if instructions is None:
        instructions = ""
    if not isinstance(instructions, str):
        instructions = str(instructions)
    # Blank instructions fall back to the default agent identity so the
    # request never ships an empty system prompt.
    instructions = instructions.strip() or DEFAULT_AGENT_IDENTITY
    normalized_input = _preflight_codex_input_items(api_kwargs.get("input"))
    tools = api_kwargs.get("tools")
    normalized_tools = None
    if tools is not None:
        if not isinstance(tools, list):
            raise ValueError("Codex Responses request 'tools' must be a list when provided.")
        normalized_tools = []
        for idx, tool in enumerate(tools):
            if not isinstance(tool, dict):
                raise ValueError(f"Codex Responses tools[{idx}] must be an object.")
            # Only function tools are accepted; any other tool type is an error.
            if tool.get("type") != "function":
                raise ValueError(f"Codex Responses tools[{idx}] has unsupported type {tool.get('type')!r}.")
            name = tool.get("name")
            parameters = tool.get("parameters")
            if not isinstance(name, str) or not name.strip():
                raise ValueError(f"Codex Responses tools[{idx}] is missing a valid name.")
            if not isinstance(parameters, dict):
                raise ValueError(f"Codex Responses tools[{idx}] is missing valid parameters.")
            description = tool.get("description", "")
            if description is None:
                description = ""
            if not isinstance(description, str):
                description = str(description)
            strict = tool.get("strict", False)
            if not isinstance(strict, bool):
                strict = bool(strict)
            normalized_tools.append(
                {
                    "type": "function",
                    "name": name.strip(),
                    "description": description,
                    "strict": strict,
                    "parameters": parameters,
                }
            )
    # The contract forbids server-side conversation storage: 'store' may
    # only ever be (or default to) False.
    store = api_kwargs.get("store", False)
    if store is not False:
        raise ValueError("Codex Responses contract requires 'store' to be false.")
    allowed_keys = {
        "model", "instructions", "input", "tools", "store",
        "reasoning", "include", "max_output_tokens", "temperature",
        "tool_choice", "parallel_tool_calls", "prompt_cache_key", "service_tier",
        "extra_headers",
    }
    normalized: Dict[str, Any] = {
        "model": model,
        "instructions": instructions,
        "input": normalized_input,
        "store": False,
    }
    if normalized_tools is not None:
        normalized["tools"] = normalized_tools
    # Pass through reasoning config (silently dropped when not a dict).
    reasoning = api_kwargs.get("reasoning")
    if isinstance(reasoning, dict):
        normalized["reasoning"] = reasoning
    include = api_kwargs.get("include")
    if isinstance(include, list):
        normalized["include"] = include
    service_tier = api_kwargs.get("service_tier")
    if isinstance(service_tier, str) and service_tier.strip():
        normalized["service_tier"] = service_tier.strip()
    # Pass through max_output_tokens and temperature, coercing to int/float.
    max_output_tokens = api_kwargs.get("max_output_tokens")
    if isinstance(max_output_tokens, (int, float)) and max_output_tokens > 0:
        normalized["max_output_tokens"] = int(max_output_tokens)
    temperature = api_kwargs.get("temperature")
    if isinstance(temperature, (int, float)):
        normalized["temperature"] = float(temperature)
    # Pass through tool_choice, parallel_tool_calls, prompt_cache_key verbatim.
    for passthrough_key in ("tool_choice", "parallel_tool_calls", "prompt_cache_key"):
        val = api_kwargs.get(passthrough_key)
        if val is not None:
            normalized[passthrough_key] = val
    extra_headers = api_kwargs.get("extra_headers")
    if extra_headers is not None:
        if not isinstance(extra_headers, dict):
            raise ValueError("Codex Responses request 'extra_headers' must be an object.")
        normalized_headers: Dict[str, str] = {}
        for key, value in extra_headers.items():
            if not isinstance(key, str) or not key.strip():
                raise ValueError("Codex Responses request 'extra_headers' keys must be non-empty strings.")
            if value is None:
                # A None value means "omit this header", not an error.
                continue
            normalized_headers[key.strip()] = str(value)
        if normalized_headers:
            normalized["extra_headers"] = normalized_headers
    if allow_stream:
        stream = api_kwargs.get("stream")
        if stream is not None and stream is not True:
            raise ValueError("Codex Responses 'stream' must be true when set.")
        if stream is True:
            normalized["stream"] = True
        allowed_keys.add("stream")
    elif "stream" in api_kwargs:
        raise ValueError("Codex Responses stream flag is only allowed in fallback streaming requests.")
    # Reject anything outside the explicit allow-list so typos and
    # unsupported options fail loudly instead of being silently dropped.
    unexpected = sorted(key for key in api_kwargs if key not in allowed_keys)
    if unexpected:
        raise ValueError(
            f"Codex Responses request has unsupported field(s): {', '.join(unexpected)}."
        )
    return normalized
# ---------------------------------------------------------------------------
# Response extraction helpers
# ---------------------------------------------------------------------------
def _extract_responses_message_text(item: Any) -> str:
"""Extract assistant text from a Responses message output item."""
content = getattr(item, "content", None)
if not isinstance(content, list):
return ""
chunks: List[str] = []
for part in content:
ptype = getattr(part, "type", None)
if ptype not in {"output_text", "text"}:
continue
text = getattr(part, "text", None)
if isinstance(text, str) and text:
chunks.append(text)
return "".join(chunks).strip()
def _extract_responses_reasoning_text(item: Any) -> str:
"""Extract a compact reasoning text from a Responses reasoning item."""
summary = getattr(item, "summary", None)
if isinstance(summary, list):
chunks: List[str] = []
for part in summary:
text = getattr(part, "text", None)
if isinstance(text, str) and text:
chunks.append(text)
if chunks:
return "\n".join(chunks).strip()
text = getattr(item, "text", None)
if isinstance(text, str) and text:
return text.strip()
return ""
# ---------------------------------------------------------------------------
# Full response normalization
# ---------------------------------------------------------------------------
def _normalize_codex_response(response: Any) -> tuple[Any, str]:
    """Normalize a Responses API object to an assistant_message-like object.

    Returns a ``(assistant_message, finish_reason)`` pair where
    ``assistant_message`` is a ``SimpleNamespace`` mirroring the Chat
    Completions message shape (``content`` / ``tool_calls`` /
    ``reasoning`` fields plus ``codex_reasoning_items`` for replay) and
    ``finish_reason`` is one of ``"tool_calls"``, ``"incomplete"``, or
    ``"stop"``.

    Raises:
        RuntimeError: If the response has no usable output at all, or
            its status is ``failed`` / ``cancelled``.
    """
    output = getattr(response, "output", None)
    if not isinstance(output, list) or not output:
        # The Codex backend can return empty output when the answer was
        # delivered entirely via stream events. Check output_text as a
        # last-resort fallback before raising.
        out_text = getattr(response, "output_text", None)
        if isinstance(out_text, str) and out_text.strip():
            logger.debug(
                "Codex response has empty output but output_text is present (%d chars); "
                "synthesizing output item.", len(out_text.strip()),
            )
            # Fabricate a single completed message item so the loop below
            # can process the text through the normal path.
            output = [SimpleNamespace(
                type="message", role="assistant", status="completed",
                content=[SimpleNamespace(type="output_text", text=out_text.strip())],
            )]
            response.output = output
        else:
            raise RuntimeError("Responses API returned no output items")
    # Normalize the top-level status to a lowercase string (or None).
    response_status = getattr(response, "status", None)
    if isinstance(response_status, str):
        response_status = response_status.strip().lower()
    else:
        response_status = None
    if response_status in {"failed", "cancelled"}:
        error_obj = getattr(response, "error", None)
        if isinstance(error_obj, dict):
            error_msg = error_obj.get("message") or str(error_obj)
        else:
            error_msg = str(error_obj) if error_obj else f"Responses API returned status '{response_status}'"
        raise RuntimeError(error_msg)
    content_parts: List[str] = []            # visible assistant text chunks
    reasoning_parts: List[str] = []          # human-readable reasoning text
    reasoning_items_raw: List[Dict[str, Any]] = []  # replayable reasoning items
    tool_calls: List[Any] = []
    # A non-terminal top-level status already marks the response incomplete.
    has_incomplete_items = response_status in {"queued", "in_progress", "incomplete"}
    saw_commentary_phase = False
    saw_final_answer_phase = False
    for item in output:
        item_type = getattr(item, "type", None)
        # Normalize per-item status the same way as the top-level status.
        item_status = getattr(item, "status", None)
        if isinstance(item_status, str):
            item_status = item_status.strip().lower()
        else:
            item_status = None
        if item_status in {"queued", "in_progress", "incomplete"}:
            has_incomplete_items = True
        if item_type == "message":
            # Track message phases: a commentary/analysis phase without a
            # matching final answer means the model isn't done yet.
            item_phase = getattr(item, "phase", None)
            if isinstance(item_phase, str):
                normalized_phase = item_phase.strip().lower()
                if normalized_phase in {"commentary", "analysis"}:
                    saw_commentary_phase = True
                elif normalized_phase in {"final_answer", "final"}:
                    saw_final_answer_phase = True
            message_text = _extract_responses_message_text(item)
            if message_text:
                content_parts.append(message_text)
        elif item_type == "reasoning":
            reasoning_text = _extract_responses_reasoning_text(item)
            if reasoning_text:
                reasoning_parts.append(reasoning_text)
            # Capture the full reasoning item for multi-turn continuity.
            # encrypted_content is an opaque blob the API needs back on
            # subsequent turns to maintain coherent reasoning chains.
            encrypted = getattr(item, "encrypted_content", None)
            if isinstance(encrypted, str) and encrypted:
                raw_item = {"type": "reasoning", "encrypted_content": encrypted}
                item_id = getattr(item, "id", None)
                if isinstance(item_id, str) and item_id:
                    raw_item["id"] = item_id
                # Capture summary — required by the API when replaying reasoning items
                summary = getattr(item, "summary", None)
                if isinstance(summary, list):
                    raw_summary = []
                    for part in summary:
                        text = getattr(part, "text", None)
                        if isinstance(text, str):
                            raw_summary.append({"type": "summary_text", "text": text})
                    raw_item["summary"] = raw_summary
                reasoning_items_raw.append(raw_item)
        elif item_type == "function_call":
            # Skip calls that are not yet complete; they'll arrive later.
            if item_status in {"queued", "in_progress", "incomplete"}:
                continue
            fn_name = getattr(item, "name", "") or ""
            arguments = getattr(item, "arguments", "{}")
            if not isinstance(arguments, str):
                arguments = json.dumps(arguments, ensure_ascii=False)
            # Resolve a stable call id: prefer the explicit call_id, then
            # the id embedded in the item id, then a deterministic synth.
            raw_call_id = getattr(item, "call_id", None)
            raw_item_id = getattr(item, "id", None)
            embedded_call_id, _ = _split_responses_tool_id(raw_item_id)
            call_id = raw_call_id if isinstance(raw_call_id, str) and raw_call_id.strip() else embedded_call_id
            if not isinstance(call_id, str) or not call_id.strip():
                call_id = _deterministic_call_id(fn_name, arguments, len(tool_calls))
            call_id = call_id.strip()
            response_item_id = raw_item_id if isinstance(raw_item_id, str) else None
            response_item_id = _derive_responses_function_call_id(call_id, response_item_id)
            tool_calls.append(SimpleNamespace(
                id=call_id,
                call_id=call_id,
                response_item_id=response_item_id,
                type="function",
                function=SimpleNamespace(name=fn_name, arguments=arguments),
            ))
        elif item_type == "custom_tool_call":
            # Same shape as function_call but the payload lives in `input`
            # (note: incomplete custom calls are NOT skipped here).
            fn_name = getattr(item, "name", "") or ""
            arguments = getattr(item, "input", "{}")
            if not isinstance(arguments, str):
                arguments = json.dumps(arguments, ensure_ascii=False)
            raw_call_id = getattr(item, "call_id", None)
            raw_item_id = getattr(item, "id", None)
            embedded_call_id, _ = _split_responses_tool_id(raw_item_id)
            call_id = raw_call_id if isinstance(raw_call_id, str) and raw_call_id.strip() else embedded_call_id
            if not isinstance(call_id, str) or not call_id.strip():
                call_id = _deterministic_call_id(fn_name, arguments, len(tool_calls))
            call_id = call_id.strip()
            response_item_id = raw_item_id if isinstance(raw_item_id, str) else None
            response_item_id = _derive_responses_function_call_id(call_id, response_item_id)
            tool_calls.append(SimpleNamespace(
                id=call_id,
                call_id=call_id,
                response_item_id=response_item_id,
                type="function",
                function=SimpleNamespace(name=fn_name, arguments=arguments),
            ))
    final_text = "\n".join([p for p in content_parts if p]).strip()
    # Last-resort text source when no message items carried content.
    if not final_text and hasattr(response, "output_text"):
        out_text = getattr(response, "output_text", "")
        if isinstance(out_text, str):
            final_text = out_text.strip()
    assistant_message = SimpleNamespace(
        content=final_text,
        tool_calls=tool_calls,
        reasoning="\n\n".join(reasoning_parts).strip() if reasoning_parts else None,
        reasoning_content=None,
        reasoning_details=None,
        codex_reasoning_items=reasoning_items_raw or None,
    )
    if tool_calls:
        finish_reason = "tool_calls"
    elif has_incomplete_items or (saw_commentary_phase and not saw_final_answer_phase):
        finish_reason = "incomplete"
    elif reasoning_items_raw and not final_text:
        # Response contains only reasoning (encrypted thinking state) with
        # no visible content or tool calls. The model is still thinking and
        # needs another turn to produce the actual answer. Marking this as
        # "stop" would send it into the empty-content retry loop which burns
        # 3 retries then fails — treat it as incomplete instead so the Codex
        # continuation path handles it correctly.
        finish_reason = "incomplete"
    else:
        finish_reason = "stop"
    return assistant_message, finish_reason

View file

@ -31,6 +31,7 @@ from agent.model_metadata import (
get_model_context_length,
estimate_messages_tokens_rough,
)
from agent.redact import redact_sensitive_text
logger = logging.getLogger(__name__)
@ -550,11 +551,15 @@ class ContextCompressor(ContextEngine):
Includes tool call arguments and result content (up to
``_CONTENT_MAX`` chars per message) so the summarizer can preserve
specific details like file paths, commands, and outputs.
All content is redacted before serialization to prevent secrets
(API keys, tokens, passwords) from leaking into the summary that
gets sent to the auxiliary model and persisted across compactions.
"""
parts = []
for msg in turns:
role = msg.get("role", "unknown")
content = msg.get("content") or ""
content = redact_sensitive_text(msg.get("content") or "")
# Tool results: keep enough content for the summarizer
if role == "tool":
@ -575,7 +580,7 @@ class ContextCompressor(ContextEngine):
if isinstance(tc, dict):
fn = tc.get("function", {})
name = fn.get("name", "?")
args = fn.get("arguments", "")
args = redact_sensitive_text(fn.get("arguments", ""))
# Truncate long arguments but keep enough for context
if len(args) > self._TOOL_ARGS_MAX:
args = args[:self._TOOL_ARGS_HEAD] + "..."
@ -633,7 +638,13 @@ class ContextCompressor(ContextEngine):
"assistant that continues the conversation. "
"Do NOT respond to any questions or requests in the conversation — "
"only output the structured summary. "
"Do NOT include any preamble, greeting, or prefix."
"Do NOT include any preamble, greeting, or prefix. "
"Write the summary in the same language the user was using in the "
"conversation — do not translate or switch to English. "
"NEVER include API keys, tokens, passwords, secrets, credentials, "
"or connection strings in the summary — replace any that appear "
"with [REDACTED]. Note that the user had credentials present, but "
"do not preserve their values."
)
# Shared structured template (used by both paths).
@ -690,7 +701,7 @@ Be specific with file paths, commands, line numbers, and results.]
[What remains to be done framed as context, not instructions]
## Critical Context
[Any specific values, error messages, configuration details, or data that would be lost without explicit preservation]
[Any specific values, error messages, configuration details, or data that would be lost without explicit preservation. NEVER include API keys, tokens, passwords, or credentials write [REDACTED] instead.]
Target ~{summary_budget} tokens. Be CONCRETE include file paths, command outputs, error messages, line numbers, and specific values. Avoid vague descriptions like "made some changes" say exactly what changed.
@ -730,7 +741,7 @@ Use this exact structure:
prompt += f"""
FOCUS TOPIC: "{focus_topic}"
The user has requested that this compaction PRIORITISE preserving all information related to the focus topic above. For content related to "{focus_topic}", include full detail exact values, file paths, command outputs, error messages, and decisions. For content NOT related to the focus topic, summarise more aggressively (brief one-liners or omit if truly irrelevant). The focus topic sections should receive roughly 60-70% of the summary token budget."""
The user has requested that this compaction PRIORITISE preserving all information related to the focus topic above. For content related to "{focus_topic}", include full detail exact values, file paths, command outputs, error messages, and decisions. For content NOT related to the focus topic, summarise more aggressively (brief one-liners or omit if truly irrelevant). The focus topic sections should receive roughly 60-70% of the summary token budget. Even for the focus topic, NEVER preserve API keys, tokens, passwords, or credentials use [REDACTED]."""
try:
call_kwargs = {
@ -753,7 +764,9 @@ The user has requested that this compaction PRIORITISE preserving all informatio
# Handle cases where content is not a string (e.g., dict from llama.cpp)
if not isinstance(content, str):
content = str(content) if content else ""
summary = content.strip()
# Redact the summary output as well — the summarizer LLM may
# ignore prompt instructions and echo back secrets verbatim.
summary = redact_sensitive_text(content.strip())
# Store for iterative updates on next compaction
self._previous_summary = summary
self._summary_failure_cooldown_until = 0.0
@ -794,7 +807,7 @@ The user has requested that this compaction PRIORITISE preserving all informatio
)
self.summary_model = "" # empty = use main model
self._summary_failure_cooldown_until = 0.0 # no cooldown
return self._generate_summary(messages, summary_budget) # retry immediately
return self._generate_summary(turns_to_summarize) # retry immediately
# Transient errors (timeout, rate limit, network) — shorter cooldown
_transient_cooldown = 60

View file

@ -483,9 +483,7 @@ def _rg_files(path: Path, cwd: Path, limit: int) -> list[Path] | None:
text=True,
timeout=10,
)
except FileNotFoundError:
return None
except subprocess.TimeoutExpired:
except (FileNotFoundError, OSError, subprocess.TimeoutExpired):
return None
if result.returncode != 0:
return None

View file

@ -21,6 +21,9 @@ from pathlib import Path
from types import SimpleNamespace
from typing import Any
from agent.file_safety import get_read_block_error, is_write_denied
from agent.redact import redact_sensitive_text
ACP_MARKER_BASE_URL = "acp://copilot"
_DEFAULT_TIMEOUT_SECONDS = 900.0
@ -54,6 +57,18 @@ def _jsonrpc_error(message_id: Any, code: int, message: str) -> dict[str, Any]:
}
def _permission_denied(message_id: Any) -> dict[str, Any]:
return {
"jsonrpc": "2.0",
"id": message_id,
"result": {
"outcome": {
"outcome": "cancelled",
}
},
}
def _format_messages_as_prompt(
messages: list[dict[str, Any]],
model: str | None = None,
@ -386,6 +401,8 @@ class CopilotACPClient:
stderr_tail: deque[str] = deque(maxlen=40)
def _stdout_reader() -> None:
if proc.stdout is None:
return
for line in proc.stdout:
try:
inbox.put(json.loads(line))
@ -533,18 +550,13 @@ class CopilotACPClient:
params = msg.get("params") or {}
if method == "session/request_permission":
response = {
"jsonrpc": "2.0",
"id": message_id,
"result": {
"outcome": {
"outcome": "allow_once",
}
},
}
response = _permission_denied(message_id)
elif method == "fs/read_text_file":
try:
path = _ensure_path_within_cwd(str(params.get("path") or ""), cwd)
block_error = get_read_block_error(str(path))
if block_error:
raise PermissionError(block_error)
content = path.read_text() if path.exists() else ""
line = params.get("line")
limit = params.get("limit")
@ -553,6 +565,8 @@ class CopilotACPClient:
start = line - 1
end = start + limit if isinstance(limit, int) and limit > 0 else None
content = "".join(lines[start:end])
if content:
content = redact_sensitive_text(content)
response = {
"jsonrpc": "2.0",
"id": message_id,
@ -565,6 +579,10 @@ class CopilotACPClient:
elif method == "fs/write_text_file":
try:
path = _ensure_path_within_cwd(str(params.get("path") or ""), cwd)
if is_write_denied(str(path)):
raise PermissionError(
f"Write denied: '{path}' is a protected system/credential file."
)
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(str(params.get("content") or ""))
response = {

View file

@ -983,6 +983,14 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
active_sources: Set[str] = set()
auth_store = _load_auth_store()
# Shared suppression gate — used at every upsert site so
# `hermes auth remove <provider> <N>` is stable across all source types.
try:
from hermes_cli.auth import is_source_suppressed as _is_suppressed
except ImportError:
def _is_suppressed(_p, _s): # type: ignore[misc]
return False
if provider == "anthropic":
# Only auto-discover external credentials (Claude Code, Hermes PKCE)
# when the user has explicitly configured anthropic as their provider.
@ -1002,13 +1010,8 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
("claude_code", read_claude_code_credentials()),
):
if creds and creds.get("accessToken"):
# Check if user explicitly removed this source
try:
from hermes_cli.auth import is_source_suppressed
if is_source_suppressed(provider, source_name):
continue
except ImportError:
pass
if _is_suppressed(provider, source_name):
continue
active_sources.add(source_name)
changed |= _upsert_entry(
entries,
@ -1026,7 +1029,7 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
elif provider == "nous":
state = _load_provider_state(auth_store, "nous")
if state:
if state and not _is_suppressed(provider, "device_code"):
active_sources.add("device_code")
# Prefer a user-supplied label embedded in the singleton state
# (set by persist_nous_credentials(label=...) when the user ran
@ -1067,20 +1070,21 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
token, source = resolve_copilot_token()
if token:
source_name = "gh_cli" if "gh" in source.lower() else f"env:{source}"
active_sources.add(source_name)
pconfig = PROVIDER_REGISTRY.get(provider)
changed |= _upsert_entry(
entries,
provider,
source_name,
{
"source": source_name,
"auth_type": AUTH_TYPE_API_KEY,
"access_token": token,
"base_url": pconfig.inference_base_url if pconfig else "",
"label": source,
},
)
if not _is_suppressed(provider, source_name):
active_sources.add(source_name)
pconfig = PROVIDER_REGISTRY.get(provider)
changed |= _upsert_entry(
entries,
provider,
source_name,
{
"source": source_name,
"auth_type": AUTH_TYPE_API_KEY,
"access_token": token,
"base_url": pconfig.inference_base_url if pconfig else "",
"label": source,
},
)
except Exception as exc:
logger.debug("Copilot token seed failed: %s", exc)
@ -1096,20 +1100,21 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
token = creds.get("api_key", "")
if token:
source_name = creds.get("source", "qwen-cli")
active_sources.add(source_name)
changed |= _upsert_entry(
entries,
provider,
source_name,
{
"source": source_name,
"auth_type": AUTH_TYPE_OAUTH,
"access_token": token,
"expires_at_ms": creds.get("expires_at_ms"),
"base_url": creds.get("base_url", ""),
"label": creds.get("auth_file", source_name),
},
)
if not _is_suppressed(provider, source_name):
active_sources.add(source_name)
changed |= _upsert_entry(
entries,
provider,
source_name,
{
"source": source_name,
"auth_type": AUTH_TYPE_OAUTH,
"access_token": token,
"expires_at_ms": creds.get("expires_at_ms"),
"base_url": creds.get("base_url", ""),
"label": creds.get("auth_file", source_name),
},
)
except Exception as exc:
logger.debug("Qwen OAuth token seed failed: %s", exc)
@ -1118,13 +1123,7 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
# the device_code source as suppressed so it won't be re-seeded from
# the Hermes auth store. Without this gate the removal is instantly
# undone on the next load_pool() call.
codex_suppressed = False
try:
from hermes_cli.auth import is_source_suppressed
codex_suppressed = is_source_suppressed(provider, "device_code")
except ImportError:
pass
if codex_suppressed:
if _is_suppressed(provider, "device_code"):
return changed, active_sources
state = _load_provider_state(auth_store, "openai-codex")
@ -1158,10 +1157,22 @@ def _seed_from_singletons(provider: str, entries: List[PooledCredential]) -> Tup
def _seed_from_env(provider: str, entries: List[PooledCredential]) -> Tuple[bool, Set[str]]:
changed = False
active_sources: Set[str] = set()
# Honour user suppression — `hermes auth remove <provider> <N>` for an
# env-seeded credential marks the env:<VAR> source as suppressed so it
# won't be re-seeded from the user's shell environment or ~/.hermes/.env.
# Without this gate the removal is silently undone on the next
# load_pool() call whenever the var is still exported by the shell.
try:
from hermes_cli.auth import is_source_suppressed as _is_source_suppressed
except ImportError:
def _is_source_suppressed(_p, _s): # type: ignore[misc]
return False
if provider == "openrouter":
token = os.getenv("OPENROUTER_API_KEY", "").strip()
if token:
source = "env:OPENROUTER_API_KEY"
if _is_source_suppressed(provider, source):
return changed, active_sources
active_sources.add(source)
changed |= _upsert_entry(
entries,
@ -1198,6 +1209,8 @@ def _seed_from_env(provider: str, entries: List[PooledCredential]) -> Tuple[bool
if not token:
continue
source = f"env:{env_var}"
if _is_source_suppressed(provider, source):
continue
active_sources.add(source)
auth_type = AUTH_TYPE_OAUTH if provider == "anthropic" and not token.startswith("sk-ant-api") else AUTH_TYPE_API_KEY
base_url = env_url or pconfig.inference_base_url
@ -1242,6 +1255,13 @@ def _seed_custom_pool(pool_key: str, entries: List[PooledCredential]) -> Tuple[b
changed = False
active_sources: Set[str] = set()
# Shared suppression gate — same pattern as _seed_from_env/_seed_from_singletons.
try:
from hermes_cli.auth import is_source_suppressed as _is_suppressed
except ImportError:
def _is_suppressed(_p, _s): # type: ignore[misc]
return False
# Seed from the custom_providers config entry's api_key field
cp_config = _get_custom_provider_config(pool_key)
if cp_config:
@ -1250,19 +1270,20 @@ def _seed_custom_pool(pool_key: str, entries: List[PooledCredential]) -> Tuple[b
name = str(cp_config.get("name") or "").strip()
if api_key:
source = f"config:{name}"
active_sources.add(source)
changed |= _upsert_entry(
entries,
pool_key,
source,
{
"source": source,
"auth_type": AUTH_TYPE_API_KEY,
"access_token": api_key,
"base_url": base_url,
"label": name or source,
},
)
if not _is_suppressed(pool_key, source):
active_sources.add(source)
changed |= _upsert_entry(
entries,
pool_key,
source,
{
"source": source,
"auth_type": AUTH_TYPE_API_KEY,
"access_token": api_key,
"base_url": base_url,
"label": name or source,
},
)
# Seed from model.api_key if model.provider=='custom' and model.base_url matches
try:
@ -1282,19 +1303,20 @@ def _seed_custom_pool(pool_key: str, entries: List[PooledCredential]) -> Tuple[b
matched_key = get_custom_provider_pool_key(model_base_url)
if matched_key == pool_key:
source = "model_config"
active_sources.add(source)
changed |= _upsert_entry(
entries,
pool_key,
source,
{
"source": source,
"auth_type": AUTH_TYPE_API_KEY,
"access_token": model_api_key,
"base_url": model_base_url,
"label": "model_config",
},
)
if not _is_suppressed(pool_key, source):
active_sources.add(source)
changed |= _upsert_entry(
entries,
pool_key,
source,
{
"source": source,
"auth_type": AUTH_TYPE_API_KEY,
"access_token": model_api_key,
"base_url": model_base_url,
"label": "model_config",
},
)
except Exception:
pass

401
agent/credential_sources.py Normal file
View file

@ -0,0 +1,401 @@
"""Unified removal contract for every credential source Hermes reads from.
Hermes seeds its credential pool from many places:
env:<VAR> os.environ / ~/.hermes/.env
claude_code ~/.claude/.credentials.json
hermes_pkce ~/.hermes/.anthropic_oauth.json
device_code auth.json providers.<provider> (nous, openai-codex, ...)
qwen-cli ~/.qwen/oauth_creds.json
gh_cli gh auth token
config:<name> custom_providers config entry
model_config model.api_key when model.provider == "custom"
manual user ran `hermes auth add`
Each source has its own reader inside ``agent.credential_pool._seed_from_*``
(which keep their existing shape we haven't restructured them). What we
unify here is **removal**:
``hermes auth remove <provider> <N>`` must make the pool entry stay gone.
Before this module, every source had an ad-hoc removal branch in
``auth_remove_command``, and several sources had no branch at all so
``auth remove`` silently reverted on the next ``load_pool()`` call for
qwen-cli, nous device_code (partial), hermes_pkce, copilot gh_cli, and
custom-config sources.
Now every source registers a ``RemovalStep`` that does exactly three things
in the same shape:
1. Clean up whatever externally-readable state the source reads from
(.env line, auth.json block, OAuth file, etc.)
2. Suppress the ``(provider, source_id)`` in auth.json so the
corresponding ``_seed_from_*`` branch skips the upsert on re-load
3. Return ``RemovalResult`` describing what was cleaned and any
diagnostic hints the user should see (shell-exported env vars,
external credential files we deliberately don't delete, etc.)
Adding a new credential source is:
- wire up a reader branch in ``_seed_from_*`` (existing pattern)
- gate that reader behind ``is_source_suppressed(provider, source_id)``
- register a ``RemovalStep`` here
No more per-source if/elif chain in ``auth_remove_command``.
"""
from __future__ import annotations
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, List, Optional
@dataclass
class RemovalResult:
    """Outcome of removing a credential source.

    Attributes:
        cleaned: Short strings describing external state that was actually
            mutated (``"Cleared XAI_API_KEY from .env"``,
            ``"Cleared openai-codex OAuth tokens from auth store"``).
            Printed as plain lines to the user.
        hints: Diagnostic lines ABOUT state the user may need to clean up
            themselves or is deliberately left intact (shell-exported env
            var, Claude Code credential file we don't delete, etc.).
            Printed as plain lines to the user. Always non-destructive.
        suppress: Whether to call ``suppress_credential_source`` after
            cleanup so future ``load_pool`` calls skip this source.
            Default True — almost every source needs this to stay sticky.
            The only legitimate False is ``manual`` entries, which aren't
            seeded from anywhere external.
    """

    # External state actually mutated during removal (user-facing lines).
    cleaned: List[str] = field(default_factory=list)
    # Non-destructive diagnostics for the user; nothing listed here changed.
    hints: List[str] = field(default_factory=list)
    # Whether the (provider, source) pair should be suppressed post-cleanup.
    suppress: bool = True
@dataclass
class RemovalStep:
    """How to remove one specific credential source cleanly.

    Attributes:
        provider: Provider pool key (``"xai"``, ``"anthropic"``, ``"nous"``, ...).
            Special value ``"*"`` means "matches any provider" — used for
            sources like ``manual`` that aren't provider-specific.
        source_id: Source identifier as it appears in
            ``PooledCredential.source``. May be a literal (``"claude_code"``)
            or a prefix pattern matched via ``match_fn``.
        match_fn: Optional predicate overriding literal ``source_id``
            matching. Gets the removed entry's source string. Used for
            ``env:*`` (any env-seeded key), ``config:*`` (any custom
            pool), and ``manual:*`` (any manual-source variant).
        remove_fn: ``(provider, removed_entry) -> RemovalResult``. Does the
            actual cleanup and returns what happened for the user.
        description: One-line human-readable description for docs / tests.
    """

    provider: str
    source_id: str
    remove_fn: Callable[..., RemovalResult]
    match_fn: Optional[Callable[[str], bool]] = None
    description: str = ""

    def matches(self, provider: str, source: str) -> bool:
        """Return True if this step handles the (provider, source) pair."""
        # "*" acts as a wildcard provider; otherwise providers must match.
        if self.provider != "*" and self.provider != provider:
            return False
        # A custom predicate takes precedence over literal comparison.
        if self.match_fn is not None:
            return self.match_fn(source)
        return source == self.source_id
# Module-global ordered registry; lookup returns the FIRST matching step.
_REGISTRY: List[RemovalStep] = []


def register(step: RemovalStep) -> RemovalStep:
    """Append *step* to the module registry and return it unchanged."""
    _REGISTRY.append(step)
    return step
def find_removal_step(provider: str, source: str) -> Optional[RemovalStep]:
    """Return the first matching RemovalStep, or None if unregistered.

    Unregistered sources fall through to the default remove path in
    ``auth_remove_command``: the pool entry is already gone (that happens
    before dispatch), no external cleanup, no suppression. This is the
    correct behaviour for ``manual`` entries — they were only ever stored
    in the pool, so nothing external needs cleaning up.
    """
    return next(
        (step for step in _REGISTRY if step.matches(provider, source)),
        None,
    )
# ---------------------------------------------------------------------------
# Individual RemovalStep implementations — one per source.
# ---------------------------------------------------------------------------
# Each remove_fn is intentionally small and single-purpose. Adding a new
# credential source means adding ONE entry here — no other changes to
# auth_remove_command.
def _remove_env_source(provider: str, removed) -> RemovalResult:
    """env:<VAR> — the most common case.

    Handles three user situations:

    1. Var lives only in ~/.hermes/.env — clear it.
    2. Var lives only in the user's shell (shell profile, systemd
       EnvironmentFile, launchd plist) — hint them where to unset it.
    3. Var lives in both — clear from .env, hint about the shell.
    """
    from hermes_cli.config import get_env_path, remove_env_value

    result = RemovalResult()
    # Source strings look like "env:XAI_API_KEY" — strip the prefix.
    env_var = removed.source[len("env:"):]
    if not env_var:
        # Malformed "env:" source with no variable name — nothing to do.
        return result
    # Detect shell vs .env BEFORE remove_env_value pops os.environ.
    env_in_process = bool(os.getenv(env_var))
    env_in_dotenv = False
    try:
        env_path = get_env_path()
        if env_path.exists():
            env_in_dotenv = any(
                line.strip().startswith(f"{env_var}=")
                for line in env_path.read_text(errors="replace").splitlines()
            )
    except OSError:
        # Unreadable .env — proceed as if the var wasn't stored there.
        pass
    # Visible in the process but absent from .env ⇒ it came from the shell.
    shell_exported = env_in_process and not env_in_dotenv
    cleared = remove_env_value(env_var)
    if cleared:
        result.cleaned.append(f"Cleared {env_var} from .env")
    if shell_exported:
        result.hints.extend([
            f"Note: {env_var} is still set in your shell environment "
            f"(not in ~/.hermes/.env).",
            " Unset it there (shell profile, systemd EnvironmentFile, "
            "launchd plist, etc.) or it will keep being visible to Hermes.",
            f" The pool entry is now suppressed — Hermes will ignore "
            f"{env_var} until you run `hermes auth add {provider}`.",
        ])
    else:
        result.hints.append(
            f"Suppressed env:{env_var} — it will not be re-seeded even "
            f"if the variable is re-exported later."
        )
    return result
def _remove_claude_code(provider: str, removed) -> RemovalResult:
    """Suppress the Claude Code credential without touching its file.

    ~/.claude/.credentials.json belongs to Claude Code itself; deleting it
    would break the user's Claude Code install.  Suppression only stops
    Hermes from reading it.
    """
    hints = [
        "Suppressed claude_code credential — it will not be re-seeded.",
        "Note: Claude Code credentials still live in ~/.claude/.credentials.json",
        "Run `hermes auth add anthropic` to re-enable if needed.",
    ]
    return RemovalResult(hints=hints)
def _remove_hermes_pkce(provider: str, removed) -> RemovalResult:
    """Delete ~/.hermes/.anthropic_oauth.json — Hermes owns this file outright."""
    from hermes_constants import get_hermes_home

    result = RemovalResult()
    oauth_file = get_hermes_home() / ".anthropic_oauth.json"
    if not oauth_file.exists():
        # Nothing on disk — removal is a no-op, report nothing.
        return result
    try:
        oauth_file.unlink()
    except OSError as exc:
        # Surface the failure instead of pretending the file is gone.
        result.hints.append(f"Could not delete {oauth_file}: {exc}")
    else:
        result.cleaned.append("Cleared Hermes Anthropic OAuth credentials")
    return result
def _clear_auth_store_provider(provider: str) -> bool:
    """Delete auth_store.providers[provider]. Returns True if deleted.

    Holds the auth-store lock for the whole read-modify-write cycle so a
    concurrent auth command cannot interleave between load and save.
    """
    from hermes_cli.auth import (
        _auth_store_lock,
        _load_auth_store,
        _save_auth_store,
    )

    with _auth_store_lock():
        auth_store = _load_auth_store()
        providers_dict = auth_store.get("providers")
        # Type-guard: a corrupt store could hold a non-dict under "providers".
        if isinstance(providers_dict, dict) and provider in providers_dict:
            del providers_dict[provider]
            _save_auth_store(auth_store)
            return True
    return False
def _remove_nous_device_code(provider: str, removed) -> RemovalResult:
    """Clear Nous OAuth tokens (auth.json providers.nous) and report it.

    Suppression happens in addition to clearing (the central dispatcher
    suppresses ``removed.source``) because nothing else stops the user's
    next `hermes login` run from writing providers.nous again before they
    decide to.  Forcing re-adds through `hermes auth add nous` keeps the
    documented path, which clears the suppression atomically.
    """
    outcome = RemovalResult()
    if _clear_auth_store_provider(provider):
        outcome.cleaned.append(f"Cleared {provider} OAuth tokens from auth store")
    return outcome
def _remove_codex_device_code(provider: str, removed) -> RemovalResult:
    """Codex tokens live in TWO places: our auth store AND ~/.codex/auth.json.

    refresh_codex_oauth_pure() writes both every time, so clearing only
    the Hermes auth store is not enough — _seed_from_singletons() would
    re-import from ~/.codex/auth.json on the next load_pool() call and
    the removal would be instantly undone.  We suppress instead of
    deleting Codex CLI's file, so the Codex CLI itself keeps working.

    The canonical source name in ``_seed_from_singletons`` is
    ``"device_code"`` (no prefix).  Entries may show up in the pool as
    either ``"device_code"`` (seeded) or ``"manual:device_code"`` (added
    via ``hermes auth add openai-codex``), but in both cases the re-seed
    gate lives at the ``"device_code"`` suppression key.  We suppress
    that canonical key here; the central dispatcher also suppresses
    ``removed.source`` — which is fine: belt-and-suspenders, idempotent.
    """
    from hermes_cli.auth import suppress_credential_source

    result = RemovalResult()
    if _clear_auth_store_provider(provider):
        result.cleaned.append(f"Cleared {provider} OAuth tokens from auth store")
    # Suppress the canonical re-seed source, not just whatever source the
    # removed entry had. Otherwise `manual:device_code` removals wouldn't
    # block the `device_code` re-seed path.
    suppress_credential_source(provider, "device_code")
    result.hints.extend([
        "Suppressed openai-codex device_code source — it will not be re-seeded.",
        "Note: Codex CLI credentials still live in ~/.codex/auth.json",
        "Run `hermes auth add openai-codex` to re-enable if needed.",
    ])
    return result
def _remove_qwen_cli(provider: str, removed) -> RemovalResult:
    """Suppress the Qwen CLI credential without touching its file.

    ~/.qwen/oauth_creds.json is owned by the Qwen CLI — same pattern as
    claude_code: suppress, don't delete, so the user's Qwen CLI install
    keeps working.
    """
    hints = [
        "Suppressed qwen-cli credential — it will not be re-seeded.",
        "Note: Qwen CLI credentials still live in ~/.qwen/oauth_creds.json",
        "Run `hermes auth add qwen-oauth` to re-enable if needed.",
    ]
    return RemovalResult(hints=hints)
def _remove_copilot_gh(provider: str, removed) -> RemovalResult:
    """Suppress every copilot token source (gh CLI and env vars).

    The same token can be seeded under several source entries (``gh_cli``
    from ``_seed_from_singletons`` plus ``env:<VAR>`` from
    ``_seed_from_env``), so suppressing only the removed entry would let
    the duplicates resurrect the pool entry.  Neither the user's gh CLI
    nor their shell state is touched — Hermes simply stops picking the
    token up.  The central dispatcher also suppresses ``removed.source``;
    suppression is idempotent, so the overlap is harmless.
    """
    from hermes_cli.auth import suppress_credential_source

    env_variants = ("COPILOT_GITHUB_TOKEN", "GH_TOKEN", "GITHUB_TOKEN")
    for source in ["gh_cli"] + [f"env:{var}" for var in env_variants]:
        suppress_credential_source(provider, source)
    return RemovalResult(hints=[
        "Suppressed all copilot token sources (gh_cli + env vars) — they will not be re-seeded.",
        "Note: Your gh CLI / shell environment is unchanged.",
        "Run `hermes auth add copilot` to re-enable if needed.",
    ])
def _remove_custom_config(provider: str, removed) -> RemovalResult:
    """Suppress a config.yaml-seeded credential without editing the file.

    Custom provider pools come from custom_providers config or
    model.api_key, both living in config.yaml.  Rewriting that file from
    here would be more invasive than suppression, so the user is pointed
    at config.yaml if they want the key gone from disk too.
    """
    label = removed.source
    hint_lines = [
        f"Suppressed {label} — it will not be re-seeded.",
        "Note: The underlying value in config.yaml is unchanged. Edit it "
        "directly if you want to remove the credential from disk.",
    ]
    return RemovalResult(hints=hint_lines)
def _register_all_sources() -> None:
    """Called once on module import.

    ORDER MATTERS — ``find_removal_step`` returns the first match.  Put
    provider-specific steps before the generic ``env:*`` step so that e.g.
    copilot's ``env:GH_TOKEN`` goes through the copilot removal (which
    doesn't touch the user's shell), not the generic env-var removal
    (which would try to clear .env).
    """
    # Copilot first: its env-var sources must NOT fall through to the
    # generic env:* handler registered just below.
    register(RemovalStep(
        provider="copilot", source_id="gh_cli",
        match_fn=lambda src: src == "gh_cli" or src.startswith("env:"),
        remove_fn=_remove_copilot_gh,
        description="gh auth token / COPILOT_GITHUB_TOKEN / GH_TOKEN",
    ))
    # Generic env-seeded keys for every other provider.
    register(RemovalStep(
        provider="*", source_id="env:",
        match_fn=lambda src: src.startswith("env:"),
        remove_fn=_remove_env_source,
        description="Any env-seeded credential (XAI_API_KEY, DEEPSEEK_API_KEY, etc.)",
    ))
    register(RemovalStep(
        provider="anthropic", source_id="claude_code",
        remove_fn=_remove_claude_code,
        description="~/.claude/.credentials.json",
    ))
    register(RemovalStep(
        provider="anthropic", source_id="hermes_pkce",
        remove_fn=_remove_hermes_pkce,
        description="~/.hermes/.anthropic_oauth.json",
    ))
    register(RemovalStep(
        provider="nous", source_id="device_code",
        remove_fn=_remove_nous_device_code,
        description="auth.json providers.nous",
    ))
    # Matches both "device_code" (seeded) and "manual:device_code" (added
    # via `hermes auth add openai-codex`).
    register(RemovalStep(
        provider="openai-codex", source_id="device_code",
        match_fn=lambda src: src == "device_code" or src.endswith(":device_code"),
        remove_fn=_remove_codex_device_code,
        description="auth.json providers.openai-codex + ~/.codex/auth.json",
    ))
    register(RemovalStep(
        provider="qwen-oauth", source_id="qwen-cli",
        remove_fn=_remove_qwen_cli,
        description="~/.qwen/oauth_creds.json",
    ))
    register(RemovalStep(
        provider="*", source_id="config:",
        match_fn=lambda src: src.startswith("config:") or src == "model_config",
        remove_fn=_remove_custom_config,
        description="Custom provider config.yaml api_key field",
    ))
_register_all_sources()

View file

@ -225,9 +225,11 @@ def build_tool_preview(tool_name: str, args: dict, max_len: int | None = None) -
content = _oneline(args.get("content", ""))
return f"+{target}: \"{content[:25]}{'...' if len(content) > 25 else ''}\""
elif action == "replace":
return f"~{target}: \"{_oneline(args.get('old_text', '')[:20])}\""
old = _oneline(args.get("old_text") or "") or "<missing old_text>"
return f"~{target}: \"{old[:20]}\""
elif action == "remove":
return f"-{target}: \"{_oneline(args.get('old_text', '')[:20])}\""
old = _oneline(args.get("old_text") or "") or "<missing old_text>"
return f"-{target}: \"{old[:20]}\""
return action
if tool_name == "send_message":
@ -939,9 +941,13 @@ def get_cute_tool_message(
if action == "add":
return _wrap(f"┊ 🧠 memory +{target}: \"{_trunc(args.get('content', ''), 30)}\" {dur}")
elif action == "replace":
return _wrap(f"┊ 🧠 memory ~{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
old = args.get("old_text") or ""
old = old if old else "<missing old_text>"
return _wrap(f"┊ 🧠 memory ~{target}: \"{_trunc(old, 20)}\" {dur}")
elif action == "remove":
return _wrap(f"┊ 🧠 memory -{target}: \"{_trunc(args.get('old_text', ''), 20)}\" {dur}")
old = args.get("old_text") or ""
old = old if old else "<missing old_text>"
return _wrap(f"┊ 🧠 memory -{target}: \"{_trunc(old, 20)}\" {dur}")
return _wrap(f"┊ 🧠 memory {action} {dur}")
if tool_name == "skills_list":
return _wrap(f"┊ 📚 skills list {args.get('category', 'all')} {dur}")

View file

@ -290,7 +290,7 @@ def classify_api_error(
if isinstance(body, dict):
_err_obj = body.get("error", {})
if isinstance(_err_obj, dict):
_body_msg = (_err_obj.get("message") or "").lower()
_body_msg = str(_err_obj.get("message") or "").lower()
# Parse metadata.raw for wrapped provider errors
_metadata = _err_obj.get("metadata", {})
if isinstance(_metadata, dict):
@ -302,11 +302,11 @@ def classify_api_error(
if isinstance(_inner, dict):
_inner_err = _inner.get("error", {})
if isinstance(_inner_err, dict):
_metadata_msg = (_inner_err.get("message") or "").lower()
_metadata_msg = str(_inner_err.get("message") or "").lower()
except (json.JSONDecodeError, TypeError):
pass
if not _body_msg:
_body_msg = (body.get("message") or "").lower()
_body_msg = str(body.get("message") or "").lower()
# Combine all message sources for pattern matching
parts = [_raw_msg]
if _body_msg and _body_msg not in _raw_msg:
@ -470,11 +470,16 @@ def _classify_by_status(
retryable=False,
should_fallback=True,
)
# Generic 404 — could be model or endpoint
# Generic 404 with no "model not found" signal — could be a wrong
# endpoint path (common with local llama.cpp / Ollama / vLLM when
# the URL is slightly misconfigured), a proxy routing glitch, or
# a transient backend issue. Classifying these as model_not_found
# silently falls back to a different provider and tells the model
# the model is missing, which is wrong and wastes a turn. Treat
# as unknown so the retry loop surfaces the real error instead.
return result_fn(
FailoverReason.model_not_found,
retryable=False,
should_fallback=True,
FailoverReason.unknown,
retryable=True,
)
if status_code == 413:
@ -606,10 +611,10 @@ def _classify_400(
if isinstance(body, dict):
err_obj = body.get("error", {})
if isinstance(err_obj, dict):
err_body_msg = (err_obj.get("message") or "").strip().lower()
err_body_msg = str(err_obj.get("message") or "").strip().lower()
# Responses API (and some providers) use flat body: {"message": "..."}
if not err_body_msg:
err_body_msg = (body.get("message") or "").strip().lower()
err_body_msg = str(body.get("message") or "").strip().lower()
is_generic = len(err_body_msg) < 30 or err_body_msg in ("error", "")
is_large = approx_tokens > context_length * 0.4 or approx_tokens > 80000 or num_messages > 80

111
agent/file_safety.py Normal file
View file

@ -0,0 +1,111 @@
"""Shared file safety rules used by both tools and ACP shims."""
from __future__ import annotations
import os
from pathlib import Path
from typing import Optional
def _hermes_home_path() -> Path:
"""Resolve the active HERMES_HOME (profile-aware) without circular imports."""
try:
from hermes_constants import get_hermes_home # local import to avoid cycles
return get_hermes_home()
except Exception:
return Path(os.path.expanduser("~/.hermes"))
def build_write_denied_paths(home: str) -> set[str]:
    """Return exact sensitive paths that must never be written."""
    hermes_home = _hermes_home_path()
    dotfiles = (
        ".bashrc", ".zshrc", ".profile", ".bash_profile", ".zprofile",
        ".netrc", ".pgpass", ".npmrc", ".pypirc",
    )
    candidates = [
        os.path.join(home, ".ssh", "authorized_keys"),
        os.path.join(home, ".ssh", "id_rsa"),
        os.path.join(home, ".ssh", "id_ed25519"),
        os.path.join(home, ".ssh", "config"),
        str(hermes_home / ".env"),
    ]
    candidates.extend(os.path.join(home, name) for name in dotfiles)
    candidates.extend(["/etc/sudoers", "/etc/passwd", "/etc/shadow"])
    # realpath each entry so comparisons hold even through symlinks.
    return {os.path.realpath(p) for p in candidates}
def build_write_denied_prefixes(home: str) -> list[str]:
    """Return sensitive directory prefixes that must never be written.

    Each entry is realpath-resolved and suffixed with ``os.sep`` so callers
    can use a plain ``startswith`` check without accidentally matching a
    sibling directory that merely shares a name prefix.
    """
    raw_prefixes = [
        os.path.join(home, ".ssh"),
        os.path.join(home, ".aws"),
        os.path.join(home, ".gnupg"),
        os.path.join(home, ".kube"),
        "/etc/sudoers.d",
        "/etc/systemd",
        os.path.join(home, ".docker"),
        os.path.join(home, ".azure"),
        os.path.join(home, ".config", "gh"),
    ]
    return [os.path.realpath(entry) + os.sep for entry in raw_prefixes]
def get_safe_write_root() -> Optional[str]:
    """Return the resolved HERMES_WRITE_SAFE_ROOT path, or None if unset."""
    configured = os.getenv("HERMES_WRITE_SAFE_ROOT", "")
    if not configured:
        return None
    try:
        expanded = os.path.expanduser(configured)
        return os.path.realpath(expanded)
    except Exception:
        # Pathological values can make expanduser/realpath raise; treat a
        # broken setting the same as "no safe root configured".
        return None
def is_write_denied(path: str) -> bool:
    """Return True if path is blocked by the write denylist or safe root."""
    home = os.path.realpath(os.path.expanduser("~"))
    resolved = os.path.realpath(os.path.expanduser(str(path)))
    # Exact-path denylist first, then sensitive directory prefixes.
    if resolved in build_write_denied_paths(home):
        return True
    if any(resolved.startswith(prefix) for prefix in build_write_denied_prefixes(home)):
        return True
    # When a safe root is configured, anything outside it is denied too.
    safe_root = get_safe_write_root()
    if safe_root is None:
        return False
    inside_root = resolved == safe_root or resolved.startswith(safe_root + os.sep)
    return not inside_root
def get_read_block_error(path: str) -> Optional[str]:
    """Return an error message when a read targets internal Hermes cache files."""
    resolved = Path(path).expanduser().resolve()
    hub_root = _hermes_home_path().resolve() / "skills" / ".hub"
    # index-cache is listed explicitly even though it sits under .hub, so
    # the block still holds if the parent entry is ever narrowed.
    for blocked in (hub_root / "index-cache", hub_root):
        try:
            resolved.relative_to(blocked)
        except ValueError:
            # Not under this blocked directory — try the next one.
            continue
        return (
            f"Access denied: {path} is an internal Hermes cache file "
            "and cannot be read directly to prevent prompt injection. "
            "Use the skills_list or skill_view tools instead."
        )
    return None

View file

@ -39,6 +39,7 @@ from typing import Any, Dict, Iterator, List, Optional
import httpx
from agent import google_oauth
from agent.gemini_schema import sanitize_gemini_tool_parameters
from agent.google_code_assist import (
CODE_ASSIST_ENDPOINT,
FREE_TIER_ID,
@ -205,7 +206,7 @@ def _translate_tools_to_gemini(tools: Any) -> List[Dict[str, Any]]:
decl["description"] = str(fn["description"])
params = fn.get("parameters")
if isinstance(params, dict):
decl["parameters"] = params
decl["parameters"] = sanitize_gemini_tool_parameters(params)
declarations.append(decl)
if not declarations:
return []
@ -504,9 +505,16 @@ def _iter_sse_events(response: httpx.Response) -> Iterator[Dict[str, Any]]:
def _translate_stream_event(
event: Dict[str, Any],
model: str,
tool_call_indices: Dict[str, int],
tool_call_counter: List[int],
) -> List[_GeminiStreamChunk]:
"""Unwrap Code Assist envelope and emit OpenAI-shaped chunk(s)."""
"""Unwrap Code Assist envelope and emit OpenAI-shaped chunk(s).
``tool_call_counter`` is a single-element list used as a mutable counter
across events in the same stream. Each ``functionCall`` part gets a
fresh, unique OpenAI ``index`` keying by function name would collide
whenever the model issues parallel calls to the same tool (e.g. reading
three files in one turn).
"""
inner = event.get("response") if isinstance(event.get("response"), dict) else event
candidates = inner.get("candidates") or []
if not candidates:
@ -532,7 +540,8 @@ def _translate_stream_event(
fc = part.get("functionCall")
if isinstance(fc, dict) and fc.get("name"):
name = str(fc["name"])
idx = tool_call_indices.setdefault(name, len(tool_call_indices))
idx = tool_call_counter[0]
tool_call_counter[0] += 1
try:
args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False)
except (TypeError, ValueError):
@ -549,7 +558,7 @@ def _translate_stream_event(
finish_reason_raw = str(cand.get("finishReason") or "")
if finish_reason_raw:
mapped = _map_gemini_finish_reason(finish_reason_raw)
if tool_call_indices:
if tool_call_counter[0] > 0:
mapped = "tool_calls"
chunks.append(_make_stream_chunk(model=model, finish_reason=mapped))
return chunks
@ -733,9 +742,9 @@ class GeminiCloudCodeClient:
# Materialize error body for better diagnostics
response.read()
raise _gemini_http_error(response)
tool_call_indices: Dict[str, int] = {}
tool_call_counter: List[int] = [0]
for event in _iter_sse_events(response):
for chunk in _translate_stream_event(event, model, tool_call_indices):
for chunk in _translate_stream_event(event, model, tool_call_counter):
yield chunk
except httpx.HTTPError as exc:
raise CodeAssistError(
@ -790,7 +799,8 @@ def _gemini_http_error(response: httpx.Response) -> CodeAssistError:
err_obj = {}
err_status = str(err_obj.get("status") or "").strip()
err_message = str(err_obj.get("message") or "").strip()
err_details_list = err_obj.get("details") if isinstance(err_obj.get("details"), list) else []
_raw_details = err_obj.get("details")
err_details_list = _raw_details if isinstance(_raw_details, list) else []
# Extract google.rpc.ErrorInfo reason + metadata. There may be more
# than one ErrorInfo (rare), so we pick the first one with a reason.

View file

@ -0,0 +1,847 @@
"""OpenAI-compatible facade over Google AI Studio's native Gemini API.
Hermes keeps ``api_mode='chat_completions'`` for the ``gemini`` provider so the
main agent loop can keep using its existing OpenAI-shaped message flow.
This adapter is the transport shim that converts those OpenAI-style
``messages[]`` / ``tools[]`` requests into Gemini's native
``models/{model}:generateContent`` schema and converts the responses back.
Why this exists
---------------
Google's OpenAI-compatible endpoint has been brittle for Hermes's multi-turn
agent/tool loop (auth churn, tool-call replay quirks, thought-signature
requirements). The native Gemini API is the canonical path and avoids the
OpenAI-compat layer entirely.
"""
from __future__ import annotations
import asyncio
import base64
import json
import logging
import time
import uuid
from types import SimpleNamespace
from typing import Any, Dict, Iterator, List, Optional
import httpx
from agent.gemini_schema import sanitize_gemini_tool_parameters
logger = logging.getLogger(__name__)
DEFAULT_GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta"
def is_native_gemini_base_url(base_url: str) -> bool:
    """Return True when the endpoint speaks Gemini's native REST API.

    Google's OpenAI-compat endpoint lives on the same host with an
    ``/openai`` suffix, so a generativelanguage URL only counts as native
    when it does NOT end in ``/openai``.
    """
    candidate = str(base_url or "").strip().rstrip("/").lower()
    on_google_host = bool(candidate) and "generativelanguage.googleapis.com" in candidate
    return on_google_host and not candidate.endswith("/openai")
class GeminiAPIError(Exception):
    """Error shape compatible with Hermes retry/error classification.

    Attributes mirror what the retry loop inspects: ``code`` (stable
    error identifier), ``status_code`` (HTTP status when known),
    ``response`` (the underlying httpx response when captured),
    ``retry_after`` (server-suggested backoff, seconds) and ``details``
    (extra structured context; always a dict).
    """

    def __init__(
        self,
        message: str,
        *,
        code: str = "gemini_api_error",
        status_code: Optional[int] = None,
        response: Optional[httpx.Response] = None,
        retry_after: Optional[float] = None,
        details: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(message)
        self.code = code
        self.status_code = status_code
        self.response = response
        self.retry_after = retry_after
        # Normalize a missing mapping to {} so callers can always index.
        self.details = {} if not details else details
def _coerce_content_to_text(content: Any) -> str:
if content is None:
return ""
if isinstance(content, str):
return content
if isinstance(content, list):
pieces: List[str] = []
for part in content:
if isinstance(part, str):
pieces.append(part)
elif isinstance(part, dict) and part.get("type") == "text":
text = part.get("text")
if isinstance(text, str):
pieces.append(text)
return "\n".join(pieces)
return str(content)
def _extract_multimodal_parts(content: Any) -> List[Dict[str, Any]]:
    """Convert OpenAI message content into Gemini ``parts``.

    Non-list content is coerced to a single text part (or nothing when
    empty).  Inside a list, strings and ``text`` parts become
    ``{"text": ...}``; ``image_url`` parts are accepted only as base64
    ``data:`` URLs and re-encoded into Gemini ``inlineData``.  Anything
    unparseable is silently skipped rather than failing the request.
    """
    if not isinstance(content, list):
        flattened = _coerce_content_to_text(content)
        return [{"text": flattened}] if flattened else []
    gemini_parts: List[Dict[str, Any]] = []
    for item in content:
        if isinstance(item, str):
            gemini_parts.append({"text": item})
            continue
        if not isinstance(item, dict):
            continue
        kind = item.get("type")
        if kind == "text":
            text = item.get("text")
            if isinstance(text, str) and text:
                gemini_parts.append({"text": text})
        elif kind == "image_url":
            url = ((item.get("image_url") or {}).get("url") or "")
            if not isinstance(url, str) or not url.startswith("data:"):
                continue
            try:
                header, encoded = url.split(",", 1)
                mime = header.split(":", 1)[1].split(";", 1)[0]
                raw = base64.b64decode(encoded)
            except Exception:
                # Malformed data URL — drop the image, keep the request alive.
                continue
            gemini_parts.append(
                {
                    "inlineData": {
                        "mimeType": mime,
                        "data": base64.b64encode(raw).decode("ascii"),
                    }
                }
            )
    return gemini_parts
def _tool_call_extra_signature(tool_call: Dict[str, Any]) -> Optional[str]:
extra = tool_call.get("extra_content") or {}
if not isinstance(extra, dict):
return None
google = extra.get("google") or extra.get("thought_signature")
if isinstance(google, dict):
sig = google.get("thought_signature") or google.get("thoughtSignature")
return str(sig) if isinstance(sig, str) and sig else None
if isinstance(google, str) and google:
return google
return None
def _translate_tool_call_to_gemini(tool_call: Dict[str, Any]) -> Dict[str, Any]:
    """Convert an OpenAI tool call into a Gemini ``functionCall`` part.

    Arguments arrive as a JSON string; invalid JSON is preserved under
    ``_raw`` and non-dict JSON under ``_value`` so no information is
    lost.  A thought signature, when present, rides along as
    ``thoughtSignature``.
    """
    fn = tool_call.get("function") or {}
    args_raw = fn.get("arguments", "")
    if isinstance(args_raw, str) and args_raw:
        try:
            parsed_args = json.loads(args_raw)
        except json.JSONDecodeError:
            parsed_args = {"_raw": args_raw}
    else:
        parsed_args = {}
    if not isinstance(parsed_args, dict):
        parsed_args = {"_value": parsed_args}
    part: Dict[str, Any] = {
        "functionCall": {
            "name": str(fn.get("name") or ""),
            "args": parsed_args,
        }
    }
    signature = _tool_call_extra_signature(tool_call)
    if signature:
        part["thoughtSignature"] = signature
    return part
def _translate_tool_result_to_gemini(
    message: Dict[str, Any],
    tool_name_by_call_id: Optional[Dict[str, str]] = None,
) -> Dict[str, Any]:
    """Convert an OpenAI ``tool``/``function`` message into ``functionResponse``.

    Gemini wants the *function name* on the response, but OpenAI messages
    often carry only ``tool_call_id`` — fall back through the provided
    id→name map, then the raw id, then the literal "tool".  JSON-object
    content is parsed so structured tool output stays structured; any
    other content is wrapped as ``{"output": <text>}``.
    """
    id_to_name = tool_name_by_call_id or {}
    call_id = str(message.get("tool_call_id") or "")
    fn_name = str(
        message.get("name")
        or id_to_name.get(call_id)
        or call_id
        or "tool"
    )
    text = _coerce_content_to_text(message.get("content"))
    parsed = None
    if text.strip().startswith(("{", "[")):
        try:
            parsed = json.loads(text)
        except json.JSONDecodeError:
            parsed = None
    payload = parsed if isinstance(parsed, dict) else {"output": text}
    return {"functionResponse": {"name": fn_name, "response": payload}}
def _build_gemini_contents(messages: List[Dict[str, Any]]) -> tuple[List[Dict[str, Any]], Optional[Dict[str, Any]]]:
    """Convert OpenAI ``messages[]`` into Gemini ``contents`` + systemInstruction.

    Returns ``(contents, system_instruction)`` where ``system_instruction``
    is None when no system text was present.  System messages are pulled
    out and joined; tool/function results become user-role
    ``functionResponse`` parts; assistant tool calls are appended as
    ``functionCall`` parts on the model turn.
    """
    system_text_parts: List[str] = []
    contents: List[Dict[str, Any]] = []
    # Gemini functionResponse parts need the function *name*; OpenAI tool
    # messages only carry tool_call_id.  Collect id→name while walking
    # assistant turns so later tool results can be labelled correctly.
    tool_name_by_call_id: Dict[str, str] = {}
    for msg in messages:
        if not isinstance(msg, dict):
            continue
        role = str(msg.get("role") or "user")
        if role == "system":
            system_text_parts.append(_coerce_content_to_text(msg.get("content")))
            continue
        if role in {"tool", "function"}:
            # Gemini has no dedicated tool role — results ride on a user turn.
            contents.append(
                {
                    "role": "user",
                    "parts": [
                        _translate_tool_result_to_gemini(
                            msg,
                            tool_name_by_call_id=tool_name_by_call_id,
                        )
                    ],
                }
            )
            continue
        gemini_role = "model" if role == "assistant" else "user"
        parts: List[Dict[str, Any]] = []
        content_parts = _extract_multimodal_parts(msg.get("content"))
        parts.extend(content_parts)
        tool_calls = msg.get("tool_calls") or []
        if isinstance(tool_calls, list):
            for tool_call in tool_calls:
                if isinstance(tool_call, dict):
                    tool_call_id = str(tool_call.get("id") or tool_call.get("call_id") or "")
                    tool_name = str(((tool_call.get("function") or {}).get("name") or ""))
                    if tool_call_id and tool_name:
                        tool_name_by_call_id[tool_call_id] = tool_name
                    parts.append(_translate_tool_call_to_gemini(tool_call))
        # Turns that produced no parts (e.g. empty content) are skipped.
        if parts:
            contents.append({"role": gemini_role, "parts": parts})
    system_instruction = None
    joined_system = "\n".join(part for part in system_text_parts if part).strip()
    if joined_system:
        system_instruction = {"parts": [{"text": joined_system}]}
    return contents, system_instruction
def _translate_tools_to_gemini(tools: Any) -> List[Dict[str, Any]]:
    """Convert OpenAI ``tools[]`` into Gemini ``functionDeclarations``.

    Entries without a usable function name are skipped; parameters are
    run through the Gemini schema sanitizer.  Returns ``[]`` when nothing
    translates so callers can omit the ``tools`` key entirely.
    """
    if not isinstance(tools, list):
        return []
    declarations: List[Dict[str, Any]] = []
    for tool in tools:
        fn = tool.get("function") if isinstance(tool, dict) else None
        if not isinstance(fn, dict):
            continue
        name = fn.get("name")
        if not (isinstance(name, str) and name):
            continue
        declaration: Dict[str, Any] = {"name": name}
        description = fn.get("description")
        if isinstance(description, str) and description:
            declaration["description"] = description
        parameters = fn.get("parameters")
        if isinstance(parameters, dict):
            declaration["parameters"] = sanitize_gemini_tool_parameters(parameters)
        declarations.append(declaration)
    if not declarations:
        return []
    return [{"functionDeclarations": declarations}]
def _translate_tool_choice_to_gemini(tool_choice: Any) -> Optional[Dict[str, Any]]:
if tool_choice is None:
return None
if isinstance(tool_choice, str):
if tool_choice == "auto":
return {"functionCallingConfig": {"mode": "AUTO"}}
if tool_choice == "required":
return {"functionCallingConfig": {"mode": "ANY"}}
if tool_choice == "none":
return {"functionCallingConfig": {"mode": "NONE"}}
if isinstance(tool_choice, dict):
fn = tool_choice.get("function") or {}
name = fn.get("name")
if isinstance(name, str) and name:
return {"functionCallingConfig": {"mode": "ANY", "allowedFunctionNames": [name]}}
return None
def _normalize_thinking_config(config: Any) -> Optional[Dict[str, Any]]:
if not isinstance(config, dict) or not config:
return None
budget = config.get("thinkingBudget", config.get("thinking_budget"))
include = config.get("includeThoughts", config.get("include_thoughts"))
level = config.get("thinkingLevel", config.get("thinking_level"))
normalized: Dict[str, Any] = {}
if isinstance(budget, (int, float)):
normalized["thinkingBudget"] = int(budget)
if isinstance(include, bool):
normalized["includeThoughts"] = include
if isinstance(level, str) and level.strip():
normalized["thinkingLevel"] = level.strip().lower()
return normalized or None
def build_gemini_request(
    *,
    messages: List[Dict[str, Any]],
    tools: Any = None,
    tool_choice: Any = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    top_p: Optional[float] = None,
    stop: Any = None,
    thinking_config: Any = None,
) -> Dict[str, Any]:
    """Assemble the full Gemini generateContent request body.

    Translates OpenAI-style messages/tools/tool_choice plus the common
    sampling knobs into Gemini's schema.  Keys are only emitted when they
    carry a value, so the minimal request is just ``{"contents": [...]}``.
    """
    contents, system_instruction = _build_gemini_contents(messages)
    request: Dict[str, Any] = {"contents": contents}
    if system_instruction:
        request["systemInstruction"] = system_instruction
    gemini_tools = _translate_tools_to_gemini(tools)
    if gemini_tools:
        request["tools"] = gemini_tools
    tool_config = _translate_tool_choice_to_gemini(tool_choice)
    if tool_config:
        request["toolConfig"] = tool_config
    generation_config: Dict[str, Any] = {}
    if temperature is not None:
        generation_config["temperature"] = temperature
    if max_tokens is not None:
        generation_config["maxOutputTokens"] = max_tokens
    if top_p is not None:
        generation_config["topP"] = top_p
    if stop:
        # Gemini expects a list; a bare scalar stop is wrapped (and
        # coerced to str) rather than rejected.
        generation_config["stopSequences"] = stop if isinstance(stop, list) else [str(stop)]
    normalized_thinking = _normalize_thinking_config(thinking_config)
    if normalized_thinking:
        generation_config["thinkingConfig"] = normalized_thinking
    if generation_config:
        request["generationConfig"] = generation_config
    return request
def _map_gemini_finish_reason(reason: str) -> str:
mapping = {
"STOP": "stop",
"MAX_TOKENS": "length",
"SAFETY": "content_filter",
"RECITATION": "content_filter",
"OTHER": "stop",
}
return mapping.get(str(reason or "").upper(), "stop")
def _tool_call_extra_from_part(part: Dict[str, Any]) -> Optional[Dict[str, Any]]:
sig = part.get("thoughtSignature")
if isinstance(sig, str) and sig:
return {"google": {"thought_signature": sig}}
return None
def _empty_response(model: str) -> SimpleNamespace:
message = SimpleNamespace(
role="assistant",
content="",
tool_calls=None,
reasoning=None,
reasoning_content=None,
reasoning_details=None,
)
choice = SimpleNamespace(index=0, message=message, finish_reason="stop")
usage = SimpleNamespace(
prompt_tokens=0,
completion_tokens=0,
total_tokens=0,
prompt_tokens_details=SimpleNamespace(cached_tokens=0),
)
return SimpleNamespace(
id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
object="chat.completion",
created=int(time.time()),
model=model,
choices=[choice],
usage=usage,
)
def translate_gemini_response(resp: Dict[str, Any], model: str) -> SimpleNamespace:
    """Convert a native Gemini generateContent response into an
    OpenAI-style chat.completion object (SimpleNamespace tree).

    Only the first candidate is used.  ``thought`` parts become
    reasoning, plain text parts are concatenated into content, and
    functionCall parts become tool_calls (with any thoughtSignature
    preserved in extra_content).  Missing candidates yield a zeroed
    empty response instead of raising.
    """
    candidates = resp.get("candidates") or []
    if not isinstance(candidates, list) or not candidates:
        return _empty_response(model)
    cand = candidates[0] if isinstance(candidates[0], dict) else {}
    content_obj = cand.get("content") if isinstance(cand, dict) else {}
    parts = content_obj.get("parts") if isinstance(content_obj, dict) else []
    text_pieces: List[str] = []
    reasoning_pieces: List[str] = []
    tool_calls: List[SimpleNamespace] = []
    for index, part in enumerate(parts or []):
        if not isinstance(part, dict):
            continue
        # Thought parts (part["thought"] is True) are model reasoning,
        # not user-visible text — collect them separately.
        if part.get("thought") is True and isinstance(part.get("text"), str):
            reasoning_pieces.append(part["text"])
            continue
        if isinstance(part.get("text"), str):
            text_pieces.append(part["text"])
            continue
        fc = part.get("functionCall")
        if isinstance(fc, dict) and fc.get("name"):
            try:
                args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False)
            except (TypeError, ValueError):
                # Unserializable args — degrade to an empty-args call.
                args_str = "{}"
            tool_call = SimpleNamespace(
                id=f"call_{uuid.uuid4().hex[:12]}",
                type="function",
                index=index,
                function=SimpleNamespace(name=str(fc["name"]), arguments=args_str),
            )
            extra_content = _tool_call_extra_from_part(part)
            if extra_content:
                tool_call.extra_content = extra_content
            tool_calls.append(tool_call)
    # Any tool call forces finish_reason="tool_calls", mirroring OpenAI.
    finish_reason = "tool_calls" if tool_calls else _map_gemini_finish_reason(str(cand.get("finishReason") or ""))
    usage_meta = resp.get("usageMetadata") or {}
    usage = SimpleNamespace(
        prompt_tokens=int(usage_meta.get("promptTokenCount") or 0),
        completion_tokens=int(usage_meta.get("candidatesTokenCount") or 0),
        total_tokens=int(usage_meta.get("totalTokenCount") or 0),
        prompt_tokens_details=SimpleNamespace(
            cached_tokens=int(usage_meta.get("cachedContentTokenCount") or 0),
        ),
    )
    reasoning = "".join(reasoning_pieces) or None
    message = SimpleNamespace(
        role="assistant",
        # content is None (not "") when only tool calls came back,
        # matching the OpenAI SDK convention.
        content="".join(text_pieces) if text_pieces else None,
        tool_calls=tool_calls or None,
        reasoning=reasoning,
        reasoning_content=reasoning,
        reasoning_details=None,
    )
    choice = SimpleNamespace(index=0, message=message, finish_reason=finish_reason)
    return SimpleNamespace(
        id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
        object="chat.completion",
        created=int(time.time()),
        model=model,
        choices=[choice],
        usage=usage,
    )
class _GeminiStreamChunk(SimpleNamespace):
pass
def _make_stream_chunk(
    *,
    model: str,
    content: str = "",
    tool_call_delta: Optional[Dict[str, Any]] = None,
    finish_reason: Optional[str] = None,
    reasoning: str = "",
) -> _GeminiStreamChunk:
    """Build one OpenAI-style ``chat.completion.chunk`` namespace.

    Typically exactly one of ``content`` / ``tool_call_delta`` /
    ``reasoning`` is supplied; empty fields come out as ``None`` so the
    result quacks like an OpenAI SDK stream chunk.
    """
    delta = SimpleNamespace(
        role="assistant",
        content=content or None,
        tool_calls=None,
        reasoning=reasoning or None,
        reasoning_content=reasoning or None,
    )
    if tool_call_delta is not None:
        call = SimpleNamespace(
            index=tool_call_delta.get("index", 0),
            id=tool_call_delta.get("id") or f"call_{uuid.uuid4().hex[:12]}",
            type="function",
            function=SimpleNamespace(
                name=tool_call_delta.get("name") or "",
                arguments=tool_call_delta.get("arguments") or "",
            ),
        )
        extra = tool_call_delta.get("extra_content")
        if isinstance(extra, dict):
            call.extra_content = extra
        delta.tool_calls = [call]
    return _GeminiStreamChunk(
        id=f"chatcmpl-{uuid.uuid4().hex[:12]}",
        object="chat.completion.chunk",
        created=int(time.time()),
        model=model,
        choices=[SimpleNamespace(index=0, delta=delta, finish_reason=finish_reason)],
        usage=None,
    )
def _iter_sse_events(response: httpx.Response) -> Iterator[Dict[str, Any]]:
    """Yield decoded JSON dict payloads from a Gemini SSE response body.

    Splits the streamed text into lines, keeps only ``data:`` fields,
    stops on the ``[DONE]`` sentinel, and skips non-JSON / non-dict
    payloads (logged at debug level).

    Fixes over the previous version:
    * accepts ``data:`` with or without the following space (the SSE
      spec requires stripping at most one leading space, not that the
      space be present);
    * drains a final complete ``data:`` line left in the buffer when the
      stream ends without a trailing newline (previously dropped).
    """
    buffer = ""

    def _payload_from(raw_line: str) -> Any:
        # Returns a dict payload, the string "[DONE]", or None to skip.
        line = raw_line.rstrip("\r")
        if not line.startswith("data:"):
            return None
        data = line[5:]
        if data.startswith(" "):
            data = data[1:]  # SSE: strip at most one leading space
        if data == "[DONE]":
            return "[DONE]"
        try:
            payload = json.loads(data)
        except json.JSONDecodeError:
            logger.debug("Non-JSON Gemini SSE line: %s", data[:200])
            return None
        return payload if isinstance(payload, dict) else None

    for chunk in response.iter_text():
        if not chunk:
            continue
        buffer += chunk
        while "\n" in buffer:
            line, buffer = buffer.split("\n", 1)
            result = _payload_from(line)
            if result == "[DONE]":
                return
            if isinstance(result, dict):
                yield result
    # A conforming stream ends with a newline; tolerate a missing one by
    # draining whatever complete "data:" line remains buffered.
    if buffer:
        result = _payload_from(buffer)
        if isinstance(result, dict):
            yield result
def translate_stream_event(event: Dict[str, Any], model: str, tool_call_indices: Dict[str, Dict[str, Any]]) -> List[_GeminiStreamChunk]:
    """Translate one Gemini SSE event into OpenAI-style stream chunks.

    ``tool_call_indices`` is caller-owned state shared across the whole
    stream: it maps a stable per-call key to ``{"index", "id",
    "last_arguments"}`` so repeated functionCall parts keep the same
    OpenAI tool_call index/id and only argument *deltas* are re-emitted.
    """
    candidates = event.get("candidates") or []
    if not candidates:
        return []
    cand = candidates[0] if isinstance(candidates[0], dict) else {}
    parts = ((cand.get("content") or {}).get("parts") or []) if isinstance(cand, dict) else []
    chunks: List[_GeminiStreamChunk] = []
    for part_index, part in enumerate(parts):
        if not isinstance(part, dict):
            continue
        # Thought parts become "reasoning" deltas, never user-visible text.
        if part.get("thought") is True and isinstance(part.get("text"), str):
            chunks.append(_make_stream_chunk(model=model, reasoning=part["text"]))
            continue
        if isinstance(part.get("text"), str) and part["text"]:
            chunks.append(_make_stream_chunk(model=model, content=part["text"]))
        fc = part.get("functionCall")
        if isinstance(fc, dict) and fc.get("name"):
            name = str(fc["name"])
            # sort_keys makes repeated serializations of the same args
            # byte-identical, which the delta suffix logic below relies on.
            try:
                args_str = json.dumps(fc.get("args") or {}, ensure_ascii=False, sort_keys=True)
            except (TypeError, ValueError):
                args_str = "{}"
            thought_signature = part.get("thoughtSignature") if isinstance(part.get("thoughtSignature"), str) else ""
            # Key identifies "the same call" across events; JSON keeps it
            # hashable with stable member ordering.
            call_key = json.dumps(
                {
                    "part_index": part_index,
                    "name": name,
                    "thought_signature": thought_signature,
                },
                sort_keys=True,
            )
            slot = tool_call_indices.get(call_key)
            if slot is None:
                slot = {
                    "index": len(tool_call_indices),
                    "id": f"call_{uuid.uuid4().hex[:12]}",
                    "last_arguments": "",
                }
                tool_call_indices[call_key] = slot
            # Emit only the argument suffix not yet sent; an identical
            # repeat produces an empty delta.
            emitted_arguments = args_str
            last_arguments = str(slot.get("last_arguments") or "")
            if last_arguments:
                if args_str == last_arguments:
                    emitted_arguments = ""
                elif args_str.startswith(last_arguments):
                    emitted_arguments = args_str[len(last_arguments):]
            slot["last_arguments"] = args_str
            chunks.append(
                _make_stream_chunk(
                    model=model,
                    tool_call_delta={
                        "index": slot["index"],
                        "id": slot["id"],
                        "name": name,
                        "arguments": emitted_arguments,
                        "extra_content": _tool_call_extra_from_part(part),
                    },
                )
            )
    finish_reason_raw = str(cand.get("finishReason") or "")
    if finish_reason_raw:
        # Any tool call seen during the stream forces "tool_calls",
        # mirroring the non-streaming translation path.
        mapped = "tool_calls" if tool_call_indices else _map_gemini_finish_reason(finish_reason_raw)
        chunks.append(_make_stream_chunk(model=model, finish_reason=mapped))
    return chunks
def gemini_http_error(response: httpx.Response) -> GeminiAPIError:
    """Convert a non-200 Gemini REST response into a rich ``GeminiAPIError``.

    Parses the standard Google error envelope (``{"error": {"status",
    "message", "details"}}``), extracting:

    * ``reason`` / ``metadata`` from the first ``google.rpc.ErrorInfo`` detail,
    * a retry delay from the ``Retry-After`` header, now falling back to a
      ``google.rpc.RetryInfo`` detail (``retryDelay`` encoded as a protobuf
      Duration JSON string such as ``"30s"`` or ``"3.5s"``),
    * a stable machine-readable ``code`` derived from the HTTP status.

    Never raises: malformed or unreadable bodies degrade to a generic
    message carrying the first 500 chars of the raw body.
    """
    status = response.status_code
    body_text = ""
    body_json: Dict[str, Any] = {}
    try:
        body_text = response.text
    except Exception:
        # Body may be unreadable (e.g. a closed stream); degrade gracefully.
        body_text = ""
    if body_text:
        try:
            parsed = json.loads(body_text)
            if isinstance(parsed, dict):
                body_json = parsed
        except (ValueError, TypeError):
            body_json = {}
    err_obj = body_json.get("error") if isinstance(body_json, dict) else None
    if not isinstance(err_obj, dict):
        err_obj = {}
    err_status = str(err_obj.get("status") or "").strip()
    err_message = str(err_obj.get("message") or "").strip()
    _raw_details = err_obj.get("details")
    details_list = _raw_details if isinstance(_raw_details, list) else []
    reason = ""
    retry_after: Optional[float] = None
    metadata: Dict[str, Any] = {}
    for detail in details_list:
        if not isinstance(detail, dict):
            continue
        type_url = str(detail.get("@type") or "")
        if not reason and type_url.endswith("/google.rpc.ErrorInfo"):
            reason_value = detail.get("reason")
            if isinstance(reason_value, str):
                reason = reason_value
            md = detail.get("metadata")
            if isinstance(md, dict):
                metadata = md
        # Server-suggested backoff (google.rpc.RetryInfo). Previously
        # ignored; used as fallback when no usable Retry-After header.
        if retry_after is None and type_url.endswith("/google.rpc.RetryInfo"):
            delay = detail.get("retryDelay")
            if isinstance(delay, str) and delay.endswith("s"):
                try:
                    retry_after = float(delay[:-1])
                except ValueError:
                    retry_after = None
    # A parseable Retry-After header takes precedence over RetryInfo.
    header_retry = response.headers.get("Retry-After") or response.headers.get("retry-after")
    if header_retry:
        try:
            retry_after = float(header_retry)
        except (TypeError, ValueError):
            pass  # keep any RetryInfo-derived value
    code = f"gemini_http_{status}"
    if status == 401:
        code = "gemini_unauthorized"
    elif status == 429:
        code = "gemini_rate_limited"
    elif status == 404:
        code = "gemini_model_not_found"
    if err_message:
        message = f"Gemini HTTP {status} ({err_status or 'error'}): {err_message}"
    else:
        message = f"Gemini returned HTTP {status}: {body_text[:500]}"
    return GeminiAPIError(
        message,
        code=code,
        status_code=status,
        response=response,
        retry_after=retry_after,
        details={
            "status": err_status,
            "reason": reason,
            "metadata": metadata,
            "message": err_message,
        },
    )
class _GeminiChatCompletions:
def __init__(self, client: "GeminiNativeClient"):
self._client = client
def create(self, **kwargs: Any) -> Any:
return self._client._create_chat_completion(**kwargs)
class _AsyncGeminiChatCompletions:
def __init__(self, client: "AsyncGeminiNativeClient"):
self._client = client
async def create(self, **kwargs: Any) -> Any:
return await self._client._create_chat_completion(**kwargs)
class _GeminiChatNamespace:
    """Provides the OpenAI-SDK ``client.chat.completions`` attribute path."""

    def __init__(self, client: "GeminiNativeClient"):
        # Public attribute: callers reach it as client.chat.completions.
        self.completions = _GeminiChatCompletions(client)
class _AsyncGeminiChatNamespace:
    """Provides the async OpenAI-SDK ``client.chat.completions`` path."""

    def __init__(self, client: "AsyncGeminiNativeClient"):
        # Public attribute: callers reach it as client.chat.completions.
        self.completions = _AsyncGeminiChatCompletions(client)
class GeminiNativeClient:
    """Minimal OpenAI-SDK-compatible facade over Gemini's native REST API.

    Exposes ``client.chat.completions.create(...)`` with OpenAI-style
    arguments, translating them into ``:generateContent`` /
    ``:streamGenerateContent`` calls and translating the responses back.
    """

    def __init__(
        self,
        *,
        api_key: str,
        base_url: Optional[str] = None,
        default_headers: Optional[Dict[str, str]] = None,
        timeout: Any = None,
        http_client: Optional[httpx.Client] = None,
        **_: Any,
    ) -> None:
        self.api_key = api_key
        # Accept OpenAI-compat base URLs ending in "/openai" by trimming
        # the suffix — this client always targets the native REST surface.
        normalized_base = (base_url or DEFAULT_GEMINI_BASE_URL).rstrip("/")
        if normalized_base.endswith("/openai"):
            normalized_base = normalized_base[: -len("/openai")]
        self.base_url = normalized_base
        self._default_headers = dict(default_headers or {})
        self.chat = _GeminiChatNamespace(self)
        self.is_closed = False
        # Long read timeout: generation (especially with thinking enabled)
        # can stream for minutes.
        self._http = http_client or httpx.Client(
            timeout=timeout or httpx.Timeout(connect=15.0, read=600.0, write=30.0, pool=30.0)
        )

    def close(self) -> None:
        """Mark the client closed and release the underlying HTTP pool."""
        self.is_closed = True
        try:
            self._http.close()
        except Exception:
            pass  # best-effort: double-close of the pool is harmless

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def _headers(self) -> Dict[str, str]:
        """Base headers for every request; constructor defaults override."""
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "x-goog-api-key": self.api_key,
            "User-Agent": "hermes-agent (gemini-native)",
        }
        headers.update(self._default_headers)
        return headers

    @staticmethod
    def _advance_stream_iterator(iterator: Iterator[_GeminiStreamChunk]) -> tuple[bool, Optional[_GeminiStreamChunk]]:
        """Step a stream iterator once; returns ``(exhausted, chunk)``.

        Exists so the async wrapper can advance the sync generator inside
        ``asyncio.to_thread`` without a StopIteration escaping across the
        thread boundary.
        """
        try:
            return False, next(iterator)
        except StopIteration:
            return True, None

    def _create_chat_completion(
        self,
        *,
        model: str = "gemini-2.5-flash",
        messages: Optional[List[Dict[str, Any]]] = None,
        stream: bool = False,
        tools: Any = None,
        tool_choice: Any = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        stop: Any = None,
        extra_body: Optional[Dict[str, Any]] = None,
        timeout: Any = None,
        **_: Any,
    ) -> Any:
        """OpenAI-style entry point.

        Returns a translated completion object, or (when ``stream=True``)
        a lazy iterator of ``_GeminiStreamChunk``. Raises ``GeminiAPIError``
        on non-200 responses or non-JSON bodies.
        """
        # thinking_config rides inside extra_body; accept both the
        # snake_case and camelCase spellings.
        thinking_config = None
        if isinstance(extra_body, dict):
            thinking_config = extra_body.get("thinking_config") or extra_body.get("thinkingConfig")
        request = build_gemini_request(
            messages=messages or [],
            tools=tools,
            tool_choice=tool_choice,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            stop=stop,
            thinking_config=thinking_config,
        )
        if stream:
            return self._stream_completion(model=model, request=request, timeout=timeout)
        url = f"{self.base_url}/models/{model}:generateContent"
        response = self._http.post(url, json=request, headers=self._headers(), timeout=timeout)
        if response.status_code != 200:
            raise gemini_http_error(response)
        try:
            payload = response.json()
        except ValueError as exc:
            raise GeminiAPIError(
                f"Invalid JSON from Gemini native API: {exc}",
                code="gemini_invalid_json",
                status_code=response.status_code,
                response=response,
            ) from exc
        return translate_gemini_response(payload, model=model)

    def _stream_completion(self, *, model: str, request: Dict[str, Any], timeout: Any = None) -> Iterator[_GeminiStreamChunk]:
        """Return a lazy generator of stream chunks for one request."""
        url = f"{self.base_url}/models/{model}:streamGenerateContent?alt=sse"
        stream_headers = dict(self._headers())
        stream_headers["Accept"] = "text/event-stream"

        def _generator() -> Iterator[_GeminiStreamChunk]:
            try:
                with self._http.stream("POST", url, json=request, headers=stream_headers, timeout=timeout) as response:
                    if response.status_code != 200:
                        # Drain the body first: response.text on a streaming
                        # response requires it to have been read.
                        response.read()
                        raise gemini_http_error(response)
                    # Shared across all events of this stream so repeated
                    # functionCall parts keep stable tool_call indices/ids.
                    tool_call_indices: Dict[str, Dict[str, Any]] = {}
                    for event in _iter_sse_events(response):
                        for chunk in translate_stream_event(event, model, tool_call_indices):
                            yield chunk
            except httpx.HTTPError as exc:
                raise GeminiAPIError(
                    f"Gemini streaming request failed: {exc}",
                    code="gemini_stream_error",
                ) from exc

        return _generator()
class AsyncGeminiNativeClient:
    """Async wrapper used by auxiliary_client for native Gemini calls."""

    def __init__(self, sync_client: GeminiNativeClient):
        self._sync = sync_client
        self.api_key = sync_client.api_key
        self.base_url = sync_client.base_url
        self.chat = _AsyncGeminiChatNamespace(self)

    async def _create_chat_completion(self, **kwargs: Any) -> Any:
        """Run the blocking sync client off the event loop.

        For streaming requests the sync call returns a generator, which is
        re-wrapped as an async generator advancing one chunk per thread hop.
        """
        wants_stream = bool(kwargs.get("stream"))
        outcome = await asyncio.to_thread(self._sync.chat.completions.create, **kwargs)
        if not wants_stream:
            return outcome

        async def _relay() -> Any:
            while True:
                exhausted, item = await asyncio.to_thread(
                    self._sync._advance_stream_iterator, outcome
                )
                if exhausted:
                    break
                yield item

        return _relay()

    async def close(self) -> None:
        """Close the wrapped sync client without blocking the loop."""
        await asyncio.to_thread(self._sync.close)

85
agent/gemini_schema.py Normal file
View file

@ -0,0 +1,85 @@
"""Helpers for translating OpenAI-style tool schemas to Gemini's schema subset."""
from __future__ import annotations
from typing import Any, Dict, List
# Gemini's ``FunctionDeclaration.parameters`` field accepts the ``Schema``
# object, which is only a subset of OpenAPI 3.0 / JSON Schema. Strip fields
# outside that subset before sending Hermes tool schemas to Google.
# Immutable on purpose: shared module-level state consulted on every
# sanitization call (frozenset also matches this codebase's convention
# for constant key sets).
_GEMINI_SCHEMA_ALLOWED_KEYS = frozenset({
    "type",
    "format",
    "title",
    "description",
    "nullable",
    "enum",
    "maxItems",
    "minItems",
    "properties",
    "required",
    "minProperties",
    "maxProperties",
    "minLength",
    "maxLength",
    "pattern",
    "example",
    "anyOf",
    "propertyOrdering",
    "default",
    "items",
    "minimum",
    "maximum",
})


def sanitize_gemini_schema(schema: Any) -> Dict[str, Any]:
    """Return a Gemini-compatible copy of a tool parameter schema.

    Hermes tool schemas are OpenAI-flavored JSON Schema and may contain
    keys such as ``$schema`` or ``additionalProperties`` that Google's
    Gemini ``Schema`` object rejects. This helper keeps only the
    documented Gemini subset and recursively sanitizes nested
    ``properties`` / ``items`` / ``anyOf`` definitions.

    Non-dict input (including None) yields ``{}``. The input is never
    mutated; a new dict is always returned.
    """
    if not isinstance(schema, dict):
        return {}
    cleaned: Dict[str, Any] = {}
    for key, value in schema.items():
        if key not in _GEMINI_SCHEMA_ALLOWED_KEYS:
            continue
        if key == "properties":
            # Recurse per property; drop non-dict "properties" entirely
            # and ignore non-string property names.
            if not isinstance(value, dict):
                continue
            props: Dict[str, Any] = {}
            for prop_name, prop_schema in value.items():
                if not isinstance(prop_name, str):
                    continue
                props[prop_name] = sanitize_gemini_schema(prop_schema)
            cleaned[key] = props
            continue
        if key == "items":
            cleaned[key] = sanitize_gemini_schema(value)
            continue
        if key == "anyOf":
            # Keep only dict alternatives, each recursively sanitized.
            if not isinstance(value, list):
                continue
            cleaned[key] = [
                sanitize_gemini_schema(item)
                for item in value
                if isinstance(item, dict)
            ]
            continue
        cleaned[key] = value
    return cleaned


def sanitize_gemini_tool_parameters(parameters: Any) -> Dict[str, Any]:
    """Normalize tool parameters to a valid Gemini object schema.

    Falls back to an empty object schema when sanitization leaves
    nothing usable, since ``FunctionDeclaration.parameters`` must be a
    well-formed object schema.
    """
    cleaned = sanitize_gemini_schema(parameters)
    if not cleaned:
        return {"type": "object", "properties": {}}
    return cleaned

242
agent/image_gen_provider.py Normal file
View file

@ -0,0 +1,242 @@
"""
Image Generation Provider ABC
=============================
Defines the pluggable-backend interface for image generation. Providers register
instances via ``PluginContext.register_image_gen_provider()``; the active one
(selected via ``image_gen.provider`` in ``config.yaml``) services every
``image_generate`` tool call.
Providers live in ``<repo>/plugins/image_gen/<name>/`` (built-in, auto-loaded
as ``kind: backend``) or ``~/.hermes/plugins/image_gen/<name>/`` (user, opt-in
via ``plugins.enabled``).
Response shape
--------------
All providers return a dict that :func:`success_response` / :func:`error_response`
produce. The tool wrapper JSON-serializes it. Keys:
success bool
image str | None URL or absolute file path
model str provider-specific model identifier
prompt str echoed prompt
aspect_ratio str "landscape" | "square" | "portrait"
provider str provider name (for diagnostics)
error str only when success=False
error_type str only when success=False
"""
from __future__ import annotations
import abc
import base64
import datetime
import logging
import uuid
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
logger = logging.getLogger(__name__)
# Aspect ratios accepted by the image_generate tool; anything else is
# coerced to DEFAULT_ASPECT_RATIO by resolve_aspect_ratio().
VALID_ASPECT_RATIOS: Tuple[str, ...] = ("landscape", "square", "portrait")
DEFAULT_ASPECT_RATIO = "landscape"
# ---------------------------------------------------------------------------
# ABC
# ---------------------------------------------------------------------------
class ImageGenProvider(abc.ABC):
    """Abstract base class for an image generation backend.

    Subclasses must implement :meth:`generate`. Everything else has sane
    defaults — override only what your provider needs.
    """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """Stable short identifier used in ``image_gen.provider`` config.

        Lowercase, no spaces. Examples: ``fal``, ``openai``, ``replicate``.
        """

    @property
    def display_name(self) -> str:
        """Human-readable label shown in ``hermes tools``. Defaults to ``name.title()``."""
        return self.name.title()

    def is_available(self) -> bool:
        """Return True when this provider can service calls.

        Typically checks for a required API key. Default: True
        (providers with no external dependencies are always available).
        """
        return True

    def list_models(self) -> List[Dict[str, Any]]:
        """Return catalog entries for the ``hermes tools`` model picker.

        Each entry::

            {
                "id": "gpt-image-1.5",        # required
                "display": "GPT Image 1.5",   # optional; defaults to id
                "speed": "~10s",              # optional
                "strengths": "...",           # optional
                "price": "$...",              # optional
            }

        Default: empty list (provider has no user-selectable models).
        """
        return []

    def get_setup_schema(self) -> Dict[str, Any]:
        """Return provider metadata for the ``hermes tools`` picker.

        Used by ``tools_config.py`` to inject this provider as a row in
        the Image Generation provider list. Shape::

            {
                "name": "OpenAI",                   # picker label
                "badge": "paid",                    # optional short tag
                "tag": "One-line description...",   # optional subtitle
                "env_vars": [                       # keys to prompt for
                    {"key": "OPENAI_API_KEY",
                     "prompt": "OpenAI API key",
                     "url": "https://platform.openai.com/api-keys"},
                ],
            }

        Default: minimal entry derived from ``display_name``. Override to
        expose API key prompts and custom badges.
        """
        return {
            "name": self.display_name,
            "badge": "",
            "tag": "",
            "env_vars": [],
        }

    def default_model(self) -> Optional[str]:
        """Return the default model id (first catalog entry), or None."""
        models = self.list_models()
        if models:
            return models[0].get("id")
        return None

    @abc.abstractmethod
    def generate(
        self,
        prompt: str,
        aspect_ratio: str = DEFAULT_ASPECT_RATIO,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """Generate an image.

        Implementations should return the dict from :func:`success_response`
        or :func:`error_response`. ``kwargs`` may carry forward-compat
        parameters — future schema versions will expose them, and
        implementations should ignore unknown keys.
        """
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def resolve_aspect_ratio(value: Optional[str]) -> str:
    """Coerce *value* to a supported aspect ratio.

    Non-strings and unrecognized strings fall back to
    ``DEFAULT_ASPECT_RATIO`` rather than raising, keeping the tool
    surface forgiving of agent mistakes.
    """
    if isinstance(value, str):
        normalized = value.strip().lower()
        if normalized in VALID_ASPECT_RATIOS:
            return normalized
    return DEFAULT_ASPECT_RATIO
def _images_cache_dir() -> Path:
    """Return ``$HERMES_HOME/cache/images/``, creating it if missing."""
    from hermes_constants import get_hermes_home

    cache_dir = get_hermes_home() / "cache" / "images"
    cache_dir.mkdir(parents=True, exist_ok=True)
    return cache_dir
def save_b64_image(
    b64_data: str,
    *,
    prefix: str = "image",
    extension: str = "png",
) -> Path:
    """Decode base64 image data into ``$HERMES_HOME/cache/images/``.

    Filename format: ``<prefix>_<YYYYMMDD_HHMMSS>_<short-uuid>.<ext>``.
    Returns the absolute :class:`Path` of the written file.
    """
    decoded = base64.b64decode(b64_data)
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    token = uuid.uuid4().hex[:8]
    target = _images_cache_dir() / f"{prefix}_{stamp}_{token}.{extension}"
    target.write_bytes(decoded)
    return target
def success_response(
    *,
    image: str,
    model: str,
    prompt: str,
    aspect_ratio: str,
    provider: str,
    extra: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Assemble the uniform success payload for an image generation call.

    ``image`` is either an HTTP URL or an absolute filesystem path (for
    base64-returning backends). Entries in ``extra`` are merged without
    overwriting the core keys.
    """
    response: Dict[str, Any] = {
        "success": True,
        "image": image,
        "model": model,
        "prompt": prompt,
        "aspect_ratio": aspect_ratio,
        "provider": provider,
    }
    for key, value in (extra or {}).items():
        response.setdefault(key, value)
    return response
def error_response(
    *,
    error: str,
    error_type: str = "provider_error",
    provider: str = "",
    model: str = "",
    prompt: str = "",
    aspect_ratio: str = DEFAULT_ASPECT_RATIO,
) -> Dict[str, Any]:
    """Assemble the uniform failure payload for an image generation call."""
    payload: Dict[str, Any] = {
        "success": False,
        "image": None,
        "error": error,
        "error_type": error_type,
        "model": model,
        "prompt": prompt,
        "aspect_ratio": aspect_ratio,
        "provider": provider,
    }
    return payload

120
agent/image_gen_registry.py Normal file
View file

@ -0,0 +1,120 @@
"""
Image Generation Provider Registry
==================================
Central map of registered providers. Populated by plugins at import-time via
``PluginContext.register_image_gen_provider()``; consumed by the
``image_generate`` tool to dispatch each call to the active backend.
Active selection
----------------
The active provider is chosen by ``image_gen.provider`` in ``config.yaml``.
If unset, :func:`get_active_provider` applies fallback logic:
1. If exactly one provider is registered, use it.
2. Otherwise if a provider named ``fal`` is registered, use it (legacy
default — matches pre-plugin behavior).
3. Otherwise return ``None`` (the tool surfaces a helpful error pointing
the user at ``hermes tools``).
"""
from __future__ import annotations
import logging
import threading
from typing import Dict, List, Optional
from agent.image_gen_provider import ImageGenProvider
logger = logging.getLogger(__name__)
# Registry state: name -> provider instance. Guarded by _lock so that
# register/list/lookup stay consistent under concurrent plugin imports.
_providers: Dict[str, ImageGenProvider] = {}
_lock = threading.Lock()
def register_provider(provider: ImageGenProvider) -> None:
    """Register an image generation provider.

    Re-registration under the same ``name`` overwrites the previous entry
    (logged at debug level) so hot-reload scenarios — tests, dev loops —
    behave predictably.

    Raises:
        TypeError: if *provider* is not an ``ImageGenProvider`` instance.
        ValueError: if ``provider.name`` is not a non-empty string.
    """
    if not isinstance(provider, ImageGenProvider):
        raise TypeError(
            f"register_provider() expects an ImageGenProvider instance, "
            f"got {type(provider).__name__}"
        )
    name = provider.name
    if not isinstance(name, str) or not name.strip():
        raise ValueError("Image gen provider .name must be a non-empty string")
    with _lock:
        previous = _providers.get(name)
        _providers[name] = provider
        if previous is None:
            logger.debug("Registered image gen provider '%s' (%s)", name, type(provider).__name__)
        else:
            logger.debug("Image gen provider '%s' re-registered (was %r)", name, type(previous).__name__)
def list_providers() -> List[ImageGenProvider]:
    """Return every registered provider, ordered by ``name``."""
    with _lock:
        snapshot = list(_providers.values())
    snapshot.sort(key=lambda provider: provider.name)
    return snapshot
def get_provider(name: str) -> Optional[ImageGenProvider]:
    """Look up a provider by registered name; None when absent or invalid."""
    if not isinstance(name, str):
        return None
    key = name.strip()
    with _lock:
        return _providers.get(key)
def get_active_provider() -> Optional[ImageGenProvider]:
    """Resolve the currently-active provider.

    Reads ``image_gen.provider`` from config.yaml; falls back per the
    module docstring when unset or not registered.
    """

    def _configured_name() -> Optional[str]:
        # Best-effort config read: any failure just means "not configured".
        try:
            from hermes_cli.config import load_config

            cfg = load_config()
            section = cfg.get("image_gen") if isinstance(cfg, dict) else None
            if isinstance(section, dict):
                raw = section.get("provider")
                if isinstance(raw, str) and raw.strip():
                    return raw.strip()
        except Exception as exc:
            logger.debug("Could not read image_gen.provider from config: %s", exc)
        return None

    wanted = _configured_name()
    with _lock:
        registered = dict(_providers)
    if wanted:
        match = registered.get(wanted)
        if match is not None:
            return match
        logger.debug(
            "image_gen.provider='%s' configured but not registered; falling back",
            wanted,
        )
    # Fallback 1: a single registered provider is unambiguous.
    if len(registered) == 1:
        return next(iter(registered.values()))
    # Fallback 2: legacy FAL default (pre-plugin behavior); else None.
    return registered.get("fal")
def _reset_for_tests() -> None:
    """Clear the registry. **Test-only.**

    Removes every registered provider so each test starts from a clean
    slate; production code must never call this.
    """
    with _lock:
        _providers.clear()

View file

@ -124,6 +124,7 @@ class InsightsEngine:
# Gather raw data
sessions = self._get_sessions(cutoff, source)
tool_usage = self._get_tool_usage(cutoff, source)
skill_usage = self._get_skill_usage(cutoff, source)
message_stats = self._get_message_stats(cutoff, source)
if not sessions:
@ -135,6 +136,15 @@ class InsightsEngine:
"models": [],
"platforms": [],
"tools": [],
"skills": {
"summary": {
"total_skill_loads": 0,
"total_skill_edits": 0,
"total_skill_actions": 0,
"distinct_skills_used": 0,
},
"top_skills": [],
},
"activity": {},
"top_sessions": [],
}
@ -144,6 +154,7 @@ class InsightsEngine:
models = self._compute_model_breakdown(sessions)
platforms = self._compute_platform_breakdown(sessions)
tools = self._compute_tool_breakdown(tool_usage)
skills = self._compute_skill_breakdown(skill_usage)
activity = self._compute_activity_patterns(sessions)
top_sessions = self._compute_top_sessions(sessions)
@ -156,6 +167,7 @@ class InsightsEngine:
"models": models,
"platforms": platforms,
"tools": tools,
"skills": skills,
"activity": activity,
"top_sessions": top_sessions,
}
@ -284,6 +296,82 @@ class InsightsEngine:
for name, count in tool_counts.most_common()
]
def _get_skill_usage(self, cutoff: float, source: str = None) -> List[Dict]:
    """Extract per-skill usage from assistant tool calls.

    Scans the JSON ``tool_calls`` column of assistant messages in sessions
    started at or after *cutoff* (epoch seconds), optionally restricted to
    one session *source*. Only ``skill_view`` / ``skill_manage`` calls are
    counted; the skill name comes from the call's ``name`` argument.

    Returns one dict per skill:
    ``{"skill", "view_count", "manage_count", "last_used_at"}``.
    """
    skill_counts: Dict[str, Dict[str, Any]] = {}
    # Two variants of the same query: the optional source filter can't be
    # expressed cleanly in a single parameterized statement.
    if source:
        cursor = self._conn.execute(
            """SELECT m.tool_calls, m.timestamp
            FROM messages m
            JOIN sessions s ON s.id = m.session_id
            WHERE s.started_at >= ? AND s.source = ?
            AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""",
            (cutoff, source),
        )
    else:
        cursor = self._conn.execute(
            """SELECT m.tool_calls, m.timestamp
            FROM messages m
            JOIN sessions s ON s.id = m.session_id
            WHERE s.started_at >= ?
            AND m.role = 'assistant' AND m.tool_calls IS NOT NULL""",
            (cutoff,),
        )
    for row in cursor.fetchall():
        # tool_calls may be stored as JSON text or already decoded; skip
        # rows that don't decode to a list.
        try:
            calls = row["tool_calls"]
            if isinstance(calls, str):
                calls = json.loads(calls)
            if not isinstance(calls, list):
                continue
        except (json.JSONDecodeError, TypeError):
            continue
        timestamp = row["timestamp"]
        for call in calls:
            if not isinstance(call, dict):
                continue
            func = call.get("function", {})
            tool_name = func.get("name")
            if tool_name not in {"skill_view", "skill_manage"}:
                continue
            # Arguments may be a JSON string (OpenAI wire form) or a dict.
            args = func.get("arguments")
            if isinstance(args, str):
                try:
                    args = json.loads(args)
                except (json.JSONDecodeError, TypeError):
                    continue
            if not isinstance(args, dict):
                continue
            skill_name = args.get("name")
            if not isinstance(skill_name, str) or not skill_name.strip():
                continue
            entry = skill_counts.setdefault(
                skill_name,
                {
                    "skill": skill_name,
                    "view_count": 0,
                    "manage_count": 0,
                    "last_used_at": None,
                },
            )
            if tool_name == "skill_view":
                entry["view_count"] += 1
            else:
                entry["manage_count"] += 1
            # Track the most recent use across all matching calls.
            if timestamp is not None and (
                entry["last_used_at"] is None or timestamp > entry["last_used_at"]
            ):
                entry["last_used_at"] = timestamp
    return list(skill_counts.values())
def _get_message_stats(self, cutoff: float, source: str = None) -> Dict:
"""Get aggregate message statistics."""
if source:
@ -475,6 +563,46 @@ class InsightsEngine:
})
return result
def _compute_skill_breakdown(self, skill_usage: List[Dict]) -> Dict[str, Any]:
"""Process per-skill usage into summary + ranked list."""
total_skill_loads = sum(s["view_count"] for s in skill_usage) if skill_usage else 0
total_skill_edits = sum(s["manage_count"] for s in skill_usage) if skill_usage else 0
total_skill_actions = total_skill_loads + total_skill_edits
top_skills = []
for skill in skill_usage:
total_count = skill["view_count"] + skill["manage_count"]
percentage = (total_count / total_skill_actions * 100) if total_skill_actions else 0
top_skills.append({
"skill": skill["skill"],
"view_count": skill["view_count"],
"manage_count": skill["manage_count"],
"total_count": total_count,
"percentage": percentage,
"last_used_at": skill.get("last_used_at"),
})
top_skills.sort(
key=lambda s: (
s["total_count"],
s["view_count"],
s["manage_count"],
s["last_used_at"] or 0,
s["skill"],
),
reverse=True,
)
return {
"summary": {
"total_skill_loads": total_skill_loads,
"total_skill_edits": total_skill_edits,
"total_skill_actions": total_skill_actions,
"distinct_skills_used": len(skill_usage),
},
"top_skills": top_skills,
}
def _compute_activity_patterns(self, sessions: List[Dict]) -> Dict:
"""Analyze activity patterns by day of week and hour."""
day_counts = Counter() # 0=Monday ... 6=Sunday
@ -670,6 +798,28 @@ class InsightsEngine:
lines.append(f" ... and {len(report['tools']) - 15} more tools")
lines.append("")
# Skill usage
skills = report.get("skills", {})
top_skills = skills.get("top_skills", [])
if top_skills:
lines.append(" 🧠 Top Skills")
lines.append(" " + "" * 56)
lines.append(f" {'Skill':<28} {'Loads':>7} {'Edits':>7} {'Last used':>11}")
for skill in top_skills[:10]:
last_used = ""
if skill.get("last_used_at"):
last_used = datetime.fromtimestamp(skill["last_used_at"]).strftime("%b %d")
lines.append(
f" {skill['skill'][:28]:<28} {skill['view_count']:>7,} {skill['manage_count']:>7,} {last_used:>11}"
)
summary = skills.get("summary", {})
lines.append(
f" Distinct skills: {summary.get('distinct_skills_used', 0)} "
f"Loads: {summary.get('total_skill_loads', 0):,} "
f"Edits: {summary.get('total_skill_edits', 0):,}"
)
lines.append("")
# Activity patterns
act = report.get("activity", {})
if act.get("by_day"):
@ -753,6 +903,18 @@ class InsightsEngine:
lines.append(f" {t['tool']}{t['count']:,} calls ({t['percentage']:.1f}%)")
lines.append("")
skills = report.get("skills", {})
if skills.get("top_skills"):
lines.append("**🧠 Top Skills:**")
for skill in skills["top_skills"][:5]:
suffix = ""
if skill.get("last_used_at"):
suffix = f", last used {datetime.fromtimestamp(skill['last_used_at']).strftime('%b %d')}"
lines.append(
f" {skill['skill']}{skill['view_count']:,} loads, {skill['manage_count']:,} edits{suffix}"
)
lines.append("")
# Activity summary
act = report.get("activity", {})
if act.get("busiest_day") and act.get("busiest_hour"):

View file

@ -14,6 +14,8 @@ from urllib.parse import urlparse
import requests
import yaml
from utils import base_url_host_matches, base_url_hostname
from hermes_constants import OPENROUTER_MODELS_URL
logger = logging.getLogger(__name__)
@ -23,7 +25,7 @@ logger = logging.getLogger(__name__)
# are preserved so the full model name reaches cache lookups and server queries.
_PROVIDER_PREFIXES: frozenset[str] = frozenset({
"openrouter", "nous", "openai-codex", "copilot", "copilot-acp",
"gemini", "ollama-cloud", "zai", "kimi-coding", "kimi-coding-cn", "minimax", "minimax-cn", "anthropic", "deepseek",
"gemini", "ollama-cloud", "zai", "kimi-coding", "kimi-coding-cn", "stepfun", "minimax", "minimax-cn", "anthropic", "deepseek",
"opencode-zen", "opencode-go", "ai-gateway", "kilocode", "alibaba",
"qwen-oauth",
"xiaomi",
@ -34,7 +36,7 @@ _PROVIDER_PREFIXES: frozenset[str] = frozenset({
"glm", "z-ai", "z.ai", "zhipu", "github", "github-copilot",
"github-models", "kimi", "moonshot", "kimi-cn", "moonshot-cn", "claude", "deep-seek",
"ollama",
"opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen",
"stepfun", "opencode", "zen", "go", "vercel", "kilo", "dashscope", "aliyun", "qwen",
"mimo", "xiaomi-mimo",
"arcee-ai", "arceeai",
"xai", "x-ai", "x.ai", "grok",
@ -116,7 +118,6 @@ DEFAULT_CONTEXT_LENGTHS = {
"gpt-5.4-nano": 400000, # 400k (not 1.05M like full 5.4)
"gpt-5.4-mini": 400000, # 400k (not 1.05M like full 5.4)
"gpt-5.4": 1050000, # GPT-5.4, GPT-5.4 Pro (1.05M context)
"gpt-5.3-codex-spark": 128000, # Spark variant has reduced 128k context
"gpt-5.1-chat": 128000, # Chat variant has 128k context
"gpt-5": 400000, # GPT-5.x base, mini, codex variants (400k)
"gpt-4.1": 1047576,
@ -169,6 +170,7 @@ DEFAULT_CONTEXT_LENGTHS = {
"Qwen/Qwen3.5-35B-A3B": 131072,
"deepseek-ai/DeepSeek-V3.2": 65536,
"moonshotai/Kimi-K2.5": 262144,
"moonshotai/Kimi-K2.6": 262144,
"moonshotai/Kimi-K2-Thinking": 262144,
"MiniMaxAI/MiniMax-M2.5": 204800,
"XiaomiMiMo/MiMo-V2-Flash": 256000,
@ -211,8 +213,15 @@ def _normalize_base_url(base_url: str) -> str:
return (base_url or "").strip().rstrip("/")
def _auth_headers(api_key: str = "") -> Dict[str, str]:
token = str(api_key or "").strip()
if not token:
return {}
return {"Authorization": f"Bearer {token}"}
def _is_openrouter_base_url(base_url: str) -> bool:
return "openrouter.ai" in _normalize_base_url(base_url).lower()
return base_url_host_matches(base_url, "openrouter.ai")
def _is_custom_endpoint(base_url: str) -> bool:
@ -228,6 +237,8 @@ _URL_TO_PROVIDER: Dict[str, str] = {
"api.moonshot.ai": "kimi-coding",
"api.moonshot.cn": "kimi-coding-cn",
"api.kimi.com": "kimi-coding",
"api.stepfun.ai": "stepfun",
"api.stepfun.com": "stepfun",
"api.arcee.ai": "arcee",
"api.minimax": "minimax",
"dashscope.aliyuncs.com": "alibaba",
@ -310,7 +321,7 @@ def is_local_endpoint(base_url: str) -> bool:
return False
def detect_local_server_type(base_url: str) -> Optional[str]:
def detect_local_server_type(base_url: str, api_key: str = "") -> Optional[str]:
"""Detect which local server is running at base_url by probing known endpoints.
Returns one of: "ollama", "lm-studio", "vllm", "llamacpp", or None.
@ -322,8 +333,10 @@ def detect_local_server_type(base_url: str) -> Optional[str]:
if server_url.endswith("/v1"):
server_url = server_url[:-3]
headers = _auth_headers(api_key)
try:
with httpx.Client(timeout=2.0) as client:
with httpx.Client(timeout=2.0, headers=headers) as client:
# LM Studio exposes /api/v1/models — check first (most specific)
try:
r = client.get(f"{server_url}/api/v1/models")
@ -510,6 +523,59 @@ def fetch_endpoint_model_metadata(
headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
last_error: Optional[Exception] = None
if is_local_endpoint(normalized):
try:
if detect_local_server_type(normalized, api_key=api_key) == "lm-studio":
server_url = normalized[:-3].rstrip("/") if normalized.endswith("/v1") else normalized
response = requests.get(
server_url.rstrip("/") + "/api/v1/models",
headers=headers,
timeout=10,
)
response.raise_for_status()
payload = response.json()
cache: Dict[str, Dict[str, Any]] = {}
for model in payload.get("models", []):
if not isinstance(model, dict):
continue
model_id = model.get("key") or model.get("id")
if not model_id:
continue
entry: Dict[str, Any] = {"name": model.get("name", model_id)}
context_length = None
for inst in model.get("loaded_instances", []) or []:
if not isinstance(inst, dict):
continue
cfg = inst.get("config", {})
ctx = cfg.get("context_length") if isinstance(cfg, dict) else None
if isinstance(ctx, int) and ctx > 0:
context_length = ctx
break
if context_length is None:
context_length = _extract_context_length(model)
if context_length is not None:
entry["context_length"] = context_length
max_completion_tokens = _extract_max_completion_tokens(model)
if max_completion_tokens is not None:
entry["max_completion_tokens"] = max_completion_tokens
pricing = _extract_pricing(model)
if pricing:
entry["pricing"] = pricing
_add_model_aliases(cache, model_id, entry)
alt_id = model.get("id")
if isinstance(alt_id, str) and alt_id and alt_id != model_id:
_add_model_aliases(cache, alt_id, entry)
_endpoint_model_metadata_cache[normalized] = cache
_endpoint_model_metadata_cache_time[normalized] = time.time()
return cache
except Exception as exc:
last_error = exc
for candidate in candidates:
url = candidate.rstrip("/") + "/models"
try:
@ -716,7 +782,7 @@ def _model_id_matches(candidate_id: str, lookup_model: str) -> bool:
return False
def query_ollama_num_ctx(model: str, base_url: str) -> Optional[int]:
def query_ollama_num_ctx(model: str, base_url: str, api_key: str = "") -> Optional[int]:
"""Query an Ollama server for the model's context length.
Returns the model's maximum context from GGUF metadata via ``/api/show``,
@ -734,14 +800,16 @@ def query_ollama_num_ctx(model: str, base_url: str) -> Optional[int]:
server_url = server_url[:-3]
try:
server_type = detect_local_server_type(base_url)
server_type = detect_local_server_type(base_url, api_key=api_key)
except Exception:
return None
if server_type != "ollama":
return None
headers = _auth_headers(api_key)
try:
with httpx.Client(timeout=3.0) as client:
with httpx.Client(timeout=3.0, headers=headers) as client:
resp = client.post(f"{server_url}/api/show", json={"name": bare_model})
if resp.status_code != 200:
return None
@ -769,7 +837,7 @@ def query_ollama_num_ctx(model: str, base_url: str) -> Optional[int]:
return None
def _query_local_context_length(model: str, base_url: str) -> Optional[int]:
def _query_local_context_length(model: str, base_url: str, api_key: str = "") -> Optional[int]:
"""Query a local server for the model's context length."""
import httpx
@ -782,13 +850,15 @@ def _query_local_context_length(model: str, base_url: str) -> Optional[int]:
if server_url.endswith("/v1"):
server_url = server_url[:-3]
headers = _auth_headers(api_key)
try:
server_type = detect_local_server_type(base_url)
server_type = detect_local_server_type(base_url, api_key=api_key)
except Exception:
server_type = None
try:
with httpx.Client(timeout=3.0) as client:
with httpx.Client(timeout=3.0, headers=headers) as client:
# Ollama: /api/show returns model details with context info
if server_type == "ollama":
resp = client.post(f"{server_url}/api/show", json={"name": model})
@ -999,7 +1069,7 @@ def get_model_context_length(
if not _is_known_provider_base_url(base_url):
# 3. Try querying local server directly
if is_local_endpoint(base_url):
local_ctx = _query_local_context_length(model, base_url)
local_ctx = _query_local_context_length(model, base_url, api_key=api_key)
if local_ctx and local_ctx > 0:
save_context_length(model, base_url, local_ctx)
return local_ctx
@ -1013,7 +1083,7 @@ def get_model_context_length(
# 4. Anthropic /v1/models API (only for regular API keys, not OAuth)
if provider == "anthropic" or (
base_url and "api.anthropic.com" in base_url
base_url and base_url_hostname(base_url) == "api.anthropic.com"
):
ctx = _query_anthropic_context_length(model, base_url or "https://api.anthropic.com", api_key)
if ctx:
@ -1022,7 +1092,11 @@ def get_model_context_length(
# 4b. AWS Bedrock — use static context length table.
# Bedrock's ListFoundationModels doesn't expose context window sizes,
# so we maintain a curated table in bedrock_adapter.py.
if provider == "bedrock" or (base_url and "bedrock-runtime" in base_url):
if provider == "bedrock" or (
base_url
and base_url_hostname(base_url).startswith("bedrock-runtime.")
and base_url_host_matches(base_url, "amazonaws.com")
):
try:
from agent.bedrock_adapter import get_bedrock_context_length
return get_bedrock_context_length(model)
@ -1069,7 +1143,7 @@ def get_model_context_length(
# 9. Query local server as last resort
if base_url and is_local_endpoint(base_url):
local_ctx = _query_local_context_length(model, base_url)
local_ctx = _query_local_context_length(model, base_url, api_key=api_key)
if local_ctx and local_ctx > 0:
save_context_length(model, base_url, local_ctx)
return local_ctx

View file

@ -146,6 +146,7 @@ PROVIDER_TO_MODELS_DEV: Dict[str, str] = {
"openai-codex": "openai",
"zai": "zai",
"kimi-coding": "kimi-for-coding",
"stepfun": "stepfun",
"kimi-coding-cn": "kimi-for-coding",
"minimax": "minimax",
"minimax-cn": "minimax-cn",

View file

@ -152,7 +152,13 @@ MEMORY_GUIDANCE = (
"Do NOT save task progress, session outcomes, completed-work logs, or temporary TODO "
"state to memory; use session_search to recall those from past transcripts. "
"If you've discovered a new way to do something, solved a problem that could be "
"necessary later, save it as a skill with the skill tool."
"necessary later, save it as a skill with the skill tool.\n"
"Write memories as declarative facts, not instructions to yourself. "
"'User prefers concise responses' ✓ — 'Always respond concisely' ✗. "
"'Project uses pytest with xdist' ✓ — 'Run tests with pytest -n 4' ✗. "
"Imperative phrasing gets re-read as a directive in later sessions and can "
"cause repeated work or override the user's current request. Procedures and "
"workflows belong in skills, not memory."
)
SESSION_SEARCH_GUIDANCE = (
@ -344,7 +350,13 @@ PLATFORM_HINTS = {
),
"cli": (
"You are a CLI AI Agent. Try not to use markdown but simple text "
"renderable inside a terminal."
"renderable inside a terminal. "
"File delivery: there is no attachment channel — the user reads your "
"response directly in their terminal. Do NOT emit MEDIA:/path tags "
"(those are only intercepted on messaging platforms like Telegram, "
"Discord, Slack, etc.; on the CLI they render as literal text). "
"When referring to a file you created or changed, just state its "
"absolute path in plain text; the user can open it from there."
),
"sms": (
"You are communicating via SMS. Keep responses concise and use plain text "
@ -613,12 +625,14 @@ def build_skills_system_prompt(
or get_session_env("HERMES_SESSION_PLATFORM")
or ""
)
disabled = get_disabled_skill_names()
cache_key = (
str(skills_dir.resolve()),
tuple(str(d) for d in external_dirs),
tuple(sorted(str(t) for t in (available_tools or set()))),
tuple(sorted(str(ts) for ts in (available_toolsets or set()))),
_platform_hint,
tuple(sorted(disabled)),
)
with _SKILLS_PROMPT_CACHE_LOCK:
cached = _SKILLS_PROMPT_CACHE.get(cache_key)
@ -626,8 +640,6 @@ def build_skills_system_prompt(
_SKILLS_PROMPT_CACHE.move_to_end(cache_key)
return cached
disabled = get_disabled_skill_names()
# ── Layer 2: disk snapshot ────────────────────────────────────────
snapshot = _load_skills_snapshot(skills_dir)

View file

@ -13,6 +13,48 @@ import re
logger = logging.getLogger(__name__)
# Sensitive query-string parameter names (case-insensitive exact match).
# Ported from nearai/ironclaw#2529 — catches tokens whose values don't match
# any known vendor prefix regex (e.g. opaque tokens, short OAuth codes).
_SENSITIVE_QUERY_PARAMS = frozenset({
"access_token",
"refresh_token",
"id_token",
"token",
"api_key",
"apikey",
"client_secret",
"password",
"auth",
"jwt",
"session",
"secret",
"key",
"code", # OAuth authorization codes
"signature", # pre-signed URL signatures
"x-amz-signature",
})
# Sensitive form-urlencoded / JSON body key names (case-insensitive exact match).
# Exact match, NOT substring — "token_count" and "session_id" must NOT match.
# Ported from nearai/ironclaw#2529.
_SENSITIVE_BODY_KEYS = frozenset({
"access_token",
"refresh_token",
"id_token",
"token",
"api_key",
"apikey",
"client_secret",
"password",
"auth",
"jwt",
"secret",
"private_key",
"authorization",
"key",
})
# Snapshot at import time so runtime env mutations (e.g. LLM-generated
# `export HERMES_REDACT_SECRETS=false`) cannot disable redaction mid-session.
_REDACT_ENABLED = os.getenv("HERMES_REDACT_SECRETS", "").lower() not in ("0", "false", "no", "off")
@ -108,6 +150,30 @@ _DISCORD_MENTION_RE = re.compile(r"<@!?(\d{17,20})>")
# Negative lookahead prevents matching hex strings or identifiers
_SIGNAL_PHONE_RE = re.compile(r"(\+[1-9]\d{6,14})(?![A-Za-z0-9])")
# URLs containing query strings — matches `scheme://...?...[# or end]`.
# Used to scan text for URLs whose query params may contain secrets.
# Ported from nearai/ironclaw#2529.
_URL_WITH_QUERY_RE = re.compile(
r"(https?|wss?|ftp)://" # scheme
r"([^\s/?#]+)" # authority (may include userinfo)
r"([^\s?#]*)" # path
r"\?([^\s#]+)" # query (required)
r"(#\S*)?", # optional fragment
)
# URLs containing userinfo — `scheme://user:password@host` for ANY scheme
# (not just DB protocols already covered by _DB_CONNSTR_RE above).
# Catches things like `https://user:token@api.example.com/v1/foo`.
_URL_USERINFO_RE = re.compile(
r"(https?|wss?|ftp)://([^/\s:@]+):([^/\s@]+)@",
)
# Form-urlencoded body detection: conservative — only applies when the entire
# text looks like a query string (k=v&k=v pattern with no newlines).
_FORM_BODY_RE = re.compile(
r"^[A-Za-z_][A-Za-z0-9_.-]*=[^&\s]*(?:&[A-Za-z_][A-Za-z0-9_.-]*=[^&\s]*)+$"
)
# Compile known prefix patterns into one alternation
_PREFIX_RE = re.compile(
r"(?<![A-Za-z0-9_-])(" + "|".join(_PREFIX_PATTERNS) + r")(?![A-Za-z0-9_-])"
@ -121,6 +187,72 @@ def _mask_token(token: str) -> str:
return f"{token[:6]}...{token[-4:]}"
def _redact_query_string(query: str) -> str:
    """Mask values of sensitive keys in a ``k=v&k=v`` query string.

    Keys are matched case-insensitively against _SENSITIVE_QUERY_PARAMS;
    matching pairs become ``key=***``. Pairs without an ``=`` (malformed
    or empty) and non-sensitive pairs are emitted untouched.
    """
    if not query:
        return query

    def _mask_pair(pair: str) -> str:
        if "=" not in pair:
            return pair
        key, _sep, _value = pair.partition("=")
        return f"{key}=***" if key.lower() in _SENSITIVE_QUERY_PARAMS else pair

    return "&".join(_mask_pair(p) for p in query.split("&"))
def _redact_url_query_params(text: str) -> str:
    """Rewrite every URL-with-query found in *text*, masking secret params.

    Complements the vendor-prefix regexes: opaque values such as OAuth
    ``code=`` don't match any known prefix pattern, e.g.
    ``https://example.com/cb?code=ABC123&state=xyz`` becomes
    ``...?code=***&state=xyz``.
    """
    def _rewrite(match: re.Match) -> str:
        scheme, authority, path, query, fragment = match.groups()
        redacted = _redact_query_string(query)
        return f"{scheme}://{authority}{path}?{redacted}{fragment or ''}"

    return _URL_WITH_QUERY_RE.sub(_rewrite, text)
def _redact_url_userinfo(text: str) -> str:
    """Mask the password in ``scheme://user:password@host`` URLs.

    Applies to generic web schemes (http/https/ws/wss/ftp) only; DB
    connection strings (postgres, mysql, mongodb, redis, amqp) are
    already handled by ``_DB_CONNSTR_RE``.
    """
    def _mask(m: re.Match) -> str:
        return f"{m.group(1)}://{m.group(2)}:***@"

    return _URL_USERINFO_RE.sub(_mask, text)
def _redact_form_body(text: str) -> str:
    """Redact sensitive values when *text* is a form-urlencoded body.

    Deliberately conservative: only fires when the whole input is a clean
    ``k=v&k=v`` string (single line, no surrounding prose) — query strings
    embedded in URLs are handled by ``_redact_url_query_params`` instead.
    Anything that doesn't look like a pure form body passes through
    unchanged.
    """
    if not text or "\n" in text or "&" not in text:
        return text
    candidate = text.strip()
    # Strict whole-string match: partial or prose-wrapped bodies don't trigger.
    if _FORM_BODY_RE.match(candidate):
        return _redact_query_string(candidate)
    return text
def redact_sensitive_text(text: str) -> str:
"""Apply all redaction patterns to a block of text.
@ -173,6 +305,16 @@ def redact_sensitive_text(text: str) -> str:
# JWT tokens (eyJ... — base64-encoded JSON headers)
text = _JWT_RE.sub(lambda m: _mask_token(m.group(0)), text)
# URL userinfo (http(s)://user:pass@host) — redact for non-DB schemes.
# DB schemes are handled above by _DB_CONNSTR_RE.
text = _redact_url_userinfo(text)
# URL query params containing opaque tokens (?access_token=…&code=…)
text = _redact_url_query_params(text)
# Form-urlencoded bodies (only triggers on clean k=v&k=v inputs).
text = _redact_form_body(text)
# Discord user/role mentions (<@snowflake_id>)
text = _DISCORD_MENTION_RE.sub(lambda m: f"<@{'!' if '!' in m.group(0) else ''}***>", text)

831
agent/shell_hooks.py Normal file
View file

@ -0,0 +1,831 @@
"""
Shell-script hooks bridge.
Reads the ``hooks:`` block from ``cli-config.yaml``, prompts the user for
consent on first use of each ``(event, command)`` pair, and registers
callbacks on the existing plugin hook manager so every existing
``invoke_hook()`` site dispatches to the configured shell scripts with
zero changes to call sites.
Design notes
------------
* Python plugins and shell hooks compose naturally: both flow through
:func:`hermes_cli.plugins.invoke_hook` and its aggregators. Python
plugins are registered first (via ``discover_and_load()``) so their
block decisions win ties over shell-hook blocks.
* Subprocess execution uses ``shlex.split(os.path.expanduser(command))``
  with ``shell=False`` — no shell injection footguns. Users that need
pipes/redirection wrap their logic in a script.
* First-use consent is gated by the allowlist under
``~/.hermes/shell-hooks-allowlist.json``. Non-TTY callers must pass
``accept_hooks=True`` (resolved from ``--accept-hooks``,
``HERMES_ACCEPT_HOOKS``, or ``hooks_auto_accept: true`` in config)
for registration to succeed without a prompt.
* Registration is idempotent — safe to invoke from both the CLI entry
point (``hermes_cli/main.py``) and the gateway entry point
(``gateway/run.py``).
Wire protocol
-------------
**stdin** (JSON, piped to the script)::
{
"hook_event_name": "pre_tool_call",
"tool_name": "terminal",
"tool_input": {"command": "rm -rf /"},
"session_id": "sess_abc123",
"cwd": "/home/user/project",
"extra": {...} # event-specific kwargs
}
**stdout** (JSON, optional — anything else is ignored)::
# Block a pre_tool_call (either shape accepted; normalised internally):
{"decision": "block", "reason": "Forbidden command"} # Claude-Code-style
{"action": "block", "message": "Forbidden command"} # Hermes-canonical
# Inject context for pre_llm_call:
{"context": "Today is Friday"}
# Silent no-op:
<empty or any non-matching JSON object>
"""
from __future__ import annotations
import difflib
import json
import logging
import os
import re
import shlex
import subprocess
import sys
import tempfile
import threading
import time
from contextlib import contextmanager
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple
try:
import fcntl # POSIX only; Windows falls back to best-effort without flock.
except ImportError: # pragma: no cover
fcntl = None # type: ignore[assignment]
from hermes_constants import get_hermes_home
logger = logging.getLogger(__name__)

# Default / hard ceiling for per-hook subprocess timeouts, in seconds.
DEFAULT_TIMEOUT_SECONDS: int = 60
MAX_TIMEOUT_SECONDS: int = 300
# Allowlist file name, resolved under get_hermes_home() by allowlist_path().
ALLOWLIST_FILENAME: str = "shell-hooks-allowlist.json"

# (event, matcher, command) triples that have been wired to the plugin
# manager in the current process. Matcher is part of the key because
# the same script can legitimately register for different matchers under
# the same event (e.g. one entry per tool the user wants to gate).
# Second registration attempts for the exact same triple become no-ops
# so the CLI and gateway can both call register_from_config() safely.
_registered: Set[Tuple[str, Optional[str], str]] = set()
_registered_lock = threading.Lock()

# Intra-process lock for allowlist read-modify-write on platforms that
# lack ``fcntl`` (non-POSIX). Kept separate from ``_registered_lock``
# because ``register_from_config`` already holds ``_registered_lock`` when
# it triggers ``_record_approval`` — reusing it here would self-deadlock
# (``threading.Lock`` is non-reentrant). POSIX callers use the sibling
# ``.lock`` file via ``fcntl.flock`` and bypass this.
_allowlist_write_lock = threading.Lock()
@dataclass
class ShellHookSpec:
    """One validated entry from the ``hooks:`` config block."""

    event: str
    command: str
    matcher: Optional[str] = None
    timeout: int = DEFAULT_TIMEOUT_SECONDS
    # Pre-compiled form of ``matcher``; None when absent or uncompilable.
    compiled_matcher: Optional[re.Pattern] = field(default=None, repr=False)

    def __post_init__(self) -> None:
        # YAML quirks (e.g. multi-line string folding) can leave stray
        # whitespace; a matcher of " terminal" would otherwise silently
        # never match "terminal", so normalise blank matchers to None.
        if isinstance(self.matcher, str):
            self.matcher = self.matcher.strip() or None
        if not self.matcher:
            return
        try:
            self.compiled_matcher = re.compile(self.matcher)
        except re.error as exc:
            logger.warning(
                "shell hook matcher %r is invalid (%s) — treating as "
                "literal equality", self.matcher, exc,
            )
            self.compiled_matcher = None

    def matches_tool(self, tool_name: Optional[str]) -> bool:
        """True when this spec applies to *tool_name* (no matcher = all tools)."""
        if not self.matcher:
            return True
        if tool_name is None:
            return False
        if self.compiled_matcher is None:
            # The matcher regex failed to compile (warned in __post_init__):
            # fall back to literal string equality.
            return tool_name == self.matcher
        return self.compiled_matcher.fullmatch(tool_name) is not None
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
def register_from_config(
    cfg: Optional[Dict[str, Any]],
    *,
    accept_hooks: bool = False,
) -> List[ShellHookSpec]:
    """Register every configured shell hook on the plugin manager.

    ``cfg`` is the full parsed config dict (``hermes_cli.config.load_config``
    output). The ``hooks:`` key is read out of it. Missing, empty, or
    non-dict ``hooks`` is treated as zero configured hooks.

    ``accept_hooks=True`` skips the TTY consent prompt — the caller is
    promising that the user has opted in via a flag, env var, or config
    setting. ``HERMES_ACCEPT_HOOKS=1`` and ``hooks_auto_accept: true`` are
    also honored inside this function so either CLI or gateway call sites
    pick them up.

    Returns the list of :class:`ShellHookSpec` entries that ended up wired
    up on the plugin manager. Skipped entries (unknown events, malformed,
    not allowlisted, already registered) are logged but not returned.
    """
    if not isinstance(cfg, dict):
        return []
    effective_accept = _resolve_effective_accept(cfg, accept_hooks)
    specs = _parse_hooks_block(cfg.get("hooks"))
    if not specs:
        return []
    registered: List[ShellHookSpec] = []

    # Import lazily — avoids circular imports at module-load time.
    from hermes_cli.plugins import get_plugin_manager

    manager = get_plugin_manager()

    # Idempotence + allowlist read happen under the lock; the TTY
    # prompt runs outside so other threads aren't parked on a blocking
    # input(). Mutation re-takes the lock with a defensive idempotence
    # re-check in case two callers ever race through the prompt.
    for spec in specs:
        key = (spec.event, spec.matcher, spec.command)
        with _registered_lock:
            if key in _registered:
                continue
            already_allowlisted = _is_allowlisted(spec.event, spec.command)
        if not already_allowlisted:
            if not _prompt_and_record(
                spec.event, spec.command, accept_hooks=effective_accept,
            ):
                logger.warning(
                    "shell hook for %s (%s) not allowlisted — skipped. "
                    "Use --accept-hooks / HERMES_ACCEPT_HOOKS=1 / "
                    "hooks_auto_accept: true, or approve at the TTY "
                    "prompt next run.",
                    spec.event, spec.command,
                )
                continue
        with _registered_lock:
            if key in _registered:
                continue
            # NOTE(review): appends directly to the manager's private
            # ``_hooks`` mapping — relies on plugin-manager internals.
            manager._hooks.setdefault(spec.event, []).append(_make_callback(spec))
            _registered.add(key)
            registered.append(spec)
            logger.info(
                "shell hook registered: %s -> %s (matcher=%s, timeout=%ds)",
                spec.event, spec.command, spec.matcher, spec.timeout,
            )
    return registered
def iter_configured_hooks(cfg: Optional[Dict[str, Any]]) -> List[ShellHookSpec]:
    """Parse the ``hooks:`` block out of *cfg* without side effects.

    Registration is skipped entirely — this is the read-only view used by
    ``hermes hooks list`` and ``doctor``. Non-dict configs yield ``[]``.
    """
    return _parse_hooks_block(cfg.get("hooks")) if isinstance(cfg, dict) else []
def reset_for_tests() -> None:
    """Clear the idempotence set. Test-only helper.

    Does NOT detach callbacks already appended to the plugin manager;
    it only allows ``register_from_config()`` to wire the same
    (event, matcher, command) triples again in this process.
    """
    with _registered_lock:
        _registered.clear()
# ---------------------------------------------------------------------------
# Config parsing
# ---------------------------------------------------------------------------
def _parse_hooks_block(hooks_cfg: Any) -> List[ShellHookSpec]:
    """Flatten the ``hooks:`` mapping into validated ``ShellHookSpec``s.

    Config parsing never raises — a broken hook definition must not crash
    the agent, so every malformed entry is logged and dropped instead.
    """
    from hermes_cli.plugins import VALID_HOOKS

    if not isinstance(hooks_cfg, dict):
        return []

    out: List[ShellHookSpec] = []
    for event_name, entries in hooks_cfg.items():
        if event_name not in VALID_HOOKS:
            # Typo help: suggest the closest valid event name when one exists.
            close = difflib.get_close_matches(
                str(event_name), VALID_HOOKS, n=1, cutoff=0.6,
            )
            if close:
                logger.warning(
                    "unknown hook event %r in hooks: config — did you mean %r?",
                    event_name, close[0],
                )
            else:
                logger.warning(
                    "unknown hook event %r in hooks: config (valid: %s)",
                    event_name, ", ".join(sorted(VALID_HOOKS)),
                )
            continue
        if entries is None:
            # Bare `event:` key with nothing under it — zero hooks.
            continue
        if not isinstance(entries, list):
            logger.warning(
                "hooks.%s must be a list of hook definitions; got %s",
                event_name, type(entries).__name__,
            )
            continue
        parsed = (
            _parse_single_entry(event_name, idx, raw)
            for idx, raw in enumerate(entries)
        )
        out.extend(spec for spec in parsed if spec is not None)
    return out
def _parse_single_entry(
    event: str, index: int, raw: Any,
) -> Optional[ShellHookSpec]:
    """Validate one ``hooks.<event>[<index>]`` entry into a ShellHookSpec.

    Returns ``None`` (after logging a warning) for malformed entries.
    ``timeout`` is clamped into ``[1, MAX_TIMEOUT_SECONDS]``; ``matcher``
    is dropped with a warning for events where it is not honored.
    """
    if not isinstance(raw, dict):
        logger.warning(
            "hooks.%s[%d] must be a mapping with a 'command' key; got %s",
            event, index, type(raw).__name__,
        )
        return None
    command = raw.get("command")
    if not isinstance(command, str) or not command.strip():
        logger.warning(
            "hooks.%s[%d] is missing a non-empty 'command' field",
            event, index,
        )
        return None
    matcher = raw.get("matcher")
    if matcher is not None and not isinstance(matcher, str):
        logger.warning(
            "hooks.%s[%d].matcher must be a string regex; ignoring",
            event, index,
        )
        matcher = None
    # Matchers are only consulted for tool-scoped events (see the matcher
    # gate in _make_callback); warn instead of silently firing everywhere.
    if matcher is not None and event not in ("pre_tool_call", "post_tool_call"):
        logger.warning(
            "hooks.%s[%d].matcher=%r will be ignored at runtime — the "
            "matcher field is only honored for pre_tool_call / "
            "post_tool_call. The hook will fire on every %s event.",
            event, index, matcher, event,
        )
        matcher = None
    timeout_raw = raw.get("timeout", DEFAULT_TIMEOUT_SECONDS)
    try:
        timeout = int(timeout_raw)
    except (TypeError, ValueError):
        logger.warning(
            "hooks.%s[%d].timeout must be an int (got %r); using default %ds",
            event, index, timeout_raw, DEFAULT_TIMEOUT_SECONDS,
        )
        timeout = DEFAULT_TIMEOUT_SECONDS
    if timeout < 1:
        logger.warning(
            "hooks.%s[%d].timeout must be >=1; using default %ds",
            event, index, DEFAULT_TIMEOUT_SECONDS,
        )
        timeout = DEFAULT_TIMEOUT_SECONDS
    if timeout > MAX_TIMEOUT_SECONDS:
        logger.warning(
            "hooks.%s[%d].timeout=%ds exceeds max %ds; clamping",
            event, index, timeout, MAX_TIMEOUT_SECONDS,
        )
        timeout = MAX_TIMEOUT_SECONDS
    return ShellHookSpec(
        event=event,
        command=command.strip(),
        matcher=matcher,
        timeout=timeout,
    )
# ---------------------------------------------------------------------------
# Subprocess callback
# ---------------------------------------------------------------------------
# kwargs that _serialize_payload maps onto dedicated top-level payload
# fields; everything else is forwarded to the script under "extra".
_TOP_LEVEL_PAYLOAD_KEYS = {"tool_name", "args", "session_id", "parent_session_id"}
def _spawn(spec: ShellHookSpec, stdin_json: str) -> Dict[str, Any]:
"""Run ``spec.command`` as a subprocess with ``stdin_json`` on stdin.
Returns a diagnostic dict with the same keys for every outcome
(``returncode``, ``stdout``, ``stderr``, ``timed_out``,
``elapsed_seconds``, ``error``). This is the single place the
subprocess is actually invoked both the live callback path
(:func:`_make_callback`) and the CLI test helper (:func:`run_once`)
go through it.
"""
result: Dict[str, Any] = {
"returncode": None,
"stdout": "",
"stderr": "",
"timed_out": False,
"elapsed_seconds": 0.0,
"error": None,
}
try:
argv = shlex.split(os.path.expanduser(spec.command))
except ValueError as exc:
result["error"] = f"command {spec.command!r} cannot be parsed: {exc}"
return result
if not argv:
result["error"] = "empty command"
return result
t0 = time.monotonic()
try:
proc = subprocess.run(
argv,
input=stdin_json,
capture_output=True,
timeout=spec.timeout,
text=True,
shell=False,
)
except subprocess.TimeoutExpired:
result["timed_out"] = True
result["elapsed_seconds"] = round(time.monotonic() - t0, 3)
return result
except FileNotFoundError:
result["error"] = "command not found"
return result
except PermissionError:
result["error"] = "command not executable"
return result
except Exception as exc: # pragma: no cover — defensive
result["error"] = str(exc)
return result
result["returncode"] = proc.returncode
result["stdout"] = proc.stdout or ""
result["stderr"] = proc.stderr or ""
result["elapsed_seconds"] = round(time.monotonic() - t0, 3)
return result
def _make_callback(spec: ShellHookSpec) -> Callable[..., Optional[Dict[str, Any]]]:
    """Build the closure that ``invoke_hook()`` will call per firing.

    The callback runs ``spec.command`` via :func:`_spawn`, logs every
    failure mode (spawn error, timeout, stderr noise, non-zero exit), and
    returns the normalised directive from :func:`_parse_response` — or
    ``None`` when the hook has nothing to say.
    """
    def _callback(**kwargs: Any) -> Optional[Dict[str, Any]]:
        # Matcher gate — only meaningful for tool-scoped events.
        if spec.event in ("pre_tool_call", "post_tool_call"):
            if not spec.matches_tool(kwargs.get("tool_name")):
                return None
        r = _spawn(spec, _serialize_payload(spec.event, kwargs))
        if r["error"]:
            logger.warning(
                "shell hook failed (event=%s command=%s): %s",
                spec.event, spec.command, r["error"],
            )
            return None
        if r["timed_out"]:
            logger.warning(
                "shell hook timed out after %.2fs (event=%s command=%s)",
                r["elapsed_seconds"], spec.event, spec.command,
            )
            return None
        stderr = r["stderr"].strip()
        if stderr:
            logger.debug(
                "shell hook stderr (event=%s command=%s): %s",
                spec.event, spec.command, stderr[:400],
            )
        # Non-zero exits: log but still parse stdout so scripts that
        # signal failure via exit code can also return a block directive.
        if r["returncode"] != 0:
            logger.warning(
                "shell hook exited %d (event=%s command=%s); stderr=%s",
                r["returncode"], spec.event, spec.command, stderr[:400],
            )
        return _parse_response(spec.event, r["stdout"])

    # Give the closure a recognisable name so hook-manager logs and
    # tracebacks identify which script it wraps.
    _callback.__name__ = f"shell_hook[{spec.event}:{spec.command}]"
    _callback.__qualname__ = _callback.__name__
    return _callback
def _serialize_payload(event: str, kwargs: Dict[str, Any]) -> str:
    """Build the JSON document piped to a hook script's stdin.

    Values json can't encode are coerced with ``default=str`` rather than
    dropped, so an odd kwarg never aborts the dispatch. Kwargs outside
    the canonical top-level set are forwarded under ``extra``.
    """
    try:
        current_dir = str(Path.cwd())
    except OSError:
        # cwd can vanish out from under the process (deleted directory).
        current_dir = ""

    tool_args = kwargs.get("args")
    payload = {
        "hook_event_name": event,
        "tool_name": kwargs.get("tool_name"),
        "tool_input": tool_args if isinstance(tool_args, dict) else None,
        "session_id": kwargs.get("session_id") or kwargs.get("parent_session_id") or "",
        "cwd": current_dir,
        "extra": {
            k: v for k, v in kwargs.items() if k not in _TOP_LEVEL_PAYLOAD_KEYS
        },
    }
    return json.dumps(payload, ensure_ascii=False, default=str)
def _parse_response(event: str, stdout: str) -> Optional[Dict[str, Any]]:
"""Translate stdout JSON into a Hermes wire-shape dict.
For ``pre_tool_call`` the Claude-Code-style ``{"decision": "block",
"reason": "..."}`` payload is translated into the canonical Hermes
``{"action": "block", "message": "..."}`` shape expected by
:func:`hermes_cli.plugins.get_pre_tool_call_block_message`. This is
the single most important correctness invariant in this module
skipping the translation silently breaks every ``pre_tool_call``
block directive.
For ``pre_llm_call``, ``{"context": "..."}`` is passed through
unchanged to match the existing plugin-hook contract.
Anything else returns ``None``.
"""
stdout = (stdout or "").strip()
if not stdout:
return None
try:
data = json.loads(stdout)
except json.JSONDecodeError:
logger.warning(
"shell hook stdout was not valid JSON (event=%s): %s",
event, stdout[:200],
)
return None
if not isinstance(data, dict):
return None
if event == "pre_tool_call":
if data.get("action") == "block":
message = data.get("message") or data.get("reason") or ""
if isinstance(message, str) and message:
return {"action": "block", "message": message}
if data.get("decision") == "block":
message = data.get("reason") or data.get("message") or ""
if isinstance(message, str) and message:
return {"action": "block", "message": message}
return None
context = data.get("context")
if isinstance(context, str) and context.strip():
return {"context": context}
return None
# ---------------------------------------------------------------------------
# Allowlist / consent
# ---------------------------------------------------------------------------
def allowlist_path() -> Path:
    """Path to the per-user shell-hook allowlist file.

    Lives directly under the Hermes home directory (``get_hermes_home()``),
    named by ``ALLOWLIST_FILENAME``.
    """
    return get_hermes_home() / ALLOWLIST_FILENAME
def load_allowlist() -> Dict[str, Any]:
    """Read and sanitise the allowlist file.

    Missing, unreadable, or syntactically invalid files degrade to the
    empty skeleton ``{"approvals": []}``; a present-but-wrong-typed
    ``approvals`` key is coerced to an empty list.
    """
    empty: Dict[str, Any] = {"approvals": []}
    try:
        parsed = json.loads(allowlist_path().read_text())
    except (FileNotFoundError, json.JSONDecodeError, OSError):
        return empty
    if not isinstance(parsed, dict):
        return empty
    if not isinstance(parsed.get("approvals"), list):
        parsed["approvals"] = []
    return parsed
def save_allowlist(data: Dict[str, Any]) -> None:
    """Atomically persist the allowlist via per-process ``mkstemp`` +
    ``os.replace``. Cross-process read-modify-write races are handled
    by :func:`_locked_update_approvals` (``fcntl.flock``). On OSError
    the failure is logged; the in-process hook still registers but
    the approval won't survive across runs."""
    p = allowlist_path()
    try:
        p.parent.mkdir(parents=True, exist_ok=True)
        # Temp file in the SAME directory so os.replace() is an atomic
        # rename on one filesystem (cross-device renames would copy).
        fd, tmp_path = tempfile.mkstemp(
            prefix=f"{p.name}.", suffix=".tmp", dir=str(p.parent),
        )
        try:
            with os.fdopen(fd, "w") as fh:
                fh.write(json.dumps(data, indent=2, sort_keys=True))
            os.replace(tmp_path, p)
        except Exception:
            # Best-effort cleanup of the orphaned temp file, then re-raise
            # so the outer handler can log the failure.
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
            raise
    except OSError as exc:
        logger.warning(
            "Failed to persist shell hook allowlist to %s: %s. "
            "The approval is in-memory for this run, but the next "
            "startup will re-prompt (or skip registration on non-TTY "
            "runs without --accept-hooks / HERMES_ACCEPT_HOOKS).",
            p, exc,
        )
def _is_allowlisted(event: str, command: str) -> bool:
    """True when the user has previously approved this (event, command) pair."""
    for entry in load_allowlist().get("approvals", []):
        if not isinstance(entry, dict):
            continue
        if entry.get("event") == event and entry.get("command") == command:
            return True
    return False
@contextmanager
def _locked_update_approvals() -> Iterator[Dict[str, Any]]:
    """Serialise read-modify-write on the allowlist across processes.

    Holds an exclusive ``flock`` on a sibling lock file for the duration
    of the update so concurrent ``_record_approval``/``revoke`` callers
    cannot clobber each other's changes (the classic lost-update race
    under simultaneous writers). Falls back to an in-process lock on
    platforms without ``fcntl``.

    Yields the mutable allowlist dict; it is saved back on exit.
    """
    p = allowlist_path()
    p.parent.mkdir(parents=True, exist_ok=True)
    lock_path = p.with_suffix(p.suffix + ".lock")
    if fcntl is None:  # pragma: no cover — non-POSIX fallback
        # Only guards threads within THIS process; cross-process writers
        # on non-POSIX platforms may still race.
        with _allowlist_write_lock:
            data = load_allowlist()
            yield data
            save_allowlist(data)
        return
    # "a+" creates the lock file if missing without truncating it.
    with open(lock_path, "a+") as lock_fh:
        fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX)
        try:
            data = load_allowlist()
            yield data
            save_allowlist(data)
        finally:
            fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN)
def _prompt_and_record(
    event: str, command: str, *, accept_hooks: bool,
) -> bool:
    """Decide whether to approve an unseen ``(event, command)`` pair.

    Approval comes either from the auto-accept opt-in (``accept_hooks``)
    or from an interactive y/N prompt; non-TTY runs without auto-accept
    are always refused. Returns ``True`` iff the approval was granted
    and recorded.
    """
    if accept_hooks:
        _record_approval(event, command)
        logger.info(
            "shell hook auto-approved via --accept-hooks / env / config: "
            "%s -> %s", event, command,
        )
        return True
    # No terminal to ask on — refuse rather than block waiting for input.
    if not sys.stdin.isatty():
        return False
    print(
        f"\n⚠ Hermes is about to register a shell hook that will run a\n"
        f" command on your behalf.\n\n"
        f" Event: {event}\n"
        f" Command: {command}\n\n"
        f" Commands run with your full user credentials. Only approve\n"
        f" commands you trust."
    )
    try:
        reply = input("Allow this hook to run? [y/N]: ").strip().lower()
    except (EOFError, KeyboardInterrupt):
        print()  # keep the terminal tidy after ^C
        return False
    granted = reply in ("y", "yes")
    if granted:
        _record_approval(event, command)
    return granted
def _record_approval(event: str, command: str) -> None:
    """Persist an approval for ``(event, command)``, replacing any older entry."""
    new_entry = {
        "event": event,
        "command": command,
        "approved_at": _utc_now_iso(),
        "script_mtime_at_approval": script_mtime_iso(command),
    }
    with _locked_update_approvals() as data:
        # Drop any previous record for this pair, then append the fresh one.
        kept = [
            e for e in data.get("approvals", [])
            if not (
                isinstance(e, dict)
                and e.get("event") == event
                and e.get("command") == command
            )
        ]
        kept.append(new_entry)
        data["approvals"] = kept
def _utc_now_iso() -> str:
return datetime.now(tz=timezone.utc).isoformat().replace("+00:00", "Z")
def revoke(command: str) -> int:
    """Remove every allowlist entry matching ``command``.

    Returns the number of entries removed. Does not unregister any
    callbacks that are already live on the plugin manager in the current
    process — restart the CLI / gateway to drop them.
    """
    with _locked_update_approvals() as data:
        existing = data.get("approvals", [])
        remaining = [
            e for e in existing
            if not (isinstance(e, dict) and e.get("command") == command)
        ]
        data["approvals"] = remaining
        return len(existing) - len(remaining)
_SCRIPT_EXTENSIONS: Tuple[str, ...] = (
".sh", ".bash", ".zsh", ".fish",
".py", ".pyw",
".rb", ".pl", ".lua",
".js", ".mjs", ".cjs", ".ts",
)
def _command_script_path(command: str) -> str:
"""Return the script path from ``command`` for doctor / drift checks.
Prefers a token ending in a known script extension, then a token
containing ``/`` or leading ``~``, then the first token. Handles
``python3 /path/hook.py``, ``/usr/bin/env bash hook.sh``, and the
common bare-path form.
"""
try:
parts = shlex.split(command)
except ValueError:
return command
if not parts:
return command
for part in parts:
if part.lower().endswith(_SCRIPT_EXTENSIONS):
return part
for part in parts:
if "/" in part or part.startswith("~"):
return part
return parts[0]
# ---------------------------------------------------------------------------
# Helpers for accept-hooks resolution
# ---------------------------------------------------------------------------
def _resolve_effective_accept(
cfg: Dict[str, Any], accept_hooks_arg: bool,
) -> bool:
"""Combine all three opt-in channels into a single boolean.
Precedence (any truthy source flips us on):
1. ``--accept-hooks`` flag (CLI) / explicit argument
2. ``HERMES_ACCEPT_HOOKS`` env var
3. ``hooks_auto_accept: true`` in ``cli-config.yaml``
"""
if accept_hooks_arg:
return True
env = os.environ.get("HERMES_ACCEPT_HOOKS", "").strip().lower()
if env in ("1", "true", "yes", "on"):
return True
cfg_val = cfg.get("hooks_auto_accept", False)
return bool(cfg_val)
# ---------------------------------------------------------------------------
# Introspection (used by `hermes hooks` CLI)
# ---------------------------------------------------------------------------
def allowlist_entry_for(event: str, command: str) -> Optional[Dict[str, Any]]:
    """Return the allowlist record for this pair, if any."""
    matches = (
        e for e in load_allowlist().get("approvals", [])
        if isinstance(e, dict)
        and e.get("event") == event
        and e.get("command") == command
    )
    return next(matches, None)
def script_mtime_iso(command: str) -> Optional[str]:
    """ISO-8601 UTC mtime of the resolved script path.

    Returns ``None`` when no script path can be resolved from ``command``
    or the file cannot be stat'ed (missing, unreadable, ...).
    """
    script = _command_script_path(command)
    if not script:
        return None
    try:
        mtime = os.path.getmtime(os.path.expanduser(script))
    except OSError:
        return None
    stamp = datetime.fromtimestamp(mtime, tz=timezone.utc).isoformat()
    return stamp.replace("+00:00", "Z")
def script_is_executable(command: str) -> bool:
    """Return ``True`` iff ``command`` is runnable as configured.

    For a bare invocation (``/path/hook.sh``) the script itself must be
    executable. For interpreter-prefixed commands (``python3
    /path/hook.py``, ``/usr/bin/env bash hook.sh``) the script just has
    to be readable — the interpreter doesn't care about the ``X_OK``
    bit. Mirrors what ``_spawn`` would actually do at runtime.
    """
    script = _command_script_path(command)
    if not script:
        return False
    resolved = os.path.expanduser(script)
    if not os.path.isfile(resolved):
        return False
    try:
        tokens = shlex.split(command)
    except ValueError:
        return False
    if tokens and tokens[0] == script:
        # Bare invocation: the kernel execs the script directly.
        return os.access(resolved, os.X_OK)
    # Interpreter-prefixed: the interpreter only needs read access.
    return os.access(resolved, os.R_OK)
def run_once(
    spec: ShellHookSpec, kwargs: Dict[str, Any],
) -> Dict[str, Any]:
    """Fire a single shell-hook invocation with a synthetic payload.

    Used by ``hermes hooks test`` and ``hermes hooks doctor``. ``kwargs``
    is the same dict that :func:`hermes_cli.plugins.invoke_hook` would
    pass at runtime; it is routed through :func:`_serialize_payload` so
    the synthetic stdin exactly matches a real hook firing — otherwise
    scripts tested via ``hermes hooks test`` could silently diverge from
    production behaviour.

    Returns the :func:`_spawn` diagnostic dict plus a ``parsed`` field
    holding the canonical Hermes-wire-shape response.
    """
    payload = _serialize_payload(spec.event, kwargs)
    diagnostics = _spawn(spec, payload)
    diagnostics["parsed"] = _parse_response(spec.event, diagnostics["stdout"])
    return diagnostics

View file

@ -8,6 +8,7 @@ can invoke skills via /skill-name commands and prompt-only built-ins like
import json
import logging
import re
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
@ -22,6 +23,110 @@ _PLAN_SLUG_RE = re.compile(r"[^a-z0-9]+")
_SKILL_INVALID_CHARS = re.compile(r"[^a-z0-9-]")
_SKILL_MULTI_HYPHEN = re.compile(r"-{2,}")
# Matches ${HERMES_SKILL_DIR} / ${HERMES_SESSION_ID} tokens in SKILL.md.
# Tokens that don't resolve (e.g. ${HERMES_SESSION_ID} with no session) are
# left as-is so the user can debug them.
_SKILL_TEMPLATE_RE = re.compile(r"\$\{(HERMES_SKILL_DIR|HERMES_SESSION_ID)\}")
# Matches inline shell snippets like: !`date +%Y-%m-%d`
# Non-greedy, single-line only — no newlines inside the backticks.
_INLINE_SHELL_RE = re.compile(r"!`([^`\n]+)`")
# Cap inline-shell output so a runaway command can't blow out the context.
_INLINE_SHELL_MAX_OUTPUT = 4000
def _load_skills_config() -> dict:
    """Best-effort read of the ``skills`` section of config.yaml.

    Any failure (missing config module, unreadable file, wrong shape)
    yields an empty dict so callers can use plain ``.get`` defaults.
    """
    try:
        from hermes_cli.config import load_config

        section = (load_config() or {}).get("skills")
        if isinstance(section, dict):
            return section
    except Exception:
        logger.debug("Could not read skills config", exc_info=True)
    return {}
def _substitute_template_vars(
    content: str,
    skill_dir: Path | None,
    session_id: str | None,
) -> str:
    """Replace ${HERMES_SKILL_DIR} / ${HERMES_SESSION_ID} in skill content.

    Only substitutes tokens for which a concrete value is available —
    unresolved tokens are left in place so the author can spot them.
    """
    if not content:
        return content
    # Precompute the token → value table; a falsy value means "leave as-is".
    values = {
        "HERMES_SKILL_DIR": str(skill_dir) if skill_dir else None,
        "HERMES_SESSION_ID": str(session_id) if session_id else None,
    }

    def _resolve(match: re.Match) -> str:
        replacement = values.get(match.group(1))
        return replacement if replacement else match.group(0)

    return _SKILL_TEMPLATE_RE.sub(_resolve, content)
def _run_inline_shell(command: str, cwd: Path | None, timeout: int) -> str:
    """Execute a single inline-shell snippet and return its stdout (trimmed).

    Failures return a short ``[inline-shell error: ...]`` marker instead of
    raising, so one bad snippet can't wreck the whole skill message. When
    stdout is empty, stderr is used as a fallback; output is capped at
    ``_INLINE_SHELL_MAX_OUTPUT`` characters so a runaway command can't blow
    out the context.
    """
    try:
        # Clamp once so the timeout we enforce and the one we report agree
        # (previously the message reported the raw value while the clamped
        # one was enforced). Kept inside the try so a non-coercible timeout
        # still degrades to an error marker instead of raising.
        effective_timeout = max(1, int(timeout))
        completed = subprocess.run(
            ["bash", "-c", command],
            cwd=str(cwd) if cwd else None,
            capture_output=True,
            text=True,
            timeout=effective_timeout,
            check=False,
        )
    except subprocess.TimeoutExpired:
        return f"[inline-shell timeout after {effective_timeout}s: {command}]"
    except FileNotFoundError:
        # No f-prefix needed — this message has no placeholders.
        return "[inline-shell error: bash not found]"
    except Exception as exc:
        return f"[inline-shell error: {exc}]"
    output = (completed.stdout or "").rstrip("\n")
    if not output and completed.stderr:
        output = completed.stderr.rstrip("\n")
    if len(output) > _INLINE_SHELL_MAX_OUTPUT:
        output = output[:_INLINE_SHELL_MAX_OUTPUT] + "…[truncated]"
    return output
def _expand_inline_shell(
    content: str,
    skill_dir: Path | None,
    timeout: int,
) -> str:
    """Replace every !`cmd` snippet in ``content`` with its stdout.

    Runs each snippet with the skill directory as CWD so relative paths in
    the snippet work the way the author expects.
    """
    # Cheap pre-check: skip the regex pass when no snippet marker exists.
    if "!`" not in content:
        return content

    def _expand(match: re.Match) -> str:
        snippet = match.group(1).strip()
        if not snippet:
            return ""
        return _run_inline_shell(snippet, skill_dir, timeout)

    return _INLINE_SHELL_RE.sub(_expand, content)
def build_plan_path(
user_instruction: str = "",
@ -133,14 +238,36 @@ def _build_skill_message(
activation_note: str,
user_instruction: str = "",
runtime_note: str = "",
session_id: str | None = None,
) -> str:
"""Format a loaded skill into a user/system message payload."""
from tools.skills_tool import SKILLS_DIR
content = str(loaded_skill.get("content") or "")
# ── Template substitution and inline-shell expansion ──
# Done before anything else so downstream blocks (setup notes,
# supporting-file hints) see the expanded content.
skills_cfg = _load_skills_config()
if skills_cfg.get("template_vars", True):
content = _substitute_template_vars(content, skill_dir, session_id)
if skills_cfg.get("inline_shell", False):
timeout = int(skills_cfg.get("inline_shell_timeout", 10) or 10)
content = _expand_inline_shell(content, skill_dir, timeout)
parts = [activation_note, "", content.strip()]
# ── Inject the absolute skill directory so the agent can reference
# bundled scripts without an extra skill_view() round-trip. ──
if skill_dir:
parts.append("")
parts.append(f"[Skill directory: {skill_dir}]")
parts.append(
"Resolve any relative paths in this skill (e.g. `scripts/foo.js`, "
"`templates/config.yaml`) against that directory, then run them "
"with the terminal tool using the absolute path."
)
# ── Inject resolved skill config values ──
_inject_skill_config(loaded_skill, parts)
@ -188,11 +315,13 @@ def _build_skill_message(
# Skill is from an external dir — use the skill name instead
skill_view_target = skill_dir.name
parts.append("")
parts.append("[This skill has supporting files you can load with the skill_view tool:]")
parts.append("[This skill has supporting files:]")
for sf in supporting:
parts.append(f"- {sf}")
parts.append(f"- {sf} -> {skill_dir / sf}")
parts.append(
f'\nTo view any of these, use: skill_view(name="{skill_view_target}", file_path="<path>")'
f'\nLoad any of these with skill_view(name="{skill_view_target}", '
f'file_path="<path>"), or run scripts directly by absolute path '
f"(e.g. `node {skill_dir}/scripts/foo.js`)."
)
if user_instruction:
@ -332,6 +461,7 @@ def build_skill_invocation_message(
activation_note,
user_instruction=user_instruction,
runtime_note=runtime_note,
session_id=task_id,
)
@ -370,6 +500,7 @@ def build_preloaded_skills_prompt(
loaded_skill,
skill_dir,
activation_note,
session_id=task_id,
)
)
loaded_names.append(skill_name)

View file

@ -1,195 +0,0 @@
"""Helpers for optional cheap-vs-strong model routing."""
from __future__ import annotations
import os
import re
from typing import Any, Dict, Optional
from utils import is_truthy_value
# Keywords that signal code / tooling / debugging / long-form work — any
# hit keeps the turn on the primary (strong) model instead of the cheap
# route (see choose_cheap_model_route).
_COMPLEX_KEYWORDS = {
    "debug",
    "debugging",
    "implement",
    "implementation",
    "refactor",
    "patch",
    "traceback",
    "stacktrace",
    "exception",
    "error",
    "analyze",
    "analysis",
    "investigate",
    "architecture",
    "design",
    "compare",
    "benchmark",
    "optimize",
    "optimise",
    "review",
    "terminal",
    "shell",
    "tool",
    "tools",
    "pytest",
    "test",
    "tests",
    "plan",
    "planning",
    "delegate",
    "subagent",
    "cron",
    "docker",
    "kubernetes",
}
# Detects URLs; messages containing links are never routed to the cheap model.
_URL_RE = re.compile(r"https?://|www\.", re.IGNORECASE)
def _coerce_bool(value: Any, default: bool = False) -> bool:
    """Interpret *value* as a boolean via the shared truthiness helper."""
    return is_truthy_value(value, default=default)
def _coerce_int(value: Any, default: int) -> int:
try:
return int(value)
except (TypeError, ValueError):
return default
def choose_cheap_model_route(user_message: str, routing_config: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Return the configured cheap-model route when a message looks simple.

    Conservative by design: if the message has signs of code/tool/debugging/
    long-form work, keep the primary model.
    """
    cfg = routing_config or {}
    if not _coerce_bool(cfg.get("enabled"), False):
        return None
    cheap = cfg.get("cheap_model") or {}
    if not isinstance(cheap, dict):
        return None
    provider = str(cheap.get("provider") or "").strip().lower()
    model = str(cheap.get("model") or "").strip()
    if not (provider and model):
        return None
    text = (user_message or "").strip()
    if not text:
        return None
    # Size gates — long messages always stay on the primary model.
    if len(text) > _coerce_int(cfg.get("max_simple_chars"), 160):
        return None
    if len(text.split()) > _coerce_int(cfg.get("max_simple_words"), 28):
        return None
    # Structural complexity signals: multi-paragraph text, code spans
    # (any backtick, which also covers fenced blocks), and links.
    if text.count("\n") > 1 or "`" in text or _URL_RE.search(text):
        return None
    tokens = {
        word.strip(".,:;!?()[]{}\"'`") for word in text.lower().split()
    }
    if tokens & _COMPLEX_KEYWORDS:
        return None
    route = dict(cheap)
    route["provider"] = provider
    route["model"] = model
    route["routing_reason"] = "simple_turn"
    return route
def resolve_turn_route(user_message: str, routing_config: Optional[Dict[str, Any]], primary: Dict[str, Any]) -> Dict[str, Any]:
    """Resolve the effective model/runtime for one turn.

    Returns a dict with ``model``/``runtime``/``signature``/``label``
    fields. Falls back to the primary configuration when no cheap route
    applies or when the cheap provider cannot be resolved.
    """

    def _fallback_to_primary() -> Dict[str, Any]:
        # Shared fallback shape — previously this 20-line literal was
        # duplicated verbatim in both the "no route" and "resolution
        # failed" branches.
        return {
            "model": primary.get("model"),
            "runtime": {
                "api_key": primary.get("api_key"),
                "base_url": primary.get("base_url"),
                "provider": primary.get("provider"),
                "api_mode": primary.get("api_mode"),
                "command": primary.get("command"),
                "args": list(primary.get("args") or []),
                "credential_pool": primary.get("credential_pool"),
            },
            "label": None,
            "signature": (
                primary.get("model"),
                primary.get("provider"),
                primary.get("base_url"),
                primary.get("api_mode"),
                primary.get("command"),
                tuple(primary.get("args") or ()),
            ),
        }

    route = choose_cheap_model_route(user_message, routing_config)
    if not route:
        return _fallback_to_primary()

    from hermes_cli.runtime_provider import resolve_runtime_provider

    # An explicit env-var name on the route overrides provider defaults.
    explicit_api_key = None
    api_key_env = str(route.get("api_key_env") or "").strip()
    if api_key_env:
        explicit_api_key = os.getenv(api_key_env) or None
    try:
        runtime = resolve_runtime_provider(
            requested=route.get("provider"),
            explicit_api_key=explicit_api_key,
            explicit_base_url=route.get("base_url"),
        )
    except Exception:
        # Cheap-provider resolution failed — never break the turn; just
        # keep the primary model.
        return _fallback_to_primary()
    return {
        "model": route.get("model"),
        "runtime": {
            "api_key": runtime.get("api_key"),
            "base_url": runtime.get("base_url"),
            "provider": runtime.get("provider"),
            "api_mode": runtime.get("api_mode"),
            "command": runtime.get("command"),
            "args": list(runtime.get("args") or []),
            "credential_pool": runtime.get("credential_pool"),
        },
        "label": f"smart route → {route.get('model')} ({runtime.get('provider')})",
        "signature": (
            route.get("model"),
            runtime.get("provider"),
            runtime.get("base_url"),
            runtime.get("api_mode"),
            runtime.get("command"),
            tuple(runtime.get("args") or ()),
        ),
    }

View file

@ -0,0 +1,51 @@
"""Transport layer types and registry for provider response normalization.
Usage:
from agent.transports import get_transport
transport = get_transport("anthropic_messages")
result = transport.normalize_response(raw_response)
"""
from agent.transports.types import NormalizedResponse, ToolCall, Usage, build_tool_call, map_finish_reason # noqa: F401
_REGISTRY: dict = {}
def register_transport(api_mode: str, transport_cls: type) -> None:
"""Register a transport class for an api_mode string."""
_REGISTRY[api_mode] = transport_cls
def get_transport(api_mode: str):
"""Get a transport instance for the given api_mode.
Returns None if no transport is registered for this api_mode.
This allows gradual migration call sites can check for None
and fall back to the legacy code path.
"""
if not _REGISTRY:
_discover_transports()
cls = _REGISTRY.get(api_mode)
if cls is None:
return None
return cls()
def _discover_transports() -> None:
"""Import all transport modules to trigger auto-registration."""
try:
import agent.transports.anthropic # noqa: F401
except ImportError:
pass
try:
import agent.transports.codex # noqa: F401
except ImportError:
pass
try:
import agent.transports.chat_completions # noqa: F401
except ImportError:
pass
try:
import agent.transports.bedrock # noqa: F401
except ImportError:
pass

View file

@ -0,0 +1,129 @@
"""Anthropic Messages API transport.
Delegates to the existing adapter functions in agent/anthropic_adapter.py.
This transport owns format conversion and normalization NOT client lifecycle.
"""
from typing import Any, Dict, List, Optional
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse
class AnthropicTransport(ProviderTransport):
    """Transport for api_mode='anthropic_messages'.

    A thin wrapper over the existing functions in anthropic_adapter.py —
    every method delegates behind the ProviderTransport ABC; no
    conversion logic is duplicated here.
    """

    # Canonical Anthropic → OpenAI stop-reason mapping, shared at module
    # (class) level rather than rebuilt per call.
    _STOP_REASON_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "refusal": "content_filter",
        "model_context_window_exceeded": "length",
    }

    @property
    def api_mode(self) -> str:
        return "anthropic_messages"

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI messages to an Anthropic (system, messages) tuple.

        kwargs:
            base_url: Optional[str] — affects thinking-signature handling.
        """
        from agent.anthropic_adapter import convert_messages_to_anthropic

        return convert_messages_to_anthropic(
            messages, base_url=kwargs.get("base_url"),
        )

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI tool schemas to Anthropic input_schema format."""
        from agent.anthropic_adapter import convert_tools_to_anthropic

        return convert_tools_to_anthropic(tools)

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build Anthropic messages.create() kwargs.

        Message/tool conversion happens inside the delegated builder.

        params (all optional):
            max_tokens: int (default 16384)
            reasoning_config: dict | None
            tool_choice: str | None
            is_oauth: bool
            preserve_dots: bool
            context_length: int | None
            base_url: str | None
            fast_mode: bool
        """
        from agent.anthropic_adapter import build_anthropic_kwargs

        return build_anthropic_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=params.get("max_tokens", 16384),
            reasoning_config=params.get("reasoning_config"),
            tool_choice=params.get("tool_choice"),
            is_oauth=params.get("is_oauth", False),
            preserve_dots=params.get("preserve_dots", False),
            context_length=params.get("context_length"),
            base_url=params.get("base_url"),
            fast_mode=params.get("fast_mode", False),
        )

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize an Anthropic response to NormalizedResponse.

        kwargs:
            strip_tool_prefix: bool — strip 'mcp_mcp_' prefixes from tool
                names.
        """
        from agent.anthropic_adapter import normalize_anthropic_response_v2

        return normalize_anthropic_response_v2(
            response, strip_tool_prefix=kwargs.get("strip_tool_prefix", False),
        )

    def validate_response(self, response: Any) -> bool:
        """Structurally valid == non-None with a non-empty content list."""
        if response is None:
            return False
        blocks = getattr(response, "content", None)
        return isinstance(blocks, list) and len(blocks) > 0

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Extract Anthropic cache_read and cache_creation token counts."""
        usage = getattr(response, "usage", None)
        if usage is None:
            return None
        read = getattr(usage, "cache_read_input_tokens", 0) or 0
        created = getattr(usage, "cache_creation_input_tokens", 0) or 0
        if read or created:
            return {"cached_tokens": read, "creation_tokens": created}
        return None

    def map_finish_reason(self, raw_reason: str) -> str:
        """Map an Anthropic stop_reason to its OpenAI finish_reason."""
        return self._STOP_REASON_MAP.get(raw_reason, "stop")


# Auto-register on import
from agent.transports import register_transport  # noqa: E402

register_transport("anthropic_messages", AnthropicTransport)

89
agent/transports/base.py Normal file
View file

@ -0,0 +1,89 @@
"""Abstract base for provider transports.
A transport owns the data path for one api_mode:
convert_messages convert_tools build_kwargs normalize_response
It does NOT own: client construction, streaming, credential refresh,
prompt caching, interrupt handling, or retry logic. Those stay on AIAgent.
"""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from agent.transports.types import NormalizedResponse
class ProviderTransport(ABC):
    """Base class for provider-specific format conversion and normalization.

    A transport owns the data path for one api_mode — message/tool format
    conversion, request-kwargs assembly, and response normalization. It
    does NOT own client construction, streaming, credential refresh, or
    retry logic; those stay on the agent.
    """

    @property
    @abstractmethod
    def api_mode(self) -> str:
        """The api_mode string this transport handles (e.g. 'anthropic_messages')."""
        ...

    @abstractmethod
    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI-format messages to provider-native format.

        Returns a provider-specific structure (e.g. a (system, messages)
        tuple for Anthropic, or the messages list unchanged for
        chat_completions).
        """
        ...

    @abstractmethod
    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI-format tool definitions to provider-native format.

        Returns a provider-specific tool list (e.g. Anthropic input_schema
        format).
        """
        ...

    @abstractmethod
    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build the complete API call kwargs dict.

        This is the primary entry point — it typically calls
        convert_messages() and convert_tools() internally, then adds
        model-specific config.

        Returns a dict ready to be passed to the provider's SDK client.
        """
        ...

    @abstractmethod
    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize a raw provider response to the shared NormalizedResponse type.

        This is the only method that returns a transport-layer type.
        """
        ...

    def validate_response(self, response: Any) -> bool:
        """Optional: check if the raw response is structurally valid.

        Returns True if valid, False if the response should be treated as
        invalid. Default implementation always returns True.
        """
        return True

    def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
        """Optional: extract provider-specific cache hit/creation stats.

        Returns a dict with 'cached_tokens' and 'creation_tokens', or None.
        Default returns None.
        """
        return None

    def map_finish_reason(self, raw_reason: str) -> str:
        """Optional: map a provider-specific stop reason to its OpenAI equivalent.

        Default returns the raw reason unchanged. Override for providers
        with different stop reason vocabularies.
        """
        return raw_reason

154
agent/transports/bedrock.py Normal file
View file

@ -0,0 +1,154 @@
"""AWS Bedrock Converse API transport.
Delegates to the existing adapter functions in agent/bedrock_adapter.py.
Bedrock uses its own boto3 client (not the OpenAI SDK), so the transport
owns format conversion and normalization, while client construction and
boto3 calls stay on AIAgent.
"""
from typing import Any, Dict, List, Optional
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse, ToolCall, Usage
class BedrockTransport(ProviderTransport):
    """Transport for api_mode='bedrock_converse'.

    Delegates conversion to agent/bedrock_adapter.py; client construction
    and the boto3 calls stay on the agent.
    """

    # Bedrock → OpenAI stop-reason mapping. Hoisted to class level
    # (mirroring AnthropicTransport._STOP_REASON_MAP) so the dict is
    # built once instead of on every map_finish_reason() call.
    _STOP_REASON_MAP = {
        "end_turn": "stop",
        "tool_use": "tool_calls",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "guardrail_intervened": "content_filter",
        "content_filtered": "content_filter",
    }

    @property
    def api_mode(self) -> str:
        return "bedrock_converse"

    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI messages to Bedrock Converse format."""
        from agent.bedrock_adapter import convert_messages_to_converse

        return convert_messages_to_converse(messages)

    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI tool schemas to Bedrock Converse toolConfig."""
        from agent.bedrock_adapter import convert_tools_to_converse

        return convert_tools_to_converse(tools)

    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build Bedrock converse() kwargs.

        Message/tool conversion happens inside the delegated builder.

        params:
            max_tokens: int — output token limit (default 4096)
            temperature: float | None
            guardrail_config: dict | None — Bedrock guardrails
            region: str — AWS region (default 'us-east-1')
        """
        from agent.bedrock_adapter import build_converse_kwargs

        region = params.get("region", "us-east-1")
        guardrail = params.get("guardrail_config")
        kwargs = build_converse_kwargs(
            model=model,
            messages=messages,
            tools=tools,
            max_tokens=params.get("max_tokens", 4096),
            temperature=params.get("temperature"),
            guardrail_config=guardrail,
        )
        # Sentinel keys for dispatch — agent pops these before the boto3 call
        kwargs["__bedrock_converse__"] = True
        kwargs["__bedrock_region__"] = region
        return kwargs

    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize a Bedrock response to NormalizedResponse.

        Handles two shapes:
          1. Raw boto3 dict (from direct converse() calls)
          2. Already-normalized SimpleNamespace with .choices (from the
             dispatch site)
        """
        from agent.bedrock_adapter import normalize_converse_response

        # Normalize to an OpenAI-compatible SimpleNamespace first.
        if hasattr(response, "choices") and response.choices:
            # Already normalized at the dispatch site.
            ns = response
        else:
            # Raw boto3 dict.
            ns = normalize_converse_response(response)
        choice = ns.choices[0]
        msg = choice.message
        finish_reason = choice.finish_reason or "stop"
        tool_calls = None
        if msg.tool_calls:
            tool_calls = [
                ToolCall(
                    id=tc.id,
                    name=tc.function.name,
                    arguments=tc.function.arguments,
                )
                for tc in msg.tool_calls
            ]
        usage = None
        if hasattr(ns, "usage") and ns.usage:
            u = ns.usage
            usage = Usage(
                prompt_tokens=getattr(u, "prompt_tokens", 0) or 0,
                completion_tokens=getattr(u, "completion_tokens", 0) or 0,
                total_tokens=getattr(u, "total_tokens", 0) or 0,
            )
        reasoning = getattr(msg, "reasoning", None) or getattr(msg, "reasoning_content", None)
        return NormalizedResponse(
            content=msg.content,
            tool_calls=tool_calls,
            finish_reason=finish_reason,
            reasoning=reasoning,
            usage=usage,
        )

    def validate_response(self, response: Any) -> bool:
        """Check Bedrock response structure.

        After normalize_converse_response, the response has an
        OpenAI-compatible .choices — same check as chat_completions.
        """
        if response is None:
            return False
        # Raw Bedrock dict response — check for 'output' key.
        if isinstance(response, dict):
            return "output" in response
        # Already-normalized SimpleNamespace.
        if hasattr(response, "choices"):
            return bool(response.choices)
        return False

    def map_finish_reason(self, raw_reason: str) -> str:
        """Map a Bedrock stop reason to an OpenAI finish_reason.

        The adapter already does this mapping inside
        normalize_converse_response, so this is only used for direct
        access to raw responses.
        """
        return self._STOP_REASON_MAP.get(raw_reason, "stop")


# Auto-register on import
from agent.transports import register_transport  # noqa: E402

register_transport("bedrock_converse", BedrockTransport)

View file

@ -0,0 +1,387 @@
"""OpenAI Chat Completions transport.
Handles the default api_mode ('chat_completions') used by ~16 OpenAI-compatible
providers (OpenRouter, Nous, NVIDIA, Qwen, Ollama, DeepSeek, xAI, Kimi, etc.).
Messages and tools are already in OpenAI format convert_messages and
convert_tools are near-identity. The complexity lives in build_kwargs
which has provider-specific conditionals for max_tokens defaults,
reasoning configuration, temperature handling, and extra_body assembly.
"""
import copy
from typing import Any, Dict, List, Optional
from agent.prompt_builder import DEVELOPER_ROLE_MODELS
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse, ToolCall, Usage
class ChatCompletionsTransport(ProviderTransport):
"""Transport for api_mode='chat_completions'.
The default path for OpenAI-compatible providers.
"""
    @property
    def api_mode(self) -> str:
        # Registry key for this transport (the default OpenAI-compatible path).
        return "chat_completions"
def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> List[Dict[str, Any]]:
"""Messages are already in OpenAI format — sanitize Codex leaks only.
Strips Codex Responses API fields (``codex_reasoning_items`` on the
message, ``call_id``/``response_item_id`` on tool_calls) that strict
chat-completions providers reject with 400/422.
"""
needs_sanitize = False
for msg in messages:
if not isinstance(msg, dict):
continue
if "codex_reasoning_items" in msg:
needs_sanitize = True
break
tool_calls = msg.get("tool_calls")
if isinstance(tool_calls, list):
for tc in tool_calls:
if isinstance(tc, dict) and ("call_id" in tc or "response_item_id" in tc):
needs_sanitize = True
break
if needs_sanitize:
break
if not needs_sanitize:
return messages
sanitized = copy.deepcopy(messages)
for msg in sanitized:
if not isinstance(msg, dict):
continue
msg.pop("codex_reasoning_items", None)
tool_calls = msg.get("tool_calls")
if isinstance(tool_calls, list):
for tc in tool_calls:
if isinstance(tc, dict):
tc.pop("call_id", None)
tc.pop("response_item_id", None)
return sanitized
    def convert_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Tools are already in OpenAI format — identity pass-through."""
        return tools
def build_kwargs(
self,
model: str,
messages: List[Dict[str, Any]],
tools: Optional[List[Dict[str, Any]]] = None,
**params,
) -> Dict[str, Any]:
"""Build chat.completions.create() kwargs.
This is the most complex transport method it handles ~16 providers
via params rather than subclasses.
params:
timeout: float API call timeout
max_tokens: int | None user-configured max tokens
ephemeral_max_output_tokens: int | None one-shot override (error recovery)
max_tokens_param_fn: callable returns {max_tokens: N} or {max_completion_tokens: N}
reasoning_config: dict | None
request_overrides: dict | None
session_id: str | None
qwen_session_metadata: dict | None {sessionId, promptId} precomputed
model_lower: str lowercase model name for pattern matching
# Provider detection flags (all optional, default False)
is_openrouter: bool
is_nous: bool
is_qwen_portal: bool
is_github_models: bool
is_nvidia_nim: bool
is_kimi: bool
is_custom_provider: bool
ollama_num_ctx: int | None
# Provider routing
provider_preferences: dict | None
# Qwen-specific
qwen_prepare_fn: callable | None runs AFTER codex sanitization
qwen_prepare_inplace_fn: callable | None in-place variant for deepcopied lists
# Temperature
fixed_temperature: Any from _fixed_temperature_for_model()
omit_temperature: bool
# Reasoning
supports_reasoning: bool
github_reasoning_extra: dict | None
# Claude on OpenRouter/Nous max output
anthropic_max_output: int | None
# Extra
extra_body_additions: dict | None pre-built extra_body entries
"""
# Codex sanitization: drop reasoning_items / call_id / response_item_id
sanitized = self.convert_messages(messages)
# Qwen portal prep AFTER codex sanitization. If sanitize already
# deepcopied, reuse that copy via the in-place variant to avoid a
# second deepcopy.
is_qwen = params.get("is_qwen_portal", False)
if is_qwen:
qwen_prep = params.get("qwen_prepare_fn")
qwen_prep_inplace = params.get("qwen_prepare_inplace_fn")
if sanitized is messages:
if qwen_prep is not None:
sanitized = qwen_prep(sanitized)
else:
# Already deepcopied — transform in place
if qwen_prep_inplace is not None:
qwen_prep_inplace(sanitized)
elif qwen_prep is not None:
sanitized = qwen_prep(sanitized)
# Developer role swap for GPT-5/Codex models
model_lower = params.get("model_lower", (model or "").lower())
if (
sanitized
and isinstance(sanitized[0], dict)
and sanitized[0].get("role") == "system"
and any(p in model_lower for p in DEVELOPER_ROLE_MODELS)
):
sanitized = list(sanitized)
sanitized[0] = {**sanitized[0], "role": "developer"}
api_kwargs: Dict[str, Any] = {
"model": model,
"messages": sanitized,
}
timeout = params.get("timeout")
if timeout is not None:
api_kwargs["timeout"] = timeout
# Temperature
fixed_temp = params.get("fixed_temperature")
omit_temp = params.get("omit_temperature", False)
if omit_temp:
api_kwargs.pop("temperature", None)
elif fixed_temp is not None:
api_kwargs["temperature"] = fixed_temp
# Qwen metadata (caller precomputes {sessionId, promptId})
qwen_meta = params.get("qwen_session_metadata")
if qwen_meta and is_qwen:
api_kwargs["metadata"] = qwen_meta
# Tools
if tools:
api_kwargs["tools"] = tools
# max_tokens resolution — priority: ephemeral > user > provider default
max_tokens_fn = params.get("max_tokens_param_fn")
ephemeral = params.get("ephemeral_max_output_tokens")
max_tokens = params.get("max_tokens")
anthropic_max_out = params.get("anthropic_max_output")
is_nvidia_nim = params.get("is_nvidia_nim", False)
is_kimi = params.get("is_kimi", False)
reasoning_config = params.get("reasoning_config")
if ephemeral is not None and max_tokens_fn:
api_kwargs.update(max_tokens_fn(ephemeral))
elif max_tokens is not None and max_tokens_fn:
api_kwargs.update(max_tokens_fn(max_tokens))
elif is_nvidia_nim and max_tokens_fn:
api_kwargs.update(max_tokens_fn(16384))
elif is_qwen and max_tokens_fn:
api_kwargs.update(max_tokens_fn(65536))
elif is_kimi and max_tokens_fn:
# Kimi/Moonshot: 32000 matches Kimi CLI's default
api_kwargs.update(max_tokens_fn(32000))
elif anthropic_max_out is not None:
api_kwargs["max_tokens"] = anthropic_max_out
# Kimi: top-level reasoning_effort (unless thinking disabled)
if is_kimi:
_kimi_thinking_off = bool(
reasoning_config
and isinstance(reasoning_config, dict)
and reasoning_config.get("enabled") is False
)
if not _kimi_thinking_off:
_kimi_effort = "medium"
if reasoning_config and isinstance(reasoning_config, dict):
_e = (reasoning_config.get("effort") or "").strip().lower()
if _e in ("low", "medium", "high"):
_kimi_effort = _e
api_kwargs["reasoning_effort"] = _kimi_effort
# extra_body assembly
extra_body: Dict[str, Any] = {}
is_openrouter = params.get("is_openrouter", False)
is_nous = params.get("is_nous", False)
is_github_models = params.get("is_github_models", False)
provider_prefs = params.get("provider_preferences")
if provider_prefs and is_openrouter:
extra_body["provider"] = provider_prefs
# Kimi extra_body.thinking
if is_kimi:
_kimi_thinking_enabled = True
if reasoning_config and isinstance(reasoning_config, dict):
if reasoning_config.get("enabled") is False:
_kimi_thinking_enabled = False
extra_body["thinking"] = {
"type": "enabled" if _kimi_thinking_enabled else "disabled",
}
# Reasoning
if params.get("supports_reasoning", False):
if is_github_models:
gh_reasoning = params.get("github_reasoning_extra")
if gh_reasoning is not None:
extra_body["reasoning"] = gh_reasoning
else:
if reasoning_config is not None:
rc = dict(reasoning_config)
if is_nous and rc.get("enabled") is False:
pass # omit for Nous when disabled
else:
extra_body["reasoning"] = rc
else:
extra_body["reasoning"] = {"enabled": True, "effort": "medium"}
if is_nous:
extra_body["tags"] = ["product=hermes-agent"]
# Ollama num_ctx
ollama_ctx = params.get("ollama_num_ctx")
if ollama_ctx:
options = extra_body.get("options", {})
options["num_ctx"] = ollama_ctx
extra_body["options"] = options
# Ollama/custom think=false
if params.get("is_custom_provider", False):
if reasoning_config and isinstance(reasoning_config, dict):
_effort = (reasoning_config.get("effort") or "").strip().lower()
_enabled = reasoning_config.get("enabled", True)
if _effort == "none" or _enabled is False:
extra_body["think"] = False
if is_qwen:
extra_body["vl_high_resolution_images"] = True
# Merge any pre-built extra_body additions
additions = params.get("extra_body_additions")
if additions:
extra_body.update(additions)
if extra_body:
api_kwargs["extra_body"] = extra_body
# Request overrides last (service_tier etc.)
overrides = params.get("request_overrides")
if overrides:
api_kwargs.update(overrides)
return api_kwargs
def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
"""Normalize OpenAI ChatCompletion to NormalizedResponse.
For chat_completions, this is near-identity the response is already
in OpenAI format. extra_content on tool_calls (Gemini thought_signature)
is preserved via ToolCall.provider_data. reasoning_details (OpenRouter
unified format) and reasoning_content (DeepSeek/Moonshot) are also
preserved for downstream replay.
"""
choice = response.choices[0]
msg = choice.message
finish_reason = choice.finish_reason or "stop"
tool_calls = None
if msg.tool_calls:
tool_calls = []
for tc in msg.tool_calls:
# Preserve provider-specific extras on the tool call.
# Gemini 3 thinking models attach extra_content with
# thought_signature — without replay on the next turn the API
# rejects the request with 400.
tc_provider_data: Dict[str, Any] = {}
extra = getattr(tc, "extra_content", None)
if extra is None and hasattr(tc, "model_extra"):
extra = (tc.model_extra or {}).get("extra_content")
if extra is not None:
if hasattr(extra, "model_dump"):
try:
extra = extra.model_dump()
except Exception:
pass
tc_provider_data["extra_content"] = extra
tool_calls.append(ToolCall(
id=tc.id,
name=tc.function.name,
arguments=tc.function.arguments,
provider_data=tc_provider_data or None,
))
usage = None
if hasattr(response, "usage") and response.usage:
u = response.usage
usage = Usage(
prompt_tokens=getattr(u, "prompt_tokens", 0) or 0,
completion_tokens=getattr(u, "completion_tokens", 0) or 0,
total_tokens=getattr(u, "total_tokens", 0) or 0,
)
# Preserve reasoning fields separately. DeepSeek/Moonshot use
# ``reasoning_content``; others use ``reasoning``. Downstream code
# (_extract_reasoning, thinking-prefill retry) reads both distinctly,
# so keep them apart in provider_data rather than merging.
reasoning = getattr(msg, "reasoning", None)
reasoning_content = getattr(msg, "reasoning_content", None)
provider_data: Dict[str, Any] = {}
if reasoning_content:
provider_data["reasoning_content"] = reasoning_content
rd = getattr(msg, "reasoning_details", None)
if rd:
provider_data["reasoning_details"] = rd
return NormalizedResponse(
content=msg.content,
tool_calls=tool_calls,
finish_reason=finish_reason,
reasoning=reasoning,
usage=usage,
provider_data=provider_data or None,
)
def validate_response(self, response: Any) -> bool:
"""Check that response has valid choices."""
if response is None:
return False
if not hasattr(response, "choices") or response.choices is None:
return False
if not response.choices:
return False
return True
def extract_cache_stats(self, response: Any) -> Optional[Dict[str, int]]:
"""Extract OpenRouter/OpenAI cache stats from prompt_tokens_details."""
usage = getattr(response, "usage", None)
if usage is None:
return None
details = getattr(usage, "prompt_tokens_details", None)
if details is None:
return None
cached = getattr(details, "cached_tokens", 0) or 0
written = getattr(details, "cache_write_tokens", 0) or 0
if cached or written:
return {"cached_tokens": cached, "creation_tokens": written}
return None
# Auto-register on import — importing this module makes the transport
# resolvable from the registry under its api_mode key.
from agent.transports import register_transport  # noqa: E402
register_transport("chat_completions", ChatCompletionsTransport)

217
agent/transports/codex.py Normal file
View file

@ -0,0 +1,217 @@
"""OpenAI Responses API (Codex) transport.
Delegates to the existing adapter functions in agent/codex_responses_adapter.py.
This transport owns format conversion and normalization NOT client lifecycle,
streaming, or the _run_codex_stream() call path.
"""
from typing import Any, Dict, List, Optional
from agent.transports.base import ProviderTransport
from agent.transports.types import NormalizedResponse, ToolCall, Usage
class ResponsesApiTransport(ProviderTransport):
    """Transport for api_mode='codex_responses'.
    Wraps the functions extracted into codex_responses_adapter.py (PR 1).
    Adapter functions are imported lazily inside each method — presumably to
    avoid import cycles at module load; confirm before hoisting to top-level.
    """
    @property
    def api_mode(self) -> str:
        # Registry key for this transport.
        return "codex_responses"
    def convert_messages(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
        """Convert OpenAI chat messages to Responses API input items."""
        from agent.codex_responses_adapter import _chat_messages_to_responses_input
        return _chat_messages_to_responses_input(messages)
    def convert_tools(self, tools: List[Dict[str, Any]]) -> Any:
        """Convert OpenAI tool schemas to Responses API function definitions."""
        from agent.codex_responses_adapter import _responses_tools
        return _responses_tools(tools)
    def build_kwargs(
        self,
        model: str,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **params,
    ) -> Dict[str, Any]:
        """Build Responses API kwargs.
        Calls convert_messages and convert_tools internally.
        params:
            instructions: str — system prompt (extracted from messages[0] if not given)
            reasoning_config: dict | None — {effort, enabled}
            session_id: str | None — used for prompt_cache_key + xAI conv header
            max_tokens: int | None — max_output_tokens
            request_overrides: dict | None — extra kwargs merged in
            provider: str | None — provider name for backend-specific logic
            base_url: str | None — endpoint URL
            base_url_hostname: str | None — hostname for backend detection
            is_github_responses: bool — Copilot/GitHub models backend
            is_codex_backend: bool — chatgpt.com/backend-api/codex
            is_xai_responses: bool — xAI/Grok backend
            github_reasoning_extra: dict | None — Copilot reasoning params
        """
        from agent.codex_responses_adapter import (
            _chat_messages_to_responses_input,
            _responses_tools,
        )
        from run_agent import DEFAULT_AGENT_IDENTITY
        # Resolve instructions: explicit param > leading system message >
        # default agent identity. When the system message is consumed it is
        # removed from the payload.
        instructions = params.get("instructions", "")
        payload_messages = messages
        if not instructions:
            if messages and messages[0].get("role") == "system":
                instructions = str(messages[0].get("content") or "").strip()
                payload_messages = messages[1:]
        if not instructions:
            instructions = DEFAULT_AGENT_IDENTITY
        is_github_responses = params.get("is_github_responses", False)
        is_codex_backend = params.get("is_codex_backend", False)
        is_xai_responses = params.get("is_xai_responses", False)
        # Resolve reasoning effort
        reasoning_effort = "medium"
        reasoning_enabled = True
        reasoning_config = params.get("reasoning_config")
        if reasoning_config and isinstance(reasoning_config, dict):
            if reasoning_config.get("enabled") is False:
                reasoning_enabled = False
            elif reasoning_config.get("effort"):
                reasoning_effort = reasoning_config["effort"]
        # "minimal" is not accepted by this API — clamp it up to "low".
        _effort_clamp = {"minimal": "low"}
        reasoning_effort = _effort_clamp.get(reasoning_effort, reasoning_effort)
        kwargs = {
            "model": model,
            "instructions": instructions,
            "input": _chat_messages_to_responses_input(payload_messages),
            "tools": _responses_tools(tools),
            "tool_choice": "auto",
            "parallel_tool_calls": True,
            "store": False,
        }
        session_id = params.get("session_id")
        if not is_github_responses and session_id:
            kwargs["prompt_cache_key"] = session_id
        if reasoning_enabled and is_xai_responses:
            kwargs["include"] = ["reasoning.encrypted_content"]
        elif reasoning_enabled:
            if is_github_responses:
                github_reasoning = params.get("github_reasoning_extra")
                if github_reasoning is not None:
                    kwargs["reasoning"] = github_reasoning
            else:
                kwargs["reasoning"] = {"effort": reasoning_effort, "summary": "auto"}
                kwargs["include"] = ["reasoning.encrypted_content"]
        elif not is_github_responses and not is_xai_responses:
            kwargs["include"] = []
        request_overrides = params.get("request_overrides")
        if request_overrides:
            kwargs.update(request_overrides)
        # NOTE(review): max_output_tokens is applied AFTER request_overrides
        # here, unlike the chat-completions transport where overrides win
        # last — confirm this ordering difference is intentional.
        max_tokens = params.get("max_tokens")
        if max_tokens is not None and not is_codex_backend:
            kwargs["max_output_tokens"] = max_tokens
        if is_xai_responses and session_id:
            kwargs["extra_headers"] = {"x-grok-conv-id": session_id}
        return kwargs
    def normalize_response(self, response: Any, **kwargs) -> NormalizedResponse:
        """Normalize Codex Responses API response to NormalizedResponse."""
        # NOTE(review): the two _extract_responses_* imports appear unused in
        # this method body — confirm whether they can be dropped.
        from agent.codex_responses_adapter import (
            _normalize_codex_response,
            _extract_responses_message_text,
            _extract_responses_reasoning_text,
        )
        # _normalize_codex_response returns (SimpleNamespace, finish_reason_str)
        msg, finish_reason = _normalize_codex_response(response)
        tool_calls = None
        if msg and msg.tool_calls:
            tool_calls = []
            for tc in msg.tool_calls:
                # Codex-specific identifiers are carried in provider_data so
                # they can be replayed on the next turn.
                provider_data = {}
                if hasattr(tc, "call_id") and tc.call_id:
                    provider_data["call_id"] = tc.call_id
                if hasattr(tc, "response_item_id") and tc.response_item_id:
                    provider_data["response_item_id"] = tc.response_item_id
                tool_calls.append(ToolCall(
                    id=tc.id if hasattr(tc, "id") else (tc.function.name if hasattr(tc, "function") else None),
                    name=tc.function.name if hasattr(tc, "function") else getattr(tc, "name", ""),
                    arguments=tc.function.arguments if hasattr(tc, "function") else getattr(tc, "arguments", "{}"),
                    provider_data=provider_data or None,
                ))
        # Extract reasoning items for provider_data
        provider_data = {}
        if msg and hasattr(msg, "codex_reasoning_items") and msg.codex_reasoning_items:
            provider_data["codex_reasoning_items"] = msg.codex_reasoning_items
        if msg and hasattr(msg, "reasoning_details") and msg.reasoning_details:
            provider_data["reasoning_details"] = msg.reasoning_details
        return NormalizedResponse(
            content=msg.content if msg else None,
            tool_calls=tool_calls,
            finish_reason=finish_reason or "stop",
            reasoning=msg.reasoning if msg and hasattr(msg, "reasoning") else None,
            usage=None,  # Codex usage is extracted separately in normalize_usage()
            provider_data=provider_data or None,
        )
    def validate_response(self, response: Any) -> bool:
        """Check Codex Responses API response has valid output structure.
        Returns True only if response.output is a non-empty list.
        Does NOT check output_text fallback — the caller handles that
        with diagnostic logging for stream backfill recovery.
        """
        if response is None:
            return False
        output = getattr(response, "output", None)
        if not isinstance(output, list) or not output:
            return False
        return True
    def preflight_kwargs(self, api_kwargs: Any, *, allow_stream: bool = False) -> dict:
        """Validate and sanitize Codex API kwargs before the call.
        Normalizes input items, strips unsupported fields, validates structure.
        """
        from agent.codex_responses_adapter import _preflight_codex_api_kwargs
        return _preflight_codex_api_kwargs(api_kwargs, allow_stream=allow_stream)
    def map_finish_reason(self, raw_reason: str) -> str:
        """Map Codex response.status to OpenAI finish_reason.
        Codex uses response.status ('completed', 'incomplete') +
        response.incomplete_details.reason for granular mapping.
        This method handles the simple status string; the caller
        should check incomplete_details separately for 'max_output_tokens'.
        Unknown statuses fall back to "stop".
        """
        _MAP = {
            "completed": "stop",
            "incomplete": "length",
            "failed": "stop",
            "cancelled": "stop",
        }
        return _MAP.get(raw_reason, "stop")
# Auto-register on import — importing this module makes the transport
# resolvable from the registry under its api_mode key.
from agent.transports import register_transport  # noqa: E402
register_transport("codex_responses", ResponsesApiTransport)

100
agent/transports/types.py Normal file
View file

@ -0,0 +1,100 @@
"""Shared types for normalized provider responses.
These dataclasses define the canonical shape that all provider adapters
normalize responses to. The shared surface is intentionally minimal
only fields that every downstream consumer reads are top-level.
Protocol-specific state goes in ``provider_data`` dicts (response-level
and per-tool-call) so that protocol-aware code paths can access it
without polluting the shared type.
"""
from __future__ import annotations
import json
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
@dataclass
class ToolCall:
    """A normalized tool call from any provider.
    ``id`` is the protocol's canonical identifier — what gets used in
    ``tool_call_id`` / ``tool_use_id`` when constructing tool result
    messages. May be ``None`` when the provider omits it; the agent
    fills it via ``_deterministic_call_id()`` before storing in history.
    ``provider_data`` carries per-tool-call protocol metadata that only
    protocol-aware code reads:
    * Codex: ``{"call_id": "call_XXX", "response_item_id": "fc_XXX"}``
    * Gemini: ``{"extra_content": {"google": {"thought_signature": "..."}}}``
    * Others: ``None``
    """
    id: Optional[str]  # canonical tool-call identifier, or None when the provider omits it
    name: str  # tool/function name as the provider reported it
    arguments: str  # JSON string (not a parsed dict)
    # repr=False keeps potentially large opaque protocol blobs out of debug output.
    provider_data: Optional[Dict[str, Any]] = field(default=None, repr=False)
@dataclass
class Usage:
    """Token usage from an API response.
    All counters default to 0 so adapters can fill in only what the
    provider actually reports.
    """
    prompt_tokens: int = 0  # tokens consumed by the request/prompt
    completion_tokens: int = 0  # tokens generated in the response
    total_tokens: int = 0  # provider-reported total
    cached_tokens: int = 0  # prompt tokens served from a provider cache — exact semantics vary per provider; confirm per adapter
@dataclass
class NormalizedResponse:
    """Normalized API response from any provider.
    Shared fields are truly cross-provider — every caller can rely on
    them without branching on api_mode. Protocol-specific state goes in
    ``provider_data`` so that only protocol-aware code paths read it.
    Response-level ``provider_data`` examples:
    * Anthropic: ``{"reasoning_details": [...]}``
    * Codex: ``{"codex_reasoning_items": [...]}``
    * Others: ``None``
    """
    content: Optional[str]  # assistant text; may be None (e.g. tool-call-only turns)
    tool_calls: Optional[List[ToolCall]]  # None when the turn produced no tool calls
    finish_reason: str  # "stop", "tool_calls", "length", "content_filter"
    reasoning: Optional[str] = None  # plain-text reasoning/thinking, when the provider exposes one
    usage: Optional[Usage] = None  # token accounting, when available
    # repr=False keeps potentially large protocol blobs out of debug output.
    provider_data: Optional[Dict[str, Any]] = field(default=None, repr=False)
# ---------------------------------------------------------------------------
# Factory helpers
# ---------------------------------------------------------------------------
def build_tool_call(
    id: Optional[str],
    name: str,
    arguments: Any,
    **provider_fields: Any,
) -> ToolCall:
    """Build a ``ToolCall``, auto-serialising *arguments* to a JSON string.

    ``ToolCall.arguments`` must hold JSON text. Strings are passed through
    unchanged; anything else is serialised with ``json.dumps``. (The old
    ``str()`` path produced invalid JSON for lists — single-quoted reprs —
    and the literal ``"None"`` for ``None``.) Objects json cannot encode
    fall back to ``str()`` as before, best-effort.

    Any extra keyword arguments are collected into ``provider_data``.
    """
    if isinstance(arguments, str):
        # Assume the provider already gave us JSON text — pass through.
        args_str = arguments
    else:
        try:
            args_str = json.dumps(arguments)
        except (TypeError, ValueError):
            # Legacy best-effort fallback for non-JSON-encodable objects.
            args_str = str(arguments)
    pd = dict(provider_fields) if provider_fields else None
    return ToolCall(id=id, name=name, arguments=args_str, provider_data=pd)
def map_finish_reason(reason: Optional[str], mapping: Dict[str, str]) -> str:
    """Translate a provider-specific stop reason to the normalised set.

    ``None`` and any reason absent from *mapping* both resolve to ``"stop"``.
    """
    fallback = "stop"
    return fallback if reason is None else mapping.get(reason, fallback)

View file

@ -6,6 +6,7 @@ from decimal import Decimal
from typing import Any, Dict, Literal, Optional
from agent.model_metadata import fetch_endpoint_model_metadata, fetch_model_metadata
from utils import base_url_host_matches
DEFAULT_PRICING = {"input": 0.0, "output": 0.0}
@ -393,7 +394,7 @@ def resolve_billing_route(
if provider_name == "openai-codex":
return BillingRoute(provider="openai-codex", model=model, base_url=base_url or "", billing_mode="subscription_included")
if provider_name == "openrouter" or "openrouter.ai" in base:
if provider_name == "openrouter" or base_url_host_matches(base_url or "", "openrouter.ai"):
return BillingRoute(provider="openrouter", model=model, base_url=base_url or "", billing_mode="official_models_api")
if provider_name == "anthropic":
return BillingRoute(provider="anthropic", model=model.split("/")[-1], base_url=base_url or "", billing_mode="official_docs_snapshot")

View file

@ -444,6 +444,7 @@ def _process_batch_worker(args: Tuple) -> Dict[str, Any]:
if not reasoning.get("has_any_reasoning", True):
print(f" 🚫 Prompt {prompt_index} discarded (no reasoning in any turn)")
discarded_no_reasoning += 1
completed_in_batch.append(prompt_index)
continue
# Get and normalize tool stats for consistent schema across all entries
@ -1189,12 +1190,12 @@ def main(
"""
# Handle list distributions
if list_distributions:
from toolset_distributions import list_distributions as get_all_dists, print_distribution_info
from toolset_distributions import print_distribution_info
print("📊 Available Toolset Distributions")
print("=" * 70)
all_dists = get_all_dists()
all_dists = list_distributions()
for dist_name in sorted(all_dists.keys()):
print_distribution_info(dist_name)

View file

@ -63,7 +63,38 @@ model:
# Leave unset to use the model's native output ceiling (recommended).
# Set only if you want to deliberately limit individual response length.
#
# max_tokens: 8192
# max_tokens: 8192
# Named provider overrides (optional)
# Use this for per-provider request timeouts, non-stream stale timeouts,
# and per-model exceptions.
# Applies to the primary turn client on every api_mode (OpenAI-wire, native
# Anthropic, and Anthropic-compatible providers), the fallback chain, and
# client rebuilds during credential rotation. For OpenAI-wire chat
# completions (streaming and non-streaming) the configured value is also
# used as the per-request ``timeout=`` kwarg so it wins over the legacy
# HERMES_API_TIMEOUT env var (which still applies when no config is set).
# ``stale_timeout_seconds`` controls the non-streaming stale-call detector and
# wins over the legacy HERMES_API_CALL_STALE_TIMEOUT env var. Leaving these
# unset keeps the legacy defaults (HERMES_API_TIMEOUT=1800s,
# HERMES_API_CALL_STALE_TIMEOUT=300s, native Anthropic 900s).
#
# Not currently wired for AWS Bedrock (bedrock_converse + AnthropicBedrock
# SDK paths) — those use boto3 with its own timeout configuration.
#
# providers:
# ollama-local:
# request_timeout_seconds: 300 # Longer timeout for local cold-starts
# stale_timeout_seconds: 900 # Explicitly re-enable stale detection on local endpoints
# anthropic:
# request_timeout_seconds: 30 # Fast-fail cloud requests
# models:
# claude-opus-4.6:
# timeout_seconds: 600 # Longer timeout for extended-thinking Opus calls
# openai-codex:
# models:
# gpt-5.4:
# stale_timeout_seconds: 1800 # Longer non-stream stale timeout for slow large-context turns
# =============================================================================
# OpenRouter Provider Routing (only applies when using OpenRouter)
@ -91,20 +122,6 @@ model:
# # Data policy: "allow" (default) or "deny" to exclude providers that may store data
# # data_collection: "deny"
# =============================================================================
# Smart Model Routing (optional)
# =============================================================================
# Use a cheaper model for short/simple turns while keeping your main model for
# more complex requests. Disabled by default.
#
# smart_model_routing:
# enabled: true
# max_simple_chars: 160
# max_simple_words: 28
# cheap_model:
# provider: openrouter
# model: google/gemini-2.5-flash
# =============================================================================
# Git Worktree Isolation
# =============================================================================
@ -357,6 +374,18 @@ compression:
# web_extract:
# provider: "auto"
# model: ""
#
# # Session search — summarizes matching past sessions
# session_search:
# provider: "auto"
# model: ""
# timeout: 30
# max_concurrency: 3 # Limit parallel summaries to reduce request-burst 429s
# extra_body: {} # Provider-specific OpenAI-compatible request fields
# # Example for providers that support request-body
# # reasoning controls:
# # extra_body:
# # enable_thinking: false
# =============================================================================
# Persistent Memory
@ -741,10 +770,12 @@ code_execution:
# Subagent Delegation
# =============================================================================
# The delegate_task tool spawns child agents with isolated context.
# Supports single tasks and batch mode (up to 3 parallel).
# Supports single tasks and batch mode (default 3 parallel, configurable).
delegation:
max_iterations: 50 # Max tool-calling turns per child (default: 50)
default_toolsets: ["terminal", "file", "web"] # Default toolsets for subagents
# max_concurrent_children: 3 # Max parallel child agents (default: 3)
# max_spawn_depth: 1 # Tree depth cap (1-3, default: 1 = flat). Raise to 2 or 3 to allow orchestrator children to spawn their own workers.
# orchestrator_enabled: true # Kill switch for role="orchestrator" children (default: true).
# model: "google/gemini-3-flash-preview" # Override model for subagents (empty = inherit parent)
# provider: "openrouter" # Override provider for subagents (empty = inherit parent)
# # Resolves full credentials (base_url, api_key) automatically.
@ -888,3 +919,39 @@ display:
# # Names and usernames are NOT affected (user-chosen, publicly visible).
# # Routing/delivery still uses the original values internally.
# redact_pii: false
# =============================================================================
# Shell-script hooks
# =============================================================================
# Register shell scripts as plugin-hook callbacks. Each entry is executed as
# a subprocess (shell=False, shlex.split) with a JSON payload on stdin. On
# stdout the script may return JSON that either blocks the tool call or
# injects context into the next LLM call.
#
# Valid events (mirror hermes_cli.plugins.VALID_HOOKS):
# pre_tool_call, post_tool_call, pre_llm_call, post_llm_call,
# pre_api_request, post_api_request, on_session_start, on_session_end,
# on_session_finalize, on_session_reset, subagent_stop
#
# First-use consent: each (event, command) pair prompts once on a TTY, then
# is persisted to ~/.hermes/shell-hooks-allowlist.json. Non-interactive
# runs (gateway, cron) need --accept-hooks, HERMES_ACCEPT_HOOKS=1, or the
# hooks_auto_accept key below.
#
# See website/docs/user-guide/features/hooks.md for the full JSON wire
# protocol and worked examples.
#
# hooks:
# pre_tool_call:
# - matcher: "terminal"
# command: "~/.hermes/agent-hooks/block-rm-rf.sh"
# timeout: 10
# post_tool_call:
# - matcher: "write_file|patch"
# command: "~/.hermes/agent-hooks/auto-format.sh"
# pre_llm_call:
# - command: "~/.hermes/agent-hooks/inject-cwd-context.sh"
# subagent_stop:
# - command: "~/.hermes/agent-hooks/log-orchestration.sh"
#
# hooks_auto_accept: false

917
cli.py

File diff suppressed because it is too large Load diff

View file

@ -9,6 +9,7 @@ import copy
import json
import logging
import tempfile
import threading
import os
import re
import uuid
@ -34,6 +35,11 @@ except ImportError:
HERMES_DIR = get_hermes_home().resolve()
CRON_DIR = HERMES_DIR / "cron"
JOBS_FILE = CRON_DIR / "jobs.json"
# In-process lock protecting load_jobs→modify→save_jobs cycles.
# Required when tick() runs jobs in parallel threads — without this,
# concurrent mark_job_run / advance_next_run calls can clobber each other.
_jobs_file_lock = threading.Lock()
OUTPUT_DIR = CRON_DIR / "output"
ONESHOT_GRACE_SECONDS = 120
@ -594,43 +600,44 @@ def mark_job_run(job_id: str, success: bool, error: Optional[str] = None,
``delivery_error`` is tracked separately from the agent error a job
can succeed (agent produced output) but fail delivery (platform down).
"""
jobs = load_jobs()
for i, job in enumerate(jobs):
if job["id"] == job_id:
now = _hermes_now().isoformat()
job["last_run_at"] = now
job["last_status"] = "ok" if success else "error"
job["last_error"] = error if not success else None
# Track delivery failures separately — cleared on successful delivery
job["last_delivery_error"] = delivery_error
# Increment completed count
if job.get("repeat"):
job["repeat"]["completed"] = job["repeat"].get("completed", 0) + 1
with _jobs_file_lock:
jobs = load_jobs()
for i, job in enumerate(jobs):
if job["id"] == job_id:
now = _hermes_now().isoformat()
job["last_run_at"] = now
job["last_status"] = "ok" if success else "error"
job["last_error"] = error if not success else None
# Track delivery failures separately — cleared on successful delivery
job["last_delivery_error"] = delivery_error
# Check if we've hit the repeat limit
times = job["repeat"].get("times")
completed = job["repeat"]["completed"]
if times is not None and times > 0 and completed >= times:
# Remove the job (limit reached)
jobs.pop(i)
save_jobs(jobs)
return
# Compute next run
job["next_run_at"] = compute_next_run(job["schedule"], now)
# Increment completed count
if job.get("repeat"):
job["repeat"]["completed"] = job["repeat"].get("completed", 0) + 1
# Check if we've hit the repeat limit
times = job["repeat"].get("times")
completed = job["repeat"]["completed"]
if times is not None and times > 0 and completed >= times:
# Remove the job (limit reached)
jobs.pop(i)
save_jobs(jobs)
return
# Compute next run
job["next_run_at"] = compute_next_run(job["schedule"], now)
# If no next run (one-shot completed), disable
if job["next_run_at"] is None:
job["enabled"] = False
job["state"] = "completed"
elif job.get("state") != "paused":
job["state"] = "scheduled"
# If no next run (one-shot completed), disable
if job["next_run_at"] is None:
job["enabled"] = False
job["state"] = "completed"
elif job.get("state") != "paused":
job["state"] = "scheduled"
save_jobs(jobs)
return
save_jobs(jobs)
return
logger.warning("mark_job_run: job_id %s not found, skipping save", job_id)
logger.warning("mark_job_run: job_id %s not found, skipping save", job_id)
def advance_next_run(job_id: str) -> bool:
@ -645,20 +652,21 @@ def advance_next_run(job_id: str) -> bool:
Returns True if next_run_at was advanced, False otherwise.
"""
jobs = load_jobs()
for job in jobs:
if job["id"] == job_id:
kind = job.get("schedule", {}).get("kind")
if kind not in ("cron", "interval"):
with _jobs_file_lock:
jobs = load_jobs()
for job in jobs:
if job["id"] == job_id:
kind = job.get("schedule", {}).get("kind")
if kind not in ("cron", "interval"):
return False
now = _hermes_now().isoformat()
new_next = compute_next_run(job["schedule"], now)
if new_next and new_next != job.get("next_run_at"):
job["next_run_at"] = new_next
save_jobs(jobs)
return True
return False
now = _hermes_now().isoformat()
new_next = compute_next_run(job["schedule"], now)
if new_next and new_next != job.get("next_run_at"):
job["next_run_at"] = new_next
save_jobs(jobs)
return True
return False
return False
return False
def get_due_jobs() -> List[Dict[str, Any]]:

View file

@ -252,7 +252,11 @@ def _send_media_via_adapter(adapter, chat_id: str, media_files: list, metadata:
coro = adapter.send_document(chat_id=chat_id, file_path=media_path, metadata=metadata)
future = asyncio.run_coroutine_threadsafe(coro, loop)
result = future.result(timeout=30)
try:
result = future.result(timeout=30)
except TimeoutError:
future.cancel()
raise
if result and not getattr(result, "success", True):
logger.warning(
"Job '%s': media send failed for %s: %s",
@ -382,7 +386,11 @@ def _deliver_result(job: dict, content: str, adapters=None, loop=None) -> Option
runtime_adapter.send(chat_id, text_to_send, metadata=send_metadata),
loop,
)
send_result = future.result(timeout=60)
try:
send_result = future.result(timeout=60)
except TimeoutError:
future.cancel()
raise
if send_result and not getattr(send_result, "success", True):
err = getattr(send_result, "error", "unknown")
logger.warning(
@ -422,7 +430,6 @@ def _deliver_result(job: dict, content: str, adapters=None, loop=None) -> Option
# prevent "coroutine was never awaited" RuntimeWarning, then retry in a
# fresh thread that has no running loop.
coro.close()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
future = pool.submit(asyncio.run, _send_to_platform(platform, pconfig, chat_id, cleaned_delivery_content, thread_id=thread_id, media_files=media_files))
result = future.result(timeout=30)
@ -564,15 +571,53 @@ def _run_job_script(script_path: str) -> tuple[bool, str]:
return False, f"Script execution failed: {exc}"
def _build_job_prompt(job: dict) -> str:
"""Build the effective prompt for a cron job, optionally loading one or more skills first."""
def _parse_wake_gate(script_output: str) -> bool:
"""Parse the last non-empty stdout line of a cron job's pre-check script
as a wake gate.
The convention (ported from nanoclaw #1232): if the last stdout line is
JSON like ``{"wakeAgent": false}``, the agent is skipped entirely no
LLM run, no delivery. Any other output (non-JSON, missing flag, gate
absent, or ``wakeAgent: true``) means wake the agent normally.
Returns True if the agent should wake, False to skip.
"""
if not script_output:
return True
stripped_lines = [line for line in script_output.splitlines() if line.strip()]
if not stripped_lines:
return True
last_line = stripped_lines[-1].strip()
try:
gate = json.loads(last_line)
except (json.JSONDecodeError, ValueError):
return True
if not isinstance(gate, dict):
return True
return gate.get("wakeAgent", True) is not False
def _build_job_prompt(job: dict, prerun_script: Optional[tuple] = None) -> str:
"""Build the effective prompt for a cron job, optionally loading one or more skills first.
Args:
job: The cron job dict.
prerun_script: Optional ``(success, stdout)`` from a script that has
already been executed by the caller (e.g. for a wake-gate check).
When provided, the script is not re-executed and the cached
result is used for prompt injection. When omitted, the script
(if any) runs inline as before.
"""
prompt = job.get("prompt", "")
skills = job.get("skills")
# Run data-collection script if configured, inject output as context.
script_path = job.get("script")
if script_path:
success, script_output = _run_job_script(script_path)
if prerun_script is not None:
success, script_output = prerun_script
else:
success, script_output = _run_job_script(script_path)
if success:
if script_output:
prompt = (
@ -674,7 +719,30 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
job_id = job["id"]
job_name = job["name"]
prompt = _build_job_prompt(job)
# Wake-gate: if this job has a pre-check script, run it BEFORE building
# the prompt so a ``{"wakeAgent": false}`` response can short-circuit
# the whole agent run. We pass the result into _build_job_prompt so
# the script is only executed once.
prerun_script = None
script_path = job.get("script")
if script_path:
prerun_script = _run_job_script(script_path)
_ran_ok, _script_output = prerun_script
if _ran_ok and not _parse_wake_gate(_script_output):
logger.info(
"Job '%s' (ID: %s): wakeAgent=false, skipping agent run",
job_name, job_id,
)
silent_doc = (
f"# Cron Job: {job_name}\n\n"
f"**Job ID:** {job_id}\n"
f"**Run Time:** {_hermes_now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
"Script gate returned `wakeAgent=false` — agent skipped.\n"
)
return True, silent_doc, SILENT_MARKER, None
prompt = _build_job_prompt(job, prerun_script=prerun_script)
origin = _resolve_origin(job)
_cron_session_id = f"cron_{job_id}_{_hermes_now().strftime('%Y%m%d_%H%M%S')}"
@ -686,14 +754,17 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
# scheduler process — every job this process runs is a cron job.
os.environ["HERMES_CRON_SESSION"] = "1"
# Use ContextVars for per-job session/delivery state so parallel jobs
# don't clobber each other's targets (os.environ is process-global).
from gateway.session_context import set_session_vars, clear_session_vars, _VAR_MAP
_ctx_tokens = set_session_vars(
platform=origin["platform"] if origin else "",
chat_id=str(origin["chat_id"]) if origin else "",
chat_name=origin.get("chat_name", "") if origin else "",
)
try:
# Inject origin context so the agent's send_message tool knows the chat.
# Must be INSIDE the try block so the finally cleanup always runs.
if origin:
os.environ["HERMES_SESSION_PLATFORM"] = origin["platform"]
os.environ["HERMES_SESSION_CHAT_ID"] = str(origin["chat_id"])
if origin.get("chat_name"):
os.environ["HERMES_SESSION_CHAT_NAME"] = origin["chat_name"]
# Re-read .env and config.yaml fresh every run so provider/key
# changes take effect without a gateway restart.
from dotenv import load_dotenv
@ -704,10 +775,10 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
delivery_target = _resolve_delivery_target(job)
if delivery_target:
os.environ["HERMES_CRON_AUTO_DELIVER_PLATFORM"] = delivery_target["platform"]
os.environ["HERMES_CRON_AUTO_DELIVER_CHAT_ID"] = str(delivery_target["chat_id"])
_VAR_MAP["HERMES_CRON_AUTO_DELIVER_PLATFORM"].set(delivery_target["platform"])
_VAR_MAP["HERMES_CRON_AUTO_DELIVER_CHAT_ID"].set(str(delivery_target["chat_id"]))
if delivery_target.get("thread_id") is not None:
os.environ["HERMES_CRON_AUTO_DELIVER_THREAD_ID"] = str(delivery_target["thread_id"])
_VAR_MAP["HERMES_CRON_AUTO_DELIVER_THREAD_ID"].set(str(delivery_target["thread_id"]))
model = job.get("model") or os.getenv("HERMES_MODEL") or ""
@ -746,14 +817,13 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
prefill_messages = None
prefill_file = os.getenv("HERMES_PREFILL_MESSAGES_FILE", "") or _cfg.get("prefill_messages_file", "")
if prefill_file:
import json as _json
pfpath = Path(prefill_file).expanduser()
if not pfpath.is_absolute():
pfpath = _hermes_home / pfpath
if pfpath.exists():
try:
with open(pfpath, "r", encoding="utf-8") as _pf:
prefill_messages = _json.load(_pf)
prefill_messages = json.load(_pf)
if not isinstance(prefill_messages, list):
prefill_messages = None
except Exception as e:
@ -765,7 +835,6 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
# Provider routing
pr = _cfg.get("provider_routing", {})
smart_routing = _cfg.get("smart_model_routing", {}) or {}
from hermes_cli.runtime_provider import (
resolve_runtime_provider,
@ -782,24 +851,9 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
message = format_runtime_provider_error(exc)
raise RuntimeError(message) from exc
from agent.smart_model_routing import resolve_turn_route
turn_route = resolve_turn_route(
prompt,
smart_routing,
{
"model": model,
"api_key": runtime.get("api_key"),
"base_url": runtime.get("base_url"),
"provider": runtime.get("provider"),
"api_mode": runtime.get("api_mode"),
"command": runtime.get("command"),
"args": list(runtime.get("args") or []),
},
)
fallback_model = _cfg.get("fallback_providers") or _cfg.get("fallback_model") or None
credential_pool = None
runtime_provider = str(turn_route["runtime"].get("provider") or "").strip().lower()
runtime_provider = str(runtime.get("provider") or "").strip().lower()
if runtime_provider:
try:
from agent.credential_pool import load_pool
@ -816,13 +870,13 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
logger.debug("Job '%s': failed to load credential pool for %s: %s", job_id, runtime_provider, e)
agent = AIAgent(
model=turn_route["model"],
api_key=turn_route["runtime"].get("api_key"),
base_url=turn_route["runtime"].get("base_url"),
provider=turn_route["runtime"].get("provider"),
api_mode=turn_route["runtime"].get("api_mode"),
acp_command=turn_route["runtime"].get("command"),
acp_args=turn_route["runtime"].get("args"),
model=model,
api_key=runtime.get("api_key"),
base_url=runtime.get("base_url"),
provider=runtime.get("provider"),
api_mode=runtime.get("api_mode"),
acp_command=runtime.get("command"),
acp_args=runtime.get("args"),
max_iterations=max_iterations,
reasoning_config=reasoning_config,
prefill_messages=prefill_messages,
@ -967,16 +1021,8 @@ def run_job(job: dict) -> tuple[bool, str, str, Optional[str]]:
return False, output, "", error_msg
finally:
# Clean up injected env vars so they don't leak to other jobs
for key in (
"HERMES_SESSION_PLATFORM",
"HERMES_SESSION_CHAT_ID",
"HERMES_SESSION_CHAT_NAME",
"HERMES_CRON_AUTO_DELIVER_PLATFORM",
"HERMES_CRON_AUTO_DELIVER_CHAT_ID",
"HERMES_CRON_AUTO_DELIVER_THREAD_ID",
):
os.environ.pop(key, None)
# Clean up ContextVar session/delivery state for this job.
clear_session_vars(_ctx_tokens)
if _session_db:
try:
_session_db.end_session(_cron_session_id, "cron_complete")
@ -1029,15 +1075,41 @@ def tick(verbose: bool = True, adapters=None, loop=None) -> int:
if verbose:
logger.info("%s - %s job(s) due", _hermes_now().strftime('%H:%M:%S'), len(due_jobs))
executed = 0
# Advance next_run_at for all recurring jobs FIRST, under the file lock,
# before any execution begins. This preserves at-most-once semantics.
for job in due_jobs:
try:
# For recurring jobs (cron/interval), advance next_run_at to the
# next future occurrence BEFORE execution. This way, if the
# process crashes mid-run, the job won't re-fire on restart.
# One-shot jobs are left alone so they can retry on restart.
advance_next_run(job["id"])
advance_next_run(job["id"])
# Resolve max parallel workers: env var > config.yaml > unbounded.
# Set HERMES_CRON_MAX_PARALLEL=1 to restore old serial behaviour.
_max_workers: Optional[int] = None
try:
_env_par = os.getenv("HERMES_CRON_MAX_PARALLEL", "").strip()
if _env_par:
_max_workers = int(_env_par) or None
except (ValueError, TypeError):
logger.warning("Invalid HERMES_CRON_MAX_PARALLEL value; defaulting to unbounded")
if _max_workers is None:
try:
_ucfg = load_config() or {}
_cfg_par = (
_ucfg.get("cron", {}) if isinstance(_ucfg, dict) else {}
).get("max_parallel_jobs")
if _cfg_par is not None:
_max_workers = int(_cfg_par) or None
except Exception:
pass
if verbose:
logger.info(
"Running %d job(s) in parallel (max_workers=%s)",
len(due_jobs),
_max_workers if _max_workers else "unbounded",
)
def _process_job(job: dict) -> bool:
"""Run one due job end-to-end: execute, save, deliver, mark."""
try:
success, output, final_response, error = run_job(job)
output_file = save_job_output(job["id"], output)
@ -1069,13 +1141,23 @@ def tick(verbose: bool = True, adapters=None, loop=None) -> int:
error = "Agent completed but produced empty response (model error, timeout, or misconfiguration)"
mark_job_run(job["id"], success, error, delivery_error=delivery_error)
executed += 1
return True
except Exception as e:
logger.error("Error processing job %s: %s", job['id'], e)
mark_job_run(job["id"], False, str(e))
return False
return executed
# Run all due jobs concurrently, each in its own ContextVar copy
# so session/delivery state stays isolated per-thread.
with concurrent.futures.ThreadPoolExecutor(max_workers=_max_workers) as _tick_pool:
_futures = []
for job in due_jobs:
_ctx = contextvars.copy_context()
_futures.append(_tick_pool.submit(_ctx.run, _process_job, job))
_results = [f.result() for f in _futures]
return sum(_results)
finally:
if fcntl:
fcntl.flock(lock_fd, fcntl.LOCK_UN)

View file

@ -1,228 +0,0 @@
# Hermes Agent — ACP (Agent Client Protocol) Setup Guide
Hermes Agent supports the **Agent Client Protocol (ACP)**, allowing it to run as
a coding agent inside your editor. ACP lets your IDE send tasks to Hermes, and
Hermes responds with file edits, terminal commands, and explanations — all shown
natively in the editor UI.
---
## Prerequisites
- Hermes Agent installed and configured (`hermes setup` completed)
- An API key / provider set up in `~/.hermes/.env` or via `hermes login`
- Python 3.11+
Install the ACP extra:
```bash
pip install -e ".[acp]"
```
---
## VS Code Setup
### 1. Install the ACP Client extension
Open VS Code and install **ACP Client** from the marketplace:
- Press `Ctrl+Shift+X` (or `Cmd+Shift+X` on macOS)
- Search for **"ACP Client"**
- Click **Install**
Or install from the command line:
```bash
code --install-extension anysphere.acp-client
```
### 2. Configure settings.json
Open your VS Code settings (`Ctrl+,` → click the `{}` icon for JSON) and add:
```json
{
"acpClient.agents": [
{
"name": "hermes-agent",
"registryDir": "/path/to/hermes-agent/acp_registry"
}
]
}
```
Replace `/path/to/hermes-agent` with the actual path to your Hermes Agent
installation (e.g. `~/.hermes/hermes-agent`).
Alternatively, if `hermes` is on your PATH, the ACP Client can discover it
automatically via the registry directory.
### 3. Restart VS Code
After configuring, restart VS Code. You should see **Hermes Agent** appear in
the ACP agent picker in the chat/agent panel.
---
## Zed Setup
Zed has built-in ACP support.
### 1. Configure Zed settings
Open Zed settings (`Cmd+,` on macOS or `Ctrl+,` on Linux) and add to your
`settings.json`:
```json
{
"agent_servers": {
"hermes-agent": {
"type": "custom",
"command": "hermes",
"args": ["acp"],
},
},
}
```
### 2. Restart Zed
Hermes Agent will appear in the agent panel. Select it and start a conversation.
---
## JetBrains Setup (IntelliJ, PyCharm, WebStorm, etc.)
### 1. Install the ACP plugin
- Open **Settings** → **Plugins** → **Marketplace**
- Search for **"ACP"** or **"Agent Client Protocol"**
- Install and restart the IDE
### 2. Configure the agent
- Open **Settings** → **Tools** → **ACP Agents**
- Click **+** to add a new agent
- Set the registry directory to your `acp_registry/` folder:
`/path/to/hermes-agent/acp_registry`
- Click **OK**
### 3. Use the agent
Open the ACP panel (usually in the right sidebar) and select **Hermes Agent**.
---
## What You Will See
Once connected, your editor provides a native interface to Hermes Agent:
### Chat Panel
A conversational interface where you can describe tasks, ask questions, and
give instructions. Hermes responds with explanations and actions.
### File Diffs
When Hermes edits files, you see standard diffs in the editor. You can:
- **Accept** individual changes
- **Reject** changes you don't want
- **Review** the full diff before applying
### Terminal Commands
When Hermes needs to run shell commands (builds, tests, installs), the editor
shows them in an integrated terminal. Depending on your settings:
- Commands may run automatically
- Or you may be prompted to **approve** each command
### Approval Flow
For potentially destructive operations, the editor will prompt you for
approval before Hermes proceeds. This includes:
- File deletions
- Shell commands
- Git operations
---
## Configuration
Hermes Agent under ACP uses the **same configuration** as the CLI:
- **API keys / providers**: `~/.hermes/.env`
- **Agent config**: `~/.hermes/config.yaml`
- **Skills**: `~/.hermes/skills/`
- **Sessions**: `~/.hermes/state.db`
You can run `hermes setup` to configure providers, or edit `~/.hermes/.env`
directly.
### Changing the model
Edit `~/.hermes/config.yaml`:
```yaml
model: openrouter/nous/hermes-3-llama-3.1-70b
```
Or set the `HERMES_MODEL` environment variable.
### Toolsets
ACP sessions use the curated `hermes-acp` toolset by default. It is designed for editor workflows and intentionally excludes things like messaging delivery, cronjob management, and audio-first UX features.
---
## Troubleshooting
### Agent doesn't appear in the editor
1. **Check the registry path** — make sure the `acp_registry/` directory path
in your editor settings is correct and contains `agent.json`.
2. **Check `hermes` is on PATH** — run `which hermes` in a terminal. If not
found, you may need to activate your virtualenv or add it to PATH.
3. **Restart the editor** after changing settings.
### Agent starts but errors immediately
1. Run `hermes doctor` to check your configuration.
2. Check that you have a valid API key: `hermes status`
3. Try running `hermes acp` directly in a terminal to see error output.
### "Module not found" errors
Make sure you installed the ACP extra:
```bash
pip install -e ".[acp]"
```
### Slow responses
- ACP streams responses, so you should see incremental output. If the agent
appears stuck, check your network connection and API provider status.
- Some providers have rate limits. Try switching to a different model/provider.
### Permission denied for terminal commands
If the editor blocks terminal commands, check your ACP Client extension
settings for auto-approval or manual-approval preferences.
### Logs
Hermes logs are written to stderr when running in ACP mode. Check:
- VS Code: **Output** panel → select **ACP Client** or **Hermes Agent**
- Zed: **View****Toggle Terminal** and check the process output
- JetBrains: **Event Log** or the ACP tool window
You can also enable verbose logging:
```bash
HERMES_LOG_LEVEL=DEBUG hermes acp
```
---
## Further Reading
- [ACP Specification](https://github.com/anysphere/acp)
- [Hermes Agent Documentation](https://github.com/NousResearch/hermes-agent)
- Run `hermes --help` for all CLI options

View file

@ -1,698 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>honcho-integration-spec</title>
<style>
:root {
--bg: #0b0e14;
--bg-surface: #11151c;
--bg-elevated: #181d27;
--bg-code: #0d1018;
--fg: #c9d1d9;
--fg-bright: #e6edf3;
--fg-muted: #6e7681;
--fg-subtle: #484f58;
--accent: #7eb8f6;
--accent-dim: #3d6ea5;
--accent-glow: rgba(126, 184, 246, 0.08);
--green: #7ee6a8;
--green-dim: #2ea04f;
--orange: #e6a855;
--red: #f47067;
--purple: #bc8cff;
--cyan: #56d4dd;
--border: #21262d;
--border-subtle: #161b22;
--radius: 6px;
--font-sans: 'New York', ui-serif, 'Iowan Old Style', 'Apple Garamond', Baskerville, 'Times New Roman', 'Noto Emoji', serif;
--font-mono: 'Departure Mono', 'Noto Emoji', monospace;
}
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
html { scroll-behavior: smooth; scroll-padding-top: 2rem; }
body {
font-family: var(--font-sans);
background: var(--bg);
color: var(--fg);
line-height: 1.7;
font-size: 15px;
-webkit-font-smoothing: antialiased;
}
.container { max-width: 860px; margin: 0 auto; padding: 3rem 2rem 6rem; }
.hero {
text-align: center;
padding: 4rem 0 3rem;
border-bottom: 1px solid var(--border);
margin-bottom: 3rem;
}
.hero h1 { font-family: var(--font-mono); font-size: 2.2rem; font-weight: 700; color: var(--fg-bright); letter-spacing: -0.03em; margin-bottom: 0.5rem; }
.hero h1 span { color: var(--accent); }
.hero .subtitle { font-family: var(--font-sans); color: var(--fg-muted); font-size: 0.92rem; max-width: 560px; margin: 0 auto; line-height: 1.6; }
.hero .meta { margin-top: 1.5rem; display: flex; justify-content: center; gap: 1.5rem; flex-wrap: wrap; }
.hero .meta span { font-size: 0.8rem; color: var(--fg-subtle); font-family: var(--font-mono); }
.toc { background: var(--bg-surface); border: 1px solid var(--border); border-radius: var(--radius); padding: 1.5rem 2rem; margin-bottom: 3rem; }
.toc h2 { font-size: 0.75rem; text-transform: uppercase; letter-spacing: 0.1em; color: var(--fg-muted); margin-bottom: 1rem; }
.toc ol { list-style: none; counter-reset: toc; columns: 2; column-gap: 2rem; }
.toc li { counter-increment: toc; break-inside: avoid; margin-bottom: 0.35rem; }
.toc li::before { content: counter(toc, decimal-leading-zero) " "; color: var(--fg-subtle); font-family: var(--font-mono); font-size: 0.75rem; margin-right: 0.25rem; }
.toc a { font-family: var(--font-mono); color: var(--fg); text-decoration: none; font-size: 0.82rem; transition: color 0.15s; }
.toc a:hover { color: var(--accent); }
section { margin-bottom: 4rem; }
section + section { padding-top: 1rem; }
h2 { font-family: var(--font-mono); font-size: 1.3rem; font-weight: 700; color: var(--fg-bright); letter-spacing: -0.01em; margin-bottom: 1.25rem; padding-bottom: 0.5rem; border-bottom: 1px solid var(--border); }
h3 { font-family: var(--font-mono); font-size: 1rem; font-weight: 600; color: var(--fg-bright); margin-top: 2rem; margin-bottom: 0.75rem; }
h4 { font-family: var(--font-mono); font-size: 0.9rem; font-weight: 600; color: var(--accent); margin-top: 1.5rem; margin-bottom: 0.5rem; }
p { margin-bottom: 1rem; font-size: 0.95rem; line-height: 1.75; }
strong { color: var(--fg-bright); font-weight: 600; }
a { color: var(--accent); text-decoration: none; }
a:hover { text-decoration: underline; }
ul, ol { margin-bottom: 1rem; padding-left: 1.5rem; font-size: 0.93rem; line-height: 1.7; }
li { margin-bottom: 0.35rem; }
li::marker { color: var(--fg-subtle); }
.table-wrap { overflow-x: auto; margin-bottom: 1.5rem; }
table { width: 100%; border-collapse: collapse; font-size: 0.88rem; }
th, td { text-align: left; padding: 0.6rem 1rem; border-bottom: 1px solid var(--border-subtle); }
th { font-family: var(--font-mono); font-size: 0.72rem; text-transform: uppercase; letter-spacing: 0.06em; color: var(--fg-muted); background: var(--bg-surface); border-bottom-color: var(--border); white-space: nowrap; }
td { font-family: var(--font-sans); font-size: 0.88rem; color: var(--fg); }
tr:hover td { background: var(--accent-glow); }
td code { background: var(--bg-elevated); padding: 0.15em 0.4em; border-radius: 3px; font-family: var(--font-mono); font-size: 0.82em; color: var(--cyan); }
pre { background: var(--bg-code); border: 1px solid var(--border); border-radius: var(--radius); padding: 1.25rem 1.5rem; overflow-x: auto; margin-bottom: 1.5rem; font-family: var(--font-mono); font-size: 0.82rem; line-height: 1.65; color: var(--fg); }
pre code { background: none; padding: 0; color: inherit; font-size: inherit; }
code { font-family: var(--font-mono); font-size: 0.85em; }
p code, li code { background: var(--bg-elevated); padding: 0.15em 0.4em; border-radius: 3px; color: var(--cyan); font-size: 0.85em; }
.kw { color: var(--purple); }
.str { color: var(--green); }
.cm { color: var(--fg-subtle); font-style: italic; }
.num { color: var(--orange); }
.key { color: var(--accent); }
.mermaid { margin: 1.5rem 0 2rem; text-align: center; }
.mermaid svg { max-width: 100%; height: auto; }
.callout { font-family: var(--font-sans); background: var(--bg-surface); border-left: 3px solid var(--accent-dim); border-radius: 0 var(--radius) var(--radius) 0; padding: 1rem 1.25rem; margin-bottom: 1.5rem; font-size: 0.88rem; color: var(--fg-muted); line-height: 1.6; }
.callout strong { font-family: var(--font-mono); color: var(--fg-bright); }
.callout.success { border-left-color: var(--green-dim); }
.callout.warn { border-left-color: var(--orange); }
.badge { display: inline-block; font-family: var(--font-mono); font-size: 0.65rem; font-weight: 600; text-transform: uppercase; letter-spacing: 0.05em; padding: 0.2em 0.6em; border-radius: 3px; vertical-align: middle; margin-left: 0.4rem; }
.badge-done { background: var(--green-dim); color: #fff; }
.badge-wip { background: var(--orange); color: #0b0e14; }
.badge-todo { background: var(--fg-subtle); color: var(--fg); }
.checklist { list-style: none; padding-left: 0; }
.checklist li { padding-left: 1.5rem; position: relative; margin-bottom: 0.5rem; }
.checklist li::before { position: absolute; left: 0; font-family: var(--font-mono); font-size: 0.85rem; }
.checklist li.done { color: var(--fg-muted); }
.checklist li.done::before { content: "\2713"; color: var(--green); }
.checklist li.todo::before { content: "\25CB"; color: var(--fg-subtle); }
.checklist li.wip::before { content: "\25D4"; color: var(--orange); }
.compare { display: grid; grid-template-columns: 1fr 1fr; gap: 1rem; margin-bottom: 2rem; }
.compare-card { background: var(--bg-surface); border: 1px solid var(--border); border-radius: var(--radius); padding: 1.25rem; }
.compare-card h4 { margin-top: 0; font-size: 0.82rem; }
.compare-card.after { border-color: var(--accent-dim); }
.compare-card ul { font-family: var(--font-mono); padding-left: 1.25rem; font-size: 0.8rem; }
hr { border: none; border-top: 1px solid var(--border); margin: 3rem 0; }
.progress-bar { position: fixed; top: 0; left: 0; height: 2px; background: var(--accent); z-index: 999; transition: width 0.1s linear; }
@media (max-width: 640px) {
.container { padding: 2rem 1rem 4rem; }
.hero h1 { font-size: 1.6rem; }
.toc ol { columns: 1; }
.compare { grid-template-columns: 1fr; }
table { font-size: 0.8rem; }
th, td { padding: 0.4rem 0.6rem; }
}
</style>
<link rel="preconnect" href="https://fonts.googleapis.com">
<link href="https://fonts.googleapis.com/css2?family=Noto+Emoji&display=swap" rel="stylesheet">
<style>
/* Self-hosted-by-CDN webfont: Departure Mono, loaded from jsDelivr's GitHub
   mirror. font-display: swap shows fallback text until the woff2 arrives. */
@font-face {
font-family: 'Departure Mono';
src: url('https://cdn.jsdelivr.net/gh/rektdeckard/departure-mono@latest/fonts/DepartureMono-Regular.woff2') format('woff2');
font-weight: normal;
font-style: normal;
font-display: swap;
}
</style>
</head>
<body>
<div class="progress-bar" id="progress"></div>
<div class="container">
<header class="hero">
<h1>honcho<span>-integration-spec</span></h1>
<p class="subtitle">Comparison of Hermes Agent vs. openclaw-honcho — and a porting spec for bringing Hermes patterns into other Honcho integrations.</p>
<div class="meta">
<span>hermes-agent / openclaw-honcho</span>
<span>Python + TypeScript</span>
<span>2026-03-09</span>
</div>
</header>
<nav class="toc">
<h2>Contents</h2>
<ol>
<li><a href="#overview">Overview</a></li>
<li><a href="#architecture">Architecture comparison</a></li>
<li><a href="#diff-table">Diff table</a></li>
<li><a href="#patterns">Hermes patterns to port</a></li>
<li><a href="#spec-async">Spec: async prefetch</a></li>
<li><a href="#spec-reasoning">Spec: dynamic reasoning level</a></li>
<li><a href="#spec-modes">Spec: per-peer memory modes</a></li>
<li><a href="#spec-identity">Spec: AI peer identity formation</a></li>
<li><a href="#spec-sessions">Spec: session naming strategies</a></li>
<li><a href="#spec-cli">Spec: CLI surface injection</a></li>
<li><a href="#openclaw-checklist">openclaw-honcho checklist</a></li>
<li><a href="#nanobot-checklist">nanobot-honcho checklist</a></li>
</ol>
</nav>
<!-- OVERVIEW -->
<section id="overview">
<h2>Overview</h2>
<p>Two independent Honcho integrations have been built for two different agent runtimes: <strong>Hermes Agent</strong> (Python, baked into the runner) and <strong>openclaw-honcho</strong> (TypeScript plugin via hook/tool API). Both use the same Honcho peer paradigm — dual peer model, <code>session.context()</code>, <code>peer.chat()</code> — but they made different tradeoffs at every layer.</p>
<p>This document maps those tradeoffs and defines a porting spec: a set of Hermes-originated patterns, each stated as an integration-agnostic interface, that any Honcho integration can adopt regardless of runtime or language.</p>
<div class="callout">
<strong>Scope</strong> Both integrations work correctly today. This spec is about the delta — patterns in Hermes that are worth propagating and patterns in openclaw-honcho that Hermes should eventually adopt. The spec is additive, not prescriptive.
</div>
</section>
<!-- ARCHITECTURE -->
<section id="architecture">
<h2>Architecture comparison</h2>
<h3>Hermes: baked-in runner</h3>
<p>Honcho is initialised directly inside <code>AIAgent.__init__</code>. There is no plugin boundary. Session management, context injection, async prefetch, and CLI surface are all first-class concerns of the runner. Context is injected once per session (baked into <code>_cached_system_prompt</code>) and never re-fetched mid-session — this maximises prefix cache hits at the LLM provider.</p>
<div class="mermaid">
%%{init: {'theme': 'dark', 'themeVariables': { 'primaryColor': '#1f3150', 'primaryTextColor': '#c9d1d9', 'primaryBorderColor': '#3d6ea5', 'lineColor': '#3d6ea5', 'secondaryColor': '#162030', 'tertiaryColor': '#11151c' }}}%%
flowchart TD
U["user message"] --> P["_honcho_prefetch()<br/>(reads cache — no HTTP)"]
P --> SP["_build_system_prompt()<br/>(first turn only, cached)"]
SP --> LLM["LLM call"]
LLM --> R["response"]
R --> FP["_honcho_fire_prefetch()<br/>(daemon threads, turn end)"]
FP --> C1["prefetch_context() thread"]
FP --> C2["prefetch_dialectic() thread"]
C1 --> CACHE["_context_cache / _dialectic_cache"]
C2 --> CACHE
style U fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style P fill:#1f3150,stroke:#3d6ea5,color:#c9d1d9
style SP fill:#1f3150,stroke:#3d6ea5,color:#c9d1d9
style LLM fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style R fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style FP fill:#2a1a40,stroke:#bc8cff,color:#c9d1d9
style C1 fill:#2a1a40,stroke:#bc8cff,color:#c9d1d9
style C2 fill:#2a1a40,stroke:#bc8cff,color:#c9d1d9
style CACHE fill:#11151c,stroke:#484f58,color:#6e7681
</div>
<h3>openclaw-honcho: hook-based plugin</h3>
<p>The plugin registers hooks against OpenClaw's event bus. Context is fetched synchronously inside <code>before_prompt_build</code> on every turn. Message capture happens in <code>agent_end</code>. The multi-agent hierarchy is tracked via <code>subagent_spawned</code>. This model is correct but every turn pays a blocking Honcho round-trip before the LLM call can begin.</p>
<div class="mermaid">
%%{init: {'theme': 'dark', 'themeVariables': { 'primaryColor': '#1f3150', 'primaryTextColor': '#c9d1d9', 'primaryBorderColor': '#3d6ea5', 'lineColor': '#3d6ea5', 'secondaryColor': '#162030', 'tertiaryColor': '#11151c' }}}%%
flowchart TD
U2["user message"] --> BPB["before_prompt_build<br/>(BLOCKING HTTP — every turn)"]
BPB --> CTX["session.context()"]
CTX --> SP2["system prompt assembled"]
SP2 --> LLM2["LLM call"]
LLM2 --> R2["response"]
R2 --> AE["agent_end hook"]
AE --> SAVE["session.addMessages()<br/>session.setMetadata()"]
style U2 fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style BPB fill:#3a1515,stroke:#f47067,color:#c9d1d9
style CTX fill:#3a1515,stroke:#f47067,color:#c9d1d9
style SP2 fill:#1f3150,stroke:#3d6ea5,color:#c9d1d9
style LLM2 fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style R2 fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style AE fill:#162030,stroke:#3d6ea5,color:#c9d1d9
style SAVE fill:#11151c,stroke:#484f58,color:#6e7681
</div>
</section>
<!-- DIFF TABLE -->
<section id="diff-table">
<h2>Diff table</h2>
<div class="table-wrap">
<table>
<thead>
<tr>
<th>Dimension</th>
<th>Hermes Agent</th>
<th>openclaw-honcho</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>Context injection timing</strong></td>
<td>Once per session (cached). Zero HTTP on response path after turn 1.</td>
<td>Every turn, blocking. Fresh context per turn but adds latency.</td>
</tr>
<tr>
<td><strong>Prefetch strategy</strong></td>
<td>Daemon threads fire at turn end; consumed next turn from cache.</td>
<td>None. Blocking call at prompt-build time.</td>
</tr>
<tr>
<td><strong>Dialectic (peer.chat)</strong></td>
<td>Prefetched async; result injected into system prompt next turn.</td>
<td>On-demand via <code>honcho_recall</code> / <code>honcho_analyze</code> tools.</td>
</tr>
<tr>
<td><strong>Reasoning level</strong></td>
<td>Dynamic: scales with message length. Floor = config default. Cap = "high".</td>
<td>Fixed per tool: recall=minimal, analyze=medium.</td>
</tr>
<tr>
<td><strong>Memory modes</strong></td>
<td><code>user_memory_mode</code> / <code>agent_memory_mode</code>: hybrid / honcho / local.</td>
<td>None. Always writes to Honcho.</td>
</tr>
<tr>
<td><strong>Write frequency</strong></td>
<td>async (background queue), turn, session, N turns.</td>
<td>After every agent_end (no control).</td>
</tr>
<tr>
<td><strong>AI peer identity</strong></td>
<td><code>observe_me=True</code>, <code>seed_ai_identity()</code>, <code>get_ai_representation()</code>, SOUL.md → AI peer.</td>
<td>Agent files uploaded to agent peer at setup. No ongoing self-observation seeding.</td>
</tr>
<tr>
<td><strong>Context scope</strong></td>
<td>User peer + AI peer representation, both injected.</td>
<td>User peer (owner) representation + conversation summary. <code>peerPerspective</code> on context call.</td>
</tr>
<tr>
<td><strong>Session naming</strong></td>
<td>per-directory / global / manual map / title-based.</td>
<td>Derived from platform session key.</td>
</tr>
<tr>
<td><strong>Multi-agent</strong></td>
<td>Single-agent only.</td>
<td>Parent observer hierarchy via <code>subagent_spawned</code>.</td>
</tr>
<tr>
<td><strong>Tool surface</strong></td>
<td>Single <code>query_user_context</code> tool (on-demand dialectic).</td>
<td>6 tools: session, profile, search, context (fast) + recall, analyze (LLM).</td>
</tr>
<tr>
<td><strong>Platform metadata</strong></td>
<td>Not stripped.</td>
<td>Explicitly stripped before Honcho storage.</td>
</tr>
<tr>
<td><strong>Message dedup</strong></td>
<td>None (sends on every save cycle).</td>
<td><code>lastSavedIndex</code> in session metadata prevents re-sending.</td>
</tr>
<tr>
<td><strong>CLI surface in prompt</strong></td>
<td>Management commands injected into system prompt. Agent knows its own CLI.</td>
<td>Not injected.</td>
</tr>
<tr>
<td><strong>AI peer name in identity</strong></td>
<td>Replaces "Hermes Agent" in DEFAULT_AGENT_IDENTITY when configured.</td>
<td>Not implemented.</td>
</tr>
<tr>
<td><strong>QMD / local file search</strong></td>
<td>Not implemented.</td>
<td>Passthrough tools when QMD backend configured.</td>
</tr>
<tr>
<td><strong>Workspace metadata</strong></td>
<td>Not implemented.</td>
<td><code>agentPeerMap</code> in workspace metadata tracks agent&#8594;peer ID.</td>
</tr>
</tbody>
</table>
</div>
</section>
<!-- PATTERNS -->
<section id="patterns">
<h2>Hermes patterns to port</h2>
<p>Six patterns from Hermes are worth adopting in any Honcho integration. They are described below as integration-agnostic interfaces — the implementation will differ per runtime, but the contract is the same.</p>
<div class="compare">
<div class="compare-card">
<h4>Patterns Hermes contributes</h4>
<ul>
<li>Async prefetch (zero-latency)</li>
<li>Dynamic reasoning level</li>
<li>Per-peer memory modes</li>
<li>AI peer identity formation</li>
<li>Session naming strategies</li>
<li>CLI surface injection</li>
</ul>
</div>
<div class="compare-card after">
<h4>Patterns openclaw contributes back</h4>
<ul>
<li>lastSavedIndex dedup</li>
<li>Platform metadata stripping</li>
<li>Multi-agent observer hierarchy</li>
<li>peerPerspective on context()</li>
<li>Tiered tool surface (fast/LLM)</li>
<li>Workspace agentPeerMap</li>
</ul>
</div>
</div>
</section>
<!-- SPEC: ASYNC PREFETCH -->
<section id="spec-async">
<h2>Spec: async prefetch</h2>
<h3>Problem</h3>
<p>Calling <code>session.context()</code> and <code>peer.chat()</code> synchronously before each LLM call adds 200&ndash;800ms of Honcho round-trip latency to every turn. Users experience this as the agent "thinking slowly."</p>
<h3>Pattern</h3>
<p>Fire both calls as non-blocking background work at the <strong>end</strong> of each turn. Store results in a per-session cache keyed by session ID. At the <strong>start</strong> of the next turn, pop from cache — the HTTP is already done. First turn is cold (empty cache); all subsequent turns are zero-latency on the response path.</p>
<h3>Interface contract</h3>
<pre><code><span class="cm">// TypeScript (openclaw / nanobot plugin shape)</span>
<span class="kw">interface</span> <span class="key">AsyncPrefetch</span> {
<span class="cm">// Fire context + dialectic fetches at turn end. Non-blocking.</span>
firePrefetch(sessionId: <span class="str">string</span>, userMessage: <span class="str">string</span>): <span class="kw">void</span>;
<span class="cm">// Pop cached results at turn start. Returns empty if cache is cold.</span>
popContextResult(sessionId: <span class="str">string</span>): ContextResult | <span class="kw">null</span>;
popDialecticResult(sessionId: <span class="str">string</span>): <span class="str">string</span> | <span class="kw">null</span>;
}
<span class="kw">type</span> <span class="key">ContextResult</span> = {
representation: <span class="str">string</span>;
card: <span class="str">string</span>[];
aiRepresentation?: <span class="str">string</span>; <span class="cm">// AI peer context if enabled</span>
summary?: <span class="str">string</span>; <span class="cm">// conversation summary if fetched</span>
};</code></pre>
<h3>Implementation notes</h3>
<ul>
<li>Python: <code>threading.Thread(daemon=True)</code>. Write to <code>dict[session_id, result]</code> — GIL makes this safe for simple writes.</li>
<li>TypeScript: <code>Promise</code> stored in <code>Map&lt;string, Promise&lt;ContextResult&gt;&gt;</code>. Await at pop time. If not resolved yet, skip (return null) — do not block.</li>
<li>The pop is destructive: clears the cache entry after reading so stale data never accumulates.</li>
<li>Prefetch should also fire on first turn (even though it won't be consumed until turn 2) — this ensures turn 2 is never cold.</li>
</ul>
<h3>openclaw-honcho adoption</h3>
<p>Move <code>session.context()</code> from <code>before_prompt_build</code> to a post-<code>agent_end</code> background task. Store result in <code>state.contextCache</code>. In <code>before_prompt_build</code>, read from cache instead of calling Honcho. If cache is empty (turn 1), inject nothing — the prompt is still valid without Honcho context on the first turn.</p>
</section>
<!-- SPEC: DYNAMIC REASONING LEVEL -->
<section id="spec-reasoning">
<h2>Spec: dynamic reasoning level</h2>
<h3>Problem</h3>
<p>Honcho's dialectic endpoint supports reasoning levels from <code>minimal</code> to <code>max</code>. A fixed level per tool wastes budget on simple queries and under-serves complex ones.</p>
<h3>Pattern</h3>
<p>Select the reasoning level dynamically based on the user's message. Use the configured default as a floor. Bump by message length. Cap auto-selection at <code>high</code> — never select <code>max</code> automatically.</p>
<h3>Interface contract</h3>
<pre><code><span class="cm">// Shared helper — identical logic in any language</span>
<span class="kw">const</span> LEVELS = [<span class="str">"minimal"</span>, <span class="str">"low"</span>, <span class="str">"medium"</span>, <span class="str">"high"</span>, <span class="str">"max"</span>];
<span class="kw">function</span> <span class="key">dynamicReasoningLevel</span>(
query: <span class="str">string</span>,
configDefault: <span class="str">string</span> = <span class="str">"low"</span>
): <span class="str">string</span> {
<span class="kw">const</span> baseIdx = Math.max(<span class="num">0</span>, LEVELS.indexOf(configDefault));
<span class="kw">const</span> n = query.length;
<span class="kw">const</span> bump = n &lt; <span class="num">120</span> ? <span class="num">0</span> : n &lt; <span class="num">400</span> ? <span class="num">1</span> : <span class="num">2</span>;
<span class="kw">return</span> LEVELS[Math.min(baseIdx + bump, <span class="num">3</span>)]; <span class="cm">// cap at "high" (idx 3)</span>
}</code></pre>
<h3>Config key</h3>
<p>Add a <code>dialecticReasoningLevel</code> config field (string, default <code>"low"</code>). This sets the floor. Users can raise or lower it. The dynamic bump always applies on top.</p>
<h3>openclaw-honcho adoption</h3>
<p>Apply in <code>honcho_recall</code> and <code>honcho_analyze</code>: replace the fixed <code>reasoningLevel</code> with the dynamic selector. <code>honcho_recall</code> should use floor <code>"minimal"</code> and <code>honcho_analyze</code> floor <code>"medium"</code> — both still bump with message length.</p>
</section>
<!-- SPEC: PER-PEER MEMORY MODES -->
<section id="spec-modes">
<h2>Spec: per-peer memory modes</h2>
<h3>Problem</h3>
<p>Users want independent control over whether user context and agent context are written locally, to Honcho, or both. A single <code>memoryMode</code> shorthand is not granular enough.</p>
<h3>Pattern</h3>
<p>Three modes per peer: <code>hybrid</code> (write both local + Honcho), <code>honcho</code> (Honcho only, disable local files), <code>local</code> (local files only, skip Honcho sync for this peer). Two orthogonal axes: user peer and agent peer.</p>
<h3>Config schema</h3>
<pre><code><span class="cm">// ~/.openclaw/openclaw.json (or ~/.nanobot/config.json)</span>
{
<span class="str">"plugins"</span>: {
<span class="str">"openclaw-honcho"</span>: {
<span class="str">"config"</span>: {
<span class="str">"apiKey"</span>: <span class="str">"..."</span>,
<span class="str">"memoryMode"</span>: <span class="str">"hybrid"</span>, <span class="cm">// shorthand: both peers</span>
<span class="str">"userMemoryMode"</span>: <span class="str">"honcho"</span>, <span class="cm">// override for user peer</span>
<span class="str">"agentMemoryMode"</span>: <span class="str">"hybrid"</span> <span class="cm">// override for agent peer</span>
}
}
}
}</code></pre>
<h3>Resolution order</h3>
<ol>
<li>Per-peer field (<code>userMemoryMode</code> / <code>agentMemoryMode</code>) — wins if present.</li>
<li>Shorthand <code>memoryMode</code> — applies to both peers as default.</li>
<li>Hardcoded default: <code>"hybrid"</code>.</li>
</ol>
<h3>Effect on Honcho sync</h3>
<ul>
<li><code>userMemoryMode=local</code>: skip adding user peer messages to Honcho.</li>
<li><code>agentMemoryMode=local</code>: skip adding assistant peer messages to Honcho.</li>
<li>Both local: skip <code>session.addMessages()</code> entirely.</li>
<li><code>userMemoryMode=honcho</code>: disable local USER.md writes.</li>
<li><code>agentMemoryMode=honcho</code>: disable local MEMORY.md / SOUL.md writes.</li>
</ul>
</section>
<!-- SPEC: AI PEER IDENTITY -->
<section id="spec-identity">
<h2>Spec: AI peer identity formation</h2>
<h3>Problem</h3>
<p>Honcho builds the user's representation organically by observing what the user says. The same mechanism exists for the AI peer — but only if <code>observe_me=True</code> is set for the agent peer. Without it, the agent peer accumulates nothing and Honcho's AI-side model never forms.</p>
<p>Additionally, existing persona files (SOUL.md, IDENTITY.md) should seed the AI peer's Honcho representation at first activation, rather than waiting for it to emerge from scratch.</p>
<h3>Part A: observe_me=True for agent peer</h3>
<pre><code><span class="cm">// TypeScript — in session.addPeers() call</span>
<span class="kw">await</span> session.addPeers([
[ownerPeer.id, { observeMe: <span class="kw">true</span>, observeOthers: <span class="kw">false</span> }],
[agentPeer.id, { observeMe: <span class="kw">true</span>, observeOthers: <span class="kw">true</span> }], <span class="cm">// was false</span>
]);</code></pre>
<p>This is a one-line change but foundational. Without it, Honcho's AI peer representation stays empty regardless of what the agent says.</p>
<h3>Part B: seedAiIdentity()</h3>
<pre><code><span class="kw">async function</span> <span class="key">seedAiIdentity</span>(
session: HonchoSession,
agentPeer: Peer,
content: <span class="str">string</span>,
source: <span class="str">string</span>
): Promise&lt;<span class="kw">boolean</span>&gt; {
<span class="kw">const</span> wrapped = [
<span class="str">`&lt;ai_identity_seed&gt;`</span>,
<span class="str">`&lt;source&gt;${source}&lt;/source&gt;`</span>,
<span class="str">``</span>,
content.trim(),
<span class="str">`&lt;/ai_identity_seed&gt;`</span>,
].join(<span class="str">"\n"</span>);
<span class="kw">await</span> agentPeer.addMessage(<span class="str">"assistant"</span>, wrapped);
<span class="kw">return true</span>;
}</code></pre>
<h3>Part C: migrate agent files at setup</h3>
<p>During <code>openclaw honcho setup</code>, upload agent-self files (SOUL.md, IDENTITY.md, AGENTS.md, BOOTSTRAP.md) to the agent peer using <code>seedAiIdentity()</code> instead of <code>session.uploadFile()</code>. This routes the content through Honcho's observation pipeline rather than the file store.</p>
<h3>Part D: AI peer name in identity</h3>
<p>When the agent has a configured name (non-default), inject it into the agent's self-identity prefix. In OpenClaw this means adding to the injected system prompt section:</p>
<pre><code><span class="cm">// In context hook return value</span>
<span class="kw">return</span> {
systemPrompt: [
agentName ? <span class="str">`You are ${agentName}.`</span> : <span class="str">""</span>,
<span class="str">"## User Memory Context"</span>,
...sections,
].filter(Boolean).join(<span class="str">"\n\n"</span>)
};</code></pre>
<h3>CLI surface: honcho identity subcommand</h3>
<pre><code>openclaw honcho identity &lt;file&gt; <span class="cm"># seed from file</span>
openclaw honcho identity --show <span class="cm"># show current AI peer representation</span></code></pre>
</section>
<!-- SPEC: SESSION NAMING -->
<section id="spec-sessions">
<h2>Spec: session naming strategies</h2>
<h3>Problem</h3>
<p>When Honcho is used across multiple projects or directories, a single global session means every project shares the same context. Per-directory sessions provide isolation without requiring users to name sessions manually.</p>
<h3>Strategies</h3>
<div class="table-wrap">
<table>
<thead><tr><th>Strategy</th><th>Session key</th><th>When to use</th></tr></thead>
<tbody>
<tr><td><code>per-directory</code></td><td>basename of CWD</td><td>Default. Each project gets its own session.</td></tr>
<tr><td><code>global</code></td><td>fixed string <code>"global"</code></td><td>Single cross-project session.</td></tr>
<tr><td>manual map</td><td>user-configured per path</td><td><code>sessions</code> config map overrides directory basename.</td></tr>
<tr><td>title-based</td><td>sanitized session title</td><td>When agent supports named sessions; title set mid-conversation.</td></tr>
</tbody>
</table>
</div>
<h3>Config schema</h3>
<pre><code>{
<span class="str">"sessionStrategy"</span>: <span class="str">"per-directory"</span>, <span class="cm">// "per-directory" | "global"</span>
<span class="str">"sessionPeerPrefix"</span>: <span class="kw">false</span>, <span class="cm">// prepend peer name to session key</span>
<span class="str">"sessions"</span>: { <span class="cm">// manual overrides</span>
<span class="str">"/home/user/projects/foo"</span>: <span class="str">"foo-project"</span>
}
}</code></pre>
<h3>CLI surface</h3>
<pre><code>openclaw honcho sessions <span class="cm"># list all mappings</span>
openclaw honcho map &lt;name&gt; <span class="cm"># map cwd to session name</span>
openclaw honcho map <span class="cm"># no-arg = list mappings</span></code></pre>
<p>Resolution order: manual map wins &rarr; session title &rarr; directory basename &rarr; platform key.</p>
</section>
<!-- SPEC: CLI SURFACE INJECTION -->
<section id="spec-cli">
<h2>Spec: CLI surface injection</h2>
<h3>Problem</h3>
<p>When a user asks "how do I change my memory settings?" or "what Honcho commands are available?" the agent either hallucinates or says it doesn't know. The agent should know its own management interface.</p>
<h3>Pattern</h3>
<p>When Honcho is active, append a compact command reference to the system prompt. The agent can cite these commands directly instead of guessing.</p>
<pre><code><span class="cm">// In context hook, append to systemPrompt</span>
<span class="kw">const</span> honchoSection = [
<span class="str">"# Honcho memory integration"</span>,
<span class="str">`Active. Session: ${sessionKey}. Mode: ${mode}.`</span>,
<span class="str">"Management commands:"</span>,
<span class="str">" openclaw honcho status — show config + connection"</span>,
<span class="str">" openclaw honcho mode [hybrid|honcho|local] — show or set memory mode"</span>,
<span class="str">" openclaw honcho sessions — list session mappings"</span>,
<span class="str">" openclaw honcho map &lt;name&gt; — map directory to session"</span>,
<span class="str">" openclaw honcho identity [file] [--show] — seed or show AI identity"</span>,
<span class="str">" openclaw honcho setup — full interactive wizard"</span>,
].join(<span class="str">"\n"</span>);</code></pre>
<div class="callout warn">
<strong>Keep it compact.</strong> This section is injected every turn. Keep it under 300 chars of context. List commands, not explanations — the agent can explain them on request.
</div>
</section>
<!-- OPENCLAW CHECKLIST -->
<section id="openclaw-checklist">
<h2>openclaw-honcho checklist</h2>
<p>Ordered by impact. Each item maps to a spec section above.</p>
<ul class="checklist">
<li class="todo"><strong>Async prefetch</strong> — move <code>session.context()</code> out of <code>before_prompt_build</code> into post-<code>agent_end</code> background Promise. Pop from cache at prompt build. (<a href="#spec-async">spec</a>)</li>
<li class="todo"><strong>observe_me=True for agent peer</strong> — one-line change in <code>session.addPeers()</code> config for agent peer. (<a href="#spec-identity">spec</a>)</li>
<li class="todo"><strong>Dynamic reasoning level</strong> — add <code>dynamicReasoningLevel()</code> helper; apply in <code>honcho_recall</code> and <code>honcho_analyze</code>. Add <code>dialecticReasoningLevel</code> to config schema. (<a href="#spec-reasoning">spec</a>)</li>
<li class="todo"><strong>Per-peer memory modes</strong> — add <code>userMemoryMode</code> / <code>agentMemoryMode</code> to config; gate Honcho sync and local writes accordingly. (<a href="#spec-modes">spec</a>)</li>
<li class="todo"><strong>seedAiIdentity()</strong> — add helper; apply during setup migration for SOUL.md / IDENTITY.md instead of <code>session.uploadFile()</code>. (<a href="#spec-identity">spec</a>)</li>
<li class="todo"><strong>Session naming strategies</strong> — add <code>sessionStrategy</code>, <code>sessions</code> map, <code>sessionPeerPrefix</code> to config; implement resolution function. (<a href="#spec-sessions">spec</a>)</li>
<li class="todo"><strong>CLI surface injection</strong> — append command reference to <code>before_prompt_build</code> return value when Honcho is active. (<a href="#spec-cli">spec</a>)</li>
<li class="todo"><strong>honcho identity subcommand</strong> — add <code>openclaw honcho identity</code> CLI command. (<a href="#spec-identity">spec</a>)</li>
<li class="todo"><strong>AI peer name injection</strong> — if <code>aiPeer</code> name configured, prepend to injected system prompt. (<a href="#spec-identity">spec</a>)</li>
<li class="todo"><strong>honcho mode / honcho sessions / honcho map</strong> — CLI parity with Hermes. (<a href="#spec-sessions">spec</a>)</li>
</ul>
<div class="callout success">
<strong>Already done in openclaw-honcho (do not re-implement):</strong> lastSavedIndex dedup, platform metadata stripping, multi-agent parent observer hierarchy, peerPerspective on context(), tiered tool surface (fast/LLM), workspace agentPeerMap, QMD passthrough, self-hosted Honcho support.
</div>
</section>
<!-- NANOBOT CHECKLIST -->
<section id="nanobot-checklist">
<h2>nanobot-honcho checklist</h2>
<p>nanobot-honcho is a greenfield integration. Start from openclaw-honcho's architecture (hook-based, dual peer) and apply all Hermes patterns from day one rather than retrofitting. Priority order:</p>
<h3>Phase 1 — core correctness</h3>
<ul class="checklist">
<li class="todo">Dual peer model (owner + agent peer), both with <code>observe_me=True</code></li>
<li class="todo">Message capture at turn end with <code>lastSavedIndex</code> dedup</li>
<li class="todo">Platform metadata stripping before Honcho storage</li>
<li class="todo">Async prefetch from day one — do not implement blocking context injection</li>
<li class="todo">Legacy file migration at first activation (USER.md → owner peer, SOUL.md → <code>seedAiIdentity()</code>)</li>
</ul>
<h3>Phase 2 — configuration</h3>
<ul class="checklist">
<li class="todo">Config schema: <code>apiKey</code>, <code>workspaceId</code>, <code>baseUrl</code>, <code>memoryMode</code>, <code>userMemoryMode</code>, <code>agentMemoryMode</code>, <code>dialecticReasoningLevel</code>, <code>sessionStrategy</code>, <code>sessions</code></li>
<li class="todo">Per-peer memory mode gating</li>
<li class="todo">Dynamic reasoning level</li>
<li class="todo">Session naming strategies</li>
</ul>
<h3>Phase 3 — tools and CLI</h3>
<ul class="checklist">
<li class="todo">Tool surface: <code>honcho_profile</code>, <code>honcho_recall</code>, <code>honcho_analyze</code>, <code>honcho_search</code>, <code>honcho_context</code></li>
<li class="todo">CLI: <code>setup</code>, <code>status</code>, <code>sessions</code>, <code>map</code>, <code>mode</code>, <code>identity</code></li>
<li class="todo">CLI surface injection into system prompt</li>
<li class="todo">AI peer name wired into agent identity</li>
</ul>
</section>
</div>
<script type="module">
// Render the inline flowcharts in the .mermaid divs above, loading Mermaid
// as an ES module from the jsDelivr CDN on page load.
import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@11/dist/mermaid.esm.min.mjs';
// securityLevel 'loose' relaxes Mermaid's sandboxing — presumably needed for
// the inline %%{init}%% theme directives in the diagrams; confirm before
// tightening. Font list mirrors the page's mono/emoji font stack.
mermaid.initialize({ startOnLoad: true, securityLevel: 'loose', fontFamily: 'Departure Mono, Noto Emoji, monospace' });
</script>
<script>
window.addEventListener('scroll', () => {
const bar = document.getElementById('progress');
const max = document.documentElement.scrollHeight - window.innerHeight;
bar.style.width = (max > 0 ? (window.scrollY / max) * 100 : 0) + '%';
});
</script>
</body>
</html>

View file

@ -1,377 +0,0 @@
# honcho-integration-spec
Comparison of Hermes Agent vs. openclaw-honcho — and a porting spec for bringing Hermes patterns into other Honcho integrations.
---
## Overview
Two independent Honcho integrations have been built for two different agent runtimes: **Hermes Agent** (Python, baked into the runner) and **openclaw-honcho** (TypeScript plugin via hook/tool API). Both use the same Honcho peer paradigm — dual peer model, `session.context()`, `peer.chat()` — but they made different tradeoffs at every layer.
This document maps those tradeoffs and defines a porting spec: a set of Hermes-originated patterns, each stated as an integration-agnostic interface, that any Honcho integration can adopt regardless of runtime or language.
> **Scope** Both integrations work correctly today. This spec is about the delta — patterns in Hermes that are worth propagating and patterns in openclaw-honcho that Hermes should eventually adopt. The spec is additive, not prescriptive.
---
## Architecture comparison
### Hermes: baked-in runner
Honcho is initialised directly inside `AIAgent.__init__`. There is no plugin boundary. Session management, context injection, async prefetch, and CLI surface are all first-class concerns of the runner. Context is injected once per session (baked into `_cached_system_prompt`) and never re-fetched mid-session — this maximises prefix cache hits at the LLM provider.
Turn flow:
```
user message
→ _honcho_prefetch() (reads cache — no HTTP)
→ _build_system_prompt() (first turn only, cached)
→ LLM call
→ response
→ _honcho_fire_prefetch() (daemon threads, turn end)
→ prefetch_context() thread ──┐
→ prefetch_dialectic() thread ─┴→ _context_cache / _dialectic_cache
```
### openclaw-honcho: hook-based plugin
The plugin registers hooks against OpenClaw's event bus. Context is fetched synchronously inside `before_prompt_build` on every turn. Message capture happens in `agent_end`. The multi-agent hierarchy is tracked via `subagent_spawned`. This model is correct but every turn pays a blocking Honcho round-trip before the LLM call can begin.
Turn flow:
```
user message
→ before_prompt_build (BLOCKING HTTP — every turn)
→ session.context()
→ system prompt assembled
→ LLM call
→ response
→ agent_end hook
→ session.addMessages()
→ session.setMetadata()
```
---
## Diff table
| Dimension | Hermes Agent | openclaw-honcho |
|---|---|---|
| **Context injection timing** | Once per session (cached). Zero HTTP on response path after turn 1. | Every turn, blocking. Fresh context per turn but adds latency. |
| **Prefetch strategy** | Daemon threads fire at turn end; consumed next turn from cache. | None. Blocking call at prompt-build time. |
| **Dialectic (peer.chat)** | Prefetched async; result injected into system prompt next turn. | On-demand via `honcho_recall` / `honcho_analyze` tools. |
| **Reasoning level** | Dynamic: scales with message length. Floor = config default. Cap = "high". | Fixed per tool: recall=minimal, analyze=medium. |
| **Memory modes** | `user_memory_mode` / `agent_memory_mode`: hybrid / honcho / local. | None. Always writes to Honcho. |
| **Write frequency** | async (background queue), turn, session, N turns. | After every agent_end (no control). |
| **AI peer identity** | `observe_me=True`, `seed_ai_identity()`, `get_ai_representation()`, SOUL.md → AI peer. | Agent files uploaded to agent peer at setup. No ongoing self-observation. |
| **Context scope** | User peer + AI peer representation, both injected. | User peer (owner) representation + conversation summary. `peerPerspective` on context call. |
| **Session naming** | per-directory / global / manual map / title-based. | Derived from platform session key. |
| **Multi-agent** | Single-agent only. | Parent observer hierarchy via `subagent_spawned`. |
| **Tool surface** | Single `query_user_context` tool (on-demand dialectic). | 6 tools: session, profile, search, context (fast) + recall, analyze (LLM). |
| **Platform metadata** | Not stripped. | Explicitly stripped before Honcho storage. |
| **Message dedup** | None. | `lastSavedIndex` in session metadata prevents re-sending. |
| **CLI surface in prompt** | Management commands injected into system prompt. Agent knows its own CLI. | Not injected. |
| **AI peer name in identity** | Replaces "Hermes Agent" in DEFAULT_AGENT_IDENTITY when configured. | Not implemented. |
| **QMD / local file search** | Not implemented. | Passthrough tools when QMD backend configured. |
| **Workspace metadata** | Not implemented. | `agentPeerMap` in workspace metadata tracks agent→peer ID. |
---
## Patterns
Six patterns from Hermes are worth adopting in any Honcho integration. Each is described as an integration-agnostic interface.
**Hermes contributes:**
- Async prefetch (zero-latency)
- Dynamic reasoning level
- Per-peer memory modes
- AI peer identity formation
- Session naming strategies
- CLI surface injection
**openclaw-honcho contributes back (Hermes should adopt):**
- `lastSavedIndex` dedup
- Platform metadata stripping
- Multi-agent observer hierarchy
- `peerPerspective` on `context()`
- Tiered tool surface (fast/LLM)
- Workspace `agentPeerMap`
---
## Spec: async prefetch
### Problem
Calling `session.context()` and `peer.chat()` synchronously before each LLM call adds 200–800ms of Honcho round-trip latency to every turn.
### Pattern
Fire both calls as non-blocking background work at the **end** of each turn. Store results in a per-session cache keyed by session ID. At the **start** of the next turn, pop from cache — the HTTP is already done. First turn is cold (empty cache); all subsequent turns are zero-latency on the response path.
### Interface contract
```typescript
interface AsyncPrefetch {
// Fire context + dialectic fetches at turn end. Non-blocking.
firePrefetch(sessionId: string, userMessage: string): void;
// Pop cached results at turn start. Returns empty if cache is cold.
popContextResult(sessionId: string): ContextResult | null;
popDialecticResult(sessionId: string): string | null;
}
type ContextResult = {
representation: string;
card: string[];
aiRepresentation?: string; // AI peer context if enabled
summary?: string; // conversation summary if fetched
};
```
### Implementation notes
- **Python:** `threading.Thread(daemon=True)`. Write to `dict[session_id, result]` — GIL makes this safe for simple writes.
- **TypeScript:** `Promise` stored in `Map<string, Promise<ContextResult>>`. Await at pop time. If not resolved yet, return null — do not block.
- The pop is destructive: clears the cache entry after reading so stale data never accumulates.
- Prefetch should also fire on first turn (even though it won't be consumed until turn 2).
### openclaw-honcho adoption
Move `session.context()` from `before_prompt_build` to a post-`agent_end` background task. Store result in `state.contextCache`. In `before_prompt_build`, read from cache instead of calling Honcho. If cache is empty (turn 1), inject nothing — the prompt is still valid without Honcho context on the first turn.
---
## Spec: dynamic reasoning level
### Problem
Honcho's dialectic endpoint supports reasoning levels from `minimal` to `max`. A fixed level per tool wastes budget on simple queries and under-serves complex ones.
### Pattern
Select the reasoning level dynamically based on the user's message. Use the configured default as a floor. Bump by message length. Cap auto-selection at `high` — never select `max` automatically.
### Logic
```
< 120 chars   → default (typically "low")
120–400 chars → one level above default (cap at "high")
> 400 chars   → two levels above default (cap at "high")
```
### Config key
Add `dialecticReasoningLevel` (string, default `"low"`). This sets the floor. The dynamic bump always applies on top.
### openclaw-honcho adoption
Apply in `honcho_recall` and `honcho_analyze`: replace fixed `reasoningLevel` with the dynamic selector. `honcho_recall` uses floor `"minimal"`, `honcho_analyze` uses floor `"medium"` — both still bump with message length.
---
## Spec: per-peer memory modes
### Problem
Users want independent control over whether user context and agent context are written locally, to Honcho, or both.
### Modes
| Mode | Effect |
|---|---|
| `hybrid` | Write to both local files and Honcho (default) |
| `honcho` | Honcho only — disable corresponding local file writes |
| `local` | Local files only — skip Honcho sync for this peer |
### Config schema
```json
{
"memoryMode": "hybrid",
"userMemoryMode": "honcho",
"agentMemoryMode": "hybrid"
}
```
Resolution order: per-peer field wins → shorthand `memoryMode` → default `"hybrid"`.
### Effect on Honcho sync
- `userMemoryMode=local`: skip adding user peer messages to Honcho
- `agentMemoryMode=local`: skip adding assistant peer messages to Honcho
- Both local: skip `session.addMessages()` entirely
- `userMemoryMode=honcho`: disable local USER.md writes
- `agentMemoryMode=honcho`: disable local MEMORY.md / SOUL.md writes
---
## Spec: AI peer identity formation
### Problem
Honcho builds the user's representation organically by observing what the user says. The same mechanism exists for the AI peer — but only if `observe_me=True` is set for the agent peer. Without it, the agent peer accumulates nothing.
Additionally, existing persona files (SOUL.md, IDENTITY.md) should seed the AI peer's Honcho representation at first activation.
### Part A: observe_me=True for agent peer
```typescript
await session.addPeers([
[ownerPeer.id, { observeMe: true, observeOthers: false }],
[agentPeer.id, { observeMe: true, observeOthers: true }], // was false
]);
```
One-line change. Foundational. Without it, the AI peer representation stays empty regardless of what the agent says.
### Part B: seedAiIdentity()
```typescript
async function seedAiIdentity(
agentPeer: Peer,
content: string,
source: string
): Promise<boolean> {
const wrapped = [
`<ai_identity_seed>`,
`<source>${source}</source>`,
``,
content.trim(),
`</ai_identity_seed>`,
].join("\n");
await agentPeer.addMessage("assistant", wrapped);
return true;
}
```
### Part C: migrate agent files at setup
During `honcho setup`, upload agent-self files (SOUL.md, IDENTITY.md, AGENTS.md) to the agent peer via `seedAiIdentity()` instead of `session.uploadFile()`. This routes content through Honcho's observation pipeline.
### Part D: AI peer name in identity
When the agent has a configured name, prepend it to the injected system prompt:
```typescript
const namePrefix = agentName ? `You are ${agentName}.\n\n` : "";
return { systemPrompt: namePrefix + "## User Memory Context\n\n" + sections };
```
### CLI surface
```
honcho identity <file> # seed from file
honcho identity --show # show current AI peer representation
```
---
## Spec: session naming strategies
### Problem
A single global session means every project shares the same Honcho context. Per-directory sessions provide isolation without requiring users to name sessions manually.
### Strategies
| Strategy | Session key | When to use |
|---|---|---|
| `per-directory` | basename of CWD | Default. Each project gets its own session. |
| `global` | fixed string `"global"` | Single cross-project session. |
| manual map | user-configured per path | `sessions` config map overrides directory basename. |
| title-based | sanitized session title | When agent supports named sessions set mid-conversation. |
### Config schema
```json
{
"sessionStrategy": "per-directory",
"sessionPeerPrefix": false,
"sessions": {
"/home/user/projects/foo": "foo-project"
}
}
```
### CLI surface
```
honcho sessions # list all mappings
honcho map <name> # map cwd to session name
honcho map # no-arg = list mappings
```
Resolution order: manual map → session title → directory basename → platform key.
---
## Spec: CLI surface injection
### Problem
When a user asks "how do I change my memory settings?" the agent either hallucinates or says it doesn't know. The agent should know its own management interface.
### Pattern
When Honcho is active, append a compact command reference to the system prompt. Keep it under 300 chars.
```
# Honcho memory integration
Active. Session: {sessionKey}. Mode: {mode}.
Management commands:
honcho status — show config + connection
honcho mode [hybrid|honcho|local] — show or set memory mode
honcho sessions — list session mappings
honcho map <name> — map directory to session
honcho identity [file] [--show] — seed or show AI identity
honcho setup — full interactive wizard
```
---
## openclaw-honcho checklist
Ordered by impact:
- [ ] **Async prefetch** — move `session.context()` out of `before_prompt_build` into post-`agent_end` background Promise
- [ ] **observe_me=True for agent peer** — one-line change in `session.addPeers()`
- [ ] **Dynamic reasoning level** — add helper; apply in `honcho_recall` and `honcho_analyze`; add `dialecticReasoningLevel` to config
- [ ] **Per-peer memory modes** — add `userMemoryMode` / `agentMemoryMode` to config; gate Honcho sync and local writes
- [ ] **seedAiIdentity()** — add helper; use during setup migration for SOUL.md / IDENTITY.md
- [ ] **Session naming strategies** — add `sessionStrategy`, `sessions` map, `sessionPeerPrefix`
- [ ] **CLI surface injection** — append command reference to `before_prompt_build` return value
- [ ] **honcho identity subcommand** — seed from file or `--show` current representation
- [ ] **AI peer name injection** — if `aiPeer` name configured, prepend to injected system prompt
- [ ] **honcho mode / sessions / map** — CLI parity with Hermes
Already done in openclaw-honcho (do not re-implement): `lastSavedIndex` dedup, platform metadata stripping, multi-agent parent observer, `peerPerspective` on `context()`, tiered tool surface, workspace `agentPeerMap`, QMD passthrough, self-hosted Honcho.
---
## nanobot-honcho checklist
Greenfield integration. Start from openclaw-honcho's architecture and apply all Hermes patterns from day one.
### Phase 1 — core correctness
- [ ] Dual peer model (owner + agent peer), both with `observe_me=True`
- [ ] Message capture at turn end with `lastSavedIndex` dedup
- [ ] Platform metadata stripping before Honcho storage
- [ ] Async prefetch from day one — do not implement blocking context injection
- [ ] Legacy file migration at first activation (USER.md → owner peer, SOUL.md → `seedAiIdentity()`)
### Phase 2 — configuration
- [ ] Config schema: `apiKey`, `workspaceId`, `baseUrl`, `memoryMode`, `userMemoryMode`, `agentMemoryMode`, `dialecticReasoningLevel`, `sessionStrategy`, `sessions`
- [ ] Per-peer memory mode gating
- [ ] Dynamic reasoning level
- [ ] Session naming strategies
### Phase 3 — tools and CLI
- [ ] Tool surface: `honcho_profile`, `honcho_recall`, `honcho_analyze`, `honcho_search`, `honcho_context`
- [ ] CLI: `setup`, `status`, `sessions`, `map`, `mode`, `identity`
- [ ] CLI surface injection into system prompt
- [ ] AI peer name wired into agent identity

View file

@ -1,142 +0,0 @@
# Migrating from OpenClaw to Hermes Agent
This guide covers how to import your OpenClaw settings, memories, skills, and API keys into Hermes Agent.
## Three Ways to Migrate
### 1. Automatic (during first-time setup)
When you run `hermes setup` for the first time and Hermes detects `~/.openclaw`, it automatically offers to import your OpenClaw data before configuration begins. Just accept the prompt and everything is handled for you.
### 2. CLI Command (quick, scriptable)
```bash
hermes claw migrate # Preview then migrate (always shows preview first)
hermes claw migrate --dry-run # Preview only, no changes
hermes claw migrate --preset user-data # Migrate without API keys/secrets
hermes claw migrate --yes # Skip confirmation prompt
```
The migration always shows a full preview of what will be imported before making any changes. You review the preview and confirm before anything is written.
**All options:**
| Flag | Description |
|------|-------------|
| `--source PATH` | Path to OpenClaw directory (default: `~/.openclaw`) |
| `--dry-run` | Preview only — no files are modified |
| `--preset {user-data,full}` | Migration preset (default: `full`). `user-data` excludes secrets |
| `--overwrite` | Overwrite existing files (default: skip conflicts) |
| `--migrate-secrets` | Include allowlisted secrets (auto-enabled with `full` preset) |
| `--workspace-target PATH` | Copy workspace instructions (AGENTS.md) to this absolute path |
| `--skill-conflict {skip,overwrite,rename}` | How to handle skill name conflicts (default: `skip`) |
| `--yes`, `-y` | Skip confirmation prompts |
### 3. Agent-Guided (interactive, with previews)
Ask the agent to run the migration for you:
```
> Migrate my OpenClaw setup to Hermes
```
The agent will use the `openclaw-migration` skill to:
1. Run a preview first to show what would change
2. Ask about conflict resolution (SOUL.md, skills, etc.)
3. Let you choose between `user-data` and `full` presets
4. Execute the migration with your choices
5. Print a detailed summary of what was migrated
## What Gets Migrated
### `user-data` preset
| Item | Source | Destination |
|------|--------|-------------|
| SOUL.md | `~/.openclaw/workspace/SOUL.md` | `~/.hermes/SOUL.md` |
| Memory entries | `~/.openclaw/workspace/MEMORY.md` | `~/.hermes/memories/MEMORY.md` |
| User profile | `~/.openclaw/workspace/USER.md` | `~/.hermes/memories/USER.md` |
| Skills | `~/.openclaw/workspace/skills/` | `~/.hermes/skills/openclaw-imports/` |
| Command allowlist | `~/.openclaw/workspace/exec_approval_patterns.yaml` | Merged into `~/.hermes/config.yaml` |
| Messaging settings | `~/.openclaw/config.yaml` (TELEGRAM_ALLOWED_USERS, MESSAGING_CWD) | `~/.hermes/.env` |
| TTS assets | `~/.openclaw/workspace/tts/` | `~/.hermes/tts/` |
Workspace files are also checked at `workspace.default/` and `workspace-main/` as fallback paths (OpenClaw renamed `workspace/` to `workspace-main/` in recent versions).
### `full` preset (adds to `user-data`)
| Item | Source | Destination |
|------|--------|-------------|
| Telegram bot token | `openclaw.json` channels config | `~/.hermes/.env` |
| OpenRouter API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` |
| OpenAI API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` |
| Anthropic API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` |
| ElevenLabs API key | `.env`, `openclaw.json`, or `openclaw.json["env"]` | `~/.hermes/.env` |
API keys are searched across four sources: inline config values, `~/.openclaw/.env`, the `openclaw.json` `"env"` sub-object, and per-agent auth profiles.
Only allowlisted secrets are ever imported. Other credentials are skipped and reported.
## OpenClaw Schema Compatibility
The migration handles both old and current OpenClaw config layouts:
- **Channel tokens**: Reads from flat paths (`channels.telegram.botToken`) and the newer `accounts.default` layout (`channels.telegram.accounts.default.botToken`)
- **TTS provider**: OpenClaw renamed "edge" to "microsoft" — both are recognized and mapped to Hermes' "edge"
- **Provider API types**: Both short (`openai`, `anthropic`) and hyphenated (`openai-completions`, `anthropic-messages`, `google-generative-ai`) values are mapped correctly
- **thinkingDefault**: All enum values are handled including newer ones (`minimal`, `xhigh`, `adaptive`)
- **Matrix**: Uses `accessToken` field (not `botToken`)
- **SecretRef formats**: Plain strings, env templates (`${VAR}`), and `source: "env"` SecretRefs are resolved. `source: "file"` and `source: "exec"` SecretRefs produce a warning — add those keys manually after migration.
## Conflict Handling
By default, the migration **will not overwrite** existing Hermes data:
- **SOUL.md** — skipped if one already exists in `~/.hermes/`
- **Memory entries** — skipped if memories already exist (to avoid duplicates)
- **Skills** — skipped if a skill with the same name already exists
- **API keys** — skipped if the key is already set in `~/.hermes/.env`
To overwrite conflicts, use `--overwrite`. The migration creates backups before overwriting.
For skills, you can also use `--skill-conflict rename` to import conflicting skills under a new name (e.g., `skill-name-imported`).
## Migration Report
Every migration produces a report showing:
- **Migrated items** — what was successfully imported
- **Conflicts** — items skipped because they already exist
- **Skipped items** — items not found in the source
- **Errors** — items that failed to import
For executed migrations, the full report is saved to `~/.hermes/migration/openclaw/<timestamp>/`.
## Post-Migration Notes
- **Skills require a new session** — imported skills take effect after restarting your agent or starting a new chat.
- **WhatsApp requires re-pairing** — WhatsApp uses QR-code pairing, not token-based auth. Run `hermes whatsapp` to pair.
- **Archive cleanup** — after migration, you'll be offered to rename `~/.openclaw/` to `.openclaw.pre-migration/` to prevent state confusion. You can also run `hermes claw cleanup` later.
## Troubleshooting
### "OpenClaw directory not found"
The migration looks for `~/.openclaw` by default, then tries `~/.clawdbot` and `~/.moltbot`. If your OpenClaw is installed elsewhere, use `--source`:
```bash
hermes claw migrate --source /path/to/.openclaw
```
### "Migration script not found"
The migration script ships with Hermes Agent. If you installed via pip (not git clone), the `optional-skills/` directory may not be present. Install the skill from the Skills Hub:
```bash
hermes skills install openclaw-migration
```
### Memory overflow
If your OpenClaw MEMORY.md or USER.md exceeds Hermes' character limits, excess entries are exported to an overflow file in the migration report directory. You can manually review and add the most important ones.
### API keys not found
Keys might be stored in different places depending on your OpenClaw setup:
- `~/.openclaw/.env` file
- Inline in `openclaw.json` under `models.providers.*.apiKey`
- In `openclaw.json` under the `"env"` or `"env.vars"` sub-objects
- In `~/.openclaw/agents/main/agent/auth-profiles.json`
The migration checks all four. If keys use `source: "file"` or `source: "exec"` SecretRefs, they can't be resolved automatically — add them via `hermes config set`.

View file

@ -1,608 +0,0 @@
# Pricing Accuracy Architecture
Date: 2026-03-16
## Goal
Hermes should only show dollar costs when they are backed by an official source for the user's actual billing path.
This design replaces the current static, heuristic pricing flow in:
- `run_agent.py`
- `agent/usage_pricing.py`
- `agent/insights.py`
- `cli.py`
with a provider-aware pricing system that:
- handles cache billing correctly
- distinguishes `actual` vs `estimated` vs `included` vs `unknown`
- reconciles post-hoc costs when providers expose authoritative billing data
- supports direct providers, OpenRouter, subscriptions, enterprise pricing, and custom endpoints
## Problems In The Current Design
Current Hermes behavior has four structural issues:
1. It stores only `prompt_tokens` and `completion_tokens`, which is insufficient for providers that bill cache reads and cache writes separately.
2. It uses a static model price table and fuzzy heuristics, which can drift from current official pricing.
3. It assumes public API list pricing matches the user's real billing path.
4. It has no distinction between live estimates and reconciled billed cost.
## Design Principles
1. Normalize usage before pricing.
2. Never fold cached tokens into plain input cost.
3. Track certainty explicitly.
4. Treat the billing path as part of the model identity.
5. Prefer official machine-readable sources over scraped docs.
6. Use post-hoc provider cost APIs when available.
7. Show `n/a` rather than inventing precision.
## High-Level Architecture
The new system has four layers:
1. `usage_normalization`
Converts raw provider usage into a canonical usage record.
2. `pricing_source_resolution`
Determines the billing path, source of truth, and applicable pricing source.
3. `cost_estimation_and_reconciliation`
Produces an immediate estimate when possible, then replaces or annotates it with actual billed cost later.
4. `presentation`
`/usage`, `/insights`, and the status bar display cost with certainty metadata.
## Canonical Usage Record
Add a canonical usage model that every provider path maps into before any pricing math happens.
Suggested structure:
```python
@dataclass
class CanonicalUsage:
provider: str
billing_provider: str
model: str
billing_route: str
input_tokens: int = 0
output_tokens: int = 0
cache_read_tokens: int = 0
cache_write_tokens: int = 0
reasoning_tokens: int = 0
request_count: int = 1
raw_usage: dict[str, Any] | None = None
raw_usage_fields: dict[str, str] | None = None
computed_fields: set[str] | None = None
provider_request_id: str | None = None
provider_generation_id: str | None = None
provider_response_id: str | None = None
```
Rules:
- `input_tokens` means non-cached input only.
- `cache_read_tokens` and `cache_write_tokens` are never merged into `input_tokens`.
- `output_tokens` excludes cache metrics.
- `reasoning_tokens` is telemetry unless a provider officially bills it separately.
This is the same normalization pattern used by `opencode`, extended with provenance and reconciliation ids.
## Provider Normalization Rules
### OpenAI Direct
Source usage fields:
- `prompt_tokens`
- `completion_tokens`
- `prompt_tokens_details.cached_tokens`
Normalization:
- `cache_read_tokens = cached_tokens`
- `input_tokens = prompt_tokens - cached_tokens`
- `cache_write_tokens = 0` unless OpenAI exposes it in the relevant route
- `output_tokens = completion_tokens`
### Anthropic Direct
Source usage fields:
- `input_tokens`
- `output_tokens`
- `cache_read_input_tokens`
- `cache_creation_input_tokens`
Normalization:
- `input_tokens = input_tokens`
- `output_tokens = output_tokens`
- `cache_read_tokens = cache_read_input_tokens`
- `cache_write_tokens = cache_creation_input_tokens`
### OpenRouter
Estimate-time usage normalization should use the response usage payload with the same rules as the underlying provider when possible.
Reconciliation-time records should also store:
- OpenRouter generation id
- native token fields when available
- `total_cost`
- `cache_discount`
- `upstream_inference_cost`
- `is_byok`
### Gemini / Vertex
Use official Gemini or Vertex usage fields where available.
If cached content tokens are exposed:
- map them to `cache_read_tokens`
If a route exposes no cache creation metric:
- store `cache_write_tokens = 0`
- preserve the raw usage payload for later extension
### DeepSeek And Other Direct Providers
Normalize only the fields that are officially exposed.
If a provider does not expose cache buckets:
- do not infer them unless the provider explicitly documents how to derive them
### Subscription / Included-Cost Routes
These still use the canonical usage model.
Tokens are tracked normally. Cost depends on billing mode, not on whether usage exists.
## Billing Route Model
Hermes must stop keying pricing solely by `model`.
Introduce a billing route descriptor:
```python
@dataclass
class BillingRoute:
provider: str
base_url: str | None
model: str
billing_mode: str
organization_hint: str | None = None
```
`billing_mode` values:
- `official_cost_api`
- `official_generation_api`
- `official_models_api`
- `official_docs_snapshot`
- `subscription_included`
- `user_override`
- `custom_contract`
- `unknown`
Examples:
- OpenAI direct API with Costs API access: `official_cost_api`
- Anthropic direct API with Usage & Cost API access: `official_cost_api`
- OpenRouter request before reconciliation: `official_models_api`
- OpenRouter request after generation lookup: `official_generation_api`
- GitHub Copilot style subscription route: `subscription_included`
- local OpenAI-compatible server: `unknown`
- enterprise contract with configured rates: `custom_contract`
## Cost Status Model
Every displayed cost should have:
```python
@dataclass
class CostResult:
amount_usd: Decimal | None
status: Literal["actual", "estimated", "included", "unknown"]
source: Literal[
"provider_cost_api",
"provider_generation_api",
"provider_models_api",
"official_docs_snapshot",
"user_override",
"custom_contract",
"none",
]
label: str
fetched_at: datetime | None
pricing_version: str | None
notes: list[str]
```
Presentation rules:
- `actual`: show dollar amount as final
- `estimated`: show dollar amount with estimate labeling
- `included`: show `included` or `$0.00 (included)` depending on UX choice
- `unknown`: show `n/a`
## Official Source Hierarchy
Resolve cost using this order:
1. Request-level or account-level official billed cost
2. Official machine-readable model pricing
3. Official docs snapshot
4. User override or custom contract
5. Unknown
The system must never skip to a lower level if a higher-confidence source exists for the current billing route.
## Provider-Specific Truth Rules
### OpenAI Direct
Preferred truth:
1. Costs API for reconciled spend
2. Official pricing page for live estimate
### Anthropic Direct
Preferred truth:
1. Usage & Cost API for reconciled spend
2. Official pricing docs for live estimate
### OpenRouter
Preferred truth:
1. `GET /api/v1/generation` for reconciled `total_cost`
2. `GET /api/v1/models` pricing for live estimate
Do not use underlying provider public pricing as the source of truth for OpenRouter billing.
### Gemini / Vertex
Preferred truth:
1. official billing export or billing API for reconciled spend when available for the route
2. official pricing docs for estimate
### DeepSeek
Preferred truth:
1. official machine-readable cost source if available in the future
2. official pricing docs snapshot today
### Subscription-Included Routes
Preferred truth:
1. explicit route config marking the model as included in subscription
These should display `included`, not an API list-price estimate.
### Custom Endpoint / Local Model
Preferred truth:
1. user override
2. custom contract config
3. unknown
These should default to `unknown`.
## Pricing Catalog
Replace the current `MODEL_PRICING` dict with a richer pricing catalog.
Suggested record:
```python
@dataclass
class PricingEntry:
provider: str
route_pattern: str
model_pattern: str
input_cost_per_million: Decimal | None = None
output_cost_per_million: Decimal | None = None
cache_read_cost_per_million: Decimal | None = None
cache_write_cost_per_million: Decimal | None = None
request_cost: Decimal | None = None
image_cost: Decimal | None = None
source: str = "official_docs_snapshot"
source_url: str | None = None
fetched_at: datetime | None = None
pricing_version: str | None = None
```
The catalog should be route-aware:
- `openai:gpt-5`
- `anthropic:claude-opus-4-6`
- `openrouter:anthropic/claude-opus-4.6`
- `copilot:gpt-4o`
This avoids conflating direct-provider billing with aggregator billing.
## Pricing Sync Architecture
Introduce a pricing sync subsystem instead of manually maintaining a single hardcoded table.
Suggested modules:
- `agent/pricing/catalog.py`
- `agent/pricing/sources.py`
- `agent/pricing/sync.py`
- `agent/pricing/reconcile.py`
- `agent/pricing/types.py`
### Sync Sources
- OpenRouter models API
- official provider docs snapshots where no API exists
- user overrides from config
### Sync Output
Cache pricing entries locally with:
- source URL
- fetch timestamp
- version/hash
- confidence/source type
### Sync Frequency
- startup warm cache
- background refresh every 6 to 24 hours depending on source
- manual `hermes pricing sync`
## Reconciliation Architecture
Live requests may produce only an estimate initially. Hermes should reconcile them later when a provider exposes actual billed cost.
Suggested flow:
1. Agent call completes.
2. Hermes stores canonical usage plus reconciliation ids.
3. Hermes computes an immediate estimate if a pricing source exists.
4. A reconciliation worker fetches actual cost when supported.
5. Session and message records are updated with `actual` cost.
This can run:
- inline for cheap lookups
- asynchronously for delayed provider accounting
## Persistence Changes
Session storage should stop storing only aggregate prompt/completion totals.
Add fields for both usage and cost certainty:
- `input_tokens`
- `output_tokens`
- `cache_read_tokens`
- `cache_write_tokens`
- `reasoning_tokens`
- `estimated_cost_usd`
- `actual_cost_usd`
- `cost_status`
- `cost_source`
- `pricing_version`
- `billing_provider`
- `billing_mode`
If schema expansion is too large for one PR, add a new pricing events table:
```text
session_cost_events
id
session_id
request_id
provider
model
billing_mode
input_tokens
output_tokens
cache_read_tokens
cache_write_tokens
estimated_cost_usd
actual_cost_usd
cost_status
cost_source
pricing_version
created_at
updated_at
```
## Hermes Touchpoints
### `run_agent.py`
Current responsibility:
- parse raw provider usage
- update session token counters
New responsibility:
- build `CanonicalUsage`
- update canonical counters
- store reconciliation ids
- emit usage event to pricing subsystem
### `agent/usage_pricing.py`
Current responsibility:
- static lookup table
- direct cost arithmetic
New responsibility:
- move or replace with pricing catalog facade
- no fuzzy model-family heuristics
- no direct pricing without billing-route context
### `cli.py`
Current responsibility:
- compute session cost directly from prompt/completion totals
New responsibility:
- display `CostResult`
- show status badges:
- `actual`
- `estimated`
- `included`
- `n/a`
### `agent/insights.py`
Current responsibility:
- recompute historical estimates from static pricing
New responsibility:
- aggregate stored pricing events
- prefer actual cost over estimate
- surface estimates only when reconciliation is unavailable
## UX Rules
### Status Bar
Show one of:
- `$1.42`
- `~$1.42`
- `included`
- `cost n/a`
Where:
- `$1.42` means `actual`
- `~$1.42` means `estimated`
- `included` means subscription-backed or explicitly zero-cost route
- `cost n/a` means unknown
### `/usage`
Show:
- token buckets
- estimated cost
- actual cost if available
- cost status
- pricing source
### `/insights`
Aggregate:
- actual cost totals
- estimated-only totals
- unknown-cost sessions count
- included-cost sessions count
## Config And Overrides
Add user-configurable pricing overrides in config:
```yaml
pricing:
mode: hybrid
sync_on_startup: true
sync_interval_hours: 12
overrides:
- provider: openrouter
model: anthropic/claude-opus-4.6
billing_mode: custom_contract
input_cost_per_million: 4.25
output_cost_per_million: 22.0
cache_read_cost_per_million: 0.5
cache_write_cost_per_million: 6.0
included_routes:
- provider: copilot
model: "*"
- provider: codex-subscription
model: "*"
```
Overrides must win over catalog defaults for the matching billing route.
## Rollout Plan
### Phase 1
- add canonical usage model
- split cache token buckets in `run_agent.py`
- stop pricing cache-inflated prompt totals
- preserve current UI with improved backend math
### Phase 2
- add route-aware pricing catalog
- integrate OpenRouter models API sync
- add `estimated` vs `included` vs `unknown`
### Phase 3
- add reconciliation for OpenRouter generation cost
- add actual cost persistence
- update `/insights` to prefer actual cost
### Phase 4
- add direct OpenAI and Anthropic reconciliation paths
- add user overrides and contract pricing
- add pricing sync CLI command
## Testing Strategy
Add tests for:
- OpenAI cached token subtraction
- Anthropic cache read/write separation
- OpenRouter estimated vs actual reconciliation
- subscription-backed models showing `included`
- custom endpoints showing `n/a`
- override precedence
- stale catalog fallback behavior
Current tests that assume heuristic pricing should be replaced with route-aware expectations.
## Non-Goals
- exact enterprise billing reconstruction without an official source or user override
- backfilling perfect historical cost for old sessions that lack cache bucket data
- scraping arbitrary provider web pages at request time
## Recommendation
Do not expand the existing `MODEL_PRICING` dict.
That path cannot satisfy the product requirement. Hermes should instead migrate to:
- canonical usage normalization
- route-aware pricing sources
- estimate-then-reconcile cost lifecycle
- explicit certainty states in the UI
This is the minimum architecture that makes the statement "Hermes pricing is backed by official sources where possible, and otherwise clearly labeled" defensible.

View file

@ -1,108 +0,0 @@
# Ink Gateway TUI Migration — Post-mortem
Planned: 2026-04-01 · Delivered: 2026-04 · Status: shipped, classic (prompt_toolkit) CLI still present
## What Shipped
Three layers, same repo, Python runtime unchanged.
```
ui-tui (Node/TS) ──stdio JSON-RPC──▶ tui_gateway (Py) ──▶ AIAgent (run_agent.py)
```
### Backend — `tui_gateway/`
```
tui_gateway/
├── entry.py # subprocess entrypoint, stdio read/write loop
├── server.py # everything: sessions dict, @method handlers, _emit
├── render.py # stream renderer, diff rendering, message rendering
├── slash_worker.py # subprocess that runs hermes_cli slash commands
└── __init__.py
```
`server.py` owns the full runtime-control surface: session store (`_sessions: dict[str, dict]`), method registry (`@method("…")` decorator), event emitter (`_emit`), agent lifecycle (`_make_agent`, `_init_session`, `_wire_callbacks`), approval/sudo/clarify round-trips, and JSON-RPC dispatch.
Protocol methods (`@method(...)` in `server.py`):
- session: `session.{create, resume, list, close, interrupt, usage, history, compress, branch, title, save, undo}`
- prompt: `prompt.{submit, background, btw}`
- tools: `tools.{list, show, configure}`
- slash: `slash.exec`, `command.{dispatch, resolve}`, `commands.catalog`, `complete.{path, slash}`
- approvals: `approval.respond`, `sudo.respond`, `clarify.respond`, `secret.respond`
- config/state: `config.{get, set, show}`, `model.options`, `reload.mcp`
- ops: `shell.exec`, `cli.exec`, `terminal.resize`, `input.detect_drop`, `clipboard.paste`, `paste.collapse`, `image.attach`, `process.stop`
- misc: `agents.list`, `skills.manage`, `plugins.list`, `cron.manage`, `insights.get`, `rollback.{list, diff, restore}`, `browser.manage`
Protocol events (`_emit(…)` → handled in `ui-tui/src/app/createGatewayEventHandler.ts`):
- lifecycle: `gateway.{ready, stderr}`, `session.info`, `skin.changed`
- stream: `message.{start, delta, complete}`, `thinking.delta`, `reasoning.{delta, available}`, `status.update`
- tools: `tool.{start, progress, complete, generating}`, `subagent.{start, thinking, tool, progress, complete}`
- interactive: `approval.request`, `sudo.request`, `clarify.request`, `secret.request`
- async: `background.complete`, `btw.complete`, `error`
### Frontend — `ui-tui/src/`
```
src/
├── entry.tsx # node bootstrap: bootBanner → spawn python → dynamic-import Ink → render(<App/>)
├── app.tsx # <GatewayProvider> wraps <AppLayout>
├── bootBanner.ts # raw-ANSI banner to stdout in ~2ms, pre-React
├── gatewayClient.ts # JSON-RPC client over child_process stdio
├── gatewayTypes.ts # typed RPC responses + GatewayEvent union
├── theme.ts # DEFAULT_THEME + fromSkin
├── app/ # hooks + stores — the orchestration layer
│ ├── uiStore.ts # nanostore: sid, info, busy, usage, theme, status…
│ ├── turnStore.ts # nanostore: per-turn activity / reasoning / tools
│ ├── turnController.ts # imperative singleton for stream-time operations
│ ├── overlayStore.ts # nanostore: modal/overlay state
│ ├── useMainApp.ts # top-level composition hook
│ ├── useSessionLifecycle.ts # session.create/resume/close/reset
│ ├── useSubmission.ts # shell/slash/prompt dispatch + interpolation
│ ├── useConfigSync.ts # config.get + mtime poll
│ ├── useComposerState.ts # input buffer, paste snippets, editor mode
│ ├── useInputHandlers.ts # key bindings
│ ├── createGatewayEventHandler.ts # event-stream dispatcher
│ ├── createSlashHandler.ts # slash command router (registry + python fallback)
│ └── slash/commands/ # core.ts, ops.ts, session.ts — TS-owned slash commands
├── components/ # AppLayout, AppChrome, AppOverlays, MessageLine, Thinking, Markdown, pickers, prompts, Banner, SessionPanel
├── config/ # env, limits, timing constants
├── content/ # charms, faces, fortunes, hotkeys, placeholders, verbs
├── domain/ # details, messages, paths, roles, slash, usage, viewport
├── protocol/ # interpolation, paste regex
├── hooks/ # useCompletion, useInputHistory, useQueue, useVirtualHistory
└── lib/ # history, messages, osc52, rpc, text
```
### CLI entry points — `hermes_cli/main.py`
- `hermes --tui``node dist/entry.js` (auto-builds when `.ts`/`.tsx` newer than `dist/entry.js`)
- `hermes --tui --dev``tsx src/entry.tsx` (skip build)
- `HERMES_TUI_DIR=…` → external prebuilt dist (nix, distro packaging)
## Diverged From Original Plan
| Plan | Reality | Why |
|---|---|---|
| `tui_gateway/{controller,session_state,events,protocol}.py` | all collapsed into `server.py` | no second consumer ever emerged, keeping one file cheaper than four |
| `ui-tui/src/main.tsx` | split into `entry.tsx` (bootstrap) + `app.tsx` (shell) | boot banner + early python spawn wanted a pre-React moment |
| `ui-tui/src/state/store.ts` | three nanostores (`uiStore`, `turnStore`, `overlayStore`) | separate lifetimes: ui persists, turn resets per reply, overlay is modal |
| `approval.requested` / `sudo.requested` / `clarify.requested` | `*.request` (no `-ed`) | cosmetic |
| `session.cancel` | dropped | `session.interrupt` covers it |
| `HERMES_EXPERIMENTAL_TUI=1`, `display.experimental_tui: true`, `/tui on/off/status` | none shipped | `--tui` went from opt-in to first-class without an experimental phase |
## Post-migration Additions (not in original plan)
- **Async `session.create`** — returns sid in ~1ms, agent builds on a background thread, `session.info` broadcasts when ready; `_wait_agent()` gates every agent-touching handler via `_sess`
- **`bootBanner`** — raw-ANSI logo painted to stdout at T≈2ms, before Ink loads; `<AlternateScreen>` wipes it seamlessly when React mounts
- **Selection uniform bg**`theme.color.selectionBg` wired via `useSelection().setSelectionBgColor`; replaces SGR-inverse per-cell swap that fragmented over amber/gold fg
- **Slash command registry** — TS-owned commands in `app/slash/commands/{core,ops,session}.ts`, everything else falls through to `slash.exec` (python worker)
- **Turn store + controller split** — imperative singleton (`turnController`) holds refs/timers, nanostore (`turnStore`) holds render-visible state
## What's Still Open
- **Classic CLI not deleted.** `cli.py` still has ~80 `prompt_toolkit` references; classic REPL is still the default when `--tui` is absent. The original plan's "Cut 4 · prompt_toolkit removal later" hasn't happened.
- **No config-file opt-in.** `HERMES_EXPERIMENTAL_TUI` and `display.experimental_tui` were never built; only the CLI flag exists. Fine for now — if we want "default to TUI", a single line in `main.py` flips it.

View file

@ -1,106 +0,0 @@
# ============================================================================
# Hermes Agent — Example Skin Template
# ============================================================================
#
# Copy this file to ~/.hermes/skins/<name>.yaml to create a custom skin.
# All fields are optional — missing values inherit from the default skin.
# Activate with: /skin <name> or display.skin: <name> in config.yaml
#
# Keys are marked:
# (both) — applies to both the classic CLI and the TUI
# (classic) — classic CLI only (see hermes --tui in user-guide/tui.md)
# (tui) — TUI only
#
# See hermes_cli/skin_engine.py for the full schema reference.
# ============================================================================
# Required: unique skin name (used in /skin command and config)
name: example
description: An example custom skin — copy and modify this template
# ── Colors ──────────────────────────────────────────────────────────────────
# Hex color values. These control the visual palette.
colors:
# Banner panel (the startup welcome box) — (both)
banner_border: "#CD7F32" # Panel border
banner_title: "#FFD700" # Panel title text
banner_accent: "#FFBF00" # Section headers (Available Tools, Skills, etc.)
banner_dim: "#B8860B" # Dim/muted text (separators, model info)
banner_text: "#FFF8DC" # Body text (tool names, skill names)
# UI elements — (both)
ui_accent: "#FFBF00" # General accent (falls back to banner_accent)
ui_label: "#4dd0e1" # Labels
ui_ok: "#4caf50" # Success indicators
ui_error: "#ef5350" # Error indicators
ui_warn: "#ffa726" # Warning indicators
# Input area
prompt: "#FFF8DC" # Prompt text / `` glyph color (both)
input_rule: "#CD7F32" # Horizontal rule above input (classic)
# Response box — (classic)
response_border: "#FFD700" # Response box border
# Session display — (both)
session_label: "#DAA520" # "Session: " label
session_border: "#8B8682" # Session ID text
# TUI / CLI surfaces — (classic: status bar, voice badge, completion meta)
status_bar_bg: "#1a1a2e" # Status / usage bar background (classic)
voice_status_bg: "#1a1a2e" # Voice-mode badge background (classic)
completion_menu_bg: "#1a1a2e" # Completion list background (both)
completion_menu_current_bg: "#333355" # Active completion row background (both)
completion_menu_meta_bg: "#1a1a2e" # Completion meta column bg (classic)
completion_menu_meta_current_bg: "#333355" # Active meta bg (classic)
# Drag-to-select background — (tui)
selection_bg: "#3a3a55" # Uniform selection highlight in the TUI
# ── Spinner ─────────────────────────────────────────────────────────────────
# (classic) — the TUI uses its own animated indicators; spinner config here
# is only read by the classic prompt_toolkit CLI.
spinner:
# Faces shown while waiting for the API response
waiting_faces:
- "(。◕‿◕。)"
- "(◕‿◕✿)"
- "٩(◕‿◕。)۶"
# Faces shown during extended thinking/reasoning
thinking_faces:
- "(。•́︿•̀。)"
- "(◔_◔)"
- "(¬‿¬)"
# Verbs used in spinner messages (e.g., "pondering your request...")
thinking_verbs:
- "pondering"
- "contemplating"
- "musing"
- "ruminating"
# Optional: left/right decorations around the spinner
# Each entry is a [left, right] pair. Omit entirely for no wings.
# wings:
# - ["⟪⚔", "⚔⟫"]
# - ["⟪▲", "▲⟫"]
# ── Branding ────────────────────────────────────────────────────────────────
# Text strings used throughout the interface.
branding:
agent_name: "Hermes Agent" # (both) Banner title, about display
welcome: "Welcome! Type your message or /help for commands." # (both)
goodbye: "Goodbye! ⚕" # (both) Exit message
response_label: " ⚕ Hermes " # (classic) Response box header label
prompt_symbol: " " # (both) Input prompt glyph
help_header: "(^_^)? Available Commands" # (both) /help overlay title
# ── Tool Output ─────────────────────────────────────────────────────────────
# Character used as the prefix for tool output lines. (both)
# Default is "┊" (thin dotted vertical line). Some alternatives:
# "╎" (light triple dash vertical)
# "▏" (left one-eighth block)
# "│" (box drawing light vertical)
# "┃" (box drawing heavy vertical)
tool_prefix: "┊"

View file

@ -1,329 +0,0 @@
# Container-Aware CLI Review Fixes Spec
**PR:** NousResearch/hermes-agent#7543
**Review:** cursor[bot] bugbot review (4094049442) + two prior rounds
**Date:** 2026-04-12
**Branch:** `feat/container-aware-cli-clean`
## Review Issues Summary
Six issues were raised across three bugbot review rounds; all six were fixed in intermediate commits (79e8cd12, 38277a6a, 726cf90f), as the table below shows. This spec addresses remaining design concerns surfaced by those reviews and simplifies the implementation based on interview decisions.
| # | Issue | Severity | Status |
|---|-------|----------|--------|
| 1 | `os.execvp` retry loop unreachable | Medium | Fixed in 79e8cd12 (switched to subprocess.run) |
| 2 | Redundant `shutil.which("sudo")` | Medium | Fixed in 38277a6a (reuses `sudo` var) |
| 3 | Missing `chown -h` on symlink update | Low | Fixed in 38277a6a |
| 4 | Container routing after `parse_args()` | High | Fixed in 726cf90f |
| 5 | Hardcoded `/home/${user}` | Medium | Fixed in 726cf90f |
| 6 | Group membership not gated on `container.enable` | Low | Fixed in 726cf90f |
The mechanical fixes are in place but the overall design needs revision. The retry loop, error swallowing, and process model have deeper issues than what the bugbot flagged.
---
## Spec: Revised `_exec_in_container`
### Design Principles
1. **Let it crash.** No silent fallbacks. If `.container-mode` exists but something goes wrong, the error propagates naturally (Python traceback). Container routing is skipped only when `.container-mode` doesn't exist, `HERMES_DEV=1` is set, or we are already running inside the container (matching the execution flow below).
2. **No retries.** Probe once for sudo, exec once. If it fails, docker/podman's stderr reaches the user verbatim.
3. **Completely transparent.** No error wrapping, no prefixes, no spinners. Docker's output goes straight through.
4. **`os.execvp` on the happy path.** Replace the Python process entirely so there's no idle parent during interactive sessions. Note: `execvp` never returns on success (process is replaced) and raises `OSError` on failure (it does not return a value). The container process's exit code becomes the process exit code by definition — no explicit propagation needed.
5. **One human-readable exception to "let it crash".** `subprocess.TimeoutExpired` from the sudo probe gets a specific catch with a readable message, since a raw traceback for "your Docker daemon is slow" is confusing. All other exceptions propagate naturally.
### Execution Flow
```
1. get_container_exec_info()
- HERMES_DEV=1 → return None (skip routing)
- Inside container → return None (skip routing)
- .container-mode doesn't exist → return None (skip routing)
- .container-mode exists → parse and return dict
- .container-mode exists but malformed/unreadable → LET IT CRASH (no try/except)
2. _exec_in_container(container_info, sys.argv[1:])
a. shutil.which(backend) → if None, print "{backend} not found on PATH" and sys.exit(1)
b. Sudo probe: subprocess.run([runtime, "inspect", "--format", "ok", container_name], timeout=15)
- If succeeds → needs_sudo = False
- If fails → try subprocess.run([sudo, "-n", runtime, "inspect", ...], timeout=15)
- If succeeds → needs_sudo = True
- If fails → print error with sudoers hint (including why -n is required) and sys.exit(1)
- If TimeoutExpired → catch specifically, print human-readable message about slow daemon
c. Build exec_cmd: [sudo? + runtime, "exec", tty_flags, "-u", exec_user, env_flags, container, hermes_bin, *cli_args]
d. os.execvp(exec_cmd[0], exec_cmd)
- On success: process is replaced — Python is gone, container exit code IS the process exit code
- On OSError: let it crash (natural traceback)
```
### Changes to `hermes_cli/main.py`
#### `_exec_in_container` — rewrite
Remove:
- The entire retry loop (`max_retries`, `for attempt in range(...)`)
- Spinner logic (`"Waiting for container..."`, dots)
- Exit code classification (125/126/127 handling)
- `subprocess.run` for the exec call (keep it only for the sudo probe)
- Special TTY vs non-TTY retry counts
- The `time` import (no longer needed)
Change:
- Use `os.execvp(exec_cmd[0], exec_cmd)` as the final call
- Keep the `subprocess` import only for the sudo probe
- Keep TTY detection for the `-it` vs `-i` flag
- Keep env var forwarding (TERM, COLORTERM, LANG, LC_ALL)
- Keep the sudo probe as-is (it's the one "smart" part)
- Bump probe `timeout` from 5s to 15s — cold podman on a loaded machine needs headroom
- Catch `subprocess.TimeoutExpired` specifically on both probe calls — print a readable message about the daemon being unresponsive instead of a raw traceback
- Expand the sudoers hint error message to explain *why* `-n` (non-interactive) is required: a password prompt would hang the CLI or break piped commands
The function becomes roughly:
```python
def _exec_in_container(container_info: dict, cli_args: list):
"""Replace the current process with a command inside the managed container.
Probes whether sudo is needed (rootful containers), then os.execvp
into the container. If exec fails, the OS error propagates naturally.
"""
import shutil
import subprocess
backend = container_info["backend"]
container_name = container_info["container_name"]
exec_user = container_info["exec_user"]
hermes_bin = container_info["hermes_bin"]
runtime = shutil.which(backend)
if not runtime:
print(f"Error: {backend} not found on PATH. Cannot route to container.",
file=sys.stderr)
sys.exit(1)
# Probe whether we need sudo to see the rootful container.
# Timeout is 15s — cold podman on a loaded machine can take a while.
# TimeoutExpired is caught specifically for a human-readable message;
# all other exceptions propagate naturally.
needs_sudo = False
sudo = None
try:
probe = subprocess.run(
[runtime, "inspect", "--format", "ok", container_name],
capture_output=True, text=True, timeout=15,
)
except subprocess.TimeoutExpired:
print(
f"Error: timed out waiting for {backend} to respond.\n"
f"The {backend} daemon may be unresponsive or starting up.",
file=sys.stderr,
)
sys.exit(1)
if probe.returncode != 0:
sudo = shutil.which("sudo")
if sudo:
try:
probe2 = subprocess.run(
[sudo, "-n", runtime, "inspect", "--format", "ok", container_name],
capture_output=True, text=True, timeout=15,
)
except subprocess.TimeoutExpired:
print(
f"Error: timed out waiting for sudo {backend} to respond.",
file=sys.stderr,
)
sys.exit(1)
if probe2.returncode == 0:
needs_sudo = True
else:
print(
f"Error: container '{container_name}' not found via {backend}.\n"
f"\n"
f"The NixOS service runs the container as root. Your user cannot\n"
f"see it because {backend} uses per-user namespaces.\n"
f"\n"
f"Fix: grant passwordless sudo for {backend}. The -n (non-interactive)\n"
f"flag is required because the CLI calls sudo non-interactively —\n"
f"a password prompt would hang or break piped commands:\n"
f"\n"
f' security.sudo.extraRules = [{{\n'
f' users = [ "{os.getenv("USER", "your-user")}" ];\n'
f' commands = [{{ command = "{runtime}"; options = [ "NOPASSWD" ]; }}];\n'
f' }}];\n'
f"\n"
f"Or run: sudo hermes {' '.join(cli_args)}",
file=sys.stderr,
)
sys.exit(1)
else:
print(
f"Error: container '{container_name}' not found via {backend}.\n"
f"The container may be running under root. Try: sudo hermes {' '.join(cli_args)}",
file=sys.stderr,
)
sys.exit(1)
is_tty = sys.stdin.isatty()
tty_flags = ["-it"] if is_tty else ["-i"]
env_flags = []
for var in ("TERM", "COLORTERM", "LANG", "LC_ALL"):
val = os.environ.get(var)
if val:
env_flags.extend(["-e", f"{var}={val}"])
cmd_prefix = [sudo, "-n", runtime] if needs_sudo else [runtime]
exec_cmd = (
cmd_prefix + ["exec"]
+ tty_flags
+ ["-u", exec_user]
+ env_flags
+ [container_name, hermes_bin]
+ cli_args
)
# execvp replaces this process entirely — it never returns on success.
# On failure it raises OSError, which propagates naturally.
os.execvp(exec_cmd[0], exec_cmd)
```
#### Container routing call site in `main()` — remove try/except
Current:
```python
try:
from hermes_cli.config import get_container_exec_info
container_info = get_container_exec_info()
if container_info:
_exec_in_container(container_info, sys.argv[1:])
sys.exit(1) # exec failed if we reach here
except SystemExit:
raise
except Exception:
pass # Container routing unavailable, proceed locally
```
Revised:
```python
from hermes_cli.config import get_container_exec_info
container_info = get_container_exec_info()
if container_info:
_exec_in_container(container_info, sys.argv[1:])
# Unreachable: os.execvp never returns on success (process is replaced)
# and raises OSError on failure (which propagates as a traceback).
# This line exists only as a defensive assertion.
sys.exit(1)
```
No try/except. If `.container-mode` doesn't exist, `get_container_exec_info()` returns `None` and we skip routing. If it exists but is broken, the exception propagates with a natural traceback.
Note: `sys.exit(1)` after `_exec_in_container` is dead code in all paths — `os.execvp` either replaces the process or raises. It's kept as a belt-and-suspenders assertion with a comment marking it unreachable, not as actual error handling.
### Changes to `hermes_cli/config.py`
#### `get_container_exec_info` — remove inner try/except
Current code catches `(OSError, IOError)` and returns `None`. This silently hides permission errors, corrupt files, etc.
Change: Remove the try/except around file reading. Keep the early returns for `HERMES_DEV=1` and `_is_inside_container()`. The `FileNotFoundError` from `open()` when `.container-mode` doesn't exist should still return `None` (this is the "container mode not enabled" case). All other exceptions propagate.
```python
def get_container_exec_info() -> Optional[dict]:
if os.environ.get("HERMES_DEV") == "1":
return None
if _is_inside_container():
return None
container_mode_file = get_hermes_home() / ".container-mode"
try:
with open(container_mode_file, "r") as f:
# ... parse key=value lines ...
except FileNotFoundError:
return None
# All other exceptions (PermissionError, malformed data, etc.) propagate
return { ... }
```
---
## Spec: NixOS Module Changes
### Symlink creation — simplify to two branches
Current: 4 branches (symlink exists, directory exists, other file, doesn't exist).
Revised: 2 branches.
```bash
if [ -d "${symlinkPath}" ] && [ ! -L "${symlinkPath}" ]; then
# Real directory — back it up, then create symlink
_backup="${symlinkPath}.bak.$(date +%s)"
echo "hermes-agent: backing up existing ${symlinkPath} to $_backup"
mv "${symlinkPath}" "$_backup"
fi
# For everything else (symlink, doesn't exist, etc.) — just force-create
ln -sfn "${target}" "${symlinkPath}"
chown -h ${user}:${cfg.group} "${symlinkPath}"
```
`ln -sfn` handles: existing symlink (replaces), doesn't exist (creates), and after the `mv` above (creates). The only case that needs special handling is a real directory, because `ln -sfn` cannot atomically replace a directory.
Note: there is a theoretical race between the `[ -d ... ]` check and the `mv` (something could create/remove the directory in between). In practice this is a NixOS activation script running as root during `nixos-rebuild switch` — no other process should be touching `~/.hermes` at that moment. Not worth adding locking for.
### Sudoers — document, don't auto-configure
Do NOT add `security.sudo.extraRules` to the module. Document the sudoers requirement in the module's description/comments and in the error message the CLI prints when sudo probe fails.
### Group membership gating — keep as-is
The fix in 726cf90f (`cfg.container.enable && cfg.container.hostUsers != []`) is correct. Leftover group membership when container mode is disabled is harmless. No cleanup needed.
---
## Spec: Test Rewrite
The existing test file (`tests/hermes_cli/test_container_aware_cli.py`) has 16 tests. With the simplified exec model, several are obsolete.
### Tests to keep (update as needed)
- `test_is_inside_container_dockerenv` — unchanged
- `test_is_inside_container_containerenv` — unchanged
- `test_is_inside_container_cgroup_docker` — unchanged
- `test_is_inside_container_false_on_host` — unchanged
- `test_get_container_exec_info_returns_metadata` — unchanged
- `test_get_container_exec_info_none_inside_container` — unchanged
- `test_get_container_exec_info_none_without_file` — unchanged
- `test_get_container_exec_info_skipped_when_hermes_dev` — unchanged
- `test_get_container_exec_info_not_skipped_when_hermes_dev_zero` — unchanged
- `test_get_container_exec_info_defaults` — unchanged
- `test_get_container_exec_info_docker_backend` — unchanged
### Tests to add
- `test_get_container_exec_info_crashes_on_permission_error` — verify that `PermissionError` propagates (no silent `None` return)
- `test_exec_in_container_calls_execvp` — verify `os.execvp` is called with correct args (runtime, tty flags, user, env, container, binary, cli args)
- `test_exec_in_container_sudo_probe_sets_prefix` — verify that when first probe fails and sudo probe succeeds, `os.execvp` is called with `sudo -n` prefix
- `test_exec_in_container_no_runtime_hard_fails` — keep existing, verify `sys.exit(1)` when `shutil.which` returns None
- `test_exec_in_container_non_tty_uses_i_only` — update to check `os.execvp` args instead of `subprocess.run` args
- `test_exec_in_container_probe_timeout_prints_message` — verify that `subprocess.TimeoutExpired` from the probe produces a human-readable error and `sys.exit(1)`, not a raw traceback
- `test_exec_in_container_container_not_running_no_sudo` — verify the path where runtime exists (`shutil.which` returns a path) but probe returns non-zero and no sudo is available. Should print the "container may be running under root" error. This is distinct from `no_runtime_hard_fails` which covers `shutil.which` returning None.
### Tests to delete
- `test_exec_in_container_tty_retries_on_container_failure` — retry loop removed
- `test_exec_in_container_non_tty_retries_silently_exits_126` — retry loop removed
- `test_exec_in_container_propagates_hermes_exit_code` — no subprocess.run to check exit codes; execvp replaces the process. Note: exit code propagation still works correctly — when `os.execvp` succeeds, the container's process *becomes* this process, so its exit code is the process exit code by OS semantics. No application code needed, no test needed. A comment in the function docstring documents this intent for future readers.
---
## Out of Scope
- Auto-configuring sudoers rules in the NixOS module
- Any changes to `get_container_exec_info` parsing logic beyond the try/except narrowing
- Changes to `.container-mode` file format
- Changes to the `HERMES_DEV=1` bypass
- Changes to container detection logic (`_is_inside_container`)

View file

@ -53,7 +53,6 @@ def _run_tool_in_thread(tool_name: str, arguments: Dict[str, Any], task_id: str)
try:
loop = asyncio.get_running_loop()
# We're in an async context -- need to run in thread
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
future = pool.submit(
handle_function_call, tool_name, arguments, task_id

View file

@ -576,6 +576,14 @@ def load_gateway_config() -> GatewayConfig:
bridged["free_response_channels"] = platform_cfg["free_response_channels"]
if "mention_patterns" in platform_cfg:
bridged["mention_patterns"] = platform_cfg["mention_patterns"]
if "dm_policy" in platform_cfg:
bridged["dm_policy"] = platform_cfg["dm_policy"]
if "allow_from" in platform_cfg:
bridged["allow_from"] = platform_cfg["allow_from"]
if "group_policy" in platform_cfg:
bridged["group_policy"] = platform_cfg["group_policy"]
if "group_allow_from" in platform_cfg:
bridged["group_allow_from"] = platform_cfg["group_allow_from"]
if plat == Platform.DISCORD and "channel_skill_bindings" in platform_cfg:
bridged["channel_skill_bindings"] = platform_cfg["channel_skill_bindings"]
if "channel_prompts" in platform_cfg:
@ -608,6 +616,8 @@ def load_gateway_config() -> GatewayConfig:
if isinstance(frc, list):
frc = ",".join(str(v) for v in frc)
os.environ["SLACK_FREE_RESPONSE_CHANNELS"] = str(frc)
if "reactions" in slack_cfg and not os.getenv("SLACK_REACTIONS"):
os.environ["SLACK_REACTIONS"] = str(slack_cfg["reactions"]).lower()
# Discord settings → env vars (env vars take precedence)
discord_cfg = yaml_cfg.get("discord", {})
@ -662,8 +672,7 @@ def load_gateway_config() -> GatewayConfig:
if "require_mention" in telegram_cfg and not os.getenv("TELEGRAM_REQUIRE_MENTION"):
os.environ["TELEGRAM_REQUIRE_MENTION"] = str(telegram_cfg["require_mention"]).lower()
if "mention_patterns" in telegram_cfg and not os.getenv("TELEGRAM_MENTION_PATTERNS"):
import json as _json
os.environ["TELEGRAM_MENTION_PATTERNS"] = _json.dumps(telegram_cfg["mention_patterns"])
os.environ["TELEGRAM_MENTION_PATTERNS"] = json.dumps(telegram_cfg["mention_patterns"])
frc = telegram_cfg.get("free_response_chats")
if frc is not None and not os.getenv("TELEGRAM_FREE_RESPONSE_CHATS"):
if isinstance(frc, list):
@ -700,6 +709,20 @@ def load_gateway_config() -> GatewayConfig:
if isinstance(frc, list):
frc = ",".join(str(v) for v in frc)
os.environ["WHATSAPP_FREE_RESPONSE_CHATS"] = str(frc)
if "dm_policy" in whatsapp_cfg and not os.getenv("WHATSAPP_DM_POLICY"):
os.environ["WHATSAPP_DM_POLICY"] = str(whatsapp_cfg["dm_policy"]).lower()
af = whatsapp_cfg.get("allow_from")
if af is not None and not os.getenv("WHATSAPP_ALLOWED_USERS"):
if isinstance(af, list):
af = ",".join(str(v) for v in af)
os.environ["WHATSAPP_ALLOWED_USERS"] = str(af)
if "group_policy" in whatsapp_cfg and not os.getenv("WHATSAPP_GROUP_POLICY"):
os.environ["WHATSAPP_GROUP_POLICY"] = str(whatsapp_cfg["group_policy"]).lower()
gaf = whatsapp_cfg.get("group_allow_from")
if gaf is not None and not os.getenv("WHATSAPP_GROUP_ALLOWED_USERS"):
if isinstance(gaf, list):
gaf = ",".join(str(v) for v in gaf)
os.environ["WHATSAPP_GROUP_ALLOWED_USERS"] = str(gaf)
# DingTalk settings → env vars (env vars take precedence)
dingtalk_cfg = yaml_cfg.get("dingtalk", {})
@ -1237,7 +1260,6 @@ def _apply_env_overrides(config: GatewayConfig) -> None:
if legacy_home:
qq_home = legacy_home
qq_home_name_env = "QQ_HOME_CHANNEL_NAME"
import logging
logging.getLogger(__name__).warning(
"QQ_HOME_CHANNEL is deprecated; rename to QQBOT_HOME_CHANNEL "
"in your .env for consistency with the platform key."

View file

@ -117,6 +117,160 @@ def _normalize_chat_content(
return ""
# Content part type aliases used by the OpenAI Chat Completions and Responses
# APIs. We accept both spellings on input and emit a single canonical internal
# shape (``{"type": "text", ...}`` / ``{"type": "image_url", ...}``) that the
# rest of the agent pipeline already understands.
_TEXT_PART_TYPES = frozenset({"text", "input_text", "output_text"})
_IMAGE_PART_TYPES = frozenset({"image_url", "input_image"})
_FILE_PART_TYPES = frozenset({"file", "input_file"})
def _normalize_multimodal_content(content: Any) -> Any:
"""Validate and normalize multimodal content for the API server.
Returns a plain string when the content is text-only, or a list of
``{"type": "text"|"image_url", ...}`` parts when images are present.
The output shape is the native OpenAI Chat Completions vision format,
which the agent pipeline accepts verbatim (OpenAI-wire providers) or
converts (``_preprocess_anthropic_content`` for Anthropic).
Raises ``ValueError`` with an OpenAI-style code on invalid input:
* ``unsupported_content_type`` file/input_file/file_id parts, or
non-image ``data:`` URLs.
* ``invalid_image_url`` missing URL or unsupported scheme.
* ``invalid_content_part`` malformed text/image objects.
Callers translate the ValueError into a 400 response.
"""
# Scalar passthrough mirrors ``_normalize_chat_content``.
if content is None:
return ""
if isinstance(content, str):
return content[:MAX_NORMALIZED_TEXT_LENGTH] if len(content) > MAX_NORMALIZED_TEXT_LENGTH else content
if not isinstance(content, list):
# Mirror the legacy text-normalizer's fallback so callers that
# pre-existed image support still get a string back.
return _normalize_chat_content(content)
items = content[:MAX_CONTENT_LIST_SIZE] if len(content) > MAX_CONTENT_LIST_SIZE else content
normalized_parts: List[Dict[str, Any]] = []
text_accum_len = 0
for part in items:
if isinstance(part, str):
if part:
trimmed = part[:MAX_NORMALIZED_TEXT_LENGTH]
normalized_parts.append({"type": "text", "text": trimmed})
text_accum_len += len(trimmed)
continue
if not isinstance(part, dict):
# Ignore unknown scalars for forward compatibility with future
# Responses API additions (e.g. ``refusal``). The same policy
# the text normalizer applies.
continue
raw_type = part.get("type")
part_type = str(raw_type or "").strip().lower()
if part_type in _TEXT_PART_TYPES:
text = part.get("text")
if text is None:
continue
if not isinstance(text, str):
text = str(text)
if text:
trimmed = text[:MAX_NORMALIZED_TEXT_LENGTH]
normalized_parts.append({"type": "text", "text": trimmed})
text_accum_len += len(trimmed)
continue
if part_type in _IMAGE_PART_TYPES:
detail = part.get("detail")
image_ref = part.get("image_url")
# OpenAI Responses sends ``input_image`` with a top-level
# ``image_url`` string; Chat Completions sends ``image_url`` as
# ``{"url": "...", "detail": "..."}``. Support both.
if isinstance(image_ref, dict):
url_value = image_ref.get("url")
detail = image_ref.get("detail", detail)
else:
url_value = image_ref
if not isinstance(url_value, str) or not url_value.strip():
raise ValueError("invalid_image_url:Image parts must include a non-empty image URL.")
url_value = url_value.strip()
lowered = url_value.lower()
if lowered.startswith("data:"):
if not lowered.startswith("data:image/") or "," not in url_value:
raise ValueError(
"unsupported_content_type:Only image data URLs are supported. "
"Non-image data payloads are not supported."
)
elif not (lowered.startswith("http://") or lowered.startswith("https://")):
raise ValueError(
"invalid_image_url:Image inputs must use http(s) URLs or data:image/... URLs."
)
image_part: Dict[str, Any] = {"type": "image_url", "image_url": {"url": url_value}}
if detail is not None:
if not isinstance(detail, str) or not detail.strip():
raise ValueError("invalid_content_part:Image detail must be a non-empty string when provided.")
image_part["image_url"]["detail"] = detail.strip()
normalized_parts.append(image_part)
continue
if part_type in _FILE_PART_TYPES:
raise ValueError(
"unsupported_content_type:Inline image inputs are supported, "
"but uploaded files and document inputs are not supported on this endpoint."
)
# Unknown part type — reject explicitly so clients get a clear error
# instead of a silently dropped turn.
raise ValueError(
f"unsupported_content_type:Unsupported content part type {raw_type!r}. "
"Only text and image_url/input_image parts are supported."
)
if not normalized_parts:
return ""
# Text-only: collapse to a plain string so downstream logging/trajectory
# code sees the native shape and prompt caching on text-only turns is
# unaffected.
if all(p.get("type") == "text" for p in normalized_parts):
return "\n".join(p["text"] for p in normalized_parts if p.get("text"))
return normalized_parts
def _content_has_visible_payload(content: Any) -> bool:
    """Return True when *content* carries any visible text or image part.

    Accepts either the plain-string content shape or the list-of-parts
    shape; anything else is treated as empty. Used to reject turns that
    would otherwise be blank after normalization.
    """
    if isinstance(content, str):
        return bool(content.strip())
    if not isinstance(content, list):
        return False
    for part in content:
        if not isinstance(part, dict):
            continue
        part_type = str(part.get("type") or "").strip().lower()
        # An image part counts by its mere presence; a text part only
        # counts when the text itself is non-blank.
        if part_type in _IMAGE_PART_TYPES:
            return True
        if part_type in _TEXT_PART_TYPES and str(part.get("text") or "").strip():
            return True
    return False
def _multimodal_validation_error(exc: ValueError, *, param: str) -> "web.Response":
    """Convert a ``_normalize_multimodal_content`` ValueError into a 400 response.

    The exception message is expected in ``"code:message"`` form; when no
    code prefix is present (or the message part is empty) the whole string
    becomes the message under a generic ``invalid_content_part`` code.
    """
    raw_text = str(exc)
    code, _sep, message = raw_text.partition(":")
    if not message:
        # No ":" separator (or nothing after it) — fall back to a generic code.
        code, message = "invalid_content_part", raw_text
    body = _openai_error(message, code=code, param=param)
    return web.json_response(body, status=400)
def check_api_server_requirements() -> bool:
    """Report whether the API server's optional dependencies are available.

    Simply surfaces the module-level ``AIOHTTP_AVAILABLE`` flag (presumably
    set at import time when ``aiohttp`` imports cleanly — confirm at the top
    of this module) so callers can gate API-server startup on it.
    """
    return AIOHTTP_AVAILABLE
@ -169,7 +323,6 @@ class ResponseStore:
).fetchone()
if row is None:
return None
import time
self._conn.execute(
"UPDATE responses SET accessed_at = ? WHERE response_id = ?",
(time.time(), response_id),
@ -179,7 +332,6 @@ class ResponseStore:
def put(self, response_id: str, data: Dict[str, Any]) -> None:
"""Store a response, evicting the oldest if at capacity."""
import time
self._conn.execute(
"INSERT OR REPLACE INTO responses (response_id, data, accessed_at) VALUES (?, ?, ?)",
(response_id, json.dumps(data, default=str), time.time()),
@ -315,12 +467,12 @@ class _IdempotencyCache:
def __init__(self, max_items: int = 1000, ttl_seconds: int = 300):
    """Create an empty bounded cache.

    ``max_items`` caps stored entries and ``ttl_seconds`` is the per-entry
    expiry window used by the purge pass.
    """
    from collections import OrderedDict

    # Insertion-ordered store so the oldest entries are easy to evict.
    self._store = OrderedDict()
    # (key, fingerprint) -> in-flight task, so concurrent identical
    # requests coalesce onto a single computation.
    self._inflight: Dict[tuple[str, str], "asyncio.Task[Any]"] = {}
    self._max = max_items
    self._ttl = ttl_seconds
def _purge(self):
import time as _t
now = _t.time()
now = time.time()
expired = [k for k, v in self._store.items() if now - v["ts"] > self._ttl]
for k in expired:
self._store.pop(k, None)
@ -332,11 +484,27 @@ class _IdempotencyCache:
item = self._store.get(key)
if item and item["fp"] == fingerprint:
return item["resp"]
resp = await compute_coro()
import time as _t
self._store[key] = {"resp": resp, "fp": fingerprint, "ts": _t.time()}
self._purge()
return resp
inflight_key = (key, fingerprint)
task = self._inflight.get(inflight_key)
if task is None:
async def _compute_and_store():
resp = await compute_coro()
import time as _t
self._store[key] = {"resp": resp, "fp": fingerprint, "ts": _t.time()}
self._purge()
return resp
task = asyncio.create_task(_compute_and_store())
self._inflight[inflight_key] = task
def _clear_inflight(done_task: "asyncio.Task[Any]") -> None:
if self._inflight.get(inflight_key) is done_task:
self._inflight.pop(inflight_key, None)
task.add_done_callback(_clear_inflight)
return await asyncio.shield(task)
_idem_cache = _IdempotencyCache()
@ -366,6 +534,30 @@ def _derive_chat_session_id(
return f"api-{digest}"
# Optional cron-jobs integration: resolve the job-management helpers once at
# import time. When the ``cron`` package is absent, handlers consult the
# module-level flag and report the feature unavailable instead of failing
# per-request.
_CRON_AVAILABLE = False
try:
    from cron.jobs import (
        list_jobs as _cron_list,
        get_job as _cron_get,
        create_job as _cron_create,
        update_job as _cron_update,
        remove_job as _cron_remove,
        pause_job as _cron_pause,
        resume_job as _cron_resume,
        trigger_job as _cron_trigger,
    )
    _CRON_AVAILABLE = True
except ImportError:
    # Bind explicit None placeholders so an accidental call that skips the
    # availability check fails with a clear TypeError rather than NameError.
    _cron_list = None
    _cron_get = None
    _cron_create = None
    _cron_update = None
    _cron_remove = None
    _cron_pause = None
    _cron_resume = None
    _cron_trigger = None
class APIServerAdapter(BasePlatformAdapter):
"""
OpenAI-compatible HTTP API server adapter.
@ -637,26 +829,32 @@ class APIServerAdapter(BasePlatformAdapter):
system_prompt = None
conversation_messages: List[Dict[str, str]] = []
for msg in messages:
for idx, msg in enumerate(messages):
role = msg.get("role", "")
content = _normalize_chat_content(msg.get("content", ""))
raw_content = msg.get("content", "")
if role == "system":
# Accumulate system messages
# System messages don't support images (Anthropic rejects, OpenAI
# text-model systems don't render them). Flatten to text.
content = _normalize_chat_content(raw_content)
if system_prompt is None:
system_prompt = content
else:
system_prompt = system_prompt + "\n" + content
elif role in ("user", "assistant"):
try:
content = _normalize_multimodal_content(raw_content)
except ValueError as exc:
return _multimodal_validation_error(exc, param=f"messages[{idx}].content")
conversation_messages.append({"role": role, "content": content})
# Extract the last user message as the primary input
user_message = ""
user_message: Any = ""
history = []
if conversation_messages:
user_message = conversation_messages[-1].get("content", "")
history = conversation_messages[:-1]
if not user_message:
if not _content_has_visible_payload(user_message):
return web.json_response(
{"error": {"message": "No user message found in messages", "type": "invalid_request_error"}},
status=400,
@ -1424,16 +1622,19 @@ class APIServerAdapter(BasePlatformAdapter):
# No error if conversation doesn't exist yet — it's a new conversation
# Normalize input to message list
input_messages: List[Dict[str, str]] = []
input_messages: List[Dict[str, Any]] = []
if isinstance(raw_input, str):
input_messages = [{"role": "user", "content": raw_input}]
elif isinstance(raw_input, list):
for item in raw_input:
for idx, item in enumerate(raw_input):
if isinstance(item, str):
input_messages.append({"role": "user", "content": item})
elif isinstance(item, dict):
role = item.get("role", "user")
content = _normalize_chat_content(item.get("content", ""))
try:
content = _normalize_multimodal_content(item.get("content", ""))
except ValueError as exc:
return _multimodal_validation_error(exc, param=f"input[{idx}].content")
input_messages.append({"role": role, "content": content})
else:
return web.json_response(_openai_error("'input' must be a string or array"), status=400)
@ -1442,7 +1643,7 @@ class APIServerAdapter(BasePlatformAdapter):
# This lets stateless clients supply their own history instead of
# relying on server-side response chaining via previous_response_id.
# Precedence: explicit conversation_history > previous_response_id.
conversation_history: List[Dict[str, str]] = []
conversation_history: List[Dict[str, Any]] = []
raw_history = body.get("conversation_history")
if raw_history:
if not isinstance(raw_history, list):
@ -1456,7 +1657,11 @@ class APIServerAdapter(BasePlatformAdapter):
_openai_error(f"conversation_history[{i}] must have 'role' and 'content' fields"),
status=400,
)
conversation_history.append({"role": str(entry["role"]), "content": str(entry["content"])})
try:
entry_content = _normalize_multimodal_content(entry["content"])
except ValueError as exc:
return _multimodal_validation_error(exc, param=f"conversation_history[{i}].content")
conversation_history.append({"role": str(entry["role"]), "content": entry_content})
if previous_response_id:
logger.debug("Both conversation_history and previous_response_id provided; using conversation_history")
@ -1476,8 +1681,8 @@ class APIServerAdapter(BasePlatformAdapter):
conversation_history.append(msg)
# Last input message is the user_message
user_message = input_messages[-1].get("content", "") if input_messages else ""
if not user_message:
user_message: Any = input_messages[-1].get("content", "") if input_messages else ""
if not _content_has_visible_payload(user_message):
return web.json_response(_openai_error("No user message found in input"), status=400)
# Truncation support
@ -1682,44 +1887,16 @@ class APIServerAdapter(BasePlatformAdapter):
# Cron jobs API
# ------------------------------------------------------------------
# Check cron module availability once (not per-request)
_CRON_AVAILABLE = False
try:
from cron.jobs import (
list_jobs as _cron_list,
get_job as _cron_get,
create_job as _cron_create,
update_job as _cron_update,
remove_job as _cron_remove,
pause_job as _cron_pause,
resume_job as _cron_resume,
trigger_job as _cron_trigger,
)
# Wrap as staticmethod to prevent descriptor binding — these are plain
# module functions, not instance methods. Without this, self._cron_*()
# injects ``self`` as the first positional argument and every call
# raises TypeError.
_cron_list = staticmethod(_cron_list)
_cron_get = staticmethod(_cron_get)
_cron_create = staticmethod(_cron_create)
_cron_update = staticmethod(_cron_update)
_cron_remove = staticmethod(_cron_remove)
_cron_pause = staticmethod(_cron_pause)
_cron_resume = staticmethod(_cron_resume)
_cron_trigger = staticmethod(_cron_trigger)
_CRON_AVAILABLE = True
except ImportError:
pass
# Cron job IDs are 12 lowercase-hex characters. ``__import__("re")`` is
# presumably used to avoid depending on a module-level ``re`` import here.
# NOTE(review): the pattern is unanchored — confirm callers use fullmatch()
# or otherwise anchor it before trusting the match.
_JOB_ID_RE = __import__("re").compile(r"[a-f0-9]{12}")
# Allowed fields for update — prevents clients injecting arbitrary keys
_UPDATE_ALLOWED_FIELDS = {"name", "schedule", "prompt", "deliver", "skills", "skill", "repeat", "enabled"}
# Upper bounds on client-supplied string lengths (enforced by the handlers).
_MAX_NAME_LENGTH = 200
_MAX_PROMPT_LENGTH = 5000
def _check_jobs_available(self) -> Optional["web.Response"]:
@staticmethod
def _check_jobs_available() -> Optional["web.Response"]:
"""Return error response if cron module isn't available."""
if not self._CRON_AVAILABLE:
if not _CRON_AVAILABLE:
return web.json_response(
{"error": "Cron module not available"}, status=501,
)
@ -1744,7 +1921,7 @@ class APIServerAdapter(BasePlatformAdapter):
return cron_err
try:
include_disabled = request.query.get("include_disabled", "").lower() in ("true", "1")
jobs = self._cron_list(include_disabled=include_disabled)
jobs = _cron_list(include_disabled=include_disabled)
return web.json_response({"jobs": jobs})
except Exception as e:
return web.json_response({"error": str(e)}, status=500)
@ -1792,7 +1969,7 @@ class APIServerAdapter(BasePlatformAdapter):
if repeat is not None:
kwargs["repeat"] = repeat
job = self._cron_create(**kwargs)
job = _cron_create(**kwargs)
return web.json_response({"job": job})
except Exception as e:
return web.json_response({"error": str(e)}, status=500)
@ -1809,7 +1986,7 @@ class APIServerAdapter(BasePlatformAdapter):
if id_err:
return id_err
try:
job = self._cron_get(job_id)
job = _cron_get(job_id)
if not job:
return web.json_response({"error": "Job not found"}, status=404)
return web.json_response({"job": job})
@ -1842,7 +2019,7 @@ class APIServerAdapter(BasePlatformAdapter):
return web.json_response(
{"error": f"Prompt must be ≤ {self._MAX_PROMPT_LENGTH} characters"}, status=400,
)
job = self._cron_update(job_id, sanitized)
job = _cron_update(job_id, sanitized)
if not job:
return web.json_response({"error": "Job not found"}, status=404)
return web.json_response({"job": job})
@ -1861,7 +2038,7 @@ class APIServerAdapter(BasePlatformAdapter):
if id_err:
return id_err
try:
success = self._cron_remove(job_id)
success = _cron_remove(job_id)
if not success:
return web.json_response({"error": "Job not found"}, status=404)
return web.json_response({"ok": True})
@ -1880,7 +2057,7 @@ class APIServerAdapter(BasePlatformAdapter):
if id_err:
return id_err
try:
job = self._cron_pause(job_id)
job = _cron_pause(job_id)
if not job:
return web.json_response({"error": "Job not found"}, status=404)
return web.json_response({"job": job})
@ -1899,7 +2076,7 @@ class APIServerAdapter(BasePlatformAdapter):
if id_err:
return id_err
try:
job = self._cron_resume(job_id)
job = _cron_resume(job_id)
if not job:
return web.json_response({"error": "Job not found"}, status=404)
return web.json_response({"job": job})
@ -1918,7 +2095,7 @@ class APIServerAdapter(BasePlatformAdapter):
if id_err:
return id_err
try:
job = self._cron_trigger(job_id)
job = _cron_trigger(job_id)
if not job:
return web.json_response({"error": "Job not found"}, status=404)
return web.json_response({"job": job})

View file

@ -6,6 +6,7 @@ and implement the required methods.
"""
import asyncio
import inspect
import ipaddress
import logging
import os
@ -18,6 +19,8 @@ import uuid
from abc import ABC, abstractmethod
from urllib.parse import urlsplit
from utils import normalize_proxy_url
logger = logging.getLogger(__name__)
@ -158,13 +161,13 @@ def resolve_proxy_url(platform_env_var: str | None = None) -> str | None:
if platform_env_var:
value = (os.environ.get(platform_env_var) or "").strip()
if value:
return value
return normalize_proxy_url(value)
for key in ("HTTPS_PROXY", "HTTP_PROXY", "ALL_PROXY",
"https_proxy", "http_proxy", "all_proxy"):
value = (os.environ.get(key) or "").strip()
if value:
return value
return _detect_macos_system_proxy()
return normalize_proxy_url(value)
return normalize_proxy_url(_detect_macos_system_proxy())
def proxy_kwargs_for_bot(proxy_url: str | None) -> dict:
@ -390,12 +393,9 @@ async def cache_image_from_url(url: str, ext: str = ".jpg", retries: int = 2) ->
if not is_safe_url(url):
raise ValueError(f"Blocked unsafe URL (SSRF protection): {safe_url_for_log(url)}")
import asyncio
import httpx
import logging as _logging
_log = _logging.getLogger(__name__)
_log = logging.getLogger(__name__)
last_exc = None
async with httpx.AsyncClient(
timeout=30.0,
follow_redirects=True,
@ -413,7 +413,6 @@ async def cache_image_from_url(url: str, ext: str = ".jpg", retries: int = 2) ->
response.raise_for_status()
return cache_image_from_bytes(response.content, ext)
except (httpx.TimeoutException, httpx.HTTPStatusError) as exc:
last_exc = exc
if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code < 429:
raise
if attempt < retries:
@ -429,7 +428,6 @@ async def cache_image_from_url(url: str, ext: str = ".jpg", retries: int = 2) ->
await asyncio.sleep(wait)
continue
raise
raise last_exc
def cleanup_image_cache(max_age_hours: int = 24) -> int:
@ -509,12 +507,9 @@ async def cache_audio_from_url(url: str, ext: str = ".ogg", retries: int = 2) ->
if not is_safe_url(url):
raise ValueError(f"Blocked unsafe URL (SSRF protection): {safe_url_for_log(url)}")
import asyncio
import httpx
import logging as _logging
_log = _logging.getLogger(__name__)
_log = logging.getLogger(__name__)
last_exc = None
async with httpx.AsyncClient(
timeout=30.0,
follow_redirects=True,
@ -532,7 +527,6 @@ async def cache_audio_from_url(url: str, ext: str = ".ogg", retries: int = 2) ->
response.raise_for_status()
return cache_audio_from_bytes(response.content, ext)
except (httpx.TimeoutException, httpx.HTTPStatusError) as exc:
last_exc = exc
if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code < 429:
raise
if attempt < retries:
@ -548,7 +542,39 @@ async def cache_audio_from_url(url: str, ext: str = ".ogg", retries: int = 2) ->
await asyncio.sleep(wait)
continue
raise
raise last_exc
# ---------------------------------------------------------------------------
# Video cache utilities
#
# Same pattern as image/audio cache -- videos from platforms are downloaded
# here so the agent can reference them by local file path.
# ---------------------------------------------------------------------------

# Cache root — "cache/videos" with "video_cache" as an alternate name
# (resolution order is handled by get_hermes_dir; confirm precedence there).
VIDEO_CACHE_DIR = get_hermes_dir("cache/videos", "video_cache")

# Extension → MIME type map for the video formats accepted by the adapters.
SUPPORTED_VIDEO_TYPES = {
    ".mp4": "video/mp4",
    ".mov": "video/quicktime",
    ".webm": "video/webm",
    ".mkv": "video/x-matroska",
    ".avi": "video/x-msvideo",
}
def get_video_cache_dir() -> Path:
    """Return the video cache directory, creating it (and parents) if missing."""
    VIDEO_CACHE_DIR.mkdir(parents=True, exist_ok=True)
    return VIDEO_CACHE_DIR
def cache_video_from_bytes(data: bytes, ext: str = ".mp4") -> str:
    """Persist raw video bytes into the cache and return the file path.

    A random ``video_<12 hex>`` filename is generated so concurrent writers
    never collide; *ext* is appended verbatim.
    """
    target = get_video_cache_dir() / f"video_{uuid.uuid4().hex[:12]}{ext}"
    target.write_bytes(data)
    return str(target)
# ---------------------------------------------------------------------------
@ -880,10 +906,11 @@ class BasePlatformAdapter(ABC):
# working on a task after --replace or manual restarts.
self._background_tasks: set[asyncio.Task] = set()
# One-shot callbacks to fire after the main response is delivered.
# Keyed by session_key. GatewayRunner uses this to defer
# background-review notifications ("💾 Skill created") until the
# primary reply has been sent.
self._post_delivery_callbacks: Dict[str, Callable] = {}
# Keyed by session_key. Values are either a bare callback (legacy) or
# a ``(generation, callback)`` tuple so GatewayRunner can make deferred
# deliveries generation-aware and avoid stale runs clearing callbacks
# registered by a fresher run for the same session.
self._post_delivery_callbacks: Dict[str, Any] = {}
self._expected_cancelled_tasks: set[asyncio.Task] = set()
self._busy_session_handler: Optional[Callable[[MessageEvent, str], Awaitable[bool]]] = None
# Chats where auto-TTS on voice input is disabled (set by /voice off)
@ -1316,7 +1343,7 @@ class BasePlatformAdapter(ABC):
# Extract MEDIA:<path> tags, allowing optional whitespace after the colon
# and quoted/backticked paths for LLM-formatted outputs.
media_pattern = re.compile(
r'''[`"']?MEDIA:\s*(?P<path>`[^`\n]+`|"[^"\n]+"|'[^'\n]+'|(?:~/|/)\S+(?:[^\S\n]+\S+)*?\.(?:png|jpe?g|gif|webp|mp4|mov|avi|mkv|webm|ogg|opus|mp3|wav|m4a)(?=[\s`"',;:)\]}]|$)|\S+)[`"']?'''
r'''[`"']?MEDIA:\s*(?P<path>`[^`\n]+`|"[^"\n]+"|'[^'\n]+'|(?:~/|/)\S+(?:[^\S\n]+\S+)*?\.(?:png|jpe?g|gif|webp|mp4|mov|avi|mkv|webm|ogg|opus|mp3|wav|m4a|pdf)(?=[\s`"',;:)\]}]|$)|\S+)[`"']?'''
)
for match in media_pattern.finditer(content):
path = match.group("path").strip()
@ -1401,7 +1428,13 @@ class BasePlatformAdapter(ABC):
return paths, cleaned
async def _keep_typing(self, chat_id: str, interval: float = 2.0, metadata=None) -> None:
async def _keep_typing(
self,
chat_id: str,
interval: float = 2.0,
metadata=None,
stop_event: asyncio.Event | None = None,
) -> None:
"""
Continuously send typing indicator until cancelled.
@ -1415,9 +1448,18 @@ class BasePlatformAdapter(ABC):
"""
try:
while True:
if stop_event is not None and stop_event.is_set():
return
if chat_id not in self._typing_paused:
await self.send_typing(chat_id, metadata=metadata)
await asyncio.sleep(interval)
if stop_event is None:
await asyncio.sleep(interval)
continue
try:
await asyncio.wait_for(stop_event.wait(), timeout=interval)
except asyncio.TimeoutError:
continue
return
except asyncio.CancelledError:
pass # Normal cancellation when handler completes
finally:
@ -1444,6 +1486,59 @@ class BasePlatformAdapter(ABC):
"""Resume typing indicator for a chat after approval resolves."""
self._typing_paused.discard(chat_id)
async def interrupt_session_activity(self, session_key: str, chat_id: str) -> None:
    """Ask the active session loop to stop and clear typing right away.

    Both steps are best-effort: an empty or unknown ``session_key`` is
    ignored, and any failure while clearing the typing indicator is
    swallowed so interruption itself never raises.
    """
    event = self._active_sessions.get(session_key) if session_key else None
    if event is not None:
        event.set()
    try:
        await self.stop_typing(chat_id)
    except Exception:
        # Typing cleanup is cosmetic; never let it mask the interrupt.
        pass
def register_post_delivery_callback(
self,
session_key: str,
callback: Callable,
*,
generation: int | None = None,
) -> None:
"""Register a deferred callback to fire after the main response.
``generation`` lets callers tie the callback to a specific gateway run
generation so stale runs cannot clear callbacks owned by a fresher run.
"""
if not session_key or not callable(callback):
return
if generation is None:
self._post_delivery_callbacks[session_key] = callback
else:
self._post_delivery_callbacks[session_key] = (int(generation), callback)
def pop_post_delivery_callback(
self,
session_key: str,
*,
generation: int | None = None,
) -> Callable | None:
"""Pop a deferred callback, optionally requiring generation ownership."""
if not session_key:
return None
entry = self._post_delivery_callbacks.get(session_key)
if entry is None:
return None
if isinstance(entry, tuple) and len(entry) == 2:
entry_generation, callback = entry
if generation is not None and int(entry_generation) != int(generation):
return None
self._post_delivery_callbacks.pop(session_key, None)
return callback if callable(callback) else None
if generation is not None:
return None
self._post_delivery_callbacks.pop(session_key, None)
return entry if callable(entry) else None
# ── Processing lifecycle hooks ──────────────────────────────────────────
# Subclasses override these to react to message processing events
# (e.g. Discord adds 👀/✅/❌ reactions).
@ -1684,8 +1779,6 @@ class BasePlatformAdapter(ABC):
HERMES_HUMAN_DELAY_MIN_MS: minimum delay in ms (default 800, custom mode)
HERMES_HUMAN_DELAY_MAX_MS: maximum delay in ms (default 2500, custom mode)
"""
import random
mode = os.getenv("HERMES_HUMAN_DELAY_MODE", "off").lower()
if mode == "off":
return 0.0
@ -1714,10 +1807,23 @@ class BasePlatformAdapter(ABC):
# Fall back to a new Event only if the entry was removed externally.
interrupt_event = self._active_sessions.get(session_key) or asyncio.Event()
self._active_sessions[session_key] = interrupt_event
callback_generation = getattr(interrupt_event, "_hermes_run_generation", None)
# Start continuous typing indicator (refreshes every 2 seconds)
_thread_metadata = {"thread_id": event.source.thread_id} if event.source.thread_id else None
typing_task = asyncio.create_task(self._keep_typing(event.source.chat_id, metadata=_thread_metadata))
_keep_typing_kwargs = {"metadata": _thread_metadata}
try:
_keep_typing_sig = inspect.signature(self._keep_typing)
except (TypeError, ValueError):
_keep_typing_sig = None
if _keep_typing_sig is None or "stop_event" in _keep_typing_sig.parameters:
_keep_typing_kwargs["stop_event"] = interrupt_event
typing_task = asyncio.create_task(
self._keep_typing(
event.source.chat_id,
**_keep_typing_kwargs,
)
)
try:
await self._run_processing_hook("on_processing_start", event)
@ -1976,7 +2082,14 @@ class BasePlatformAdapter(ABC):
finally:
# Fire any one-shot post-delivery callback registered for this
# session (e.g. deferred background-review notifications).
_post_cb = getattr(self, "_post_delivery_callbacks", {}).pop(session_key, None)
_callback_generation = callback_generation
if hasattr(self, "pop_post_delivery_callback"):
_post_cb = self.pop_post_delivery_callback(
session_key,
generation=_callback_generation,
)
else:
_post_cb = getattr(self, "_post_delivery_callbacks", {}).pop(session_key, None)
if callable(_post_cb):
try:
_post_cb()
@ -2022,10 +2135,10 @@ class BasePlatformAdapter(ABC):
pass
# Leave _active_sessions[session_key] populated — the drain
# task's own lifecycle will clean it up.
return
# Clean up session tracking
if session_key in self._active_sessions:
del self._active_sessions[session_key]
else:
# Clean up session tracking
if session_key in self._active_sessions:
del self._active_sessions[session_key]
async def cancel_background_tasks(self) -> None:
"""Cancel any in-flight background message-processing tasks.
@ -2033,12 +2146,26 @@ class BasePlatformAdapter(ABC):
Used during gateway shutdown/replacement so active sessions from the old
process do not keep running after adapters are being torn down.
"""
tasks = [task for task in self._background_tasks if not task.done()]
for task in tasks:
self._expected_cancelled_tasks.add(task)
task.cancel()
if tasks:
# Loop until no new tasks appear. Without this, a message
# arriving during the `await asyncio.gather` below would spawn
# a fresh _process_message_background task (added to
# self._background_tasks at line ~1668 via handle_message),
# and the _background_tasks.clear() at the end of this method
# would drop the reference — the task runs untracked against a
# disconnecting adapter, logs send-failures, and may linger
# until it completes on its own. Retrying the drain until the
# task set stabilizes closes the window.
MAX_DRAIN_ROUNDS = 5
for _ in range(MAX_DRAIN_ROUNDS):
tasks = [task for task in self._background_tasks if not task.done()]
if not tasks:
break
for task in tasks:
self._expected_cancelled_tasks.add(task)
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
# Loop: late-arrival tasks spawned during the gather above
# will be in self._background_tasks now. Re-check.
self._background_tasks.clear()
self._expected_cancelled_tasks.clear()
self._pending_messages.clear()

View file

@ -75,7 +75,7 @@ def _redact(text: str) -> str:
def check_bluebubbles_requirements() -> bool:
    """Return True when the HTTP client dependencies are importable.

    Probes ``aiohttp`` and ``httpx`` with throwaway imports; either one
    missing means the BlueBubbles adapter cannot run.
    """
    try:
        # The stale ``import httpx as _httpx`` alias duplicated the plain
        # import — one plain import per dependency is sufficient.
        import aiohttp  # noqa: F401
        import httpx  # noqa: F401
    except ImportError:
        return False
    return True

View file

@ -498,6 +498,7 @@ class DiscordAdapter(BasePlatformAdapter):
self._allowed_role_ids: set = set() # For DISCORD_ALLOWED_ROLES filtering
# Voice channel state (per-guild)
self._voice_clients: Dict[int, Any] = {} # guild_id -> VoiceClient
self._voice_locks: Dict[int, asyncio.Lock] = {} # guild_id -> serialize join/leave
# Text batching: merge rapid successive messages (Telegram-style)
self._text_batch_delay_seconds = float(os.getenv("HERMES_DISCORD_TEXT_BATCH_DELAY_SECONDS", "0.6"))
self._text_batch_split_delay_seconds = float(os.getenv("HERMES_DISCORD_TEXT_BATCH_SPLIT_DELAY_SECONDS", "2.0"))
@ -540,7 +541,6 @@ class DiscordAdapter(BasePlatformAdapter):
# ctypes.util.find_library fails on macOS with Homebrew-installed libs,
# so fall back to known Homebrew paths if needed.
if not opus_path:
import sys
_homebrew_paths = (
"/opt/homebrew/lib/libopus.dylib", # Apple Silicon
"/usr/local/lib/libopus.dylib", # Intel Mac
@ -636,6 +636,15 @@ class DiscordAdapter(BasePlatformAdapter):
@self._client.event
async def on_message(message: DiscordMessage):
# Block until _resolve_allowed_usernames has swapped
# any raw usernames in DISCORD_ALLOWED_USERS for numeric
# IDs (otherwise on_message's author.id lookup can miss).
if not adapter_self._ready_event.is_set():
try:
await asyncio.wait_for(adapter_self._ready_event.wait(), timeout=30.0)
except asyncio.TimeoutError:
pass
# Dedup: Discord RESUME replays events after reconnects (#4777)
if adapter_self._dedup.is_duplicate(str(message.id)):
return
@ -1071,6 +1080,8 @@ class DiscordAdapter(BasePlatformAdapter):
chat_id: str,
message_id: str,
content: str,
*,
finalize: bool = False,
) -> SendResult:
"""Edit a previously sent Discord message."""
if not self._client:
@ -1237,51 +1248,53 @@ class DiscordAdapter(BasePlatformAdapter):
return False
guild_id = channel.guild.id
# Already connected in this guild?
existing = self._voice_clients.get(guild_id)
if existing and existing.is_connected():
if existing.channel.id == channel.id:
async with self._voice_locks.setdefault(guild_id, asyncio.Lock()):
# Already connected in this guild?
existing = self._voice_clients.get(guild_id)
if existing and existing.is_connected():
if existing.channel.id == channel.id:
self._reset_voice_timeout(guild_id)
return True
await existing.move_to(channel)
self._reset_voice_timeout(guild_id)
return True
await existing.move_to(channel)
vc = await channel.connect()
self._voice_clients[guild_id] = vc
self._reset_voice_timeout(guild_id)
# Start voice receiver (Phase 2: listen to users)
try:
receiver = VoiceReceiver(vc, allowed_user_ids=self._allowed_user_ids)
receiver.start()
self._voice_receivers[guild_id] = receiver
self._voice_listen_tasks[guild_id] = asyncio.ensure_future(
self._voice_listen_loop(guild_id)
)
except Exception as e:
logger.warning("Voice receiver failed to start: %s", e)
return True
vc = await channel.connect()
self._voice_clients[guild_id] = vc
self._reset_voice_timeout(guild_id)
# Start voice receiver (Phase 2: listen to users)
try:
receiver = VoiceReceiver(vc, allowed_user_ids=self._allowed_user_ids)
receiver.start()
self._voice_receivers[guild_id] = receiver
self._voice_listen_tasks[guild_id] = asyncio.ensure_future(
self._voice_listen_loop(guild_id)
)
except Exception as e:
logger.warning("Voice receiver failed to start: %s", e)
return True
async def leave_voice_channel(self, guild_id: int) -> None:
"""Disconnect from the voice channel in a guild."""
# Stop voice receiver first
receiver = self._voice_receivers.pop(guild_id, None)
if receiver:
receiver.stop()
listen_task = self._voice_listen_tasks.pop(guild_id, None)
if listen_task:
listen_task.cancel()
async with self._voice_locks.setdefault(guild_id, asyncio.Lock()):
# Stop voice receiver first
receiver = self._voice_receivers.pop(guild_id, None)
if receiver:
receiver.stop()
listen_task = self._voice_listen_tasks.pop(guild_id, None)
if listen_task:
listen_task.cancel()
vc = self._voice_clients.pop(guild_id, None)
if vc and vc.is_connected():
await vc.disconnect()
task = self._voice_timeout_tasks.pop(guild_id, None)
if task:
task.cancel()
self._voice_text_channels.pop(guild_id, None)
self._voice_sources.pop(guild_id, None)
vc = self._voice_clients.pop(guild_id, None)
if vc and vc.is_connected():
await vc.disconnect()
task = self._voice_timeout_tasks.pop(guild_id, None)
if task:
task.cancel()
self._voice_text_channels.pop(guild_id, None)
self._voice_sources.pop(guild_id, None)
# Maximum seconds to wait for voice playback before giving up
PLAYBACK_TIMEOUT = 120
@ -1408,8 +1421,7 @@ class DiscordAdapter(BasePlatformAdapter):
speaking_user_ids: set = set()
receiver = self._voice_receivers.get(guild_id)
if receiver:
import time as _time
now = _time.monotonic()
now = time.monotonic()
with receiver._lock:
for ssrc, last_t in receiver._last_packet_time.items():
# Consider "speaking" if audio received within last 2 seconds
@ -2948,6 +2960,17 @@ class DiscordAdapter(BasePlatformAdapter):
parent_channel_id = self._get_parent_channel_id(message.channel)
is_voice_linked_channel = False
# Save mention-stripped text before auto-threading since create_thread()
# can clobber message.content, breaking /command detection in channels.
raw_content = message.content.strip()
normalized_content = raw_content
mention_prefix = False
if self._client.user and self._client.user in message.mentions:
mention_prefix = True
normalized_content = normalized_content.replace(f"<@{self._client.user.id}>", "").strip()
normalized_content = normalized_content.replace(f"<@!{self._client.user.id}>", "").strip()
message.content = normalized_content
if not isinstance(message.channel, discord.DMChannel):
channel_ids = {str(message.channel.id)}
if parent_channel_id:
@ -2985,13 +3008,8 @@ class DiscordAdapter(BasePlatformAdapter):
in_bot_thread = is_thread and thread_id in self._threads
if require_mention and not is_free_channel and not in_bot_thread:
if self._client.user not in message.mentions:
if self._client.user not in message.mentions and not mention_prefix:
return
if self._client.user and self._client.user in message.mentions:
message.content = message.content.replace(f"<@{self._client.user.id}>", "").strip()
message.content = message.content.replace(f"<@!{self._client.user.id}>", "").strip()
# Auto-thread: when enabled, automatically create a thread for every
# @mention in a text channel so each conversation is isolated (like Slack).
# Messages already inside threads or DMs are unaffected.
@ -3013,7 +3031,7 @@ class DiscordAdapter(BasePlatformAdapter):
# Determine message type
msg_type = MessageType.TEXT
if message.content.startswith("/"):
if normalized_content.startswith("/"):
msg_type = MessageType.COMMAND
elif message.attachments:
# Check attachment types
@ -3153,7 +3171,9 @@ class DiscordAdapter(BasePlatformAdapter):
att.filename, e, exc_info=True,
)
event_text = message.content
# Use normalized_content (saved before auto-threading) instead of message.content,
# to detect /slash commands in channel messages.
event_text = normalized_content
if pending_text_injection:
event_text = f"{pending_text_injection}\n\n{event_text}" if event_text else pending_text_injection
@ -3265,7 +3285,20 @@ class DiscordAdapter(BasePlatformAdapter):
"[Discord] Flushing text batch %s (%d chars)",
key, len(event.text or ""),
)
await self.handle_message(event)
# Shield the downstream dispatch so that a subsequent chunk
# arriving while handle_message is mid-flight cannot cancel
# the running agent turn. _enqueue_text_event always cancels
# the prior flush task when a new chunk lands; without this
# shield, CancelledError would propagate from our task down
# into handle_message → the agent's streaming request,
# aborting the response the user was waiting on. The new
# chunk is handled by the fresh flush task regardless.
await asyncio.shield(self.handle_message(event))
except asyncio.CancelledError:
# Only reached if cancel landed before the pop — the shielded
# handle_message is unaffected either way. Let the task exit
# cleanly so the finally block cleans up.
pass
finally:
if self._pending_text_batch_tasks.get(key) is current_task:
self._pending_text_batch_tasks.pop(key, None)

View file

@ -8,7 +8,8 @@ Supports:
- Gateway allowlist integration via FEISHU_ALLOWED_USERS
- Persistent dedup state across restarts
- Per-chat serial message processing (matches openclaw createChatQueue)
- Persistent ACK emoji reaction on inbound messages
- Processing status reactions: Typing while working, removed on success,
swapped for CrossMark on failure
- Reaction events routed as synthetic text events (matches openclaw)
- Interactive card button-click events routed as synthetic COMMAND events
- Webhook anomaly tracking (matches openclaw createWebhookAnomalyTracker)
@ -29,6 +30,7 @@ import re
import threading
import time
import uuid
from collections import OrderedDict
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
@ -98,6 +100,7 @@ from gateway.platforms.base import (
BasePlatformAdapter,
MessageEvent,
MessageType,
ProcessingOutcome,
SendResult,
SUPPORTED_DOCUMENT_TYPES,
cache_document_from_bytes,
@ -119,6 +122,8 @@ _MARKDOWN_HINT_RE = re.compile(
re.MULTILINE,
)
_MARKDOWN_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
_MARKDOWN_FENCE_OPEN_RE = re.compile(r"^```([^\n`]*)\s*$")
_MARKDOWN_FENCE_CLOSE_RE = re.compile(r"^```\s*$")
_MENTION_RE = re.compile(r"@_user_\d+")
_MULTISPACE_RE = re.compile(r"[ \t]{2,}")
_POST_CONTENT_INVALID_RE = re.compile(r"content format of the post type is incorrect", re.IGNORECASE)
@ -188,7 +193,17 @@ _APPROVAL_LABEL_MAP: Dict[str, str] = {
}
_FEISHU_BOT_MSG_TRACK_SIZE = 512 # LRU size for tracking sent message IDs
_FEISHU_REPLY_FALLBACK_CODES = frozenset({230011, 231003}) # reply target withdrawn/missing → create fallback
_FEISHU_ACK_EMOJI = "OK"
# Feishu reactions render as prominent badges, unlike Discord/Telegram's
# small footer emoji — a success badge on every message would add noise, so
# we only mark start (Typing) and failure (CrossMark); the reply itself is
# the success signal.
_FEISHU_REACTION_IN_PROGRESS = "Typing"
_FEISHU_REACTION_FAILURE = "CrossMark"
# Bound on the (message_id → reaction_id) handle cache. Happy-path entries
# drain on completion; the cap is a safeguard against unbounded growth from
# delete-failures, not a capacity plan.
_FEISHU_PROCESSING_REACTION_CACHE_SIZE = 1024
# QR onboarding constants
_ONBOARD_ACCOUNTS_URLS = {
@ -430,23 +445,66 @@ def _coerce_required_int(value: Any, default: int, min_value: int = 0) -> int:
def _build_markdown_post_payload(content: str) -> str:
rows = _build_markdown_post_rows(content)
return json.dumps(
{
"zh_cn": {
"content": [
[
{
"tag": "md",
"text": content,
}
]
],
"content": rows,
}
},
ensure_ascii=False,
)
def _build_markdown_post_rows(content: str) -> List[List[Dict[str, str]]]:
    """Build Feishu post rows while isolating fenced code blocks.

    Feishu's `md` renderer can swallow trailing content when a fenced code block
    appears inside one large markdown element. Split the reply at real fence
    lines so prose before/after the code block remains visible while code stays
    in a dedicated row.
    """
    if not content:
        return [[{"tag": "md", "text": ""}]]
    if "```" not in content:
        return [[{"tag": "md", "text": content}]]

    rows: List[List[Dict[str, str]]] = []
    buffer: List[str] = []
    inside_fence = False

    def _emit_buffer() -> None:
        # Append the accumulated lines as one `md` row, skipping
        # whitespace-only segments, then reset the accumulator.
        nonlocal buffer
        if not buffer:
            return
        joined = "\n".join(buffer)
        if joined.strip():
            rows.append([{"tag": "md", "text": joined}])
        buffer = []

    for line in content.splitlines():
        candidate = line.strip()
        # Inside a fence only a bare ``` closes it; outside, an opening
        # fence may carry a language tag.
        fence_re = _MARKDOWN_FENCE_CLOSE_RE if inside_fence else _MARKDOWN_FENCE_OPEN_RE
        if fence_re.match(candidate):
            if not inside_fence:
                _emit_buffer()  # prose before the code block gets its own row
            buffer.append(line)
            inside_fence = not inside_fence
            if not inside_fence:
                _emit_buffer()  # the fenced block just closed — emit it whole
        else:
            buffer.append(line)

    _emit_buffer()
    return rows or [[{"tag": "md", "text": content}]]
def parse_feishu_post_payload(payload: Any) -> FeishuPostParseResult:
resolved = _resolve_post_payload(payload)
if not resolved:
@ -1096,6 +1154,9 @@ class FeishuAdapter(BasePlatformAdapter):
# Exec approval button state (approval_id → {session_key, message_id, chat_id})
self._approval_state: Dict[int, Dict[str, str]] = {}
self._approval_counter = itertools.count(1)
# Feishu reaction deletion requires the opaque reaction_id returned
# by create, so we cache it per message_id.
self._pending_processing_reactions: "OrderedDict[str, str]" = OrderedDict()
self._load_seen_message_ids()
@staticmethod
@ -1423,6 +1484,8 @@ class FeishuAdapter(BasePlatformAdapter):
chat_id: str,
message_id: str,
content: str,
*,
finalize: bool = False,
) -> SendResult:
"""Edit a previously sent Feishu text/post message."""
if not self._client:
@ -1925,8 +1988,8 @@ class FeishuAdapter(BasePlatformAdapter):
if not message_id or self._is_duplicate(message_id):
logger.debug("[Feishu] Dropping duplicate/missing message_id: %s", message_id)
return
if getattr(sender, "sender_type", "") == "bot":
logger.debug("[Feishu] Dropping bot-originated event: %s", message_id)
if self._is_self_sent_bot_message(event):
logger.debug("[Feishu] Dropping self-sent bot event: %s", message_id)
return
chat_type = getattr(message, "chat_type", "p2p")
@ -2003,12 +2066,12 @@ class FeishuAdapter(BasePlatformAdapter):
operator_type,
emoji_type,
)
# Only process reactions from real users. Ignore app/bot-generated reactions
# and Hermes' own ACK emoji to avoid feedback loops.
# Drop bot/app-origin reactions to break the feedback loop from our
# own lifecycle reactions. A human reacting with the same emoji (e.g.
# clicking Typing on a bot message) is still routed through.
loop = self._loop
if (
operator_type in {"bot", "app"}
or emoji_type == _FEISHU_ACK_EMOJI
or not message_id
or loop is None
or bool(getattr(loop, "is_closed", lambda: False)())
@ -2232,33 +2295,35 @@ class FeishuAdapter(BasePlatformAdapter):
async def _handle_message_with_guards(self, event: MessageEvent) -> None:
"""Dispatch a single event through the agent pipeline with per-chat serialization
and a persistent ACK emoji reaction before processing starts.
before handing the event off to the agent.
- Per-chat lock: ensures messages in the same chat are processed one at a time
(matches openclaw's createChatQueue serial queue behaviour).
- ACK indicator: adds a CHECK reaction to the triggering message before handing
off to the agent and leaves it in place as a receipt marker.
Per-chat lock ensures messages in the same chat are processed one at a
time (matches openclaw's createChatQueue serial queue behaviour).
"""
chat_id = getattr(event.source, "chat_id", "") or "" if event.source else ""
chat_lock = self._get_chat_lock(chat_id)
async with chat_lock:
message_id = event.message_id
if message_id:
await self._add_ack_reaction(message_id)
await self.handle_message(event)
async def _add_ack_reaction(self, message_id: str) -> Optional[str]:
"""Add a persistent ACK emoji reaction to signal the message was received."""
if not self._client or not message_id:
# =========================================================================
# Processing status reactions
# =========================================================================
def _reactions_enabled(self) -> bool:
return os.getenv("FEISHU_REACTIONS", "true").strip().lower() not in ("false", "0", "no")
async def _add_reaction(self, message_id: str, emoji_type: str) -> Optional[str]:
"""Return the reaction_id on success, else None. The id is needed later for deletion."""
if not self._client or not message_id or not emoji_type:
return None
try:
from lark_oapi.api.im.v1 import ( # lazy import — keeps optional dep optional
from lark_oapi.api.im.v1 import (
CreateMessageReactionRequest,
CreateMessageReactionRequestBody,
)
body = (
CreateMessageReactionRequestBody.builder()
.reaction_type({"emoji_type": _FEISHU_ACK_EMOJI})
.reaction_type({"emoji_type": emoji_type})
.build()
)
request = (
@ -2271,16 +2336,93 @@ class FeishuAdapter(BasePlatformAdapter):
if response and getattr(response, "success", lambda: False)():
data = getattr(response, "data", None)
return getattr(data, "reaction_id", None)
logger.warning(
"[Feishu] Failed to add ack reaction to %s: code=%s msg=%s",
logger.debug(
"[Feishu] Add reaction %s on %s rejected: code=%s msg=%s",
emoji_type,
message_id,
getattr(response, "code", None),
getattr(response, "msg", None),
)
except Exception:
logger.warning("[Feishu] Failed to add ack reaction to %s", message_id, exc_info=True)
logger.warning(
"[Feishu] Add reaction %s on %s raised",
emoji_type,
message_id,
exc_info=True,
)
return None
async def _remove_reaction(self, message_id: str, reaction_id: str) -> bool:
    """Delete a reaction previously created on *message_id*.

    Returns True only when the Feishu API confirms the deletion; a rejected
    call, a missing client/argument, or any transport error yields False.
    """
    if not self._client or not message_id or not reaction_id:
        return False
    try:
        from lark_oapi.api.im.v1 import DeleteMessageReactionRequest

        builder = DeleteMessageReactionRequest.builder()
        request = builder.message_id(message_id).reaction_id(reaction_id).build()
        # The lark client is synchronous — run it off the event loop.
        response = await asyncio.to_thread(
            self._client.im.v1.message_reaction.delete, request
        )
        succeeded = bool(response) and getattr(response, "success", lambda: False)()
        if succeeded:
            return True
        # API-level rejection (e.g. reaction already gone) — debug only.
        logger.debug(
            "[Feishu] Remove reaction %s on %s rejected: code=%s msg=%s",
            reaction_id,
            message_id,
            getattr(response, "code", None),
            getattr(response, "msg", None),
        )
    except Exception:
        logger.warning(
            "[Feishu] Remove reaction %s on %s raised",
            reaction_id,
            message_id,
            exc_info=True,
        )
    return False
def _remember_processing_reaction(self, message_id: str, reaction_id: str) -> None:
    """Record message_id → reaction_id, evicting the oldest entries past the cap."""
    pending = self._pending_processing_reactions
    pending[message_id] = reaction_id
    # Refresh LRU position even if the key already existed.
    pending.move_to_end(message_id)
    overflow = len(pending) - _FEISHU_PROCESSING_REACTION_CACHE_SIZE
    for _ in range(overflow):
        pending.popitem(last=False)
def _pop_processing_reaction(self, message_id: str) -> Optional[str]:
return self._pending_processing_reactions.pop(message_id, None)
async def on_processing_start(self, event: MessageEvent) -> None:
    """Badge the triggering message with the in-progress (Typing) reaction."""
    if not self._reactions_enabled():
        return
    message_id = event.message_id
    if not message_id:
        return
    if message_id in self._pending_processing_reactions:
        # Already marked — avoid stacking a duplicate badge.
        return
    reaction_id = await self._add_reaction(message_id, _FEISHU_REACTION_IN_PROGRESS)
    if reaction_id:
        # Keep the handle so on_processing_complete can delete it later.
        self._remember_processing_reaction(message_id, reaction_id)
async def on_processing_complete(
    self, event: MessageEvent, outcome: ProcessingOutcome
) -> None:
    """Clear the in-progress reaction; swap in a failure badge on FAILURE."""
    if not self._reactions_enabled():
        return
    message_id = event.message_id
    if not message_id:
        return
    start_reaction_id = self._pending_processing_reactions.get(message_id)
    if start_reaction_id:
        removed = await self._remove_reaction(message_id, start_reaction_id)
        if not removed:
            # Don't stack a second badge on top of a Typing we couldn't
            # remove — UI would read as both "working" and "done/failed"
            # simultaneously. Keep the handle so LRU eventually evicts it.
            return
    self._pop_processing_reaction(message_id)
    if outcome is ProcessingOutcome.FAILURE:
        await self._add_reaction(message_id, _FEISHU_REACTION_FAILURE)
# =========================================================================
# Webhook server and security
# =========================================================================
@ -3249,6 +3391,23 @@ class FeishuAdapter(BasePlatformAdapter):
return self._post_mentions_bot(normalized.mentioned_ids)
return False
def _is_self_sent_bot_message(self, event: Any) -> bool:
"""Return True only for Feishu events emitted by this Hermes bot."""
sender = getattr(event, "sender", None)
sender_type = str(getattr(sender, "sender_type", "") or "").strip().lower()
if sender_type not in {"bot", "app"}:
return False
sender_id = getattr(sender, "sender_id", None)
sender_open_id = str(getattr(sender_id, "open_id", "") or "").strip()
sender_user_id = str(getattr(sender_id, "user_id", "") or "").strip()
if self._bot_open_id and sender_open_id == self._bot_open_id:
return True
if self._bot_user_id and sender_user_id == self._bot_user_id:
return True
return False
def _message_mentions_bot(self, mentions: List[Any]) -> bool:
"""Check whether any mention targets the configured or inferred bot identity."""
for mention in mentions:
@ -3276,10 +3435,55 @@ class FeishuAdapter(BasePlatformAdapter):
return False
async def _hydrate_bot_identity(self) -> None:
"""Best-effort discovery of bot identity for precise group mention gating."""
"""Best-effort discovery of bot identity for precise group mention gating
and self-sent bot event filtering.
Populates ``_bot_open_id`` and ``_bot_name`` from /open-apis/bot/v3/info
(no extra scopes required beyond the tenant access token). Falls back to
the application info endpoint for ``_bot_name`` only when the first probe
doesn't return it. Each field is hydrated independently — a value already
supplied via env vars (FEISHU_BOT_OPEN_ID / FEISHU_BOT_USER_ID /
FEISHU_BOT_NAME) is preserved and skips its probe.
"""
if not self._client:
return
if any((self._bot_open_id, self._bot_user_id, self._bot_name)):
if self._bot_open_id and self._bot_name:
# Everything the self-send filter and precise mention gate need is
# already in place; nothing to probe.
return
# Primary probe: /open-apis/bot/v3/info — returns bot_name + open_id, no
# extra scopes required. This is the same endpoint the onboarding wizard
# uses via probe_bot().
if not self._bot_open_id or not self._bot_name:
try:
resp = await asyncio.to_thread(
self._client.request,
method="GET",
url="/open-apis/bot/v3/info",
body=None,
raw_response=True,
)
content = getattr(resp, "content", None)
if content:
payload = json.loads(content)
parsed = _parse_bot_response(payload) or {}
open_id = (parsed.get("bot_open_id") or "").strip()
bot_name = (parsed.get("bot_name") or "").strip()
if open_id and not self._bot_open_id:
self._bot_open_id = open_id
if bot_name and not self._bot_name:
self._bot_name = bot_name
except Exception:
logger.debug(
"[Feishu] /bot/v3/info probe failed during hydration",
exc_info=True,
)
# Fallback probe for _bot_name only: application info endpoint. Needs
# admin:app.info:readonly or application:application:self_manage scope,
# so it's best-effort.
if self._bot_name:
return
try:
request = self._build_get_application_request(app_id=self._app_id, lang="en_us")
@ -3288,17 +3492,17 @@ class FeishuAdapter(BasePlatformAdapter):
code = getattr(response, "code", None)
if code == 99991672:
logger.warning(
"[Feishu] Unable to hydrate bot identity from application info. "
"[Feishu] Unable to hydrate bot name from application info. "
"Grant admin:app.info:readonly or application:application:self_manage "
"so group @mention gating can resolve the bot name precisely."
)
return
app = getattr(getattr(response, "data", None), "app", None)
app_name = (getattr(app, "app_name", None) or "").strip()
if app_name:
if app_name and not self._bot_name:
self._bot_name = app_name
except Exception:
logger.debug("[Feishu] Failed to hydrate bot identity", exc_info=True)
logger.debug("[Feishu] Failed to hydrate bot name from application info", exc_info=True)
# =========================================================================
# Deduplication — seen message ID cache (persistent)

View file

@ -825,7 +825,7 @@ class MatrixAdapter(BasePlatformAdapter):
async def edit_message(
self, chat_id: str, message_id: str, content: str
self, chat_id: str, message_id: str, content: str, *, finalize: bool = False
) -> SendResult:
"""Edit an existing message (via m.replace)."""

View file

@ -304,7 +304,7 @@ class MattermostAdapter(BasePlatformAdapter):
)
async def edit_message(
self, chat_id: str, message_id: str, content: str
self, chat_id: str, message_id: str, content: str, *, finalize: bool = False
) -> SendResult:
"""Edit an existing post."""
formatted = self.format_message(content)
@ -410,7 +410,6 @@ class MattermostAdapter(BasePlatformAdapter):
logger.warning("Mattermost: blocked unsafe URL (SSRF protection)")
return await self.send(chat_id, f"{caption or ''}\n{url}".strip(), reply_to)
import asyncio
import aiohttp
last_exc = None

View file

@ -26,9 +26,8 @@ from .adapter import ( # noqa: F401
# -- Onboard (QR-code scan-to-configure) -----------------------------------
from .onboard import ( # noqa: F401
BindStatus,
create_bind_task,
poll_bind_result,
build_connect_url,
qr_register,
)
from .crypto import decrypt_secret, generate_bind_key # noqa: F401
@ -44,9 +43,8 @@ __all__ = [
"_ssrf_redirect_guard",
# onboard
"BindStatus",
"create_bind_task",
"poll_bind_result",
"build_connect_url",
"qr_register",
# crypto
"decrypt_secret",
"generate_bind_key",

View file

@ -1086,11 +1086,8 @@ class QQAdapter(BasePlatformAdapter):
return MessageType.VIDEO
if "image" in first_type or "photo" in first_type:
return MessageType.PHOTO
# Unknown content type with an attachment — don't assume PHOTO
# to prevent non-image files from being sent to vision analysis.
logger.debug(
"[%s] Unknown media content_type '%s', defaulting to TEXT",
self._log_tag,
"Unknown media content_type '%s', defaulting to TEXT",
first_type,
)
return MessageType.TEXT
@ -1826,14 +1823,12 @@ class QQAdapter(BasePlatformAdapter):
body["file_name"] = file_name
# Retry transient upload failures
last_exc = None
for attempt in range(3):
try:
return await self._api_request(
"POST", path, body, timeout=FILE_UPLOAD_TIMEOUT
)
except RuntimeError as exc:
last_exc = exc
err_msg = str(exc)
if any(
kw in err_msg
@ -1842,8 +1837,8 @@ class QQAdapter(BasePlatformAdapter):
raise
if attempt < 2:
await asyncio.sleep(1.5 * (attempt + 1))
raise last_exc # type: ignore[misc]
else:
raise
# Maximum time (seconds) to wait for reconnection before giving up on send.
_RECONNECT_WAIT_SECONDS = 15.0

View file

@ -1,6 +1,10 @@
"""
QQBot scan-to-configure (QR code onboard) module.
Mirrors the Feishu onboarding pattern: synchronous HTTP + a single public
entry-point ``qr_register()`` that handles the full flow (create task →
display QR code → poll → decrypt credentials).
Calls the ``q.qq.com`` ``create_bind_task`` / ``poll_bind_result`` APIs to
generate a QR-code URL and poll for scan completion. On success the caller
receives the bot's *app_id*, *client_secret* (decrypted locally), and the
@ -12,18 +16,20 @@ Reference: https://bot.q.qq.com/wiki/develop/api-v2/
from __future__ import annotations
import logging
import time
from enum import IntEnum
from typing import Tuple
from typing import Optional, Tuple
from urllib.parse import quote
from .constants import (
ONBOARD_API_TIMEOUT,
ONBOARD_CREATE_PATH,
ONBOARD_POLL_INTERVAL,
ONBOARD_POLL_PATH,
PORTAL_HOST,
QR_URL_TEMPLATE,
)
from .crypto import generate_bind_key
from .crypto import decrypt_secret, generate_bind_key
from .utils import get_api_headers
logger = logging.getLogger(__name__)
@ -35,7 +41,7 @@ logger = logging.getLogger(__name__)
class BindStatus(IntEnum):
"""Status codes returned by ``poll_bind_result``."""
"""Status codes returned by ``_poll_bind_result``."""
NONE = 0
PENDING = 1
@ -44,18 +50,40 @@ class BindStatus(IntEnum):
# ---------------------------------------------------------------------------
# Public API
# QR rendering
# ---------------------------------------------------------------------------
try:
import qrcode as _qrcode_mod
except (ImportError, TypeError):
_qrcode_mod = None # type: ignore[assignment]
def _render_qr(url: str) -> bool:
"""Try to render a QR code in the terminal. Returns True if successful."""
if _qrcode_mod is None:
return False
try:
qr = _qrcode_mod.QRCode(
error_correction=_qrcode_mod.constants.ERROR_CORRECT_M,
border=2,
)
qr.add_data(url)
qr.make(fit=True)
qr.print_ascii(invert=True)
return True
except Exception:
return False
# ---------------------------------------------------------------------------
# Synchronous HTTP helpers (mirrors Feishu _post_registration pattern)
# ---------------------------------------------------------------------------
async def create_bind_task(
timeout: float = ONBOARD_API_TIMEOUT,
) -> Tuple[str, str]:
def _create_bind_task(timeout: float = ONBOARD_API_TIMEOUT) -> Tuple[str, str]:
"""Create a bind task and return *(task_id, aes_key_base64)*.
The AES key is generated locally and sent to the server so it can
encrypt the bot credentials before returning them.
Raises:
RuntimeError: If the API returns a non-zero ``retcode``.
"""
@ -64,8 +92,8 @@ async def create_bind_task(
url = f"https://{PORTAL_HOST}{ONBOARD_CREATE_PATH}"
key = generate_bind_key()
async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
resp = await client.post(url, json={"key": key}, headers=get_api_headers())
with httpx.Client(timeout=timeout, follow_redirects=True) as client:
resp = client.post(url, json={"key": key}, headers=get_api_headers())
resp.raise_for_status()
data = resp.json()
@ -80,7 +108,7 @@ async def create_bind_task(
return task_id, key
async def poll_bind_result(
def _poll_bind_result(
task_id: str,
timeout: float = ONBOARD_API_TIMEOUT,
) -> Tuple[BindStatus, str, str, str]:
@ -89,12 +117,6 @@ async def poll_bind_result(
Returns:
A 4-tuple of ``(status, bot_appid, bot_encrypt_secret, user_openid)``.
* ``bot_encrypt_secret`` is AES-256-GCM encrypted decrypt it with
:func:`~gateway.platforms.qqbot.crypto.decrypt_secret` using the
key from :func:`create_bind_task`.
* ``user_openid`` is the OpenID of the person who scanned the code
(available when ``status == COMPLETED``).
Raises:
RuntimeError: If the API returns a non-zero ``retcode``.
"""
@ -102,8 +124,8 @@ async def poll_bind_result(
url = f"https://{PORTAL_HOST}{ONBOARD_POLL_PATH}"
async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
resp = await client.post(url, json={"task_id": task_id}, headers=get_api_headers())
with httpx.Client(timeout=timeout, follow_redirects=True) as client:
resp = client.post(url, json={"task_id": task_id}, headers=get_api_headers())
resp.raise_for_status()
data = resp.json()
@ -122,3 +144,77 @@ async def poll_bind_result(
def build_connect_url(task_id: str) -> str:
"""Build the QR-code target URL for a given *task_id*."""
return QR_URL_TEMPLATE.format(task_id=quote(task_id))
# ---------------------------------------------------------------------------
# Public entry-point
# ---------------------------------------------------------------------------
_MAX_REFRESHES = 3
def qr_register(timeout_seconds: int = 600) -> Optional[dict]:
    """Run the QQBot scan-to-configure QR registration flow.

    Mirrors ``feishu.qr_register()``: handles create → display → poll →
    decrypt in one call. Unexpected errors propagate to the caller.

    :returns:
        ``{"app_id": ..., "client_secret": ..., "user_openid": ...}`` on
        success, or ``None`` on failure / expiry / cancellation.
    """
    # One shared wall-clock deadline across all QR refreshes — refreshing an
    # expired code does NOT extend the user's total time budget.
    deadline = time.monotonic() + timeout_seconds

    for refresh_count in range(_MAX_REFRESHES + 1):
        # ── Create bind task ──
        try:
            task_id, aes_key = _create_bind_task()
        except Exception as exc:
            logger.warning("[QQBot onboard] Failed to create bind task: %s", exc)
            return None

        url = build_connect_url(task_id)

        # ── Display QR code + URL ──
        print()
        if _render_qr(url):
            print(f" Scan the QR code above, or open this URL directly:\n {url}")
        else:
            # qrcode isn't installed (or rendering failed) — URL-only fallback.
            print(f" Open this URL in QQ on your phone:\n {url}")
            print(" Tip: pip install qrcode to display a scannable QR code here")
        print()

        # ── Poll loop ──
        while time.monotonic() < deadline:
            try:
                status, app_id, encrypted_secret, user_openid = _poll_bind_result(task_id)
            except Exception:
                # Transient network/API hiccup — retry on the next tick.
                time.sleep(ONBOARD_POLL_INTERVAL)
                continue
            if status == BindStatus.COMPLETED:
                # The secret arrives encrypted; decrypt locally with the AES
                # key generated in _create_bind_task().
                client_secret = decrypt_secret(encrypted_secret, aes_key)
                print()
                print(f" QR scan complete! (App ID: {app_id})")
                if user_openid:
                    print(f" Scanner's OpenID: {user_openid}")
                return {
                    "app_id": app_id,
                    "client_secret": client_secret,
                    "user_openid": user_openid,
                }
            if status == BindStatus.EXPIRED:
                if refresh_count >= _MAX_REFRESHES:
                    logger.warning("[QQBot onboard] QR code expired %d times — giving up", _MAX_REFRESHES)
                    return None
                print(f"\n QR code expired, refreshing... ({refresh_count + 1}/{_MAX_REFRESHES})")
                break  # next for-loop iteration creates a new task
            time.sleep(ONBOARD_POLL_INTERVAL)
        else:
            # deadline reached without completing
            logger.warning("[QQBot onboard] Poll timed out after %ds", timeout_seconds)
            return None
    return None

View file

@ -18,6 +18,7 @@ import logging
import os
import random
import time
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional, Any
@ -127,6 +128,27 @@ def _render_mentions(text: str, mentions: list) -> str:
return text
def _is_signal_service_id(value: str) -> bool:
"""Return True if *value* already looks like a Signal service identifier."""
if not value:
return False
if value.startswith("PNI:") or value.startswith("u:"):
return True
try:
uuid.UUID(value)
return True
except (ValueError, AttributeError, TypeError):
return False
def _looks_like_e164_number(value: str) -> bool:
"""Return True for a plausible E.164 phone number."""
if not value or not value.startswith("+"):
return False
digits = value[1:]
return digits.isdigit() and 7 <= len(digits) <= 15
def check_signal_requirements() -> bool:
    """Check if Signal is configured (has URL and account)."""
    url = os.getenv("SIGNAL_HTTP_URL")
    account = os.getenv("SIGNAL_ACCOUNT")
    return bool(url) and bool(account)
@ -179,6 +201,12 @@ class SignalAdapter(BasePlatformAdapter):
# in Note to Self / self-chat mode (mirrors WhatsApp recentlySentIds)
self._recent_sent_timestamps: set = set()
self._max_recent_timestamps = 50
# Signal increasingly exposes ACI/PNI UUIDs as stable recipient IDs.
# Keep a best-effort mapping so outbound sends can upgrade from a
# phone number to the corresponding UUID when signal-cli prefers it.
self._recipient_uuid_by_number: Dict[str, str] = {}
self._recipient_number_by_uuid: Dict[str, str] = {}
self._recipient_cache_lock = asyncio.Lock()
logger.info("Signal adapter initialized: url=%s account=%s groups=%s",
self.http_url, redact_phone(self.account),
@ -195,31 +223,40 @@ class SignalAdapter(BasePlatformAdapter):
return False
# Acquire scoped lock to prevent duplicate Signal listeners for the same phone
lock_acquired = False
try:
if not self._acquire_platform_lock('signal-phone', self.account, 'Signal account'):
return False
lock_acquired = True
except Exception as e:
logger.warning("Signal: Could not acquire phone lock (non-fatal): %s", e)
self.client = httpx.AsyncClient(timeout=30.0)
# Health check — verify signal-cli daemon is reachable
try:
resp = await self.client.get(f"{self.http_url}/api/v1/check", timeout=10.0)
if resp.status_code != 200:
logger.error("Signal: health check failed (status %d)", resp.status_code)
# Health check — verify signal-cli daemon is reachable
try:
resp = await self.client.get(f"{self.http_url}/api/v1/check", timeout=10.0)
if resp.status_code != 200:
logger.error("Signal: health check failed (status %d)", resp.status_code)
return False
except Exception as e:
logger.error("Signal: cannot reach signal-cli at %s: %s", self.http_url, e)
return False
except Exception as e:
logger.error("Signal: cannot reach signal-cli at %s: %s", self.http_url, e)
return False
self._running = True
self._last_sse_activity = time.time()
self._sse_task = asyncio.create_task(self._sse_listener())
self._health_monitor_task = asyncio.create_task(self._health_monitor())
self._running = True
self._last_sse_activity = time.time()
self._sse_task = asyncio.create_task(self._sse_listener())
self._health_monitor_task = asyncio.create_task(self._health_monitor())
logger.info("Signal: connected to %s", self.http_url)
return True
logger.info("Signal: connected to %s", self.http_url)
return True
finally:
if not self._running:
if self.client:
await self.client.aclose()
self.client = None
if lock_acquired:
self._release_platform_lock()
async def disconnect(self) -> None:
"""Stop SSE listener and clean up."""
@ -400,6 +437,7 @@ class SignalAdapter(BasePlatformAdapter):
)
sender_name = envelope_data.get("sourceName", "")
sender_uuid = envelope_data.get("sourceUuid", "")
self._remember_recipient_identifiers(sender, sender_uuid)
if not sender:
logger.debug("Signal: ignoring envelope with no sender")
@ -518,6 +556,64 @@ class SignalAdapter(BasePlatformAdapter):
await self.handle_message(event)
def _remember_recipient_identifiers(self, number: Optional[str], service_id: Optional[str]) -> None:
    """Cache any number↔UUID mapping observed from Signal envelopes."""
    if not number or not service_id:
        return
    # Only cache values that actually look like Signal service identifiers.
    if not _is_signal_service_id(service_id):
        return
    self._recipient_uuid_by_number[number] = service_id
    self._recipient_number_by_uuid[service_id] = number
def _extract_contact_uuid(self, contact: Any, phone_number: str) -> Optional[str]:
    """Best-effort extraction of a Signal service ID from listContacts output.

    Returns the contact's service id only when the contact record matches
    *phone_number* (via its `number` or `recipient` field); otherwise None.
    """
    if not isinstance(contact, dict):
        return None
    service_id = contact.get("uuid") or contact.get("serviceId")
    if not service_id:
        # Some signal-cli versions nest the id under the profile object.
        profile = contact.get("profile")
        if isinstance(profile, dict):
            service_id = profile.get("serviceId") or profile.get("uuid")
    if not service_id or not _is_signal_service_id(service_id):
        return None
    if phone_number in (contact.get("number"), contact.get("recipient")):
        return service_id
    return None
async def _resolve_recipient(self, chat_id: str) -> str:
    """Return the preferred Signal recipient identifier for a direct chat.

    Upgrades an E.164 phone number to its known ACI/PNI service UUID when
    the mapping has been observed (signal-cli increasingly prefers UUID
    recipients). Group ids, values that already look like service ids, and
    anything that isn't a plausible phone number pass through unchanged.
    """
    if (
        not chat_id
        or chat_id.startswith("group:")
        or _is_signal_service_id(chat_id)
        or not _looks_like_e164_number(chat_id)
    ):
        return chat_id
    # Fast path: mapping already learned from an inbound envelope.
    cached = self._recipient_uuid_by_number.get(chat_id)
    if cached:
        return cached
    async with self._recipient_cache_lock:
        # Re-check after acquiring the lock — a concurrent resolver may
        # have populated the cache while we waited.
        cached = self._recipient_uuid_by_number.get(chat_id)
        if cached:
            return cached
        # Fall back to a listContacts RPC; _extract_contact_uuid only
        # returns an id for the contact matching this chat_id, so at most
        # one mapping is cached here.
        contacts = await self._rpc("listContacts", {
            "account": self.account,
            "allRecipients": True,
        })
        if isinstance(contacts, list):
            for contact in contacts:
                number = contact.get("number") if isinstance(contact, dict) else None
                service_id = self._extract_contact_uuid(contact, chat_id)
                if number and service_id:
                    self._remember_recipient_identifiers(number, service_id)
    # Best effort: fall back to the phone number if no UUID was learned.
    return self._recipient_uuid_by_number.get(chat_id, chat_id)
# ------------------------------------------------------------------
# Attachment Handling
# ------------------------------------------------------------------
@ -633,7 +729,7 @@ class SignalAdapter(BasePlatformAdapter):
if chat_id.startswith("group:"):
params["groupId"] = chat_id[6:]
else:
params["recipient"] = [chat_id]
params["recipient"] = [await self._resolve_recipient(chat_id)]
result = await self._rpc("send", params)
@ -684,7 +780,7 @@ class SignalAdapter(BasePlatformAdapter):
if chat_id.startswith("group:"):
params["groupId"] = chat_id[6:]
else:
params["recipient"] = [chat_id]
params["recipient"] = [await self._resolve_recipient(chat_id)]
fails = self._typing_failures.get(chat_id, 0)
result = await self._rpc(
@ -745,7 +841,7 @@ class SignalAdapter(BasePlatformAdapter):
if chat_id.startswith("group:"):
params["groupId"] = chat_id[6:]
else:
params["recipient"] = [chat_id]
params["recipient"] = [await self._resolve_recipient(chat_id)]
result = await self._rpc("send", params)
if result is not None:
@ -784,7 +880,7 @@ class SignalAdapter(BasePlatformAdapter):
if chat_id.startswith("group:"):
params["groupId"] = chat_id[6:]
else:
params["recipient"] = [chat_id]
params["recipient"] = [await self._resolve_recipient(chat_id)]
result = await self._rpc("send", params)
if result is not None:

View file

@ -38,6 +38,7 @@ from gateway.platforms.base import (
BasePlatformAdapter,
MessageEvent,
MessageType,
ProcessingOutcome,
SendResult,
SUPPORTED_DOCUMENT_TYPES,
safe_url_for_log,
@ -113,6 +114,11 @@ class SlackAdapter(BasePlatformAdapter):
# Cache for _fetch_thread_context results: cache_key → _ThreadContextCache
self._thread_context_cache: Dict[str, _ThreadContextCache] = {}
self._THREAD_CACHE_TTL = 60.0
# Track message IDs that should get reaction lifecycle (DMs / @mentions).
self._reacting_message_ids: set = set()
# Track active assistant thread status indicators so stop_typing can
# clear them (chat_id → thread_ts).
self._active_status_threads: Dict[str, str] = {}
async def connect(self) -> bool:
"""Connect to Slack via Socket Mode."""
@ -150,9 +156,11 @@ class SlackAdapter(BasePlatformAdapter):
except Exception as e:
logger.warning("[Slack] Failed to read %s: %s", tokens_file, e)
lock_acquired = False
try:
if not self._acquire_platform_lock('slack-app-token', app_token, 'Slack app token'):
return False
lock_acquired = True
# First token is the primary — used for AsyncApp / Socket Mode
primary_token = bot_tokens[0]
@ -228,6 +236,9 @@ class SlackAdapter(BasePlatformAdapter):
except Exception as e: # pragma: no cover - defensive logging
logger.error("[Slack] Connection failed: %s", e, exc_info=True)
return False
finally:
if lock_acquired and not self._running:
self._release_platform_lock()
async def disconnect(self) -> None:
"""Disconnect from Slack."""
@ -316,6 +327,8 @@ class SlackAdapter(BasePlatformAdapter):
chat_id: str,
message_id: str,
content: str,
*,
finalize: bool = False,
) -> SendResult:
"""Edit a previously sent Slack message."""
if not self._app:
@ -355,6 +368,7 @@ class SlackAdapter(BasePlatformAdapter):
if not thread_ts:
return # Can only set status in a thread context
self._active_status_threads[chat_id] = thread_ts
try:
await self._get_client(chat_id).assistant_threads_setStatus(
channel_id=chat_id,
@ -366,6 +380,22 @@ class SlackAdapter(BasePlatformAdapter):
# in an assistant-enabled context. Falls back to reactions.
logger.debug("[Slack] assistant.threads.setStatus failed: %s", e)
async def stop_typing(self, chat_id: str) -> None:
    """Clear the assistant thread status indicator."""
    if not self._app:
        return
    # pop() retrieves and forgets the active indicator in one step, so a
    # repeated stop_typing for the same chat is a harmless no-op.
    thread_ts = self._active_status_threads.pop(chat_id, None)
    if not thread_ts:
        return
    try:
        client = self._get_client(chat_id)
        await client.assistant_threads_setStatus(
            channel_id=chat_id,
            thread_ts=thread_ts,
            status="",
        )
    except Exception as e:
        logger.debug("[Slack] assistant.threads.setStatus clear failed: %s", e)
def _dm_top_level_threads_as_sessions(self) -> bool:
"""Whether top-level Slack DMs get per-message session threads.
@ -577,6 +607,38 @@ class SlackAdapter(BasePlatformAdapter):
logger.debug("[Slack] reactions.remove failed (%s): %s", emoji, e)
return False
def _reactions_enabled(self) -> bool:
"""Check if message reactions are enabled via config/env."""
return os.getenv("SLACK_REACTIONS", "true").lower() not in ("false", "0", "no")
async def on_processing_start(self, event: MessageEvent) -> None:
    """Add an in-progress reaction when message processing begins."""
    if not self._reactions_enabled():
        return
    message_ts = getattr(event, "message_id", None)
    # Only messages previously registered for the reaction lifecycle
    # (DMs / @mentions) get the in-progress marker.
    if not message_ts or message_ts not in self._reacting_message_ids:
        return
    channel = getattr(event.source, "chat_id", None)
    if not channel:
        return
    await self._add_reaction(channel, message_ts, "eyes")
async def on_processing_complete(self, event: MessageEvent, outcome: ProcessingOutcome) -> None:
    """Swap the in-progress reaction for a final success/failure reaction."""
    if not self._reactions_enabled():
        return
    message_ts = getattr(event, "message_id", None)
    if not message_ts or message_ts not in self._reacting_message_ids:
        return
    # Untrack first so a re-entrant completion for the same ts is a no-op.
    self._reacting_message_ids.discard(message_ts)
    channel = getattr(event.source, "chat_id", None)
    if not channel:
        return
    await self._remove_reaction(channel, message_ts, "eyes")
    final_emoji = {
        ProcessingOutcome.SUCCESS: "white_check_mark",
        ProcessingOutcome.FAILURE: "x",
    }.get(outcome)
    if final_emoji is not None:
        await self._add_reaction(channel, message_ts, final_emoji)
# ----- User identity resolution -----
async def _resolve_user_name(self, user_id: str, chat_id: str = "") -> str:
@ -1206,17 +1268,12 @@ class SlackAdapter(BasePlatformAdapter):
# Only react when bot is directly addressed (DM or @mention).
# In listen-all channels (require_mention=false), reacting to every
# casual message would be noisy.
_should_react = is_dm or is_mentioned
_should_react = (is_dm or is_mentioned) and self._reactions_enabled()
if _should_react:
await self._add_reaction(channel_id, ts, "eyes")
self._reacting_message_ids.add(ts)
await self.handle_message(msg_event)
if _should_react:
await self._remove_reaction(channel_id, ts, "eyes")
await self._add_reaction(channel_id, ts, "white_check_mark")
# ----- Approval button support (Block Kit) -----
async def send_exec_approval(
@ -1593,11 +1650,9 @@ class SlackAdapter(BasePlatformAdapter):
async def _download_slack_file(self, url: str, ext: str, audio: bool = False, team_id: str = "") -> str:
"""Download a Slack file using the bot token for auth, with retry."""
import asyncio
import httpx
bot_token = self._team_clients[team_id].token if team_id and team_id in self._team_clients else self.config.token
last_exc = None
async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client:
for attempt in range(3):
@ -1627,7 +1682,6 @@ class SlackAdapter(BasePlatformAdapter):
from gateway.platforms.base import cache_image_from_bytes
return cache_image_from_bytes(response.content, ext)
except (httpx.TimeoutException, httpx.HTTPStatusError) as exc:
last_exc = exc
if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code < 429:
raise
if attempt < 2:
@ -1636,15 +1690,12 @@ class SlackAdapter(BasePlatformAdapter):
await asyncio.sleep(1.5 * (attempt + 1))
continue
raise
raise last_exc
async def _download_slack_file_bytes(self, url: str, team_id: str = "") -> bytes:
"""Download a Slack file and return raw bytes, with retry."""
import asyncio
import httpx
bot_token = self._team_clients[team_id].token if team_id and team_id in self._team_clients else self.config.token
last_exc = None
async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client:
for attempt in range(3):
@ -1656,7 +1707,6 @@ class SlackAdapter(BasePlatformAdapter):
response.raise_for_status()
return response.content
except (httpx.TimeoutException, httpx.HTTPStatusError) as exc:
last_exc = exc
if isinstance(exc, httpx.HTTPStatusError) and exc.response.status_code < 429:
raise
if attempt < 2:
@ -1665,7 +1715,6 @@ class SlackAdapter(BasePlatformAdapter):
await asyncio.sleep(1.5 * (attempt + 1))
continue
raise
raise last_exc
# ── Channel mention gating ─────────────────────────────────────────────

View file

@ -11,6 +11,7 @@ import asyncio
import json
import logging
import os
import tempfile
import html as _html
import re
from typing import Dict, List, Optional, Any
@ -70,8 +71,10 @@ from gateway.platforms.base import (
SendResult,
cache_image_from_bytes,
cache_audio_from_bytes,
cache_video_from_bytes,
cache_document_from_bytes,
resolve_proxy_url,
SUPPORTED_VIDEO_TYPES,
SUPPORTED_DOCUMENT_TYPES,
utf16_len,
_prefix_within_utf16_limit,
@ -493,6 +496,13 @@ class TelegramAdapter(BasePlatformAdapter):
"[%s] DM topic '%s' already exists in chat %s (will be mapped from incoming messages)",
self.name, name, chat_id,
)
elif "not a forum" in error_text or "forums_disabled" in error_text:
logger.warning(
"[%s] Cannot create DM topic '%s' in chat %s: Topics mode is not enabled. "
"The user must open the DM with this bot in Telegram, tap the bot name "
"at the top, and enable 'Topics' in chat settings before topics can be created.",
self.name, name, chat_id,
)
else:
logger.warning(
"[%s] Failed to create DM topic '%s' in chat %s: %s",
@ -534,8 +544,23 @@ class TelegramAdapter(BasePlatformAdapter):
break
if changed:
with open(config_path, "w") as f:
_yaml.dump(config, f, default_flow_style=False, sort_keys=False)
fd, tmp_path = tempfile.mkstemp(
dir=str(config_path.parent),
suffix=".tmp",
prefix=".config_",
)
try:
with os.fdopen(fd, "w", encoding="utf-8") as f:
_yaml.dump(config, f, default_flow_style=False, sort_keys=False)
f.flush()
os.fsync(f.fileno())
os.replace(tmp_path, config_path)
except BaseException:
try:
os.unlink(tmp_path)
except OSError:
pass
raise
logger.info(
"[%s] Persisted thread_id=%s for topic '%s' in config.yaml",
self.name, thread_id, topic_name,
@ -769,8 +794,28 @@ class TelegramAdapter(BasePlatformAdapter):
# Telegram pushes updates to our HTTP endpoint. This
# enables cloud platforms (Fly.io, Railway) to auto-wake
# suspended machines on inbound HTTP traffic.
#
# SECURITY: TELEGRAM_WEBHOOK_SECRET is REQUIRED. Without it,
# python-telegram-bot passes secret_token=None and the
# webhook endpoint accepts any HTTP POST — attackers can
# inject forged updates as if from Telegram. Refuse to
# start rather than silently run in fail-open mode.
# See GHSA-3vpc-7q5r-276h.
webhook_port = int(os.getenv("TELEGRAM_WEBHOOK_PORT", "8443"))
webhook_secret = os.getenv("TELEGRAM_WEBHOOK_SECRET", "").strip() or None
webhook_secret = os.getenv("TELEGRAM_WEBHOOK_SECRET", "").strip()
if not webhook_secret:
raise RuntimeError(
"TELEGRAM_WEBHOOK_SECRET is required when "
"TELEGRAM_WEBHOOK_URL is set. Without it, the "
"webhook endpoint accepts forged updates from "
"anyone who can reach it — see "
"https://github.com/NousResearch/hermes-agent/"
"security/advisories/GHSA-3vpc-7q5r-276h.\n\n"
"Generate a secret and set it in your .env:\n"
" export TELEGRAM_WEBHOOK_SECRET=\"$(openssl rand -hex 32)\"\n\n"
"Then register it with Telegram when setting the "
"webhook via setWebhook's secret_token parameter."
)
from urllib.parse import urlparse
webhook_path = urlparse(webhook_url).path or "/telegram"
@ -1081,6 +1126,8 @@ class TelegramAdapter(BasePlatformAdapter):
chat_id: str,
message_id: str,
content: str,
*,
finalize: bool = False,
) -> SendResult:
"""Edit a previously sent Telegram message."""
if not self._bot:
@ -1657,6 +1704,21 @@ class TelegramAdapter(BasePlatformAdapter):
except Exception as exc:
logger.error("Failed to write update response from callback: %s", exc)
def _missing_media_path_error(self, label: str, path: str) -> str:
"""Build an actionable file-not-found error for gateway MEDIA delivery.
Paths like /workspace/... or /output/... often only exist inside the
Docker sandbox, while the gateway process runs on the host.
"""
error = f"{label} file not found: {path}"
if path.startswith(("/workspace/", "/output/", "/outputs/")):
error += (
" (path may only exist inside the Docker sandbox. "
"Bind-mount a host directory and emit the host-visible "
"path in MEDIA: for gateway file delivery.)"
)
return error
async def send_voice(
self,
chat_id: str,
@ -1671,9 +1733,8 @@ class TelegramAdapter(BasePlatformAdapter):
return SendResult(success=False, error="Not connected")
try:
import os
if not os.path.exists(audio_path):
return SendResult(success=False, error=f"Audio file not found: {audio_path}")
return SendResult(success=False, error=self._missing_media_path_error("Audio", audio_path))
with open(audio_path, "rb") as audio_file:
# .ogg files -> send as voice (round playable bubble)
@ -1720,9 +1781,8 @@ class TelegramAdapter(BasePlatformAdapter):
return SendResult(success=False, error="Not connected")
try:
import os
if not os.path.exists(image_path):
return SendResult(success=False, error=f"Image file not found: {image_path}")
return SendResult(success=False, error=self._missing_media_path_error("Image", image_path))
_thread = self._metadata_thread_id(metadata)
with open(image_path, "rb") as image_file:
@ -1759,7 +1819,7 @@ class TelegramAdapter(BasePlatformAdapter):
try:
if not os.path.exists(file_path):
return SendResult(success=False, error=f"File not found: {file_path}")
return SendResult(success=False, error=self._missing_media_path_error("File", file_path))
display_name = file_name or os.path.basename(file_path)
_thread = self._metadata_thread_id(metadata)
@ -1793,7 +1853,7 @@ class TelegramAdapter(BasePlatformAdapter):
try:
if not os.path.exists(video_path):
return SendResult(success=False, error=f"Video file not found: {video_path}")
return SendResult(success=False, error=self._missing_media_path_error("Video", video_path))
_thread = self._metadata_thread_id(metadata)
with open(video_path, "rb") as f:
@ -2033,7 +2093,7 @@ class TelegramAdapter(BasePlatformAdapter):
url = m.group(2).replace('\\', '\\\\').replace(')', '\\)')
return _ph(f'[{display}]({url})')
text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', _convert_link, text)
text = re.sub(r'\[([^\]]+)\]\(([^()]*(?:\([^()]*\)[^()]*)*)\)', _convert_link, text)
# 4) Convert markdown headers (## Title) → bold *Title*
def _convert_header(m):
@ -2241,22 +2301,27 @@ class TelegramAdapter(BasePlatformAdapter):
bot_username = (getattr(self._bot, "username", None) or "").lstrip("@").lower()
bot_id = getattr(self._bot, "id", None)
expected = f"@{bot_username}" if bot_username else None
def _iter_sources():
yield getattr(message, "text", None) or "", getattr(message, "entities", None) or []
yield getattr(message, "caption", None) or "", getattr(message, "caption_entities", None) or []
# Telegram parses mentions server-side and emits MessageEntity objects
# (type=mention for @username, type=text_mention for @FirstName targeting
# a user without a public username). Only those entities are authoritative —
# raw substring matches like "foo@hermes_bot.example" are not mentions
# (bug #12545). Entities also correctly handle @handles inside URLs, code
# blocks, and quoted text, where a regex scan would over-match.
for source_text, entities in _iter_sources():
if bot_username and f"@{bot_username}" in source_text.lower():
return True
for entity in entities:
entity_type = str(getattr(entity, "type", "")).split(".")[-1].lower()
if entity_type == "mention" and bot_username:
if entity_type == "mention" and expected:
offset = int(getattr(entity, "offset", -1))
length = int(getattr(entity, "length", 0))
if offset < 0 or length <= 0:
continue
if source_text[offset:offset + length].strip().lower() == f"@{bot_username}":
if source_text[offset:offset + length].strip().lower() == expected:
return True
elif entity_type == "text_mention":
user = getattr(entity, "user", None)
@ -2288,10 +2353,16 @@ class TelegramAdapter(BasePlatformAdapter):
DMs remain unrestricted. Group/supergroup messages are accepted when:
- the chat is explicitly allowlisted in ``free_response_chats``
- ``require_mention`` is disabled
- the message is a command
- the message replies to the bot
- the bot is @mentioned
- the text/caption matches a configured regex wake-word pattern
When ``require_mention`` is enabled, slash commands are not given
special treatment — they must pass the same mention/reply checks
as any other group message. Users can still trigger commands via
the Telegram bot menu (``/command@botname``) or by explicitly
mentioning the bot (``@botname /command``), both of which are
recognised as mentions by :meth:`_message_mentions_bot`.
"""
if not self._is_group_chat(message):
return True
@ -2306,8 +2377,6 @@ class TelegramAdapter(BasePlatformAdapter):
return True
if not self._telegram_require_mention():
return True
if is_command:
return True
if self._is_reply_to_bot(message):
return True
if self._message_mentions_bot(message):
@ -2590,6 +2659,23 @@ class TelegramAdapter(BasePlatformAdapter):
except Exception as e:
logger.warning("[Telegram] Failed to cache audio: %s", e, exc_info=True)
elif msg.video:
try:
file_obj = await msg.video.get_file()
video_bytes = await file_obj.download_as_bytearray()
ext = ".mp4"
if getattr(file_obj, "file_path", None):
for candidate in SUPPORTED_VIDEO_TYPES:
if file_obj.file_path.lower().endswith(candidate):
ext = candidate
break
cached_path = cache_video_from_bytes(bytes(video_bytes), ext=ext)
event.media_urls = [cached_path]
event.media_types = [SUPPORTED_VIDEO_TYPES.get(ext, "video/mp4")]
logger.info("[Telegram] Cached user video at %s", cached_path)
except Exception as e:
logger.warning("[Telegram] Failed to cache video: %s", e, exc_info=True)
# Download document files to cache for agent processing
elif msg.document:
doc = msg.document
@ -2606,6 +2692,21 @@ class TelegramAdapter(BasePlatformAdapter):
mime_to_ext = {v: k for k, v in SUPPORTED_DOCUMENT_TYPES.items()}
ext = mime_to_ext.get(doc.mime_type, "")
if not ext and doc.mime_type:
video_mime_to_ext = {v: k for k, v in SUPPORTED_VIDEO_TYPES.items()}
ext = video_mime_to_ext.get(doc.mime_type, "")
if ext in SUPPORTED_VIDEO_TYPES:
file_obj = await doc.get_file()
video_bytes = await file_obj.download_as_bytearray()
cached_path = cache_video_from_bytes(bytes(video_bytes), ext=ext)
event.media_urls = [cached_path]
event.media_types = [SUPPORTED_VIDEO_TYPES[ext]]
event.message_type = MessageType.VIDEO
logger.info("[Telegram] Cached user video document at %s", cached_path)
await self.handle_message(event)
return
# Check if supported
if ext not in SUPPORTED_DOCUMENT_TYPES:
supported_list = ", ".join(sorted(SUPPORTED_DOCUMENT_TYPES.keys()))
@ -2744,13 +2845,11 @@ class TelegramAdapter(BasePlatformAdapter):
logger.info("[Telegram] Analyzing sticker at %s", cached_path)
from tools.vision_tools import vision_analyze_tool
import json as _json
result_json = await vision_analyze_tool(
image_url=cached_path,
user_prompt=STICKER_VISION_PROMPT,
)
result = _json.loads(result_json)
result = json.loads(result_json)
if result.get("success"):
description = result.get("analysis", "a sticker")

View file

@ -13,6 +13,10 @@ Each route defines:
- skills: optional list of skills to load for the agent
- deliver: where to send the response (github_comment, telegram, etc.)
- deliver_extra: additional delivery config (repo, pr_number, chat_id)
- deliver_only: if true, skip the agent — the rendered prompt IS the
message that gets delivered. Use for external push notifications
(Supabase, monitoring alerts, inter-agent pings) where zero LLM cost
and sub-second delivery matter more than agent reasoning.
Security:
- HMAC secret is required per route (validated at startup)
@ -122,6 +126,19 @@ class WebhookAdapter(BasePlatformAdapter):
f"For testing without auth, set secret to '{_INSECURE_NO_AUTH}'."
)
# deliver_only routes bypass the agent — the POST body becomes a
# direct push notification via the configured delivery target.
# Validate up-front so misconfiguration surfaces at startup rather
# than on the first webhook POST.
if route.get("deliver_only"):
deliver = route.get("deliver", "log")
if not deliver or deliver == "log":
raise ValueError(
f"[webhook] Route '{name}' has deliver_only=true but "
f"deliver is '{deliver}'. Direct delivery requires a "
f"real target (telegram, discord, slack, github_comment, etc.)."
)
app = web.Application()
app.router.add_get("/health", self._handle_health)
app.router.add_post("/webhooks/{route_name}", self._handle_webhook)
@ -296,24 +313,14 @@ class WebhookAdapter(BasePlatformAdapter):
{"error": "Payload too large"}, status=413
)
# ── Rate limiting ────────────────────────────────────────
now = time.time()
window = self._rate_counts.setdefault(route_name, [])
window[:] = [t for t in window if now - t < 60]
if len(window) >= self._rate_limit:
return web.json_response(
{"error": "Rate limit exceeded"}, status=429
)
window.append(now)
# Read body
# Read body (must be done before any validation)
try:
raw_body = await request.read()
except Exception as e:
logger.error("[webhook] Failed to read body: %s", e)
return web.json_response({"error": "Bad request"}, status=400)
# Validate HMAC signature (skip for INSECURE_NO_AUTH testing mode)
# Validate HMAC signature FIRST (skip for INSECURE_NO_AUTH testing mode)
secret = route_config.get("secret", self._global_secret)
if secret and secret != _INSECURE_NO_AUTH:
if not self._validate_signature(request, raw_body, secret):
@ -324,6 +331,16 @@ class WebhookAdapter(BasePlatformAdapter):
{"error": "Invalid signature"}, status=401
)
# ── Rate limiting (after auth) ───────────────────────────
now = time.time()
window = self._rate_counts.setdefault(route_name, [])
window[:] = [t for t in window if now - t < 60]
if len(window) >= self._rate_limit:
return web.json_response(
{"error": "Rate limit exceeded"}, status=429
)
window.append(now)
# Parse payload
try:
payload = json.loads(raw_body)
@ -419,6 +436,64 @@ class WebhookAdapter(BasePlatformAdapter):
)
self._seen_deliveries[delivery_id] = now
# ── Direct delivery mode (deliver_only) ─────────────────
# Skip the agent entirely — the rendered prompt IS the message we
# deliver. Use case: external services (Supabase, monitoring,
# cron jobs, other agents) that need to push a plain notification
# to a user's chat with zero LLM cost. Reuses the same HMAC auth,
# rate limiting, idempotency, and template rendering as agent mode.
if route_config.get("deliver_only"):
delivery = {
"deliver": route_config.get("deliver", "log"),
"deliver_extra": self._render_delivery_extra(
route_config.get("deliver_extra", {}), payload
),
"payload": payload,
}
logger.info(
"[webhook] direct-deliver event=%s route=%s target=%s msg_len=%d delivery=%s",
event_type,
route_name,
delivery["deliver"],
len(prompt),
delivery_id,
)
try:
result = await self._direct_deliver(prompt, delivery)
except Exception:
logger.exception(
"[webhook] direct-deliver failed route=%s delivery=%s",
route_name,
delivery_id,
)
return web.json_response(
{"status": "error", "error": "Delivery failed", "delivery_id": delivery_id},
status=502,
)
if result.success:
return web.json_response(
{
"status": "delivered",
"route": route_name,
"target": delivery["deliver"],
"delivery_id": delivery_id,
},
status=200,
)
# Delivery attempted but target rejected it — surface as 502
# with a generic error (don't leak adapter-level detail).
logger.warning(
"[webhook] direct-deliver target rejected route=%s target=%s error=%s",
route_name,
delivery["deliver"],
result.error,
)
return web.json_response(
{"status": "error", "error": "Delivery failed", "delivery_id": delivery_id},
status=502,
)
# Use delivery_id in session key so concurrent webhooks on the
# same route get independent agent runs (not queued/interrupted).
session_chat_id = f"webhook:{route_name}:{delivery_id}"
@ -572,6 +647,34 @@ class WebhookAdapter(BasePlatformAdapter):
# Response delivery
# ------------------------------------------------------------------
async def _direct_deliver(
    self, content: str, delivery: dict
) -> SendResult:
    """Deliver *content* directly without invoking the agent.

    Used by ``deliver_only`` routes: the rendered template becomes the
    literal message body, dispatched through the same delivery helpers
    the agent-mode ``send()`` flow uses. Every target type that works in
    agent mode works here — Telegram, Discord, Slack, GitHub PR
    comments, etc.
    """
    target = delivery.get("deliver", "log")
    if target == "github_comment":
        return await self._deliver_github_comment(content, delivery)
    if target == "log":
        # Shouldn't reach here — startup validation rejects deliver_only
        # with deliver=log — but guard defensively.
        logger.info("[webhook] direct-deliver log-only: %s", content[:200])
        return SendResult(success=True)
    # Everything else goes through the cross-platform dispatcher, which
    # validates the target name and routes via the gateway runner.
    return await self._deliver_cross_platform(target, content, delivery)
async def _deliver_github_comment(
self, content: str, delivery: dict
) -> SendResult:

View file

@ -624,13 +624,16 @@ class WeComAdapter(BasePlatformAdapter):
msgtype = str(body.get("msgtype") or "").lower()
if msgtype == "mixed":
mixed = body.get("mixed") if isinstance(body.get("mixed"), dict) else {}
items = mixed.get("msg_item") if isinstance(mixed.get("msg_item"), list) else []
_raw_mixed = body.get("mixed")
mixed = _raw_mixed if isinstance(_raw_mixed, dict) else {}
_raw_items = mixed.get("msg_item")
items = _raw_items if isinstance(_raw_items, list) else []
for item in items:
if not isinstance(item, dict):
continue
if str(item.get("msgtype") or "").lower() == "text":
text_block = item.get("text") if isinstance(item.get("text"), dict) else {}
_raw_text = item.get("text")
text_block = _raw_text if isinstance(_raw_text, dict) else {}
content = str(text_block.get("content") or "").strip()
if content:
text_parts.append(content)
@ -672,8 +675,10 @@ class WeComAdapter(BasePlatformAdapter):
msgtype = str(body.get("msgtype") or "").lower()
if msgtype == "mixed":
mixed = body.get("mixed") if isinstance(body.get("mixed"), dict) else {}
items = mixed.get("msg_item") if isinstance(mixed.get("msg_item"), list) else []
_raw_mixed = body.get("mixed")
mixed = _raw_mixed if isinstance(_raw_mixed, dict) else {}
_raw_items = mixed.get("msg_item")
items = _raw_items if isinstance(_raw_items, list) else []
for item in items:
if not isinstance(item, dict):
continue
@ -1459,3 +1464,134 @@ class WeComAdapter(BasePlatformAdapter):
"name": chat_id,
"type": "group" if chat_id and chat_id.lower().startswith("group") else "dm",
}
# ------------------------------------------------------------------
# QR code scan flow for obtaining bot credentials
# ------------------------------------------------------------------
_QR_GENERATE_URL = "https://work.weixin.qq.com/ai/qc/generate"
_QR_QUERY_URL = "https://work.weixin.qq.com/ai/qc/query_result"
_QR_CODE_PAGE = "https://work.weixin.qq.com/ai/qc/gen?source=hermes&scode="
_QR_POLL_INTERVAL = 3 # seconds
_QR_POLL_TIMEOUT = 300 # 5 minutes
def qr_scan_for_bot_info(
    *,
    timeout_seconds: int = _QR_POLL_TIMEOUT,
) -> Optional[Dict[str, str]]:
    """Run the WeCom QR scan flow to obtain bot_id and secret.

    Fetches a QR code from WeCom, renders it in the terminal, and polls
    until the user scans it or the timeout expires.

    Args:
        timeout_seconds: How long to poll for a scan result before
            giving up (defaults to ``_QR_POLL_TIMEOUT``).

    Returns:
        ``{"bot_id": ..., "secret": ...}`` on success, ``None`` on
        failure or timeout.

    Note: the ``work.weixin.qq.com/ai/qc/{generate,query_result}`` endpoints
    used here are not part of WeCom's public developer API — they back the
    admin-console web UI's bot-creation flow and may change without notice.
    The same pattern is used by the feishu/dingtalk QR setup wizards.
    """
    import time
    import urllib.parse
    import urllib.request

    generate_url = f"{_QR_GENERATE_URL}?source=hermes"

    # ── Step 1: Fetch QR code ──
    print(" Connecting to WeCom...", end="", flush=True)
    try:
        req = urllib.request.Request(generate_url, headers={"User-Agent": "HermesAgent/1.0"})
        with urllib.request.urlopen(req, timeout=15) as resp:
            raw = json.loads(resp.read().decode("utf-8"))
    except Exception as exc:
        logger.error("WeCom QR: failed to fetch QR code: %s", exc)
        print(f" failed: {exc}")
        return None
    data = raw.get("data") or {}
    scode = str(data.get("scode") or "").strip()
    auth_url = str(data.get("auth_url") or "").strip()
    if not scode or not auth_url:
        logger.error("WeCom QR: unexpected response format: %s", raw)
        print(" failed: unexpected response format")
        return None
    print(" done.")

    # ── Step 2: Render QR code in terminal ──
    print()
    qr_rendered = False
    try:
        # qrcode is an optional dependency — any import or rendering
        # failure falls back to printing the URL for manual opening.
        import qrcode as _qrcode
        qr = _qrcode.QRCode()
        qr.add_data(auth_url)
        qr.make(fit=True)
        qr.print_ascii(invert=True)
        qr_rendered = True
    except Exception:
        pass
    page_url = f"{_QR_CODE_PAGE}{urllib.parse.quote(scode)}"
    if qr_rendered:
        print(f"\n Scan the QR code above, or open this URL directly:\n {page_url}")
    else:
        print(f" Open this URL in WeCom on your phone:\n\n {page_url}\n")
        print(" Tip: pip install qrcode to display a scannable QR code here next time")
    print()
    print(" Fetching configuration results...", end="", flush=True)

    # ── Step 3: Poll for result ──
    deadline = time.time() + timeout_seconds
    query_url = f"{_QR_QUERY_URL}?scode={urllib.parse.quote(scode)}"
    while time.time() < deadline:
        try:
            req = urllib.request.Request(query_url, headers={"User-Agent": "HermesAgent/1.0"})
            with urllib.request.urlopen(req, timeout=10) as resp:
                result = json.loads(resp.read().decode("utf-8"))
        except Exception as exc:
            logger.debug("WeCom QR poll error: %s", exc)
            time.sleep(_QR_POLL_INTERVAL)
            continue
        # Print a dot on every poll so progress is visible within 3s.
        print(".", end="", flush=True)
        result_data = result.get("data") or {}
        status = str(result_data.get("status") or "").lower()
        if status == "success":
            print()  # newline after "Fetching configuration results..." dots
            bot_info = result_data.get("bot_info") or {}
            bot_id = str(bot_info.get("botid") or bot_info.get("bot_id") or "").strip()
            secret = str(bot_info.get("secret") or "").strip()
            if bot_id and secret:
                return {"bot_id": bot_id, "secret": secret}
            logger.warning(
                "WeCom QR: scan reported success but bot_info missing or incomplete: %s",
                result_data,
            )
            print(
                " QR scan reported success but no bot credentials were returned.\n"
                " This usually means the bot was not actually created on the WeCom side.\n"
                " Falling back to manual credential entry."
            )
            return None
        time.sleep(_QR_POLL_INTERVAL)
    print()  # newline after dots
    print(f" QR scan timed out ({timeout_seconds // 60} minutes). Please try again.")
    return None

View file

@ -66,6 +66,37 @@ def _kill_port_process(port: int) -> None:
except Exception:
pass
def _terminate_bridge_process(proc, *, force: bool = False) -> None:
    """Terminate the bridge process using process-tree semantics where possible.

    Args:
        proc: The bridge child-process handle — assumed Popen-like
            (exposes ``pid``, ``terminate()``, ``kill()``; TODO confirm).
        force: When True, escalate to a forced kill (``taskkill /F`` on
            Windows, SIGKILL on POSIX) instead of a polite request.

    Raises:
        OSError: On Windows, when taskkill exits non-zero.
    """
    # NOTE(review): _IS_WINDOWS is a module-level platform flag defined
    # elsewhere in this file.
    if _IS_WINDOWS:
        # taskkill /T targets the whole child process tree, which plain
        # Popen.terminate() cannot do on Windows.
        cmd = ["taskkill", "/PID", str(proc.pid), "/T"]
        if force:
            cmd.append("/F")
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=10,
            )
        except FileNotFoundError:
            # taskkill binary unavailable — fall back to single-process
            # termination (grandchildren may be orphaned).
            if force:
                proc.kill()
            else:
                proc.terminate()
            return
        if result.returncode != 0:
            details = (result.stderr or result.stdout or "").strip()
            raise OSError(details or f"taskkill failed for PID {proc.pid}")
        return
    import signal
    # POSIX: signal the whole process group so the bridge's own children
    # are terminated along with it.
    sig = signal.SIGTERM if not force else signal.SIGKILL
    os.killpg(os.getpgid(proc.pid), sig)
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))
@ -118,6 +149,10 @@ class WhatsAppAdapter(BasePlatformAdapter):
- bridge_script: Path to the Node.js bridge script
- bridge_port: Port for HTTP communication (default: 3000)
- session_path: Path to store WhatsApp session data
- dm_policy: "open" | "allowlist" | "disabled" — how DMs are handled (default: "open")
- allow_from: List of sender IDs allowed in DMs (when dm_policy="allowlist")
- group_policy: "open" | "allowlist" | "disabled" — which groups are processed (default: "open")
- group_allow_from: List of group JIDs allowed (when group_policy="allowlist")
"""
# WhatsApp message limits — practical UX limit, not protocol max.
@ -140,6 +175,10 @@ class WhatsAppAdapter(BasePlatformAdapter):
get_hermes_dir("platforms/whatsapp/session", "whatsapp/session")
))
self._reply_prefix: Optional[str] = config.extra.get("reply_prefix")
self._dm_policy = str(config.extra.get("dm_policy") or os.getenv("WHATSAPP_DM_POLICY", "open")).strip().lower()
self._allow_from = self._coerce_allow_list(config.extra.get("allow_from") or config.extra.get("allowFrom"))
self._group_policy = str(config.extra.get("group_policy") or os.getenv("WHATSAPP_GROUP_POLICY", "open")).strip().lower()
self._group_allow_from = self._coerce_allow_list(config.extra.get("group_allow_from") or config.extra.get("groupAllowFrom"))
self._mention_patterns = self._compile_mention_patterns()
self._message_queue: asyncio.Queue = asyncio.Queue()
self._bridge_log_fh = None
@ -163,6 +202,33 @@ class WhatsAppAdapter(BasePlatformAdapter):
return {str(part).strip() for part in raw if str(part).strip()}
return {part.strip() for part in str(raw).split(",") if part.strip()}
@staticmethod
def _coerce_allow_list(raw) -> set[str]:
"""Parse allow_from / group_allow_from from config or env var."""
if raw is None:
return set()
if isinstance(raw, list):
return {str(part).strip() for part in raw if str(part).strip()}
return {part.strip() for part in str(raw).split(",") if part.strip()}
def _is_dm_allowed(self, sender_id: str) -> bool:
"""Check whether a DM from the given sender should be processed."""
if self._dm_policy == "disabled":
return False
if self._dm_policy == "allowlist":
return sender_id in self._allow_from
# "open" — all DMs allowed
return True
def _is_group_allowed(self, chat_id: str) -> bool:
"""Check whether a group chat should be processed."""
if self._group_policy == "disabled":
return False
if self._group_policy == "allowlist":
return chat_id in self._group_allow_from
# "open" — all groups allowed
return True
def _compile_mention_patterns(self):
patterns = self.config.extra.get("mention_patterns")
if patterns is None:
@ -255,8 +321,18 @@ class WhatsAppAdapter(BasePlatformAdapter):
return cleaned.strip() or text
def _should_process_message(self, data: Dict[str, Any]) -> bool:
if not data.get("isGroup"):
is_group = data.get("isGroup", False)
if is_group:
chat_id = str(data.get("chatId") or "")
if not self._is_group_allowed(chat_id):
return False
else:
sender_id = str(data.get("senderId") or data.get("from") or "")
if not self._is_dm_allowed(sender_id):
return False
# DMs that pass the policy gate are always processed
return True
# Group messages: check mention / free-response settings
chat_id = str(data.get("chatId") or "")
if chat_id in self._whatsapp_free_response_chats():
return True
@ -289,39 +365,40 @@ class WhatsAppAdapter(BasePlatformAdapter):
logger.info("[%s] Bridge found at %s", self.name, bridge_path)
# Acquire scoped lock to prevent duplicate sessions
lock_acquired = False
try:
if not self._acquire_platform_lock('whatsapp-session', str(self._session_path), 'WhatsApp session'):
return False
lock_acquired = True
except Exception as e:
logger.warning("[%s] Could not acquire session lock (non-fatal): %s", self.name, e)
# Auto-install npm dependencies if node_modules doesn't exist
bridge_dir = bridge_path.parent
if not (bridge_dir / "node_modules").exists():
print(f"[{self.name}] Installing WhatsApp bridge dependencies...")
try:
install_result = subprocess.run(
["npm", "install", "--silent"],
cwd=str(bridge_dir),
capture_output=True,
text=True,
timeout=60,
)
if install_result.returncode != 0:
print(f"[{self.name}] npm install failed: {install_result.stderr}")
return False
print(f"[{self.name}] Dependencies installed")
except Exception as e:
print(f"[{self.name}] Failed to install dependencies: {e}")
return False
try:
# Auto-install npm dependencies if node_modules doesn't exist
bridge_dir = bridge_path.parent
if not (bridge_dir / "node_modules").exists():
print(f"[{self.name}] Installing WhatsApp bridge dependencies...")
try:
install_result = subprocess.run(
["npm", "install", "--silent"],
cwd=str(bridge_dir),
capture_output=True,
text=True,
timeout=60,
)
if install_result.returncode != 0:
print(f"[{self.name}] npm install failed: {install_result.stderr}")
return False
print(f"[{self.name}] Dependencies installed")
except Exception as e:
print(f"[{self.name}] Failed to install dependencies: {e}")
return False
# Ensure session directory exists
self._session_path.mkdir(parents=True, exist_ok=True)
# Check if bridge is already running and connected
import aiohttp
import asyncio
try:
async with aiohttp.ClientSession() as session:
async with session.get(
@ -452,10 +529,13 @@ class WhatsAppAdapter(BasePlatformAdapter):
return True
except Exception as e:
self._release_platform_lock()
logger.error("[%s] Failed to start bridge: %s", self.name, e, exc_info=True)
self._close_bridge_log()
return False
finally:
if not self._running:
if lock_acquired:
self._release_platform_lock()
self._close_bridge_log()
def _close_bridge_log(self) -> None:
"""Close the bridge log file handle if open."""
@ -487,22 +567,14 @@ class WhatsAppAdapter(BasePlatformAdapter):
"""Stop the WhatsApp bridge and clean up any orphaned processes."""
if self._bridge_process:
try:
# Kill the entire process group so child node processes die too
import signal
try:
if _IS_WINDOWS:
self._bridge_process.terminate()
else:
os.killpg(os.getpgid(self._bridge_process.pid), signal.SIGTERM)
_terminate_bridge_process(self._bridge_process, force=False)
except (ProcessLookupError, PermissionError):
self._bridge_process.terminate()
await asyncio.sleep(1)
if self._bridge_process.poll() is None:
try:
if _IS_WINDOWS:
self._bridge_process.kill()
else:
os.killpg(os.getpgid(self._bridge_process.pid), signal.SIGKILL)
_terminate_bridge_process(self._bridge_process, force=True)
except (ProcessLookupError, PermissionError):
self._bridge_process.kill()
except Exception as e:
@ -655,6 +727,8 @@ class WhatsAppAdapter(BasePlatformAdapter):
chat_id: str,
message_id: str,
content: str,
*,
finalize: bool = False,
) -> SendResult:
"""Edit a previously sent message via the WhatsApp bridge."""
if not self._running or not self._http_session:
@ -766,6 +840,17 @@ class WhatsAppAdapter(BasePlatformAdapter):
"""Send a video natively via bridge — plays inline in WhatsApp."""
return await self._send_media_to_bridge(chat_id, video_path, "video", caption)
async def send_voice(
    self,
    chat_id: str,
    audio_path: str,
    caption: Optional[str] = None,
    reply_to: Optional[str] = None,
    **kwargs,
) -> SendResult:
    """Deliver *audio_path* to *chat_id* as a WhatsApp voice message.

    Thin wrapper over the generic bridge media sender with media type
    "audio". ``reply_to`` and extra kwargs are accepted for adapter
    interface parity but are not forwarded to the bridge.
    """
    media_kind = "audio"
    return await self._send_media_to_bridge(chat_id, audio_path, media_kind, caption)
async def send_document(
self,
chat_id: str,

File diff suppressed because it is too large Load diff

View file

@ -152,6 +152,7 @@ class SessionContext:
source: SessionSource
connected_platforms: List[Platform]
home_channels: Dict[Platform, HomeChannel]
shared_multi_user_session: bool = False
# Session metadata
session_key: str = ""
@ -166,6 +167,7 @@ class SessionContext:
"home_channels": {
p.value: hc.to_dict() for p, hc in self.home_channels.items()
},
"shared_multi_user_session": self.shared_multi_user_session,
"session_key": self.session_key,
"session_id": self.session_id,
"created_at": self.created_at.isoformat() if self.created_at else None,
@ -240,18 +242,16 @@ def build_session_context_prompt(
lines.append(f"**Channel Topic:** {context.source.chat_topic}")
# User identity.
# In shared thread sessions (non-DM with thread_id), multiple users
# contribute to the same conversation. Don't pin a single user name
# in the system prompt — it changes per-turn and would bust the prompt
# cache. Instead, note that this is a multi-user thread; individual
# sender names are prefixed on each user message by the gateway.
_is_shared_thread = (
context.source.chat_type != "dm"
and context.source.thread_id
)
if _is_shared_thread:
# In shared multi-user sessions (shared threads OR shared non-thread groups
# when group_sessions_per_user=False), multiple users contribute to the same
# conversation. Don't pin a single user name in the system prompt — it
# changes per-turn and would bust the prompt cache. Instead, note that
# this is a multi-user session; individual sender names are prefixed on
# each user message by the gateway.
if context.shared_multi_user_session:
session_label = "Multi-user thread" if context.source.thread_id else "Multi-user session"
lines.append(
"**Session type:** Multi-user thread — messages are prefixed "
f"**Session type:** {session_label} — messages are prefixed "
"with [sender name]. Multiple users may participate."
)
elif context.source.user_name:
@ -467,6 +467,27 @@ class SessionEntry:
)
def is_shared_multi_user_session(
    source: SessionSource,
    *,
    group_sessions_per_user: bool = True,
    thread_sessions_per_user: bool = False,
) -> bool:
    """Return True when a non-DM session is shared across participants.

    Mirrors the isolation rules in :func:`build_session_key`:

    - DMs are never shared.
    - Threads are shared unless ``thread_sessions_per_user`` is True.
    - Non-thread group/channel sessions are shared unless
      ``group_sessions_per_user`` is True (default: True = isolated).
    """
    if source.chat_type == "dm":
        return False
    # Pick whichever per-user flag governs this session shape; a session
    # is shared exactly when it is NOT isolated per user.
    per_user = thread_sessions_per_user if source.thread_id else group_sessions_per_user
    return not per_user
def build_session_key(
source: SessionSource,
group_sessions_per_user: bool = True,
@ -926,12 +947,18 @@ class SessionStore:
continue
# Never prune sessions with an active background process
# attached — the user may still be waiting on output.
# The callback is keyed by session_key (see process_registry.
# has_active_for_session); passing session_id here used to
# never match, so active sessions got pruned anyway.
if self._has_active_processes_fn is not None:
try:
if self._has_active_processes_fn(entry.session_id):
if self._has_active_processes_fn(entry.session_key):
continue
except Exception:
pass
except Exception as exc:
logger.debug(
"has_active_processes_fn raised during prune for %s: %s",
entry.session_key, exc,
)
if entry.updated_at < cutoff:
removed_keys.append(key)
for key in removed_keys:
@ -1120,6 +1147,10 @@ class SessionStore:
tool_name=message.get("tool_name"),
tool_calls=message.get("tool_calls"),
tool_call_id=message.get("tool_call_id"),
reasoning=message.get("reasoning") if message.get("role") == "assistant" else None,
reasoning_content=message.get("reasoning_content") if message.get("role") == "assistant" else None,
reasoning_details=message.get("reasoning_details") if message.get("role") == "assistant" else None,
codex_reasoning_items=message.get("codex_reasoning_items") if message.get("role") == "assistant" else None,
)
except Exception as e:
logger.debug("Session DB operation failed: %s", e)
@ -1149,6 +1180,7 @@ class SessionStore:
tool_calls=msg.get("tool_calls"),
tool_call_id=msg.get("tool_call_id"),
reasoning=msg.get("reasoning") if role == "assistant" else None,
reasoning_content=msg.get("reasoning_content") if role == "assistant" else None,
reasoning_details=msg.get("reasoning_details") if role == "assistant" else None,
codex_reasoning_items=msg.get("codex_reasoning_items") if role == "assistant" else None,
)
@ -1232,6 +1264,11 @@ def build_session_context(
source=source,
connected_platforms=connected,
home_channels=home_channels,
shared_multi_user_session=is_shared_multi_user_session(
source,
group_sessions_per_user=getattr(config, "group_sessions_per_user", True),
thread_sessions_per_user=getattr(config, "thread_sessions_per_user", False),
),
)
if session_entry:

View file

@ -56,6 +56,12 @@ _SESSION_USER_ID: ContextVar = ContextVar("HERMES_SESSION_USER_ID", default=_UNS
_SESSION_USER_NAME: ContextVar = ContextVar("HERMES_SESSION_USER_NAME", default=_UNSET)
_SESSION_KEY: ContextVar = ContextVar("HERMES_SESSION_KEY", default=_UNSET)
# Cron auto-delivery vars — set per-job in run_job() so concurrent jobs
# don't clobber each other's delivery targets. Each defaults to the
# module-level _UNSET sentinel until a job binds a real value.
_CRON_AUTO_DELIVER_PLATFORM: ContextVar = ContextVar("HERMES_CRON_AUTO_DELIVER_PLATFORM", default=_UNSET)
_CRON_AUTO_DELIVER_CHAT_ID: ContextVar = ContextVar("HERMES_CRON_AUTO_DELIVER_CHAT_ID", default=_UNSET)
_CRON_AUTO_DELIVER_THREAD_ID: ContextVar = ContextVar("HERMES_CRON_AUTO_DELIVER_THREAD_ID", default=_UNSET)
_VAR_MAP = {
"HERMES_SESSION_PLATFORM": _SESSION_PLATFORM,
"HERMES_SESSION_CHAT_ID": _SESSION_CHAT_ID,
@ -64,6 +70,9 @@ _VAR_MAP = {
"HERMES_SESSION_USER_ID": _SESSION_USER_ID,
"HERMES_SESSION_USER_NAME": _SESSION_USER_NAME,
"HERMES_SESSION_KEY": _SESSION_KEY,
"HERMES_CRON_AUTO_DELIVER_PLATFORM": _CRON_AUTO_DELIVER_PLATFORM,
"HERMES_CRON_AUTO_DELIVER_CHAT_ID": _CRON_AUTO_DELIVER_CHAT_ID,
"HERMES_CRON_AUTO_DELIVER_THREAD_ID": _CRON_AUTO_DELIVER_THREAD_ID,
}

View file

@ -225,8 +225,28 @@ def _cleanup_invalid_pid_path(pid_path: Path, *, cleanup_stale: bool) -> None:
def write_pid_file() -> None:
    """Write the current process PID and metadata to the gateway PID file.

    Uses atomic O_CREAT | O_EXCL creation so that concurrent --replace
    invocations race: exactly one process wins and the rest get
    FileExistsError.
    """
    # NOTE: an earlier merge left the legacy one-shot body
    # (`_write_json_file(_get_pid_path(), _build_pid_record())`) in front of
    # this atomic implementation. That pre-created the file, so the O_EXCL
    # open below always raised FileExistsError and the function could never
    # succeed. The stale call has been removed; only the atomic path remains.
    path = _get_pid_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    record = json.dumps(_build_pid_record())
    try:
        # Atomic create: fails if another gateway already owns the PID file.
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except FileExistsError:
        raise  # Let caller decide: another gateway is racing us
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            f.write(record)
    except Exception:
        # Don't leave a half-written PID file behind on a write failure.
        try:
            path.unlink(missing_ok=True)
        except OSError:
            pass
        raise
def write_runtime_status(

View file

@ -430,6 +430,21 @@ class GatewayStreamConsumer:
# a real string like "msg_1", not "__no_edit__", so that case
# still resets and creates a fresh segment as intended.)
if got_segment_break:
# If the segment-break edit failed to deliver the
# accumulated content (flood control that has not yet
# promoted to fallback mode, or fallback mode itself),
# _accumulated still holds pre-boundary text the user
# never saw. Flush that tail as a continuation message
# before the reset below wipes _accumulated — otherwise
# text generated before the tool boundary is silently
# dropped (issue #8124).
if (
self._accumulated
and not current_update_visible
and self._message_id
and self._message_id != "__no_edit__"
):
await self._flush_segment_tail_on_edit_failure()
self._reset_segment_state(preserve_no_edit=True)
await asyncio.sleep(0.05) # Small yield to not busy-loop
@ -556,6 +571,30 @@ class GatewayStreamConsumer:
if final_text.strip() and final_text != self._visible_prefix():
continuation = final_text
else:
# Defence-in-depth for #7183: the last edit may still show the
# cursor character because fallback mode was entered after an
# edit failure left it stuck. Try one final edit to strip it
# so the message doesn't freeze with a visible ▉. Best-effort
# — if this edit also fails (flood control still active),
# _try_strip_cursor has already been called on fallback entry
# and the adaptive-backoff retries will have had their shot.
if (
self._message_id
and self._last_sent_text
and self.cfg.cursor
and self._last_sent_text.endswith(self.cfg.cursor)
):
clean_text = self._last_sent_text[:-len(self.cfg.cursor)]
try:
result = await self.adapter.edit_message(
chat_id=self.chat_id,
message_id=self._message_id,
content=clean_text,
)
if result.success:
self._last_sent_text = clean_text
except Exception:
pass
self._already_sent = True
self._final_response_sent = True
return
@ -620,6 +659,39 @@ class GatewayStreamConsumer:
err_lower = err.lower()
return "flood" in err_lower or "retry after" in err_lower or "rate" in err_lower
async def _flush_segment_tail_on_edit_failure(self) -> None:
"""Deliver un-sent tail content before a segment-break reset.
When an edit fails (flood control, transport error) and a tool
boundary arrives before the next retry, ``_accumulated`` holds text
that was generated but never shown to the user. Without this flush,
the segment reset would discard that tail and leave a frozen cursor
in the partial message.
Sends the tail that sits after the last successfully-delivered
prefix as a new message, and best-effort strips the stuck cursor
from the previous partial message.
"""
if not self._fallback_final_send:
await self._try_strip_cursor()
visible = self._fallback_prefix or self._visible_prefix()
tail = self._accumulated
if visible and tail.startswith(visible):
tail = tail[len(visible):].lstrip()
tail = self._clean_for_display(tail)
if not tail.strip():
return
try:
result = await self.adapter.send(
chat_id=self.chat_id,
content=tail,
metadata=self.metadata,
)
if result.success:
self._already_sent = True
except Exception as e:
logger.error("Segment-break tail flush error: %s", e)
async def _try_strip_cursor(self) -> None:
"""Best-effort edit to remove the cursor from the last visible message.

View file

@ -20,6 +20,7 @@ import logging
import os
import shutil
import shlex
import ssl
import stat
import base64
import hashlib
@ -71,6 +72,8 @@ DEFAULT_QWEN_BASE_URL = "https://portal.qwen.ai/v1"
DEFAULT_GITHUB_MODELS_BASE_URL = "https://api.githubcopilot.com"
DEFAULT_COPILOT_ACP_BASE_URL = "acp://copilot"
DEFAULT_OLLAMA_CLOUD_BASE_URL = "https://ollama.com/v1"
STEPFUN_STEP_PLAN_INTL_BASE_URL = "https://api.stepfun.ai/step_plan/v1"
STEPFUN_STEP_PLAN_CN_BASE_URL = "https://api.stepfun.com/step_plan/v1"
CODEX_OAUTH_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
CODEX_OAUTH_TOKEN_URL = "https://auth.openai.com/oauth/token"
CODEX_ACCESS_TOKEN_REFRESH_SKEW_SECONDS = 120
@ -151,7 +154,7 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
id="gemini",
name="Google AI Studio",
auth_type="api_key",
inference_base_url="https://generativelanguage.googleapis.com/v1beta/openai",
inference_base_url="https://generativelanguage.googleapis.com/v1beta",
api_key_env_vars=("GOOGLE_API_KEY", "GEMINI_API_KEY"),
base_url_env_var="GEMINI_BASE_URL",
),
@ -167,8 +170,11 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
id="kimi-coding",
name="Kimi / Moonshot",
auth_type="api_key",
# Legacy platform.moonshot.ai keys use this endpoint (OpenAI-compat).
# sk-kimi- (Kimi Code) keys are auto-redirected to api.kimi.com/coding
# by _resolve_kimi_base_url() below.
inference_base_url="https://api.moonshot.ai/v1",
api_key_env_vars=("KIMI_API_KEY",),
api_key_env_vars=("KIMI_API_KEY", "KIMI_CODING_API_KEY"),
base_url_env_var="KIMI_BASE_URL",
),
"kimi-coding-cn": ProviderConfig(
@ -178,6 +184,14 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
inference_base_url="https://api.moonshot.cn/v1",
api_key_env_vars=("KIMI_CN_API_KEY",),
),
"stepfun": ProviderConfig(
id="stepfun",
name="StepFun Step Plan",
auth_type="api_key",
inference_base_url=STEPFUN_STEP_PLAN_INTL_BASE_URL,
api_key_env_vars=("STEPFUN_API_KEY",),
base_url_env_var="STEPFUN_BASE_URL",
),
"arcee": ProviderConfig(
id="arcee",
name="Arcee AI",
@ -339,10 +353,16 @@ def get_anthropic_key() -> str:
# =============================================================================
# Kimi Code (kimi.com/code) issues keys prefixed "sk-kimi-" that only work
# on api.kimi.com/coding. Legacy keys from platform.moonshot.ai work on
# api.moonshot.ai/v1 (the old default). Auto-detect when user hasn't set
# KIMI_BASE_URL explicitly.
#
# Note: the base URL intentionally has NO /v1 suffix. The /coding endpoint
# speaks the Anthropic Messages protocol, and the anthropic SDK appends
# "/v1/messages" internally — so "/coding" + SDK suffix → "/coding/v1/messages"
# (the correct target). Using "/coding/v1" here would produce
# "/coding/v1/v1/messages" (a 404).
#
# A merge had left BOTH the stale "/coding/v1" assignment and this corrected
# one in place (the stale value was dead but misleading); only the corrected
# assignment is kept.
KIMI_CODE_BASE_URL = "https://api.kimi.com/coding"
def _resolve_kimi_base_url(api_key: str, default_url: str, env_override: str) -> str:
@ -353,6 +373,9 @@ def _resolve_kimi_base_url(api_key: str, default_url: str, env_override: str) ->
"""
if env_override:
return env_override
# No key → nothing to infer from. Return default without inspecting.
if not api_key:
return default_url
if api_key.startswith("sk-kimi-"):
return KIMI_CODE_BASE_URL
return default_url
@ -480,6 +503,14 @@ def _resolve_zai_base_url(api_key: str, default_url: str, env_override: str) ->
if env_override:
return env_override
# No API key set → don't probe (would fire N×M HTTPS requests with an
# empty Bearer token, all returning 401). This path is hit during
# auxiliary-client auto-detection when the user has no Z.AI credentials
# at all — the caller discards the result immediately, so the probe is
# pure latency for every AIAgent construction.
if not api_key:
return default_url
# Check provider-state cache for a previously-detected endpoint.
auth_store = _load_auth_store()
state = _load_provider_state(auth_store, "zai") or {}
@ -971,6 +1002,7 @@ def resolve_provider(
"x-ai": "xai", "x.ai": "xai", "grok": "xai",
"kimi": "kimi-coding", "kimi-for-coding": "kimi-coding", "moonshot": "kimi-coding",
"kimi-cn": "kimi-coding-cn", "moonshot-cn": "kimi-coding-cn",
"step": "stepfun", "stepfun-coding-plan": "stepfun",
"arcee-ai": "arcee", "arceeai": "arcee",
"minimax-china": "minimax-cn", "minimax_cn": "minimax-cn",
"claude": "anthropic", "claude-code": "anthropic",
@ -1652,7 +1684,7 @@ def _resolve_verify(
insecure: Optional[bool] = None,
ca_bundle: Optional[str] = None,
auth_state: Optional[Dict[str, Any]] = None,
) -> bool | str:
) -> bool | ssl.SSLContext:
tls_state = auth_state.get("tls") if isinstance(auth_state, dict) else {}
tls_state = tls_state if isinstance(tls_state, dict) else {}
@ -1672,13 +1704,12 @@ def _resolve_verify(
if effective_ca:
ca_path = str(effective_ca)
if not os.path.isfile(ca_path):
import logging
logging.getLogger("hermes.auth").warning(
logger.warning(
"CA bundle path does not exist: %s — falling back to default certificates",
ca_path,
)
return True
return ca_path
return ssl.create_default_context(cafile=ca_path)
return True
@ -2721,6 +2752,17 @@ def _update_config_for_provider(
# Clear stale base_url to prevent contamination when switching providers
model_cfg.pop("base_url", None)
# Clear stale api_key/api_mode left over from a previous custom provider.
# When the user switches from e.g. a MiniMax custom endpoint
# (api_mode=anthropic_messages, api_key=mxp-...) to a built-in provider
# (e.g. OpenRouter), the stale api_key/api_mode would override the new
# provider's credentials and transport choice. Built-in providers that
# need a specific api_mode (copilot, xai) set it at request-resolution
# time via `_copilot_runtime_api_mode` / `_detect_api_mode_for_url`, so
# removing the persisted value here is safe.
model_cfg.pop("api_key", None)
model_cfg.pop("api_mode", None)
# When switching to a non-OpenRouter provider, ensure model.default is
# valid for the new provider. An OpenRouter-formatted name like
# "anthropic/claude-opus-4.6" will fail on direct-API providers.
@ -3353,7 +3395,7 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
)
from hermes_cli.models import (
_PROVIDER_MODELS, get_pricing_for_provider, filter_nous_free_models,
_PROVIDER_MODELS, get_pricing_for_provider,
check_nous_free_tier, partition_nous_models_by_tier,
)
model_ids = _PROVIDER_MODELS.get("nous", [])
@ -3362,7 +3404,6 @@ def _login_nous(args, pconfig: ProviderConfig) -> None:
unavailable_models: list = []
if model_ids:
pricing = get_pricing_for_provider("nous")
model_ids = filter_nous_free_models(model_ids, pricing)
free_tier = check_nous_free_tier()
if free_tier:
model_ids, unavailable_models = partition_nous_models_by_tier(

View file

@ -152,6 +152,23 @@ def auth_add_command(args) -> None:
pool = load_pool(provider)
# Clear ALL suppressions for this provider — re-adding a credential is
# a strong signal the user wants auth re-enabled. This covers env:*
# (shell-exported vars), gh_cli (copilot), claude_code, qwen-cli,
# device_code (codex), etc. One consistent re-engagement pattern.
# Matches the Codex device_code re-link pattern that predates this.
if not provider.startswith(CUSTOM_POOL_PREFIX):
try:
from hermes_cli.auth import (
_load_auth_store,
unsuppress_credential_source,
)
suppressed = _load_auth_store().get("suppressed_sources", {})
for src in list(suppressed.get(provider, []) or []):
unsuppress_credential_source(provider, src)
except Exception:
pass
if requested_type == AUTH_TYPE_API_KEY:
token = (getattr(args, "api_key", None) or "").strip()
if not token:
@ -338,71 +355,28 @@ def auth_remove_command(args) -> None:
raise SystemExit(f'No credential matching "{target}" for provider {provider}.')
print(f"Removed {provider} credential #{index} ({removed.label})")
# If this was an env-seeded credential, also clear the env var from .env
# so it doesn't get re-seeded on the next load_pool() call.
if removed.source.startswith("env:"):
env_var = removed.source[len("env:"):]
if env_var:
from hermes_cli.config import remove_env_value
cleared = remove_env_value(env_var)
if cleared:
print(f"Cleared {env_var} from .env")
# Unified removal dispatch. Every credential source Hermes reads from
# (env vars, external OAuth files, auth.json blocks, custom config)
# has a RemovalStep registered in agent.credential_sources. The step
# handles its source-specific cleanup and we centralise suppression +
# user-facing output here so every source behaves identically from
# the user's perspective.
from agent.credential_sources import find_removal_step
from hermes_cli.auth import suppress_credential_source
# If this was a singleton-seeded credential (OAuth device_code, hermes_pkce),
# clear the underlying auth store / credential file so it doesn't get
# re-seeded on the next load_pool() call.
elif provider == "openai-codex" and (
removed.source == "device_code" or removed.source.endswith(":device_code")
):
# Codex tokens live in TWO places: the Hermes auth store and
# ~/.codex/auth.json (the Codex CLI shared file). On every refresh,
# refresh_codex_oauth_pure() writes to both. So clearing only the
# Hermes auth store is not enough — _seed_from_singletons() will
# auto-import from ~/.codex/auth.json on the next load_pool() and
# the removal is instantly undone. Mark the source as suppressed
# so auto-import is skipped; leave ~/.codex/auth.json untouched so
# the Codex CLI itself keeps working.
from hermes_cli.auth import (
_load_auth_store, _save_auth_store, _auth_store_lock,
suppress_credential_source,
)
with _auth_store_lock():
auth_store = _load_auth_store()
providers_dict = auth_store.get("providers")
if isinstance(providers_dict, dict) and provider in providers_dict:
del providers_dict[provider]
_save_auth_store(auth_store)
print(f"Cleared {provider} OAuth tokens from auth store")
suppress_credential_source(provider, "device_code")
print("Suppressed openai-codex device_code source — it will not be re-seeded.")
print("Note: Codex CLI credentials still live in ~/.codex/auth.json")
print("Run `hermes auth add openai-codex` to re-enable if needed.")
step = find_removal_step(provider, removed.source)
if step is None:
# Unregistered source — e.g. "manual", which has nothing external
# to clean up. The pool entry is already gone; we're done.
return
elif removed.source == "device_code" and provider == "nous":
from hermes_cli.auth import (
_load_auth_store, _save_auth_store, _auth_store_lock,
)
with _auth_store_lock():
auth_store = _load_auth_store()
providers_dict = auth_store.get("providers")
if isinstance(providers_dict, dict) and provider in providers_dict:
del providers_dict[provider]
_save_auth_store(auth_store)
print(f"Cleared {provider} OAuth tokens from auth store")
elif removed.source == "hermes_pkce" and provider == "anthropic":
from hermes_constants import get_hermes_home
oauth_file = get_hermes_home() / ".anthropic_oauth.json"
if oauth_file.exists():
oauth_file.unlink()
print("Cleared Hermes Anthropic OAuth credentials")
elif removed.source == "claude_code" and provider == "anthropic":
from hermes_cli.auth import suppress_credential_source
suppress_credential_source(provider, "claude_code")
print("Suppressed claude_code credential — it will not be re-seeded.")
print("Note: Claude Code credentials still live in ~/.claude/.credentials.json")
print("Run `hermes auth add anthropic` to re-enable if needed.")
result = step.remove_fn(provider, removed)
for line in result.cleaned:
print(line)
if result.suppress:
suppress_credential_source(provider, removed.source)
for line in result.hints:
print(line)
def auth_reset_command(args) -> None:

View file

@ -201,7 +201,7 @@ def run_backup(args) -> None:
else:
zf.write(abs_path, arcname=str(rel_path))
total_bytes += abs_path.stat().st_size
except (PermissionError, OSError) as exc:
except (PermissionError, OSError, ValueError) as exc:
errors.append(f" {rel_path}: {exc}")
continue

View file

@ -24,7 +24,6 @@ _FORWARD_COMPAT_TEMPLATE_MODELS: List[tuple[str, tuple[str, ...]]] = [
("gpt-5.4-mini", ("gpt-5.3-codex", "gpt-5.2-codex")),
("gpt-5.4", ("gpt-5.3-codex", "gpt-5.2-codex")),
("gpt-5.3-codex", ("gpt-5.2-codex",)),
("gpt-5.3-codex-spark", ("gpt-5.3-codex", "gpt-5.2-codex")),
]

View file

@ -497,9 +497,8 @@ def _collect_gateway_skill_entries(
# --- Tier 1: Plugin slash commands (never trimmed) ---------------------
plugin_pairs: list[tuple[str, str]] = []
try:
from hermes_cli.plugins import get_plugin_manager
pm = get_plugin_manager()
plugin_cmds = getattr(pm, "_plugin_commands", {})
from hermes_cli.plugins import get_plugin_commands
plugin_cmds = get_plugin_commands()
for cmd_name in sorted(plugin_cmds):
name = sanitize_name(cmd_name) if sanitize_name else cmd_name
if not name:
@ -925,12 +924,22 @@ class SlashCommandCompleter(Completer):
display_meta=meta,
)
# If the user typed @file: or @folder:, delegate to path completions
# If the user typed @file: / @folder: (or just @file / @folder with
# no colon yet), delegate to path completions. Accepting the bare
# form lets the picker surface directories as soon as the user has
# typed `@folder`, without requiring them to first accept the static
# `@folder:` hint and re-trigger completion.
for prefix in ("@file:", "@folder:"):
if word.startswith(prefix):
path_part = word[len(prefix):] or "."
bare = prefix[:-1]
if word == bare or word.startswith(prefix):
want_dir = prefix == "@folder:"
path_part = '' if word == bare else word[len(prefix):]
expanded = os.path.expanduser(path_part)
if expanded.endswith("/"):
if not expanded or expanded == ".":
search_dir, match_prefix = ".", ""
elif expanded.endswith("/"):
search_dir, match_prefix = expanded, ""
else:
search_dir = os.path.dirname(expanded) or "."
@ -946,15 +955,21 @@ class SlashCommandCompleter(Completer):
for entry in sorted(entries):
if match_prefix and not entry.lower().startswith(prefix_lower):
continue
if count >= limit:
break
full_path = os.path.join(search_dir, entry)
is_dir = os.path.isdir(full_path)
# `@folder:` must only surface directories; `@file:` only
# regular files. Without this filter `@folder:` listed
# every .env / .gitignore in the cwd, defeating the
# explicit prefix and confusing users expecting a
# directory picker.
if want_dir != is_dir:
continue
if count >= limit:
break
display_path = os.path.relpath(full_path)
suffix = "/" if is_dir else ""
kind = "folder" if is_dir else "file"
meta = "dir" if is_dir else _file_size_label(full_path)
completion = f"@{kind}:{display_path}{suffix}"
completion = f"{prefix}{display_path}{suffix}"
yield Completion(
completion,
start_position=-len(word),

View file

@ -13,6 +13,7 @@ This module provides:
"""
import copy
import logging
import os
import platform
import re
@ -24,6 +25,7 @@ from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Any, Optional, List, Tuple
# Module-level logger for this config module.
logger = logging.getLogger(__name__)
# True when the host OS reports itself as Windows via platform.system().
_IS_WINDOWS = platform.system() == "Windows"
# Matches a valid environment-variable name: a letter or underscore
# followed by letters, digits, or underscores (full-string match).
_ENV_VAR_NAME_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")
@ -385,6 +387,26 @@ DEFAULT_CONFIG = {
# (terminal and execute_code). Skill-declared required_environment_variables
# are passed through automatically; this list is for non-skill use cases.
"env_passthrough": [],
# Extra files to source in the login shell when building the
# per-session environment snapshot. Use this when tools like nvm,
# pyenv, asdf, or custom PATH entries are registered by files that
# a bash login shell would skip — most commonly ``~/.bashrc``
# (bash doesn't source bashrc in non-interactive login mode) or
# zsh-specific files like ``~/.zshrc`` / ``~/.zprofile``.
# Paths support ``~`` / ``${VAR}``. Missing files are silently
# skipped. When empty, Hermes auto-appends ``~/.bashrc`` if the
# snapshot shell is bash (this is the ``auto_source_bashrc``
# behaviour — disable with that key if you want strict login-only
# semantics).
"shell_init_files": [],
# When true (default), Hermes sources ``~/.bashrc`` in the login
# shell used to build the environment snapshot. This captures
# PATH additions, shell functions, and aliases defined in the
# user's bashrc — which a plain ``bash -l -c`` would otherwise
# miss because bash skips bashrc in non-interactive login mode.
# Turn this off if you have a bashrc that misbehaves when sourced
# non-interactively (e.g. one that hard-exits on TTY checks).
"auto_source_bashrc": True,
"docker_image": "nikolaik/python-nodejs:python3.11-nodejs20",
"docker_forward_env": [],
# Explicit environment variables to set inside Docker containers.
@ -403,7 +425,11 @@ DEFAULT_CONFIG = {
"container_persistent": True, # Persist filesystem across sessions
# Docker volume mounts — share host directories with the container.
# Each entry is "host_path:container_path" (standard Docker -v syntax).
# Example: ["/home/user/projects:/workspace/projects", "/data:/data"]
# Example:
# ["/home/user/projects:/workspace/projects",
# "/home/user/.hermes/cache/documents:/output"]
# For gateway MEDIA delivery, write inside Docker to /output/... and emit
# the host-visible path in MEDIA:, not the container path.
"docker_volumes": [],
# Explicit opt-in: mount the host cwd into /workspace for Docker sessions.
# Default off because passing host directories into a sandbox weakens isolation.
@ -470,13 +496,6 @@ DEFAULT_CONFIG = {
},
},
"smart_model_routing": {
"enabled": False,
"max_simple_chars": 160,
"max_simple_words": 28,
"cheap_model": {},
},
# Auxiliary model config — provider:model for each side task.
# Format: provider is the provider name, model is the model slug.
# "auto" for provider = auto-detect best available provider.
@ -490,6 +509,7 @@ DEFAULT_CONFIG = {
"base_url": "", # direct OpenAI-compatible endpoint (takes precedence over provider)
"api_key": "", # API key for base_url (falls back to OPENAI_API_KEY)
"timeout": 120, # seconds — LLM API call timeout; vision payloads need generous timeout
"extra_body": {}, # OpenAI-compatible provider-specific request fields
"download_timeout": 30, # seconds — image HTTP download timeout; increase for slow connections
},
"web_extract": {
@ -498,6 +518,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 360, # seconds (6min) — per-attempt LLM summarization timeout; increase for slow local models
"extra_body": {},
},
"compression": {
"provider": "auto",
@ -505,6 +526,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 120, # seconds — compression summarises large contexts; increase for local models
"extra_body": {},
},
"session_search": {
"provider": "auto",
@ -512,6 +534,8 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 30,
"extra_body": {},
"max_concurrency": 3, # Clamp parallel summaries to avoid request-burst 429s on small providers
},
"skills_hub": {
"provider": "auto",
@ -519,6 +543,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 30,
"extra_body": {},
},
"approval": {
"provider": "auto",
@ -526,6 +551,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 30,
"extra_body": {},
},
"mcp": {
"provider": "auto",
@ -533,6 +559,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 30,
"extra_body": {},
},
"flush_memories": {
"provider": "auto",
@ -540,6 +567,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 30,
"extra_body": {},
},
"title_generation": {
"provider": "auto",
@ -547,6 +575,7 @@ DEFAULT_CONFIG = {
"base_url": "",
"api_key": "",
"timeout": 30,
"extra_body": {},
},
},
@ -558,9 +587,14 @@ DEFAULT_CONFIG = {
"bell_on_complete": False,
"show_reasoning": False,
"streaming": False,
"final_response_markdown": "strip", # render | strip | raw
"inline_diffs": True, # Show inline diff previews for write actions (write_file, patch, skill_manage)
"show_cost": False, # Show $ cost in the status bar (off by default)
"skin": "default",
"user_message_preview": { # CLI: how many submitted user-message lines to echo back in scrollback
"first_lines": 2,
"last_lines": 2,
},
"interim_assistant_messages": True, # Gateway: show natural mid-turn assistant status messages
"tool_progress_command": False, # Enable /verbose command in messaging gateway
"tool_progress_overrides": {}, # DEPRECATED — use display.platforms instead
@ -579,6 +613,10 @@ DEFAULT_CONFIG = {
},
# Text-to-speech configuration
# Each provider supports an optional `max_text_length:` override for the
# per-request input-character cap. Omit it to use the provider's documented
# limit (OpenAI 4096, xAI 15000, MiniMax 10000, ElevenLabs 5k-40k model-aware,
# Gemini 5000, Edge 5000, Mistral 4000, NeuTTS/KittenTTS 2000).
"tts": {
"provider": "edge", # "edge" (free) | "elevenlabs" (premium) | "openai" | "xai" | "minimax" | "mistral" | "neutts" (local)
"edge": {
@ -631,6 +669,7 @@ DEFAULT_CONFIG = {
"record_key": "ctrl+b",
"max_recording_seconds": 120,
"auto_tts": False,
"beep_enabled": True, # Play record start/stop beeps in CLI voice mode
"silence_threshold": 200, # RMS below this = silence (0-32767)
"silence_duration": 3.0, # Seconds of silence before auto-stop
},
@ -677,6 +716,12 @@ DEFAULT_CONFIG = {
# independent of the parent's max_iterations)
"reasoning_effort": "", # reasoning effort for subagents: "xhigh", "high", "medium",
# "low", "minimal", "none" (empty = inherit parent's level)
"max_concurrent_children": 3, # max parallel children per batch; floor of 1 enforced, no ceiling
# Orchestrator role controls (see tools/delegate_tool.py:_get_max_spawn_depth
# and _get_orchestrator_enabled). Values are clamped to [1, 3] with a
# warning log if out of range.
"max_spawn_depth": 1, # depth cap (1 = flat [default], 2 = orchestrator→leaf, 3 = three-level)
"orchestrator_enabled": True, # kill switch for role="orchestrator"
},
# Ephemeral prefill messages file — JSON list of {role, content} dicts
@ -689,6 +734,20 @@ DEFAULT_CONFIG = {
# always goes to ~/.hermes/skills/.
"skills": {
"external_dirs": [], # e.g. ["~/.agents/skills", "/shared/team-skills"]
# Substitute ${HERMES_SKILL_DIR} and ${HERMES_SESSION_ID} in SKILL.md
# content with the absolute skill directory and the active session id
# before the agent sees it. Lets skill authors reference bundled
# scripts without the agent having to join paths.
"template_vars": True,
# Pre-execute inline shell snippets written as !`cmd` in SKILL.md
# body. Their stdout is inlined into the skill message before the
# agent reads it, so skills can inject dynamic context (dates, git
# state, detected tool versions, …). Off by default because any
# content from the skill author runs on the host without approval;
# only enable for skill sources you trust.
"inline_shell": False,
# Timeout (seconds) for each !`cmd` snippet when inline_shell is on.
"inline_shell_timeout": 10,
},
# Honcho AI-native memory -- reads ~/.honcho/config.json as single source of truth.
@ -708,6 +767,14 @@ DEFAULT_CONFIG = {
"auto_thread": True, # Auto-create threads on @mention in channels (like Slack)
"reactions": True, # Add 👀/✅/❌ reactions to messages during processing
"channel_prompts": {}, # Per-channel ephemeral system prompts (forum parents apply to child threads)
# discord_server tool: restrict which actions the agent may call.
# Default (empty) = all actions allowed (subject to bot privileged intents).
# Accepts comma-separated string ("list_guilds,list_channels,fetch_messages")
# or YAML list. Unknown names are dropped with a warning at load time.
# Actions: list_guilds, server_info, list_channels, channel_info,
# list_roles, member_info, search_members, fetch_messages, list_pins,
# pin_message, unpin_message, create_thread, add_role, remove_role.
"server_actions": "",
},
# WhatsApp platform settings (gateway mode)
@ -751,6 +818,21 @@ DEFAULT_CONFIG = {
"command_allowlist": [],
# User-defined quick commands that bypass the agent loop (type: exec only)
"quick_commands": {},
# Shell-script hooks — declarative bridge that invokes shell scripts
# on plugin-hook events (pre_tool_call, post_tool_call, pre_llm_call,
# subagent_stop, etc.). Each entry maps an event name to a list of
# {matcher, command, timeout} dicts. First registration of a new
# command prompts the user for consent; subsequent runs reuse the
# stored approval from ~/.hermes/shell-hooks-allowlist.json.
# See `website/docs/user-guide/features/hooks.md` for schema + examples.
"hooks": {},
# Auto-accept shell-hook registrations without a TTY prompt. Also
# toggleable per-invocation via --accept-hooks or HERMES_ACCEPT_HOOKS=1.
# Gateway / cron / non-interactive runs need this (or one of the other
# channels) to pick up newly-added hooks.
"hooks_auto_accept": False,
# Custom personalities — add your own entries here
# Supports string format: {"name": "system prompt"}
# Or dict format: {"name": {"description": "...", "system_prompt": "...", "tone": "...", "style": "..."}}
@ -774,6 +856,11 @@ DEFAULT_CONFIG = {
# Wrap delivered cron responses with a header (task name) and footer
# ("The agent cannot see this message"). Set to false for clean output.
"wrap_response": True,
# Maximum number of due jobs to run in parallel per tick.
# null/0 = unbounded (limited only by thread count).
# 1 = serial (pre-v0.9 behaviour).
# Also overridable via HERMES_CRON_MAX_PARALLEL env var.
"max_parallel_jobs": None,
},
# execute_code settings — controls the tool used for programmatic tool calls.
@ -806,8 +893,36 @@ DEFAULT_CONFIG = {
"force_ipv4": False,
},
# Session storage — controls automatic cleanup of ~/.hermes/state.db.
# state.db accumulates every session, message, tool call, and FTS5 index
# entry forever. Without auto-pruning, a heavy user (gateway + cron)
# reports 384MB+ databases with 68K+ messages, which slows down FTS5
# inserts, /resume listing, and insights queries.
"sessions": {
# When true, prune ended sessions older than retention_days once
# per (roughly) min_interval_hours at CLI/gateway/cron startup.
# Only touches ended sessions — active sessions are always preserved.
# Default false: session history is valuable for search recall, and
# silently deleting it could surprise users. Opt in explicitly.
"auto_prune": False,
# How many days of ended-session history to keep. Matches the
# default of ``hermes sessions prune``.
"retention_days": 90,
# VACUUM after a prune that actually deleted rows. SQLite does not
# reclaim disk space on DELETE — freed pages are just reused on
# subsequent INSERTs — so without VACUUM the file stays bloated
# even after pruning. VACUUM blocks writes for a few seconds per
# 100MB, so it only runs at startup, and only when prune deleted
# ≥1 session.
"vacuum_after_prune": True,
# Minimum hours between auto-maintenance runs (avoids repeating
# the sweep on every CLI invocation). Tracked via state_meta in
# state.db itself, so it's shared across all processes.
"min_interval_hours": 24,
},
# Config schema version - bump this when adding new required fields
"_config_version": 19,
"_config_version": 22,
}
# =============================================================================
@ -963,6 +1078,22 @@ OPTIONAL_ENV_VARS = {
"category": "provider",
"advanced": True,
},
"STEPFUN_API_KEY": {
"description": "StepFun Step Plan API key",
"prompt": "StepFun Step Plan API key",
"url": "https://platform.stepfun.com/",
"password": True,
"category": "provider",
"advanced": True,
},
"STEPFUN_BASE_URL": {
"description": "StepFun Step Plan base URL override",
"prompt": "StepFun Step Plan base URL (leave empty for default)",
"url": None,
"password": False,
"category": "provider",
"advanced": True,
},
"ARCEEAI_API_KEY": {
"description": "Arcee AI API key",
"prompt": "Arcee AI API key",
@ -1830,12 +1961,53 @@ def _normalize_custom_provider_entry(
if not isinstance(entry, dict):
return None
# Accept camelCase aliases commonly used in hand-written configs.
_CAMEL_ALIASES: Dict[str, str] = {
"apiKey": "api_key",
"baseUrl": "base_url",
"apiMode": "api_mode",
"keyEnv": "key_env",
"defaultModel": "default_model",
"contextLength": "context_length",
"rateLimitDelay": "rate_limit_delay",
}
_KNOWN_KEYS = {
"name", "api", "url", "base_url", "api_key", "key_env",
"api_mode", "transport", "model", "default_model", "models",
"context_length", "rate_limit_delay",
}
for camel, snake in _CAMEL_ALIASES.items():
if camel in entry and snake not in entry:
logger.warning(
"providers.%s: camelCase key '%s' auto-mapped to '%s' "
"(use snake_case to avoid this warning)",
provider_key or "?", camel, snake,
)
entry[snake] = entry[camel]
unknown = set(entry.keys()) - _KNOWN_KEYS - set(_CAMEL_ALIASES.keys())
if unknown:
logger.warning(
"providers.%s: unknown config keys ignored: %s",
provider_key or "?", ", ".join(sorted(unknown)),
)
from urllib.parse import urlparse
base_url = ""
for url_key in ("api", "url", "base_url"):
for url_key in ("base_url", "url", "api"):
raw_url = entry.get(url_key)
if isinstance(raw_url, str) and raw_url.strip():
base_url = raw_url.strip()
break
candidate = raw_url.strip()
parsed = urlparse(candidate)
if parsed.scheme and parsed.netloc:
base_url = candidate
break
else:
logger.warning(
"providers.%s: '%s' value '%s' is not a valid URL "
"(no scheme or host) — skipped",
provider_key or "?", url_key, candidate,
)
if not base_url:
return None
@ -1974,6 +2146,7 @@ _KNOWN_ROOT_KEYS = {
"fallback_providers", "credential_pool_strategies", "toolsets",
"agent", "terminal", "display", "compression", "delegation",
"auxiliary", "custom_providers", "context", "memory", "gateway",
"sessions",
}
# Valid fields inside a custom_providers list entry
@ -2131,7 +2304,6 @@ def print_config_warnings(config: Optional[Dict[str, Any]] = None) -> None:
if not issues:
return
import sys
lines = ["\033[33m⚠ Config issues detected in config.yaml:\033[0m"]
for ci in issues:
marker = "\033[31m✗\033[0m" if ci.severity == "error" else "\033[33m⚠\033[0m"
@ -2146,7 +2318,6 @@ def warn_deprecated_cwd_env_vars(config: Optional[Dict[str, Any]] = None) -> Non
    These env vars are deprecated; the canonical setting is terminal.cwd
in config.yaml. Prints a migration hint to stderr.
"""
import os, sys
messaging_cwd = os.environ.get("MESSAGING_CWD")
terminal_cwd_env = os.environ.get("TERMINAL_CWD")
@ -2464,6 +2635,71 @@ def migrate_config(interactive: bool = True, quiet: bool = False) -> Dict[str, A
else:
print(" ✓ Removed unused compression.summary_* keys")
# ── Version 20 → 21: plugins are now opt-in; grandfather existing user plugins ──
# The loader now requires plugins to appear in ``plugins.enabled`` before
# loading. Existing installs had all discovered plugins loading by default
# (minus anything in ``plugins.disabled``). To avoid silently breaking
# those setups on upgrade, populate ``plugins.enabled`` with the set of
# currently-installed user plugins that aren't already disabled.
#
# Bundled plugins (shipped in the repo itself) are NOT grandfathered —
# they ship off for everyone, including existing users, so any user who
# wants one has to opt in explicitly.
if current_ver < 21:
config = read_raw_config()
plugins_cfg = config.get("plugins")
if not isinstance(plugins_cfg, dict):
plugins_cfg = {}
# Only migrate if the enabled allow-list hasn't been set yet.
if "enabled" not in plugins_cfg:
disabled = plugins_cfg.get("disabled", []) or []
if not isinstance(disabled, list):
disabled = []
disabled_set = set(disabled)
# Scan ``$HERMES_HOME/plugins/`` for currently installed user plugins.
grandfathered: List[str] = []
try:
user_plugins_dir = get_hermes_home() / "plugins"
if user_plugins_dir.is_dir():
for child in sorted(user_plugins_dir.iterdir()):
if not child.is_dir():
continue
manifest_file = child / "plugin.yaml"
if not manifest_file.exists():
manifest_file = child / "plugin.yml"
if not manifest_file.exists():
continue
try:
with open(manifest_file) as _mf:
manifest = yaml.safe_load(_mf) or {}
except Exception:
manifest = {}
name = manifest.get("name") or child.name
if name in disabled_set:
continue
grandfathered.append(name)
except Exception:
grandfathered = []
plugins_cfg["enabled"] = grandfathered
config["plugins"] = plugins_cfg
save_config(config)
results["config_added"].append(
f"plugins.enabled (opt-in allow-list, {len(grandfathered)} grandfathered)"
)
if not quiet:
if grandfathered:
print(
f" ✓ Plugins now opt-in: grandfathered "
f"{len(grandfathered)} existing plugin(s) into plugins.enabled"
)
else:
print(
" ✓ Plugins now opt-in: no existing plugins to grandfather. "
"Use `hermes plugins enable <name>` to activate."
)
if current_ver < latest_ver and not quiet:
print(f"Config version: {current_ver}{latest_ver}")
@ -2861,24 +3097,11 @@ _FALLBACK_COMMENT = """
# minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
#
# For custom OpenAI-compatible endpoints, add base_url and api_key_env.
# For custom OpenAI-compatible endpoints, add base_url and key_env.
#
# fallback_model:
# provider: openrouter
# model: anthropic/claude-sonnet-4
#
# ── Smart Model Routing ────────────────────────────────────────────────
# Optional cheap-vs-strong routing for simple turns.
# Keeps the primary model for complex work, but can route short/simple
# messages to a cheaper model across providers.
#
# smart_model_routing:
# enabled: true
# max_simple_chars: 160
# max_simple_words: 28
# cheap_model:
# provider: openrouter
# model: google/gemini-2.5-flash
"""
@ -2905,24 +3128,11 @@ _COMMENTED_SECTIONS = """
# minimax (MINIMAX_API_KEY) — MiniMax
# minimax-cn (MINIMAX_CN_API_KEY) — MiniMax (China)
#
# For custom OpenAI-compatible endpoints, add base_url and api_key_env.
# For custom OpenAI-compatible endpoints, add base_url and key_env.
#
# fallback_model:
# provider: openrouter
# model: anthropic/claude-sonnet-4
#
# ── Smart Model Routing ────────────────────────────────────────────────
# Optional cheap-vs-strong routing for simple turns.
# Keeps the primary model for complex work, but can route short/simple
# messages to a cheaper model across providers.
#
# smart_model_routing:
# enabled: true
# max_simple_chars: 160
# max_simple_words: 28
# cheap_model:
# provider: openrouter
# model: google/gemini-2.5-flash
"""
@ -3115,7 +3325,6 @@ def _check_non_ascii_credential(key: str, value: str) -> str:
bad_chars.append(f" position {i}: {ch!r} (U+{ord(ch):04X})")
sanitized = value.encode("ascii", errors="ignore").decode("ascii")
import sys
print(
f"\n Warning: {key} contains non-ASCII characters that will break API requests.\n"
f" This usually happens when copy-pasting from a PDF, rich-text editor,\n"
@ -3385,6 +3594,10 @@ def show_config():
print(f" Personality: {display.get('personality', 'kawaii')}")
print(f" Reasoning: {'on' if display.get('show_reasoning', False) else 'off'}")
print(f" Bell: {'on' if display.get('bell_on_complete', False) else 'off'}")
ump = display.get('user_message_preview', {}) if isinstance(display.get('user_message_preview', {}), dict) else {}
ump_first = ump.get('first_lines', 2)
ump_last = ump.get('last_lines', 2)
print(f" User preview: first {ump_first} line(s), last {ump_last} line(s)")
# Terminal
print()

View file

@ -30,6 +30,7 @@ load_dotenv(PROJECT_ROOT / ".env", override=False, encoding="utf-8")
from hermes_cli.colors import Colors, color
from hermes_constants import OPENROUTER_MODELS_URL
from utils import base_url_host_matches
_PROVIDER_ENV_HINTS = (
@ -277,6 +278,86 @@ def run_doctor(args):
config_path = HERMES_HOME / 'config.yaml'
if config_path.exists():
check_ok(f"{_DHH}/config.yaml exists")
# Validate model.provider and model.default values
try:
import yaml as _yaml
cfg = _yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
model_section = cfg.get("model") or {}
provider_raw = (model_section.get("provider") or "").strip()
provider = provider_raw.lower()
default_model = (model_section.get("default") or model_section.get("model") or "").strip()
known_providers: set = set()
try:
from hermes_cli.auth import PROVIDER_REGISTRY
known_providers = set(PROVIDER_REGISTRY.keys()) | {"openrouter", "custom", "auto"}
except Exception:
pass
try:
from hermes_cli.auth import resolve_provider as _resolve_provider
except Exception:
_resolve_provider = None
canonical_provider = provider
if provider and _resolve_provider is not None and provider != "auto":
try:
canonical_provider = _resolve_provider(provider)
except Exception:
canonical_provider = None
if provider and provider != "auto":
if canonical_provider is None or (known_providers and canonical_provider not in known_providers):
known_list = ", ".join(sorted(known_providers)) if known_providers else "(unavailable)"
check_fail(
f"model.provider '{provider_raw}' is not a recognised provider",
f"(known: {known_list})",
)
issues.append(
f"model.provider '{provider_raw}' is unknown. "
f"Valid providers: {known_list}. "
f"Fix: run 'hermes config set model.provider <valid_provider>'"
)
# Warn if model is set to a provider-prefixed name on a provider that doesn't use them
if default_model and "/" in default_model and canonical_provider and canonical_provider not in ("openrouter", "custom", "auto", "ai-gateway", "kilocode", "opencode-zen", "huggingface", "nous"):
check_warn(
f"model.default '{default_model}' uses a vendor/model slug but provider is '{provider_raw}'",
"(vendor-prefixed slugs belong to aggregators like openrouter)",
)
issues.append(
f"model.default '{default_model}' is vendor-prefixed but model.provider is '{provider_raw}'. "
"Either set model.provider to 'openrouter', or drop the vendor prefix."
)
# Check credentials for the configured provider.
# Limit to API-key providers in PROVIDER_REGISTRY — other provider
# types (OAuth, SDK, openrouter/anthropic/custom/auto) have their
# own env-var checks elsewhere in doctor, and get_auth_status()
# returns a bare {logged_in: False} for anything it doesn't
# explicitly dispatch, which would produce false positives.
if canonical_provider and canonical_provider not in ("auto", "custom", "openrouter"):
try:
from hermes_cli.auth import PROVIDER_REGISTRY, get_auth_status
pconfig = PROVIDER_REGISTRY.get(canonical_provider)
if pconfig and getattr(pconfig, "auth_type", "") == "api_key":
status = get_auth_status(canonical_provider) or {}
configured = bool(status.get("configured") or status.get("logged_in") or status.get("api_key"))
if not configured:
check_fail(
f"model.provider '{canonical_provider}' is set but no API key is configured",
"(check ~/.hermes/.env or run 'hermes setup')",
)
issues.append(
f"No credentials found for provider '{canonical_provider}'. "
f"Run 'hermes setup' or set the provider's API key in {_DHH}/.env, "
f"or switch providers with 'hermes config set model.provider <name>'"
)
except Exception:
pass
except Exception as e:
check_warn("Could not validate model/provider config", f"({e})")
else:
fallback_config = PROJECT_ROOT / 'cli-config.yaml'
if fallback_config.exists():
@ -778,6 +859,16 @@ def run_doctor(args):
elif response.status_code == 401:
print(f"\r {color('', Colors.RED)} OpenRouter API {color('(invalid API key)', Colors.DIM)} ")
issues.append("Check OPENROUTER_API_KEY in .env")
elif response.status_code == 402:
print(f"\r {color('', Colors.RED)} OpenRouter API {color('(out of credits — payment required)', Colors.DIM)}")
issues.append(
"OpenRouter account has insufficient credits. "
"Fix: run 'hermes config set model.provider <provider>' to switch providers, "
"or fund your OpenRouter account at https://openrouter.ai/settings/credits"
)
elif response.status_code == 429:
print(f"\r {color('', Colors.RED)} OpenRouter API {color('(rate limited)', Colors.DIM)} ")
issues.append("OpenRouter rate limit hit — consider switching to a different provider or waiting")
else:
print(f"\r {color('', Colors.RED)} OpenRouter API {color(f'(HTTP {response.status_code})', Colors.DIM)} ")
except Exception as e:
@ -821,6 +912,7 @@ def run_doctor(args):
_apikey_providers = [
("Z.AI / GLM", ("GLM_API_KEY", "ZAI_API_KEY", "Z_AI_API_KEY"), "https://api.z.ai/api/paas/v4/models", "GLM_BASE_URL", True),
("Kimi / Moonshot", ("KIMI_API_KEY",), "https://api.moonshot.ai/v1/models", "KIMI_BASE_URL", True),
("StepFun Step Plan", ("STEPFUN_API_KEY",), "https://api.stepfun.ai/step_plan/v1/models", "STEPFUN_BASE_URL", True),
("Kimi / Moonshot (China)", ("KIMI_CN_API_KEY",), "https://api.moonshot.cn/v1/models", None, True),
("Arcee AI", ("ARCEEAI_API_KEY",), "https://api.arcee.ai/api/v1/models", "ARCEE_BASE_URL", True),
("DeepSeek", ("DEEPSEEK_API_KEY",), "https://api.deepseek.com/v1/models", "DEEPSEEK_BASE_URL", True),
@ -852,18 +944,22 @@ def run_doctor(args):
try:
import httpx
_base = os.getenv(_base_env, "") if _base_env else ""
# Auto-detect Kimi Code keys (sk-kimi-) → api.kimi.com
# Auto-detect Kimi Code keys (sk-kimi-) → api.kimi.com/coding/v1
# (OpenAI-compat surface, which exposes /models for health check).
if not _base and _key.startswith("sk-kimi-"):
_base = "https://api.kimi.com/coding/v1"
# Anthropic-compat endpoints (/anthropic) don't support /models.
# Rewrite to the OpenAI-compat /v1 surface for health checks.
# Anthropic-compat endpoints (/anthropic, api.kimi.com/coding
# with no /v1) don't support /models. Rewrite to the OpenAI-compat
# /v1 surface for health checks.
if _base and _base.rstrip("/").endswith("/anthropic"):
from agent.auxiliary_client import _to_openai_base_url
_base = _to_openai_base_url(_base)
if base_url_host_matches(_base, "api.kimi.com") and _base.rstrip("/").endswith("/coding"):
_base = _base.rstrip("/") + "/v1"
_url = (_base.rstrip("/") + "/models") if _base else _default_url
_headers = {"Authorization": f"Bearer {_key}"}
if "api.kimi.com" in _url.lower():
_headers["User-Agent"] = "KimiCLI/1.30.0"
if base_url_host_matches(_base, "api.kimi.com"):
_headers["User-Agent"] = "claude-code/0.1.0"
_resp = httpx.get(
_url,
headers=_headers,

View file

@ -160,7 +160,6 @@ def _config_overrides(config: dict) -> dict[str, str]:
("display", "streaming"),
("display", "skin"),
("display", "show_reasoning"),
("smart_model_routing", "enabled"),
("privacy", "redact_pii"),
("tts", "provider"),
]

View file

@ -3,6 +3,7 @@
from __future__ import annotations
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
@ -14,6 +15,26 @@ from dotenv import load_dotenv
# pure ASCII (they become HTTP header values).
_CREDENTIAL_SUFFIXES = ("_API_KEY", "_TOKEN", "_SECRET", "_KEY")
# Names we've already warned about during this process, so repeated
# load_hermes_dotenv() calls (user env + project env, gateway hot-reload,
# tests) don't spam the same warning multiple times.
_WARNED_KEYS: set[str] = set()
def _format_offending_chars(value: str, limit: int = 3) -> str:
"""Return a compact 'U+XXXX ('c'), ...' summary of non-ASCII codepoints."""
seen: list[str] = []
for ch in value:
if ord(ch) > 127:
label = f"U+{ord(ch):04X}"
if ch.isprintable():
label += f" ({ch!r})"
if label not in seen:
seen.append(label)
if len(seen) >= limit:
break
return ", ".join(seen)
def _sanitize_loaded_credentials() -> None:
"""Strip non-ASCII characters from credential env vars in os.environ.
@ -21,14 +42,42 @@ def _sanitize_loaded_credentials() -> None:
Called after dotenv loads so the rest of the codebase never sees
non-ASCII API keys. Only touches env vars whose names end with
known credential suffixes (``_API_KEY``, ``_TOKEN``, etc.).
Emits a one-line warning to stderr when characters are stripped.
Silent stripping would mask copy-paste corruption (Unicode lookalike
glyphs from PDFs / rich-text editors, ZWSP from web pages) as opaque
provider-side "invalid API key" errors (see #6843).
"""
for key, value in list(os.environ.items()):
if not any(key.endswith(suffix) for suffix in _CREDENTIAL_SUFFIXES):
continue
try:
value.encode("ascii")
continue
except UnicodeEncodeError:
os.environ[key] = value.encode("ascii", errors="ignore").decode("ascii")
pass
cleaned = value.encode("ascii", errors="ignore").decode("ascii")
os.environ[key] = cleaned
if key in _WARNED_KEYS:
continue
_WARNED_KEYS.add(key)
stripped = len(value) - len(cleaned)
detail = _format_offending_chars(value) or "non-printable"
print(
f" Warning: {key} contained {stripped} non-ASCII character"
f"{'s' if stripped != 1 else ''} ({detail}) — stripped so the "
f"key can be sent as an HTTP header.",
file=sys.stderr,
)
print(
" This usually means the key was copy-pasted from a PDF, "
"rich-text editor, or web page that substituted lookalike\n"
" Unicode glyphs for ASCII letters. If authentication fails "
"(e.g. \"API key not valid\"), re-copy the key from the\n"
" provider's dashboard and run `hermes setup` (or edit the "
".env file in a plain-text editor).",
file=sys.stderr,
)
def _load_dotenv_with_fallback(path: Path, *, override: bool) -> None:
@ -111,6 +160,8 @@ def load_hermes_dotenv(
# Fix corrupted .env files before python-dotenv parses them (#8908).
if user_env.exists():
_sanitize_env_file_if_needed(user_env)
if project_env_path and project_env_path.exists():
_sanitize_env_file_if_needed(project_env_path)
if user_env.exists():
_load_dotenv_with_fallback(user_env, override=True)

View file

@ -994,8 +994,6 @@ def get_systemd_linger_status() -> tuple[bool | None, str]:
if not is_linux():
return None, "not supported on this platform"
import shutil
if not shutil.which("loginctl"):
return None, "loginctl not found"
@ -1347,7 +1345,6 @@ def _ensure_linger_enabled() -> None:
return
import getpass
import shutil
username = getpass.getuser()
linger_file = Path(f"/var/lib/systemd/linger/{username}")
@ -1656,7 +1653,6 @@ def get_launchd_label() -> str:
def _launchd_domain() -> str:
import os
return f"gui/{os.getuid()}"
@ -2643,9 +2639,120 @@ def _setup_dingtalk():
def _setup_wecom():
"""Configure WeCom (Enterprise WeChat) via the standard platform setup."""
wecom_platform = next(p for p in _PLATFORMS if p["key"] == "wecom")
_setup_standard_platform(wecom_platform)
"""Interactive setup for WeCom — scan QR code or manual credential input."""
print()
print(color(" ─── 💬 WeCom (Enterprise WeChat) Setup ───", Colors.CYAN))
existing_bot_id = get_env_value("WECOM_BOT_ID")
existing_secret = get_env_value("WECOM_SECRET")
if existing_bot_id and existing_secret:
print()
print_success("WeCom is already configured.")
if not prompt_yes_no(" Reconfigure WeCom?", False):
return
# ── Choose setup method ──
print()
method_choices = [
"Scan QR code to obtain Bot ID and Secret automatically (recommended)",
"Enter existing Bot ID and Secret manually",
]
method_idx = prompt_choice(" How would you like to set up WeCom?", method_choices, 0)
bot_id = None
secret = None
if method_idx == 0:
# ── QR scan flow ──
try:
from gateway.platforms.wecom import qr_scan_for_bot_info
except Exception as exc:
print_error(f" WeCom QR scan import failed: {exc}")
qr_scan_for_bot_info = None
if qr_scan_for_bot_info is not None:
try:
credentials = qr_scan_for_bot_info()
except KeyboardInterrupt:
print()
print_warning(" WeCom setup cancelled.")
return
except Exception as exc:
print_warning(f" QR scan failed: {exc}")
credentials = None
if credentials:
bot_id = credentials.get("bot_id", "")
secret = credentials.get("secret", "")
print_success(" ✔ QR scan successful! Bot ID and Secret obtained.")
if not bot_id or not secret:
print_info(" QR scan did not complete. Continuing with manual input.")
bot_id = None
secret = None
# ── Manual credential input ──
if not bot_id or not secret:
print()
print_info(" 1. Go to WeCom Application → Workspace → Smart Robot -> Create smart robots")
print_info(" 2. Select API Mode")
print_info(" 3. Copy the Bot ID and Secret from the bot's credentials info")
print_info(" 4. The bot connects via WebSocket — no public endpoint needed")
print()
bot_id = prompt(" Bot ID", password=False)
if not bot_id:
print_warning(" Skipped — WeCom won't work without a Bot ID.")
return
secret = prompt(" Secret", password=True)
if not secret:
print_warning(" Skipped — WeCom won't work without a Secret.")
return
# ── Save core credentials ──
save_env_value("WECOM_BOT_ID", bot_id)
save_env_value("WECOM_SECRET", secret)
# ── Allowed users (deny-by-default security) ──
print()
print_info(" The gateway DENIES all users by default for security.")
print_info(" Enter user IDs to create an allowlist, or leave empty.")
allowed = prompt(" Allowed user IDs (comma-separated, or empty)", password=False)
if allowed:
cleaned = allowed.replace(" ", "")
save_env_value("WECOM_ALLOWED_USERS", cleaned)
print_success(" Saved — only these users can interact with the bot.")
else:
print()
access_choices = [
"Enable open access (anyone can message the bot)",
"Use DM pairing (unknown users request access, you approve with 'hermes pairing approve')",
"Disable direct messages",
"Skip for now (bot will deny all users until configured)",
]
access_idx = prompt_choice(" How should unauthorized users be handled?", access_choices, 1)
if access_idx == 0:
save_env_value("WECOM_DM_POLICY", "open")
save_env_value("GATEWAY_ALLOW_ALL_USERS", "true")
print_warning(" Open access enabled — anyone can use your bot!")
elif access_idx == 1:
save_env_value("WECOM_DM_POLICY", "pairing")
print_success(" DM pairing mode — users will receive a code to request access.")
print_info(" Approve with: hermes pairing approve <platform> <code>")
elif access_idx == 2:
save_env_value("WECOM_DM_POLICY", "disabled")
print_warning(" Direct messages disabled.")
else:
print_info(" Skipped — configure later with 'hermes gateway setup'")
# ── Home channel (optional) ──
print()
print_info(" Chat ID for scheduled results and notifications.")
home = prompt(" Home chat ID (optional, for cron/notifications)", password=False)
if home:
save_env_value("WECOM_HOME_CHANNEL", home)
print_success(f" Home channel set to {home}")
print()
print_success("💬 WeCom configured!")
def _is_service_installed() -> bool:
@ -3025,7 +3132,8 @@ def _setup_qqbot():
if method_idx == 0:
# ── QR scan-to-configure ──
try:
credentials = _qqbot_qr_flow()
from gateway.platforms.qqbot import qr_register
credentials = qr_register()
except KeyboardInterrupt:
print()
print_warning(" QQ Bot setup cancelled.")
@ -3107,106 +3215,6 @@ def _setup_qqbot():
print_info(f" App ID: {credentials['app_id']}")
def _qqbot_render_qr(url: str) -> bool:
"""Try to render a QR code in the terminal. Returns True if successful."""
try:
import qrcode as _qr
qr = _qr.QRCode(border=1,error_correction=_qr.constants.ERROR_CORRECT_L)
qr.add_data(url)
qr.make(fit=True)
qr.print_ascii(invert=True)
return True
except Exception:
return False
def _qqbot_qr_flow():
    """Run the QR-code scan-to-configure flow.

    Returns a dict with app_id, client_secret, user_openid on success,
    or None on failure/cancel.
    """
    # Late imports: the gateway package may be absent in minimal installs,
    # in which case we report the failure and bail instead of crashing.
    try:
        from gateway.platforms.qqbot import (
            create_bind_task, poll_bind_result, build_connect_url,
            decrypt_secret, BindStatus,
        )
        from gateway.platforms.qqbot.constants import ONBOARD_POLL_INTERVAL
    except Exception as exc:
        print_error(f" QQBot onboard import failed: {exc}")
        return None
    import asyncio
    import time
    # A QR code expires server-side; allow a few regenerations before giving up.
    MAX_REFRESHES = 3
    refresh_count = 0
    while refresh_count <= MAX_REFRESHES:
        # Fresh loop per attempt; always closed via the finally below.
        loop = asyncio.new_event_loop()
        # ── Create bind task ──
        try:
            task_id, aes_key = loop.run_until_complete(create_bind_task())
        except Exception as e:
            print_warning(f" Failed to create bind task: {e}")
            loop.close()
            return None
        url = build_connect_url(task_id)
        # ── Display QR code + URL ──
        print()
        if _qqbot_render_qr(url):
            print(f" Scan the QR code above, or open this URL directly:\n {url}")
        else:
            print(f" Open this URL in QQ on your phone:\n {url}")
            print_info(" Tip: pip install qrcode to show a scannable QR code here")
        # ── Poll loop (silent — keep QR visible at bottom) ──
        try:
            while True:
                try:
                    status, app_id, encrypted_secret, user_openid = loop.run_until_complete(
                        poll_bind_result(task_id)
                    )
                except Exception:
                    # Transient poll failure — retry after the normal interval.
                    time.sleep(ONBOARD_POLL_INTERVAL)
                    continue
                if status == BindStatus.COMPLETED:
                    # Secret is AES-encrypted with the key minted at task creation.
                    client_secret = decrypt_secret(encrypted_secret, aes_key)
                    print()
                    print_success(f" QR scan complete! (App ID: {app_id})")
                    if user_openid:
                        print_info(f" Scanner's OpenID: {user_openid}")
                    return {
                        "app_id": app_id,
                        "client_secret": client_secret,
                        "user_openid": user_openid,
                    }
                if status == BindStatus.EXPIRED:
                    refresh_count += 1
                    if refresh_count > MAX_REFRESHES:
                        print()
                        print_warning(f" QR code expired {MAX_REFRESHES} times — giving up.")
                        return None
                    print()
                    print_warning(f" QR code expired, refreshing... ({refresh_count}/{MAX_REFRESHES})")
                    loop.close()
                    break  # outer while creates a new task
                time.sleep(ONBOARD_POLL_INTERVAL)
        except KeyboardInterrupt:
            loop.close()
            raise
        finally:
            # Closing an already-closed loop is a no-op, so the EXPIRED
            # branch's explicit close above is safe to repeat here.
            loop.close()
    return None
def _setup_signal():
"""Interactive setup for Signal messenger."""
import shutil
@ -3394,6 +3402,8 @@ def gateway_setup():
_setup_feishu()
elif platform["key"] == "qqbot":
_setup_qqbot()
elif platform["key"] == "wecom":
_setup_wecom()
else:
_setup_standard_platform(platform)

385
hermes_cli/hooks.py Normal file
View file

@ -0,0 +1,385 @@
"""hermes hooks — inspect and manage shell-script hooks.
Usage::
hermes hooks list
hermes hooks test <event> [--for-tool X] [--payload-file F]
hermes hooks revoke <command>
hermes hooks doctor
Consent records live under ``~/.hermes/shell-hooks-allowlist.json`` and
hook definitions come from the ``hooks:`` block in ``~/.hermes/config.yaml``
(the same config read by the CLI / gateway at startup).
This module is a thin CLI shell over :mod:`agent.shell_hooks`; every
shared concern (payload serialisation, response parsing, allowlist
format) lives there.
"""
from __future__ import annotations
import json
import os
from pathlib import Path
from typing import Any, Dict, List, Optional
def hooks_command(args) -> None:
    """Entry point for ``hermes hooks`` — dispatches to the requested action."""
    action = getattr(args, "hooks_action", None)
    if not action:
        # No subcommand given: print a short usage hint and stop.
        print("Usage: hermes hooks {list|test|revoke|doctor}")
        print("Run 'hermes hooks --help' for details.")
        return
    if action == "list" or action == "ls":
        _cmd_list(args)
    elif action == "test":
        _cmd_test(args)
    elif action == "revoke" or action == "remove" or action == "rm":
        _cmd_revoke(args)
    elif action == "doctor":
        _cmd_doctor(args)
    else:
        print(f"Unknown hooks subcommand: {action}")
# ---------------------------------------------------------------------------
# list
# ---------------------------------------------------------------------------
def _cmd_list(_args) -> None:
    """Print every configured shell hook grouped by event, with allowlist status."""
    from hermes_cli.config import load_config
    from agent import shell_hooks

    specs = shell_hooks.iter_configured_hooks(load_config())
    if not specs:
        print("No shell hooks configured in ~/.hermes/config.yaml.")
        print("See `hermes hooks --help` or")
        print(" website/docs/user-guide/features/hooks.md")
        print("for the config schema and worked examples.")
        return

    # Group specs per event so output reads as one section per event name.
    grouped: Dict[str, List] = {}
    for spec in specs:
        grouped.setdefault(spec.event, []).append(spec)

    allowlist = shell_hooks.load_allowlist()
    approved_pairs = {
        (item.get("event"), item.get("command"))
        for item in allowlist.get("approvals", [])
        if isinstance(item, dict)
    }

    print(f"Configured shell hooks ({len(specs)} total):\n")
    for event, event_specs in sorted(grouped.items()):
        print(f" [{event}]")
        for spec in event_specs:
            is_allowed = (spec.event, spec.command) in approved_pairs
            status = "✓ allowed" if is_allowed else "✗ not allowlisted"
            matcher_part = f" matcher={spec.matcher!r}" if spec.matcher else ""
            print(
                f" - {spec.command}{matcher_part} "
                f"(timeout={spec.timeout}s, {status})"
            )
            if is_allowed:
                entry = shell_hooks.allowlist_entry_for(spec.event, spec.command)
                if entry and entry.get("approved_at"):
                    print(f" approved_at: {entry['approved_at']}")
                # Warn when the script changed after it was approved.
                current_mtime = shell_hooks.script_mtime_iso(spec.command)
                approved_mtime = entry.get("script_mtime_at_approval")
                if current_mtime and approved_mtime and current_mtime > approved_mtime:
                    print(
                        f" ⚠ script modified since approval "
                        f"(was {approved_mtime}, now {current_mtime}) — "
                        f"run `hermes hooks doctor` to re-validate"
                    )
        print()
# ---------------------------------------------------------------------------
# test
# ---------------------------------------------------------------------------
# Synthetic kwargs matching the real invoke_hook() call sites — these are
# passed verbatim to agent.shell_hooks.run_once(), which routes them through
# the same _serialize_payload() that production firings use. That way the
# stdin a script sees under `hermes hooks test` and `hermes hooks doctor`
# is identical in shape to what it will see at runtime.
_DEFAULT_PAYLOADS: Dict[str, Dict[str, Any]] = {
    # Tool lifecycle — pre/post pairs share the same core kwargs; the post
    # variant additionally carries the serialized tool result.
    "pre_tool_call": {
        "tool_name": "terminal",
        "args": {"command": "echo hello"},
        "session_id": "test-session",
        "task_id": "test-task",
        "tool_call_id": "test-call",
    },
    "post_tool_call": {
        "tool_name": "terminal",
        "args": {"command": "echo hello"},
        "session_id": "test-session",
        "task_id": "test-task",
        "tool_call_id": "test-call",
        "result": '{"output": "hello"}',
    },
    # LLM turn lifecycle.
    "pre_llm_call": {
        "session_id": "test-session",
        "user_message": "What is the weather?",
        "conversation_history": [],
        "is_first_turn": True,
        "model": "gpt-4",
        "platform": "cli",
    },
    "post_llm_call": {
        "session_id": "test-session",
        "model": "gpt-4",
        "platform": "cli",
    },
    # Session lifecycle events carry only the session id.
    "on_session_start": {"session_id": "test-session"},
    "on_session_end": {"session_id": "test-session"},
    "on_session_finalize": {"session_id": "test-session"},
    "on_session_reset": {"session_id": "test-session"},
    # Raw API request lifecycle — includes provider/endpoint metadata and
    # rough size metrics so hooks can budget or log per-request.
    "pre_api_request": {
        "session_id": "test-session",
        "task_id": "test-task",
        "platform": "cli",
        "model": "claude-sonnet-4-6",
        "provider": "anthropic",
        "base_url": "https://api.anthropic.com",
        "api_mode": "anthropic_messages",
        "api_call_count": 1,
        "message_count": 4,
        "tool_count": 12,
        "approx_input_tokens": 2048,
        "request_char_count": 8192,
        "max_tokens": 4096,
    },
    "post_api_request": {
        "session_id": "test-session",
        "task_id": "test-task",
        "platform": "cli",
        "model": "claude-sonnet-4-6",
        "provider": "anthropic",
        "base_url": "https://api.anthropic.com",
        "api_mode": "anthropic_messages",
        "api_call_count": 1,
        "api_duration": 1.234,
        "finish_reason": "stop",
        "message_count": 4,
        "response_model": "claude-sonnet-4-6",
        "usage": {"input_tokens": 2048, "output_tokens": 512},
        "assistant_content_chars": 1200,
        "assistant_tool_call_count": 0,
    },
    # Fired when a spawned subagent finishes.
    "subagent_stop": {
        "parent_session_id": "parent-sess",
        "child_role": None,
        "child_summary": "Synthetic summary for hooks test",
        "child_status": "completed",
        "duration_ms": 1234,
    },
}
def _cmd_test(args) -> None:
    """Fire the configured hooks for one event against a synthetic payload."""
    from hermes_cli.config import load_config
    from hermes_cli.plugins import VALID_HOOKS
    from agent import shell_hooks

    event = args.event
    if event not in VALID_HOOKS:
        print(f"Unknown event: {event!r}")
        print(f"Valid events: {', '.join(sorted(VALID_HOOKS))}")
        return

    # Base payload is the canonical synthetic shape for this event; then
    # --for-tool overrides tool_name and --payload-file merges extra kwargs.
    payload = dict(_DEFAULT_PAYLOADS.get(event, {"session_id": "test-session"}))
    tool_filter = getattr(args, "for_tool", None)
    if tool_filter:
        payload["tool_name"] = tool_filter
    payload_file = getattr(args, "payload_file", None)
    if payload_file:
        try:
            custom = json.loads(Path(payload_file).read_text())
        except Exception as exc:
            print(f"Error reading payload file: {exc}")
            return
        if isinstance(custom, dict):
            payload.update(custom)
        else:
            print(f"Warning: {payload_file} is not a JSON object; ignoring")

    # Select the hooks for this event; for tool events the matcher must
    # also accept the requested tool name.
    specs = [
        s for s in shell_hooks.iter_configured_hooks(load_config())
        if s.event == event
    ]
    if tool_filter:
        specs = [
            s for s in specs
            if s.event not in ("pre_tool_call", "post_tool_call")
            or s.matches_tool(tool_filter)
        ]
    if not specs:
        print(f"No shell hooks configured for event: {event}")
        if tool_filter:
            print(f"(with matcher filter --for-tool={tool_filter})")
        return

    print(f"Firing {len(specs)} hook(s) for event '{event}':\n")
    for spec in specs:
        print(f"{spec.command}")
        _print_run_result(shell_hooks.run_once(spec, payload))
        print()
def _print_run_result(result: Dict[str, Any]) -> None:
if result.get("error"):
print(f" ✗ error: {result['error']}")
return
if result.get("timed_out"):
print(f" ✗ timed out after {result['elapsed_seconds']}s")
return
rc = result.get("returncode")
elapsed = result.get("elapsed_seconds", 0)
print(f" exit={rc} elapsed={elapsed}s")
stdout = (result.get("stdout") or "").strip()
stderr = (result.get("stderr") or "").strip()
if stdout:
print(f" stdout: {_truncate(stdout, 400)}")
if stderr:
print(f" stderr: {_truncate(stderr, 400)}")
parsed = result.get("parsed")
if parsed:
print(f" parsed (Hermes wire shape): {json.dumps(parsed)}")
else:
print(" parsed: <none — hook contributed nothing to the dispatcher>")
def _truncate(s: str, n: int) -> str:
return s if len(s) <= n else s[: n - 3] + "..."
# ---------------------------------------------------------------------------
# revoke
# ---------------------------------------------------------------------------
def _cmd_revoke(args) -> None:
    """Remove allowlist approval(s) for a hook command so it stops firing."""
    from agent import shell_hooks

    removed_count = shell_hooks.revoke(args.command)
    if not removed_count:
        print(f"No allowlist entry found for command: {args.command}")
        return
    print(f"Removed {removed_count} allowlist entry/entries for: {args.command}")
    # Revocation only edits the on-disk allowlist; live processes already
    # registered their callbacks and keep them until restart.
    print(
        "Note: currently running CLI / gateway processes keep their "
        "already-registered callbacks until they restart."
    )
# ---------------------------------------------------------------------------
# doctor
# ---------------------------------------------------------------------------
def _cmd_doctor(_args) -> None:
    """Health-check every configured shell hook and summarize issues found."""
    from hermes_cli.config import load_config
    from agent import shell_hooks

    specs = shell_hooks.iter_configured_hooks(load_config())
    if not specs:
        print("No shell hooks configured — nothing to check.")
        return

    print(f"Checking {len(specs)} configured shell hook(s)...\n")
    total_issues = 0
    for spec in specs:
        print(f" [{spec.event}] {spec.command}")
        total_issues += _doctor_one(spec, shell_hooks)
        print()

    if total_issues:
        print(f"{total_issues} issue(s) found. Fix before relying on these hooks.")
    else:
        print("All shell hooks look healthy.")
def _doctor_one(spec, shell_hooks) -> int:
problems = 0
# 1. Script exists and is executable
if shell_hooks.script_is_executable(spec.command):
print(" ✓ script exists and is executable")
else:
problems += 1
print(" ✗ script missing or not executable "
"(chmod +x the file, or fix the path)")
# 2. Allowlist status
entry = shell_hooks.allowlist_entry_for(spec.event, spec.command)
if entry:
print(f" ✓ allowlisted (approved {entry.get('approved_at', '?')})")
else:
problems += 1
print(" ✗ not allowlisted — hook will NOT fire at runtime "
"(run with --accept-hooks once, or confirm at the TTY prompt)")
# 3. Mtime drift
if entry and entry.get("script_mtime_at_approval"):
mtime_now = shell_hooks.script_mtime_iso(spec.command)
mtime_at = entry["script_mtime_at_approval"]
if mtime_now and mtime_at and mtime_now > mtime_at:
problems += 1
print(f" ⚠ script modified since approval "
f"(was {mtime_at}, now {mtime_now}) — review changes, "
f"then `hermes hooks revoke` + re-approve to refresh")
elif mtime_now and mtime_at and mtime_now == mtime_at:
print(" ✓ script unchanged since approval")
# 4. Produces valid JSON for a synthetic payload — only when the entry
# is already allowlisted. Otherwise `hermes hooks doctor` would execute
# every script listed in a freshly-pulled config before the user has
# reviewed them, which directly contradicts the documented workflow
# ("spot newly-added hooks *before they register*").
if not entry:
print(" skipped JSON smoke test — not allowlisted yet. "
"Approve the hook first (via TTY prompt or --accept-hooks), "
"then re-run `hermes hooks doctor`.")
elif shell_hooks.script_is_executable(spec.command):
payload = _DEFAULT_PAYLOADS.get(spec.event, {"extra": {}})
result = shell_hooks.run_once(spec, payload)
if result.get("timed_out"):
problems += 1
print(f" ✗ timed out after {result['elapsed_seconds']}s "
f"on synthetic payload (timeout={spec.timeout}s)")
elif result.get("error"):
problems += 1
print(f" ✗ execution error: {result['error']}")
else:
rc = result.get("returncode")
elapsed = result.get("elapsed_seconds", 0)
stdout = (result.get("stdout") or "").strip()
if stdout:
try:
json.loads(stdout)
print(f" ✓ produced valid JSON on synthetic payload "
f"(exit={rc}, {elapsed}s)")
except json.JSONDecodeError:
problems += 1
print(f" ✗ stdout was not valid JSON (exit={rc}, "
f"{elapsed}s): {_truncate(stdout, 120)}")
else:
print(f" ✓ ran clean with empty stdout "
f"(exit={rc}, {elapsed}s) — hook is observer-only")
return problems

View file

@ -51,6 +51,19 @@ import sys
from pathlib import Path
from typing import Optional
def _add_accept_hooks_flag(parser) -> None:
"""Attach the ``--accept-hooks`` flag. Shared across every agent
subparser so the flag works regardless of CLI position."""
parser.add_argument(
"--accept-hooks",
action="store_true",
default=argparse.SUPPRESS,
help=(
"Auto-approve unseen shell hooks without a TTY prompt "
"(equivalent to HERMES_ACCEPT_HOOKS=1 / hooks_auto_accept: true)."
),
)
def _require_tty(command_name: str) -> None:
"""Exit with a clear error if stdin is not a terminal.
@ -180,7 +193,7 @@ import time as _time
from datetime import datetime
from hermes_cli import __version__, __release_date__
from hermes_constants import OPENROUTER_BASE_URL
from hermes_constants import AI_GATEWAY_BASE_URL, OPENROUTER_BASE_URL
logger = logging.getLogger(__name__)
@ -605,7 +618,6 @@ def _exec_in_container(container_info: dict, cli_args: list):
container_info: dict with backend, container_name, exec_user, hermes_bin
cli_args: the original CLI arguments (everything after 'hermes')
"""
import shutil
backend = container_info["backend"]
container_name = container_info["container_name"]
@ -693,6 +705,10 @@ def _resolve_session_by_name_or_id(name_or_id: str) -> Optional[str]:
- If it looks like a session ID (contains underscore + hex), try direct lookup first.
- Otherwise, treat it as a title and use resolve_session_by_title (auto-latest).
- Falls back to the other method if the first doesn't match.
- If the resolved session is a compression root, follow the chain forward
to the latest continuation. Users who remember the old root ID (e.g.
from an exit summary printed before the bug fix, or from notes) get
resumed at the live tip instead of a stale parent with no messages.
"""
try:
from hermes_state import SessionDB
@ -701,14 +717,23 @@ def _resolve_session_by_name_or_id(name_or_id: str) -> Optional[str]:
# Try as exact session ID first
session = db.get_session(name_or_id)
resolved_id: Optional[str] = None
if session:
db.close()
return session["id"]
resolved_id = session["id"]
else:
# Try as title (with auto-latest for lineage)
resolved_id = db.resolve_session_by_title(name_or_id)
if resolved_id:
# Project forward through compression chain so resumes land on
# the live tip instead of a dead compressed parent.
try:
resolved_id = db.get_compression_tip(resolved_id) or resolved_id
except Exception:
pass
# Try as title (with auto-latest for lineage)
session_id = db.resolve_session_by_title(name_or_id)
db.close()
return session_id
return resolved_id
except Exception:
pass
return None
@ -990,6 +1015,17 @@ def _launch_tui(resume_session_id: Optional[str] = None, tui_dev: bool = False):
)
env.setdefault("HERMES_PYTHON", sys.executable)
env.setdefault("HERMES_CWD", os.getcwd())
# Guarantee an 8GB V8 heap + exposed GC for the TUI. Default node cap is
# ~1.54GB depending on version and can fatal-OOM on long sessions with
# large transcripts / reasoning blobs. Token-level merge: respect any
# user-supplied --max-old-space-size (they may have set it higher) and
# avoid duplicating --expose-gc.
_tokens = env.get("NODE_OPTIONS", "").split()
if not any(t.startswith("--max-old-space-size=") for t in _tokens):
_tokens.append("--max-old-space-size=8192")
if "--expose-gc" not in _tokens:
_tokens.append("--expose-gc")
env["NODE_OPTIONS"] = " ".join(_tokens)
if resume_session_id:
env["HERMES_TUI_RESUME"] = resume_session_id
@ -1144,8 +1180,6 @@ def cmd_gateway(args):
def cmd_whatsapp(args):
"""Set up WhatsApp: choose mode, configure, install bridge, pair via QR."""
_require_tty("whatsapp")
import subprocess
from pathlib import Path
from hermes_cli.config import get_env_value, save_env_value
print()
@ -1254,16 +1288,27 @@ def cmd_whatsapp(args):
return
if not (bridge_dir / "node_modules").exists():
print("\n→ Installing WhatsApp bridge dependencies...")
result = subprocess.run(
["npm", "install"],
cwd=str(bridge_dir),
capture_output=True,
text=True,
timeout=120,
)
print("\n→ Installing WhatsApp bridge dependencies (this can take a few minutes)...")
npm = shutil.which("npm")
if not npm:
print(" ✗ npm not found on PATH — install Node.js first")
return
try:
result = subprocess.run(
[npm, "install", "--no-fund", "--no-audit", "--progress=false"],
cwd=str(bridge_dir),
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
text=True,
)
except KeyboardInterrupt:
print("\n ✗ Install cancelled")
return
if result.returncode != 0:
print(f" ✗ npm install failed: {result.stderr}")
err = (result.stderr or "").strip()
preview = "\n".join(err.splitlines()[-30:]) if err else "(no output)"
print(" ✗ npm install failed:")
print(preview)
return
print(" ✓ Dependencies installed")
else:
@ -1282,8 +1327,6 @@ def cmd_whatsapp(args):
except (EOFError, KeyboardInterrupt):
response = "n"
if response.lower() in ("y", "yes"):
import shutil
shutil.rmtree(session_dir, ignore_errors=True)
session_dir.mkdir(parents=True, exist_ok=True)
print(" ✓ Session cleared")
@ -1379,8 +1422,6 @@ def select_provider_and_model(args=None):
# Read effective provider the same way the CLI does at startup:
# config.yaml model.provider > env var > auto-detect
import os
config_provider = None
model_cfg = config.get("model")
if isinstance(model_cfg, dict):
@ -1491,6 +1532,8 @@ def select_provider_and_model(args=None):
# Step 2: Provider-specific setup + model selection
if selected_provider == "openrouter":
_model_flow_openrouter(config, current_model)
elif selected_provider == "ai-gateway":
_model_flow_ai_gateway(config, current_model)
elif selected_provider == "nous":
_model_flow_nous(config, current_model, args=args)
elif selected_provider == "openai-codex":
@ -1523,6 +1566,8 @@ def select_provider_and_model(args=None):
_model_flow_anthropic(config, current_model)
elif selected_provider == "kimi-coding":
_model_flow_kimi(config, current_model)
elif selected_provider == "stepfun":
_model_flow_stepfun(config, current_model)
elif selected_provider == "bedrock":
_model_flow_bedrock(config, current_model)
elif selected_provider in (
@ -1536,7 +1581,6 @@ def select_provider_and_model(args=None):
"kilocode",
"opencode-zen",
"opencode-go",
"ai-gateway",
"alibaba",
"huggingface",
"xiaomi",
@ -2008,6 +2052,63 @@ def _model_flow_openrouter(config, current_model=""):
print("No change.")
def _model_flow_ai_gateway(config, current_model=""):
    """Vercel AI Gateway provider: ensure API key, then pick model with pricing."""
    from hermes_cli.auth import (
        _prompt_model_selection,
        _save_model_choice,
        deactivate_provider,
    )
    from hermes_cli.config import get_env_value, save_env_value

    # Prompt for a key only when none is stored yet.
    if not get_env_value("AI_GATEWAY_API_KEY"):
        print("No Vercel AI Gateway API key configured.")
        print("Create API key here: https://vercel.com/d?to=%2F%5Bteam%5D%2F%7E%2Fai-gateway&title=AI+Gateway")
        print("Add a payment method to get $5 in free credits.")
        print()
        try:
            import getpass
            entered = getpass.getpass("AI Gateway API key (or Enter to cancel): ").strip()
        except (KeyboardInterrupt, EOFError):
            print()
            return
        if not entered:
            print("Cancelled.")
            return
        save_env_value("AI_GATEWAY_API_KEY", entered)
        print("API key saved.")
        print()

    from hermes_cli.models import ai_gateway_model_ids, get_pricing_for_provider

    catalog = ai_gateway_model_ids(force_refresh=True)
    pricing = get_pricing_for_provider("ai-gateway", force_refresh=True)
    choice = _prompt_model_selection(
        catalog, current_model=current_model, pricing=pricing
    )
    if not choice:
        print("No change.")
        return

    _save_model_choice(choice)
    from hermes_cli.config import load_config, save_config

    # Persist provider + endpoint so subsequent runs route through the gateway.
    cfg = load_config()
    model_block = cfg.get("model")
    if not isinstance(model_block, dict):
        model_block = {"default": model_block} if model_block else {}
    cfg["model"] = model_block
    model_block["provider"] = "ai-gateway"
    model_block["base_url"] = AI_GATEWAY_BASE_URL
    model_block["api_mode"] = "chat_completions"
    save_config(cfg)
    deactivate_provider()
    print(f"Default model set to: {choice} (via Vercel AI Gateway)")
def _model_flow_nous(config, current_model="", args=None):
"""Nous Portal provider: ensure logged in, then pick model."""
from hermes_cli.auth import (
@ -2028,7 +2129,6 @@ def _model_flow_nous(config, current_model="", args=None):
save_env_value,
)
from hermes_cli.nous_subscription import prompt_enable_tool_gateway
import argparse
state = get_provider_auth_state("nous")
if not state or not state.get("access_token"):
@ -2067,7 +2167,6 @@ def _model_flow_nous(config, current_model="", args=None):
from hermes_cli.models import (
_PROVIDER_MODELS,
get_pricing_for_provider,
filter_nous_free_models,
check_nous_free_tier,
partition_nous_models_by_tier,
)
@ -2110,10 +2209,8 @@ def _model_flow_nous(config, current_model="", args=None):
# Check if user is on free tier
free_tier = check_nous_free_tier()
# For both tiers: apply the allowlist filter first (removes non-allowlisted
# free models and allowlist models that aren't actually free).
# Then for free users: partition remaining models into selectable/unavailable.
model_ids = filter_nous_free_models(model_ids, pricing)
# For free users: partition models into selectable/unavailable based on
# whether they are free per the Portal-reported pricing.
unavailable_models: list[str] = []
if free_tier:
model_ids, unavailable_models = partition_nous_models_by_tier(
@ -2196,7 +2293,6 @@ def _model_flow_openai_codex(config, current_model=""):
DEFAULT_CODEX_BASE_URL,
)
from hermes_cli.codex_models import get_codex_model_ids
import argparse
status = get_codex_auth_status()
if not status.get("logged_in"):
@ -2351,7 +2447,7 @@ def _model_flow_google_gemini_cli(_config, current_model=""):
return
models = list(_PROVIDER_MODELS.get("google-gemini-cli") or [])
default = current_model or (models[0] if models else "gemini-2.5-flash")
default = current_model or (models[0] if models else "gemini-3-flash-preview")
selected = _prompt_model_selection(models, current_model=default)
if selected:
_save_model_choice(selected)
@ -3327,8 +3423,9 @@ def _model_flow_kimi(config, current_model=""):
# Step 3: Model selection — show appropriate models for the endpoint
if is_coding_plan:
# Coding Plan models (kimi-k2.5 first)
# Coding Plan models (kimi-k2.6 first)
model_list = [
"kimi-k2.6",
"kimi-k2.5",
"kimi-for-coding",
"kimi-k2-thinking",
@ -3367,6 +3464,140 @@ def _model_flow_kimi(config, current_model=""):
print("No change.")
def _infer_stepfun_region(base_url: str) -> str:
"""Infer the current StepFun region from the configured endpoint."""
normalized = (base_url or "").strip().lower()
if "api.stepfun.com" in normalized:
return "china"
return "international"
def _stepfun_base_url_for_region(region: str) -> str:
    """Map a region key ('china' / anything else) to its Step Plan endpoint."""
    from hermes_cli.auth import (
        STEPFUN_STEP_PLAN_CN_BASE_URL,
        STEPFUN_STEP_PLAN_INTL_BASE_URL,
    )
    if region == "china":
        return STEPFUN_STEP_PLAN_CN_BASE_URL
    return STEPFUN_STEP_PLAN_INTL_BASE_URL
def _model_flow_stepfun(config, current_model=""):
    """StepFun Step Plan flow with region-specific endpoints.

    Ensures an API key is configured, lets the user pick the regional
    endpoint (international vs. China), persists the base URL, then
    prompts for a model (live-detected from the API when possible, with
    the static Step Plan catalog as a fallback).
    """
    from hermes_cli.auth import (
        PROVIDER_REGISTRY,
        _prompt_model_selection,
        _save_model_choice,
        deactivate_provider,
    )
    from hermes_cli.config import get_env_value, save_env_value, load_config, save_config
    # FIX: _PROVIDER_MODELS was referenced in the fallback branch below but
    # never imported in this function (sibling flows import it locally from
    # hermes_cli.models), so a failed live model fetch raised NameError.
    from hermes_cli.models import fetch_api_models, _PROVIDER_MODELS

    provider_id = "stepfun"
    pconfig = PROVIDER_REGISTRY[provider_id]
    key_env = pconfig.api_key_env_vars[0] if pconfig.api_key_env_vars else ""
    base_url_env = pconfig.base_url_env_var or ""

    # ── API key: reuse the first configured env var, else prompt once ──
    existing_key = ""
    for ev in pconfig.api_key_env_vars:
        existing_key = get_env_value(ev) or os.getenv(ev, "")
        if existing_key:
            break
    if not existing_key:
        print(f"No {pconfig.name} API key configured.")
        if key_env:
            try:
                import getpass
                new_key = getpass.getpass(f"{key_env} (or Enter to cancel): ").strip()
            except (KeyboardInterrupt, EOFError):
                print()
                return
            if not new_key:
                print("Cancelled.")
                return
            save_env_value(key_env, new_key)
            existing_key = new_key
            print("API key saved.")
            print()
    else:
        print(f" {pconfig.name} API key: {existing_key[:8]}... ✓")
        print()

    # ── Region selection: infer current region from env/config, list it first ──
    current_base = ""
    if base_url_env:
        current_base = get_env_value(base_url_env) or os.getenv(base_url_env, "")
    if not current_base:
        model_cfg = config.get("model")
        if isinstance(model_cfg, dict):
            current_base = str(model_cfg.get("base_url") or "").strip()
    current_region = _infer_stepfun_region(current_base or pconfig.inference_base_url)
    region_choices = [
        ("international", f"International ({_stepfun_base_url_for_region('international')})"),
        ("china", f"China ({_stepfun_base_url_for_region('china')})"),
    ]
    ordered_regions = []
    for region_key, label in region_choices:
        if region_key == current_region:
            ordered_regions.insert(0, (region_key, f"{label} ← currently active"))
        else:
            ordered_regions.append((region_key, label))
    ordered_regions.append(("cancel", "Cancel"))
    region_idx = _prompt_provider_choice([label for _, label in ordered_regions])
    if region_idx is None or ordered_regions[region_idx][0] == "cancel":
        print("No change.")
        return
    selected_region = ordered_regions[region_idx][0]
    effective_base = _stepfun_base_url_for_region(selected_region)
    if base_url_env:
        save_env_value(base_url_env, effective_base)

    # ── Model list: prefer live detection from the API, fall back to catalog ──
    live_models = fetch_api_models(existing_key, effective_base)
    if live_models:
        model_list = live_models
        print(f" Found {len(model_list)} model(s) from {pconfig.name} API")
    else:
        model_list = _PROVIDER_MODELS.get(provider_id, [])
        if model_list:
            print(
                f" Could not auto-detect models from {pconfig.name} API — "
                "showing Step Plan fallback catalog."
            )
    if model_list:
        selected = _prompt_model_selection(model_list, current_model=current_model)
    else:
        # No catalog at all: accept a free-form model name.
        try:
            selected = input("Model name: ").strip()
        except (KeyboardInterrupt, EOFError):
            selected = None
    if selected:
        _save_model_choice(selected)
        # Persist provider/base_url; api_mode is cleared so the default
        # endpoint handling applies for StepFun.
        cfg = load_config()
        model = cfg.get("model")
        if not isinstance(model, dict):
            model = {"default": model} if model else {}
        cfg["model"] = model
        model["provider"] = provider_id
        model["base_url"] = effective_base
        model.pop("api_mode", None)
        save_config(cfg)
        deactivate_provider()
        # Mirror the saved block into the in-memory config the caller holds.
        config["model"] = dict(model)
        print(f"Default model set to: {selected} (via {pconfig.name})")
    else:
        print("No change.")
def _model_flow_bedrock_api_key(config, region, current_model=""):
"""Bedrock API Key mode — uses the OpenAI-compatible bedrock-mantle endpoint.
@ -4067,6 +4298,12 @@ def cmd_webhook(args):
webhook_command(args)
def cmd_hooks(args):
    """Shell-hook inspection and management.

    Thin dispatcher for ``hermes hooks ...``; the real logic lives in
    hermes_cli.hooks.hooks_command (imported lazily so CLI startup does
    not pay for it when the command is unused).
    """
    from hermes_cli.hooks import hooks_command
    hooks_command(args)
def cmd_doctor(args):
"""Check configuration and dependencies."""
from hermes_cli.doctor import run_doctor
@ -4176,9 +4413,7 @@ def _clear_bytecode_cache(root: Path) -> int:
]
if os.path.basename(dirpath) == "__pycache__":
try:
import shutil as _shutil
_shutil.rmtree(dirpath)
shutil.rmtree(dirpath)
removed += 1
except OSError:
pass
@ -4217,8 +4452,6 @@ def _gateway_prompt(prompt_text: str, default: str = "", timeout: float = 300.0)
tmp.replace(prompt_path)
# Poll for response
import time as _time
deadline = _time.monotonic() + timeout
while _time.monotonic() < deadline:
if response_path.exists():
@ -4250,7 +4483,6 @@ def _build_web_ui(web_dir: Path, *, fatal: bool = False) -> bool:
"""
if not (web_dir / "package.json").exists():
return True
import shutil
npm = shutil.which("npm")
if not npm:
@ -4287,7 +4519,6 @@ def _update_via_zip(args):
Used on Windows when git file I/O is broken (antivirus, NTFS filter
drivers causing 'Invalid argument' errors on file creation).
"""
import shutil
import tempfile
import zipfile
from urllib.request import urlretrieve
@ -4364,7 +4595,6 @@ def _update_via_zip(args):
# breaks on this machine, keep base deps and reinstall the remaining extras
# individually so update does not silently strip working capabilities.
print("→ Updating Python dependencies...")
import subprocess
uv_bin = shutil.which("uv")
if uv_bin:
@ -5115,9 +5345,11 @@ def _install_hangup_protection(gateway_mode: bool = False):
# (2) Mirror output to update.log and wrap stdio for broken-pipe
# tolerance. Any failure here is non-fatal; we just skip the wrap.
try:
from hermes_cli.config import get_hermes_home
# Late-bound import so tests can monkeypatch
# hermes_cli.config.get_hermes_home to simulate setup failure.
from hermes_cli.config import get_hermes_home as _get_hermes_home
logs_dir = get_hermes_home() / "logs"
logs_dir = _get_hermes_home() / "logs"
logs_dir.mkdir(parents=True, exist_ok=True)
log_path = logs_dir / "update.log"
log_file = open(log_path, "a", buffering=1, encoding="utf-8")
@ -5692,8 +5924,6 @@ def _cmd_update_impl(args, gateway_mode: bool):
# Verify the service actually survived the
# restart. systemctl restart returns 0 even
# if the new process crashes immediately.
import time as _time
_time.sleep(3)
verify = subprocess.run(
scope_cmd + ["is-active", svc_name],
@ -6346,6 +6576,17 @@ For more help on a command:
default=False,
help="Run in an isolated git worktree (for parallel agents)",
)
parser.add_argument(
"--accept-hooks",
action="store_true",
default=False,
help=(
"Auto-approve any unseen shell hooks declared in config.yaml "
"without a TTY prompt. Equivalent to HERMES_ACCEPT_HOOKS=1 or "
"hooks_auto_accept: true in config.yaml. Use on CI / headless "
"runs that can't prompt."
),
)
parser.add_argument(
"--skills",
"-s",
@ -6425,6 +6666,7 @@ For more help on a command:
"zai",
"kimi-coding",
"kimi-coding-cn",
"stepfun",
"minimax",
"minimax-cn",
"kilocode",
@ -6468,6 +6710,16 @@ For more help on a command:
default=argparse.SUPPRESS,
help="Run in an isolated git worktree (for parallel agents on the same repo)",
)
chat_parser.add_argument(
"--accept-hooks",
action="store_true",
default=argparse.SUPPRESS,
help=(
"Auto-approve any unseen shell hooks declared in config.yaml "
"without a TTY prompt (see also HERMES_ACCEPT_HOOKS env var and "
"hooks_auto_accept: in config.yaml)."
),
)
chat_parser.add_argument(
"--checkpoints",
action="store_true",
@ -6587,6 +6839,8 @@ For more help on a command:
action="store_true",
help="Replace any existing gateway instance (useful for systemd)",
)
_add_accept_hooks_flag(gateway_run)
_add_accept_hooks_flag(gateway_parser)
# gateway start
gateway_start = gateway_subparsers.add_parser(
@ -6951,6 +7205,7 @@ For more help on a command:
"run", help="Run a job on the next scheduler tick"
)
cron_run.add_argument("job_id", help="Job ID to trigger")
_add_accept_hooks_flag(cron_run)
cron_remove = cron_subparsers.add_parser(
"remove", aliases=["rm", "delete"], help="Remove a scheduled job"
@ -6961,8 +7216,9 @@ For more help on a command:
cron_subparsers.add_parser("status", help="Check if cron scheduler is running")
# cron tick (mostly for debugging)
cron_subparsers.add_parser("tick", help="Run due jobs once and exit")
cron_tick = cron_subparsers.add_parser("tick", help="Run due jobs once and exit")
_add_accept_hooks_flag(cron_tick)
_add_accept_hooks_flag(cron_parser)
cron_parser.set_defaults(func=cmd_cron)
# =========================================================================
@ -7002,6 +7258,13 @@ For more help on a command:
wh_sub.add_argument(
"--secret", default="", help="HMAC secret (auto-generated if omitted)"
)
wh_sub.add_argument(
"--deliver-only",
action="store_true",
help="Skip the agent — deliver the rendered prompt directly as the "
"message. Zero LLM cost. Requires --deliver to be a real target "
"(not 'log').",
)
webhook_subparsers.add_parser(
"list", aliases=["ls"], help="List all dynamic subscriptions"
@ -7022,6 +7285,67 @@ For more help on a command:
webhook_parser.set_defaults(func=cmd_webhook)
# =========================================================================
# hooks command — shell-hook inspection and management
# =========================================================================
hooks_parser = subparsers.add_parser(
"hooks",
help="Inspect and manage shell-script hooks",
description=(
"Inspect shell-script hooks declared in ~/.hermes/config.yaml, "
"test them against synthetic payloads, and manage the first-use "
"consent allowlist at ~/.hermes/shell-hooks-allowlist.json."
),
)
hooks_subparsers = hooks_parser.add_subparsers(dest="hooks_action")
hooks_subparsers.add_parser(
"list", aliases=["ls"],
help="List configured hooks with matcher, timeout, and consent status",
)
_hk_test = hooks_subparsers.add_parser(
"test",
help="Fire every hook matching <event> against a synthetic payload",
)
_hk_test.add_argument(
"event",
help="Hook event name (e.g. pre_tool_call, pre_llm_call, subagent_stop)",
)
_hk_test.add_argument(
"--for-tool", dest="for_tool", default=None,
help=(
"Only fire hooks whose matcher matches this tool name "
"(used for pre_tool_call / post_tool_call)"
),
)
_hk_test.add_argument(
"--payload-file", dest="payload_file", default=None,
help=(
"Path to a JSON file whose contents are merged into the "
"synthetic payload before execution"
),
)
_hk_revoke = hooks_subparsers.add_parser(
"revoke", aliases=["remove", "rm"],
help="Remove a command's allowlist entries (takes effect on next restart)",
)
_hk_revoke.add_argument(
"command",
help="The exact command string to revoke (as declared in config.yaml)",
)
hooks_subparsers.add_parser(
"doctor",
help=(
"Check each configured hook: exec bit, allowlist, mtime drift, "
"JSON validity, and synthetic run timing"
),
)
hooks_parser.set_defaults(func=cmd_hooks)
# =========================================================================
# doctor command
# =========================================================================
@ -7429,6 +7753,17 @@ Examples:
action="store_true",
help="Remove existing plugin and reinstall",
)
_install_enable_group = plugins_install.add_mutually_exclusive_group()
_install_enable_group.add_argument(
"--enable",
action="store_true",
help="Auto-enable the plugin after install (skip confirmation prompt)",
)
_install_enable_group.add_argument(
"--no-enable",
action="store_true",
help="Install disabled (skip confirmation prompt); enable later with `hermes plugins enable <name>`",
)
plugins_update = plugins_subparsers.add_parser(
"update", help="Pull latest changes for an installed plugin"
@ -7476,9 +7811,7 @@ Examples:
)
cmd_info["setup_fn"](plugin_parser)
except Exception as _exc:
import logging as _log
_log.getLogger(__name__).debug("Plugin CLI discovery failed: %s", _exc)
logging.getLogger(__name__).debug("Plugin CLI discovery failed: %s", _exc)
# =========================================================================
# memory command
@ -7684,6 +8017,7 @@ Examples:
action="store_true",
help="Enable verbose logging on stderr",
)
_add_accept_hooks_flag(mcp_serve_p)
mcp_add_p = mcp_sub.add_parser(
"add", help="Add an MCP server (discovery-first install)"
@ -7722,6 +8056,8 @@ Examples:
)
mcp_login_p.add_argument("name", help="Server name to re-authenticate")
_add_accept_hooks_flag(mcp_parser)
def cmd_mcp(args):
from hermes_cli.mcp_config import mcp_command
@ -7860,7 +8196,6 @@ Examples:
return
line = _json.dumps(data, ensure_ascii=False) + "\n"
if args.output == "-":
import sys
sys.stdout.write(line)
else:
@ -7870,7 +8205,6 @@ Examples:
else:
sessions = db.export_all(source=args.source)
if args.output == "-":
import sys
for s in sessions:
sys.stdout.write(_json.dumps(s, ensure_ascii=False) + "\n")
@ -7941,8 +8275,6 @@ Examples:
# Launch hermes --resume <id> by replacing the current process
print(f"Resuming session: {selected_id}")
import shutil
hermes_bin = shutil.which("hermes")
if hermes_bin:
os.execvp(hermes_bin, ["hermes", "--resume", selected_id])
@ -8133,6 +8465,7 @@ Examples:
help="Run Hermes Agent as an ACP (Agent Client Protocol) server",
description="Start Hermes Agent in ACP mode for editor integration (VS Code, Zed, JetBrains)",
)
_add_accept_hooks_flag(acp_parser)
def cmd_acp(args):
"""Launch Hermes Agent as an ACP server."""
@ -8406,6 +8739,42 @@ Examples:
cmd_version(args)
return
# Discover Python plugins and register shell hooks once, before any
# command that can fire lifecycle hooks. Both are idempotent; gated
# so introspection/management commands (hermes hooks list, cron
# list, gateway status, mcp add, ...) don't pay discovery cost or
# trigger consent prompts for hooks the user is still inspecting.
# Groups with mixed admin/CRUD vs. agent-running entries narrow via
# the nested subcommand (dest varies by parser).
_AGENT_COMMANDS = {None, "chat", "acp", "rl"}
_AGENT_SUBCOMMANDS = {
"cron": ("cron_command", {"run", "tick"}),
"gateway": ("gateway_command", {"run"}),
"mcp": ("mcp_action", {"serve"}),
}
_sub_attr, _sub_set = _AGENT_SUBCOMMANDS.get(args.command, (None, None))
if (
args.command in _AGENT_COMMANDS
or (_sub_attr and getattr(args, _sub_attr, None) in _sub_set)
):
_accept_hooks = bool(getattr(args, "accept_hooks", False))
try:
from hermes_cli.plugins import discover_plugins
discover_plugins()
except Exception:
logger.debug(
"plugin discovery failed at CLI startup", exc_info=True,
)
try:
from hermes_cli.config import load_config
from agent.shell_hooks import register_from_config
register_from_config(load_config(), accept_hooks=_accept_hooks)
except Exception:
logger.debug(
"shell-hook registration failed at CLI startup",
exc_info=True,
)
# Handle top-level --resume / --continue as shortcut to chat
if (args.resume or args.continue_last) and args.command is None:
args.command = "chat"

View file

@ -143,7 +143,7 @@ MODEL_ALIASES: dict[str, ModelIdentity] = {
# Z.AI / GLM
"glm": ModelIdentity("z-ai", "glm"),
# StepFun
# Step Plan (StepFun)
"step": ModelIdentity("stepfun", "step"),
# Xiaomi
@ -678,6 +678,7 @@ def switch_model(
_da = DIRECT_ALIASES.get(resolved_alias)
if _da is not None and _da.base_url:
base_url = _da.base_url
api_mode = "" # clear so determine_api_mode re-detects from URL
if not api_key:
api_key = "no-key-required"
@ -1035,21 +1036,49 @@ def list_authenticated_providers(
seen_slugs.add(_cp.slug.lower())
# --- 3. User-defined endpoints from config ---
# Track (name, base_url) of what section 3 emits so section 4 can skip
# any overlapping ``custom_providers:`` entries. Callers typically pass
# both (gateway/CLI invoke ``get_compatible_custom_providers()`` which
# merges ``providers:`` into the list) — without this, the same endpoint
# produces two picker rows: one bare-slug ("openrouter") from section 3
# and one "custom:openrouter" from section 4, both labelled identically.
_section3_emitted_pairs: set = set()
if user_providers and isinstance(user_providers, dict):
for ep_name, ep_cfg in user_providers.items():
if not isinstance(ep_cfg, dict):
continue
# Skip if this slug was already emitted (e.g. canonical provider
# with the same name) or will be picked up by section 4.
if ep_name.lower() in seen_slugs:
continue
display_name = ep_cfg.get("name", "") or ep_name
api_url = ep_cfg.get("api", "") or ep_cfg.get("url", "") or ""
default_model = ep_cfg.get("default_model", "")
# ``base_url`` is Hermes's canonical write key (matches
# custom_providers and _save_custom_provider); ``api`` / ``url``
# remain as fallbacks for hand-edited / legacy configs.
api_url = (
ep_cfg.get("base_url", "")
or ep_cfg.get("api", "")
or ep_cfg.get("url", "")
or ""
)
# ``default_model`` is the legacy key; ``model`` matches what
# custom_providers entries use, so accept either.
default_model = ep_cfg.get("default_model", "") or ep_cfg.get("model", "")
# Build models list from both default_model and full models array
models_list = []
if default_model:
models_list.append(default_model)
# Also include the full models list from config
# Also include the full models list from config.
# Hermes writes ``models:`` as a dict keyed by model id
# (see hermes_cli/main.py::_save_custom_provider); older
# configs or hand-edited files may still use a list.
cfg_models = ep_cfg.get("models", [])
if isinstance(cfg_models, list):
if isinstance(cfg_models, dict):
for m in cfg_models:
if m and m not in models_list:
models_list.append(m)
elif isinstance(cfg_models, list):
for m in cfg_models:
if m and m not in models_list:
models_list.append(m)
@ -1066,6 +1095,14 @@ def list_authenticated_providers(
"source": "user-config",
"api_url": api_url,
})
seen_slugs.add(ep_name.lower())
seen_slugs.add(custom_provider_slug(display_name).lower())
_pair = (
str(display_name).strip().lower(),
str(api_url).strip().rstrip("/").lower(),
)
if _pair[0] and _pair[1]:
_section3_emitted_pairs.add(_pair)
# --- 4. Saved custom providers from config ---
# Each ``custom_providers`` entry represents one model under a named
@ -1100,13 +1137,41 @@ def list_authenticated_providers(
"api_url": api_url,
"models": [],
}
# The singular ``model:`` field only holds the currently
# active model. Hermes's own writer (main.py::_save_custom_provider)
# stores every configured model as a dict under ``models:``;
# downstream readers (agent/models_dev.py, gateway/run.py,
# run_agent.py, hermes_cli/config.py) already consume that dict.
# The /model picker previously ignored it, so multi-model
# custom providers appeared to have only the active model.
default_model = (entry.get("model") or "").strip()
if default_model and default_model not in groups[slug]["models"]:
groups[slug]["models"].append(default_model)
cfg_models = entry.get("models", {})
if isinstance(cfg_models, dict):
for m in cfg_models:
if m and m not in groups[slug]["models"]:
groups[slug]["models"].append(m)
elif isinstance(cfg_models, list):
for m in cfg_models:
if m and m not in groups[slug]["models"]:
groups[slug]["models"].append(m)
for slug, grp in groups.items():
if slug.lower() in seen_slugs:
continue
# Skip if section 3 already emitted this endpoint under its
# ``providers:`` dict key — matches on (display_name, base_url),
# the tuple section 4 groups by. Prevents two picker rows
# labelled identically when callers pass both ``user_providers``
# and a compatibility-merged ``custom_providers`` list.
_pair_key = (
str(grp["name"]).strip().lower(),
str(grp["api_url"]).strip().rstrip("/").lower(),
)
if _pair_key[0] and _pair_key[1] and _pair_key in _section3_emitted_pairs:
continue
results.append({
"slug": slug,
"name": grp["name"],

View file

@ -16,6 +16,12 @@ from difflib import get_close_matches
from pathlib import Path
from typing import Any, NamedTuple, Optional
from hermes_cli import __version__ as _HERMES_VERSION
# Identify ourselves so endpoints fronted by Cloudflare's Browser Integrity
# Check (error 1010) don't reject the default ``Python-urllib/*`` signature.
_HERMES_USER_AGENT = f"hermes-cli/{_HERMES_VERSION}"
COPILOT_BASE_URL = "https://api.githubcopilot.com"
COPILOT_MODELS_URL = f"{COPILOT_BASE_URL}/models"
COPILOT_EDITOR_VERSION = "vscode/1.104.1"
@ -26,7 +32,7 @@ COPILOT_REASONING_EFFORTS_O_SERIES = ["low", "medium", "high"]
# Fallback OpenRouter snapshot used when the live catalog is unavailable.
# (model_id, display description shown in menus)
OPENROUTER_MODELS: list[tuple[str, str]] = [
("moonshotai/kimi-k2.5", "recommended"),
("moonshotai/kimi-k2.6", "recommended"),
("anthropic/claude-opus-4.7", ""),
("anthropic/claude-opus-4.6", ""),
("anthropic/claude-sonnet-4.6", ""),
@ -47,6 +53,7 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [
("stepfun/step-3.5-flash", ""),
("minimax/minimax-m2.7", ""),
("minimax/minimax-m2.5", ""),
("minimax/minimax-m2.5:free", "free"),
("z-ai/glm-5.1", ""),
("z-ai/glm-5v-turbo", ""),
("z-ai/glm-5-turbo", ""),
@ -62,6 +69,31 @@ OPENROUTER_MODELS: list[tuple[str, str]] = [
_openrouter_catalog_cache: list[tuple[str, str]] | None = None
# Fallback Vercel AI Gateway snapshot used when the live catalog is unavailable.
# OSS / open-weight models prioritized first, then closed-source by family.
# Slugs match Vercel's actual /v1/models catalog (e.g. alibaba/ for Qwen,
# zai/ and xai/ without hyphens).
VERCEL_AI_GATEWAY_MODELS: list[tuple[str, str]] = [
("moonshotai/kimi-k2.6", "recommended"),
("alibaba/qwen3.6-plus", ""),
("zai/glm-5.1", ""),
("minimax/minimax-m2.7", ""),
("anthropic/claude-sonnet-4.6", ""),
("anthropic/claude-opus-4.7", ""),
("anthropic/claude-opus-4.6", ""),
("anthropic/claude-haiku-4.5", ""),
("openai/gpt-5.4", ""),
("openai/gpt-5.4-mini", ""),
("openai/gpt-5.3-codex", ""),
("google/gemini-3.1-pro-preview", ""),
("google/gemini-3-flash", ""),
("google/gemini-3.1-flash-lite-preview", ""),
("xai/grok-4.20-reasoning", ""),
]
_ai_gateway_catalog_cache: list[tuple[str, str]] | None = None
def _codex_curated_models() -> list[str]:
"""Derive the openai-codex curated list from codex_models.py.
@ -75,7 +107,7 @@ def _codex_curated_models() -> list[str]:
_PROVIDER_MODELS: dict[str, list[str]] = {
"nous": [
"moonshotai/kimi-k2.5",
"moonshotai/kimi-k2.6",
"xiaomi/mimo-v2-pro",
"anthropic/claude-opus-4.7",
"anthropic/claude-opus-4.6",
@ -94,17 +126,15 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"stepfun/step-3.5-flash",
"minimax/minimax-m2.7",
"minimax/minimax-m2.5",
"minimax/minimax-m2.5:free",
"z-ai/glm-5.1",
"z-ai/glm-5v-turbo",
"z-ai/glm-5-turbo",
"x-ai/grok-4.20-beta",
"nvidia/nemotron-3-super-120b-a12b",
"nvidia/nemotron-3-super-120b-a12b:free",
"arcee-ai/trinity-large-preview:free",
"arcee-ai/trinity-large-thinking",
"openai/gpt-5.4-pro",
"openai/gpt-5.4-nano",
"openrouter/elephant-alpha",
],
"openai-codex": _codex_curated_models(),
"copilot-acp": [
@ -128,16 +158,14 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
],
"gemini": [
"gemini-3.1-pro-preview",
"gemini-3-pro-preview",
"gemini-3-flash-preview",
"gemini-3.1-flash-lite-preview",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
],
"google-gemini-cli": [
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
"gemini-3.1-pro-preview",
"gemini-3-pro-preview",
"gemini-3-flash-preview",
],
"zai": [
"glm-5.1",
@ -161,12 +189,13 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
# (map to OpenRouter defaults — users get familiar picks on NIM)
"qwen/qwen3.5-397b-a17b",
"deepseek-ai/deepseek-v3.2",
"moonshotai/kimi-k2.5",
"moonshotai/kimi-k2.6",
"minimaxai/minimax-m2.5",
"z-ai/glm5",
"openai/gpt-oss-120b",
],
"kimi-coding": [
"kimi-k2.6",
"kimi-k2.5",
"kimi-for-coding",
"kimi-k2-thinking",
@ -175,12 +204,18 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"kimi-k2-0905-preview",
],
"kimi-coding-cn": [
"kimi-k2.6",
"kimi-k2.5",
"kimi-k2-thinking",
"kimi-k2-turbo-preview",
"kimi-k2-0905-preview",
],
"stepfun": [
"step-3.5-flash",
"step-3.5-flash-2603",
],
"moonshot": [
"kimi-k2.6",
"kimi-k2.5",
"kimi-k2-thinking",
"kimi-k2-turbo-preview",
@ -227,7 +262,6 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"gpt-5.4-pro",
"gpt-5.4",
"gpt-5.3-codex",
"gpt-5.3-codex-spark",
"gpt-5.2",
"gpt-5.2-codex",
"gpt-5.1",
@ -261,6 +295,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"big-pickle",
],
"opencode-go": [
"kimi-k2.6",
"kimi-k2.5",
"glm-5.1",
"glm-5",
@ -268,20 +303,8 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"mimo-v2-omni",
"minimax-m2.7",
"minimax-m2.5",
],
"ai-gateway": [
"anthropic/claude-opus-4.6",
"anthropic/claude-sonnet-4.6",
"anthropic/claude-sonnet-4.5",
"anthropic/claude-haiku-4.5",
"openai/gpt-5",
"openai/gpt-4.1",
"openai/gpt-4.1-mini",
"google/gemini-3-pro-preview",
"google/gemini-3-flash",
"google/gemini-2.5-pro",
"google/gemini-2.5-flash",
"deepseek/deepseek-v3.2",
"qwen3.6-plus",
"qwen3.5-plus",
],
"kilocode": [
"anthropic/claude-opus-4.6",
@ -315,6 +338,7 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"zai-org/GLM-5",
"XiaomiMiMo/MiMo-V2-Flash",
"moonshotai/Kimi-K2-Thinking",
"moonshotai/Kimi-K2.6",
],
# AWS Bedrock — static fallback list used when dynamic discovery is
# unavailable (no boto3, no credentials, or API error). The agent
@ -334,18 +358,18 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
],
}
# Vercel AI Gateway: derive the bare-model-id catalog from the curated
# ``VERCEL_AI_GATEWAY_MODELS`` snapshot so both the picker (tuples with descriptions)
# and the static fallback catalog (bare ids) stay in sync from a single
# source of truth.
_PROVIDER_MODELS["ai-gateway"] = [mid for mid, _ in VERCEL_AI_GATEWAY_MODELS]
# ---------------------------------------------------------------------------
# Nous Portal free-model filtering
# Nous Portal free-model helper
# ---------------------------------------------------------------------------
# Models that are ALLOWED to appear when priced as free on Nous Portal.
# Any other free model is hidden — prevents promotional/temporary free models
# from cluttering the selection when users are paying subscribers.
# Models in this list are ALSO filtered out if they are NOT free (i.e. they
# should only appear in the menu when they are genuinely free).
_NOUS_ALLOWED_FREE_MODELS: frozenset[str] = frozenset({
"xiaomi/mimo-v2-pro",
"xiaomi/mimo-v2-omni",
})
# The Nous Portal models endpoint is the source of truth for which models
# are currently offered (free or paid). We trust whatever it returns and
# surface it to users as-is — no local allowlist filtering.
def _is_model_free(model_id: str, pricing: dict[str, dict[str, str]]) -> bool:
@ -359,35 +383,6 @@ def _is_model_free(model_id: str, pricing: dict[str, dict[str, str]]) -> bool:
return False
def filter_nous_free_models(
model_ids: list[str],
pricing: dict[str, dict[str, str]],
) -> list[str]:
"""Filter the Nous Portal model list according to free-model policy.
Rules:
Paid models that are NOT in the allowlist keep (normal case).
Free models that are NOT in the allowlist drop.
Allowlist models that ARE free keep.
Allowlist models that are NOT free drop.
"""
if not pricing:
return model_ids # no pricing data — can't filter, show everything
result: list[str] = []
for mid in model_ids:
free = _is_model_free(mid, pricing)
if mid in _NOUS_ALLOWED_FREE_MODELS:
# Allowlist model: only show when it's actually free
if free:
result.append(mid)
else:
# Regular model: keep only when it's NOT free
if not free:
result.append(mid)
return result
# ---------------------------------------------------------------------------
# Nous Portal account tier detection
# ---------------------------------------------------------------------------
@ -451,8 +446,7 @@ def partition_nous_models_by_tier(
) -> tuple[list[str], list[str]]:
"""Split Nous models into (selectable, unavailable) based on user tier.
For paid-tier users: all models are selectable, none unavailable
(free-model filtering is handled separately by ``filter_nous_free_models``).
For paid-tier users: all models are selectable, none unavailable.
For free-tier users: only free models are selectable; paid models
are returned as unavailable (shown grayed out in the menu).
@ -491,8 +485,6 @@ def check_nous_free_tier() -> bool:
Returns False (assume paid) on any error never blocks paying users.
"""
global _free_tier_cache
import time
now = time.monotonic()
if _free_tier_cache is not None:
cached_result, cached_at = _free_tier_cache
@ -524,6 +516,157 @@ def check_nous_free_tier() -> bool:
return False # default to paid on error — don't block users
# ---------------------------------------------------------------------------
# Nous Portal recommended models
#
# The Portal publishes a curated list of suggested models (separated into
# paid and free tiers) plus dedicated recommendations for compaction (text
# summarisation / auxiliary) and vision tasks. We fetch it once per process
# with a TTL cache so callers can ask "what's the best aux model right now?"
# without hitting the network on every lookup.
#
# Shape of the response (fields we care about):
# {
# "paidRecommendedModels": [ {modelName, ...}, ... ],
# "freeRecommendedModels": [ {modelName, ...}, ... ],
# "paidRecommendedCompactionModel": {modelName, ...} | null,
# "paidRecommendedVisionModel": {modelName, ...} | null,
# "freeRecommendedCompactionModel": {modelName, ...} | null,
# "freeRecommendedVisionModel": {modelName, ...} | null,
# }
# ---------------------------------------------------------------------------
NOUS_RECOMMENDED_MODELS_PATH = "/api/nous/recommended-models"
_NOUS_RECOMMENDED_CACHE_TTL: int = 600  # seconds (10 minutes)
# (result_dict, timestamp) keyed by portal_base_url so staging vs prod don't collide.
_nous_recommended_cache: dict[str, tuple[dict[str, Any], float]] = {}


def fetch_nous_recommended_models(
    portal_base_url: str = "",
    timeout: float = 5.0,
    *,
    force_refresh: bool = False,
) -> dict[str, Any]:
    """Fetch the Nous Portal's curated recommended-models payload.

    Hits ``<portal>/api/nous/recommended-models``. The endpoint is public —
    no auth is required. Results are cached per portal URL for
    ``_NOUS_RECOMMENDED_CACHE_TTL`` seconds; pass ``force_refresh=True`` to
    bypass the cache.

    Returns the parsed JSON dict on success, or ``{}`` on any failure
    (network, parse, non-2xx). Failures are cached too, so a flapping
    endpoint is retried at most once per TTL window. Callers must treat
    missing/null fields as "no recommendation" and fall back to their own
    default.
    """
    base = (portal_base_url or "https://portal.nousresearch.com").rstrip("/")
    now = time.monotonic()
    cached = _nous_recommended_cache.get(base)
    if not force_refresh and cached is not None:
        payload, cached_at = cached
        if now - cached_at < _NOUS_RECOMMENDED_CACHE_TTL:
            return payload

    url = f"{base}{NOUS_RECOMMENDED_MODELS_PATH}"
    try:
        req = urllib.request.Request(
            url,
            headers={
                "Accept": "application/json",
                # Identify ourselves — endpoints fronted by Cloudflare's
                # Browser Integrity Check reject the default
                # ``Python-urllib/*`` signature (error 1010); see the
                # module-level ``_HERMES_USER_AGENT`` note.
                "User-Agent": _HERMES_USER_AGENT,
            },
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            data = json.loads(resp.read().decode())
        if not isinstance(data, dict):
            data = {}
    except Exception:
        data = {}

    _nous_recommended_cache[base] = (data, now)
    return data
def _resolve_nous_portal_url() -> str:
    """Best-effort lookup of the Portal base URL the user is authed against."""
    try:
        from hermes_cli.auth import (
            DEFAULT_NOUS_PORTAL_URL,
            get_provider_auth_state,
        )

        auth_state = get_provider_auth_state("nous") or {}
        configured = str(auth_state.get("portal_base_url") or "").strip()
        # Prefer the portal URL recorded in the auth state; otherwise fall
        # back to the package-wide default.
        chosen = configured or str(DEFAULT_NOUS_PORTAL_URL)
        return chosen.rstrip("/")
    except Exception:
        # Auth module unavailable or broken — use the hard-coded production URL.
        return "https://portal.nousresearch.com"
def _extract_model_name(entry: Any) -> Optional[str]:
    """Pull the ``modelName`` field from a recommended-model entry, else None."""
    if isinstance(entry, dict):
        candidate = entry.get("modelName")
        if isinstance(candidate, str):
            stripped = candidate.strip()
            # Whitespace-only names count as missing.
            if stripped:
                return stripped
    return None
def get_nous_recommended_aux_model(
    *,
    vision: bool = False,
    free_tier: Optional[bool] = None,
    portal_base_url: str = "",
    force_refresh: bool = False,
) -> Optional[str]:
    """Return the Portal's recommended model name for an auxiliary task.

    Reads the Portal's recommended-models payload and picks the field that
    matches the task and tier:

    * ``vision=True``  → ``paidRecommendedVisionModel`` /
      ``freeRecommendedVisionModel``
    * ``vision=False`` → ``paidRecommendedCompactionModel`` /
      ``freeRecommendedCompactionModel``

    When ``free_tier`` is ``None`` (default) the tier is auto-detected via
    :func:`check_nous_free_tier`; pass an explicit bool to skip detection
    (tests, or callers that already know the tier). Paid-tier users prefer
    the paid recommendation but fall back to the free one when the Portal
    returns ``null`` for the paid field (common during staged rollouts).

    Returns ``None`` when every candidate is missing/null or the fetch
    failed — callers supply their own default model in that case.
    """
    portal = portal_base_url or _resolve_nous_portal_url()
    payload = fetch_nous_recommended_models(portal, force_refresh=force_refresh)
    if not payload:
        return None

    if free_tier is None:
        try:
            free_tier = check_nous_free_tier()
        except Exception:
            # Detection error → assume paid. Paid users can read both fields
            # anyway, so this default maximises model quality safely.
            free_tier = False

    kind = "Vision" if vision else "Compaction"
    paid_key = f"paidRecommended{kind}Model"
    free_key = f"freeRecommended{kind}Model"

    # Free tier may only use the free recommendation; paid tier prefers the
    # paid one and degrades to the free one when the paid field is null.
    lookup_order = [free_key] if free_tier else [paid_key, free_key]
    for field in lookup_order:
        model_name = _extract_model_name(payload.get(field))
        if model_name:
            return model_name
    return None
# ---------------------------------------------------------------------------
# Canonical provider list — single source of truth for provider identity.
# Every code path that lists, displays, or iterates providers derives from
@ -544,6 +687,7 @@ class ProviderEntry(NamedTuple):
CANONICAL_PROVIDERS: list[ProviderEntry] = [
ProviderEntry("nous", "Nous Portal", "Nous Portal (Nous Research subscription)"),
ProviderEntry("openrouter", "OpenRouter", "OpenRouter (100+ models, pay-per-use)"),
ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, $5 free credit, no markup)"),
ProviderEntry("anthropic", "Anthropic", "Anthropic (Claude models — API key or Claude Code)"),
ProviderEntry("openai-codex", "OpenAI Codex", "OpenAI Codex"),
ProviderEntry("xiaomi", "Xiaomi MiMo", "Xiaomi MiMo (MiMo-V2 models — pro, omni, flash)"),
@ -552,13 +696,14 @@ CANONICAL_PROVIDERS: list[ProviderEntry] = [
ProviderEntry("copilot", "GitHub Copilot", "GitHub Copilot (uses GITHUB_TOKEN or gh auth token)"),
ProviderEntry("copilot-acp", "GitHub Copilot ACP", "GitHub Copilot ACP (spawns `copilot --acp --stdio`)"),
ProviderEntry("huggingface", "Hugging Face", "Hugging Face Inference Providers (20+ open models)"),
ProviderEntry("gemini", "Google AI Studio", "Google AI Studio (Gemini models — OpenAI-compatible endpoint)"),
ProviderEntry("gemini", "Google AI Studio", "Google AI Studio (Gemini models — native Gemini API)"),
ProviderEntry("google-gemini-cli", "Google Gemini (OAuth)", "Google Gemini via OAuth + Code Assist (free tier supported; no API key needed)"),
ProviderEntry("deepseek", "DeepSeek", "DeepSeek (DeepSeek-V3, R1, coder — direct API)"),
ProviderEntry("xai", "xAI", "xAI (Grok models — direct API)"),
ProviderEntry("zai", "Z.AI / GLM", "Z.AI / GLM (Zhipu AI direct API)"),
ProviderEntry("kimi-coding", "Kimi / Kimi Coding Plan", "Kimi Coding Plan (api.kimi.com) & Moonshot API"),
ProviderEntry("kimi-coding-cn", "Kimi / Moonshot (China)", "Kimi / Moonshot China (Moonshot CN direct API)"),
ProviderEntry("stepfun", "StepFun Step Plan", "StepFun Step Plan (agent/coding models via Step Plan API)"),
ProviderEntry("minimax", "MiniMax", "MiniMax (global direct API)"),
ProviderEntry("minimax-cn", "MiniMax (China)", "MiniMax China (domestic direct API)"),
ProviderEntry("alibaba", "Alibaba Cloud (DashScope)","Alibaba Cloud / DashScope Coding (Qwen + multi-provider)"),
@ -567,7 +712,6 @@ CANONICAL_PROVIDERS: list[ProviderEntry] = [
ProviderEntry("kilocode", "Kilo Code", "Kilo Code (Kilo Gateway API)"),
ProviderEntry("opencode-zen", "OpenCode Zen", "OpenCode Zen (35+ curated models, pay-as-you-go)"),
ProviderEntry("opencode-go", "OpenCode Go", "OpenCode Go (open models, $10/month subscription)"),
ProviderEntry("ai-gateway", "Vercel AI Gateway", "Vercel AI Gateway (200+ models, pay-per-use)"),
ProviderEntry("bedrock", "AWS Bedrock", "AWS Bedrock (Claude, Nova, Llama, DeepSeek — IAM or API key)"),
]
@ -594,6 +738,8 @@ _PROVIDER_ALIASES = {
"moonshot": "kimi-coding",
"kimi-cn": "kimi-coding-cn",
"moonshot-cn": "kimi-coding-cn",
"step": "stepfun",
"stepfun-coding-plan": "stepfun",
"arcee-ai": "arcee",
"arceeai": "arcee",
"minimax-china": "minimax-cn",
@ -663,6 +809,31 @@ def _openrouter_model_is_free(pricing: Any) -> bool:
return False
def _openrouter_model_supports_tools(item: Any) -> bool:
    """Return True when the model's ``supported_parameters`` advertise tool calling.

    hermes-agent is tool-calling-first — every provider path assumes the model
    can invoke tools. Models whose ``supported_parameters`` explicitly omit
    ``tools`` (image-only or completion-only models) cannot drive the agent
    loop and would fail at the first tool call, so they are hidden.

    **Permissive when the field is missing.** Some OpenRouter-compatible
    gateways (Nous Portal, private mirrors, older catalog snapshots) don't
    populate ``supported_parameters`` at all; treat that as "unknown
    capability → allow" so the picker doesn't silently empty out for those
    users.

    Ported from Kilo-Org/kilocode#9068.
    """
    if not isinstance(item, dict):
        return True
    supported = item.get("supported_parameters")
    if isinstance(supported, list):
        return "tools" in supported
    # Field absent / malformed / None — capability unknown, be permissive.
    return True
def fetch_openrouter_models(
timeout: float = 8.0,
*,
@ -705,6 +876,11 @@ def fetch_openrouter_models(
live_item = live_by_id.get(preferred_id)
if live_item is None:
continue
# Hide models that don't advertise tool-calling support — hermes-agent
# requires it and surfacing them leads to immediate runtime failures
# when the user selects them. Ported from Kilo-Org/kilocode#9068.
if not _openrouter_model_supports_tools(live_item):
continue
desc = "free" if _openrouter_model_is_free(live_item.get("pricing")) else ""
curated.append((preferred_id, desc))
@ -722,6 +898,93 @@ def model_ids(*, force_refresh: bool = False) -> list[str]:
return [mid for mid, _ in fetch_openrouter_models(force_refresh=force_refresh)]
def _ai_gateway_model_is_free(pricing: Any) -> bool:
    """Return True if an AI Gateway model has $0 input AND output pricing."""
    if not isinstance(pricing, dict):
        return False
    try:
        input_cost = float(pricing.get("input", "0"))
        output_cost = float(pricing.get("output", "0"))
    except (TypeError, ValueError):
        # Unparseable pricing — treat as paid rather than guessing.
        return False
    return input_cost == 0 and output_cost == 0
def fetch_ai_gateway_models(
    timeout: float = 8.0,
    *,
    force_refresh: bool = False,
) -> list[tuple[str, str]]:
    """Return the curated AI Gateway picker list, refreshed from the live catalog when possible.

    Narrows the static ``VERCEL_AI_GATEWAY_MODELS`` snapshot to models the
    live ``/models`` catalog actually offers, labelling $0-priced ones
    "free". Falls back to the last good cache, then the full static
    snapshot, when the catalog is unreachable, malformed, or empty.
    Results are cached per process; pass ``force_refresh=True`` to bypass.
    """
    global _ai_gateway_catalog_cache
    if _ai_gateway_catalog_cache is not None and not force_refresh:
        return list(_ai_gateway_catalog_cache)

    from hermes_constants import AI_GATEWAY_BASE_URL

    fallback = list(VERCEL_AI_GATEWAY_MODELS)
    preferred_ids = [mid for mid, _ in fallback]

    try:
        req = urllib.request.Request(
            f"{AI_GATEWAY_BASE_URL.rstrip('/')}/models",
            headers={
                "Accept": "application/json",
                # Identify ourselves — Cloudflare's Browser Integrity Check
                # rejects the default ``Python-urllib/*`` UA (error 1010);
                # see the module-level ``_HERMES_USER_AGENT`` note.
                "User-Agent": _HERMES_USER_AGENT,
            },
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            payload = json.loads(resp.read().decode())
    except Exception:
        return list(_ai_gateway_catalog_cache or fallback)

    # Guard the payload shape: a non-dict top-level JSON value (some
    # gateways return a bare list on error) previously raised an uncaught
    # AttributeError on ``payload.get`` — degrade to fallback instead.
    live_items = payload.get("data", []) if isinstance(payload, dict) else None
    if not isinstance(live_items, list):
        return list(_ai_gateway_catalog_cache or fallback)

    live_by_id: dict[str, dict[str, Any]] = {}
    for item in live_items:
        if not isinstance(item, dict):
            continue
        mid = str(item.get("id") or "").strip()
        if not mid:
            continue
        live_by_id[mid] = item

    curated: list[tuple[str, str]] = []
    for preferred_id in preferred_ids:
        live_item = live_by_id.get(preferred_id)
        if live_item is None:
            # Not offered by the live catalog — drop from the picker.
            continue
        desc = "free" if _ai_gateway_model_is_free(live_item.get("pricing")) else ""
        curated.append((preferred_id, desc))

    if not curated:
        return list(_ai_gateway_catalog_cache or fallback)

    # If the live catalog offers a free Moonshot model, auto-promote it to
    # position #1 as "recommended" — dynamic discovery without a PR.
    free_moonshot = next(
        (
            mid
            for mid, item in live_by_id.items()
            if mid.startswith("moonshotai/")
            and _ai_gateway_model_is_free(item.get("pricing"))
        ),
        None,
    )
    if free_moonshot:
        curated = [(mid, desc) for mid, desc in curated if mid != free_moonshot]
        curated.insert(0, (free_moonshot, "recommended"))
    else:
        first_id, _ = curated[0]
        curated[0] = (first_id, "recommended")

    _ai_gateway_catalog_cache = curated
    return list(curated)
def ai_gateway_model_ids(*, force_refresh: bool = False) -> list[str]:
    """Return just the AI Gateway model-id strings."""
    pairs = fetch_ai_gateway_models(force_refresh=force_refresh)
    return [model_id for model_id, _desc in pairs]
# ---------------------------------------------------------------------------
@ -866,6 +1129,56 @@ def fetch_models_with_pricing(
return result
def fetch_ai_gateway_pricing(
    timeout: float = 8.0,
    *,
    force_refresh: bool = False,
) -> dict[str, dict[str, str]]:
    """Fetch Vercel AI Gateway /v1/models and return hermes-shaped pricing.

    Vercel uses ``input`` / ``output`` field names; hermes's picker expects
    ``prompt`` / ``completion``. This translates. Cache read/write field names
    already match.

    Args:
        timeout: Socket timeout (seconds) for the catalog request.
        force_refresh: Bypass the per-base-URL ``_pricing_cache`` entry.

    Returns:
        Mapping of model id → pricing dict with ``prompt``/``completion``
        (and optional ``input_cache_read``/``input_cache_write``) keys.
        On catalog failure an empty dict is returned and negative-cached,
        so repeated calls don't re-probe the network until force_refresh.
    """
    from hermes_constants import AI_GATEWAY_BASE_URL

    cache_key = AI_GATEWAY_BASE_URL.rstrip("/")
    if not force_refresh and cache_key in _pricing_cache:
        return _pricing_cache[cache_key]
    try:
        req = urllib.request.Request(
            f"{cache_key}/models",
            headers={"Accept": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=timeout) as resp:
            payload = json.loads(resp.read().decode())
    except Exception:
        # Negative-cache the failure; a later force_refresh retries.
        _pricing_cache[cache_key] = {}
        return {}
    result: dict[str, dict[str, str]] = {}
    for item in payload.get("data", []):
        if not isinstance(item, dict):
            continue
        # Coerce and strip the id the same way fetch_ai_gateway_models does,
        # so picker lookups and pricing lookups agree on the key.
        mid = str(item.get("id") or "").strip()
        pricing = item.get("pricing")
        if not (mid and isinstance(pricing, dict)):
            continue
        entry: dict[str, str] = {
            "prompt": str(pricing.get("input", "")),
            "completion": str(pricing.get("output", "")),
        }
        if pricing.get("input_cache_read"):
            entry["input_cache_read"] = str(pricing["input_cache_read"])
        if pricing.get("input_cache_write"):
            entry["input_cache_write"] = str(pricing["input_cache_write"])
        result[mid] = entry
    _pricing_cache[cache_key] = result
    return result
def _resolve_openrouter_api_key() -> str:
"""Best-effort OpenRouter API key for pricing fetch."""
return os.getenv("OPENROUTER_API_KEY", "").strip()
@ -884,7 +1197,7 @@ def _resolve_nous_pricing_credentials() -> tuple[str, str]:
def get_pricing_for_provider(provider: str, *, force_refresh: bool = False) -> dict[str, dict[str, str]]:
"""Return live pricing for providers that support it (openrouter, nous)."""
"""Return live pricing for providers that support it (openrouter, nous, ai-gateway)."""
normalized = normalize_provider(provider)
if normalized == "openrouter":
return fetch_models_with_pricing(
@ -892,6 +1205,8 @@ def get_pricing_for_provider(provider: str, *, force_refresh: bool = False) -> d
base_url="https://openrouter.ai/api",
force_refresh=force_refresh,
)
if normalized == "ai-gateway":
return fetch_ai_gateway_pricing(force_refresh=force_refresh)
if normalized == "nous":
api_key, base_url = _resolve_nous_pricing_credentials()
if base_url:
@ -1096,7 +1411,6 @@ def detect_provider_for_model(
from hermes_cli.auth import PROVIDER_REGISTRY
pconfig = PROVIDER_REGISTRY.get(direct_match)
if pconfig:
import os
for env_var in pconfig.api_key_env_vars:
if os.getenv(env_var, "").strip():
has_creds = True
@ -1306,6 +1620,19 @@ def provider_model_ids(provider: Optional[str], *, force_refresh: bool = False)
return live
except Exception:
pass
if normalized == "stepfun":
try:
from hermes_cli.auth import resolve_api_key_provider_credentials
creds = resolve_api_key_provider_credentials("stepfun")
api_key = str(creds.get("api_key") or "").strip()
base_url = str(creds.get("base_url") or "").strip()
if api_key and base_url:
live = fetch_api_models(api_key, base_url)
if live:
return live
except Exception:
pass
if normalized == "anthropic":
live = _fetch_anthropic_models()
if live:
@ -1771,7 +2098,7 @@ def probe_api_models(
candidates.append((alternate_base, True))
tried: list[str] = []
headers: dict[str, str] = {}
headers: dict[str, str] = {"User-Agent": _HERMES_USER_AGENT}
if api_key:
headers["Authorization"] = f"Bearer {api_key}"
if normalized.startswith(COPILOT_BASE_URL):
@ -2106,6 +2433,51 @@ def validate_requested_model(
),
}
# MiniMax providers don't expose a /models endpoint — validate against
# the static catalog instead, similar to openai-codex.
if normalized in ("minimax", "minimax-cn"):
try:
catalog_models = provider_model_ids(normalized)
except Exception:
catalog_models = []
if catalog_models:
# Case-insensitive lookup (catalog uses mixed case like MiniMax-M2.7)
catalog_lower = {m.lower(): m for m in catalog_models}
if requested_for_lookup.lower() in catalog_lower:
return {
"accepted": True,
"persist": True,
"recognized": True,
"message": None,
}
# Auto-correct close matches (case-insensitive)
catalog_lower_list = list(catalog_lower.keys())
auto = get_close_matches(requested_for_lookup.lower(), catalog_lower_list, n=1, cutoff=0.9)
if auto:
corrected = catalog_lower[auto[0]]
return {
"accepted": True,
"persist": True,
"recognized": True,
"corrected_model": corrected,
"message": f"Auto-corrected `{requested}` → `{corrected}`",
}
suggestions = get_close_matches(requested_for_lookup.lower(), catalog_lower_list, n=3, cutoff=0.5)
suggestion_text = ""
if suggestions:
suggestion_text = "\n Similar models: " + ", ".join(f"`{catalog_lower[s]}`" for s in suggestions)
return {
"accepted": True,
"persist": True,
"recognized": False,
"message": (
f"Note: `{requested}` was not found in the MiniMax catalog."
f"{suggestion_text}"
"\n MiniMax does not expose a /models endpoint, so Hermes cannot verify the model name."
"\n The model may still work if it exists on the server."
),
}
# Probe the live API to check if the model actually exists
api_models = fetch_api_models(api_key, base_url)
@ -2188,13 +2560,70 @@ def validate_requested_model(
except Exception:
pass # Fall through to generic warning
# Static-catalog fallback: when the /models probe was unreachable,
# validate against the curated list from provider_model_ids() — same
# pattern as the openai-codex and minimax branches above. This fixes
# /model switches in the gateway for providers like opencode-go and
# opencode-zen whose /models endpoint returns 404 against the HTML
# marketing site. Without this block, validate_requested_model would
# reject every model on such providers, switch_model() would return
# success=False, and the gateway would never write to
# _session_model_overrides.
provider_label = _PROVIDER_LABELS.get(normalized, normalized)
try:
catalog_models = provider_model_ids(normalized)
except Exception:
catalog_models = []
if catalog_models:
catalog_lower = {m.lower(): m for m in catalog_models}
if requested_for_lookup.lower() in catalog_lower:
return {
"accepted": True,
"persist": True,
"recognized": True,
"message": None,
}
catalog_lower_list = list(catalog_lower.keys())
auto = get_close_matches(
requested_for_lookup.lower(), catalog_lower_list, n=1, cutoff=0.9
)
if auto:
corrected = catalog_lower[auto[0]]
return {
"accepted": True,
"persist": True,
"recognized": True,
"corrected_model": corrected,
"message": f"Auto-corrected `{requested}` → `{corrected}`",
}
suggestions = get_close_matches(
requested_for_lookup.lower(), catalog_lower_list, n=3, cutoff=0.5
)
suggestion_text = ""
if suggestions:
suggestion_text = "\n Similar models: " + ", ".join(
f"`{catalog_lower[s]}`" for s in suggestions
)
return {
"accepted": True,
"persist": True,
"recognized": False,
"message": (
f"Note: `{requested}` was not found in the {provider_label} curated catalog "
f"and the /models endpoint was unreachable.{suggestion_text}"
f"\n The model may still work if it exists on the provider."
),
}
# No catalog available — accept with a warning, matching the comment's
# stated intent ("Accept and persist, but warn").
return {
"accepted": False,
"persist": False,
"accepted": True,
"persist": True,
"recognized": False,
"message": (
f"Could not reach the {provider_label} API to validate `{requested}`. "
f"Note: could not reach the {provider_label} API to validate `{requested}`. "
f"If the service isn't down, this model may not be valid."
),
}

View file

@ -10,6 +10,7 @@ from hermes_cli.auth import get_nous_auth_status
from hermes_cli.config import get_env_value, load_config
from tools.managed_tool_gateway import is_managed_tool_gateway_ready
from tools.tool_backend_helpers import (
fal_key_is_configured,
has_direct_modal_credentials,
managed_nous_tools_enabled,
normalize_browser_cloud_provider,
@ -271,7 +272,7 @@ def get_nous_subscription_features(
direct_firecrawl = bool(get_env_value("FIRECRAWL_API_KEY") or get_env_value("FIRECRAWL_API_URL"))
direct_parallel = bool(get_env_value("PARALLEL_API_KEY"))
direct_tavily = bool(get_env_value("TAVILY_API_KEY"))
direct_fal = bool(get_env_value("FAL_KEY"))
direct_fal = fal_key_is_configured()
direct_openai_tts = bool(resolve_openai_audio_api_key())
direct_elevenlabs = bool(get_env_value("ELEVENLABS_API_KEY"))
direct_camofox = bool(get_env_value("CAMOFOX_URL"))
@ -520,7 +521,7 @@ def apply_nous_managed_defaults(
browser_cfg["cloud_provider"] = "browser-use"
changed.add("browser")
if "image_gen" in selected_toolsets and not get_env_value("FAL_KEY"):
if "image_gen" in selected_toolsets and not fal_key_is_configured():
changed.add("image_gen")
return changed
@ -548,7 +549,7 @@ def _get_gateway_direct_credentials() -> Dict[str, bool]:
or get_env_value("TAVILY_API_KEY")
or get_env_value("EXA_API_KEY")
),
"image_gen": bool(get_env_value("FAL_KEY")),
"image_gen": fal_key_is_configured(),
"tts": bool(
resolve_openai_audio_api_key()
or get_env_value("ELEVENLABS_API_KEY")
@ -586,7 +587,6 @@ def get_gateway_eligible_tools(
return [], [], []
if config is None:
from hermes_cli.config import load_config
config = load_config() or {}
# Quick provider check without the heavy get_nous_subscription_features call

View file

@ -2,14 +2,20 @@
Hermes Plugin System
====================
Discovers, loads, and manages plugins from three sources:
Discovers, loads, and manages plugins from four sources:
1. **User plugins** ``~/.hermes/plugins/<name>/``
2. **Project plugins** ``./.hermes/plugins/<name>/`` (opt-in via
1. **Bundled plugins** ``<repo>/plugins/<name>/`` (shipped with hermes-agent;
   ``memory/`` and ``context_engine/`` subdirs are excluded — they have their
own discovery paths)
2. **User plugins** ``~/.hermes/plugins/<name>/``
3. **Project plugins** ``./.hermes/plugins/<name>/`` (opt-in via
``HERMES_ENABLE_PROJECT_PLUGINS``)
3. **Pip plugins** packages that expose the ``hermes_agent.plugins``
4. **Pip plugins** packages that expose the ``hermes_agent.plugins``
entry-point group.
Later sources override earlier ones on name collision, so a user or project
plugin with the same name as a bundled plugin replaces it.
Each directory plugin must contain a ``plugin.yaml`` manifest **and** an
``__init__.py`` with a ``register(ctx)`` function.
@ -54,6 +60,8 @@ logger = logging.getLogger(__name__)
VALID_HOOKS: Set[str] = {
"pre_tool_call",
"post_tool_call",
"transform_terminal_output",
"transform_tool_result",
"pre_llm_call",
"post_llm_call",
"pre_api_request",
@ -62,6 +70,7 @@ VALID_HOOKS: Set[str] = {
"on_session_end",
"on_session_finalize",
"on_session_reset",
"subagent_stop",
}
ENTRY_POINTS_GROUP = "hermes_agent.plugins"
@ -75,7 +84,12 @@ def _env_enabled(name: str) -> bool:
def _get_disabled_plugins() -> set:
"""Read the disabled plugins list from config.yaml."""
"""Read the disabled plugins list from config.yaml.
Kept for backward compat and explicit deny-list semantics. A plugin
name in this set will never load, even if it appears in
``plugins.enabled``.
"""
try:
from hermes_cli.config import load_config
config = load_config()
@ -85,10 +99,43 @@ def _get_disabled_plugins() -> set:
return set()
def _get_enabled_plugins() -> Optional[set]:
"""Read the enabled-plugins allow-list from config.yaml.
Plugins are opt-in by default only plugins whose name appears in
this set are loaded. Returns:
* ``None`` the key is missing or malformed. Callers should treat
this as "nothing enabled yet" (the opt-in default); the first
``migrate_config`` run populates the key with a grandfathered set
of currently-installed user plugins so existing setups don't
break on upgrade.
* ``set()`` an empty list was explicitly set; nothing loads.
* ``set(...)`` the concrete allow-list.
"""
try:
from hermes_cli.config import load_config
config = load_config()
plugins_cfg = config.get("plugins")
if not isinstance(plugins_cfg, dict):
return None
if "enabled" not in plugins_cfg:
return None
enabled = plugins_cfg.get("enabled")
if not isinstance(enabled, list):
return None
return set(enabled)
except Exception:
return None
# ---------------------------------------------------------------------------
# Data classes
# ---------------------------------------------------------------------------
_VALID_PLUGIN_KINDS: Set[str] = {"standalone", "backend", "exclusive"}
@dataclass
class PluginManifest:
"""Parsed representation of a plugin.yaml manifest."""
@ -102,6 +149,23 @@ class PluginManifest:
provides_hooks: List[str] = field(default_factory=list)
source: str = "" # "user", "project", or "entrypoint"
path: Optional[str] = None
# Plugin kind — see plugins.py module docstring for semantics.
# ``standalone`` (default): hooks/tools of its own; opt-in via
# ``plugins.enabled``.
# ``backend``: pluggable backend for an existing core tool (e.g.
# image_gen). Built-in (bundled) backends auto-load;
# user-installed still gated by ``plugins.enabled``.
# ``exclusive``: category with exactly one active provider (memory).
# Selection via ``<category>.provider`` config key; the
# category's own discovery system handles loading and the
# general scanner skips these.
kind: str = "standalone"
# Registry key — path-derived, used by ``plugins.enabled``/``disabled``
# lookups and by ``hermes plugins list``. For a flat plugin at
# ``plugins/disk-cleanup/`` the key is ``disk-cleanup``; for a nested
# category plugin at ``plugins/image_gen/openai/`` the key is
# ``image_gen/openai``. When empty, falls back to ``name``.
key: str = ""
@dataclass
@ -322,6 +386,33 @@ class PluginContext:
self.manifest.name, engine.name,
)
# -- image gen provider registration ------------------------------------
def register_image_gen_provider(self, provider) -> None:
"""Register an image generation backend.
``provider`` must be an instance of
:class:`agent.image_gen_provider.ImageGenProvider`. The
``provider.name`` attribute is what ``image_gen.provider`` in
``config.yaml`` matches against when routing ``image_generate``
tool calls.
"""
from agent.image_gen_provider import ImageGenProvider
from agent.image_gen_registry import register_provider
if not isinstance(provider, ImageGenProvider):
logger.warning(
"Plugin '%s' tried to register an image_gen provider that does "
"not inherit from ImageGenProvider. Ignoring.",
self.manifest.name,
)
return
register_provider(provider)
logger.info(
"Plugin '%s' registered image_gen provider: %s",
self.manifest.name, provider.name,
)
# -- hook registration --------------------------------------------------
def register_hook(self, hook_name: str, callback: Callable) -> None:
@ -420,26 +511,103 @@ class PluginManager:
manifests: List[PluginManifest] = []
# 1. User plugins (~/.hermes/plugins/)
# 1. Bundled plugins (<repo>/plugins/<name>/)
#
# Repo-shipped plugins live next to hermes_cli/. Two layouts are
# supported (see ``_scan_directory`` for details):
#
# - flat: ``plugins/disk-cleanup/plugin.yaml`` (standalone)
# - category: ``plugins/image_gen/openai/plugin.yaml`` (backend)
#
# ``memory/`` and ``context_engine/`` are skipped at the top level —
# they have their own discovery systems. Porting those to the
# category-namespace ``kind: exclusive`` model is a future PR.
repo_plugins = Path(__file__).resolve().parent.parent / "plugins"
manifests.extend(
self._scan_directory(
repo_plugins,
source="bundled",
skip_names={"memory", "context_engine"},
)
)
# 2. User plugins (~/.hermes/plugins/)
user_dir = get_hermes_home() / "plugins"
manifests.extend(self._scan_directory(user_dir, source="user"))
# 2. Project plugins (./.hermes/plugins/)
# 3. Project plugins (./.hermes/plugins/)
if _env_enabled("HERMES_ENABLE_PROJECT_PLUGINS"):
project_dir = Path.cwd() / ".hermes" / "plugins"
manifests.extend(self._scan_directory(project_dir, source="project"))
# 3. Pip / entry-point plugins
# 4. Pip / entry-point plugins
manifests.extend(self._scan_entry_points())
# Load each manifest (skip user-disabled plugins)
# Load each manifest (skip user-disabled plugins).
# Later sources override earlier ones on key collision — user
# plugins take precedence over bundled, project plugins take
# precedence over user. Dedup here so we only load the final
# winner. Keys are path-derived (``image_gen/openai``,
# ``disk-cleanup``) so ``tts/openai`` and ``image_gen/openai``
# don't collide even when both manifests say ``name: openai``.
disabled = _get_disabled_plugins()
enabled = _get_enabled_plugins() # None = opt-in default (nothing enabled)
winners: Dict[str, PluginManifest] = {}
for manifest in manifests:
if manifest.name in disabled:
winners[manifest.key or manifest.name] = manifest
for manifest in winners.values():
lookup_key = manifest.key or manifest.name
# Explicit disable always wins (matches on key or on legacy
# bare name for back-compat with existing user configs).
if lookup_key in disabled or manifest.name in disabled:
loaded = LoadedPlugin(manifest=manifest, enabled=False)
loaded.error = "disabled via config"
self._plugins[manifest.name] = loaded
logger.debug("Skipping disabled plugin '%s'", manifest.name)
self._plugins[lookup_key] = loaded
logger.debug("Skipping disabled plugin '%s'", lookup_key)
continue
# Exclusive plugins (memory providers) have their own
# discovery/activation path. The general loader records the
# manifest for introspection but does not load the module.
if manifest.kind == "exclusive":
loaded = LoadedPlugin(manifest=manifest, enabled=False)
loaded.error = (
"exclusive plugin — activate via <category>.provider config"
)
self._plugins[lookup_key] = loaded
logger.debug(
"Skipping '%s' (exclusive, handled by category discovery)",
lookup_key,
)
continue
# Built-in backends auto-load — they ship with hermes and must
# just work. Selection among them (e.g. which image_gen backend
# services calls) is driven by ``<category>.provider`` config,
# enforced by the tool wrapper.
if manifest.kind == "backend" and manifest.source == "bundled":
self._load_plugin(manifest)
continue
# Everything else (standalone, user-installed backends,
# entry-point plugins) is opt-in via plugins.enabled.
# Accept both the path-derived key and the legacy bare name
# so existing configs keep working.
is_enabled = (
enabled is not None
and (lookup_key in enabled or manifest.name in enabled)
)
if not is_enabled:
loaded = LoadedPlugin(manifest=manifest, enabled=False)
loaded.error = (
"not enabled in config (run `hermes plugins enable {}` to activate)"
.format(lookup_key)
)
self._plugins[lookup_key] = loaded
logger.debug(
"Skipping '%s' (not in plugins.enabled)", lookup_key
)
continue
self._load_plugin(manifest)
@ -454,8 +622,46 @@ class PluginManager:
# Directory scanning
# -----------------------------------------------------------------------
def _scan_directory(self, path: Path, source: str) -> List[PluginManifest]:
"""Read ``plugin.yaml`` manifests from subdirectories of *path*."""
def _scan_directory(
self,
path: Path,
source: str,
skip_names: Optional[Set[str]] = None,
) -> List[PluginManifest]:
"""Read ``plugin.yaml`` manifests from subdirectories of *path*.
Supports two layouts, mixed freely:
* **Flat** ``<root>/<plugin-name>/plugin.yaml``. Key is
``<plugin-name>`` (e.g. ``disk-cleanup``).
* **Category** ``<root>/<category>/<plugin-name>/plugin.yaml``,
where the ``<category>`` directory itself has no ``plugin.yaml``.
Key is ``<category>/<plugin-name>`` (e.g. ``image_gen/openai``).
Depth is capped at two segments.
*skip_names* is an optional allow-list of names to ignore at the
top level (kept for back-compat; the current call sites no longer
pass it now that categories are first-class).
"""
return self._scan_directory_level(
path, source, skip_names=skip_names, prefix="", depth=0
)
def _scan_directory_level(
self,
path: Path,
source: str,
*,
skip_names: Optional[Set[str]],
prefix: str,
depth: int,
) -> List[PluginManifest]:
"""Recursive implementation of :meth:`_scan_directory`.
``prefix`` is the category path already accumulated ("" at root,
"image_gen" one level in). ``depth`` is the recursion depth; we
cap at 2 so ``<root>/a/b/c/`` is ignored.
"""
manifests: List[PluginManifest] = []
if not path.is_dir():
return manifests
@ -463,35 +669,112 @@ class PluginManager:
for child in sorted(path.iterdir()):
if not child.is_dir():
continue
if depth == 0 and skip_names and child.name in skip_names:
continue
manifest_file = child / "plugin.yaml"
if not manifest_file.exists():
manifest_file = child / "plugin.yml"
if not manifest_file.exists():
logger.debug("Skipping %s (no plugin.yaml)", child)
if manifest_file.exists():
manifest = self._parse_manifest(
manifest_file, child, source, prefix
)
if manifest is not None:
manifests.append(manifest)
continue
try:
if yaml is None:
logger.warning("PyYAML not installed cannot load %s", manifest_file)
continue
data = yaml.safe_load(manifest_file.read_text()) or {}
manifest = PluginManifest(
name=data.get("name", child.name),
version=str(data.get("version", "")),
description=data.get("description", ""),
author=data.get("author", ""),
requires_env=data.get("requires_env", []),
provides_tools=data.get("provides_tools", []),
provides_hooks=data.get("provides_hooks", []),
source=source,
path=str(child),
# No manifest at this level. If we're still within the depth
# cap, treat this directory as a category namespace and recurse
# one level in looking for children with manifests.
if depth >= 1:
logger.debug("Skipping %s (no plugin.yaml, depth cap reached)", child)
continue
sub_prefix = f"{prefix}/{child.name}" if prefix else child.name
manifests.extend(
self._scan_directory_level(
child,
source,
skip_names=None,
prefix=sub_prefix,
depth=depth + 1,
)
manifests.append(manifest)
except Exception as exc:
logger.warning("Failed to parse %s: %s", manifest_file, exc)
)
return manifests
def _parse_manifest(
self,
manifest_file: Path,
plugin_dir: Path,
source: str,
prefix: str,
) -> Optional[PluginManifest]:
"""Parse a single ``plugin.yaml`` into a :class:`PluginManifest`.
Returns ``None`` on parse failure (logs a warning).
"""
try:
if yaml is None:
logger.warning("PyYAML not installed cannot load %s", manifest_file)
return None
data = yaml.safe_load(manifest_file.read_text()) or {}
name = data.get("name", plugin_dir.name)
key = f"{prefix}/{plugin_dir.name}" if prefix else name
raw_kind = data.get("kind", "standalone")
if not isinstance(raw_kind, str):
raw_kind = "standalone"
kind = raw_kind.strip().lower()
if kind not in _VALID_PLUGIN_KINDS:
logger.warning(
"Plugin %s: unknown kind '%s' (valid: %s); treating as 'standalone'",
key, raw_kind, ", ".join(sorted(_VALID_PLUGIN_KINDS)),
)
kind = "standalone"
# Auto-coerce user-installed memory providers to kind="exclusive"
# so they're routed to plugins/memory discovery instead of being
# loaded by the general PluginManager (which has no
# register_memory_provider on PluginContext). Mirrors the
# heuristic in plugins/memory/__init__.py:_is_memory_provider_dir.
# Bundled memory providers are already skipped via skip_names.
if kind == "standalone" and "kind" not in data:
init_file = plugin_dir / "__init__.py"
if init_file.exists():
try:
source_text = init_file.read_text(errors="replace")[:8192]
if (
"register_memory_provider" in source_text
or "MemoryProvider" in source_text
):
kind = "exclusive"
logger.debug(
"Plugin %s: detected memory provider, "
"treating as kind='exclusive'",
key,
)
except Exception:
pass
return PluginManifest(
name=name,
version=str(data.get("version", "")),
description=data.get("description", ""),
author=data.get("author", ""),
requires_env=data.get("requires_env", []),
provides_tools=data.get("provides_tools", []),
provides_hooks=data.get("provides_hooks", []),
source=source,
path=str(plugin_dir),
kind=kind,
key=key,
)
except Exception as exc:
logger.warning("Failed to parse %s: %s", manifest_file, exc)
return None
# -----------------------------------------------------------------------
# Entry-point scanning
# -----------------------------------------------------------------------
@ -514,6 +797,7 @@ class PluginManager:
name=ep.name,
source="entrypoint",
path=ep.value,
key=ep.name,
)
manifests.append(manifest)
except Exception as exc:
@ -530,7 +814,7 @@ class PluginManager:
loaded = LoadedPlugin(manifest=manifest)
try:
if manifest.source in ("user", "project"):
if manifest.source in ("user", "project", "bundled"):
module = self._load_directory_module(manifest)
else:
module = self._load_entrypoint_module(manifest)
@ -575,10 +859,16 @@ class PluginManager:
loaded.error = str(exc)
logger.warning("Failed to load plugin '%s': %s", manifest.name, exc)
self._plugins[manifest.name] = loaded
self._plugins[manifest.key or manifest.name] = loaded
def _load_directory_module(self, manifest: PluginManifest) -> types.ModuleType:
"""Import a directory-based plugin as ``hermes_plugins.<name>``."""
"""Import a directory-based plugin as ``hermes_plugins.<slug>``.
The module slug is derived from ``manifest.key`` so category-namespaced
plugins (``image_gen/openai``) import as
``hermes_plugins.image_gen__openai`` without colliding with any
future ``tts/openai``.
"""
plugin_dir = Path(manifest.path) # type: ignore[arg-type]
init_file = plugin_dir / "__init__.py"
if not init_file.exists():
@ -591,7 +881,9 @@ class PluginManager:
ns_pkg.__package__ = _NS_PARENT
sys.modules[_NS_PARENT] = ns_pkg
module_name = f"{_NS_PARENT}.{manifest.name.replace('-', '_')}"
key = manifest.key or manifest.name
slug = key.replace("/", "__").replace("-", "_")
module_name = f"{_NS_PARENT}.{slug}"
spec = importlib.util.spec_from_file_location(
module_name,
init_file,
@ -672,10 +964,12 @@ class PluginManager:
def list_plugins(self) -> List[Dict[str, Any]]:
"""Return a list of info dicts for all discovered plugins."""
result: List[Dict[str, Any]] = []
for name, loaded in sorted(self._plugins.items()):
for key, loaded in sorted(self._plugins.items()):
result.append(
{
"name": name,
"name": loaded.manifest.name,
"key": loaded.manifest.key or loaded.manifest.name,
"kind": loaded.manifest.kind,
"version": loaded.manifest.version,
"description": loaded.manifest.description,
"source": loaded.manifest.source,
@ -779,23 +1073,31 @@ def get_pre_tool_call_block_message(
return None
def _ensure_plugins_discovered() -> PluginManager:
    """Return the global manager after running idempotent plugin discovery."""
    mgr = get_plugin_manager()
    # discover_and_load() is idempotent, so calling it on every access is safe.
    mgr.discover_and_load()
    return mgr
def get_plugin_context_engine():
    """Return the plugin-registered context engine, or ``None``.

    Triggers idempotent plugin discovery first so the engine is available
    even before any explicit discover_plugins() call. (A stale pre-merge
    ``return get_plugin_manager()._context_engine`` line made the discovery
    path unreachable; removed.)
    """
    return _ensure_plugins_discovered()._context_engine
def get_plugin_command_handler(name: str) -> Optional[Callable]:
"""Return the handler for a plugin-registered slash command, or ``None``."""
entry = get_plugin_manager()._plugin_commands.get(name)
entry = _ensure_plugins_discovered()._plugin_commands.get(name)
return entry["handler"] if entry else None
def get_plugin_commands() -> Dict[str, dict]:
    """Return the full plugin commands dict (name → {handler, description, plugin}).

    Triggers idempotent plugin discovery so callers can use plugin commands
    before any explicit discover_plugins() call. (A stale pre-merge
    ``return get_plugin_manager()._plugin_commands`` line shadowed the
    discovery-triggering return; removed.)
    """
    return _ensure_plugins_discovered()._plugin_commands
def get_plugin_toolsets() -> List[tuple]:

View file

@ -15,6 +15,7 @@ import shutil
import subprocess
import sys
from pathlib import Path
from typing import Optional
from hermes_constants import get_hermes_home
@ -281,8 +282,16 @@ def _require_installed_plugin(name: str, plugins_dir: Path, console) -> Path:
# ---------------------------------------------------------------------------
def cmd_install(identifier: str, force: bool = False) -> None:
"""Install a plugin from a Git URL or owner/repo shorthand."""
def cmd_install(
identifier: str,
force: bool = False,
enable: Optional[bool] = None,
) -> None:
"""Install a plugin from a Git URL or owner/repo shorthand.
After install, prompt "Enable now? [y/N]" unless *enable* is provided
(True = auto-enable without prompting, False = install disabled).
"""
import tempfile
from rich.console import Console
@ -391,6 +400,40 @@ def cmd_install(identifier: str, force: bool = False) -> None:
_display_after_install(target, identifier)
# Determine the canonical plugin name for enable-list bookkeeping.
installed_name = installed_manifest.get("name") or target.name
# Decide whether to enable: explicit flag > interactive prompt > default off
should_enable = enable
if should_enable is None:
# Interactive prompt unless stdin isn't a TTY (scripted install).
if sys.stdin.isatty() and sys.stdout.isatty():
try:
answer = input(
f" Enable '{installed_name}' now? [y/N]: "
).strip().lower()
should_enable = answer in ("y", "yes")
except (EOFError, KeyboardInterrupt):
should_enable = False
else:
should_enable = False
if should_enable:
enabled = _get_enabled_set()
disabled = _get_disabled_set()
enabled.add(installed_name)
disabled.discard(installed_name)
_save_enabled_set(enabled)
_save_disabled_set(disabled)
console.print(
f"[green]✓[/green] Plugin [bold]{installed_name}[/bold] enabled."
)
else:
console.print(
f"[dim]Plugin installed but not enabled. "
f"Run `hermes plugins enable {installed_name}` to activate.[/dim]"
)
console.print("[dim]Restart the gateway for the plugin to take effect:[/dim]")
console.print("[dim] hermes gateway restart[/dim]")
console.print()
@ -468,7 +511,11 @@ def cmd_remove(name: str) -> None:
def _get_disabled_set() -> set:
"""Read the disabled plugins set from config.yaml."""
"""Read the disabled plugins set from config.yaml.
An explicit deny-list. A plugin name here never loads, even if also
listed in ``plugins.enabled``.
"""
try:
from hermes_cli.config import load_config
config = load_config()
@ -488,103 +535,196 @@ def _save_disabled_set(disabled: set) -> None:
save_config(config)
def _get_enabled_set() -> set:
"""Read the enabled plugins allow-list from config.yaml.
Plugins are opt-in: only names here are loaded. Returns ``set()`` if
the key is missing (same behaviour as "nothing enabled yet").
"""
try:
from hermes_cli.config import load_config
config = load_config()
plugins_cfg = config.get("plugins", {})
if not isinstance(plugins_cfg, dict):
return set()
enabled = plugins_cfg.get("enabled", [])
return set(enabled) if isinstance(enabled, list) else set()
except Exception:
return set()
def _save_enabled_set(enabled: set) -> None:
    """Write the enabled plugins list to config.yaml.

    The set is persisted sorted so the YAML diff is stable across runs.
    """
    from hermes_cli.config import load_config, save_config

    config = load_config()
    # Create the plugins mapping on first write, then overwrite the list.
    plugins_cfg = config.setdefault("plugins", {})
    plugins_cfg["enabled"] = sorted(enabled)
    save_config(config)
def cmd_enable(name: str) -> None:
    """Add a plugin to the enabled allow-list (and remove it from disabled).

    Args:
        name: Plugin name (directory name or manifest ``name``).

    Exits with status 1 if no installed or bundled plugin matches *name*.
    """
    from rich.console import Console
    console = Console()

    # Discover the plugin — check installed (user) AND bundled.
    if not _plugin_exists(name):
        console.print(f"[red]Plugin '{name}' is not installed or bundled.[/red]")
        sys.exit(1)

    enabled = _get_enabled_set()
    disabled = _get_disabled_set()
    # Already effectively enabled: in the allow-list and not denied.
    if name in enabled and name not in disabled:
        console.print(f"[dim]Plugin '{name}' is already enabled.[/dim]")
        return

    # Enabling both adds to the allow-list and clears any deny entry.
    enabled.add(name)
    disabled.discard(name)
    _save_enabled_set(enabled)
    _save_disabled_set(disabled)
    console.print(
        f"[green]✓[/green] Plugin [bold]{name}[/bold] enabled. "
        "Takes effect on next session."
    )
def cmd_disable(name: str) -> None:
    """Remove a plugin from the enabled allow-list (and add to disabled).

    Args:
        name: Plugin name (directory name or manifest ``name``).

    Exits with status 1 if no installed or bundled plugin matches *name*.
    """
    from rich.console import Console
    console = Console()

    # Discover the plugin — check installed (user) AND bundled.
    if not _plugin_exists(name):
        console.print(f"[red]Plugin '{name}' is not installed or bundled.[/red]")
        sys.exit(1)

    enabled = _get_enabled_set()
    disabled = _get_disabled_set()
    # Already effectively disabled: denied and not in the allow-list.
    if name not in enabled and name in disabled:
        console.print(f"[dim]Plugin '{name}' is already disabled.[/dim]")
        return

    # Disabling both removes the allow entry and records an explicit deny.
    enabled.discard(name)
    disabled.add(name)
    _save_enabled_set(enabled)
    _save_disabled_set(disabled)
    console.print(
        f"[yellow]\u2298[/yellow] Plugin [bold]{name}[/bold] disabled. "
        "Takes effect on next session."
    )
def cmd_list() -> None:
"""List installed plugins."""
from rich.console import Console
from rich.table import Table
def _plugin_exists(name: str) -> bool:
    """Return True if a plugin with *name* is installed (user) or bundled."""
    # User-installed: match either the directory name or the manifest name.
    user_dir = _plugins_dir()
    if user_dir.is_dir():
        if (user_dir / name).is_dir():
            return True
        if any(
            child.is_dir() and _read_manifest(child).get("name") == name
            for child in user_dir.iterdir()
        ):
            return True

    # Bundled: <repo>/plugins/<name>/ containing a manifest file.
    from pathlib import Path as _P
    import hermes_cli

    bundled_root = _P(hermes_cli.__file__).resolve().parent.parent / "plugins"
    candidate = bundled_root / name
    if bundled_root.is_dir() and candidate.is_dir():
        for manifest_name in ("plugin.yaml", "plugin.yml"):
            if (candidate / manifest_name).exists():
                return True
    return False
def _discover_all_plugins() -> list:
    """Return a list of (name, version, description, source, dir_path) for
    every plugin the loader can see — user + bundled.

    Matches the ordering/dedup of ``PluginManager.discover_and_load``:
    bundled first, then user; user overrides bundled on name collision.
    Entries without a ``plugin.yaml``/``plugin.yml`` manifest are skipped.
    """
    # YAML is optional: without it we fall back to directory names only.
    try:
        import yaml
    except ImportError:
        yaml = None

    seen: dict = {}  # name -> (name, version, description, source, path)

    # Bundled (<repo>/plugins/<name>/), excluding memory/ and context_engine/
    import hermes_cli
    repo_plugins = Path(hermes_cli.__file__).resolve().parent.parent / "plugins"
    for base, source in ((repo_plugins, "bundled"), (_plugins_dir(), "user")):
        if not base.is_dir():
            continue
        for d in sorted(base.iterdir()):
            if not d.is_dir():
                continue
            if source == "bundled" and d.name in ("memory", "context_engine"):
                continue
            manifest_file = d / "plugin.yaml"
            if not manifest_file.exists():
                manifest_file = d / "plugin.yml"
            if not manifest_file.exists():
                continue
            name = d.name
            version = ""
            description = ""
            if yaml:
                # Best-effort manifest read; a broken manifest still lists
                # the plugin under its directory name.
                try:
                    with open(manifest_file) as f:
                        manifest = yaml.safe_load(f) or {}
                    name = manifest.get("name", d.name)
                    version = manifest.get("version", "")
                    description = manifest.get("description", "")
                except Exception:
                    pass
            # User plugins override bundled on name collision: bundled is
            # scanned first, so skip a bundled dup of an already-seen name
            # while letting a later user entry overwrite it.
            if name in seen and source == "bundled":
                continue
            src_label = source
            # A .git directory marks a `hermes plugins install` checkout.
            if source == "user" and (d / ".git").exists():
                src_label = "git"
            seen[name] = (name, version, description, src_label, d)

    return list(seen.values())
def cmd_list() -> None:
"""List all plugins (bundled + user) with enabled/disabled state."""
from rich.console import Console
from rich.table import Table
console = Console()
entries = _discover_all_plugins()
if not entries:
console.print("[dim]No plugins installed.[/dim]")
console.print("[dim]Install with:[/dim] hermes plugins install owner/repo")
return
enabled = _get_enabled_set()
disabled = _get_disabled_set()
table = Table(title="Installed Plugins", show_lines=False)
table = Table(title="Plugins", show_lines=False)
table.add_column("Name", style="bold")
table.add_column("Status")
table.add_column("Version", style="dim")
table.add_column("Description")
table.add_column("Source", style="dim")
for d in dirs:
manifest_file = d / "plugin.yaml"
name = d.name
version = ""
description = ""
source = "local"
if manifest_file.exists() and yaml:
try:
with open(manifest_file) as f:
manifest = yaml.safe_load(f) or {}
name = manifest.get("name", d.name)
version = manifest.get("version", "")
description = manifest.get("description", "")
except Exception:
pass
# Check if it's a git repo (installed via hermes plugins install)
if (d / ".git").exists():
source = "git"
is_disabled = name in disabled or d.name in disabled
status = "[red]disabled[/red]" if is_disabled else "[green]enabled[/green]"
for name, version, description, source, _dir in entries:
if name in disabled:
status = "[red]disabled[/red]"
elif name in enabled:
status = "[green]enabled[/green]"
else:
status = "[yellow]not enabled[/yellow]"
table.add_row(name, status, str(version), description, source)
console.print()
@ -592,6 +732,7 @@ def cmd_list() -> None:
console.print()
console.print("[dim]Interactive toggle:[/dim] hermes plugins")
console.print("[dim]Enable/disable:[/dim] hermes plugins enable/disable <name>")
console.print("[dim]Plugins are opt-in by default — only 'enabled' plugins load.[/dim]")
# ---------------------------------------------------------------------------
@ -742,41 +883,25 @@ def cmd_toggle() -> None:
"""Interactive composite UI — general plugins + provider plugin categories."""
from rich.console import Console
try:
import yaml
except ImportError:
yaml = None
console = Console()
plugins_dir = _plugins_dir()
# -- General plugins discovery --
dirs = sorted(d for d in plugins_dir.iterdir() if d.is_dir())
disabled = _get_disabled_set()
# -- General plugins discovery (bundled + user) --
entries = _discover_all_plugins()
enabled_set = _get_enabled_set()
disabled_set = _get_disabled_set()
plugin_names = []
plugin_labels = []
plugin_selected = set()
for i, d in enumerate(dirs):
manifest_file = d / "plugin.yaml"
name = d.name
description = ""
if manifest_file.exists() and yaml:
try:
with open(manifest_file) as f:
manifest = yaml.safe_load(f) or {}
name = manifest.get("name", d.name)
description = manifest.get("description", "")
except Exception:
pass
plugin_names.append(name)
for i, (name, _version, description, source, _d) in enumerate(entries):
label = f"{name} \u2014 {description}" if description else name
if source == "bundled":
label = f"{label} [bundled]"
plugin_names.append(name)
plugin_labels.append(label)
if name not in disabled and d.name not in disabled:
# Selected (enabled) when in enabled-set AND not in disabled-set
if name in enabled_set and name not in disabled_set:
plugin_selected.add(i)
# -- Provider categories --
@ -804,10 +929,10 @@ def cmd_toggle() -> None:
try:
import curses
_run_composite_ui(curses, plugin_names, plugin_labels, plugin_selected,
disabled, categories, console)
disabled_set, categories, console)
except ImportError:
_run_composite_fallback(plugin_names, plugin_labels, plugin_selected,
disabled, categories, console)
disabled_set, categories, console)
def _run_composite_ui(curses, plugin_names, plugin_labels, plugin_selected,
@ -1020,18 +1145,29 @@ def _run_composite_ui(curses, plugin_names, plugin_labels, plugin_selected,
curses.wrapper(_draw)
flush_stdin()
# Persist general plugin changes
new_disabled = set()
# Persist general plugin changes. The new allow-list is the set of
# plugin names that were checked; anything not checked is explicitly
# disabled (written to disabled-list) so it remains off even if the
# plugin code does something clever like auto-enable in the future.
new_enabled: set = set()
new_disabled: set = set(disabled) # preserve existing disabled state for unseen plugins
for i, name in enumerate(plugin_names):
if i not in chosen:
if i in chosen:
new_enabled.add(name)
new_disabled.discard(name)
else:
new_disabled.add(name)
if new_disabled != disabled:
prev_enabled = _get_enabled_set()
enabled_changed = new_enabled != prev_enabled
disabled_changed = new_disabled != disabled
if enabled_changed or disabled_changed:
_save_enabled_set(new_enabled)
_save_disabled_set(new_disabled)
enabled_count = len(plugin_names) - len(new_disabled)
console.print(
f"\n[green]\u2713[/green] General plugins: {enabled_count} enabled, "
f"{len(new_disabled)} disabled."
f"\n[green]\u2713[/green] General plugins: {len(new_enabled)} enabled, "
f"{len(plugin_names) - len(new_enabled)} disabled."
)
elif n_plugins > 0:
console.print("\n[dim]General plugins unchanged.[/dim]")
@ -1078,11 +1214,17 @@ def _run_composite_fallback(plugin_names, plugin_labels, plugin_selected,
return
print()
new_disabled = set()
new_enabled: set = set()
new_disabled: set = set(disabled)
for i, name in enumerate(plugin_names):
if i not in chosen:
if i in chosen:
new_enabled.add(name)
new_disabled.discard(name)
else:
new_disabled.add(name)
if new_disabled != disabled:
prev_enabled = _get_enabled_set()
if new_enabled != prev_enabled or new_disabled != disabled:
_save_enabled_set(new_enabled)
_save_disabled_set(new_disabled)
# Provider categories
@ -1108,7 +1250,17 @@ def plugins_command(args) -> None:
action = getattr(args, "plugins_action", None)
if action == "install":
cmd_install(args.identifier, force=getattr(args, "force", False))
# Map argparse tri-state: --enable=True, --no-enable=False, neither=None (prompt)
enable_arg = None
if getattr(args, "enable", False):
enable_arg = True
elif getattr(args, "no_enable", False):
enable_arg = False
cmd_install(
args.identifier,
force=getattr(args, "force", False),
enable=enable_arg,
)
elif action == "update":
cmd_update(args.name)
elif action in ("remove", "rm", "uninstall"):

View file

@ -23,6 +23,8 @@ import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from utils import base_url_host_matches, base_url_hostname
logger = logging.getLogger(__name__)
@ -92,6 +94,12 @@ HERMES_OVERLAYS: Dict[str, HermesOverlay] = {
transport="openai_chat",
base_url_env_var="KIMI_BASE_URL",
),
"stepfun": HermesOverlay(
transport="openai_chat",
extra_env_vars=("STEPFUN_API_KEY",),
base_url_override="https://api.stepfun.ai/step_plan/v1",
base_url_env_var="STEPFUN_BASE_URL",
),
"minimax": HermesOverlay(
transport="anthropic_messages",
base_url_env_var="MINIMAX_BASE_URL",
@ -208,6 +216,10 @@ ALIASES: Dict[str, str] = {
"kimi-coding-cn": "kimi-for-coding",
"moonshot": "kimi-for-coding",
# stepfun
"step": "stepfun",
"stepfun-coding-plan": "stepfun",
# minimax-cn
"minimax-china": "minimax-cn",
"minimax_cn": "minimax-cn",
@ -292,6 +304,7 @@ _LABEL_OVERRIDES: Dict[str, str] = {
"nous": "Nous Portal",
"openai-codex": "OpenAI Codex",
"copilot-acp": "GitHub Copilot ACP",
"stepfun": "StepFun Step Plan",
"xiaomi": "Xiaomi MiMo",
"local": "Local endpoint",
"bedrock": "AWS Bedrock",
@ -322,12 +335,16 @@ def normalize_provider(name: str) -> str:
def get_provider(name: str) -> Optional[ProviderDef]:
"""Look up a provider by id or alias, merging all data sources.
"""Look up a built-in provider by id or alias.
Resolution order:
1. Hermes overlays (for providers not in models.dev: nous, openai-codex, etc.)
2. models.dev catalog + Hermes overlay
3. User-defined providers from config (TODO: Phase 4)
User-defined providers from config.yaml (``providers:`` / ``custom_providers:``)
are resolved by :func:`resolve_provider_full`, which layers ``resolve_user_provider``
and ``resolve_custom_provider`` on top of this function. Callers that need
user-config support should use ``resolve_provider_full`` instead.
Returns a fully-resolved ProviderDef or None.
"""
@ -421,6 +438,16 @@ def determine_api_mode(provider: str, base_url: str = "") -> str:
"""
pdef = get_provider(provider)
if pdef is not None:
# Even for known providers, check URL heuristics for special endpoints
# (e.g. kimi /coding endpoint needs anthropic_messages even on 'custom')
if base_url:
url_lower = base_url.rstrip("/").lower()
if "api.kimi.com/coding" in url_lower:
return "anthropic_messages"
if url_lower.endswith("/anthropic") or "api.anthropic.com" in url_lower:
return "anthropic_messages"
if "api.openai.com" in url_lower:
return "codex_responses"
return TRANSPORT_TO_API_MODE.get(pdef.transport, "chat_completions")
# Direct provider checks for providers not in HERMES_OVERLAYS
@ -430,11 +457,14 @@ def determine_api_mode(provider: str, base_url: str = "") -> str:
# URL-based heuristics for custom / unknown providers
if base_url:
url_lower = base_url.rstrip("/").lower()
if url_lower.endswith("/anthropic") or "api.anthropic.com" in url_lower:
hostname = base_url_hostname(base_url)
if url_lower.endswith("/anthropic") or hostname == "api.anthropic.com":
return "anthropic_messages"
if "api.openai.com" in url_lower:
if hostname == "api.kimi.com" and "/coding" in url_lower:
return "anthropic_messages"
if hostname == "api.openai.com":
return "codex_responses"
if "bedrock-runtime" in url_lower and "amazonaws.com" in url_lower:
if hostname.startswith("bedrock-runtime.") and base_url_host_matches(base_url, "amazonaws.com"):
return "bedrock_converse"
return "chat_completions"

Some files were not shown because too many files have changed in this diff Show more