Mirror of https://github.com/NousResearch/hermes-agent.git (synced 2026-04-30 01:41:43 +00:00)
feat: add streaming LLM response support across all platforms
Cherry-picked from PR #828, resolved conflicts with main.
parent b2a4092783
commit 95d221c31c

6 changed files with 696 additions and 22 deletions
```diff
@@ -219,6 +219,22 @@ compression:
 # Options: "auto", "openrouter", "nous", "main"
 # summary_provider: "auto"
 
+# =============================================================================
+# Streaming (live token-by-token response display)
+# =============================================================================
+# When enabled, LLM responses stream token-by-token instead of appearing
+# all at once. Supported on Telegram, Discord, Slack (via message editing)
+# and the API server (via SSE). Disabled by default.
+#
+# streaming:
+#   enabled: false         # Master switch (default: off)
+#   # Per-platform overrides:
+#   # telegram: true
+#   # discord: true
+#   # api_server: true
+#   # edit_interval: 1.5   # Seconds between message edits (default: 1.5)
+#   # min_tokens: 20       # Tokens before first display (default: 20)
+
 # =============================================================================
 # Auxiliary Models (Advanced — Experimental)
 # =============================================================================
```
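As a usage sketch (not part of this commit), an enabled configuration might look like the following, assuming the keys nest under a top-level streaming block exactly as the commented-out example above suggests; the values shown are illustrative only.

```yaml
# Hypothetical enabled configuration, derived from the commented-out block in
# the diff above. Key names and nesting are assumed, not taken from shipped code.
streaming:
  enabled: true          # master switch (the default is off)
  # Per-platform overrides, per the commented example above:
  telegram: true
  discord: true
  api_server: true
  edit_interval: 1.5     # seconds between message edits
  min_tokens: 20         # tokens buffered before the first display
```

Leaving enabled set to false, or omitting the block entirely, keeps the existing all-at-once response behaviour.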