-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathconfig.example.yaml
More file actions
222 lines (205 loc) · 6.11 KB
/
config.example.yaml
File metadata and controls
222 lines (205 loc) · 6.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
# Example configuration for the AI Chats bridge.
# Matrix homeserver connection settings.
homeserver:
  # Client-server API base URL used by the bridge.
  address: https://matrix-client.example.com
  # Server name (the domain part of MXIDs) of the homeserver.
  domain: example.com
  verify_ssl: true
  async_media: true
# Appservice (bridge-side) listener and registration settings.
appservice:
  # URL the homeserver uses to reach this appservice.
  address: http://localhost:29345
  # Interface and port the appservice binds to.
  # Quoted so the dotted address is unambiguously a string.
  hostname: "0.0.0.0"
  port: 29345
  database: aichats.db
  # Appservice ID as registered with the homeserver.
  id: openai-gpt
  bot:
    username: gptbridge
    displayname: "ChatGPT Bridge"
    # avatar: mxc://example.com/abcdef
  # Shared secrets from the appservice registration file.
  as_token: "GENERATE-A-TOKEN"
  hs_token: "GENERATE-A-TOKEN"
# Logging configuration.
logging:
  level: info
  format: json
# Bridge database configuration.
database:
  # SQLite driver variant (foreign keys + WAL journaling enabled).
  type: sqlite3-fk-wal
  # Quoted: the DSN contains ':' and '?', which are risky in plain scalars.
  uri: "file:aichats.db?_txlock=immediate"
  # SQLite should use a single connection; keep both limits at 1.
  max_open_conns: 1
  max_idle_conns: 1
# Prometheus-style metrics listener.
metrics:
  enabled: true
  # host:port quoted to avoid YAML colon/sexagesimal parsing pitfalls.
  listen: "0.0.0.0:9000"
# End-to-bridge encryption settings.
encryption:
  # Allow encrypted rooms at all.
  allow: true
  # Whether rooms are encrypted by default.
  default: false
  delete_keys:
    # NOTE(review): the flattened source made the nesting ambiguous;
    # upstream mautrix configs place ratchet_on_decrypt under delete_keys —
    # confirm against the bridge's config struct.
    ratchet_on_decrypt: false
# AI Chats-specific options (shared with the embedded example in bridges/ai/integrations_config.go)
network:
  # Connector-specific configuration lives under the `network:` section of the
  # main config file.
  beeper:
    user_mxid: ""
    base_url: ""
    token: ""
  models:
    providers:
      openai:
        api_key: ""
        base_url: "https://api.openai.com/v1"
        # Empty list means no model restriction / discovery default.
        models: []
      openrouter:
        api_key: ""
        base_url: "https://openrouter.ai/api/v1"
        models: []
      magic_proxy:
        api_key: ""
        base_url: ""
        models: []
    default_system_prompt: |
      You are a helpful, concise assistant.
      Ask clarifying questions when needed.
      Follow the user's intent and be accurate.
    # How long fetched model lists are cached before re-fetching.
    model_cache_duration: 6h
  messages:
    direct_chat:
      history_limit: 20
    group_chat:
      history_limit: 50
    queue:
      mode: "collect"
      debounce_ms: 1000
      cap: 20
      drop: "summarize"
  commands:
    owner_allow_from: []
  tool_approvals:
    enabled: true
    ttl_seconds: 600
    require_for_mcp: true
    # Block sequence instead of a single overlong flow list for readability.
    require_for_tools:
      - "message"
      - "cron"
      - "gravatar_set"
      - "create_agent"
      - "fork_agent"
      - "edit_agent"
      - "delete_agent"
      - "modify_room"
      - "sessions_send"
      - "sessions_spawn"
      - "run_internal_command"
  channels:
    matrix:
      reply_to_mode: "first"
  session:
    scope: "per-sender"
    main_key: "main"
  tools:
    web:
      search:
        provider: "exa"
        fallbacks: []
        exa:
          api_key: ""
          base_url: "https://api.exa.ai"
          type: "auto"
          num_results: 5
          include_text: false
          text_max_chars: 500
          highlights: true
      fetch:
        provider: "direct"
        fallbacks: ["exa"]
        exa:
          api_key: ""
          base_url: "https://api.exa.ai"
          include_text: true
          text_max_chars: 5000
        direct:
          enabled: true
          timeout_seconds: 30
          max_chars: 50000
          max_redirects: 3
    links:
      enabled: true
      max_urls_inbound: 3
      max_urls_outbound: 5
      fetch_timeout: 10s
      max_content_chars: 500
      # 10 MiB page cap, 5 MiB image cap.
      max_page_bytes: 10485760
      max_image_bytes: 5242880
      cache_ttl: 1h
    mcp:
      enable_stdio: false
    vfs:
      apply_patch:
        enabled: false
        allow_models: []
  media:
    concurrency: 2
    image:
      enabled: true
      prompt: "Describe the image."
      max_bytes: 10485760
      max_chars: 500
      timeout_seconds: 60
      models:
        - provider: "openrouter"
          model: "google/gemini-3-flash-preview"
    audio:
      enabled: true
      prompt: "Transcribe the audio."
      # Empty string: let the transcription model auto-detect the language.
      language: ""
      max_bytes: 20971520
      timeout_seconds: 60
      models:
        - provider: "openai"
          model: "gpt-4o-mini-transcribe"
    video:
      enabled: true
      prompt: "Describe the video."
      max_bytes: 52428800
      timeout_seconds: 120
      models:
        - provider: "openrouter"
          model: "google/gemini-3-flash-preview"
  agents:
    defaults:
      model:
        primary: ""
        fallbacks: []
      image_model:
        primary: ""
        fallbacks: []
      image_generation_model:
        primary: ""
        fallbacks: []
      pdf_model:
        primary: ""
        fallbacks: []
      pdf_engine: "mistral-ocr"
  # NOTE(review): the flattened source does not show whether `compaction`
  # sits at network level or under `agents:` — placed at network level here;
  # confirm against the connector's config struct.
  compaction:
    # cache-ttl keeps a cached compacted prompt for ttl; other modes can force
    # more aggressive recomputation or fallback behavior in runtime defaults.
    mode: "cache-ttl"
    # ttl applies to cached compaction snapshots when mode uses cache expiry.
    ttl: "1h"
    # enabled gates soft trimming, hard clear, summarization, and token reserves.
    enabled: true
    soft_trim_ratio: 0.3
    hard_clear_ratio: 0.5
    # keep a few recent assistant turns and require enough prunable text before trimming.
    keep_last_assistants: 3
    min_prunable_chars: 50000
    # soft trim keeps head/tail context from oversized tool results before full compaction.
    soft_trim_max_chars: 4000
    soft_trim_head_chars: 1500
    soft_trim_tail_chars: 1500
    hard_clear_enabled: true
    hard_clear_placeholder: "[Old tool result content cleared]"
    # summarization condenses old history before hard clearing it entirely.
    summarization_enabled: true
    summarization_model: "openai/gpt-5.2"
    max_summary_tokens: 500
    # safeguard preserves recent history/tokens; alternative modes may trade fidelity for space.
    compaction_mode: "safeguard"
    keep_recent_tokens: 20000
    max_history_share: 0.5
    reserve_tokens: 20000
    reserve_tokens_floor: 20000
    identifier_policy: "strict"
    post_compaction_refresh_prompt: "[Post-compaction context refresh]\nRe-anchor to the latest user intent and preserve unresolved tasks and identifiers."
    overflow_flush:
      # overflow flush runs a last tool-only pass near the soft threshold before compaction.
      enabled: true
      soft_threshold_tokens: 4000
      prompt: "Pre-compaction overflow flush. Persist any durable notes now if your tools support it. If nothing to store, reply with NO_REPLY."
      system_prompt: "Pre-compaction overflow flush turn. The session is near auto-compaction; persist durable notes if possible. You may reply, but usually NO_REPLY is correct."