# config.example.toml
# Example configuration — 51 lines (43 loc), 1.53 KB
[server]
host = "0.0.0.0"
port = 8080
worker_threads = 4
timeout_seconds = 60
# Max requests per IP per minute (can also set via MEMORYOS_RATE_LIMIT env var).
rate_limit_per_minute = 100

[llm]
default_provider = "openai"
default_model = "gpt-4o-mini"

# Add one or more providers below. The key (e.g. "openai") must match default_provider.
# Supported provider types: openai, gemini, claude, ollama, deepseek, openrouter, azure-openai, cohere, groq, mistral

[llm.providers.openai]
type = "openai"
base_url = "https://api.openai.com/v1"
api_key = "sk-your-openai-key"
# api_key_env = "OPENAI_API_KEY" # Alternative: read from env var

# Example: add more providers
#
# [llm.providers.ollama]
# type = "ollama"
# base_url = "http://localhost:11434/v1"
# api_key = ""
#
# [llm.providers.deepseek]
# type = "deepseek"
# base_url = "https://api.deepseek.com/v1"
# api_key_env = "DEEPSEEK_API_KEY"

[storage.redis]
url = "redis://localhost:6379"
ttl_seconds = 3600
max_messages = 20

[storage.vector]
# NOTE(review): port 6334 looks like Qdrant's gRPC port (Qdrant is mentioned
# under [auth]) — confirm which vector store and protocol this expects.
url = "http://localhost:6334"

# Embedding configuration (optional).
# If not configured, a simple fallback embedding is used instead.
[embedding]
# api_key = "sk-your-embedding-key" # Or set OPENAI_API_KEY env var
# base_url = "https://api.openai.com/v1"
# model = "text-embedding-3-small"
# Authentication configuration.
# When enabled = false, the key lists below are ignored.
[auth]
enabled = false
# admin_keys = ["admin-secret-key"] # Admin API keys for management endpoints
# api_keys = ["user-api-key-1"] # Regular API keys for normal endpoints
# use_redis_store = false # Use Qdrant for persistent API key storage
# NOTE(review): the key is named use_redis_store but its comment says Qdrant —
# confirm against the consuming application which backend this actually toggles.