-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathenv.example
More file actions
131 lines (121 loc) · 4.35 KB
/
env.example
File metadata and controls
131 lines (121 loc) · 4.35 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
# ============================================
# Application Configuration
# ============================================
APP_NAME=RAG System
APP_ENV=development
APP_DEBUG=True
APP_HOST=0.0.0.0
APP_PORT=8000
API_VERSION=v1
# Keep-alive timeout for the API (seconds)
APP_TIMEOUT_KEEP_ALIVE=30
# ============================================
# Qdrant Vector Database Configuration
# ============================================
# Qdrant server URL (localhost when running locally, the service name when using Docker)
QDRANT_URL=http://localhost:6333
# Qdrant API key (leave empty if not needed)
QDRANT_API_KEY=
# Connection timeout (seconds)
QDRANT_TIMEOUT=30
# ============================================
# Ollama LLM Configuration
# ============================================
# Ollama server URL (localhost when running locally, the service name when using Docker)
OLLAMA_URL=http://localhost:11434
# Default LLM model (llama2, mistral, phi, llama3, etc.)
# Note: When using Docker Compose, this model will be automatically pulled on startup
# Common models: llama2, mistral, phi, llama3, codellama, gpt4all, etc.
# For a full list: https://ollama.com/library
OLLAMA_MODEL=llama2
# Request timeout (seconds)
OLLAMA_TIMEOUT=120
# LLM temperature (0.0 - 2.0)
OLLAMA_TEMPERATURE=0.7
# Maximum number of tokens in the response
OLLAMA_MAX_TOKENS=2000
# ============================================
# Embedding Model Configuration
# ============================================
# Granite embedding model - IBM's small English embedding model
EMBEDDING_MODEL_NAME=ibm-granite/granite-embedding-small-english-r2
# Batch size for embedding generation (1-64, reduce if you hit OOM errors)
EMBEDDING_BATCH_SIZE=32
# Enable caching for embeddings
EMBEDDING_CACHE_ENABLED=True
# ============================================
# Document Processing Configuration
# ============================================
# Maximum file size (bytes) - 10MB
DOCUMENT_MAX_SIZE=10485760
# Chunk size (characters)
DOCUMENT_CHUNK_SIZE=1000
# Overlap between chunks (characters)
DOCUMENT_CHUNK_OVERLAP=200
# Supported file formats (comma-separated)
DOCUMENT_SUPPORTED_FORMATS=pdf,docx,txt,md
# ============================================
# Storage Configuration
# ============================================
# Path where documents are stored
STORAGE_STORAGE_PATH=./data/documents
# Model cache path
STORAGE_MODEL_CACHE_PATH=./data/models
# General cache path
STORAGE_CACHE_PATH=./data/cache
# Log path
STORAGE_LOG_PATH=./logs
# ============================================
# Logging Configuration
# ============================================
# Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_LEVEL=INFO
# Log format: json or text
LOG_FORMAT=json
# ============================================
# Security Configuration
# ============================================
# JWT Secret Key (CHANGE this in production!)
JWT_SECRET_KEY=change-this-secret-key-in-production
# JWT Algorithm
JWT_ALGORITHM=HS256
# JWT expiration (hours)
JWT_EXPIRATION_HOURS=24
# Enable rate limiting
RATE_LIMIT_ENABLED=True
# Maximum number of requests per minute
RATE_LIMIT_REQUESTS=100
# Rate limit time window (seconds)
RATE_LIMIT_WINDOW=60
# ============================================
# Celery Task Queue Configuration
# ============================================
# Redis broker URL (localhost when running locally, the service name when using Docker)
CELERY_BROKER_URL=redis://localhost:6379/0
# Redis result backend URL
CELERY_RESULT_BACKEND=redis://localhost:6379/0
# Task serializer
CELERY_TASK_SERIALIZER=json
# Result serializer
CELERY_RESULT_SERIALIZER=json
# Accepted content types
CELERY_ACCEPT_CONTENT=json
# Timezone
CELERY_TIMEZONE=UTC
# Enable UTC
CELERY_ENABLE_UTC=True
# Track task started
CELERY_TASK_TRACK_STARTED=True
# Task time limit (seconds)
CELERY_TASK_TIME_LIMIT=3600
# Task soft time limit (seconds)
CELERY_TASK_SOFT_TIME_LIMIT=3000
# Worker prefetch multiplier
CELERY_WORKER_PREFETCH_MULTIPLIER=4
# Max tasks per worker child
CELERY_WORKER_MAX_TASKS_PER_CHILD=1000
# Worker pool type: 'prefork', 'threads', 'solo', or 'gevent'
# Use 'solo' (or 'threads') to avoid SIGSEGV with ML models (PyTorch/sentence-transformers)
CELERY_WORKER_POOL=solo