# Mind Daemon Configuration File
# Copy this file to .env and fill in your actual values
# =============================================================================
# BCI (Brain-Computer Interface) Configuration
# =============================================================================
# Emotiv Cortex API Credentials
# Get these from your Emotiv account at emotiv.com
EMOTIV_CLIENT_ID=your_emotiv_client_id_here
EMOTIV_CLIENT_SECRET=your_emotiv_client_secret_here
# BCI Device Configuration
BCI_DEVICE_TYPE=emotiv
BCI_SAMPLING_RATE=128
BCI_CHANNELS=14
# =============================================================================
# LLM API Configuration
# =============================================================================
# MiniMax API (for intelligent decision making)
# Get API key from https://api.minimaxi.com (JWT token format)
MINIMAX_API_KEY=your_minimax_api_key_here
# MiniMax Group ID (optional, may be required in some cases)
MINIMAX_GROUP_ID=your_group_id_here
# MiniMax API endpoint (usually no need to change)
MINIMAX_BASE_URL=https://api.minimaxi.com/v1/text/chatcompletion_v2
# ===== RDK X5 Remote Connection Configuration =====
# `192.168.128.10` is the default address when the dev board is connected via the Type-C quick-connect port
RDK_HOST=192.168.128.10
RDK_USER=root
RDK_PASSWORD=root
# MiniMax model selection:
# - MiniMax-M1: Inference model, more output tokens, recommend streaming
# - MiniMax-Text-01: Text model, suitable for structured output
MINIMAX_MODEL=MiniMax-Text-01
# OpenAI API (alternative/backup LLM)
OPENAI_API_KEY=your_openai_api_key_here
OPENAI_MODEL=gpt-3.5-turbo
# =============================================================================
# System Paths Configuration
# =============================================================================
# Music directory (contains focus/ and relax/ subdirectories)
MUSIC_DIR=/Users/m3airmima0000/Mind-Daemon/music
# Window.py path for halo effects
WINDOW_PY_PATH=/Users/m3airmima0000/Mind-Daemon/src/mind_daemon/peripheral/window.py
# Data storage directory
DATA_DIR=/Users/m3airmima0000/Mind-Daemon/data
# =============================================================================
# Server Configuration
# =============================================================================
# WebSocket server settings
WEBSOCKET_HOST=localhost
WEBSOCKET_PORT=8889
# TCP socket server settings (legacy)
TCP_HOST=localhost
TCP_PORT=8888
# =============================================================================
# System Behavior Configuration
# =============================================================================
# Analysis intervals (in seconds)
STATE_ANALYSIS_INTERVAL=1
LLM_ANALYSIS_INTERVAL=300
ENVIRONMENT_CONTROL_INTERVAL=10
# Threshold values for state detection (0.0 - 1.0)
ATTENTION_THRESHOLD=0.6
STRESS_THRESHOLD=0.7
FATIGUE_THRESHOLD=0.8
RELAXATION_THRESHOLD=0.5
# Music player settings
MUSIC_VOLUME=0.6
MUSIC_SWITCH_COOLDOWN=60
# Halo controller settings
HALO_ACTIVATION_COOLDOWN=30
HALO_COLOR_CHANGE_COOLDOWN=15
# =============================================================================
# Development & Debug Configuration
# =============================================================================
# Logging level (DEBUG, INFO, WARNING, ERROR)
LOG_LEVEL=INFO
# Enable development mode (uses mock data when BCI not available)
DEV_MODE=false
# Enable verbose output
VERBOSE=false
# Data collection settings
ENABLE_DATA_LOGGING=true
ROLLING_WINDOW_SIZE=100