-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathrun.sh
More file actions
executable file
·309 lines (267 loc) · 10.4 KB
/
run.sh
File metadata and controls
executable file
·309 lines (267 loc) · 10.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
#!/bin/bash
# ============================================================
# QuantLLM one-shot pipeline script
# Usage: bash /opt/quant-llm/run.sh [step]
#
# No argument: run the whole pipeline
# With an argument: run a single step
#   crawl      — data collection only (A-shares + multi-market)
#   recalc     — recompute technical indicators (no crawl; rebuild advanced from basic)
#   fund-flow  — crawl fund-flow data
#   convert    — data conversion only
#   predict    — build predictive training data (realized-return labels)
#   generate   — data augmentation (FinGPT + quant calculations + reasoning chains)
#   factors    — build per-stock factor files (PE/PB/ROE/northbound 5-day)
#   merge      — merge the training set only
#   train      — training only
#   export     — export GGUF
#   eval       — model evaluation
#   backtest   — backtest validation
#   trade-live — two-layer live trading decision (rules + Qwen)
#   rag-build  — build the RAG retrieval index
#   rag-serve  — start the RAG-augmented inference service
#   all        — full pipeline (crawl → train)
# ============================================================
set -e

# Project layout and service endpoint constants.
readonly PROJECT_DIR="/opt/quant-llm"
readonly SCRIPTS_DIR="$PROJECT_DIR/scripts"
readonly DATA_DIR="$PROJECT_DIR/training-data"
readonly VENV="$PROJECT_DIR/finetune-env/bin/activate"
readonly OLLAMA_URL="http://localhost:11434"

# ANSI colors for log output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Timestamped logging helpers. err() sends diagnostics to stderr
# and terminates the script with exit status 1.
log() { echo -e "${GREEN}[$(date '+%H:%M:%S')]${NC} $*"; }
warn() { echo -e "${YELLOW}[$(date '+%H:%M:%S')] WARN:${NC} $*"; }
err() { echo -e "${RED}[$(date '+%H:%M:%S')] ERROR:${NC} $*" >&2; exit 1; }

# Fail loudly (set -e would otherwise die without a message).
cd "$PROJECT_DIR" || err "无法进入目录: $PROJECT_DIR"
# ============================================================
# Environment sanity check: verify the fine-tuning virtualenv
# exists, activate it, and confirm the required Python packages
# are importable. Aborts the script on any failure.
# ============================================================
check_env() {
  log "环境检查..."
  [ -f "$VENV" ] || err "微调虚拟环境不存在: $VENV"
  source "$VENV"
  # Each import probe is silent; a failure aborts with a hint.
  python3 -c "import akshare" 2>/dev/null || err "akshare 未安装,运行: pip install akshare"
  python3 -c "import pandas" 2>/dev/null || err "pandas 未安装"
  python3 -c "import numpy" 2>/dev/null || err "numpy 未安装"
  log "环境检查通过"
}
# ============================================================
# Step 1: Data collection — full A-share history, then the
# multi-market crawl (futures + ETF + convertible bonds).
# Any crawler failure aborts via the script-level `set -e`.
# ============================================================
step_crawl() {
  log "========== Step 1: 数据采集 =========="

  log "[1/2] A股全量历史行情..."
  python3 "$SCRIPTS_DIR"/crawl_ashare.py
  log "A股采集完成"

  log "[2/2] 期货 + ETF + 可转债..."
  python3 "$SCRIPTS_DIR"/crawl_multi_market.py
  log "多市场采集完成"
}
# ============================================================
# Step 2: Data conversion — market data → training Q&A pairs.
# ============================================================

# Count direct entries of the given directories without parsing `ls`:
# `ls dir1 dir2 ...` prints "dir:" headers and blank separator lines,
# so empty-but-existing directories produced a nonzero count and the
# "no data" guard below was silently bypassed. `find` counts only real
# entries; a missing directory is skipped (stderr suppressed).
_count_entries() {
  find "$@" -mindepth 1 -maxdepth 1 2>/dev/null | wc -l
}

step_convert() {
  log "========== Step 2: 数据转换 =========="
  # Refuse to run before any market data has been crawled.
  local ashare_count multi_count
  ashare_count=$(_count_entries "$DATA_DIR/ashare/advanced/")
  multi_count=$(_count_entries "$DATA_DIR/futures/advanced/" "$DATA_DIR/etf/advanced/" "$DATA_DIR/cbond/advanced/")
  if [ "$ashare_count" -eq 0 ] && [ "$multi_count" -eq 0 ]; then
    err "没有行情数据,请先执行: bash run.sh crawl"
  fi
  log "全市场行情 → 训练数据..."
  python3 "$SCRIPTS_DIR/convert_all_to_training.py"
  log "转换完成"
}
# ============================================================
# Step 2.5: Data augmentation — FinGPT download plus two
# LLM-backed enrichment passes. Every sub-step is best-effort:
# a failure logs a warning and the pipeline continues.
# ============================================================
step_generate() {
  log "========== Step 2.5: 数据增强 =========="

  # FinGPT download only needs network access.
  log "[1/3] FinGPT A股预测数据..."
  python3 "$SCRIPTS_DIR/fetch_fingpt_data.py" || warn "FinGPT 数据下载失败,跳过"

  # The remaining passes require a reachable ollama server.
  if ! curl -s "$OLLAMA_URL/api/tags" >/dev/null 2>&1; then
    warn "ollama 不可用,跳过量化计算和推理链增强"
  else
    log "[2/3] 量化计算种子扩展 (qwen3:14b)..."
    python3 "$SCRIPTS_DIR/generate_quant_calculations.py" || warn "量化计算扩展失败,跳过"
    log "[3/3] 推理链增强 (deepseek-r1:32b)..."
    python3 "$SCRIPTS_DIR/add_reasoning_chains.py" || warn "推理链增强失败,跳过"
  fi

  log "数据增强完成"
}
# ============================================================
# Step 2.6: Factor data build (fundamentals + northbound flow).
# Writes the per-stock factor file consumed by later steps.
# ============================================================
step_factors() {
  log "========== Step 2.6: 因子数据构建 =========="
  local -a builder_args=(--max-symbols 1200 --roe-limit 260)
  python3 "$SCRIPTS_DIR/build_stock_factors.py" "${builder_args[@]}"
  log "因子文件生成完成: $DATA_DIR/factors/stock_factors_latest.json"
}
# ============================================================
# Step 3: Merge all data sources into the final training set
# (merged_train_v4.jsonl) and report its line count.
# ============================================================
step_merge() {
  log "========== Step 3: 合并训练集 =========="
  log "合并最终训练集..."
  python3 "$SCRIPTS_DIR/merge_and_retrain.py"
  # Declaration split from assignment so a failed substitution
  # is not masked by `local`'s own exit status.
  local v4_count
  v4_count=$(wc -l < "$DATA_DIR/merged_train_v4.jsonl")
  log "最终训练集: merged_train_v4.jsonl (${v4_count} 条)"
}
# ============================================================
# Step 4: Model training (QLoRA).
# ============================================================

# Currently used memory (MB) of the first GPU, as reported by
# nvidia-smi. (Previously this query was duplicated inline.)
_gpu_mem_used() {
  nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | head -1
}

# Ask ollama to unload its resident models (keep_alive=0) so the
# GPU is free for training. Best-effort: failures are ignored.
# (Previously this curl pair was duplicated inline.)
_unload_ollama() {
  curl -s "$OLLAMA_URL/api/generate" -d '{"model":"qwen3:14b","keep_alive":0}' >/dev/null 2>&1 || true
  curl -s "$OLLAMA_URL/api/generate" -d '{"model":"deepseek-r1:32b","keep_alive":0}' >/dev/null 2>&1 || true
}

step_train() {
  log "========== Step 4: QLoRA 训练 =========="
  # Either the v4 merged set or the older v3_clean set is acceptable.
  if [ ! -f "$DATA_DIR/merged_train_v4.jsonl" ] && [ ! -f "$DATA_DIR/merged_train_v3_clean.jsonl" ]; then
    err "训练数据不存在,请先执行: bash run.sh merge"
  fi
  # A working GPU is mandatory for QLoRA training.
  if ! nvidia-smi &>/dev/null; then
    err "未检测到 GPU"
  fi
  local gpu_mem
  gpu_mem=$(_gpu_mem_used)
  if [ "$gpu_mem" -gt 5000 ]; then
    warn "GPU 显存已占用 ${gpu_mem}MB,尝试释放 ollama 模型..."
    _unload_ollama
    sleep 5
    gpu_mem=$(_gpu_mem_used)
    if [ "$gpu_mem" -gt 5000 ]; then
      err "GPU 显存仍占用 ${gpu_mem}MB,无法启动训练。请手动释放显存。"
    fi
    log "GPU 显存已释放"
  fi
  log "开始训练..."
  # NOTE(review): training invokes the venv's python3 directly, unlike
  # the other steps which rely on the activated environment's PATH.
  "$PROJECT_DIR/finetune-env/bin/python3" "$SCRIPTS_DIR/train.py"
  log "训练完成!模型输出目录见 config.yaml -> model.output_dir"
}
# ============================================================
# Step 5: Export the trained model in GGUF format.
# ============================================================
step_export() {
  log "========== Step 5: 导出 GGUF =========="
  python3 "$SCRIPTS_DIR"/export_gguf.py
  log "导出完成"
}
# ============================================================
# Step 6: Model evaluation. GPU memory held by ollama is
# released first (best-effort) so the evaluator can load
# the model; a short pause lets the unload settle.
# ============================================================
step_eval() {
  log "========== Step 6: 模型评估 =========="
  curl -s "$OLLAMA_URL/api/generate" -d '{"model":"qwen3:14b","keep_alive":0}' >/dev/null 2>&1 || true
  curl -s "$OLLAMA_URL/api/generate" -d '{"model":"deepseek-r1:32b","keep_alive":0}' >/dev/null 2>&1 || true
  sleep 3
  python3 "$SCRIPTS_DIR"/evaluate.py
  log "评估完成"
}
# ============================================================
# Main entry point: dispatch on the requested step.
# ============================================================
STEP="${1:-all}"

echo ""
echo "============================================================"
echo " QuantLLM — 量化交易 AI 助手"
echo " 执行步骤: $STEP"
echo " 时间: $(date '+%Y-%m-%d %H:%M:%S')"
echo "============================================================"
echo ""

check_env

case "$STEP" in
  crawl)
    step_crawl
    ;;
  recalc)
    log "========== 重算技术指标 =========="
    python3 "$SCRIPTS_DIR/crawl_ashare.py" --recalc
    python3 "$SCRIPTS_DIR/crawl_multi_market.py" --recalc
    log "指标重算完成"
    ;;
  fund-flow)
    log "========== 资金流数据采集 =========="
    python3 "$SCRIPTS_DIR/crawl_fund_flow.py"
    log "资金流采集完成"
    ;;
  convert)
    step_convert
    ;;
  predict)
    log "========== 预测性训练数据 =========="
    python3 "$SCRIPTS_DIR/generate_predictive_data.py"
    log "预测数据生成完成"
    ;;
  generate)
    step_generate
    ;;
  factors)
    step_factors
    ;;
  merge)
    step_merge
    ;;
  train)
    step_train
    ;;
  export)
    step_export
    ;;
  eval)
    step_eval
    ;;
  backtest)
    log "========== 回测验证 =========="
    python3 "$SCRIPTS_DIR/backtest_signals.py"
    log "回测完成"
    ;;
  trade-live)
    log "========== 双层实盘决策(规则+Qwen) =========="
    # Drop the step name so any remaining CLI args flow through
    # to the trading script unchanged.
    shift || true
    python3 "$SCRIPTS_DIR/trade_live_qwen.py" "$@"
    log "交易决策完成"
    ;;
  all)
    # NOTE(review): 'all' runs crawl → train only; predict / export /
    # eval / backtest stay manual — confirm this is intentional.
    step_crawl
    step_convert
    step_generate
    step_factors
    step_merge
    step_train
    ;;
  rag-build)
    log "========== RAG: 构建索引 =========="
    python3 "$SCRIPTS_DIR/rag_build_index.py"
    log "RAG 索引构建完成"
    ;;
  rag-serve)
    log "========== RAG: 增强推理服务 =========="
    python3 "$SCRIPTS_DIR/rag_serve.py"
    ;;
  *)
    echo "用法: bash run.sh [crawl|recalc|fund-flow|convert|predict|generate|factors|merge|train|export|eval|backtest|trade-live|rag-build|rag-serve|all]"
    echo ""
    echo " crawl 数据采集(A股+期货+ETF+可转债)"
    echo " recalc 重算技术指标(从basic重算,不爬取)"
    echo " fund-flow 爬取资金流数据"
    echo " convert 行情数据 → 训练问答对"
    echo " predict 生成预测性训练数据(实际收益标签)"
    echo " generate 数据增强(FinGPT+量化计算+推理链)"
    echo " factors 构建个股因子文件(PE/PB/ROE/北向5日)"
    echo " merge 合并所有数据源 → 最终训练集"
    echo " train QLoRA 微调训练"
    echo " export 导出 GGUF 格式"
    echo " eval 模型评估"
    echo " backtest 回测验证(对比沪深300)"
    echo " trade-live 双层实盘决策(规则+Qwen)"
    echo " rag-build 构建 RAG 检索索引"
    echo " rag-serve 启动 RAG 增强推理服务"
    echo " all 执行全部流程(默认)"
    exit 1
    ;;
esac

echo ""
log "========== 完成 =========="