#!/usr/bin/env python3
"""
Run all structural break detectors and collect comprehensive results.

Usage:
    python run_all_experiments.py --data-dir ./data --output results.csv
    python run_all_experiments.py --data-dir ./data --output results.csv --skip-train
    python run_all_experiments.py --data-dir ./data --detectors xgb_70_statistical welch_ttest
"""
import os
import sys
import time
import argparse
import importlib.util
from pathlib import Path
from datetime import datetime
import traceback
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from sklearn.metrics import (
    roc_auc_score, accuracy_score, recall_score, f1_score, confusion_matrix
)


def get_all_detectors(base_dir: str) -> list:
    """Get all detector directories (those containing features.py, model.py, and main.py)."""
    detectors = []
    for item in sorted(os.listdir(base_dir)):
        item_path = os.path.join(base_dir, item)
        if os.path.isdir(item_path):
            if all(os.path.exists(os.path.join(item_path, f))
                   for f in ['features.py', 'model.py', 'main.py']):
                detectors.append(item)
    return detectors


def load_module_from_path(module_name: str, file_path: str):
    """Dynamically load a Python module from a file path."""
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module


def calculate_metrics(y_true: np.ndarray, y_prob: np.ndarray, threshold: float = 0.5) -> dict:
    """Calculate metrics: ROC AUC, F1, accuracy, recall, TP, FP, TN, FN."""
    y_true = np.array(y_true).astype(int)
    y_prob = np.array(y_prob)
    y_pred = (y_prob >= threshold).astype(int)

    metrics = {}

    # Confusion matrix
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    metrics['TP'] = int(tp)
    metrics['FP'] = int(fp)
    metrics['TN'] = int(tn)
    metrics['FN'] = int(fn)

    # Core metrics
    metrics['accuracy'] = round(accuracy_score(y_true, y_pred), 4)
    metrics['recall'] = round(recall_score(y_true, y_pred, zero_division=0), 4)
    metrics['f1_score'] = round(f1_score(y_true, y_pred, zero_division=0), 4)

    # ROC AUC is undefined when y_true contains only one class
    try:
        metrics['roc_auc'] = round(roc_auc_score(y_true, y_prob), 4)
    except ValueError:
        metrics['roc_auc'] = None

    return metrics
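
# A quick hand-checked sanity example for calculate_metrics (hypothetical
# inputs, not taken from any experiment run):
#
#     calculate_metrics(np.array([0, 1, 1, 0]), np.array([0.2, 0.8, 0.4, 0.6]))
#     # -> {'TP': 1, 'FP': 1, 'TN': 1, 'FN': 1, 'accuracy': 0.5,
#     #     'recall': 0.5, 'f1_score': 0.5, 'roc_auc': 0.75}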


def run_detector(detector_name: str, base_dir: str, data_dir: str,
                 skip_train: bool = False, max_samples: int = 8000) -> dict:
    """Run a single detector and return comprehensive results."""
    detector_dir = os.path.join(base_dir, detector_name)
    model_path = os.path.join(detector_dir, 'model.joblib')

    result = {
        'detector': detector_name,
        'train_time': None,
        'eval_time': None,
        'status': 'pending',
        'error': None
    }

    # Run each detector from its own directory so relative paths resolve
    original_dir = os.getcwd()
    os.chdir(detector_dir)
    if detector_dir not in sys.path:
        sys.path.insert(0, detector_dir)

    try:
        main_module = load_module_from_path(
            f'{detector_name}_main',
            os.path.join(detector_dir, 'main.py')
        )

        # Training (skipped only when --skip-train is set and a saved model exists)
        if not skip_train or not os.path.exists(model_path):
            print(f"\n{'='*60}")
            print(f"Training: {detector_name}")
            print(f"{'='*60}")
            train_start = time.time()
            if hasattr(main_module, 'train'):
                import inspect
                sig = inspect.signature(main_module.train)
                params = list(sig.parameters.keys())
                if 'max_samples' in params:
                    main_module.train(data_dir, model_path, max_samples=max_samples)
                else:
                    main_module.train(data_dir, model_path)
            else:
                raise AttributeError("No train function found")
            train_time = time.time() - train_start
            result['train_time'] = round(train_time, 2)
            print(f"Training completed in {train_time:.2f}s")

        # Evaluation with full metrics
        print(f"\n{'='*60}")
        print(f"Evaluating: {detector_name}")
        print(f"{'='*60}")
        eval_start = time.time()

        # Load test labels and collect per-series predictions
        y_test = pd.read_parquet(f"{data_dir}/y_test.reduced.parquet").squeeze()
        predictions = []
        true_labels = []
        if hasattr(main_module, 'infer'):
            for series_id, prob in main_module.infer(data_dir, model_path):
                predictions.append(prob)
                true_labels.append(y_test.loc[series_id])
        else:
            raise AttributeError("No infer function found")
        eval_time = time.time() - eval_start
        result['eval_time'] = round(eval_time, 2)

        # Calculate all metrics
        metrics = calculate_metrics(true_labels, predictions)
        result.update(metrics)
        result['status'] = 'success'

        print(f"Evaluation completed in {eval_time:.2f}s")
        print(f"ROC AUC: {result['roc_auc']}")
        print(f"Accuracy: {result['accuracy']}")
        print(f"F1 Score: {result['f1_score']}")
        print(f"Confusion Matrix: TP={result['TP']}, FP={result['FP']}, TN={result['TN']}, FN={result['FN']}")

    except Exception as e:
        result['status'] = 'error'
        result['error'] = str(e)
        print(f"ERROR in {detector_name}: {e}")
        traceback.print_exc()
    finally:
        # Restore the working directory and unload the detector's modules so
        # the next detector's features/model/main imports do not collide
        os.chdir(original_dir)
        if detector_dir in sys.path:
            sys.path.remove(detector_dir)
        modules_to_remove = [k for k in sys.modules.keys()
                             if detector_name in k or k in ['features', 'model', 'main']]
        for mod in modules_to_remove:
            del sys.modules[mod]

    return result
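
# For reference: the interface each detector's main.py is expected to expose,
# as a minimal sketch inferred from the calls in run_detector above (the real
# implementations live in the individual detector directories):
#
#     def train(data_dir: str, model_path: str, max_samples: int = 8000) -> None:
#         """Fit on the training data in data_dir and save the model to
#         model_path. max_samples is optional; run_detector inspects the
#         signature before passing it."""
#
#     def infer(data_dir: str, model_path: str):
#         """Yield (series_id, probability_of_break) for each test series."""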


def run_all_detectors(base_dir: str, data_dir: str, detectors: list = None,
                      skip_train: bool = False, max_samples: int = 8000) -> pd.DataFrame:
    """Run all detectors and return a results DataFrame."""
    if detectors is None:
        detectors = get_all_detectors(base_dir)

    print(f"\nFound {len(detectors)} detectors to run:")
    for d in detectors:
        print(f" - {d}")

    results = []
    for i, detector in enumerate(detectors, 1):
        print(f"\n{'#'*60}")
        print(f"# [{i}/{len(detectors)}] {detector}")
        print(f"{'#'*60}")
        result = run_detector(detector, base_dir, data_dir, skip_train, max_samples)
        results.append(result)

        # Save intermediate results after each detector so a crash
        # does not lose completed runs
        df = pd.DataFrame(results)
        df.to_csv(os.path.join(base_dir, 'results_intermediate.csv'), index=False)

    return pd.DataFrame(results)


def print_summary(df: pd.DataFrame):
    """Print results summary."""
    print("\n" + "="*110)
    print("RESULTS SUMMARY")
    print("="*110)

    df_sorted = df.sort_values('roc_auc', ascending=False, na_position='last')

    # Results table
    print(f"\n{'Rank':<4} {'Detector':<32} {'ROC AUC':<9} {'F1':<9} {'Accuracy':<10} {'Recall':<9} {'TP':<6} {'FP':<6} {'TN':<6} {'FN':<6}")
    print("-"*110)
    for i, row in enumerate(df_sorted.itertuples(), 1):
        roc = f"{row.roc_auc:.4f}" if pd.notna(row.roc_auc) else "N/A"
        f1 = f"{row.f1_score:.4f}" if hasattr(row, 'f1_score') and pd.notna(row.f1_score) else "N/A"
        acc = f"{row.accuracy:.4f}" if hasattr(row, 'accuracy') and pd.notna(row.accuracy) else "N/A"
        rec = f"{row.recall:.4f}" if hasattr(row, 'recall') and pd.notna(row.recall) else "N/A"
        tp = str(int(row.TP)) if hasattr(row, 'TP') and pd.notna(row.TP) else "N/A"
        fp = str(int(row.FP)) if hasattr(row, 'FP') and pd.notna(row.FP) else "N/A"
        tn = str(int(row.TN)) if hasattr(row, 'TN') and pd.notna(row.TN) else "N/A"
        fn = str(int(row.FN)) if hasattr(row, 'FN') and pd.notna(row.FN) else "N/A"
        print(f"{i:<4} {row.detector:<32} {roc:<9} {f1:<9} {acc:<10} {rec:<9} {tp:<6} {fp:<6} {tn:<6} {fn:<6}")
    print("-"*110)

    # Best performers
    successful = df[df['status'] == 'success']
    if len(successful) > 0:
        print(f"\nBest Performers (successful runs: {len(successful)}/{len(df)}):")
        for metric in ['roc_auc', 'f1_score', 'accuracy', 'recall']:
            if metric in successful.columns:
                values = successful[metric].dropna()
                if len(values) > 0:
                    best_idx = successful[metric].idxmax()
                    best_detector = successful.loc[best_idx, 'detector']
                    print(f" Best {metric:12s}: {values.max():.4f} ({best_detector})")
        if 'train_time' in successful.columns:
            print(f" Total train time: {successful['train_time'].sum():.1f}s")

    # Failed detectors
    failed = df[df['status'] == 'error']
    if len(failed) > 0:
        print(f"\nFailed detectors ({len(failed)}):")
        for _, row in failed.iterrows():
            print(f" - {row['detector']}: {row['error'][:50]}...")


def save_results(df: pd.DataFrame, output_path: str, base_dir: str):
    """Save results to CSV and markdown."""
    # Reorder columns
    priority_cols = [
        'detector', 'status', 'roc_auc', 'f1_score', 'accuracy', 'recall',
        'TP', 'FP', 'TN', 'FN',
        'train_time', 'eval_time', 'error'
    ]
    cols = [c for c in priority_cols if c in df.columns]
    cols += [c for c in df.columns if c not in cols]
    df = df[cols]

    # Save CSV
    df.to_csv(output_path, index=False)
    print(f"\nResults saved to: {output_path}")

    # Save markdown
    md_path = output_path.replace('.csv', '.md')
    df_sorted = df.sort_values('roc_auc', ascending=False, na_position='last')
    with open(md_path, 'w') as f:
        f.write("# Structural Break Detection - Results\n\n")
        f.write(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

        # Results table
        f.write("## Performance Ranking\n\n")
        f.write("| Rank | Detector | ROC AUC | F1 | Accuracy | Recall | TP | FP | TN | FN |\n")
        f.write("|------|----------|---------|-----|----------|--------|----|----|----|----|\n")
        for i, row in enumerate(df_sorted.itertuples(), 1):
            roc = f"{row.roc_auc:.4f}" if pd.notna(row.roc_auc) else "N/A"
            f1 = f"{row.f1_score:.4f}" if hasattr(row, 'f1_score') and pd.notna(row.f1_score) else "N/A"
            acc = f"{row.accuracy:.4f}" if hasattr(row, 'accuracy') and pd.notna(row.accuracy) else "N/A"
            rec = f"{row.recall:.4f}" if hasattr(row, 'recall') and pd.notna(row.recall) else "N/A"
            tp = str(int(row.TP)) if hasattr(row, 'TP') and pd.notna(row.TP) else "N/A"
            fp = str(int(row.FP)) if hasattr(row, 'FP') and pd.notna(row.FP) else "N/A"
            tn = str(int(row.TN)) if hasattr(row, 'TN') and pd.notna(row.TN) else "N/A"
            fn = str(int(row.FN)) if hasattr(row, 'FN') and pd.notna(row.FN) else "N/A"
            f.write(f"| {i} | `{row.detector}` | {roc} | {f1} | {acc} | {rec} | {tp} | {fp} | {tn} | {fn} |\n")

        # Best performers
        successful = df[df['status'] == 'success']
        if len(successful) > 0:
            f.write("\n## Best Performers\n\n")
            f.write(f"- **Total Detectors**: {len(df)}\n")
            f.write(f"- **Successful**: {len(successful)}\n")
            f.write(f"- **Failed**: {len(df) - len(successful)}\n\n")
            for metric in ['roc_auc', 'f1_score', 'accuracy', 'recall']:
                if metric in successful.columns:
                    values = successful[metric].dropna()
                    if len(values) > 0:
                        best_idx = successful[metric].idxmax()
                        best_detector = successful.loc[best_idx, 'detector']
                        f.write(f"- **Best {metric}**: {values.max():.4f} (`{best_detector}`)\n")

    print(f"Markdown saved to: {md_path}")


def main():
    parser = argparse.ArgumentParser(description="Run all structural break detectors")
    parser.add_argument("--data-dir", required=True, help="Path to data directory")
    parser.add_argument("--output", default="results.csv", help="Output CSV file")
    parser.add_argument("--skip-train", action="store_true", help="Skip training, only evaluate")
    parser.add_argument("--max-samples", type=int, default=8000, help="Max training samples")
    parser.add_argument("--detectors", nargs="+", help="Specific detectors to run")
    args = parser.parse_args()

    base_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.abspath(args.data_dir)

    if not os.path.exists(data_dir):
        print(f"Error: Data directory not found: {data_dir}")
        sys.exit(1)

    required_files = ['X_train.parquet', 'y_train.parquet']
    for f in required_files:
        if not os.path.exists(os.path.join(data_dir, f)):
            print(f"Error: Required file not found: {f}")
            sys.exit(1)

    print("="*80)
    print("STRUCTURAL BREAK DETECTION - EXPERIMENT RUNNER")
    print("="*80)
    print(f"Base directory: {base_dir}")
    print(f"Data directory: {data_dir}")
    print(f"Max samples: {args.max_samples}")
    print(f"Skip training: {args.skip_train}")

    df = run_all_detectors(
        base_dir=base_dir,
        data_dir=data_dir,
        detectors=args.detectors,
        skip_train=args.skip_train,
        max_samples=args.max_samples
    )

    print_summary(df)

    output_path = os.path.join(base_dir, args.output)
    save_results(df, output_path, base_dir)

    print("\nDone!")


if __name__ == "__main__":
    main()