-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathmain.py
More file actions
23 lines (19 loc) · 673 Bytes
/
main.py
File metadata and controls
23 lines (19 loc) · 673 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
"""
The script to run all experiments in parallel.
"""
from experiment import run_experiment
from joblib import Parallel, delayed, cpu_count
# Experiment names and dataset paths
# dl10 -> distance limit is 10
# nfl50 -> number of files limit is 50
# sws365 -> sliding window size is 365
# Experiment names and dataset paths.
# Naming convention encoded in each experiment name:
#   dl10   -> distance limit is 10
#   nfl50  -> number of files limit is 50
#   sws365 -> sliding window size is 365 (days)
experiments = [
    ("pig_dl10_nfl50_sws365", "data/pig_change_sets.json"),
    ("hive_dl10_nfl50_sws365", "data/hive_change_sets.json"),
    ("hadoop_dl10_nfl50_sws365", "data/hadoop_change_sets.json"),
]

if __name__ == "__main__":
    # Guard is required: joblib's default loky backend (like multiprocessing
    # "spawn") re-imports this module in each worker process. Without the
    # guard, every worker would re-trigger the Parallel call on import.
    # n_jobs=-1 -> use all available CPU cores, one experiment per worker.
    Parallel(n_jobs=-1)(
        delayed(run_experiment)(experiment_name, dataset_path)
        for experiment_name, dataset_path in experiments
    )