forked from mahmoodlab/CLAM
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcreate_splits_seq.py
More file actions
58 lines (47 loc) · 2.48 KB
/
create_splits_seq.py
File metadata and controls
58 lines (47 loc) · 2.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import pdb
import os
import pandas as pd
from dataset_modules.dataset_generic import Generic_WSI_Classification_Dataset, Generic_MIL_Dataset, save_splits
from utils.task_utils import get_dataset_args, tasks
import argparse
import numpy as np
parser = argparse.ArgumentParser(description='Creating splits for whole slide classification')
parser.add_argument('--label_frac', type=float, default=1.0,
                    help='fraction of labels (default: 1)')
parser.add_argument('--seed', type=int, default=1,
                    help='random seed (default: 1)')
parser.add_argument('--k', type=int, default=1,
                    help='number of splits (default: 1)')
parser.add_argument('--task', type=str, choices=list(tasks.keys()),
                    help='task name')
parser.add_argument('--val_frac', type=float, default=0.1,
                    help='fraction of labels for validation (default: 0.1)')
parser.add_argument('--test_frac', type=float, default=0.1,
                    help='fraction of labels for test (default: 0.1)')
args = parser.parse_args()

# Resolve the task-specific dataset configuration and build the dataset.
# NOTE(review): this runs at import time, not only under __main__ — presumably
# intentional for this script, but confirm nothing imports this module.
dataset_args = get_dataset_args(args, args.task)
dataset = Generic_WSI_Classification_Dataset(**dataset_args)

# Per-class *patient* counts: patient_cls_ids groups patient ids by class, so
# val/test sizes are allocated proportionally per class at the patient level.
# (The original name `num_slides_cls` was misleading — these are patients.)
num_patients_cls = np.array([len(cls_ids) for cls_ids in dataset.patient_cls_ids])
val_num = np.round(num_patients_cls * args.val_frac).astype(int)
test_num = np.round(num_patients_cls * args.test_frac).astype(int)
if __name__ == '__main__':
    import shutil

    # A positive --label_frac means "use exactly that fraction"; any
    # non-positive value is a sentinel meaning "sweep a fixed set of fractions".
    if args.label_frac > 0:
        label_fracs = [args.label_frac]
    else:
        label_fracs = [0.1, 0.25, 0.5, 0.75, 1.0]

    for lf in label_fracs:
        # One output directory per (task, label fraction), e.g. splits/<task>_100.
        split_dir = os.path.join('splits', f'{args.task}_{int(lf * 100)}')

        # Start from a clean directory so stale split files from a previous
        # run cannot mix with the new ones.
        if os.path.exists(split_dir):
            shutil.rmtree(split_dir)
        os.makedirs(split_dir, exist_ok=True)

        # Generate k train/val/test partitions for this label fraction.
        dataset.create_splits(k=args.k, val_num=val_num, test_num=test_num, label_frac=lf)
        for i in range(args.k):
            dataset.set_splits()
            # return_descriptor=True yields a DataFrame summarizing the split,
            # saved alongside the split files for inspection.
            descriptor_df = dataset.test_split_gen(return_descriptor=True)
            splits = dataset.return_splits(from_id=True)
            save_splits(splits, ['train', 'val', 'test'],
                        os.path.join(split_dir, f'splits_{i}.csv'))
            save_splits(splits, ['train', 'val', 'test'],
                        os.path.join(split_dir, f'splits_{i}_bool.csv'),
                        boolean_style=True)
            descriptor_df.to_csv(os.path.join(split_dir, f'splits_{i}_descriptor.csv'))