-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathconfiguration.py
More file actions
70 lines (49 loc) · 2.85 KB
/
configuration.py
File metadata and controls
70 lines (49 loc) · 2.85 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import argparse
import types
class Configuration:
    """Parse command-line options and collect every training hyperparameter.

    All parsed and hard-coded values are stored on a ``types.SimpleNamespace``
    exposed as ``self.params``, so the rest of the pipeline can read them as
    plain attributes (e.g. ``config.params.lr``).
    """

    def __init__(self, argv=None):
        """Build the parser, parse ``argv``, and derive the hyperparameters.

        Args:
            argv: Optional list of argument strings. ``None`` (the default)
                falls back to ``sys.argv[1:]``, preserving the original
                behavior; passing an explicit list makes the class usable
                in tests without mutating ``sys.argv``.
        """
        parser = argparse.ArgumentParser(description='Unsupervised Learning of Object Landmarks via Self-Training Correspondence')
        parser.add_argument('--experiment_name', help='Please assign a unique name for each experiment. Use the same name for both training set 1 and 2.', required=True)
        parser.add_argument('--dataset_name', choices=['CelebA', 'LS3D'], default='CelebA', help='Select training dataset')
        # type=int lets argparse reject non-numeric input with a clear usage
        # error instead of failing later on a manual int() conversion.
        parser.add_argument('--K', default=10, type=int, help='Select number of discovered landmarks K')
        parser.add_argument('--gpunum', default=1, type=int)
        parser.add_argument('--num_workers', default=0, help='Number of workers', type=int)
        parser.add_argument('--resume', action='store_true', help='If True stage 1 and 2 will resume from last saved checkpoint and pseudogroundtruth.')
        parser.add_argument('--stage', default=1, type=int, help='Specify the stage of the algorithm you want to evaluate on eval.py')
        parser.add_argument('--path_to_checkpoint', default=None)
        args = parser.parse_args(argv)

        hyperparameters = types.SimpleNamespace()
        # Values taken directly from the command line (already int-typed by
        # argparse above, so no explicit conversions are needed).
        hyperparameters.experiment_name = args.experiment_name
        hyperparameters.dataset_name = args.dataset_name
        hyperparameters.gpunum = args.gpunum
        hyperparameters.num_workers = args.num_workers
        hyperparameters.resume = args.resume
        hyperparameters.path_to_checkpoint = args.path_to_checkpoint
        hyperparameters.K = args.K
        hyperparameters.eval_Stage = args.stage
        # params Whole Pipeline
        hyperparameters.lr = 1e-4
        hyperparameters.weight_decay = 1e-5
        hyperparameters.batchSize = 16
        hyperparameters.useflip = True
        # params Stage 1
        hyperparameters.batchSize_superpoint = 16
        hyperparameters.confidence_thres_superpoint = 0.15
        hyperparameters.bootstrapping_iterations = 30000
        hyperparameters.iterations_per_round = 5000
        hyperparameters.total_iterations_stage1 = 200000
        hyperparameters.remove_superpoint_outliers_percentage = 0.4
        hyperparameters.M = 100
        hyperparameters.confidence_thres_FAN = 0.15
        hyperparameters.nms_thres_FAN = 2
        hyperparameters.lr_step_schedual_stage1 = [150000, 180000]
        hyperparameters.nms_thres_superpoint = 8
        # params Stage 2
        hyperparameters.lr_step_schedual_stage2 = [50000, 70000]
        hyperparameters.total_iterations_stage2 = 200000
        hyperparameters.save_checkpoint_frequency = 20000
        # Scale for multi-GPU training: fewer iterations per round, and a
        # linearly scaled learning rate / batch size per the common
        # "linear scaling rule" for data-parallel training.
        hyperparameters.iterations_per_round = int(hyperparameters.iterations_per_round / hyperparameters.gpunum)
        hyperparameters.lr = hyperparameters.lr * hyperparameters.gpunum
        hyperparameters.batchSize = hyperparameters.batchSize * hyperparameters.gpunum
        self.params = hyperparameters