-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy paththesis_run_model_fits_2.py
More file actions
executable file
·122 lines (96 loc) · 3.94 KB
/
thesis_run_model_fits_2.py
File metadata and controls
executable file
·122 lines (96 loc) · 3.94 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# Select the torch device for this run from the command line.
# Usage: thesis_run_model_fits_2.py [device]   (e.g. "cuda:0"; defaults to CPU)
import sys

device = sys.argv[1] if len(sys.argv) > 1 else 'cpu'
from preprocExperimentSetup import *
# File system management
import os
import errno
import zipfile
import torch
import itertools
# Get current git hash to ensure reproducible results
import git
# Locate the enclosing repository by walking up from the working directory.
git_cur_repo = git.Repo(search_parent_directories=True)
git_cur_sha = git_cur_repo.head.object.hexsha
# NOTE(review): git_cur_sha is never referenced again in this file — the
# training-data stamp further down hard-codes a specific sha instead.
# Presumably that pins data generated by an older commit; confirm intentional.
# Root directory holding the Neurofinder recordings, and the subset of
# datasets (training splits plus held-out ".test" splits) fitted in this run.
data_dir = '/nfs/data/gergo/Neurofinder_update/'
all_dataset_names = [
    'neurofinder.00.00',
    'neurofinder.00.00.test',
    'neurofinder.00.01.test',
    'neurofinder.01.00',
    'neurofinder.01.00.test',
    'neurofinder.01.01.test',
    'neurofinder.02.00',
    'neurofinder.02.00.test',
    'neurofinder.02.01.test',
    'neurofinder.03.00',
    'neurofinder.03.00.test',
    'neurofinder.04.00',
    'neurofinder.04.00.test',
    # Deliberately excluded from this run: neurofinder.00.01,
    # neurofinder.02.01, neurofinder.04.01, neurofinder.04.01.test
]
#import nbimporter
#from examineTrainingData import *
# Doesn't work with python scripts, just copy those 3 functions here manually for now
from thesis_final_func_defs import *
# Loading the appropriate training data type
# Stamps select which preprocessed training-data files on disk to load.
# NOTE(review): the sha here is hard-coded rather than taken from the
# git_cur_sha computed above — presumably it pins data generated by that
# specific commit; confirm this is intentional and not a stale constant.
stamp_git = '_gitsha_' + '2bd0d720de0995be6b0f1795304839f9877cb6c3'
stamp_training_type = '_rPC_1_origPMgain_useNans'
# --------------------------------------------------------------
# Set up training all models
# --------------------------------------------------------------
# Every combination of prior x likelihood x dataset gets trained below.

# GP priors to try: currently just a zero-mean process with the
# (bug-fixed) white-noise kernel. A ConstantMean + k1 alternative was
# considered but is disabled for this run.
white_noise_prior = {
    'mean': gpytorch.means.ZeroMean(),
    'kernel': preprocKernels.WhiteNoiseKernelBugfix(variances=torch.tensor([10.])),
}
all_priors = [white_noise_prior]

# Observation likelihoods to try: only the Poisson-input photomultiplier
# model is active (the LinearGain and underamplified-photomultiplier
# variants are disabled for this run).
all_likelihood_classes = [
    preprocLikelihoods.PoissonInputPhotomultiplierLikelihood,
]
# --------------------------------------------------------------
# Train one model per (likelihood, prior, dataset) combination.
# trainModel persists its results under data_dir; mll holds the last
# fitted marginal log-likelihood object.
# --------------------------------------------------------------
for likelihood_class, prior_model_base, dataset_name in itertools.product(all_likelihood_classes, all_priors, all_dataset_names):
    print(dataset_name, likelihood_class, prior_model_base)
    # Load the training data (created with appropriate stamps)
    trainingData = loadTrainingData(
        dataset_name = dataset_name,
        data_dir = data_dir,
        stamp = stamp_git + stamp_training_type
    )
    # Downsample the training data before use
    stamp_trainingCoverage = '_targetCoverage_10'
    trainingDataUniform = downsampleTrainingData(trainingData, filter_width= 15, targetCoverage=0.10)
    # NOTE(review): trainingDataUniform is computed but never used — the
    # full trainingData is moved to the device and passed to trainModel,
    # even though the stamp advertises 10% coverage. Confirm whether the
    # downsampled dict was meant to be trained on instead.
    for name, arr in trainingData.items(): # Move training data to the appropriate device
        trainingData[name] = torch.tensor(arr).to(device)
    # Deep-copy the base prior so fitted state never leaks between runs
    prior_model = {
        'mean' : copy.deepcopy(prior_model_base['mean']),
        'kernel' : copy.deepcopy(prior_model_base['kernel'])
    }
    likelihood_model = likelihood_class()
    # Clean cuda cache if being used
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    stamp_modelGridType = '_grid_50_9'
    mll=trainModel(dataset_name, trainingData, prior_model, likelihood_model, device=device,
        # Use the module-level data_dir (previously a duplicated hard-coded
        # path) so the location is defined in exactly one place.
        data_dir = data_dir,
        stamp = stamp_git + stamp_training_type + stamp_trainingCoverage + stamp_modelGridType,
        n_iter = 30, x_batchsize=2**13, y_batchsize = 200, manual_seed=2713,
        verbose = 1,
        model_grid_size = 50,
        model_interp_point_number = 9
    )