-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
150 lines (126 loc) · 7.76 KB
/
main.py
File metadata and controls
150 lines (126 loc) · 7.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import os
import train_methods
import eval_methods
import argparse
import cPickle
def add_arguments(parser):
    """Register all command-line options on the given ArgumentParser.

    Args:
        parser: an argparse.ArgumentParser; mutated in place.
    """
    # Allow --flag true/false style boolean options.
    parser.register("type", "bool", lambda v: v.lower() == "true")
    # Data
    parser.add_argument("--x_train_path", type=str, default=None, help="Input train path.")
    parser.add_argument("--y_train_path", type=str, default=None, help="Output train path.")
    parser.add_argument("--x_val_path", type=str, default=None, help="Input validation path.")
    parser.add_argument("--y_val_path", type=str, default=None, help="Output validation path.")
    parser.add_argument("--out_dir", type=str, default=None, help="Output folder to save the trained models.")
    parser.add_argument("--embs_matrix_file", type=str, default=None, help="Pretrained embeddings file")
    parser.add_argument("--vocab_file", type=str, default=None, help="Vocabulary file.")
    # Model
    parser.add_argument("--model_type", type=str, default="ffn", help="ffn|rnn. Type of model to run.")
    parser.add_argument("--model_name", type=str, default="test_model", help="Name of the model.")
    parser.add_argument("--input_size", type=int, default=100, help="Size of the input features.")
    parser.add_argument("--output_size", type=int, default=1, help="Output size.")
    parser.add_argument("--max_seq_length", type=int, default=100, help="Maximum sequence length.")
    parser.add_argument("--hidden_activation", type=str, default="relu", help="Hidden layer activation function.")
    parser.add_argument("--out_activation", type=str, default="sigmoid", help="Output layer activation function.")
    parser.add_argument("--kernel_initializer", type=str, default="glorot_uniform", help="Kernels initializer.")
    parser.add_argument("--rnn_unit_type", type=str, default="rnn", help="rnn | gru | lstm. Type of RNN hidden unit.")
    parser.add_argument("--hidden_dims", type=str, default="32",
                        help="A comma separated list of hidden sizes for each of the model layers. For RNN, only 1 layer"
                             " is supported.")
    parser.add_argument("--embed_dim", type=int, default=32, help="Embedding layer size.")
    parser.add_argument("--emb_trainable", type="bool", default=True, help="Whether to use a trainable embeddings layer.")
    # Typo fix: "use o bidirectional" -> "use a bidirectional".
    parser.add_argument("--bidirectional", type="bool", default=False, help="Whether to use a bidirectional RNN.")
    parser.add_argument("--kernel_regularizer", type=str, default=None, help="Kernel regularizer for FFN model.")
    parser.add_argument("--recurrent_regularizer", type=str, default=None, help="Recurrent layer regularizer.")
    parser.add_argument("--recurrent_dropout", type=float, default=0.0, help="Dropout for hidden to hidden units.")
    parser.add_argument("--input_dropout", type=float, default=0.0, help="Dropout for input to hidden units.")
    parser.add_argument("--layers", type=int, default=1, help="Number of layers. For RNN, only 1 layer is supported.")
    parser.add_argument("--dropouts", type=str, default="0.0", help="A comma separated list of dropout values for each of "
                                                                    "the FFN layers.")
    parser.add_argument("--attention", type="bool", default=False, help="Whether to use attention.")
    # Evaluation
    parser.add_argument("--eval_weights_ckpt", type=str, default=None, help="Checkpoint to load model weights for evaluation.")
    parser.add_argument("--eval_x_data", type=str, default=None, help="Input evaluation path.")
    parser.add_argument("--eval_y_data", type=str, default=None, help="Output evaluation path.")
    parser.add_argument("--eval_res_folder", type=str, default=None, help="Output folder to save evaluation results.")
    parser.add_argument("--plot_attn", type="bool", default=False, help="Whether to plot attention weights.")
    parser.add_argument("--attn_sample_size", type=int, default=1000, help="Sample size to compute attention weights")
    # Training
    # Typo fix: "epochss" -> "epochs".
    parser.add_argument("--n_epochs", type=int, default=10, help="Number of epochs.")
    parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
    parser.add_argument("--learning_rate", type=float, default=0.01, help="Learning rate.")
    parser.add_argument("--save_checkpoint", type="bool", default=True, help="Whether to save a checkpoint at each epoch.")
    parser.add_argument("--early_stopping", type="bool", default=True, help="Whether to do early stopping during training.")
    parser.add_argument("--verbose", type=int, default=1, help="0|1. Verbose for training/evaluation.")
    parser.add_argument("--loss", type=str, default="binary_crossentropy", help="Loss function.")
    # Other
    parser.add_argument("--params_file", type=str, default=None, help="Load parameters from file.")
    parser.add_argument("--gpu", type=str, default=None, help="Which GPU to use.")
def process_params(params):
    """Normalize the raw command-line parameter dict in place.

    Converts the comma separated "hidden_dims" and "dropouts" strings to
    lists, loads the optional pretrained embeddings matrix and vocabulary
    files, and validates unsupported parameter combinations.

    Args:
        params: dict of parameters produced from the command line.

    Raises:
        ValueError: if an RNN model is requested with more than 1 layer.
    """
    params["hidden_dims"] = [int(token) for token in params["hidden_dims"].split(",")]
    params["dropouts"] = [float(token) for token in params["dropouts"].split(",")]
    # Bug fix: the old check was `if sorted(params["embs_matrix_file"])`,
    # which raises TypeError for the default value None. A plain truthiness
    # test handles both None and the empty string.
    params["embs_matrix"] = None
    if params["embs_matrix_file"]:
        import cPickle
        with open(params["embs_matrix_file"]) as embs_file:
            params["embs_matrix"] = cPickle.load(embs_file)
    # Error out early on inconsistent parameter combinations.
    if params["layers"] > 1 and params["model_type"] == "rnn":
        raise ValueError("We only support RNN with 1 layer.")
    if params["vocab_file"]:
        import cPickle
        with open(params["vocab_file"]) as vocab_file:
            vocab = cPickle.load(vocab_file)
        # RNN inputs reserve one extra index (presumably for padding --
        # TODO confirm against train_methods).
        if params["model_type"] == "rnn":
            params["input_size"] = len(vocab) + 1
        else:
            params["input_size"] = len(vocab)
def save_params(params):
    """Pickle the parameter dict to <out_dir>/<model_name>_params.pickle.

    Args:
        params: dict of processed parameters to persist.
    """
    path = os.path.join(params["out_dir"], params["model_name"] + "_params.pickle")
    # Write first, then report: the original printed "saved" before dumping,
    # so a failed dump still claimed success. The context manager also
    # guarantees the file handle is closed.
    with open(path, "wb") as out_file:
        cPickle.dump(params, out_file)
    print("Params saved in %s." % path)
def load_params(filepath):
    """Load a parameter dict previously written by save_params.

    Args:
        filepath: path to a "*_params.pickle" file.

    Returns:
        The unpickled parameter dict.
    """
    # Open in binary mode to mirror the "wb" used by save_params, and use a
    # context manager so the handle is closed (the original leaked it).
    with open(filepath, "rb") as params_file:
        return cPickle.load(params_file)
def ensure_compatible_params(loaded_params, input_params):
    """Reconcile params loaded from file with the current command line.

    Options missing from the loaded dict (e.g. flags added after the file
    was written) are copied over as-is; a fixed set of evaluation-time
    options is always refreshed from the command line.

    Args:
        loaded_params: dict restored from a params pickle; mutated in place.
        input_params: dict built from the current command-line arguments.

    Returns:
        The updated loaded_params dict.
    """
    # Evaluation-time settings always follow the current invocation.
    refreshable = {"eval_weights_ckpt", "eval_x_data", "eval_y_data",
                   "eval_res_folder", "attn_sample_size", "verbose"}
    for name in input_params.keys():
        current = input_params[name]
        if name not in loaded_params:
            # New option the saved file predates: adopt the CLI value.
            loaded_params[name] = current
        elif name in refreshable and loaded_params[name] != current:
            print("# Updating hparams.%s: %s -> %s" %
                  (name, loaded_params[name], current))
            loaded_params[name] = current
    return loaded_params
def run_main(params):
    """Entry point: prepare the parameter dict, then train or evaluate.

    Args:
        params: argparse.Namespace produced by the parser from add_arguments.
    """
    # Pin the visible GPU before any framework initialization happens.
    if params.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = params.gpu
    params = vars(params)
    if params["params_file"] is not None:
        # Resume from a saved params file, refreshed with current CLI values.
        loaded_params = load_params(params["params_file"])
        params = ensure_compatible_params(loaded_params, params)
    else:
        process_params(params)
        save_params(params)
    print("Parameters: ")
    for key in params:
        # Fix: the original used a Python 2-only print statement here while
        # the rest of the file uses print(...) calls; this form behaves the
        # same and is consistent.
        print("%s = %s" % (key, params[key]))
    model_type = params["model_type"]
    # A provided weights checkpoint means evaluation mode; otherwise train.
    if params["eval_weights_ckpt"] is not None:
        if model_type == "rnn":
            eval_methods.evaluate_rnn(params)
        elif model_type == "ffn":
            eval_methods.evaluate_ffn(params)
    else:
        if model_type == "rnn":
            train_methods.train_rnn(params)
        elif model_type == "ffn":
            train_methods.train_ffn(params)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # add the possible command line arguments.
    add_arguments(parser)
    # parse_known_args tolerates unrecognized flags instead of erroring;
    # the unparsed remainder is deliberately ignored.
    params, unparsed = parser.parse_known_args()
    run_main(params)