import tensorflow as tf
import numpy as np


class Network:
    def __init__(self, config, scope):
        # initialize the member variables
        self.config = config
        self.train_step = 0
        self.scope = scope
        # tf config & session setup
        conf = tf.ConfigProto(allow_soft_placement=True,
                              log_device_placement=False)
        conf.gpu_options.allow_growth = True
        self.sess = tf.Session(config=conf)
        # define the network
        self.initialize_network()
        # model saving and loading
        self.saver = tf.train.Saver(max_to_keep=3)
        if self.config.model_load:
            self.model_load()
        else:
            self.sess.run(tf.global_variables_initializer())

    def initialize_network(self):
        # the input tensor
        self.image_input = tf.placeholder(
            tf.float32, shape=[None] + self.config.input_shape,
            name="image_input" + self.scope)
        out = self.image_input
        print(out)
        # cnn part: a stack of 3D convolutions defined by the config
        with tf.variable_scope("cnn_part" + self.scope):
            for filters, kernel_size, strides in zip(
                    self.config.filters, self.config.kernel_size, self.config.strides):
                layer = tf.layers.conv3d(
                    inputs=out,
                    filters=filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    activation=tf.nn.relu,
                    padding=self.config.padding
                )
                print(layer)
                out = layer
        # flatten the tensor for the dnn input
        self.cnn_output = tf.layers.flatten(out)
        out = self.cnn_output
        # dnn part: fully connected layers; the final 16 * 7 layer stays linear
        with tf.variable_scope("dnn_part" + self.scope):
            for output_num in self.config.dnn_shape:
                if output_num != 16 * 7:
                    fn = tf.nn.relu
                else:
                    fn = None
                layer = tf.layers.dense(
                    inputs=out,
                    units=output_num,
                    activation=fn
                )
                out = layer
            self.dnn_output = out
        # for debugging
        print(self.dnn_output)
        # ground-truth (standard) matrix input
        self.standard_mat = tf.placeholder(
            tf.float32, shape=[None, 16 * 7], name="standard_mat" + self.scope)
        # define the loss and the optimizer
        with tf.variable_scope("train_part" + self.scope):
            self.loss = tf.reduce_mean(
                tf.square(self.dnn_output - self.standard_mat))
            self.trainer = tf.train.AdamOptimizer(
                self.config.learning_rate).minimize(self.loss)

    def train(self, input_buffer, output_buffer):
        # record the training step
        self.train_step = self.train_step + 1
        # apply one training step and fetch the loss
        _, loss = self.sess.run([self.trainer, self.loss], feed_dict={
            self.image_input: input_buffer,
            self.standard_mat: output_buffer
        })
        # print the loss periodically
        if self.train_step % 10 == 1:
            print(self.scope + " now learning step: %d, now loss: %f" %
                  (self.train_step, loss))
        # save the model every config.every_steps_save steps
        if self.train_step % self.config.every_steps_save == 1:
            self.model_save()

    def test(self, data):
        # evaluate on the given data set
        loss, output = self.sess.run([self.loss, self.dnn_output], feed_dict={
            self.image_input: data["image_input"],
            self.standard_mat: data["standard_mat"]
        })
        print("loss on test set:", loss)
        print("output:", output)

    def return_mat(self, data):
        # fetch the network output for the given images
        output = self.sess.run([self.dnn_output], feed_dict={
            self.image_input: data
        })
        return output

    def model_save(self, name=None):
        # save the model
        print("now training step %d...model saving..." % self.train_step)
        if name is None:
            self.saver.save(self.sess, "model/training_step" + self.scope,
                            global_step=self.train_step)
        else:
            self.saver.save(self.sess, name)

    def model_load(self):
        self.saver.restore(self.sess, "model/training_step" + self.scope + "_26001")
        print(self.scope, "load over.")
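

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): one way this Network class could be
# instantiated and driven. The concrete config values (input_shape, filters,
# kernel_size, strides, padding, dnn_shape, learning_rate, model_load,
# every_steps_save), the "_demo" scope, and the random data below are
# hypothetical placeholders, not part of the original repository.
if __name__ == "__main__":
    import os
    from types import SimpleNamespace

    # hypothetical config; shapes are chosen only so the graph builds
    config = SimpleNamespace(
        input_shape=[16, 64, 64, 1],       # depth, height, width, channels
        filters=[8, 16],
        kernel_size=[[3, 3, 3], [3, 3, 3]],
        strides=[[2, 2, 2], [2, 2, 2]],
        padding="same",
        dnn_shape=[256, 16 * 7],           # last layer matches standard_mat
        learning_rate=1e-4,
        model_load=False,
        every_steps_save=1000,
    )

    # make sure the checkpoint directory used by model_save exists
    os.makedirs("model", exist_ok=True)

    net = Network(config, scope="_demo")

    # random stand-in batch: 4 volumes and their 16 * 7 target matrices
    images = np.random.rand(4, 16, 64, 64, 1).astype(np.float32)
    targets = np.random.rand(4, 16 * 7).astype(np.float32)

    net.train(images, targets)
    net.test({"image_input": images, "standard_mat": targets})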