-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_architectures.py
More file actions
122 lines (107 loc) · 4.32 KB
/
model_architectures.py
File metadata and controls
122 lines (107 loc) · 4.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
from keras.models import Sequential
from keras.layers import Input, Dense, Activation, Flatten
import keras.optimizers
import keras.initializers
import tensorflow as tf
import math
def minimal_dense(input_len, output_activation, nb_actions, lr):
    """Build and compile a minimal one-hidden-layer MLP Q-network.

    Args:
        input_len: dimensionality of the flat input feature vector.
        output_activation: activation name for the output layer (e.g. 'linear').
        nb_actions: number of output units (one Q-value per action).
        lr: learning rate for the Adam optimizer.

    Returns:
        A compiled keras Sequential model (Huber loss, Adam optimizer).
    """
    model = Sequential()
    model.add(Dense(16, input_dim=input_len))
    model.add(Activation('sigmoid'))
    model.add(Dense(nb_actions))
    model.add(Activation(output_activation))
    # summary() prints the table itself and returns None; wrapping it in
    # print() also emitted a stray "None" line.
    model.summary()
    opt = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss=tf.keras.losses.Huber(), optimizer=opt)
    return model
def dense(input_len, output_activation, nb_actions, lr):
    """Build and compile a symmetric 16-32-64-128-64-32 sigmoid MLP Q-network.

    Args:
        input_len: dimensionality of the flat input feature vector.
        output_activation: activation name for the output layer (e.g. 'linear').
        nb_actions: number of output units (one Q-value per action).
        lr: learning rate for the Adam optimizer.

    Returns:
        A compiled keras Sequential model (Huber loss, Adam optimizer).
    """
    model = Sequential()
    model.add(Dense(16, input_dim=input_len))
    model.add(Activation('sigmoid'))
    # Widen to 128 then narrow back down, one sigmoid per Dense layer.
    for units in (32, 64, 128, 64, 32):
        model.add(Dense(units))
        model.add(Activation('sigmoid'))
    model.add(Dense(nb_actions))
    model.add(Activation(output_activation))
    # summary() prints the table itself and returns None; wrapping it in
    # print() also emitted a stray "None" line.
    model.summary()
    opt = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss=tf.keras.losses.Huber(), optimizer=opt)
    return model
def small_dense(input_len, output_activation, nb_actions, lr, seed_):
    """Build and compile a small 16-32 sigmoid MLP with reproducible init.

    Args:
        input_len: dimensionality of the flat input feature vector.
        output_activation: activation name for the output layer (e.g. 'linear').
        nb_actions: number of output units (one Q-value per action).
        lr: learning rate for the Adam optimizer.
        seed_: seed for the Glorot-uniform kernel initializer, so weight
            initialization is reproducible across runs.

    Returns:
        A compiled keras Sequential model (Huber loss, Adam optimizer).
    """
    model = Sequential()
    # Seeded initializer shared by all layers for deterministic init.
    initializer = keras.initializers.glorot_uniform(seed=seed_)
    model.add(Dense(16, input_dim=input_len, activation='sigmoid', kernel_initializer=initializer))
    model.add(Dense(32, activation='sigmoid', kernel_initializer=initializer))
    model.add(Dense(nb_actions, activation=output_activation, kernel_initializer=initializer))
    # summary() prints the table itself and returns None; wrapping it in
    # print() also emitted a stray "None" line.
    model.summary()
    opt = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss=tf.keras.losses.Huber(), optimizer=opt)
    return model
def big_dense(input_len, output_activation, nb_actions, lr):
    """Build and compile a large symmetric sigmoid MLP (16 up to 512 and back).

    Args:
        input_len: dimensionality of the flat input feature vector.
        output_activation: activation name for the output layer (e.g. 'linear').
        nb_actions: number of output units (one Q-value per action).
        lr: learning rate for the Adam optimizer.

    Returns:
        A compiled keras Sequential model (Huber loss, Adam optimizer).
    """
    model = Sequential()
    model.add(Dense(16, input_dim=input_len))
    model.add(Activation('sigmoid'))
    # Widen to 512 then narrow back down, one sigmoid per Dense layer.
    for units in (32, 64, 128, 256, 512, 256, 128, 64, 32):
        model.add(Dense(units))
        model.add(Activation('sigmoid'))
    model.add(Dense(nb_actions))
    model.add(Activation(output_activation))
    # summary() prints the table itself and returns None; wrapping it in
    # print() also emitted a stray "None" line.
    model.summary()
    opt = keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss=tf.keras.losses.Huber(), optimizer=opt)
    return model
def mellowmax(omega, x):
    """Mellowmax operator: log(mean(exp(omega * x_i))) / omega.

    A smooth alternative to max/softmax used in reinforcement learning.
    Uses the max-shift (log-sum-exp) trick so large omega * x_i values do
    not overflow math.exp, while giving the same result as the naive form.

    Args:
        omega: temperature parameter; must be non-zero.
        x: non-empty sequence of numbers.

    Returns:
        The mellowmax of x as a float.

    Raises:
        ValueError: if x is empty.
    """
    if len(x) == 0:
        raise ValueError("mellowmax requires a non-empty sequence")
    scaled = [omega * val for val in x]
    shift = max(scaled)
    # log(sum(exp(s_i)) / n) = shift + log(sum(exp(s_i - shift)) / n)
    total = sum(math.exp(s - shift) for s in scaled)
    return (shift + math.log(total / len(x))) / omega
def huber_loss(y_true, y_pred, clip_delta=1.0):
    """Element-wise Huber loss.

    Quadratic (0.5 * err^2) where |err| < clip_delta, linear beyond it,
    joined so the two pieces meet smoothly at the threshold.

    Args:
        y_true: target tensor.
        y_pred: prediction tensor, same shape as y_true.
        clip_delta: threshold between the quadratic and linear regimes.

    Returns:
        A tensor of per-element losses, same shape as the inputs.
    """
    residual = y_true - y_pred
    abs_residual = tf.keras.backend.abs(residual)
    in_quadratic_zone = abs_residual < clip_delta
    quadratic = 0.5 * tf.keras.backend.square(residual)
    linear = clip_delta * (abs_residual - 0.5 * clip_delta)
    return tf.where(in_quadratic_zone, quadratic, linear)
def huber_loss_mean(y_true, y_pred, clip_delta=1.0):
    """Scalar Huber loss: mean of the element-wise Huber loss over all elements."""
    elementwise = huber_loss(y_true, y_pred, clip_delta)
    return tf.keras.backend.mean(elementwise)