-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathSimpleRNN.py
More file actions
193 lines (132 loc) · 5.16 KB
/
SimpleRNN.py
File metadata and controls
193 lines (132 loc) · 5.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
# http://github.com/timestocome
# converted to python 3, streamlined code, clarified code
# added init weights into init function
# fixed and streamlined backpropagation update function
# adjusted parameters for faster, better accuracy
# removed unused variables and code
# added comments for clarity
# adapted from
# https://deeplearningcourses.com/c/deep-learning-recurrent-neural-networks-in-python
# https://udemy.com/deep-learning-recurrent-neural-networks-in-python
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
# setup theano on GPU if possible
GPU = True
if GPU:
    print("Device set to GPU")
    try:
        theano.config.device = 'gpu'
    except Exception:
        # theano forbids reassigning the device after it is set
        # (e.g. via THEANO_FLAGS); catch Exception, not a bare except,
        # so KeyboardInterrupt/SystemExit still propagate
        pass  # its already set
    theano.config.floatX = 'float32'  # GPU kernels want float32
else:
    print("Running with CPU")

rng = np.random.RandomState(27) # prime random number generator

# Network parameters
# collect them all up here so it's easier to adjust them
n_hidden = 4            # hidden-state width
n_out = 2               # two classes: even / odd parity
learning_rate = 10e-4   # i.e. 1e-3
epochs = 20             # full passes over the dataset
nbit = 12               # sequence length / bits per training example
# create data to use in training
# creates all possible binary combinations for the requested bit width
def create_parity_pairs(n_bits=None):
    """Build the complete parity dataset for n_bits-wide binary numbers.

    n_bits: bits per example; defaults to the module-level `nbit`.
    Returns (x, y):
      x -- float32, shape (2**n_bits, n_bits, 1); x[i, j, 0] is bit j of i
      y -- int32,  shape (2**n_bits, 1); y[i] is the parity of row i
    """
    if n_bits is None:
        n_bits = nbit
    n = 2 ** n_bits # number of possible combinations
    x = np.zeros((n, n_bits))
    y = np.zeros(n)
    for i in range(n):
        # Extract the bits of i using a scratch variable.
        # BUG FIX: the original subtracted from the loop index `i` itself,
        # so x[i, j] and y[i] were written at the wrong (already-reduced)
        # row indices -- the dataset was missing bit patterns and labels
        # disagreed with their inputs.
        remainder = i
        for j in range(n_bits):
            if remainder % (2 ** (j + 1)) != 0:
                remainder -= 2 ** j
                x[i, j] = 1
        y[i] = x[i].sum() % 2   # parity label: sum of set bits mod 2
    # reshape to (samples, timesteps, features) for the RNN, labels to a column
    x = x.reshape(n, x.shape[1], 1)
    y = y.reshape(y.shape[0], 1)
    return x.astype('float32'), y.astype('int32')
class SimpleRNN:
    """Minimal Elman-style recurrent network built with theano.

    One hidden layer of width `n_hidden` (module-level constant) with a
    relu nonlinearity, a softmax output over `n_out` classes, and a
    learned initial hidden state `ho`.  Training is plain per-sample SGD
    via the compiled `train_op`; `predict_op` returns the argmax class
    per timestep.
    """

    def __init__(self, number_samples):
        """Initialize weights and compile the theano train/predict graphs.

        number_samples -- size of the training set; only used as the
                          fan term `n` in the weight-init scale below.
        """
        # set up weights and biases
        d = 1 # depth of x
        n = number_samples
        # init
        # Glorot/Xavier-style uniform initialization, scale sqrt(2/(fan_in+fan_out))
        Wx = np.asarray(rng.uniform(
            low = -np.sqrt(2. /(d + n_hidden)),
            high = np.sqrt(2. /(d + n_hidden)),
            size = (d, n_hidden)
        ))
        self.Wx = theano.shared(Wx, name='Wx', borrow=True)

        # hidden-to-hidden recurrent weights
        # NOTE(review): `2 /` here vs `2. /` for Wx is inconsistent but
        # equivalent under python 3 true division
        Wh = np.asarray(rng.uniform(
            low = -np.sqrt(2. /(d + n_hidden)),
            high = np.sqrt(2 /(d + n_hidden)),
            size = (n_hidden, n_hidden)
        ))
        self.Wh = theano.shared(Wh, name='Wh', borrow=True)

        # hidden bias
        bh = np.zeros(n_hidden)
        self.bh = theano.shared(bh, name='bh', borrow=True)

        # initial hidden state -- learned along with the weights
        ho = np.zeros(n_hidden)
        self.ho = theano.shared(ho, name='ho', borrow=True)

        # hidden-to-output weights
        Wo = np.asarray(rng.uniform(
            low = -np.sqrt(2. /(n_hidden + n_out)),
            high = np.sqrt(2. /(n_hidden + n_out)),
            size = (n_hidden, n_out)
        ))
        self.Wo = theano.shared(Wo, name='Wo', borrow=True)

        # output bias
        bo = np.zeros(n_out)
        self.bo = theano.shared(bo, name='bo', borrow=True)

        # values to adjust with back propagation
        self.parameters = [self.Wx, self.Wh, self.bh, self.ho, self.Wo, self.bo]

        # recurrence functions
        # thX: one sequence, shape (timesteps, 1); thY: its integer label(s)
        thX = T.fmatrix('x')
        thY = T.ivector('y')

        # feed forward equations
        def recurrence(x_t, h_t1):
            # one timestep: new hidden state from input + previous hidden state,
            # then class probabilities from the new hidden state
            h_t = T.nnet.relu( T.dot(x_t, self.Wx) + T.dot(h_t1, self.Wh) + self.bh )
            y_t = T.nnet.softmax( T.dot(h_t, self.Wo) + self.bo )
            return h_t, y_t

        # loop over feed forward equations once for each bit in the sequence
        # send previous hidden output back through and collect prediction
        [h, y_predicted], _ = theano.scan(
            fn = recurrence,
            outputs_info = [self.ho, None],
            sequences = thX,
            n_steps = thX.shape[0],
        )

        # probability of x given y
        # drop the middle axis softmax adds: (timesteps, 1, n_out) -> (timesteps, n_out)
        py_x = y_predicted[:, 0, :]
        prediction = T.argmax(py_x, axis=1) # fetch most likely prediction

        # cost functions for gradients and tracking progress
        cost = -T.mean( T.log(py_x[T.arange(thY.shape[0]), thY])) # cross entropy
        gradients = T.grad(cost, self.parameters) # derivatives
        # vanilla SGD: step each parameter against its gradient
        updates = [(p, p - learning_rate * g) for p, g in zip(self.parameters, gradients)]

        # training and prediction functions
        self.predict_op = theano.function(inputs = [thX], outputs = prediction)
        self.train_op = theano.function(
            inputs = [thX, thY],
            outputs = cost,
            updates = updates
        )

    def train(self, x, y):
        """Run `epochs` passes of per-sample SGD over (x, y), printing the
        mean cost per epoch and plotting the cost curve when finished.

        x -- array of sequences, shape (samples, timesteps, 1)
        y -- labels, shape (samples, 1)
        """
        costs = []
        # number of times to loop through all of data set
        for i in range(epochs):
            x, y = shuffle(x, y) # things work better when you shuffle the data
            cost = 0
            for j in range(len(y)):
                # one SGD step per training sequence
                c = self.train_op(x[j], y[j])
                cost += c
            # output cost so user can see training progress
            cost /= len(y)
            print ("i:", i, "cost:", cost, "%")
            costs.append(cost)
        # graph to show accuracy progress - cost function should decrease
        plt.plot(costs)
        plt.show()
def parity():
    """Build the full parity dataset and train a SimpleRNN on it."""
    features, labels = create_parity_pairs()
    network = SimpleRNN(len(labels))
    network.train(features, labels)
parity()