# conv.py
# Note: this script uses the TensorFlow 1.x graph-mode API; on TF 2.x it can
# be run through the compatibility layer instead:
#   import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()
import tensorflow as tf
#import tensorflow.examples.tutorials.mnist.input_data as input_data
import input_data  # local data reader patterned after the TF MNIST tutorial module
import sys
import numpy as np
import matplotlib.pyplot as plt
#"linear size of the system "
lx=16
#parameters of the neural network and cost function
numberlabels=2 # Number of phases under consideration (2 for the Ising model on the square lattice)
hiddenunits1=100 # number of hidden unites in the hidden layer
lamb=0.001 # regularization parameter
beta=1.0 #``inverse temperature'' of the sigmoid neuron
#Parameters of the optimization
#batch size for the gradient descent
bsize=1500
# number of iterations
niter=4000
# Description of the input data
Ntemp=40 # number of different temperatures used in the training and testing data
samples_per_T=250 # number of samples per temperature value in the testing set
Nord=20 # number of temperatures in the ordered phase
# temperature list at which the training/test sets were generated; the output
# loops below read one entry per temperature, so Ntemp values are required
# (the original two-entry list, #tlist=[0.000001,99999.9], covers only the
# T -> 0 and T -> infinity limits and would raise an IndexError). The linear
# grid between those extremes is a placeholder assumption; substitute the
# actual simulation temperatures.
tlist=np.linspace(0.000001,99999.9,Ntemp)
#reading the data in the directory txt
mnist = input_data.read_data_sets(numberlabels,lx,'txt', one_hot=True)
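# read_data_sets is assumed to mirror the TF MNIST tutorial reader: it should
# return datasets exposing mnist.train.next_batch() and mnist.test.images /
# mnist.test.labels, which is how they are used below.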
print "reading sets ok"
#sys.exit("pare aqui")
# defining the weights and their initialization
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# applying the sigmoid activation of a layer
def layers(x, W,b):
    return tf.nn.sigmoid(beta*tf.matmul(x, W)+b)

# hidden-layer argument (pre-activation), for investigating what the neural net learns upon training
def hidlay(x,W,b):
    return tf.matmul(x, W)+b
# defining the model: input(spin configuration) and label (Ferromagnet/paramagnet)
x = tf.placeholder("float", shape=[None, lx*lx]) # spin configuration
y_ = tf.placeholder("float", shape=[None, numberlabels]) # label in the form of a one-hot vector
#first layer (hidden layer)
#defining the weights and bias of the hidden layer
W_1 = weight_variable([lx*lx,hiddenunits1])
b_1 = bias_variable([hiddenunits1])
hl=hidlay(x,W_1,b_1) # pre-activation, kept for inspection (not used in training)
#Apply a sigmoid
O1 = layers(x, W_1,b_1)
#second layer (output layer in this case)
W_2 = weight_variable([hiddenunits1,numberlabels])
b_2 = bias_variable([numberlabels])
O2=layers(O1,W_2,b_2)
y_conv=O2
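# Shapes through the network: x (batch, lx*lx) -> O1 (batch, hiddenunits1)
# -> y_conv (batch, numberlabels): a fully connected net with one sigmoid
# hidden layer and sigmoid output units, one per phase label.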
#Train and Evaluate the Model
# cost function to minimize (with a small L2 regularization (lamb))
cross_entropy = tf.reduce_mean(
    -y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0))
    -(1.0-y_)*tf.log(tf.clip_by_value(1.0-y_conv,1e-10,1.0))
    ) + (lamb)*(tf.nn.l2_loss(W_1)+tf.nn.l2_loss(W_2))
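# Written out, the cost is the sigmoid cross-entropy averaged over the batch
# and over the label components,
#   C = -mean_{n,k}[ y ln(a) + (1-y) ln(1-a) ] + lamb*( ||W_1||^2 + ||W_2||^2 )/2,
# with the outputs clipped to [1e-10, 1] to avoid log(0); note that
# tf.nn.l2_loss(W) returns sum(W**2)/2, which supplies the factor 1/2.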
#defining the optimizer to be used in the minimization of the cross entropy
optimizer = tf.train.AdamOptimizer(0.0005)
# the train step minimizes the cross entropy via gradient updates computed by the optimizer
train_step = optimizer.minimize(cross_entropy)
#predictions
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) # checks the correct predictions by comparing the results of the neural net with the labels
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) # measures the accuracy
# initializing the session
sess = tf.Session()
sess.run(tf.global_variables_initializer()) # initialize_all_variables is deprecated
# training
for i in range(niter):
    batch = mnist.train.next_batch(bsize)
    #batch=(mnist.train.images[:,:].reshape(bsize,lx*lx), mnist.train.labels[:,:].reshape((bsize,numberlabels)) )
    if i%100 == 0:
        train_accuracy = sess.run(accuracy,feed_dict={
            x:batch[0], y_: batch[1]})
        print("step %d, training accuracy %g"%(i, train_accuracy))
        print(sess.run(cross_entropy,feed_dict={x: batch[0], y_: batch[1]}))
        print("test accuracy %g"%sess.run(accuracy, feed_dict={
            x: mnist.test.images, y_: mnist.test.labels}))
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})
print "test accuracy %g"%sess.run(accuracy, feed_dict={
x: mnist.test.images, y_: mnist.test.labels})
W1s=sess.run(W_1)
b1s=sess.run(b_1)
np.savetxt('W1.txt', W1s)
np.savetxt('b1.txt', b1s)
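# A minimal reload sketch for later analysis of the trained hidden layer
# (an assumption about downstream use, not part of the training run), e.g.
# to visualize the weights feeding one hidden unit as an lx-by-lx map:
#   W1s = np.loadtxt('W1.txt')   # shape (lx*lx, hiddenunits1)
#   b1s = np.loadtxt('b1.txt')   # shape (hiddenunits1,)
#   plt.imshow(W1s[:, 0].reshape(lx, lx)); plt.colorbar(); plt.show()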
# writing out the results (the files below are meant for plotting)
f = open('nnout.dat', 'w')
# Average output of the neural net over the test set; this relies on the test
# images being stored grouped by temperature, samples_per_T in a row, in the
# same order as tlist
ii=0
for i in range(Ntemp):
    av=0.0
    for j in range(samples_per_T):
        batch=(mnist.test.images[ii,:].reshape((1,lx*lx)),mnist.test.labels[ii,:].reshape((1,numberlabels)))
        res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1]})
        av=av+res
        ii=ii+1
    av=av/samples_per_T
    f.write(str(i)+' '+str(tlist[i])+' '+str(av[0,0])+' '+str(av[0,1])+"\n")
    #print(i,av)
f.close()
# Average accuracy vs temperature over the test set
f = open('acc.dat', 'w')
for ii in range(Ntemp):
    batch=(mnist.test.images[ii*samples_per_T:ii*samples_per_T+samples_per_T,:].reshape(samples_per_T,lx*lx),
           mnist.test.labels[ii*samples_per_T:ii*samples_per_T+samples_per_T,:].reshape((samples_per_T,numberlabels)))
    test_accuracy = sess.run(accuracy,feed_dict={
        x:batch[0], y_: batch[1]})
    f.write(str(ii)+' '+str(tlist[ii])+' '+str(test_accuracy)+"\n")
f.close()
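# A minimal plotting sketch for the files written above, using the matplotlib
# import at the top. The figure layout and file name are assumptions, not
# part of the original pipeline: it shows the average network outputs and the
# test accuracy as functions of temperature.
data = np.loadtxt('nnout.dat') # columns: index, T, <output 0>, <output 1>
acc = np.loadtxt('acc.dat')    # columns: index, T, accuracy
plt.plot(data[:, 1], data[:, 2], 'o-', label='average output, neuron 0')
plt.plot(data[:, 1], data[:, 3], 's-', label='average output, neuron 1')
plt.plot(acc[:, 1], acc[:, 2], '^-', label='test accuracy')
plt.xlabel('temperature')
plt.ylabel('network output / accuracy')
plt.legend()
plt.savefig('nnout.png')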