-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathLoadData.py
More file actions
127 lines (73 loc) · 3.05 KB
/
LoadData.py
File metadata and controls
127 lines (73 loc) · 3.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
#!/python
import numpy as np
import pandas as pd
import pickle
import LoadRawMNISTData
# 28x28 images 0-783 as ints
# training set has 42000 images [label, pixel0, pixel1, ... , pixel783]
# testing set has 28000 images [pixel0, pixel1, ... , pixel783]
# easier to read in csv and store as pickle file, only need to do this once
# submission.pkl is the validation data whose answers we'll submit to kaggle
#
# break the training.csv file into training and testing data
#
# can reshuffle testing and training data by re-running this function or change
# sizes of test/train.
def csv_to_pickle():
    """Load the raw MNIST data, normalize pixels, and cache to pickle files.

    Writes 'test.pkl' and 'train.pkl' in the current directory, each
    containing a list of (image, label) pairs. Only needs to run once;
    afterwards load_data() reads the caches directly.
    """
    testing_images, testing_labels = LoadRawMNISTData.load_mnist("testing")
    print("testing ? ", len(testing_labels))
    training_images, training_labels = LoadRawMNISTData.load_mnist("training")
    print("training ? ", len(training_labels))
    # convert image data from 0-255 ints to 0.0-1.0 floats
    testing_images = testing_images / 255.0
    training_images = training_images / 255.0
    # Pair each image with its label. Materialize as lists: a bare zip
    # object cannot be pickled in Python 3 (TypeError: cannot pickle
    # 'zip' object), and would be a one-shot iterator even if it could.
    testing_data = list(zip(testing_images, testing_labels))
    training_data = list(zip(training_images, training_labels))
    # 'with' guarantees the files are closed even if pickle.dump raises.
    with open('test.pkl', 'wb') as pickle_out:
        pickle.dump(testing_data, pickle_out)
    with open('train.pkl', 'wb') as pickle_out:
        pickle.dump(training_data, pickle_out)
# uncomment line below to convert the raw files into pickle files
# once you've written the pickle files there's no need to rerun this function
#csv_to_pickle()
# load up data from pkl file
def load_data():
    """Read the cached data back from 'train.pkl' and 'test.pkl'.

    Returns:
        (training, testing): two lists of (image, label) pairs.

    Raises:
        FileNotFoundError: if the caches have not been written yet
        (run csv_to_pickle first).
    """
    # NOTE(review): pickle.load can execute arbitrary code on untrusted
    # files; these caches are assumed to be locally generated.
    # The redundant f.close() calls were removed: 'with' already closes.
    with open('train.pkl', 'rb') as f:
        training = list(pickle.load(f))
    with open('test.pkl', 'rb') as f:
        testing = list(pickle.load(f))
    return (training, testing)
# unzip the cached (image, label) pairs into separate numpy arrays
def load_data_wrapper():
    """Load the cached data and split it into image and label arrays.

    Returns:
        (tr_image, tr_label, te_image, te_label): training images,
        training labels, testing images, testing labels, each as a
        numpy array.
    """
    tr_d, te_d = load_data()
    # zip(*pairs) transposes the list of (image, label) tuples into
    # one tuple of images and one tuple of labels.
    tr_i, tr_l = zip(*tr_d)
    tr_image = np.asarray(tr_i)
    tr_label = np.asarray(tr_l)
    te_i, te_l = zip(*te_d)
    te_image = np.asarray(te_i)
    te_label = np.asarray(te_l)
    # Dead code removed: the original also built (784, 1) column-vector
    # inputs and one-hot labels (via vectorized_result) for a commented-out
    # "return (training_data, test_data)" path, then discarded them.
    return (tr_image, tr_label, te_image, te_label)  # return images and labels
# convert 0-9 labels to 10 zero arrays with a 1 in the correct position
def vectorized_result(j):
    """Map a digit label to a (10, 1) one-hot column vector.

    The returned float array is all zeros except for a 1.0 in row j.
    """
    one_hot = np.zeros(10)
    one_hot[j] = 1.0
    return one_hot.reshape(10, 1)
# read in data
# NOTE: module-level side effect — importing this file loads both pickle
# caches immediately and prints a status line. Raises FileNotFoundError
# if csv_to_pickle() has not produced 'train.pkl' / 'test.pkl' yet.
training_data, test_data = load_data()
print("Successfully loaded training/testing?", len(training_data), len(test_data))