-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdeeplearn_fer.py
More file actions
117 lines (89 loc) · 3.23 KB
/
deeplearn_fer.py
File metadata and controls
117 lines (89 loc) · 3.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import sys
import os
import numpy as np
import pandas
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils import np_utils
# --- training configuration ---
batch_size = 64          # samples per gradient update
epochs = 40              # full passes over the training set
w, h = 48, 48            # input image width and height (flat pixel rows are reshaped to w x h)
output_file = "fer2013"  # basename for the saved model JSON/weights files
dataset = "fer2013"      # basename of the input CSV
# load dataset: one CSV row per image, with an integer 'emotion' label,
# a space-separated 'pixels' string, and a 'Usage' split marker
print("... loading dataset")
ds = pandas.read_csv(f"{dataset}.csv")
# labels are consecutive integers starting at 0, so the class count is max+1
n_classes = int(ds['emotion'].max()) + 1
print(f"... {n_classes} classes found")
# prep lists: flattened pixel vectors and labels, split by the Usage column
train_data_img, train_data_lab, test_data_img, test_data_lab = [], [], [], []
# populate lists
print("... populating training data")
for index, row in ds.iterrows():
    # 'pixels' is a space-separated string of grayscale values
    val = row['pixels'].split(" ")
    try:
        # training set
        if 'Training' in row['Usage']:
            train_data_img.append(np.array(val, 'float32'))
            train_data_lab.append(row['emotion'])
        # test/validation set ('PublicTest' rows serve as the validation split)
        elif 'PublicTest' in row['Usage']:
            test_data_img.append(np.array(val, 'float32'))
            test_data_lab.append(row['emotion'])
    except (ValueError, KeyError, TypeError) as exc:
        # best-effort: skip malformed rows but keep going. A bare `except:`
        # here would also swallow KeyboardInterrupt/SystemExit.
        print(f"[error] {index} - {row}: {exc}")
# stack the python lists into float32 NumPy arrays
train_data_img = np.array(train_data_img, 'float32')
train_data_lab = np.array(train_data_lab, 'float32')
test_data_img = np.array(test_data_img, 'float32')
test_data_lab = np.array(test_data_lab, 'float32')
# one-hot encode the integer labels
train_data_lab = np_utils.to_categorical(train_data_lab, num_classes=n_classes)
test_data_lab = np_utils.to_categorical(test_data_lab, num_classes=n_classes)
# standardize each pixel position across samples to zero mean / unit variance
# (z-scoring, computed per split)
train_data_img -= np.mean(train_data_img, axis=0)
train_data_img /= np.std(train_data_img, axis=0)
test_data_img -= np.mean(test_data_img, axis=0)
test_data_img /= np.std(test_data_img, axis=0)
# reshape flat pixel vectors into (samples, w, h, 1) single-channel image tensors
train_data_img = train_data_img.reshape(train_data_img.shape[0], w, h, 1)
test_data_img = test_data_img.reshape(test_data_img.shape[0], w, h, 1)
# keras CNN: three conv blocks followed by a two-layer dense classifier
model = Sequential([
    # block 1: two 64-filter 3x3 convs, 2x2 max-pool, heavy dropout
    Conv2D(64, kernel_size=(3, 3), activation='relu',
           input_shape=train_data_img.shape[1:]),
    Conv2D(64, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Dropout(0.5),
    # block 2: same shape as block 1
    Conv2D(64, (3, 3), activation='relu'),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    Dropout(0.5),
    # block 3: widen to 128 filters, no dropout before the head
    Conv2D(128, (3, 3), activation='relu'),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
    # classifier head: two 1024-unit dense layers, softmax over the classes
    Flatten(),
    Dense(1024, activation='relu'),
    Dropout(0.2),
    Dense(1024, activation='relu'),
    Dropout(0.2),
    Dense(n_classes, activation='softmax'),
])
# train with categorical cross-entropy and default Adam settings,
# validating against the PublicTest split after every epoch
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(),
              metrics=['accuracy'])
model.fit(
    train_data_img,
    train_data_lab,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    shuffle=True,
    validation_data=(test_data_img, test_data_lab),
)
# save model: architecture as JSON, learned weights as HDF5
architecture = model.to_json()
with open(f"{output_file}.json", "w") as fh:
    fh.write(architecture)
model.save_weights(f"{output_file}.h5")