-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprocess.py
More file actions
174 lines (162 loc) · 7.78 KB
/
process.py
File metadata and controls
174 lines (162 loc) · 7.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
import numpy as np
import math
# split data l into n folds
def chunks(l, n):
    """Yield successive n-sized slices of l; the final slice may be shorter."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
# get required data
def getInput(num_of_data):
    """Interactively collect the run configuration from stdin.

    Re-prompts until each answer is a valid positive integer within range.

    Parameters:
        num_of_data: size of the dataset (upper bound for the fold count).

    Returns:
        (num_of_folds, num_of_layers, num_of_nodes_in_hidden_layer) where the
        last element is a list with one node count per hidden layer.
    """
    # ---- number of cross-validation folds: must be numeric, 2..num_of_data ----
    while True:
        raw = input("Number of folds: ")
        if not raw.isnumeric():
            print("WARNING : Invalid input must be numeric.")
        elif int(raw) > num_of_data:
            print("WARNING : Number of folds exceeds the size of the dataset.")
        elif int(raw) <= 1:
            print("WARNING : Number of folds cannot lower than or equal to 1.")
        else:
            num_of_folds = int(raw)
            break
    # ---- network depth: at least one hidden layer ----
    print("#### Select your neural network architecture ####")
    while True:
        raw = input("Number of hidden layers : ")
        if not raw.isnumeric():
            print("WARNING : Invalid input must be numeric")
        elif int(raw) < 1:
            print("WARNING : Number of layers must greater than or equal to 1.")
        else:
            num_of_layers = int(raw)
            break
    # ---- width of each hidden layer: at least one node per layer ----
    num_of_nodes_in_hidden_layer = []
    for layer_no in range(1, num_of_layers + 1):
        while True:
            raw = input("Number of nodes in layer " + str(layer_no) + " : ")
            if not raw.isnumeric():
                print("WARNING : Invalid input must be numeric.")
            elif int(raw) < 1:
                print("WARNING : Number of nodes must greater than or equal to 1.")
            else:
                num_of_nodes_in_hidden_layer.append(int(raw))
                break
    return num_of_folds, num_of_layers, num_of_nodes_in_hidden_layer
# scaling data to be in desired range
def scaling(list_input):
    """Min-max normalise list_input into [0, 1], rounded to 7 decimal places.

    Fix: the original divided by (max - min) unconditionally, raising
    ZeroDivisionError when every element is identical; a constant input now
    maps to all zeros instead.

    Parameters:
        list_input: non-empty sequence of numbers.

    Returns:
        list of floats in [0, 1], same length and order as list_input.
    """
    max_value = max(list_input)
    min_value = min(list_input)
    span = max_value - min_value
    if span == 0:
        # All elements equal: there is no range to scale over.
        return [0.0] * len(list_input)
    return [round((element - min_value) / span, 7) for element in list_input]
# create individual by initialize weight
def createParticle(num_of_hidden_layers, num_of_nodes_in_hidden_layer, num_of_input = 8, num_of_output = 2):
    """Create one PSO particle: random weights for every trainable layer.

    Each node gets one weight vector drawn uniformly from [-1, 1); its length
    is (fan-in + 1) because the first slot holds the bias weight.

    Returns:
        A list with num_of_hidden_layers entries (one list of per-node weight
        arrays per hidden layer) plus one final entry for the output layer.
    """
    # Hidden layers: fan-in is the input size for layer 0, otherwise the
    # previous hidden layer's width. Draw order matches one uniform() call
    # per node, layer by layer.
    hidden_layers = []
    fan_in = num_of_input
    for layer_index in range(num_of_hidden_layers):
        width = num_of_nodes_in_hidden_layer[layer_index]
        node_weights = [
            np.random.uniform(low = -1.0, high = 1.0, size = fan_in + 1)
            for _ in range(width)
        ]
        hidden_layers.append(node_weights)
        fan_in = width
    # Output layer: fan-in is the width of the last hidden layer, plus bias.
    out_fan_in = num_of_nodes_in_hidden_layer[num_of_hidden_layers - 1] + 1
    output_layer = [
        np.random.uniform(low = -1.0, high = 1.0, size = out_fan_in)
        for _ in range(num_of_output)
    ]
    return hidden_layers + [output_layer]
# create output from each node in network
def createY(num_of_hidden_layers, num_of_nodes_in_hidden_layer, num_of_input = 30, num_of_output = 2):
    """Allocate zero-initialised activation buffers for the network.

    Returns a flat list: one zeros array per hidden layer (sized to that
    layer's node count), followed by the output-layer buffers.
    """
    all_Y = [
        np.zeros(num_of_nodes_in_hidden_layer[idx])
        for idx in range(num_of_hidden_layers)
    ]
    # NOTE(review): one zeros(num_of_output) buffer is appended PER output
    # node, i.e. num_of_output buffers each of length num_of_output. This
    # looks like it may have been meant as a single buffer, but it is
    # preserved as-is — confirm against the callers.
    for _ in range(num_of_output):
        all_Y.append(np.zeros(num_of_output))
    return all_Y
def sigmoid(x):
    """Logistic sigmoid activation, rounded to 7 decimal places.

    Fixes two defects in the original:
    - sign bug: it computed 1 / (1 + e^x), which is sigmoid(-x) — a
      decreasing function — instead of the standard 1 / (1 + e^(-x));
    - overflow: math.exp raised OverflowError for large |x| (|x| > ~709).
    The branch below only ever exponentiates a non-positive value, which is
    the standard numerically stable formulation.
    """
    if x >= 0:
        result = 1 / (1 + math.exp(-x))
    else:
        # e^x is in (0, 1] here, so no overflow is possible.
        e = math.exp(x)
        result = e / (1 + e)
    return round(result, 7)
# create a list contains pbest of each sample
def createListPbest(num_of_samples):
    """Return the personal-best array, one slot per sample, initialised to 1.0."""
    return np.ones(num_of_samples)
# create a list contains velocity of each sample
def createListVelocity(num_of_hidden_layers, num_of_nodes_in_hidden_layer, num_of_input = 8, num_of_output = 2):
    """Create the PSO velocity structure, mirroring createParticle's layout.

    Same nesting as the weight structure (per layer, per node, one vector of
    length fan-in + 1 including the bias slot) but every component starts
    at 1.0 instead of a random draw.

    Returns:
        A list with num_of_hidden_layers entries (one list of per-node ones
        arrays per hidden layer) plus one final entry for the output layer.
    """
    # Hidden layers: fan-in is the input size for layer 0, otherwise the
    # previous hidden layer's width; +1 for the bias slot.
    hidden_layers = []
    fan_in = num_of_input
    for layer_index in range(num_of_hidden_layers):
        width = num_of_nodes_in_hidden_layer[layer_index]
        node_vectors = [np.ones(fan_in + 1) for _ in range(width)]
        hidden_layers.append(node_vectors)
        fan_in = width
    # Output layer: fan-in is the width of the last hidden layer, plus bias.
    out_fan_in = num_of_nodes_in_hidden_layer[num_of_hidden_layers - 1] + 1
    output_layer = [np.ones(out_fan_in) for _ in range(num_of_output)]
    return hidden_layers + [output_layer]
# calculate mean absolute error
def mae(actual, predict_1, predict_2):
    """Mean absolute error across the network's two outputs.

    Parameters:
        actual: sequence whose first two elements are the target values.
        predict_1, predict_2: the two predicted output values.

    Returns:
        Average of the two absolute errors.
    """
    return (abs(actual[0] - predict_1) + abs(actual[1] - predict_2)) / 2