-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathclassify1.py
More file actions
103 lines (83 loc) · 3.24 KB
/
classify1.py
File metadata and controls
103 lines (83 loc) · 3.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
#10.10.2024
#Machine Learning lab
#The goal is to identify holes in pictures of mars
import numpy as np
import matplotlib.pyplot as plt
import perceptron_Carlos as Perceptron
X_train = np.load("Xtrain1.npy")
y_train = np.load("Ytrain1.npy")
######## Counting labels ##########
# Dataset summary: label 0 = plain terrain, any other label = crater.
# Vectorised count with np.count_nonzero instead of a Python-level loop
# over the NumPy array (same result, O(n) in C instead of per-element
# Python iteration).
train_total = y_train.shape[0]
n_plain = int(np.count_nonzero(y_train == 0))
n_craters = train_total - n_plain
print("DESCRIPTION OF DATASET")
print()
print("Number of training images = " + str(train_total))
print("Percentage of craters is " + str(100*n_craters/train_total) + " %")
print("Percentage of plain is " + str(100*n_plain/train_total) + " %")
######## Shortening for now ##########
print("Shape of input data is " + str(X_train.shape))
# Work on a small 30-sample slice while developing.
X_train = X_train[0:30]
y_train = y_train[0:30]
#### Normalize #################################
# Centre the inputs, scale by the per-feature max |value|, then scale by the
# per-feature std dev.  The saved statistics (means / maxs / std devs,
# marked "Important for finale!") are kept so the same transform can be
# applied to test data later.
X_train_means = X_train.mean(axis=0)  # Important for finale!
X_train_centered = X_train - X_train_means
X_train_centered_maxs = np.abs(X_train_centered).max(axis=0)  # Important for finale!
X_train_normalised_std_devs = X_train_centered.std(axis=0)  # Important for finale!
X_train_normalised = X_train_centered / X_train_centered_maxs
X_train_normalised = X_train_normalised / X_train_normalised_std_devs
# Same treatment for the labels: centre, max-scale, then std-scale
# (standard deviation normalises gaussian noise).
y_train_mean = y_train.mean()  # Important for finale!
y_train_centered = y_train - y_train_mean
y_train_centered_max = y_train_centered.max()
y_train_normalised = y_train_centered / y_train_centered_max
y_train_normalised_std_dev = y_train_centered.std()  # Important for finale!
y_train_normalised = y_train_normalised / y_train_normalised_std_dev
def display_image(vector, L, H):
    """Render a flattened image `vector` as an H-by-L grayscale picture.

    The vector is reshaped to shape (H, L) and drawn with matplotlib's
    'gray' colormap; axis ticks are hidden.  Nothing is returned.
    """
    img = np.asarray(vector).reshape(H, L)
    plt.imshow(img, cmap='gray')
    plt.axis('off')
# Debug visualisation, disabled by default (flip the 0 to 1 to enable):
# shows the first 20 training images with their class in the title.
if(0):
    for i in range(20):
        display_image(X_train[i],48,48)
        # BUG FIX: the label array is named y_train, not Y_train — the
        # original raised NameError whenever this branch was enabled.
        # (Also fixed the "cratter" typo in the displayed title.)
        if(y_train[i]==1):
            plt.title("Image "+ str(i) + " has a crater")
        else:
            plt.title("Image "+ str(i) + " is clean")
        plt.figure()
# ---- Train a perceptron on the normalised data ----------------------------
# Uses the custom perceptron_Carlos module (imported as Perceptron);
# attribute/method semantics below are inferred from names — verify
# against perceptron_Carlos.py.
P = Perceptron.perceptron()
P.data = X_train_normalised
P.labels = y_train_normalised
P.weight_init()
P.w *= -0.5  # rescale/flip the freshly initialised weights — TODO confirm why -0.5
P.learning_step = 2   # learning rate for the gradient updates
P.steepness = 10      # NOTE(review): presumably the activation steepness — confirm in perceptron_Carlos
#print(np.array(y_train))
print("----------------------------------------")
n_epochs = 10
all_errors =[]   # squared-error per epoch, for the plot below
for i in range(n_epochs):
    #print("----------------------------------")
    #print("Updating with deltas = "+ str(P.gradient_step() ))
    #print("Real Labels =" + str(P.labels))
    P.stochastic_gradient_epoch()   # one stochastic-gradient pass over the data
    pred_labels = P.pred_labels()
    # Round the real-valued predictions to hard class labels.
    pred_labels = [round(ii) for ii in pred_labels]
    # Squared error measured against the RAW labels (not the normalised ones).
    error = np.sum(np.square(pred_labels-y_train))
    all_errors.append(error)
    if(i%1==0):   # prints every epoch; raise the modulus to print less often
        print("Iteration "+ str(i))
    #print(pred_labels)
    #print("Predicted-label error =" + str(y_train-pred_labels) + "Error =" + str(error))
    #print( "Current error= " + str(P.Error() ))
## plotting error with gradient steps ##
plt.plot(all_errors, color='blue')
plt.xlabel('Iteration')
plt.ylabel('Squared errors')
plt.title('Error evolution while learning')
plt.show()