-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsvm.py
More file actions
126 lines (88 loc) · 3.33 KB
/
svm.py
File metadata and controls
126 lines (88 loc) · 3.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
from sklearn import datasets
# Load the iris dataset: 150 samples, 4 numeric features, 3 balanced classes.
iris = datasets.load_iris()
X = iris.data
y = iris.target
print(X)
print(y)
names = iris.target_names
print(X.shape)
print(y.shape)

# Assemble a labelled DataFrame for exploratory analysis.
df = pd.DataFrame(X, columns=iris.feature_names)
df['species'] = iris.target
# BUG FIX: species names were misspelled ('setpsa', 'virginical'); the iris
# classes are setosa / versicolor / virginica (see iris.target_names).
df['species'] = df['species'].replace(to_replace=[0, 1, 2],
                                      value=['setosa', 'versicolor', 'virginica'])
print(df)

# Exploratory data analysis: pairwise feature scatter plots coloured by class.
sns.pairplot(df, hue='species', palette='Set2')
plt.show()

from sklearn.model_selection import train_test_split
# Hold out 20% of the data as a test set; fixed random_state for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)

# Applying a support vector machine (linear kernel first).
from sklearn.svm import SVC  # support vector classification
svm = SVC(kernel='linear', random_state=0)
svm.fit(x_train, y_train)
pred = svm.predict(x_test)
print(svm.predict(x_test))

# Accuracy and confusion matrix.
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, pred))
print(confusion_matrix(y_test, pred))
# FIXED COMMENT: all counts lie on the diagonal (no off-diagonal
# misclassifications), which is why the accuracy is 100%.

# Repeat with an RBF kernel for comparison.
rbf_svm = SVC(kernel='rbf', random_state=0)
rbf_svm.fit(x_train, y_train)
pred = rbf_svm.predict(x_test)
print(accuracy_score(y_test, pred))
#...........................................................................
#.............................BUILDING SVM ALGO.............................
#...........................................................................
class SVM_classifier():
    """Binary linear SVM trained by subgradient descent on the hinge loss.

    Labels in {0, 1} (or any <=0 / >0 coding) are mapped internally to
    {-1, +1}. The decision function is sign(w.x - b).
    """

    def __init__(self, learn_rate, no_iter, lambda_para):
        """Store the hyperparameters.

        learn_rate  -- gradient-descent step size
        no_iter     -- number of full passes over the training data
        lambda_para -- L2 regularisation strength
        """
        # BUG FIX: was `_init` (single underscore), so Python never called it
        # and SVM_classifier(a, b, c) raised TypeError.
        self.learn_rate = learn_rate
        self.no_iter = no_iter
        self.lambda_para = lambda_para

    def fit(self, X, Y):
        """Fit the model to data matrix X (m rows, n features) and labels Y."""
        # m ---> number of datapoints (rows); n ---> number of features (columns).
        self.m, self.n = X.shape
        self.w = np.zeros(self.n)
        self.b = 0
        self.X = X
        self.Y = Y
        # Run subgradient descent for the configured number of iterations.
        for _ in range(self.no_iter):
            self.update_weight()

    def update_weight(self):
        """One full pass of per-sample subgradient updates on w and b."""
        # Map labels to {-1, +1} as required by the hinge-loss formulation.
        y_label = np.where(self.Y <= 0, -1, 1)
        for index, x_i in enumerate(self.X):
            # BUG FIX: original subtracted `self` (the instance!) instead of
            # the bias `self.b`, which raised a TypeError at runtime.
            condition = y_label[index] * (np.dot(x_i, self.w) - self.b) >= 1
            if condition:
                # Sample is outside the margin: only the regulariser contributes.
                dw = 2 * self.lambda_para * self.w
                db = 0
            else:
                # Margin violated: include the hinge-loss subgradient.
                dw = 2 * self.lambda_para * self.w - x_i * y_label[index]
                db = y_label[index]
            self.w = self.w - self.learn_rate * dw
            self.b = self.b - self.learn_rate * db

    def pred(self, X=None):
        """Return 0/1 predictions for X.

        X defaults to the training data (the original behaviour); passing a
        new array generalises the method to unseen samples.
        """
        data = self.X if X is None else X
        output = np.dot(data, self.w) - self.b
        # Classify by the sign of the decision function, mapped back to {0, 1}.
        predicted_labels = np.sign(output)
        y_hat = np.where(predicted_labels <= 0, 0, 1)
        return y_hat