-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathleaf_classification.py
More file actions
52 lines (41 loc) · 1.72 KB
/
leaf_classification.py
File metadata and controls
52 lines (41 loc) · 1.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
'''
Leaf-species classification via multinomial logistic regression.

Reads train.csv / test.csv from the working directory, tunes the
regularization strength C with a grid search, and writes a Kaggle-style
submission.csv of per-class probabilities.

Using basic logistic regression with some tuning
I achieved a top-100 rank :)
Abhishek Bhatt (Jamia Millia Islamia B.Tech (CSE))
'''
# import required libraries
import numpy as np
import pandas as pd
# FIX: sklearn.grid_search was deprecated in 0.18 and removed in 0.20;
# GridSearchCV now lives in sklearn.model_selection.
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler

# read files from the current working directory
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

# prepare y_train by encoding the non-numerical species labels as integers
le = LabelEncoder().fit(train.species)
y_train = le.transform(train.species)

# drop the id column as it carries no predictive information
X_train = train.drop(['id', 'species'], axis=1)

# feature scaling (very important: lbfgs converges poorly on unscaled data)
scaler = StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)

# exhaustive search for the optimal value of C,
# the inverse regularization strength (C = 1/lambda)
C_vals = {'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 200, 500, 1000]}
logreg = LogisticRegression(solver='lbfgs', multi_class='multinomial')
# FIX: the 'log_loss' scorer name was renamed to 'neg_log_loss' in
# sklearn 0.18 (scorers must follow the greater-is-better convention).
grid = GridSearchCV(logreg, C_vals, cv=10, scoring='neg_log_loss')
# fitting data for training
grid.fit(X_train, y_train)

# id column used as the index of the submission file
ids = test.id

# build the test matrix, applying the SAME scaler fitted on training data
X_test = test.drop(['id'], axis=1)
X_test = scaler.transform(X_test)

# predicted probability matrix: one column per leaf species
y_test = grid.predict_proba(X_test)

# write the submission file: rows indexed by id, columns named by species
submission = pd.DataFrame(y_test, index=ids, columns=le.classes_)
submission.to_csv('submission.csv')