-
Notifications
You must be signed in to change notification settings - Fork 520
Expand file tree
/
Copy pathDiabetes-prediction
More file actions
97 lines (58 loc) · 2.33 KB
/
Diabetes-prediction
File metadata and controls
97 lines (58 loc) · 2.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# -*- coding: utf-8 -*-
# Environment setup cell (Colab/Jupyter only): install a headless Java 8 JDK
# for the Spark JVM and a pinned PySpark release.
# NOTE: the leading "!" lines are IPython shell magics — this file runs only
# inside a notebook kernel, not as a plain Python script.
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
!pip install pyspark==2.4.4
"""# Environment Path"""
import os
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-8-openjdk-amd64'
"""# Run a SparkSession"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("spark").getOrCreate()
"""# Clone Diabetes Dataset"""
! git clone https://github.com/education454/diabetes_dataset
! ls diabetes_dataset
df = spark.read.csv('/content/diabetes_dataset/diabetes.csv',header=True,inferSchema=True)
df.show()
df.printSchema()
df.groupby('Outcome').count().show()
df.describe().show()
"""# Cleaning Data"""
for col in df.columns:
print(col+":",df[df[col].isNull()].count())
def count_zeros():
columns_list =['Glucose','BloodPressure','SkinThickness','Insulin','BMI']
for i in columns_list:
print(i+":",df[df[i]==0].count())
count_zeros()
# Impute missing values: in this dataset a 0 in Glucose / BloodPressure /
# SkinThickness / Insulin / BMI (df.columns[1:6]) is a missing-value
# placeholder, so replace each 0 with the column's mean.
from pyspark.sql.functions import when  # explicit import instead of wildcard

for name in df.columns[1:6]:
    # BUG FIX: compute the mean over the non-zero rows only — the original
    # aggregated over ALL rows, so the zero placeholders about to be replaced
    # biased the imputed mean downward.
    mean_value = df.filter(df[name] != 0).agg({name: 'mean'}).first()[0]
    print("Mean value for {} is {}".format(name, int(mean_value)))
    # Rebind df with the zeros replaced; non-zero values pass through unchanged.
    df = df.withColumn(name, when(df[name] == 0, int(mean_value)).otherwise(df[name]))
df.show()
"""# Correlation"""
for col in df.columns:
print("correlation to outcome for {} is {}".format(col,df.stat.corr('Outcome',col)))
"""# Feature Selection"""
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(inputCols=['Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age'],outputCol='features')
output_data = assembler.transform(df)
output_data.printSchema()
output_data.show()
"""# Build & Train Model"""
from pyspark.ml.classification import LogisticRegression
final_data = output_data.select('features','Outcome')
final_data.printSchema()
train , test = final_data.randomSplit([0.7,0.3])
models = LogisticRegression(labelCol='Outcome')
model = models.fit(train)
summary = model.summary
summary.predictions.describe().show()
"""# Evaluation & Test Model"""
from pyspark.ml.evaluation import BinaryClassificationEvaluator
predictions = model.evaluate(test)
predictions.predictions.show(20)
evaluator = BinaryClassificationEvaluator(rawPredictionCol='rawPrediction', labelCol='Outcome')
evaluator.evaluate(model.transform(test))
model.save("model")
from pyspark.ml.classification import LogisticRegressionModel
model = LogisticRegressionModel.load('model')