-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.py
More file actions
98 lines (84 loc) · 2.97 KB
/
index.py
File metadata and controls
98 lines (84 loc) · 2.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# Credit Scoring Model
# Objective: Predict if an individual is creditworthy (Good or Bad Credit)
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (
accuracy_score,
precision_score,
recall_score,
f1_score,
roc_auc_score,
classification_report
)
# -----------------------------
# 1️⃣ Simulated Dataset
# -----------------------------
np.random.seed(42)  # fixed seed so the simulated applicants are reproducible

# Column specs: (name, low, high) for np.random.randint.
# NOTE: the dict comprehension draws the columns in this exact order,
# preserving the RNG stream of the original construction.
_column_specs = [
    ("income", 2000, 10000),
    ("debt", 0, 8000),
    ("payment_history", 0, 10),   # higher = better
    ("loan_amount", 500, 5000),
    ("age", 18, 65),
]
data = pd.DataFrame(
    {name: np.random.randint(lo, hi, 500) for name, lo, hi in _column_specs}
)

# Target variable: creditworthy (1 = Good, 0 = Bad)
# An applicant is "Good" when income is high, debt is moderate,
# and payment history is above average.
_is_good = (
    (data["income"] > 4000)
    & (data["debt"] < 5000)
    & (data["payment_history"] > 4)
)
data["creditworthy"] = _is_good.astype(int)
# -----------------------------
# 2️⃣ Feature Engineering
# -----------------------------
# Debt burden relative to income; the +1 in the denominator guards
# against division by zero (harmless here since income >= 2000).
data["debt_to_income_ratio"] = data["debt"].div(data["income"].add(1))
# log1p compresses the income scale for the linear model.
data["income_log"] = np.log1p(data["income"])

# Separate the target vector from the feature matrix.
y = data["creditworthy"]
X = data.drop(columns=["creditworthy"])
# -----------------------------
# 3️⃣ Train-Test Split
# -----------------------------
# Stratify on the target so train and test keep the same Good/Bad
# proportions — without it an imbalanced target (as simulated above)
# can yield a test set with a skewed class mix and noisy metrics.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42, stratify=y)
# -----------------------------
# 4️⃣ Feature Scaling
# -----------------------------
# Fit the scaler on the training split only, then apply the same
# transformation to the test split — fitting on all data would leak
# test-set statistics into training.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# -----------------------------
# 5️⃣ Model Training
# -----------------------------
# Candidate classifiers; tree-based models get a fixed random_state
# for reproducibility.
models = {
    "Logistic Regression": LogisticRegression(),
    "Decision Tree": DecisionTreeClassifier(random_state=42),
    "Random Forest": RandomForestClassifier(random_state=42)
}
results = {}
for name, model in models.items():
    model.fit(X_train_scaled, y_train)
    y_pred = model.predict(X_test_scaled)
    # FIX: ROC-AUC must be computed from class-membership scores, not
    # hard 0/1 predictions — passing predict() output collapses the ROC
    # curve to a single operating point and misreports the AUC.
    y_score = model.predict_proba(X_test_scaled)[:, 1]
    results[name] = {
        "Accuracy": accuracy_score(y_test, y_pred),
        # zero_division=0 keeps the loop from raising a warning/error if
        # a model predicts only one class on the test split.
        "Precision": precision_score(y_test, y_pred, zero_division=0),
        "Recall": recall_score(y_test, y_pred, zero_division=0),
        "F1 Score": f1_score(y_test, y_pred, zero_division=0),
        "ROC-AUC": roc_auc_score(y_test, y_score)
    }
# -----------------------------
# 6️⃣ Model Evaluation
# -----------------------------
# One row per model, one column per metric.
results_df = pd.DataFrame(results).T
print("📊 Model Performance Comparison:\n")
print(results_df.round(3))

# Pick the model with the highest F1 score and show its full
# per-class precision/recall/F1 breakdown on the test set.
best_model_name = results_df["F1 Score"].idxmax()
best_predictions = models[best_model_name].predict(X_test_scaled)
print(f"\n🏆 Best Model: {best_model_name}\n")
print(classification_report(y_test, best_predictions))