-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_cdm_public_model.py
More file actions
184 lines (158 loc) · 6.47 KB
/
train_cdm_public_model.py
File metadata and controls
184 lines (158 loc) · 6.47 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
import argparse
import json
import os
import pickle
from datetime import datetime, timezone
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from utils.cdm_features import apply_cdm_derived_features
def _safe_float_series(s: pd.Series) -> pd.Series:
return pd.to_numeric(s, errors="coerce")
def _prepare_features(df: pd.DataFrame) -> tuple[pd.DataFrame, list[str], list[str]]:
    """Apply CDM-derived feature engineering and pick model columns.

    Returns the enriched frame plus the ordered lists of numeric and
    categorical feature names that are actually present in it (candidate
    columns missing from the input are silently skipped).
    """
    enriched = apply_cdm_derived_features(df)
    # Candidate columns cover both upper- and lower-case schema variants
    # seen across public CDM exports.
    cat_candidates = (
        "SAT1_OBJECT_TYPE",
        "SAT2_OBJECT_TYPE",
        "SAT1_RCS",
        "SAT2_RCS",
        "sat1_OBJECT_TYPE",
        "sat2_OBJECT_TYPE",
        "sat1_RCS_SIZE",
        "sat2_RCS_SIZE",
    )
    num_candidates = (
        "MIN_RNG",
        "log_min_rng",
        "hours_to_tca",
        "delta_inclination",
        "delta_mean_motion",
        "excl_vol_sum",
    )
    categorical = [name for name in cat_candidates if name in enriched.columns]
    numeric = [name for name in num_candidates if name in enriched.columns]
    return enriched, numeric, categorical
def main() -> None:
    """Train and persist a logistic-regression model on public CDM data.

    CLI flow:
      * Load either pre-split train/test (and optional val) CSVs, or a single
        ``--dataset-csv`` that is split internally with ``train_test_split``.
      * Derive features via ``_prepare_features`` (numeric + categorical).
      * Fit an impute/scale/one-hot preprocessing pipeline feeding a
        class-weight-balanced ``LogisticRegression``.
      * Write the pickled pipeline and a JSON metrics report (classification
        report + confusion matrix for test, and val when provided) into
        ``--out-dir``, timestamped in UTC.

    Exits via ``SystemExit`` on a missing target column or a dataset with
    fewer than 100 rows.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset-csv", required=True)
    parser.add_argument("--train-csv", default=None)
    parser.add_argument("--val-csv", default=None)
    parser.add_argument("--test-csv", default=None)
    parser.add_argument(
        "--target",
        choices=["emergency_reportable", "pc_bucket_high", "pc_risk_class", "pc_quantile_class", "pc_present"],
        default="emergency_reportable",
    )
    parser.add_argument("--out-dir", default="models/cdm_public")
    parser.add_argument("--test-size", type=float, default=0.2)
    parser.add_argument("--seed", type=int, default=42)
    args = parser.parse_args()
    # Pre-split mode requires BOTH --train-csv and --test-csv; otherwise we
    # fall back to splitting --dataset-csv ourselves (a lone --train-csv is
    # silently ignored).
    if args.train_csv and args.test_csv:
        df_train_raw = pd.read_csv(args.train_csv)
        df_test_raw = pd.read_csv(args.test_csv)
        df_val_raw = pd.read_csv(args.val_csv) if args.val_csv else None
    else:
        df_all = pd.read_csv(args.dataset_csv)
        if args.target not in df_all.columns:
            raise SystemExit(f"Target column not found: {args.target}")
        if len(df_all) < 100:
            raise SystemExit("Dataset too small for meaningful evaluation (need >= 100 rows)")
        # Stratify only when the target actually has >1 class, since
        # train_test_split rejects single-class stratification.
        # NOTE(review): NaN values in the target would make stratify raise
        # here — presumably upstream data is clean; confirm.
        df_train_raw, df_test_raw = train_test_split(
            df_all,
            test_size=args.test_size,
            random_state=args.seed,
            stratify=df_all[args.target] if df_all[args.target].nunique() > 1 else None,
        )
        df_val_raw = None
    # Validate the target column exists in every provided split.
    for df_part in [p for p in [df_train_raw, df_val_raw, df_test_raw] if p is not None]:
        if args.target not in df_part.columns:
            raise SystemExit(f"Target column not found: {args.target}")
    # Feature lists come from the TRAIN split only; val/test reuse them so
    # all splits share the same column set.
    df_train, numeric_cols, categorical_cols = _prepare_features(df_train_raw)
    df_test, _, _ = _prepare_features(df_test_raw)
    df_val = None
    if df_val_raw is not None:
        df_val, _, _ = _prepare_features(df_val_raw)
    # Never let the raw PC (probability-of-collision) column leak into the
    # feature matrix alongside the derived target.
    drop_cols = {args.target}
    if "PC" in df_train.columns:
        drop_cols.add("PC")
    def to_x_y(df_in: pd.DataFrame) -> tuple[pd.DataFrame, pd.Series]:
        """Split *df_in* into (X, y), dropping rows with a non-numeric target."""
        y_raw = pd.to_numeric(df_in[args.target], errors="coerce")
        mask = y_raw.notna()
        y_out = y_raw[mask].astype(int)
        x_out = df_in.drop(columns=[c for c in drop_cols if c in df_in.columns], errors="ignore")
        # Keep only the model's feature columns, in the train-derived order.
        x_out = x_out[[c for c in numeric_cols + categorical_cols if c in x_out.columns]]
        x_out = x_out.loc[mask].copy()
        if x_out.empty:
            raise SystemExit("No features available to train on")
        return x_out, y_out
    X_train, y_train = to_x_y(df_train)
    X_test, y_test = to_x_y(df_test)
    X_val = None
    y_val = None
    if df_val is not None:
        X_val, y_val = to_x_y(df_val)
    # Numeric features: median imputation then standardization.
    numeric_transformer = Pipeline(
        steps=[
            ("imputer", SimpleImputer(strategy="median")),
            ("scaler", StandardScaler()),
        ]
    )
    # Categorical features: mode imputation then one-hot; unseen categories
    # at predict time are ignored rather than raising.
    categorical_transformer = Pipeline(
        steps=[
            ("imputer", SimpleImputer(strategy="most_frequent")),
            ("onehot", OneHotEncoder(handle_unknown="ignore")),
        ]
    )
    preprocessor = ColumnTransformer(
        transformers=[
            ("num", numeric_transformer, numeric_cols),
            ("cat", categorical_transformer, categorical_cols),
        ],
        remainder="drop",
    )
    # class_weight="balanced" compensates for the rare-positive class typical
    # of conjunction-event labels; high max_iter avoids convergence warnings.
    clf = LogisticRegression(max_iter=4000, class_weight="balanced")
    model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", clf)])
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    report = classification_report(y_test, y_pred, output_dict=True, zero_division=0)
    matrix = confusion_matrix(y_test, y_pred).tolist()
    val_report = None
    val_matrix = None
    if X_val is not None and y_val is not None:
        y_val_pred = model.predict(X_val)
        val_report = classification_report(y_val, y_val_pred, output_dict=True, zero_division=0)
        val_matrix = confusion_matrix(y_val, y_val_pred).tolist()
    os.makedirs(args.out_dir, exist_ok=True)
    # UTC timestamp keys both artifacts so repeated runs never collide.
    stamp = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
    model_path = os.path.join(args.out_dir, f"{args.target}_{stamp}.pkl")
    metrics_path = os.path.join(args.out_dir, f"{args.target}_{stamp}_metrics.json")
    with open(model_path, "wb") as f:
        pickle.dump(model, f)
    with open(metrics_path, "w", encoding="utf-8") as f:
        json.dump(
            {
                "target": args.target,
                "n_rows_train": int(len(df_train)),
                "n_rows_val": int(len(df_val)) if df_val is not None else 0,
                "n_rows_test": int(len(df_test)),
                "features_numeric": numeric_cols,
                "features_categorical": categorical_cols,
                "test_classification_report": report,
                "test_confusion_matrix": matrix,
                "val_classification_report": val_report,
                "val_confusion_matrix": val_matrix,
            },
            f,
            indent=2,
        )
    print(f"Saved model -> {model_path}")
    print(f"Saved metrics -> {metrics_path}")
    # Truncate the console echo of the report; the full version is on disk.
    print(json.dumps(report, indent=2)[:1200])
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()