74 changes: 51 additions & 23 deletions frame/evaluate.py
@@ -8,7 +8,7 @@
from tqdm import tqdm
from sklearn import metrics

from frame.source import models
from frame.source import models, train
from torch_geometric.loader import DataLoader
device = "cuda" if torch.cuda.is_available() else "cpu"

@@ -31,6 +31,7 @@ def main():
path_checkpoint = config["path_checkpoint"]
model_name = config.get("model", "gat").lower()
batch_size = config.get("batch_size", 64)
task = config.get("task", "classification").lower()

# * Initialize
name = config["name"]
@@ -68,31 +69,58 @@ def main():
batch=data.batch)

# * Read prediction values
detach = torch.sigmoid(model_out).cpu().detach()
pred_lbl = (detach >= 0.5).int()
pred = list(torch.ravel(detach).cpu().detach().numpy())
if task == "classification":
detach = torch.sigmoid(model_out).cpu().detach()
pred = list(torch.ravel(detach).cpu().detach().numpy())
pred_lbl = (detach >= 0.5).int()
agg_lbl += pred_lbl.flatten().tolist()
else:
detach = model_out.cpu().detach()
pred = list(torch.ravel(detach).cpu().detach().numpy())
pred_lbl = None

# * Save prediction values
agg_pred += pred
agg_lbl += pred_lbl.flatten().tolist()
agg_true += data.y.flatten().tolist()

# * Get metrics
acc = metrics.accuracy_score(agg_true, agg_lbl)
acc_bal = metrics.balanced_accuracy_score(agg_true, agg_lbl)
f1 = metrics.f1_score(agg_true, agg_lbl, zero_division=0)
prec = metrics.precision_score(agg_true, agg_lbl, zero_division=0)
rec = metrics.recall_score(agg_true, agg_lbl, zero_division=0)
mcc = metrics.matthews_corrcoef(agg_true, agg_lbl)
roc_auc = metrics.roc_auc_score(agg_true, agg_pred)
avg_prec = metrics.average_precision_score(agg_true, agg_pred)

print(f"\n========= {name}"
f"\n{'Accuracy:':<19}{round(acc, 3)}"
f"\n{'Balanced Accuracy:':<19}{round(acc_bal, 3)}"
f"\n{'F1:':<19}{round(f1, 3)}"
f"\n{'MCC:':<19}{round(mcc, 3)}"
f"\n{'Precision:':<19}{round(prec, 3)}"
f"\n{'Recall:':<19}{round(rec, 3)}"
f"\n{'Avg. Precision:':<19}{round(avg_prec, 3)}"
f"\n{'ROC-AUC:':<19}{round(roc_auc, 3)}\n")
if task == "classification":
acc = metrics.accuracy_score(agg_true, agg_lbl)
acc_bal = metrics.balanced_accuracy_score(agg_true, agg_lbl)
f1 = metrics.f1_score(agg_true, agg_lbl, zero_division=0)
prec = metrics.precision_score(agg_true, agg_lbl, zero_division=0)
rec = metrics.recall_score(agg_true, agg_lbl, zero_division=0)
mcc = metrics.matthews_corrcoef(agg_true, agg_lbl)
roc_auc = metrics.roc_auc_score(agg_true, agg_pred)
avg_prec = metrics.average_precision_score(agg_true, agg_pred)

print(f"\n========= {name}"
f"\n{'Accuracy:':<19}{round(acc, 3)}"
f"\n{'Balanced Accuracy:':<19}{round(acc_bal, 3)}"
f"\n{'F1:':<19}{round(f1, 3)}"
f"\n{'MCC:':<19}{round(mcc, 3)}"
f"\n{'Precision:':<19}{round(prec, 3)}"
f"\n{'Recall:':<19}{round(rec, 3)}"
f"\n{'Avg. Precision:':<19}{round(avg_prec, 3)}"
f"\n{'ROC-AUC:':<19}{round(roc_auc, 3)}\n")

else:
r2 = metrics.r2_score(agg_true, agg_pred)
rmse = metrics.root_mean_squared_error(agg_true, agg_pred)
mae = metrics.mean_absolute_error(agg_true, agg_pred)

rto_r2, _ = train.reg_through_origin(agg_true, agg_pred)
ccc = train.concordance_correlation(agg_true, agg_pred)
roy_c = train.roy_criteria(agg_true, agg_pred, inverse=False)
roy_c_inv = train.roy_criteria(agg_true, agg_pred, inverse=True)
delta = train.golbraikh_tropsha(agg_true, agg_pred)

print(f"\n========= {name}"
f"\n{'R²:':<19}{round(r2, 3)}"
f"\n{'RMSE:':<19}{round(rmse, 3)}"
f"\n{'MAE:':<19}{round(mae, 3)}"
f"\n{'RTO R²:':<19}{round(rto_r2, 3)}"
f"\n{'CCC:':<19}{round(ccc, 3)}"
f"\n{'Roy Criteria:':<19}{round(roy_c, 3)}"
f"\n{'Roy C. Inverse:':<19}{round(roy_c_inv, 3)}"
f"\n{'Delta:':<19}{round(delta, 3)}\n")
18 changes: 14 additions & 4 deletions frame/explain.py
@@ -32,6 +32,7 @@ def main():
path_checkpoint = config["path_checkpoint"]
model_name = config.get("model", "gat").lower()
batch_size = config.get("batch_size", 64)
task = config.get("task", "classification").lower()

# * Initialize
name = config["name"]
@@ -62,12 +63,16 @@ def main():
model.load_state_dict(torch.load(path_checkpoint))
model.eval()

if task == "classification":
mode = "multiclass_classification"
else:
mode = "regression"
explainer = Explainer(model=model,
algorithm=CaptumExplainer("IntegratedGradients"),
explanation_type="model",
edge_mask_type="object",
node_mask_type="attributes",
model_config=dict(mode="multiclass_classification",
model_config=dict(mode=mode,
task_level="graph",
return_type="raw"))

@@ -81,9 +86,14 @@
batch=data.batch)

# * Read prediction values
detach = torch.sigmoid(model_out).cpu().detach()
pred_lbl = (detach >= 0.5).int()
pred = list(torch.ravel(detach).cpu().detach().numpy())
if task == "classification":
detach = torch.sigmoid(model_out).cpu().detach()
pred = list(torch.ravel(detach).cpu().detach().numpy())
pred_lbl = (detach >= 0.5).int()
else:
detach = model_out.cpu().detach()
pred = list(torch.ravel(detach).cpu().detach().numpy())
pred_lbl = [None] * detach.shape[0]

# * Explain
explanation = explainer(data.x.float(), data.edge_index,
14 changes: 11 additions & 3 deletions frame/source/explain/__init__.py
@@ -55,12 +55,15 @@ def retrieve_info(self, graphs):
def _info_atom(self, graphs):
batch_num = self.batch.unique()
masks = [self.mask[self.batch == b] for b in batch_num]
pred_label = ""

for idx in range(len(masks)):
data = graphs[idx]
real_label = int(data.y.cpu().numpy()[0])
pred = self.pred[idx]
pred_label = self.pred_lbl[idx].numpy()[0]

if self.pred_lbl[idx] is not None:
pred_label = self.pred_lbl[idx].numpy()[0]

text = (f"{data.idx},{data.smiles},{real_label},"
f"{pred_label},{pred:.3f}\n")
@@ -72,14 +75,17 @@ def _info_fragment(self, graphs):
def _info_fragment(self, graphs):
batch_num = self.batch.unique()
masks = [self.mask[self.batch == b] for b in batch_num]
pred_label = ""

for idx, node_mask in enumerate(masks):
data = graphs[idx]
real_label = int(data.y.cpu().numpy()[0])
pred = self.pred[idx]
pred_label = self.pred_lbl[idx].numpy()[0]
fragments = np.array(data.frag)

if self.pred_lbl[idx] is not None:
pred_label = self.pred_lbl[idx].numpy()[0]

mask_list = node_mask.cpu().numpy().tolist()
mask_list = [[f"{m:.3f}" for m in mask] for mask in mask_list]

@@ -97,12 +103,14 @@
def plot_explanations(self, graphs):
batch_num = self.batch.unique()
masks = [self.mask[self.batch == b] for b in batch_num]
pred_label = ""

for idx, node_mask in enumerate(masks):
data = graphs[idx]
name = data.idx
pred = self.pred[idx]
pred_label = self.pred_lbl[idx].numpy()[0]
if self.pred_lbl[idx] is not None:
pred_label = self.pred_lbl[idx].numpy()[0]

if self.loader == "default":
self._explain_atom(data, node_mask, pred, pred_label, name)
9 changes: 7 additions & 2 deletions frame/source/models/__init__.py
@@ -6,6 +6,7 @@


def model_setup(model_name, config):
task = config["task"]
model = select_model(model_name, config)

base_optimizer = torch.optim.Adam(model.parameters(),
@@ -19,8 +20,12 @@ def model_setup(model_name, config):
T_max=100,
eta_min=1e-6)

bce_weight = config["bce_weight"]
lossfn = torch.nn.BCEWithLogitsLoss(pos_weight=bce_weight).to(device)
if task == "classification":
bce_weight = config["bce_weight"]
lossfn = torch.nn.BCEWithLogitsLoss(pos_weight=bce_weight).to(device)

else:
lossfn = torch.nn.MSELoss()

return model, optimizer, scheduler, lossfn

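Note: the classification branch keeps the weighted BCEWithLogitsLoss, while regression falls back to plain MSELoss. A standalone sketch of the two loss choices, outside the repo's own setup code (tensor values are placeholders):

    import torch

    # Classification: binary cross-entropy on raw logits; pos_weight up-weights
    # the positive class and must be a tensor broadcastable against the targets.
    bce = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([3.0]))

    # Regression: mean squared error on the raw model output (no sigmoid).
    mse = torch.nn.MSELoss()

    out = torch.tensor([0.2, -1.3, 2.1])
    target = torch.tensor([1.0, 0.0, 1.0])
    bce_loss = bce(out, target)   # treats out as logits for binary labels
    mse_loss = mse(out, target)   # treats out as continuous predictions
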
11 changes: 10 additions & 1 deletion frame/source/train/__init__.py
@@ -1,8 +1,17 @@
from frame.source.train.optimizer import Lookahead
from frame.source.train.epoch import train_epoch, valid_epoch
from frame.source.train.metrics import (reg_through_origin,
concordance_correlation,
roy_criteria,
golbraikh_tropsha)


__all__ = ["train_epoch",
"valid_epoch",

"Lookahead"]
"Lookahead",

"reg_through_origin",
"concordance_correlation",
"roy_criteria",
"golbraikh_tropsha"]
77 changes: 53 additions & 24 deletions frame/source/train/epoch.py
@@ -5,6 +5,8 @@
from sklearn import metrics
import torch.backends.cudnn as cudnn

from frame.source.train import metrics as reg_metrics


random.seed(8)
np.random.seed(8)
@@ -49,7 +51,7 @@ def train_epoch(model, optim, scheduler, lossfn, loader):


@torch.no_grad()
def valid_epoch(model, loader):
def valid_epoch(model, task, loader):
model.eval()

true = []
@@ -66,34 +68,61 @@ def valid_epoch(model, task, loader):
batch=batch.batch)

# * Read prediction values
detach = torch.sigmoid(out).cpu().detach()
discretized = (detach >= 0.5).int()
batch_pred = list(torch.ravel(detach).cpu().detach().numpy())
batch_label = list(torch.ravel(discretized).cpu().detach().numpy())

batch_true = list(torch.ravel(batch.y).cpu().detach().numpy())

if task == "classification":
detach = torch.sigmoid(out).cpu().detach()
discretized = (detach >= 0.5).int()
batch_pred = list(torch.ravel(detach).cpu().detach().numpy())
batch_label = list(torch.ravel(discretized).cpu().detach().numpy())
label = label + batch_label

else:
detach = out.cpu().detach()
batch_pred = list(torch.ravel(detach).cpu().detach().numpy())

true = true + batch_true
pred = pred + batch_pred
label = label + batch_label

# * Get metrics
acc = metrics.accuracy_score(true, label)
acc_bal = metrics.balanced_accuracy_score(true, label)
f1 = metrics.f1_score(true, label, zero_division=0)
prec = metrics.precision_score(true, label, zero_division=0)
rec = metrics.recall_score(true, label, zero_division=0)
mcc = metrics.matthews_corrcoef(true, label)
roc_auc = metrics.roc_auc_score(true, pred)
avg_prec = metrics.average_precision_score(true, pred)

result = {"acc": round(acc, 3),
"acc_bal": round(acc_bal, 3),
"f1": round(f1, 3),
"prec": round(prec, 3),
"rec": round(rec, 3),
"mcc": round(mcc, 3),
"avg_prec": round(avg_prec, 3),
"roc_auc": round(roc_auc, 3)}
if task == "classification":
acc = metrics.accuracy_score(true, label)
acc_bal = metrics.balanced_accuracy_score(true, label)
f1 = metrics.f1_score(true, label, zero_division=0)
prec = metrics.precision_score(true, label, zero_division=0)
rec = metrics.recall_score(true, label, zero_division=0)
mcc = metrics.matthews_corrcoef(true, label)
roc_auc = metrics.roc_auc_score(true, pred)
avg_prec = metrics.average_precision_score(true, pred)

result = {"optim": round(mcc, 3),
"acc": round(acc, 3),
"acc_bal": round(acc_bal, 3),
"f1": round(f1, 3),
"prec": round(prec, 3),
"rec": round(rec, 3),
"mcc": round(mcc, 3),
"avg_prec": round(avg_prec, 3),
"roc_auc": round(roc_auc, 3)}
else:
r2 = metrics.r2_score(true, pred)
rmse = metrics.root_mean_squared_error(true, pred)
mae = metrics.mean_absolute_error(true, pred)

rto_r2, _ = reg_metrics.reg_through_origin(true, pred)
ccc = reg_metrics.concordance_correlation(true, pred)
roy_c = reg_metrics.roy_criteria(true, pred, inverse=False)
roy_c_inv = reg_metrics.roy_criteria(true, pred, inverse=True)
delta = reg_metrics.golbraikh_tropsha(true, pred)

result = {"optim": round(ccc, 3),
"r2": round(r2, 3),
"rmse": round(rmse, 3),
"mae": round(mae, 3),
"rto_r2": round(rto_r2, 3),
"ccc": round(ccc, 3),
"roy_c": round(roy_c, 3),
"roy_c_inv": round(roy_c_inv, 3),
"delta": round(delta, 3)}

return result
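
Note: both branches of valid_epoch now expose an "optim" entry (MCC for classification, CCC for regression) next to the task-specific metrics, so a caller can select checkpoints without knowing the task. A hypothetical driver loop sketching that use; the function, its arguments, and the checkpointing policy are illustrative and not taken from the repository:

    import torch

    from frame.source.train import train_epoch, valid_epoch

    def fit(model, optimizer, scheduler, lossfn, task,
            train_loader, valid_loader, num_epochs, path_checkpoint):
        best = -float("inf")
        for _ in range(num_epochs):
            train_epoch(model, optimizer, scheduler, lossfn, train_loader)
            result = valid_epoch(model, task, valid_loader)
            # "optim" is MCC or CCC depending on task; both lie in [-1, 1] and
            # are higher-is-better, so a single comparison covers both cases.
            if result["optim"] > best:
                best = result["optim"]
                torch.save(model.state_dict(), path_checkpoint)
        return best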