This repository was archived by the owner on Aug 7, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_configuration_tests.py
More file actions
60 lines (46 loc) · 2.02 KB
/
model_configuration_tests.py
File metadata and controls
60 lines (46 loc) · 2.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
#1. Imports
import random
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import wandb
from data.datasets import make_loaders, ColorizationDataset
from models.conv_autoencoder import ColorizationNet
# 2. Reproducibility Settings
# NOTE(review): the original seeded with `hash("some string") % 2**32 - 1`.
# That is broken in two ways: (a) str hashes are salted per process
# (PYTHONHASHSEED), so every run received *different* seeds, defeating
# reproducibility; (b) the expression parses as `(hash(...) % 2**32) - 1`,
# which can evaluate to -1 — out of range for np.random.seed (ValueError).
# A single fixed integer seed gives actual run-to-run repeatability.
SEED = 42
torch.backends.cudnn.deterministic = True  # force deterministic cuDNN kernels
random.seed(SEED)                  # Python stdlib RNG
np.random.seed(SEED)               # NumPy RNG
torch.manual_seed(SEED)            # torch CPU (and current-device CUDA) RNG
torch.cuda.manual_seed_all(SEED)   # every visible GPU (no-op without CUDA)

# Device configuration: prefer GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Dataset of face images; loaders are built per-run from this one instance.
path = "data/face_images"
dataset = ColorizationDataset(root_dir=path)

# Baseline hyperparameters shared by every experiment.
base_config = {
    "epochs": 30,
    "batch_size": 16,
    "learning_rate": 5e-4,
    "weight_decay": 1e-4,
    "criterion": "MSELoss",
}

# One override per run; each comparison varies a single hyperparameter,
# pairing the baseline value against one alternative.
# NOTE(review): the baseline combination is re-run once per comparison
# (runs 1, 3 and 5 are identical configs) — presumably intentional for
# per-comparison wandb grouping; confirm before deduplicating.
_overrides = [
    {"criterion": "MSELoss"},      # Comparison 1: loss function
    {"criterion": "SmoothL1Loss"},
    {"weight_decay": 1e-4},        # Comparison 2: regularization (weight decay)
    {"weight_decay": 1e-3},
    {"learning_rate": 5e-4},       # Comparison 3: learning rate
    {"learning_rate": 1e-3},
]
configs = [dict(base_config, **override) for override in _overrides]
# 3. Run one wandb experiment per configuration.
for cfg in configs:
    # Context manager guarantees wandb.finish() even if training raises.
    with wandb.init(project="Neural_Networks_Project_UAB", config=cfg):
        config = wandb.config
        train_loader, val_loader, test_loader = make_loaders(dataset, config['batch_size'])
        model = ColorizationNet().to(device)
        print(model)
        # Resolve the loss class by name from torch.nn. The original
        # if/else silently fell back to SmoothL1Loss for any unrecognized
        # criterion string; this is identical for "MSELoss"/"SmoothL1Loss"
        # but fails loudly (AttributeError) on a typo, and generalizes to
        # any torch.nn loss named in the config.
        criterion = getattr(nn, config['criterion'])()
        optimizer = optim.AdamW(model.parameters(),
                                lr=config['learning_rate'],
                                weight_decay=config['weight_decay'])
        # Halve the LR after 3 epochs without validation-loss improvement.
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3)
        model.train_model(train_loader, val_loader, config['epochs'],
                          criterion, optimizer, scheduler, device)
        model.test_model(test_loader, device)