-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathCNN_Design.py
More file actions
121 lines (90 loc) · 3.23 KB
/
CNN_Design.py
File metadata and controls
121 lines (90 loc) · 3.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import timeit
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import random_split
from torchvision import transforms, datasets
import numpy as np
import random
from torchvision.models import resnet18, ResNet18_Weights
# Pretrained ResNet-18 weights and the matching input preprocessing pipeline
# (resize/crop/normalize) that those weights were trained with.
weight = ResNet18_Weights.DEFAULT
preprocess = weight.transforms()
def set_seed(seed):
    """Seed every RNG used by this script for reproducible runs.

    Covers Python's `random`, NumPy, and PyTorch (CPU and, when present,
    all CUDA devices), and pins cuDNN to its deterministic kernels.

    Args:
        seed: integer seed applied to every generator.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; disabling benchmark avoids algorithm
    # auto-selection that can vary between runs. Harmless no-ops on CPU.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# Seed all RNGs once at import time so data splits and weight init repeat.
set_seed(100)
def get_config_dict(pretrain):
    """Return the training hyperparameters for the requested phase.

    Args:
        pretrain: 0 for the from-scratch CNN, 1 for the pretrained model.

    Returns:
        dict with keys "batch_size", "lr", "num_epochs", "weight_decay",
        and "save_criteria".
    """
    # Settings common to both phases.
    config = {"batch_size": 100, "weight_decay": 1e-3}
    if pretrain == 0:
        # From-scratch training: larger LR, longer schedule, no save gate.
        config.update(lr=1e-3, num_epochs=15, save_criteria=None)
    else:
        # Fine-tuning: small LR, short schedule, keep best-accuracy model.
        config.update(lr=1e-5, num_epochs=8, save_criteria="accuracy")
    return config
class Net(nn.Module):
    """Small LeNet-style CNN for 10-class classification of 3x32x32 images.

    Two conv+pool stages reduce a 32x32 input to 32 feature maps of 5x5,
    followed by three fully-connected layers ending in 10 logits.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 3 -> 12 -> 32 channels, 5x5 kernels,
        # each followed by 2x2 max pooling (shared pool module).
        self.conv1 = nn.Conv2d(3, 12, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(12, 32, 5)
        # Classifier head: 32*5*5 flattened features -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(32 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) unnormalized logits."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.flatten(1)  # keep batch dim, flatten C*H*W
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)  # raw logits; loss applies softmax
class PretrainedNet(nn.Module):
    """Thin wrapper around torchvision's ResNet-18 loaded with the
    module-level pretrained `weight` (ResNet18_Weights.DEFAULT)."""

    def __init__(self):
        super().__init__()
        # Instantiate the pretrained backbone and echo its architecture.
        self.model = resnet18(weights=weight)
        print("Model summary:", self.model)

    def forward(self, x):
        """Delegate straight to the wrapped ResNet-18."""
        return self.model(x)
def load_dataset(pretrain):
    """Build the CIFAR-10 train/validation datasets for the given phase.

    Args:
        pretrain: 0 or 1. With 1, the pretrained model's `preprocess`
            pipeline is applied; with 0, a plain ToTensor + 0.5-mean/std
            normalization is used.

    Returns:
        (train_dataset, valid_dataset, test_transforms) where
        test_transforms is `preprocess` for the pretrained phase and
        None otherwise.
    """
    if pretrain == 1:
        # Use the transforms the pretrained weights expect, for train and test.
        transform = preprocess
        test_transforms = preprocess
    else:
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        test_transforms = None
    full_dataset = datasets.CIFAR10(root='./data', train=True, download=True,
                                    transform=transform)
    # 50k training images split 38k/12k; reproducible via the global seed.
    train_dataset, valid_dataset = random_split(full_dataset, [38000, 12000])
    return train_dataset, valid_dataset, test_transforms