-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain.py
More file actions
113 lines (101 loc) · 3.91 KB
/
train.py
File metadata and controls
113 lines (101 loc) · 3.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import argparse
import json
from collections import OrderedDict

import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
from torch import nn, optim
from torch.autograd import Variable

import futility
import fmodel
# Classifier input sizes for each supported backbone architecture.
# (Presumably consumed by fmodel.setup_network — defined here for reference.)
arch = {"vgg16": 25088,
        "densenet121": 1024}

# Command-line interface: one required positional (the dataset root) plus
# optional training hyperparameters. Defaults reproduce the original script.
parser = argparse.ArgumentParser(
    description = 'Parser for train.py'
)
parser.add_argument('data_dir', action="store", default="./flowers/")
parser.add_argument('--save_dir', action="store", default="./checkpoint.pth")
parser.add_argument('--arch', action="store", default="vgg16")
parser.add_argument('--learning_rate', action="store", type=float, default=0.01)
parser.add_argument('--hidden_units', action="store", dest="hidden_units", type=int, default=512)
parser.add_argument('--epochs', action="store", default=3, type=int)
parser.add_argument('--dropout', action="store", type=float, default=0.5)
parser.add_argument('--gpu', action="store", default="gpu")
args = parser.parse_args()

# Unpack parsed options into the module-level names the training code uses.
where = args.data_dir          # dataset root directory
path = args.save_dir           # checkpoint output path
lr = args.learning_rate
struct = args.arch
hidden_units = args.hidden_units
power = args.gpu               # 'gpu' requests CUDA; anything else means CPU
epochs = args.epochs
dropout = args.dropout

# Fix: only select CUDA when it was requested AND is actually available;
# the original set device='cuda' from the flag alone, which yields an
# invalid device on CPU-only machines.
if power == 'gpu' and torch.cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'
def main():
    """Train an image classifier on the flower dataset and save a checkpoint.

    Reads the module-level CLI settings (``where``, ``struct``, ``lr``, etc.)
    and delegates data loading to ``futility.load_data`` and model/criterion
    construction to ``fmodel.setup_network``. Prints running training and
    validation metrics every few steps, then writes a checkpoint dict to
    ``path``.
    """
    trainloader, validloader, testloader, train_data = futility.load_data(where)
    model, criterion = fmodel.setup_network(struct, dropout, hidden_units, lr, power)
    # Fix: honor the --learning_rate flag; the original hard-coded lr=0.001,
    # silently ignoring the parsed value.
    optimizer = optim.Adam(model.classifier.parameters(), lr=lr)

    # Decide once whether CUDA is both requested and available, and reuse
    # the answer for every tensor move below.
    use_cuda = torch.cuda.is_available() and power == 'gpu'
    if use_cuda:
        # Harmless if fmodel.setup_network already placed the model — .to()
        # is a no-op for tensors already on the target device.
        model.to('cuda')

    steps = 0
    running_loss = 0
    print_every = 5  # report metrics every N training batches
    print("--Training starting--")
    for epoch in range(epochs):
        for inputs, labels in trainloader:
            steps += 1
            if use_cuda:
                inputs, labels = inputs.to('cuda'), labels.to('cuda')
            optimizer.zero_grad()
            # Forward pass
            logps = model.forward(inputs)
            loss = criterion(logps, labels)
            # Backward pass
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

            if steps % print_every == 0:
                valid_loss = 0
                accuracy = 0
                model.eval()
                with torch.no_grad():
                    for inputs, labels in validloader:
                        # Fix: the original always moved validation batches to
                        # 'cuda', crashing CPU-only runs; gate on use_cuda.
                        if use_cuda:
                            inputs, labels = inputs.to('cuda'), labels.to('cuda')
                        logps = model.forward(inputs)
                        batch_loss = criterion(logps, labels)
                        valid_loss += batch_loss.item()
                        # Accuracy: compare top-1 prediction against labels.
                        ps = torch.exp(logps)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                print(f"Epoch {epoch+1}/{epochs}.. "
                      f"Loss: {running_loss/print_every:.3f}.. "
                      f"Validation Loss: {valid_loss/len(validloader):.3f}.. "
                      f"Accuracy: {accuracy/len(validloader):.3f}")
                running_loss = 0
                model.train()

    # Persist everything needed to rebuild the model for inference later.
    model.class_to_idx = train_data.class_to_idx
    torch.save({'structure': struct,
                'hidden_units': hidden_units,
                'dropout': dropout,
                'learning_rate': lr,
                'no_of_epochs': epochs,
                'state_dict': model.state_dict(),
                'class_to_idx': model.class_to_idx},
               path)
    print("Saved checkpoint!")


if __name__ == "__main__":
    main()