-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathproject.py
More file actions
110 lines (88 loc) · 3.26 KB
/
project.py
File metadata and controls
110 lines (88 loc) · 3.26 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
from __future__ import print_function
from torch.autograd import Variable
import time
import numpy as np
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
# Data pipeline and hyperparameters.
# BUG FIX: MNIST images are single-channel, so Normalize must take
# 1-tuples — the original 3-tuples ((0.5, 0.5, 0.5)) raise a channel
# mismatch error inside torchvision.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
mnist_train = torchvision.datasets.MNIST(root='./data', train=True, transform=transform, download=True)
mnist_test = torchvision.datasets.MNIST(root='./data', train=False, transform=transform, download=True)
batchsize = 128   # defined before use so the loaders share the constant
# BUG FIX: the training loader should shuffle each epoch; shuffle=False
# feeds batches in a fixed order and hurts SGD convergence.
trainloader = torch.utils.data.DataLoader(mnist_train, batch_size=batchsize, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(mnist_test, batch_size=batchsize, shuffle=False, num_workers=2)
lambda1 = 0.01    # L1 regularization weight
lr = 1e-3         # learning rate
trials = 50
class MLP(nn.Module):
    """Three-layer fully connected classifier for flattened 28x28 inputs.

    Returns raw (unnormalized) logits of shape (N, 10), suitable for
    nn.CrossEntropyLoss.
    """

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(28*28, 500)
        self.fc2 = nn.Linear(500, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        # Flatten whatever batch shape arrives to (N, 784).
        x = x.view(-1, 28*28)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # BUG FIX: the original applied ReLU to the output layer, which
        # clamps logits to be non-negative and destroys the gradient for
        # any class whose logit goes negative; return raw logits instead.
        return self.fc3(x)
class CNN(nn.Module):
    """LeNet-style convolutional classifier for 1x28x28 inputs.

    Two conv -> 2x2 max-pool -> ReLU stages followed by two linear
    layers, producing 10 class scores per example.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # Stage 1: 28x28 -> 24x24 conv -> 12x12 pooled feature maps.
        h = F.relu(F.max_pool2d(self.conv1(x), 2, 2))
        # Stage 2: 12x12 -> 8x8 conv -> 4x4 pooled feature maps.
        h = F.relu(F.max_pool2d(self.conv2(h), 2, 2))
        # Flatten and classify (no activation between the two linear
        # layers, matching the original design).
        h = h.view(-1, 4*4*50)
        return self.fc2(self.fc1(h))
def l1_penalty(var):
    """Return the L1 norm (sum of absolute values) of tensor *var*."""
    return var.abs().sum()
def sparsify(param, sparsity):
    """Zero out entries of *param* with magnitude <= *sparsity*.

    Applies hard shrinkage: values in [-sparsity, sparsity] become 0;
    all other values are kept unchanged. Returns a tensor detached from
    the autograd graph.
    """
    # .detach() replaces the legacy Variable-era .data accessor; both
    # yield the values without gradient history.
    return F.hardshrink(param, sparsity).detach()
# NOTE(review): this is a fragment of a per-epoch evaluation loop — the
# enclosing `for epoch ...` training loop that defines `epoch`, `model`
# and `model2` is not visible in this excerpt (presumably lost when the
# file was extracted); indentation was also stripped. Confirm against the
# full source before editing.
# Measure test-set accuracy of `model2`.
total = 0
correct = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
# Variable(..., volatile=True) is the pre-0.4 PyTorch way to disable
# autograd during inference; modern code would use torch.no_grad().
inputs, targets = Variable(inputs, volatile=True), Variable(targets, volatile=True)
outputs = model2(inputs)
# Predicted class = argmax over the 10 logits.
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
print('Epoch : %d Test Acc : %.3f' % (epoch, 100.*correct/total))
print('--------------------------------------------------------------')
# Switch back to training mode for the next epoch.
model.train()
# Sparsity sweep: for each threshold, hard-shrink a copy of the trained
# model's weights, report how sparse the copy became, then measure its
# test accuracy. NOTE(review): `model`, `sparsify` and `testloader` are
# defined earlier in the file (the training loop is outside this excerpt).
for sparsity in [0.001, 0.005, 0.01, 0.05, 0.1]:
    # Deep-copy so the trained model itself is never mutated.
    model2 = copy.deepcopy(model)
    for param in model2.parameters():
        param.data = sparsify(param, sparsity)
    # Count zeroed weights with a vectorized comparison instead of the
    # original per-element Python loop (same counts, much faster).
    cnt, tot = 0, 0
    for param in model2.parameters():
        tot += param.data.numel()
        cnt += int((param.data == 0.).sum().item())
    print(str(cnt*100./tot) + "% sparse")
    model2.eval()
    total = 0
    correct = 0
    # torch.no_grad() replaces the removed Variable(..., volatile=True) API.
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            outputs = model2(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            # .item() keeps `correct` a plain int for the %-format below.
            correct += int(predicted.eq(targets).sum().item())
    print('Test Accuracy : %.3f' % (100.*correct/total))