-
Notifications
You must be signed in to change notification settings - Fork 65
Expand file tree
/
Copy pathsubmodules.py
More file actions
executable file
·92 lines (76 loc) · 2.75 KB
/
submodules.py
File metadata and controls
executable file
·92 lines (76 loc) · 2.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# freda (todo) :
import torch.nn as nn
import torch
import numpy as np
def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
    """Build a conv -> (optional BatchNorm2d) -> LeakyReLU(0.1) block.

    When ``batchNorm`` is truthy the convolution drops its bias term
    (BatchNorm's affine shift makes it redundant) and a BatchNorm2d layer
    is inserted before the activation. Padding keeps the spatial size for
    stride 1 (``(kernel_size - 1) // 2``).
    """
    pad = (kernel_size - 1) // 2
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=stride, padding=pad, bias=not batchNorm),
    ]
    if batchNorm:
        layers.append(nn.BatchNorm2d(out_planes))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    return nn.Sequential(*layers)
def i_conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, bias = True):
    """Conv block with NO activation: Conv2d -> optional BatchNorm2d.

    Unlike ``conv``, the bias flag is caller-controlled in both branches
    and no LeakyReLU is appended. Padding keeps spatial size for stride 1.
    """
    pad = (kernel_size - 1) // 2
    convolution = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                            stride=stride, padding=pad, bias=bias)
    if batchNorm:
        return nn.Sequential(convolution, nn.BatchNorm2d(out_planes))
    return nn.Sequential(convolution)
def predict_flow(in_planes):
    """Return a 3x3, stride-1, padding-1 conv projecting features to a
    2-channel flow field (same spatial resolution as the input)."""
    flow_channels = 2  # (dx, dy) per pixel
    return nn.Conv2d(in_planes, flow_channels,
                     kernel_size=3, stride=1, padding=1, bias=True)
def deconv(in_planes, out_planes):
    """2x spatial upsampling block: ConvTranspose2d (k=4, s=2, p=1) + LeakyReLU(0.1).

    With kernel 4 / stride 2 / padding 1 the output is exactly twice the
    input height and width.
    """
    upsample = nn.ConvTranspose2d(in_planes, out_planes,
                                  kernel_size=4, stride=2, padding=1, bias=True)
    return nn.Sequential(upsample, nn.LeakyReLU(0.1, inplace=True))
class tofp16(nn.Module):
    """Stateless module that casts its input tensor to half precision (fp16)."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # No parameters or buffers — just a dtype cast.
        return input.half()
class tofp32(nn.Module):
    """Stateless module that casts its input tensor to single precision (fp32)."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # Inverse of tofp16 — just a dtype cast, no learned state.
        return input.float()
def init_deconv_bilinear(weight):
    """Fill a (out_ch, in_ch, H, W) deconv weight in place with a bilinear
    upsampling kernel; every (out, in) channel pair gets the same filter.

    Bug fix: the original wrote ``bilinear[x, y]`` into an array allocated
    as ``[heigh, width]``, which raises IndexError (or mis-fills) for any
    non-square kernel. The filter value is symmetric in (x, y), so square
    kernels — the common case — are unaffected by the fix.

    NOTE(review): the scale ``f`` and center ``c`` are derived from width
    only; for non-square kernels confirm that is the intended behavior.
    """
    f_shape = weight.size()
    heigh, width = f_shape[-2], f_shape[-1]
    f = np.ceil(width / 2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    bilinear = np.zeros([heigh, width])
    for x in range(width):
        for y in range(heigh):
            value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
            bilinear[y, x] = value  # row index = y (height), column = x (width)
    weight.data.fill_(0.)
    # Hoist the numpy->torch conversion out of the channel fill loop.
    kernel = torch.from_numpy(bilinear)
    for i in range(f_shape[0]):
        for j in range(f_shape[1]):
            weight.data[i, j, :, :] = kernel
def save_grad(grads, name):
    """Return a backward hook that records the incoming gradient.

    The returned callable stores each gradient it receives into
    ``grads[name]`` (overwriting any previous value) and returns None,
    so the gradient itself is left unmodified by the hook.
    """
    def _capture(grad):
        grads[name] = grad
    return _capture
'''
def save_grad(grads, name):
def hook(grad):
grads[name] = grad
return hook
import torch
from channelnorm_package.modules.channelnorm import ChannelNorm
model = ChannelNorm().cuda()
grads = {}
a = 100*torch.autograd.Variable(torch.randn((1,3,5,5)).cuda(), requires_grad=True)
a.register_hook(save_grad(grads, 'a'))
b = model(a)
y = torch.mean(b)
y.backward()
'''