models.py
import torch
import torch.nn as nn

class Autoencoder(nn.Module):
    """Sequence autoencoder: embedding -> BiLSTM encoder -> bottleneck -> BiLSTM decoder."""

    def __init__(self, nuc_pair_size, embedding_dims, e_hidden_dims,
                 bottleneck_dims, d_hidden_dims, seq_length, dropout_size=0):
        super().__init__()
        nuc_pair_size += 1  # reserve one extra token index (e.g. for padding)
        self.seq_length = seq_length
        # Encoder layers
        self.embedding = nn.Embedding(nuc_pair_size, embedding_dims)
        self.rnn1 = nn.LSTM(input_size=embedding_dims, hidden_size=e_hidden_dims,
                            num_layers=1, bidirectional=True)
        self.fc1 = nn.Linear(in_features=e_hidden_dims * 2, out_features=bottleneck_dims)
        self.a1 = nn.ReLU(True)  # defined but not applied anywhere below
        self.dropout = nn.Dropout(dropout_size)
        # Decoder layers
        self.fc2 = nn.Linear(in_features=bottleneck_dims, out_features=d_hidden_dims)
        self.rnn2 = nn.LSTM(input_size=d_hidden_dims, hidden_size=d_hidden_dims,
                            num_layers=1, bidirectional=True)
        self.fc3 = nn.Linear(in_features=d_hidden_dims * 2, out_features=nuc_pair_size)

    def encoder(self, x):
        # x: (batch, seq_length) integer tokens -> (seq_length, batch, embedding_dims)
        x = self.embedding(x).permute(1, 0, 2)
        _, (hidden_states, _) = self.rnn1(x)
        # Concatenate the final forward and backward hidden states: (batch, e_hidden_dims * 2)
        hidden_states = torch.cat((hidden_states[-2, :, :], hidden_states[-1, :, :]), dim=1)
        lv = self.fc1(hidden_states)  # latent vector: (batch, bottleneck_dims)
        lv = self.dropout(lv)
        return lv

    def decoder(self, lv):
        lv = self.fc2(lv)  # (batch, d_hidden_dims)
        # Repeat the latent vector at every time step to seed the decoder LSTM:
        # (seq_length, batch, d_hidden_dims)
        output, _ = self.rnn2(lv.repeat(self.seq_length, 1, 1))
        output = output.permute(1, 0, 2)  # (batch, seq_length, d_hidden_dims * 2)
        logits = self.fc3(output)  # (batch, seq_length, nuc_pair_size)
        # Transpose to (batch, nuc_pair_size, seq_length) for nn.CrossEntropyLoss
        return logits.transpose(1, 2)

    def forward(self, x):
        lv = self.encoder(x)
        logits = self.decoder(lv)
        return lv.squeeze(), logits
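
# Usage sketch (illustrative only; the hyperparameter values below are
# assumptions chosen for demonstration, not taken from elsewhere in this repo):
#
#   model = Autoencoder(nuc_pair_size=4, embedding_dims=16, e_hidden_dims=32,
#                       bottleneck_dims=8, d_hidden_dims=32, seq_length=50)
#   tokens = torch.randint(0, 4, (10, 50))  # (batch, seq_length) integer tokens
#   latent, logits = model(tokens)
#   # latent: (10, 8) bottleneck vectors
#   # logits: (10, 5, 50), i.e. (batch, nuc_pair_size + 1, seq_length),
#   # shaped for nn.CrossEntropyLoss against the input tokens
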
class GanBlock(nn.Module):
    """Two-layer MLP block shared by the Generator and Critic."""

    def __init__(self, block_dims):
        super().__init__()
        self.nnet = nn.Sequential(
            nn.Linear(block_dims, block_dims),
            nn.ReLU(True),
            nn.Linear(block_dims, block_dims),
        )

    def forward(self, x):
        return self.nnet(x)  # residual connection (+ x) currently disabled


class Generator(nn.Module):
    """Stack of GanBlocks applied in sequence."""

    def __init__(self, n_layers, block_dims):
        super().__init__()
        self.gnet = nn.Sequential(*[GanBlock(block_dims) for _ in range(n_layers)])

    def forward(self, x):
        return self.gnet(x)


class Critic(nn.Module):
    """Stack of GanBlocks; note the output is block_dims-dimensional, not a scalar score."""

    def __init__(self, n_layers, block_dims):
        super().__init__()
        self.cnet = nn.Sequential(*[GanBlock(block_dims) for _ in range(n_layers)])

    def forward(self, x):
        return self.cnet(x)
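

if __name__ == "__main__":
    # Minimal smoke test: a sketch under assumed hyperparameters, not a
    # training pipeline from this repo. The GAN components operate on the
    # autoencoder's latent space, so block_dims is set to bottleneck_dims here.
    batch, seq_length = 10, 50
    ae = Autoencoder(nuc_pair_size=4, embedding_dims=16, e_hidden_dims=32,
                     bottleneck_dims=8, d_hidden_dims=32, seq_length=seq_length)
    tokens = torch.randint(0, 4, (batch, seq_length))
    latent, logits = ae(tokens)
    print(latent.shape, logits.shape)  # torch.Size([10, 8]) torch.Size([10, 5, 50])

    gen = Generator(n_layers=3, block_dims=8)
    critic = Critic(n_layers=3, block_dims=8)
    fake_latent = gen(torch.randn(batch, 8))
    # The Critic returns a block_dims-sized vector per sample, not a scalar;
    # a reduction such as .mean(dim=1) would be one way to get per-sample scores.
    print(critic(fake_latent).shape)  # torch.Size([10, 8])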