attention.py
import torch
from torch import nn
from torch.nn import functional as F
import math


class SelfAttention(nn.Module):
    def __init__(self, n_heads, d_embed, in_proj_bias=True, out_proj_bias=True):
        super().__init__()
        # Combine the Wq, Wk, and Wv matrices into a single projection
        self.in_proj = nn.Linear(d_embed, 3 * d_embed, bias=in_proj_bias)
        # Represents the Wo matrix
        self.out_proj = nn.Linear(d_embed, d_embed, bias=out_proj_bias)
        self.n_heads = n_heads
        self.d_head = d_embed // n_heads

    def forward(self, x, causal_mask=False):
        # x: (Batch_Size, Seq_Len, Dim)
        input_shape = x.shape
        batch_size, sequence_length, d_embed = input_shape
        interim_shape = (batch_size, sequence_length, self.n_heads, self.d_head)

        # (Batch_Size, Seq_Len, Dim) -> (Batch_Size, Seq_Len, Dim * 3) -> 3 tensors of shape (Batch_Size, Seq_Len, Dim)
        q, k, v = self.in_proj(x).chunk(3, dim=-1)

        # (Batch_Size, Seq_Len, Dim) -> (Batch_Size, Seq_Len, H, Dim / H) -> (Batch_Size, H, Seq_Len, Dim / H)
        q = q.view(interim_shape).transpose(1, 2)
        k = k.view(interim_shape).transpose(1, 2)
        v = v.view(interim_shape).transpose(1, 2)

        # (Batch_Size, H, Seq_Len, Seq_Len)
        weight = q @ k.transpose(-1, -2)

        if causal_mask:
            # Mask out the upper triangle (above the principal diagonal) so each
            # position can only attend to itself and earlier positions
            mask = torch.ones_like(weight, dtype=torch.bool).triu(1)
            weight.masked_fill_(mask, -torch.inf)

        # Scale by sqrt(d_head) before the softmax
        weight /= math.sqrt(self.d_head)
        weight = F.softmax(weight, dim=-1)

        # (Batch_Size, H, Seq_Len, Seq_Len) @ (Batch_Size, H, Seq_Len, Dim / H) -> (Batch_Size, H, Seq_Len, Dim / H)
        output = weight @ v

        # (Batch_Size, H, Seq_Len, Dim / H) -> (Batch_Size, Seq_Len, H, Dim / H) -> (Batch_Size, Seq_Len, Dim)
        output = output.transpose(1, 2)
        output = output.reshape(input_shape)
        output = self.out_proj(output)
        return output
class CrossAttention(nn.Module):
    def __init__(self, n_heads, d_embed, d_cross, in_proj_bias=True, out_proj_bias=True):
        super().__init__()
        self.q_proj = nn.Linear(d_embed, d_embed, bias=in_proj_bias)
        self.k_proj = nn.Linear(d_cross, d_embed, bias=in_proj_bias)
        self.v_proj = nn.Linear(d_cross, d_embed, bias=in_proj_bias)
        self.out_proj = nn.Linear(d_embed, d_embed, bias=out_proj_bias)
        self.n_heads = n_heads
        self.d_head = d_embed // n_heads

    def forward(self, x, y):
        # x (query sequence):   (Batch_Size, Seq_Len_Q, Dim_Q)
        # y (context sequence): (Batch_Size, Seq_Len_KV, Dim_KV)
        input_shape = x.shape
        batch_size, sequence_length, d_embed = input_shape
        interim_shape = (batch_size, -1, self.n_heads, self.d_head)

        # Queries come from x; keys and values come from the context y
        q = self.q_proj(x)
        k = self.k_proj(y)
        v = self.v_proj(y)

        # Split into heads: (Batch_Size, Seq_Len, Dim) -> (Batch_Size, H, Seq_Len, Dim / H)
        q = q.view(interim_shape).transpose(1, 2)
        k = k.view(interim_shape).transpose(1, 2)
        v = v.view(interim_shape).transpose(1, 2)

        # (Batch_Size, H, Seq_Len_Q, Seq_Len_KV)
        weight = q @ k.transpose(-1, -2)
        weight /= math.sqrt(self.d_head)
        weight = F.softmax(weight, dim=-1)

        output = weight @ v

        # Merge the heads back: (Batch_Size, H, Seq_Len_Q, Dim / H) -> (Batch_Size, Seq_Len_Q, Dim_Q)
        output = output.transpose(1, 2).contiguous()
        output = output.view(input_shape)
        output = self.out_proj(output)
        return output
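

# Minimal usage sketch (not part of the original file): exercises both modules with
# made-up shapes. The embedding sizes, sequence lengths, and head count below are
# assumptions chosen only to illustrate the expected input/output shapes.
if __name__ == "__main__":
    batch_size, seq_len, d_embed = 2, 16, 64   # assumed toy dimensions
    ctx_len, d_cross = 10, 128                 # assumed context (e.g. text encoder) dimensions

    x = torch.randn(batch_size, seq_len, d_embed)
    context = torch.randn(batch_size, ctx_len, d_cross)

    self_attn = SelfAttention(n_heads=4, d_embed=d_embed)
    cross_attn = CrossAttention(n_heads=4, d_embed=d_embed, d_cross=d_cross)

    # Self-attention preserves the input shape; causal_mask=True blocks attention to future positions
    out_self = self_attn(x, causal_mask=True)
    print(out_self.shape)   # torch.Size([2, 16, 64])

    # Cross-attention: queries from x, keys/values from the context sequence
    out_cross = cross_attn(x, context)
    print(out_cross.shape)  # torch.Size([2, 16, 64])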