pos_utils.py

import torch
# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
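# For a scalar position p and channel pair index k (k = 0 .. d/2 - 1), the 1D
# embedding below computes sin(p * w_k) for the first d/2 output channels and
# cos(p * w_k) for the remaining d/2, with w_k = 1 / 10000 ** (2k / d). The 2D
# variants split the channel budget evenly across the spatial coordinates.

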
def get_2d_sincos_pos_embed_grid(embed_dim, grid_size, cls_token=False):
    """
    embed_dim: output dimension for each position
    grid_size: int, height and width of the square grid
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (without or with cls_token)
    """
    grid_h = torch.arange(grid_size, dtype=torch.float32)
    grid_w = torch.arange(grid_size, dtype=torch.float32)
    grid = torch.meshgrid(grid_w, grid_h, indexing='xy')  # here w goes first
    grid = torch.stack(grid, dim=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # prepend an all-zero row as the position embedding of the [CLS] token
        pos_embed = torch.cat([torch.zeros([1, embed_dim]), pos_embed], dim=0)
    return pos_embed


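# Example (a minimal sketch, not part of the original module): for a ViT-style
# 14x14 patch grid with embed_dim=768, get_2d_sincos_pos_embed_grid(768, 14)
# returns a (196, 768) tensor; with cls_token=True the result is (197, 768),
# with an all-zero first row for the [CLS] slot.
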
def get_2dplus_sincos_pos_embed_grid(embed_dim, grid_size, patch_size, cls_token=False):
    """
    embed_dim: output dimension for each position
    grid_size: int, height and width of the square grid
    patch_size: offset added to each coordinate so that both corners of every
        patch, (w, h) and (w + patch_size, h + patch_size), are encoded
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (without or with cls_token)
    """
    grid_h = torch.arange(grid_size, dtype=torch.float32)
    grid_w = torch.arange(grid_size, dtype=torch.float32)
    grid_w, grid_h = torch.meshgrid(grid_w, grid_h, indexing='xy')  # here w goes first
    grid = torch.stack([grid_w, grid_h, grid_w + patch_size, grid_h + patch_size], dim=0)
    grid = grid.reshape([4, 1, grid_size, grid_size])
    pos_embed = get_2dplus_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # prepend an all-zero row as the position embedding of the [CLS] token
        pos_embed = torch.cat([torch.zeros([1, embed_dim]), pos_embed], dim=0)
    return pos_embed


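# Example (a minimal sketch, not part of the original module): with
# embed_dim=768, grid_size=14, patch_size=16, each of the four coordinates
# (w, h, w + 16, h + 16) receives a 192-dim sin-cos code; the four codes are
# concatenated into a (196, 768) table.
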
def get_2d_sincos_pos_embed_coords(embed_dim, patch_coords, cls_token=False):
    """
    patch_coords: [b, q, 2] tensor of per-patch (x, y) pixel coordinates;
        a [b, q, 4] tensor of corner pairs is accepted, in which case only
        the leading (x, y) corner is used
    return:
    pos_embed: [b, q, embed_dim] or [b, 1+q, embed_dim] (without or with cls_token)
    """
    if patch_coords.shape[2] == 4:
        # corner pairs were passed in; keep only the top-left (x, y) corner
        patch_coords = patch_coords[..., 0:2]
    b, q, t = patch_coords.shape
    assert t == 2
    # hardcoded normalization: pixel coordinates to grid units, assuming a patch size of 16
    patch_coords = patch_coords.reshape((b * q, 2)).float() / 16
    grid = patch_coords.permute(1, 0)
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    pos_embed = pos_embed.reshape((b, q, embed_dim))
    if cls_token:
        pos_embed = torch.cat(
            [torch.zeros([b, 1, embed_dim], dtype=patch_coords.dtype, device=patch_coords.device), pos_embed], dim=1)
    return pos_embed.detach()


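# Example (a minimal sketch, not part of the original module):
#   coords = torch.tensor([[[0, 0], [16, 0], [0, 16]]])   # (b=1, q=3, 2) pixel coords
#   get_2d_sincos_pos_embed_coords(768, coords).shape     # -> (1, 3, 768)
# The division by 16 above maps these to grid positions (0, 0), (1, 0), (0, 1).
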
def get_2dplus_sincos_pos_embed_coords(embed_dim, patch_coords, cls_token=False):
    """
    patch_coords: [b, q, 4] tensor of per-patch corner pairs (x1, y1, x2, y2)
        in pixel coordinates
    return:
    pos_embed: [b, q, embed_dim] or [b, 1+q, embed_dim] (without or with cls_token)
    """
    b, q, t = patch_coords.shape
    assert t == 4
    # hardcoded normalization: pixel coordinates to grid units, assuming a patch size of 16
    patch_coords = patch_coords.reshape((b * q, 4)).float() / 16
    grid = patch_coords.permute(1, 0)
    pos_embed = get_2dplus_sincos_pos_embed_from_grid(embed_dim, grid)
    pos_embed = pos_embed.reshape((b, q, embed_dim))
    if cls_token:
        pos_embed = torch.cat(
            [torch.zeros([b, 1, embed_dim], dtype=patch_coords.dtype, device=patch_coords.device), pos_embed], dim=1)
    return pos_embed.detach()


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0
    # use half of the dimensions to encode each coordinate
    # (note: w goes first upstream, so grid[0] holds the w/x coordinate)
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
    emb = torch.cat([emb_h, emb_w], dim=1)  # (H*W, D)
    return emb


def get_2dplus_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 4 == 0
    # use a quarter of the dimensions to encode each of the four coordinates
    emb_h1 = get_1d_sincos_pos_embed_from_grid(embed_dim // 4, grid[0])
    emb_w1 = get_1d_sincos_pos_embed_from_grid(embed_dim // 4, grid[1])
    emb_h2 = get_1d_sincos_pos_embed_from_grid(embed_dim // 4, grid[2])
    emb_w2 = get_1d_sincos_pos_embed_from_grid(embed_dim // 4, grid[3])
    emb = torch.cat([emb_h1, emb_w1, emb_h2, emb_w2], dim=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a tensor of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = torch.arange(end=embed_dim // 2, dtype=torch.float32, device=pos.device)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = torch.einsum('m,d->md', pos, omega)  # (M, D/2), outer product

    emb_sin = torch.sin(out)  # (M, D/2)
    emb_cos = torch.cos(out)  # (M, D/2)

    emb = torch.cat([emb_sin, emb_cos], dim=1)  # (M, D)
    return emb
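

if __name__ == "__main__":
    # Smoke test (a minimal sketch, not part of the original module): exercise
    # each entry point and print the resulting shapes.
    pe = get_2d_sincos_pos_embed_grid(768, 14, cls_token=True)
    print(pe.shape)  # torch.Size([197, 768])

    pe_plus = get_2dplus_sincos_pos_embed_grid(768, 14, patch_size=16)
    print(pe_plus.shape)  # torch.Size([196, 768])

    coords = torch.tensor([[[0, 0], [16, 0], [0, 16]]])  # (b=1, q=3, 2) pixel coords
    print(get_2d_sincos_pos_embed_coords(768, coords).shape)  # torch.Size([1, 3, 768])

    corners = torch.tensor([[[0, 0, 16, 16], [16, 0, 32, 16]]])  # (b=1, q=2, 4) corner pairs
    print(get_2dplus_sincos_pos_embed_coords(768, corners).shape)  # torch.Size([1, 2, 768])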