from typing import Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter, ParameterList
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.inits import glorot, zeros
from torch_geometric.typing import (
Adj,
OptPairTensor,
OptTensor,
Size,
SparseTensor,
torch_sparse,
)
from torch_geometric.typing import NoneType # noqa
from torch_geometric.utils import (
add_self_loops,
is_torch_sparse_tensor,
remove_self_loops,
softmax,
)
from torch_geometric.utils.sparse import set_sparse_value


class wsGATConv(MessagePassing):
r"""The wsGAT graph attentional operator from the `"wsGAT: Weighted and Signed Graph Attention Networks for Link Prediction"
<https://doi.org/10.1007/978-3-030-93409-5_31>`_ paper.
```
Grassia, M., Mangioni, G. (2022). wsGAT: Weighted and Signed Graph Attention Networks for Link Prediction.
In: Benito, R.M., Cherifi, C., Cherifi, H., Moro, E., Rocha, L.M., Sales-Pardo, M. (eds) Complex Networks & Their Applications X.
COMPLEX NETWORKS 2021. Studies in Computational Intelligence, vol 1072. Springer, Cham. https://doi.org/10.1007/978-3-030-93409-5_31
```
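
    Args:
        in_channels (int or tuple): Size of each input sample. A tuple
            corresponds to the feature sizes of source and target nodes in a
            bipartite graph.
        out_channels (int): Size of each output sample (per head).
        heads (int, optional): Number of attention heads. (default: :obj:`1`)
        attention_layers (int, optional): Number of stacked weight matrices
            used to compute the attention coefficients. (default: :obj:`1`)
        concat (bool, optional): If set to :obj:`False`, the multi-head
            outputs are averaged instead of concatenated.
            (default: :obj:`True`)
        dropout (float, optional): Dropout probability applied to the
            normalized attention coefficients during training.
            (default: :obj:`0.0`)
        add_self_loops (bool, optional): If set to :obj:`False`, will not add
            self-loops to the input graph. (default: :obj:`True`)
        edge_dim (int, optional): Dimensionality of the edge features (e.g.,
            :obj:`1` for scalar signed edge weights) used in the attention
            computation. (default: :obj:`None`)
        fill_value (float or Tensor or str, optional): How to generate edge
            features of self-loops (passed to
            :meth:`torch_geometric.utils.add_self_loops`).
            (default: :obj:`"mean"`)
        bias (bool, optional): If set to :obj:`False`, the layer will not
            learn an additive bias. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.

    A minimal usage sketch (sizes and edge weights below are illustrative,
    not taken from the paper):

    .. code-block:: python

        conv = wsGATConv(in_channels=16, out_channels=8, heads=2,
                         attention_layers=2, edge_dim=1)
        x = torch.randn(4, 16)
        edge_index = torch.tensor([[0, 1, 2, 3],
                                   [1, 0, 3, 2]])
        edge_weight = torch.tensor([1.0, -0.5, 2.0, -1.0]).view(-1, 1)
        out = conv(x, edge_index, edge_attr=edge_weight)  # shape [4, 2 * 8]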
"""

    def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
heads: int = 1,
attention_layers: int = 1,
concat: bool = True,
dropout: float = 0.0,
add_self_loops: bool = True,
edge_dim: Optional[int] = None,
fill_value: Union[float, Tensor, str] = 'mean',
bias: bool = True,
**kwargs,
):
kwargs.setdefault('aggr', 'add')
super().__init__(node_dim=0, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.concat = concat
self.dropout = dropout
self.add_self_loops = add_self_loops
self.edge_dim = edge_dim
self.fill_value = fill_value
# In case we are operating in bipartite graphs, we apply separate
# transformations 'lin_src' and 'lin_dst' to source and target nodes:
if isinstance(in_channels, int):
self.lin_src = Linear(in_channels, heads * out_channels,
bias=False, weight_initializer='glorot')
self.lin_dst = self.lin_src
else:
self.lin_src = Linear(in_channels[0], heads * out_channels, False,
weight_initializer='glorot')
self.lin_dst = Linear(in_channels[1], heads * out_channels, False,
weight_initializer='glorot')
        # The learnable parameters to compute attention coefficients. The
        # attention input for each edge is the concatenation [x_i || x_j] of
        # the transformed target and source features, extended to
        # [x_i || x_j || e_ij] whenever edge features are used, so the
        # weights are sized accordingly:
        att_channels = (3 if edge_dim is not None else 2) * out_channels
        self.attention_layers = ParameterList()
        for _ in range(attention_layers - 1):
            self.attention_layers.append(
                Parameter(torch.empty(heads, att_channels, att_channels)))
        self.attention_layers.append(
            Parameter(torch.empty(heads, 1, att_channels)))
if edge_dim is not None:
self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False,
weight_initializer='glorot')
self.att_edge = Parameter(torch.empty(1, heads, out_channels))
else:
self.lin_edge = None
self.register_parameter('att_edge', None)
if bias and concat:
self.bias = Parameter(torch.empty(heads * out_channels))
elif bias and not concat:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()

    def reset_parameters(self):
super().reset_parameters()
self.lin_src.reset_parameters()
self.lin_dst.reset_parameters()
if self.lin_edge is not None:
self.lin_edge.reset_parameters()
for layer in self.attention_layers:
glorot(layer)
glorot(self.att_edge)
zeros(self.bias)

    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_attr: OptTensor = None, size: Size = None,
return_attention_weights=None):
# type: (Union[Tensor, OptPairTensor], Tensor, OptTensor, Size, NoneType) -> Tensor # noqa
# type: (Union[Tensor, OptPairTensor], SparseTensor, OptTensor, Size, NoneType) -> Tensor # noqa
# type: (Union[Tensor, OptPairTensor], Tensor, OptTensor, Size, bool) -> Tuple[Tensor, Tuple[Tensor, Tensor]] # noqa
# type: (Union[Tensor, OptPairTensor], SparseTensor, OptTensor, Size, bool) -> Tuple[Tensor, SparseTensor] # noqa
r"""Runs the forward pass of the module.
Args:
return_attention_weights (bool, optional): If set to :obj:`True`,
will additionally return the tuple
:obj:`(edge_index, attention_weights)`, holding the computed
attention weights for each edge. (default: :obj:`None`)
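
        A sketch of retrieving the attention coefficients (reusing the
        illustrative :obj:`conv`, :obj:`x`, :obj:`edge_index` and
        :obj:`edge_weight` from the class-level usage sketch):

        .. code-block:: python

            out, (edge_index_sl, alpha) = conv(
                x, edge_index, edge_attr=edge_weight,
                return_attention_weights=True)
            # `edge_index_sl` includes the self-loops added during the
            # forward pass; `alpha` holds one signed coefficient per edge
            # and head.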
"""
# NOTE: attention weights will be returned whenever
# `return_attention_weights` is set to a value, regardless of its
# actual value (might be `True` or `False`). This is a current somewhat
# hacky workaround to allow for TorchScript support via the
# `torch.jit._overload` decorator, as we can only change the output
# arguments conditioned on type (`None` or `bool`), not based on its
# actual value.
H, C = self.heads, self.out_channels
# We first transform the input node features. If a tuple is passed, we
# transform source and target node features via separate weights:
if isinstance(x, Tensor):
assert x.dim() == 2, "Static graphs not supported in 'wsGATConv'"
x_src = x_dst = self.lin_src(x).view(-1, H, C)
else: # Tuple of source and target node features:
x_src, x_dst = x
assert x_src.dim() == 2, "Static graphs not supported in 'wsGATConv'"
x_src = self.lin_src(x_src).view(-1, H, C)
if x_dst is not None:
x_dst = self.lin_dst(x_dst).view(-1, H, C)
if self.add_self_loops:
if isinstance(edge_index, Tensor):
# We only want to add self-loops for nodes that appear both as
# source and target nodes:
num_nodes = x_src.size(0)
if x_dst is not None:
num_nodes = min(num_nodes, x_dst.size(0))
num_nodes = min(size) if size is not None else num_nodes
edge_index, edge_attr = remove_self_loops(
edge_index, edge_attr)
edge_index, edge_attr = add_self_loops(
edge_index, edge_attr, fill_value=self.fill_value,
num_nodes=num_nodes)
elif isinstance(edge_index, SparseTensor):
if self.edge_dim is None:
edge_index = torch_sparse.set_diag(edge_index)
else:
raise NotImplementedError(
"The usage of 'edge_attr' and 'add_self_loops' "
"simultaneously is currently not yet supported for "
"'edge_index' in a 'SparseTensor' form")
x = (x_src, x_dst)
        # Next, we compute edge-level attention coefficients from the
        # transformed source and target node features (and edge features,
        # if present):
        # edge_updater_type: (x: OptPairTensor, edge_attr: OptTensor)
        alpha = self.edge_updater(edge_index, x=x, edge_attr=edge_attr)
# propagate_type: (x: OptPairTensor, alpha: Tensor)
out = self.propagate(edge_index, x=x, alpha=alpha, size=size)
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out = out + self.bias
if isinstance(return_attention_weights, bool):
if isinstance(edge_index, Tensor):
if is_torch_sparse_tensor(edge_index):
# TODO TorchScript requires to return a tuple
adj = set_sparse_value(edge_index, alpha)
return out, (adj, alpha)
else:
return out, (edge_index, alpha)
elif isinstance(edge_index, SparseTensor):
return out, edge_index.set_value(alpha, layout='coo')
else:
return out

    def edge_update(self, x_i: Tensor, x_j: Tensor,
edge_attr: OptTensor, index: Tensor, ptr: OptTensor,
size_i: Optional[int]) -> Tensor:
if index.numel() == 0:
raise NotImplementedError("Empty edge index not supported yet")
if edge_attr is not None and self.lin_edge is not None:
if edge_attr.dim() == 1:
edge_attr = edge_attr.view(-1, 1)
edge_attr = self.lin_edge(edge_attr)
edge_attr = edge_attr.view(-1, self.heads, self.out_channels)
alpha = torch.cat([x_i, x_j, edge_attr], dim=2)
else:
alpha = torch.cat([x_i, x_j], dim=2)
        # Pass each edge representation through the stacked attention layers;
        # tanh keeps the intermediate scores bounded while preserving their
        # sign.
        for layer in self.attention_layers:
            alpha = torch.matmul(layer, alpha.unsqueeze(-1)).squeeze(-1)
            alpha = alpha.tanh()
        alpha = alpha.squeeze(-1)
        # Sign-preserving normalization: the softmax is taken over the
        # magnitudes and the sign is re-applied, so that negatively signed
        # edges keep a negative attention coefficient.
        alpha = alpha.sign() * softmax(alpha.abs(), index, ptr, size_i)
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        return alpha

    def message(self, x_j: Tensor, alpha: Tensor) -> Tensor:
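        # Weight each neighbor's message by its (signed) attention
        # coefficient; a negative coefficient flips the sign of the
        # neighbor's contribution.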
return alpha.unsqueeze(-1) * x_j

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, heads={self.heads}, '
                f'attention_layers={len(self.attention_layers)})')
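

# The block below is not part of the original module: a minimal smoke test
# sketching how the layer might be used on a tiny random graph with signed
# scalar edge weights. All sizes and values are illustrative only.
if __name__ == '__main__':
    torch.manual_seed(0)

    num_nodes, in_channels, out_channels, heads = 4, 16, 8, 2
    x = torch.randn(num_nodes, in_channels)
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 0, 3, 2]])
    # One scalar, signed weight per edge:
    edge_weight = torch.tensor([1.0, -0.5, 2.0, -1.0]).view(-1, 1)

    conv = wsGATConv(in_channels, out_channels, heads=heads,
                     attention_layers=2, edge_dim=1)
    out, (edge_index_sl, alpha) = conv(x, edge_index, edge_attr=edge_weight,
                                       return_attention_weights=True)
    print(out.shape)    # [num_nodes, heads * out_channels] -> [4, 16]
    print(alpha.shape)  # [num_edges + num_self_loops, heads] -> [8, 2]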