-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathattribution_tracker.py
More file actions
261 lines (208 loc) · 10.9 KB
/
attribution_tracker.py
File metadata and controls
261 lines (208 loc) · 10.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
import torch
import torch.nn as nn
import torch.nn.functional as F
class CSA:
    """Contribution tracking ("CSA") for a layered spiking neural network.

    Re-runs the wrapped model one timestep at a time and maintains, per layer,
    how much each original input feature contributed to each neuron's membrane
    potential — split into spike-promoting evidence (``contribs``) and
    spike-suppressing evidence (``silence_contribs``).

    NOTE(review): assumptions about ``model`` that this file does not show —
    confirm against the model class: it exposes ``layers`` (iterable of
    spiking layers callable as ``layer(inp, state) -> (spikes, new_state)``,
    each owning an ``nn.Linear`` at ``psp_func``), an ``output_layer`` of the
    same form, a scalar membrane-decay factor ``vdecay``, and
    ``num_hidden_layers``. Layer state is assumed to be a 3-tuple of
    ``[batch, dim]`` tensors (exact meaning defined by the layer class).
    """
    def __init__(self, model, num_inputs, single_pred = True, mean_input = None):
        # model:       the spiking network to attribute (see class docstring).
        # num_inputs:  dimensionality of the raw input being attributed to.
        # single_pred: if True, forward() returns only the final-timestep
        #              output-layer attribution; otherwise per-timestep stacks.
        # mean_input:  optional per-feature reference level for real-valued
        #              inputs; values above/below it count as excitatory /
        #              inhibitory evidence. None means inputs are binary spikes.
        self.model = model
        self.num_inputs = num_inputs
        self.device = next(model.parameters()).device
        self.single_pred = single_pred
        self.mean_input = mean_input
        self.layer_dims = self.get_layer_dimensions()
    def get_layer_dimensions(self):
        """Return ``out_features`` of every ``nn.Linear`` in the model, in
        ``named_modules()`` order.

        NOTE(review): assumes module registration order matches forward-pass
        order (hidden layers first, output layer last) — confirm for this model.
        """
        dims = []
        for name, module in self.model.named_modules():
            if isinstance(module, nn.Linear):
                dims.append(module.out_features)
        return dims
    @staticmethod
    def get_contrib_fracs(contributions, spikes):
        """
        Weight by whether contribution was critical for spike occurring.

        contributions: [batch, neurons, input_dim] accumulated per-input
            contributions to each neuron's membrane potential.
        spikes: [batch, neurons] (near-)binary spike indicator for the same
            neurons (entries > 0.5 are treated as spikes).

        Returns [batch, neurons, input_dim] normalized fractions; entries are
        zero for non-spiking neurons and for inputs that were not critical.
        """
        total_v = contributions.sum(dim=2)
        # Spike threshold. Hard-coded to 1.0 — presumably matches the model's
        # firing threshold; TODO confirm against the layer implementation.
        threshold = 1.0
        # Leave-one-out potential: what the neuron would have had WITHOUT each
        # individual input's contribution.
        v_without = total_v.unsqueeze(2) - contributions
        is_spiking = (spikes > 0.5).unsqueeze(2)
        # An input is "critical" if removing its contribution would have kept
        # the (spiking) neuron below threshold.
        would_not_spike = (v_without < threshold)
        critical = is_spiking & would_not_spike
        weighted = contributions * critical.float()
        # Normalize by total absolute critical mass; the epsilon guards the
        # all-zero case (no critical contributions) against division by zero.
        total = weighted.abs().sum(dim=2, keepdim=True) + 1e-8
        frac = weighted / total
        return frac
    def forward(self, spike_input):
        """Run the model over the whole sequence while tracking attributions.

        spike_input: [batch, time, input_dim]

        Returns a pair ``(contribs, silence_contribs)`` for the OUTPUT layer:
        with ``single_pred=True`` each is the final-timestep accumulator,
        shape [batch, out_dim, input_dim]; otherwise each is the per-timestep
        stack, shape [time, batch, out_dim, input_dim]. (Intermediate layers'
        stacks are built but not returned.)
        """
        batch_size, T, _ = spike_input.shape
        num_layers = len(self.model.layers) + 1
        # One zero-initialized 3-tuple state per layer (hidden layers, then
        # the output layer appended below).
        states = []
        for dim in self.layer_dims[:-1]:
            states.append((
                torch.zeros(batch_size, dim, device=self.device),
                torch.zeros(batch_size, dim, device=self.device),
                torch.zeros(batch_size, dim, device=self.device)
            ))
        states.append((
            torch.zeros(batch_size, self.layer_dims[-1], device=self.device),
            torch.zeros(batch_size, self.layer_dims[-1], device=self.device),
            torch.zeros(batch_size, self.layer_dims[-1], device=self.device)
        ))
        # Per-layer attribution accumulators: [batch, layer_out, input_dim].
        contribs = [torch.zeros(batch_size, dim, self.num_inputs, device=self.device)
                    for dim in self.layer_dims]
        silence_contribs = [torch.zeros(batch_size, dim, self.num_inputs, device=self.device)
                            for dim in self.layer_dims]
        contribs_t = []
        silence_contribs_t = []
        for t in range(T):
            x = spike_input[:, t, :]
            layer_input = x
            for i, layer in enumerate(self.model.layers):
                spike_out, states[i] = layer(layer_input, states[i])
                # Attributions decay at the same rate as the membrane potential.
                contribs[i] *= self.model.vdecay
                silence_contribs[i] *= self.model.vdecay
                if i == 0:
                    # First hidden layer: attribute directly to raw inputs.
                    if self.mean_input is not None:
                        # Real-valued input: distance above/below the reference
                        # level is the excitatory/inhibitory drive.
                        # NOTE(review): this tensor conversion runs every
                        # timestep; it could be hoisted out of the loop.
                        threshold = (self.mean_input.to(self.device)
                                     if torch.is_tensor(self.mean_input)
                                     else torch.tensor(self.mean_input, device=self.device))
                        above_mean = torch.clamp(layer_input - threshold, min=0.0)
                        below_mean = torch.clamp(threshold - layer_input, min=0.0)
                    else:
                        # Spike input
                        above_mean = (layer_input > 0.5).float()
                        below_mean = 1 - above_mean
                    weight = layer.psp_func.weight
                    # [out, in] weights times per-input drive -> [batch, out, in].
                    contribs[i] += torch.einsum('oi,bi->boi', weight, above_mean)
                    silence_contribs[i] += torch.einsum('oi,bi->boi', weight, below_mean)
                else:
                    # Deeper layers: propagate the previous layer's attributions
                    # through its critical-contribution fractions.
                    weight = layer.psp_func.weight
                    spike_fracs = self.get_contrib_fracs(contribs[i-1], layer_input)
                    silence_fracs = self.get_contrib_fracs(silence_contribs[i-1], 1 - layer_input)
                    contribs[i] += torch.bmm(weight.unsqueeze(0).expand(batch_size, *weight.shape),
                                             spike_fracs)
                    silence_contribs[i] += torch.bmm(weight.unsqueeze(0).expand(batch_size, *weight.shape),
                                                     silence_fracs)
                    # Reset masks: contribs are zeroed where the previous layer
                    # spiked (membrane reset), while silence_contribs are zeroed
                    # where it did NOT spike. NOTE(review): the asymmetry looks
                    # deliberate but is worth confirming against the derivation.
                    contribs[i-1] *= (layer_input < 0.5).float().unsqueeze(2)
                    silence_contribs[i-1] *= (layer_input > 0.5).float().unsqueeze(2)
                layer_input = spike_out
            # Output layer mirrors the hidden-layer update above.
            spike_out, states[-1] = self.model.output_layer(layer_input, states[-1])
            contribs[-1] *= self.model.vdecay
            silence_contribs[-1] *= self.model.vdecay
            weight = self.model.output_layer.psp_func.weight
            if self.model.num_hidden_layers == 0:
                # No hidden layers: the output attributes directly to the input.
                contribs[-1] += torch.einsum('oi,bi->boi', weight, layer_input)
                silence_contribs[-1] += torch.einsum('oi,bi->boi', weight, 1 - layer_input)
            else:
                spike_fracs = self.get_contrib_fracs(contribs[-2], layer_input)
                silence_fracs = self.get_contrib_fracs(silence_contribs[-2], 1 - layer_input)
                contribs[-1] += torch.bmm(weight.unsqueeze(0).expand(batch_size, *weight.shape),
                                          spike_fracs)
                silence_contribs[-1] += torch.bmm(weight.unsqueeze(0).expand(batch_size, *weight.shape),
                                                  silence_fracs)
                contribs[-2] *= (layer_input < 0.5).float().unsqueeze(2)
                silence_contribs[-2] *= (layer_input > 0.5).float().unsqueeze(2)
            # Snapshot every layer's accumulators for this timestep.
            contribs_t.append([c.clone() for c in contribs])
            silence_contribs_t.append([c.clone() for c in silence_contribs])
        if self.single_pred:
            # Only the final-timestep output-layer attribution is requested.
            return contribs[-1], silence_contribs[-1]
        # Stack snapshots over time: per layer, [time, batch, layer_out, input_dim].
        contribs_t = [torch.stack([step[i] for step in contribs_t], dim=0) for i in range(num_layers)]
        silence_contribs_t = [torch.stack([step[i] for step in silence_contribs_t], dim=0) for i in range(num_layers)]
        return contribs_t[-1], silence_contribs_t[-1]
class TSATracker:
    """
    Converted from https://github.com/ElisaNguyen/tsa-explanations to work for my model setup

    Temporal Spike Attribution (TSA): per timestep, decayed presynaptic
    activity traces ``N`` are chained through each layer's weight matrix to
    form an input-to-class contribution matrix, which is then weighted by the
    softmax of the output layer's membrane potential.

    NOTE(review): assumptions about ``model`` not visible in this file —
    confirm against the model class: attributes ``device``, ``vdecay``,
    ``input_dim``, ``num_classes``, ``layer_sizes``, ``layers`` (spiking
    layers callable as ``layer(inp, state) -> (out, new_state)``, each with an
    ``nn.Linear`` at ``psp_func``) and ``output_layer`` of the same form.
    """
    def __init__(self, model, include_nonspikes=True, mean_input=None):
        # include_nonspikes: if True, absent spikes subtract a small
        #   (1/fan-in scaled) penalty from the trace (TSA variant that also
        #   credits silence); if False, only spikes accumulate.
        # mean_input: reference level for real-valued inputs; if None it is
        #   estimated from the first batch seen by forward() (see note there).
        self.model = model
        self.device = model.device
        self.include_nonspikes = include_nonspikes
        self.decay = torch.tensor(float(model.vdecay), device=self.device, dtype=torch.float32)
        self.in_dim = model.input_dim
        self.out_dim = model.num_classes
        # Detached float32 copies of each layer's weight, hidden layers first,
        # output layer last.
        self.weights = []
        for lyr in getattr(model, "layers", []):
            self.weights.append(lyr.psp_func.weight.detach().to(self.device, dtype=torch.float32))
        self.weights.append(model.output_layer.psp_func.weight.detach().to(self.device, dtype=torch.float32))
        # Presynaptic (input-side) dimension of each weight matrix.
        # NOTE(review): both branches append input_dim first, so the if/else
        # is redundant — kept as-is to preserve the original code exactly.
        self.presyn_dims = []
        if len(getattr(model, "layers", [])) == 0:
            self.presyn_dims.append(model.input_dim)
        else:
            self.presyn_dims.append(model.input_dim)
            for lyr in model.layers:
                self.presyn_dims.append(lyr.psp_func.weight.shape[0])
        self.mean_input = mean_input
    def _init_states(self, batch_size: int):
        """Build zero-initialized 3-tuple states for every hidden layer and
        the output layer.

        NOTE(review): the meaning of the three state tensors is defined by the
        layer class; forward() reads index 2 of the output state as the
        membrane potential — confirm that layout.
        """
        hidden_states = []
        for size in self.model.layer_sizes:
            state = (
                torch.zeros(batch_size, size, device=self.device, dtype=torch.float32),
                torch.zeros(batch_size, size, device=self.device, dtype=torch.float32),
                torch.zeros(batch_size, size, device=self.device, dtype=torch.float32),
            )
            hidden_states.append(state)
        out_state = (
            torch.zeros(batch_size, self.out_dim, device=self.device, dtype=torch.float32),
            torch.zeros(batch_size, self.out_dim, device=self.device, dtype=torch.float32),
            torch.zeros(batch_size, self.out_dim, device=self.device, dtype=torch.float32),
        )
        return hidden_states, out_state
    def forward(self, inputs: torch.Tensor):
        """Compute per-timestep TSA attribution maps.

        inputs: [batch, time, in_dim] input sequence (spikes or real values).

        Returns [time, batch, num_classes, in_dim]: for each timestep, each
        input feature's contribution to each class, weighted by the current
        softmax over the output membrane potential.
        """
        inputs = inputs.to(self.device, dtype=torch.float32)
        B, T, D = inputs.shape
        L = len(self.weights)
        # N_list[l]: decayed presynaptic activity trace feeding weight l,
        # shape [batch, presyn_dim].
        N_list = [torch.zeros(B, self.presyn_dims[l], device=self.device, dtype=torch.float32) for l in range(L)]
        hidden_states, out_state = self._init_states(B)
        if self.mean_input is None:
            # NOTE(review): caches the per-feature mean of THIS batch on the
            # instance, so later forward() calls reuse the first batch's mean.
            # Presumably intentional caching — verify against callers.
            self.mean_input = inputs.mean(dim=(0, 1)).to(self.device, dtype=torch.float32)  # [in_dim]
        else:
            self.mean_input = (self.mean_input.to(self.device, dtype=torch.float32)
                               if torch.is_tensor(self.mean_input)
                               else torch.tensor(float(self.mean_input), device=self.device, dtype=torch.float32))
        A_time = []
        for t in range(T):
            x_t = inputs[:, t, :]
            layer_in = x_t
            # s_pres[l] is the presynaptic activity entering weight l this step.
            s_pres = []
            for i, layer in enumerate(self.model.layers):
                s_pres.append(layer_in)
                layer_out, hidden_states[i] = layer(layer_in, hidden_states[i])
                layer_in = layer_out
            s_pres.append(layer_in)
            out_spike, out_state = self.model.output_layer(layer_in, out_state)
            # Softmax over the output membrane potential (state index 2) gives
            # the class weighting for this timestep's attribution.
            u_out = out_state[2]
            P_t = F.softmax(u_out, dim=1)
            # Update each layer's decayed activity trace.
            for l in range(L):
                s_l = s_pres[l]
                if l == 0 and not torch.all((s_l == 0) | (s_l == 1)):
                    # Real-valued (non-binary) input layer: split activity into
                    # above/below the reference level.
                    above = torch.clamp(s_l - self.mean_input, min=0.0)
                    below = torch.clamp(self.mean_input - s_l, min=0.0)
                    if self.include_nonspikes:
                        n_l = s_l.shape[1]
                        # Below-reference activity subtracts, scaled by fan-in.
                        N_list[l] = self.decay * N_list[l] + above - (below / max(n_l, 1))
                    else:
                        N_list[l] = self.decay * N_list[l] + above
                else:
                    # Binary spike activity.
                    if self.include_nonspikes:
                        n_l = s_l.shape[1]
                        # Absent spikes subtract a 1/fan-in penalty.
                        N_list[l] = self.decay * N_list[l] + s_l - (1.0 / max(n_l, 1)) * (1.0 - s_l)
                    else:
                        N_list[l] = self.decay * N_list[l] + s_l
            # M_l = diag(N_l) @ W_l^T : [batch, presyn_dim, postsyn_dim].
            M_list = []
            for l in range(L):
                W = self.weights[l]
                W_col = W.t()
                Nl = N_list[l]
                M_l = W_col.unsqueeze(0) * Nl.unsqueeze(2)
                M_list.append(M_l)
            # Chain layer matrices from the input identity:
            # [B, in, in] -> [B, in, h1] -> ... -> [B, in, out].
            CI = torch.eye(self.in_dim, device=self.device, dtype=torch.float32).unsqueeze(0)
            CI = CI.expand(B, self.in_dim, self.in_dim).clone()
            for l in range(L):
                CI = torch.bmm(CI, M_list[l])
            # Weight by class probability, then transpose to [B, out, in].
            CI_weighted = CI * P_t.unsqueeze(1)
            A_bt = CI_weighted.transpose(1, 2).contiguous()
            A_time.append(A_bt)
        return torch.stack(A_time, dim=0)