import numpy as np
from scipy.special import psi
from scipy.special import polygamma as pg
import time
import pickle
# This is the LDA implementation to use.
# A corpus can be passed in, or loaded from .csv files in Joe's format.
# K = number of topics
# eta = hyperparameter for topics (i.e. pseudo word counts)
# alpha = initial Dirichlet hyperparameter
# update_alpha = boolean determining whether alpha is updated at each iteration
# word_index is a dictionary storing the position of each feature in the numpy arrays.
# It only needs to be passed in the multi-file case, where features must appear in the
# same order across files; in the single-file case it is created internally.
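# A corpus is a nested dictionary mapping document names to {feature: intensity}
# dictionaries. A minimal (hypothetical) example of the expected shape:
#
#   corpus = {'300.1_55.2': {'fragment_100.0757': 1200.0, 'loss_18.0106': 400.0}}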
class VariationalLDA(object):
    def __init__(self,corpus=None,K=20,eta=0.1,
                 alpha=1,update_alpha=True,word_index=None,normalise=-1,
                 topic_index=None,topic_metadata=None):
        self.corpus = corpus
        self.word_index = word_index
        self.normalise = normalise
        # If the corpus exists, build the word index and the document index
        if self.corpus is not None:
            self.n_docs = len(self.corpus)
            if self.word_index is None:
                self.word_index = self.find_unique_words()
            print("Object created with {} documents".format(self.n_docs))
            self.n_words = len(self.word_index)
            self.make_doc_index()
            if self.normalise > -1:
                print("Normalising intensities")
                self.normalise_intensities()
        self.K = K
        self.alpha = alpha
        # If alpha is a single value, make it into a vector
        if isinstance(self.alpha,(int,float)):
            self.alpha = self.alpha*np.ones(self.K)
        self.eta = eta  # Smoothing parameter for beta
        self.update_alpha = update_alpha
        self.doc_metadata = None
        self.n_fixed_topics = 0
        self.topic_index = topic_index
        self.topic_metadata = topic_metadata
        if self.topic_index is None:
            self.topic_index = {}
            for topic_pos in range(self.K):
                topic_name = 'motif_{}'.format(topic_pos)
                self.topic_index[topic_name] = topic_pos
        if self.topic_metadata is None:
            self.topic_metadata = {}
            for topic in self.topic_index:
                self.topic_metadata[topic] = {'name':topic,'type':'learnt'}
    def add_fixed_topics_formulas(self,topics,prob_thresh=0.5):
        # Adds fixed topics by matching on chemical formulas
        from formula import Formula
        print("Matching topics based on formulas")
        ti = [(topic,self.topic_index[topic]) for topic in self.topic_index]
        ti = sorted(ti,key=lambda x: x[1])
        topic_reverse,_ = zip(*ti)
        self.beta_matrix = np.zeros((self.K,len(self.word_index)),float)
        self.n_fixed_topics = 0
        frag_formulas = {}
        loss_formulas = {}
        for word in self.word_index:
            split_word = word.split('_')
            if len(split_word) == 3:
                formula = Formula(split_word[2])
                if word.startswith('loss'):
                    loss_formulas[str(formula)] = word
                else:
                    frag_formulas[str(formula)] = word
        for topic in topics['beta']:
            matched_probability = 0.0
            matches = {}
            for word,probability in topics['beta'][topic].items():
                split_word = word.split('_')
                if len(split_word) == 3: # it has a formula
                    formula_string = str(Formula(split_word[2]))
                    matched_word = None
                    if word.startswith('loss') and formula_string in loss_formulas:
                        matched_word = loss_formulas[formula_string]
                    elif word.startswith('fragment') and formula_string in frag_formulas:
                        matched_word = frag_formulas[formula_string]
                    if matched_word is not None:
                        matches[word] = matched_word
                        matched_probability += probability
            print("Topic: {}, {} probability matched ({})".format(topic,matched_probability,
                  topics['topic_metadata'][topic].get('annotation',"")))
            if matched_probability > prob_thresh:
                # We have a match
                for word in matches:
                    self.beta_matrix[self.n_fixed_topics,self.word_index[matches[word]]] = topics['beta'][topic][word]
                # Normalise
                self.beta_matrix[self.n_fixed_topics,:] /= self.beta_matrix[self.n_fixed_topics,:].sum()
                topic_here = topic_reverse[self.n_fixed_topics]
                print("Match accepted, storing as {}".format(topic_here))
                self.topic_metadata[topic_here]['type'] = 'fixed'
                for key,val in topics['topic_metadata'][topic].items():
                    self.topic_metadata[topic_here][key] = val
                self.n_fixed_topics += 1
    def add_fixed_topics(self,topics,topic_metadata=None,mass_tol=5,prob_thresh=0.5):
        # Adds fixed topics by matching fragment / loss masses within a ppm tolerance
        print("Matching topics")
        ti = [(topic,self.topic_index[topic]) for topic in self.topic_index]
        ti = sorted(ti,key=lambda x: x[1])
        topic_reverse,_ = zip(*ti)
        self.beta_matrix = np.zeros((self.K,len(self.word_index)),float)
        self.n_fixed_topics = 0
        fragment_masses = np.array([float(f.split('_')[1]) for f in self.word_index if f.startswith('fragment')])
        fragment_names = [f for f in self.word_index if f.startswith('fragment')]
        loss_masses = np.array([float(f.split('_')[1]) for f in self.word_index if f.startswith('loss')])
        loss_names = [f for f in self.word_index if f.startswith('loss')]
        for topic in topics:
            print("Mass2Motif: {}".format(topic))
            topic_name_here = topic_reverse[self.n_fixed_topics]
            temp_beta = np.zeros(len(self.word_index),float)
            probability_matched = 0.0
            for word in topics[topic]:
                word_mass = float(word.split('_')[1])
                if word.startswith('fragment'):
                    mass_err = 1e6*np.abs(fragment_masses - word_mass)/fragment_masses
                    min_err = mass_err.min()
                    if min_err < mass_tol:
                        matched_word = fragment_names[mass_err.argmin()]
                        temp_beta[self.word_index[matched_word]] = topics[topic][word]
                        probability_matched += topics[topic][word]
                else:
                    # Losses are matched with a looser (2x) tolerance
                    mass_err = 1e6*np.abs(loss_masses - word_mass)/loss_masses
                    min_err = mass_err.min()
                    if min_err < 2*mass_tol:
                        matched_word = loss_names[mass_err.argmin()]
                        temp_beta[self.word_index[matched_word]] = topics[topic][word]
                        probability_matched += topics[topic][word]
            print("\t matched {} of the probability".format(probability_matched))
            if probability_matched > prob_thresh:
                self.topic_metadata[topic_name_here]['type'] = 'fixed'
                self.beta_matrix[self.n_fixed_topics,:] = temp_beta
                # Copy the metadata. If there is a name field in the incoming topic,
                # save it as old_name so it doesn't clash with the local topic name
                if topic_metadata:
                    for metadata_item in topic_metadata[topic]:
                        if metadata_item == 'name':
                            self.topic_metadata[topic_name_here]['old_name'] = topic_metadata[topic][metadata_item]
                        else:
                            self.topic_metadata[topic_name_here][metadata_item] = topic_metadata[topic][metadata_item]
                self.n_fixed_topics += 1
        # Normalise the fixed rows of beta
        self.beta_matrix[:self.n_fixed_topics,:] /= self.beta_matrix[:self.n_fixed_topics,:].sum(axis=1)[:,None]
        print("Matched {}/{} topics at prob_thresh={}".format(self.n_fixed_topics,len(topics),prob_thresh))
    def normalise_intensities(self):
        # Scale each document's intensities so that the largest equals self.normalise
        for doc in self.corpus:
            max_i = 0.0
            for word in self.corpus[doc]:
                if self.corpus[doc][word] > max_i:
                    max_i = self.corpus[doc][word]
            for word in self.corpus[doc]:
                self.corpus[doc][word] = int(self.normalise*self.corpus[doc][word]/max_i)
    # Load the features from a Joe-style .csv file pair. Pass the file name up to
    # (but not including) the _ms1.csv / _ms2.csv suffixes; these are added here.
    # scale_factor is what intensities are multiplied by.
    def load_features_from_csv(self,prefix,scale_factor=100.0):
        # Load the MS1 peaks (MS1 object defined below)
        self.ms1peaks = []
        self.doc_metadata = {}
        ms1file = prefix + '_ms1.csv'
        with open(ms1file,'r') as f:
            heads = f.readline()
            for line in f:
                split_line = line.split(',')
                ms1_id = split_line[1]
                mz = float(split_line[5])
                rt = float(split_line[4])
                name = split_line[5] + '_' + split_line[4]
                intensity = float(split_line[6])
                new_ms1 = MS1(ms1_id,mz,rt,intensity,name)
                self.ms1peaks.append(name)
                self.doc_metadata[name] = {}
                self.doc_metadata[name]['parentmass'] = mz
                self.doc_metadata[name]['rt'] = rt
                self.doc_metadata[name]['intensity'] = intensity
                self.doc_metadata[name]['id'] = ms1_id
        print("Loaded {} MS1 peaks".format(len(self.ms1peaks)))
        parent_id_list = [self.doc_metadata[name]['id'] for name in self.ms1peaks]
        # Load the MS2 objects
        frag_file = prefix + '_ms2.csv'
        features = []
        self.corpus = {}
        with open(frag_file,'r') as f:
            heads = f.readline().split(',')
            for line in f:
                split_line = line.rstrip().split(',')
                frag_name = split_line[10]
                if frag_name != 'NA':
                    frag_name = frag_name[1:-1]
                frag_id = 'fragment_' + frag_name
                loss_name = split_line[11]
                if loss_name != 'NA':
                    loss_name = loss_name[1:-1]
                loss_id = 'loss_' + loss_name
                if frag_id != "fragment_NA":
                    if frag_id not in features:
                        features.append(frag_id)
                    frag_idx = features.index(frag_id)
                if loss_id != "loss_NA":
                    if loss_id not in features:
                        features.append(loss_id)
                    loss_idx = features.index(loss_id)
                intensity = float(split_line[6])
                parent_id = split_line[2]
                # Find the parent document
                parent = self.ms1peaks[parent_id_list.index(parent_id)]
                # If we've not seen this parent before, create it as an empty dict
                if parent not in self.corpus:
                    self.corpus[parent] = {}
                # Store the MS2 features in the parent dictionary
                if frag_id != "fragment_NA":
                    self.corpus[parent][frag_id] = intensity * scale_factor
                if loss_id != "loss_NA":
                    self.corpus[parent][loss_id] = intensity * scale_factor
        self.n_docs = len(self.corpus)
        if self.word_index is None:
            self.word_index = self.find_unique_words()
        print("Object created with {} documents".format(self.n_docs))
        self.n_words = len(self.word_index)
        # Build the document index (maps document names to matrix row positions)
        self.make_doc_index()
        if self.normalise > -1:
            print("Normalising intensities")
            self.normalise_intensities()
    # Run the VB inference. verbose=True prints output at each iteration.
    # initialise=True (re)initialises, i.e. restarts the algorithm;
    # passing initialise=False continues from wherever the algorithm got to.
    # The first time it is run, initialise has to be True.
    def run_vb(self,n_its=1,verbose=True,initialise=True):
        if initialise:
            print("Initialising")
            self.init_vb()
        print("Starting iterations")
        for it in range(n_its):
            start_time = time.perf_counter()
            diff = self.vb_step()
            end_time = time.perf_counter()
            self.its_performed += 1
            estimated_finish = (end_time - start_time)*(n_its - it)/60.0
            if verbose:
                print("Iteration {} (change = {}) ({} seconds, I think I'll finish in {:06.2f} minutes)".format(it,diff,end_time - start_time,estimated_finish))
    # Do a VB step
    def vb_step(self):
        # Run an e-step; the normalisation of beta below is the m-step
        temp_beta = self.e_step()
        temp_beta += self.eta
        # Fixed topics keep their original word distributions
        if self.n_fixed_topics > 0:
            temp_beta[:self.n_fixed_topics,:] = self.beta_matrix[:self.n_fixed_topics,:]
        temp_beta /= temp_beta.sum(axis=1)[:,None]
        # Compute how much the word probabilities have changed
        total_difference = (np.abs(temp_beta - self.beta_matrix)).sum()
        self.beta_matrix = temp_beta
        # If we're updating alpha, run the alpha update
        if self.update_alpha:
            self.alpha = self.alpha_nr()
        return total_difference
    # Newton-Raphson procedure for updating alpha
    def alpha_nr(self,maxit=20,init_alpha=[]):
        M,K = self.gamma_matrix.shape
        if len(init_alpha) == 0:
            init_alpha = self.gamma_matrix.mean(axis=0)/K
        alpha = init_alpha.copy()
        g_term = (psi(self.gamma_matrix) - psi(self.gamma_matrix.sum(axis=1))[:,None]).sum(axis=0)
        for it in range(maxit):
            grad = M*(psi(alpha.sum()) - psi(alpha)) + g_term
            H = -M*np.diag(pg(1,alpha)) + M*pg(1,alpha.sum())
            alpha_new = alpha - np.dot(np.linalg.inv(H),grad)
            if (alpha_new < 0).sum() > 0:
                # A negative component means the step overshot:
                # restart from a smaller initial value
                init_alpha /= 10.0
                return self.alpha_nr(maxit=maxit,init_alpha=init_alpha)
            diff = np.sum(np.abs(alpha - alpha_new))
            alpha = alpha_new
            if diff < 1e-6 and it > 1:
                return alpha
        return alpha
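    # The update above is the standard Newton-Raphson maximisation of the
    # variational bound with respect to alpha (cf. Blei, Ng & Jordan, 2003):
    # with M documents,
    #   grad_k = M*(psi(sum_j alpha_j) - psi(alpha_k)) + sum_d (psi(gamma_dk) - psi(sum_j gamma_dj))
    #   H      = M*(psi'(sum_j alpha_j) - diag(psi'(alpha)))
    #   alpha <- alpha - H^{-1} grad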
    # E-step: update phi and gamma for each document, accumulating the
    # unnormalised beta as we go
    def e_step(self):
        temp_beta = np.zeros((self.K,self.n_words))
        for doc in self.corpus:
            d = self.doc_index[doc]
            temp_gamma = np.zeros(self.K) + self.alpha
            for word in self.corpus[doc]:
                w = self.word_index[word]
                # Variational update for phi: proportional to beta * exp(psi(gamma))
                self.phi_matrix[doc][word] = self.beta_matrix[:,w]*np.exp(psi(self.gamma_matrix[d,:]))
                self.phi_matrix[doc][word] /= self.phi_matrix[doc][word].sum()
                # Weight by the word's intensity (the analogue of a word count)
                temp_gamma += self.phi_matrix[doc][word]*self.corpus[doc][word]
                temp_beta[:,w] += self.phi_matrix[doc][word]*self.corpus[doc][word]
            self.gamma_matrix[d,:] = temp_gamma
        return temp_beta
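    # The updates above are the standard mean-field updates for LDA:
    #   phi_{d,w,k} proportional to beta_{k,w} * exp(psi(gamma_{d,k}))
    #   gamma_{d,k} = alpha_k + sum_w n_{d,w} * phi_{d,w,k}
    # with the (scaled) intensities n_{d,w} playing the role of word counts.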
    # Find the unique words in the corpus and assign them to indices
    def find_unique_words(self):
        word_index = {}
        pos = 0
        for doc in self.corpus:
            for word in self.corpus[doc]:
                if word not in word_index:
                    word_index[word] = pos
                    pos += 1
        print("Found {} unique words".format(len(word_index)))
        return word_index
    # Map each document name to a row position; this index is used by
    # e_step, init_vb and the getter methods below
    def make_doc_index(self):
        self.doc_index = {}
        doc_pos = 0
        for doc in self.corpus:
            self.doc_index[doc] = doc_pos
            doc_pos += 1
    # Initialise the VB algorithm
    def init_vb(self):
        self.its_performed = 0
        self.phi_matrix = {}
        self.gamma_matrix = np.zeros((self.n_docs,self.K))
        for doc in self.corpus:
            self.phi_matrix[doc] = {}
            for word in self.corpus[doc]:
                self.phi_matrix[doc][word] = np.zeros(self.K)
            d = self.doc_index[doc]
            doc_total = 0.0
            for word in self.corpus[doc]:
                doc_total += self.corpus[doc][word]
            self.gamma_matrix[d,:] = self.alpha + 1.0*doc_total/self.K
        # Initialise the betas: fixed rows (if any) are kept, the rest are random
        if self.n_fixed_topics == 0:
            self.beta_matrix = np.random.rand(self.K,self.n_words)
        else:
            self.beta_matrix[self.n_fixed_topics:,:] = np.random.rand(self.K - self.n_fixed_topics,self.n_words)
        self.beta_matrix /= self.beta_matrix.sum(axis=1)[:,None]
    # Return a dictionary with keys equal to documents and values equal to
    # the probability of the requested topic in that document
    # (used for visualising in DESI imaging)
    def get_topic_as_doc_dict(self,topic_id,thresh=0.001,normalise=False):
        top = {}
        mat = self.gamma_matrix
        if normalise:
            mat = self.get_expect_theta()
        for doc in self.doc_index:
            pos = self.doc_index[doc]
            if mat[pos,topic_id] >= thresh:
                top[doc] = mat[pos,topic_id]
        return top
    # Return a topic as a dictionary over words
    def get_topic_as_dict(self,topic_id):
        top = {}
        for word in self.word_index:
            top[word] = self.beta_matrix[topic_id,self.word_index[word]]
        return top

    # Return the expected topic probabilities for all documents.
    # Note that self.doc_index maps the document names to their
    # position in this matrix
    def get_expect_theta(self):
        e_theta = self.gamma_matrix.copy()
        e_theta /= e_theta.sum(axis=1)[:,None]
        return e_theta

    def get_beta(self):
        return self.beta_matrix.copy()
    # Export the model as a plain dictionary, keeping only entries above the
    # given probability thresholds; optionally pickle it to filename
    def make_dictionary(self,metadata=None,min_prob_to_keep_beta=1e-3,
                        min_prob_to_keep_phi=1e-2,min_prob_to_keep_theta=1e-2,
                        filename=None):
        if metadata is None:
            if self.doc_metadata is None:
                metadata = {}
                for doc in self.corpus:
                    metadata[doc] = {'name': doc,'parentmass': float(doc.split('_')[0])}
            else:
                metadata = self.doc_metadata
        lda_dict = {}
        lda_dict['corpus'] = self.corpus
        lda_dict['word_index'] = self.word_index
        lda_dict['doc_index'] = self.doc_index
        lda_dict['K'] = self.K
        lda_dict['alpha'] = list(self.alpha)
        lda_dict['beta'] = {}
        lda_dict['doc_metadata'] = metadata
        lda_dict['topic_index'] = self.topic_index
        lda_dict['topic_metadata'] = self.topic_metadata
        # Create the inverse indexes (position -> name)
        wi = []
        for i in self.word_index:
            wi.append((i,self.word_index[i]))
        wi = sorted(wi,key=lambda x: x[1])
        di = []
        for i in self.doc_index:
            di.append((i,self.doc_index[i]))
        di = sorted(di,key=lambda x: x[1])
        ri,_ = zip(*wi)
        ri = list(ri)
        di,_ = zip(*di)
        di = list(di)
        # Make a reverse index for topics
        tp = [(topic,self.topic_index[topic]) for topic in self.topic_index]
        tp = sorted(tp,key=lambda x: x[1])
        reverse,_ = zip(*tp)
        for k in range(self.K):
            pos = np.where(self.beta_matrix[k,:] > min_prob_to_keep_beta)[0]
            motif_name = reverse[k]
            lda_dict['beta'][motif_name] = {}
            for p in pos:
                word_name = ri[p]
                lda_dict['beta'][motif_name][word_name] = self.beta_matrix[k,p]
        eth = self.get_expect_theta()
        lda_dict['theta'] = {}
        for i,t in enumerate(eth):
            doc = di[i]
            lda_dict['theta'][doc] = {}
            pos = np.where(t > min_prob_to_keep_theta)[0]
            for p in pos:
                motif_name = reverse[p]
                lda_dict['theta'][doc][motif_name] = t[p]
        lda_dict['phi'] = {}
        ndocs = 0
        for doc in self.corpus:
            ndocs += 1
            lda_dict['phi'][doc] = {}
            for word in self.corpus[doc]:
                lda_dict['phi'][doc][word] = {}
                pos = np.where(self.phi_matrix[doc][word] >= min_prob_to_keep_phi)[0]
                for p in pos:
                    motif_name = reverse[p]
                    lda_dict['phi'][doc][word][motif_name] = self.phi_matrix[doc][word][p]
            if ndocs % 500 == 0:
                print("Done {}".format(ndocs))
        if filename is not None:
            # Pickle needs a binary-mode file handle
            with open(filename,'wb') as f:
                pickle.dump(lda_dict,f)
        return lda_dict
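    # Example: reload a saved dictionary later (a sketch; 'lda.dict' is a
    # hypothetical filename matching whatever was passed above):
    #
    #   with open('lda.dict','rb') as f:
    #       lda_dict = pickle.load(f)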
# MS1 object used by Variational Bayes LDA
class MS1(object):
    def __init__(self,ms1_id,mz,rt,intensity,name):
        self.ms1_id = ms1_id
        self.mz = mz
        self.rt = rt
        self.intensity = intensity
        self.name = name

    def __str__(self):
        return self.name
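# A minimal end-to-end sketch, assuming a tiny in-memory corpus. All document
# and feature names below are hypothetical, chosen only to illustrate the
# expected shapes (document names start with the parent mass, as produced by
# load_features_from_csv):
if __name__ == '__main__':
    corpus = {
        '300.1_55.2': {'fragment_100.0757': 1200.0, 'loss_18.0106': 400.0},
        '310.2_60.1': {'fragment_100.0757': 800.0, 'fragment_120.0808': 300.0},
    }
    v = VariationalLDA(corpus=corpus, K=2, eta=0.1, alpha=1)
    v.run_vb(n_its=10)
    lda_dict = v.make_dictionary()
    print(sorted(lda_dict['beta'].keys()))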