# Based on https://gist.github.com/aronwc/8248457
import numpy as np
import scipy
import time
from gensim import matutils, corpora
from gensim.models.ldamodel import LdaModel
from sklearn import linear_model
from sklearn import svm
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import string
from stemming.porter2 import stem
import matplotlib.pyplot as plt
import math
# --- Machine learning methods ---

def print_features(clf, vocab, n=10):
    """Print sorted lists of the strongest positive and negative features/weights.

    Only meaningful for linear classifiers that expose coef_ (e.g. logistic
    regression); an RBF-kernel SVC does not.
    """
    coef = clf.coef_[0]
    print('positive features: %s' % ' '.join(
        '%s/%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[::-1][:n] if coef[j] > 0))
    print('negative features: %s' % ' '.join(
        '%s/%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[:n] if coef[j] < 0))
def fit_classifier(X, y, C=0.1):
    # An L1-regularized logistic regression is a drop-in alternative; smaller C
    # means fewer features selected:
    # clf = linear_model.LogisticRegression(penalty='l1', C=C)
    clf = svm.SVC()
    clf.fit(X, y)
    return clf
# Use the classifier to make predictions on data; the predictions can be
# compared to the true labels to compute an error rate.
def make_predictions(clf, data):
return clf.predict(data)
# Compute and return the classification error rate by comparing predictions to labels.
def compute_error(predictions,labels):
comp_preds = predictions - labels
errorRate = np.count_nonzero(comp_preds)*1.0/np.size(comp_preds)
return errorRate
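
# Equivalent one-liner (a sketch of the same computation using numpy directly):
# error_rate = np.mean(np.asarray(predictions) != np.asarray(labels))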
# --- LDA methods ---

def fit_lda(corpus, dictionary, num_topics=5, passes=20):
    print('fitting lda...')
    return LdaModel(corpus, id2word=dictionary, num_topics=num_topics, passes=passes)
def print_topics(lda, n=10):
    """Print the top words for each topic."""
    # show_topics(formatted=False) yields (topic_id, [(word, prob), ...]) pairs.
    for ti, topic in lda.show_topics(num_topics=n, formatted=False):
        print('topic %d: %s' % (ti, ' '.join('%s/%.2f' % (word, prob) for word, prob in topic)))
def getTopicDistributions(lda, documents):
    # lda[bow] returns a sparse list of (topic_id, probability) pairs per document.
    topic_dists = [lda[document] for document in documents]
    return topic_dists
def getTopicDistributionFeatures(topic_distributions, num_topics):
    """Convert sparse (topic_id, prob) lists into dense length-num_topics vectors."""
    # Look topics up by id so that any topic absent from the sparse list
    # correctly defaults to 0, even when the listed ids are not contiguous.
    topicDist_features = [[dict(topic_dist).get(i, 0.0) for i in range(num_topics)]
                          for topic_dist in topic_distributions]
    return topicDist_features
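
# gensim also ships this conversion: a sketch using matutils.sparse2full (already
# imported above), which densifies one sparse (id, value) list, filling gaps with 0.
# dense_features = [matutils.sparse2full(topic_dist, num_topics)
#                   for topic_dist in topic_distributions]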
# --- Text processing methods ---

# Read stopwords from a file (one per line) and return them as a set,
# which makes the membership tests below fast.
def getStopWords():
    with open('stopwords.txt', 'r') as f:
        return set(word.strip() for word in f)
def trainTest_split(full_data, train_proportion):
    """Split an (already shuffled) sklearn dataset into train and test portions."""
data = full_data.data
labels = full_data.target
num_docs = len(data)
num_train = int(train_proportion * num_docs)
train_data = data[0:num_train]
test_data = data[num_train:num_docs]
train_labels = labels[0:num_train]
test_labels = labels[num_train:num_docs]
return (train_data, test_data, train_labels, test_labels)
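
# A library alternative (sketch): sklearn's train_test_split does the same job;
# shuffle=False preserves the order already established when the data was fetched.
# from sklearn.model_selection import train_test_split
# train_data, test_data, train_labels, test_labels = train_test_split(
#     full_data.data, full_data.target, train_size=0.8, shuffle=False)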
def process_documents(documents):
    # Standard stopword list from http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words,
    # read from a file called stopwords.txt with each stopword on its own line.
    stoplist = getStopWords()
    # Treat punctuation and digits as token separators.
    stopChars = string.punctuation + string.digits
    cleaned_docs = []
    for document in documents:
        for ch in stopChars:
            document = document.replace(ch, ' ')
        cleaned_docs.append(document)
    # Lowercase, drop stopwords, and stem each remaining token.
    texts = [[stem(word) for word in document.lower().split() if word not in stoplist]
             for document in cleaned_docs]
    # Count corpus-wide token frequencies so that singleton tokens can be dropped.
    frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
processed_texts = [[token for token in text if frequency[token] > 1] for text in texts]
return processed_texts
def getDictionary(vocab):
    # Map integer term ids to terms, matching the column order of the
    # CountVectorizer term-document matrix.
    return dict(enumerate(vocab))
def getCorpus(texts, dictionary):
    # Convert tokenized texts into bag-of-words vectors via a gensim Dictionary.
    corpus = [dictionary.doc2bow(text) for text in texts]
    return corpus
def sparse2bow(td_sparse_csr):
    # Convert a scipy CSR term-document matrix (documents x terms) into a
    # gensim-style corpus: one list of (term_id, count) pairs per document.
    num_docs = td_sparse_csr.shape[0]
    documents = []
    for doc in range(num_docs):
        row = td_sparse_csr.getrow(doc)
        documents.append([(int(term), int(count))
                          for term, count in zip(row.indices, row.data)])
    return documents
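
# gensim also provides a wrapper for this (sketch): matutils.Sparse2Corpus streams
# documents straight from the sparse matrix; documents_columns=False indicates
# that rows, not columns, are documents.
# corpus = matutils.Sparse2Corpus(tdm_train, documents_columns=False)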
# --- Run program ---

if __name__ == '__main__':
    # Load the data: restrict to two categories and shuffle into random order
    # with a fixed seed for reproducibility.
    rand = np.random.RandomState(8675309)
    cats = ['rec.sport.baseball', 'sci.crypt']
    all_data = fetch_20newsgroups(categories=cats, shuffle=True, random_state=rand,
                                  remove=('headers', 'footers', 'quotes'))
    train_proportion = 0.8
    train_data, test_data, train_labels, test_labels = trainTest_split(all_data, train_proportion)
    vec = CountVectorizer(min_df=10, stop_words='english')
    tdm_train = vec.fit_transform(train_data)
    train_vocab = vec.get_feature_names_out()
    # transform (not fit_transform) keeps the test matrix aligned with the
    # training vocabulary; refitting would reassign the term ids.
    tdm_test = vec.transform(test_data)
    # Create bag-of-words representations of the corpus.
    # TODO: save and load the dictionary and LDA model instead of recomputing
    # them on every run (use gensim's save/load methods); see
    # https://radimrehurek.com/gensim/tut1.html
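    # A sketch of that TODO using gensim's save/load (file names illustrative):
    # dictionary.save('bow.dict'); lda.save('lda.model')
    # dictionary = corpora.Dictionary.load('bow.dict')
    # lda = LdaModel.load('lda.model')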
train_texts = process_documents(train_data)
test_texts = process_documents(test_data)
    # The dictionary and corpus used to fit the LDA model must be built from
    # the training data only.
dictionary = corpora.Dictionary(train_texts)
    # Alternative gensim pipeline over the stemmed texts (unused; the
    # CountVectorizer matrices are used instead):
    # corpus = getCorpus(train_texts, dictionary)
    # test_bow_docs = getCorpus(test_texts, dictionary)
corpus = sparse2bow(tdm_train)
test_bow_docs = sparse2bow(tdm_test)
    # Fit and time LDA, sweeping the number of topics from 3 to 30 in steps of 3.
    num_topics_list = np.arange(3, 33, 3)
num_passes = 3
train_error_data = []
test_error_data = []
perplexity_data = []
for num_topics in num_topics_list:
start_time = time.time()
lda_dict = getDictionary(train_vocab)
print("Extracting %d topics..." % num_topics)
lda = fit_lda(corpus, lda_dict,num_topics=num_topics,passes=num_passes)
print("Performed LDA in %f seconds" % (time.time() - start_time))
        # Print the top words of each LDA topic.
        print_topics(lda, num_topics)
        # Held-out perplexity. gensim's log_perplexity returns a per-word
        # variational lower bound on log2 likelihood, so the corresponding
        # perplexity estimate is 2 ** (-bound).
        log_perplexity_bound = lda.log_perplexity(test_bow_docs)
        perplexity_bound = 2 ** (-log_perplexity_bound)
        print("Bound on test perplexity: ", perplexity_bound)
        # Get the topic distributions for the training documents.
train_topic_dists = getTopicDistributions(lda, corpus)
train_topicDist_features = getTopicDistributionFeatures(train_topic_dists, num_topics)
        # Get the topic distributions for the test documents.
test_topic_dists = getTopicDistributions(lda, test_bow_docs)
test_topicDist_features = getTopicDistributionFeatures(test_topic_dists, num_topics)
        # Fit an SVM classifier on the topic-distribution features.
        clf = fit_classifier(train_topicDist_features, train_labels)
        # Evaluate the classifier on the training and test sets.
        train_preds = make_predictions(clf, train_topicDist_features)
        test_preds = make_predictions(clf, test_topicDist_features)
        # Compute and display the error rates.
        train_error = compute_error(train_preds, train_labels)
        print("Training error using LDA topic distributions as features: ", train_error)
        test_error = compute_error(test_preds, test_labels)
        print("Test error using LDA topic distributions as features: ", test_error)
        perplexity_data.append(log_perplexity_bound)
        train_error_data.append(train_error)
        test_error_data.append(test_error)

    # Plot test error and the held-out log perplexity bound against the
    # number of topics.
    plt.plot(num_topics_list, test_error_data, 'b-')
    plt.xlabel('number of topics')
    plt.ylabel('test error rate')
    plt.figure()
    plt.plot(num_topics_list, perplexity_data, 'r-')
    plt.xlabel('number of topics')
    plt.ylabel('log perplexity bound')
    plt.show()