# -*- coding: utf-8 -*-
"""Adjacency.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1-POwDLlEe6xXn72LbY6_DyZ0fhFKwo-5
"""
import pickle as pkl
import sys
from math import log

import numpy as np
import pandas as pd
import scipy.sparse as sp

from config import config

def create_df(filename):
    """Read an MR-style corpus file (one "<label> <text>" per line) into a DataFrame."""
    with open(filename, 'rb') as f:
        lines = f.readlines()
    texts = [line.decode('latin-1') for line in lines]
    # Build the dataframe: the leading character of each line is the label.
    df = pd.DataFrame(texts, columns=['text'])
    df['label'] = df['text'].apply(lambda s: s.split(" ")[0])
    df['text'] = df['text'].apply(lambda s: s[1:])
    df['label'] = df['label'].astype(int)
    return df

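# create_df is not used below but is kept for reference. A sketch of its use
# ('mr.txt' is a hypothetical path to an MR-style corpus file):
#
#   df = create_df('mr.txt')
#   print(df['label'].value_counts())
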
corpus_path = sys.argv[1]

# This is for MR: each line is "<label> <text>".
with open(corpus_path, 'rb') as f:
    lines = f.readlines()

texts = []
labels = []
for line in lines:
    s = line.decode('latin-1')
    texts.append(s[2:].strip())
    labels.append(int(s[0]))

# Build the vocabulary and global word frequencies.
word_set = set()
word_freq = {}
for doc in texts:
    for w in doc.split():
        word_set.add(w)
        word_freq[w] = word_freq.get(w, 0) + 1

vocab = list(word_set)
config['vocab_size'] = len(vocab)
np.savetxt('vocab.txt', vocab, fmt='%s')

# Map each word to its row/column index in the adjacency matrix.
map_word_index = {w: i for i, w in enumerate(vocab)}
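# For example, if vocab == ['film', 'good', 'bad'] then map_word_index maps
# 'film' -> 0, 'good' -> 1, 'bad' -> 2. Note that set iteration order is
# arbitrary, so the actual indices can vary between runs.
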
window_size = config['window']
windows = []
# Create sliding context windows for every document.
for doc in texts:
    words = doc.split()
    length = len(words)
    if length <= window_size:
        # The whole document fits inside a single window.
        windows.append(words)
    else:
        for j in range(length - window_size + 1):
            windows.append(words[j: j + window_size])
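# For example, with window_size = 3 the document "one two three four" yields
# two windows: ['one', 'two', 'three'] and ['two', 'three', 'four'].
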
# #W(i): the number of windows in which word i appears at least once.
word_window_freq = {}
for win in windows:
    appeared = set()
    for w in win:
        if w in appeared:
            continue
        word_window_freq[w] = word_window_freq.get(w, 0) + 1
        appeared.add(w)

# #W(i,j): the number of windows in which words i and j co-occur. Every pair
# is counted in both orders, so the resulting adjacency matrix is symmetric.
word_pair_count = {}
for win in windows:
    for i in range(1, len(win)):
        for j in range(0, i):
            word_i_id = map_word_index[win[i]]
            word_j_id = map_word_index[win[j]]
            if word_i_id == word_j_id:
                continue
            for a, b in ((word_i_id, word_j_id), (word_j_id, word_i_id)):
                key = str(a) + ',' + str(b)
                word_pair_count[key] = word_pair_count.get(key, 0) + 1

# PMI: pointwise mutual information between co-occurring words, used as the
# edge weight between word nodes.
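# The probabilities are estimated from window counts (#W = total windows):
#
#     p(i)      = #W(i) / #W
#     p(i, j)   = #W(i, j) / #W
#     PMI(i, j) = log( p(i, j) / (p(i) * p(j)) )
#
# Only pairs with positive PMI are kept: a non-positive value means the words
# co-occur no more often than chance, so no edge is added.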
row = []
col = []
weight = []
train_size = 0  # node offset; 0 because this graph contains only word nodes
num_window = len(windows)
for key in word_pair_count:
    i, j = map(int, key.split(','))
    count = word_pair_count[key]
    word_freq_i = word_window_freq[vocab[i]]
    word_freq_j = word_window_freq[vocab[j]]
    pmi = log((1.0 * count / num_window) /
              (1.0 * word_freq_i * word_freq_j / (num_window * num_window)))
    if pmi <= 0:
        continue
    row.append(train_size + i)
    col.append(train_size + j)
    weight.append(pmi)

# Assemble the sparse word-word adjacency matrix and pickle it.
node_size = len(vocab)
adj = sp.csr_matrix((weight, (row, col)), shape=(node_size, node_size))

with open(config['adj_path'].format(config['dataset']), 'wb') as f:
    pkl.dump(adj, f)
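
# A minimal sketch of loading the matrix back for downstream use (assumes the
# same config['adj_path'] / config['dataset'] settings as above):
#
#   with open(config['adj_path'].format(config['dataset']), 'rb') as f:
#       adj = pkl.load(f)
#   print(adj.shape, adj.nnz)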