-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsearch_engine.py
More file actions
90 lines (76 loc) · 3.3 KB
/
search_engine.py
File metadata and controls
90 lines (76 loc) · 3.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
from reader import ReadFile
from configuration import ConfigClass
from parser_module import Parse
from indexer import Indexer
from searcher import Searcher
from configuration import ConfigClass
import pandas as pd
import datetime
def run_engine(corpus_path_, output_path_, stemming_):
    """
    Build the inverted index for the whole corpus.

    Reads every parquet file under corpus_path_, parses each document
    (optionally with stemming), flushes posting files to disk in batches
    of 200,000 parsed documents, then merges the posting files and writes
    the final inverted index under output_path_.

    :param corpus_path_: root folder containing the parquet corpus files
    :param output_path_: folder where posting/index files are written
    :param stemming_: whether the parser applies stemming
    :return: None — all results are written to disk via the indexer
    """
    config = ConfigClass(corpuspath=corpus_path_, outputpath=output_path_, stemming=stemming_)
    config.corpusPath = corpus_path_
    config.savedFileMainFolder = output_path_
    r = ReadFile(corpus_path=config.get__corpusPath())
    p = Parse()
    indexer = Indexer(config)
    pathes = r.get_all_path_of_parquet()
    iteration = 0
    is_stemmer = config.toStem
    number_of_documents = 0
    parsed_doc_list = []
    for path_entry in pathes:
        # NOTE(review): both arguments are path_entry[0]; confirm the second
        # argument is not meant to be path_entry[1].
        documents_list = r.get_documents(path_entry[0], path_entry[0])
        for doc in documents_list:
            parsed_document = p.parse_doc(doc, stemmer=is_stemmer)
            if parsed_document is None:
                continue
            parsed_doc_list.append(parsed_document)
            number_of_documents += 1
            if number_of_documents % 200000 == 0:
                # Full batch: index it and flush a posting file to disk.
                for parsed in parsed_doc_list:
                    indexer.add_new_doc(parsed)
                indexer.write_posting_to_txt_file_lower_upper(iteration)
                iteration += 1
                parsed_doc_list = []
    if parsed_doc_list:
        # Flush the final partial batch. Doing this after the loops (rather
        # than on "last document" inside them) also covers the case where
        # the very last document fails to parse and is skipped.
        for parsed in parsed_doc_list:
            indexer.add_new_doc(parsed)
        indexer.write_posting_to_txt_file_lower_upper(iteration)
        parsed_doc_list = []
    indexer.merge_posting_file()
    indexer.merge_two_last_posting_file()
    indexer.split_posting_file_and_create_inverted_index()
    indexer.write_inverted_index_to_txt_file()
def load_index(path):
    """Load a previously built inverted-index dictionary from disk.

    :param path: folder where the inverted index files were saved
    :return: the inverted index as loaded by the Indexer
    """
    return Indexer.load_inverted_index_to_dictionary_offline(path)
def search_and_rank_query(query, inverted_index, k, path):
    """Parse a free-text query, rank the matching documents, return the top k.

    :param query: raw query string
    :param inverted_index: inverted index produced by run_engine/load_index
    :param k: number of top-ranked documents to return
    :param path: folder holding the posting files the searcher reads
    :return: the top-k ranked documents from the searcher's ranker
    """
    parser = Parse()
    query_terms = parser.parse_sentence(query)
    searcher = Searcher(inverted_index, path)
    candidate_docs = searcher.relevant_docs_from_posting(query_terms, inverted_index)
    ranked = searcher.ranker.rank_relevant_doc(candidate_docs)
    return searcher.ranker.retrieve_top_k(ranked, k)
def main(corpus_path, output_path, stemming, queries, num_doc_to_retrieve):
    """Build the index, then run and print each query's top-k results.

    :param corpus_path: root folder of the parquet corpus files
    :param output_path: folder for index/posting files
    :param stemming: whether to apply stemming while indexing
    :param queries: iterable of raw query strings
    :param num_doc_to_retrieve: number of documents to retrieve per query
    :return: None — results are printed to stdout
    """
    run_engine(corpus_path_=corpus_path, output_path_=output_path, stemming_=stemming)
    inverted_index = load_index(output_path)
    tuple_list = []
    for query_num, query in enumerate(queries):
        ranked = search_and_rank_query(query, inverted_index, num_doc_to_retrieve, output_path)
        for tweet_id, score in ranked.items():
            print('tweet id: {}, score (unique common words with query): {}'.format(tweet_id, score))
            tuple_list.append((query_num, tweet_id, score))
    # NOTE(review): tuple_list is accumulated but never consumed here;
    # presumably write_to_csv(tuple_list) was intended — confirm before wiring it up.
def write_to_csv(tuple_list):
    """Persist (query number, tweet id, rank) rows to 'results.csv'.

    :param tuple_list: iterable of (query_num, tweet_id, rank) tuples
    :return: None — writes results.csv in the current working directory
    """
    frame = pd.DataFrame.from_records(
        tuple_list,
        columns=["Query_num", "Tweet_id", "Rank"],
    )
    frame.to_csv('results.csv')