-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtokenize.py
More file actions
217 lines (194 loc) · 8.46 KB
/
tokenize.py
File metadata and controls
217 lines (194 loc) · 8.46 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
from os.path import isfile, basename, abspath
import argparse
import sys
################################################################################
# Binary Tree Structure #
################################################################################
class Node:
def __init__(self, character, code=-1):
self.character =character
self.code =code
self.rChild =None
self.lChild =None
class Tree:
def __init__(self):
self.root =None
self.currentNode =None
self.lastNode =None
self.numWords =0
def print_tree(self, node, prefix=""):
if node is None:
return
prefix+=node.character
if node.code != -1:
print("{:9} {}".format(node.code, prefix))
self.print_tree(node.rChild, prefix)
self.print_tree(node.lChild, prefix[:-1])
def set_word_number(self, node, lvl=0):
if node is None:
return
if lvl == 0:
self.numWords=0
if node.code != -1:
self.numWords +=1
self.set_word_number(node.rChild, lvl+1)
self.set_word_number(node.lChild, lvl+1)
################################################################################
# Main Class #
################################################################################
class Tokenizer:
def __init__(self):
self.fileObj =None # variable to handle file/stdin
self.curr_node =None # current node in data structure (Tokenize)
#----------------------------------------------------------------------#
self.rChar ='' # calu
self.wordCode =-1 # code_mot
self.wordEnd =True # fin_mot
#----------------------------------------------------------------------#
self.sep =' .,;:!?\'"`-+/\\\n\t_\0' # separators
self.spaces =' \n\t\0' # spacing characters
self.tok =Tree() # tokenizer data structure
def print_vocab(self):
self.tok.print_tree(self.tok.root)
def process_input(self, process, filePath=None):
if filePath is None: # read from STDIN by default
self.fileObj =sys.stdin
else: # or from file, if a valid path is specified
if not isfile(filePath):
print("[ERREUR] le fichier \""+filePath+"\" n'existe pas.")
exit(4)
self.fileObj =open(filePath, 'r')
process()
if filePath is not None:
self.fileObj.close()
self.fileObj =None
#==========================================================================#
#====================== Binary Tree Construction ==========================#
def read_char(self):
self.wordEnd=True
self.wordCode =-1
self.rChar=''
line ="\n"
while self.fileObj is not None and line != "":
line=self.fileObj.readline()
tmp =line.rstrip().split(' ')
if len(tmp) == 1 and tmp[0] == "":
continue # empty line
if len(tmp) != 2:
print( "[ERREUR] format du fichier de lexique incorrect!\n"
+"Ligne: \""+line+"\"")
exit(3)
if tmp[0].isdigit() == False:
print("[ERREUR] code "+tmp[0]+" is not a digit!\nWord="+tmp[1])
exit(2)
self.wordEnd =False
self.wordCode =int(tmp[0])
for i in range(len(tmp[1])):
self.rChar =tmp[1][i]
if i == len(tmp[1])-1:
self.wordEnd =True
yield True
yield False
# lire_lexique()
def read_lexicon(self):
# Attention: On boucle de façon un peu spéciale (sur un générateur dont
# la valeur de retour nous dit s'il faut sortir), mais c'est pour coller
# avec la méthode vue en TD
for t in self.read_char():
if not t: # prevent loop on empty files
break
if self.tok.root is None: # Init case (very first char)
r =self.tok.root =Node(self.rChar)
p =None
if self.wordEnd: # Maybe the first char is terminal?
r.code =self.wordCode
p =self.tok.root
continue
elif p is None: # new branch to the right (next char)
p =Node(self.rChar)
r.rChild =p
else: # new branch to the left (alt. char)
# keep looking for this alternative character
while p is not None and p.character != self.rChar:
r =p
p =p.lChild
# create new alternate character if we didn't found it
if p is None:
p =Node(self.rChar)
r.lChild =p
# Check for next move...
if self.wordEnd: # If it's the last character
p.code =self.wordCode # set the code
p =self.tok.root # and go back to the root for next word
else: # if there's another character following,
r =p # the "current" node become the futur "parent"
p =p.rChild # and we keep going to the right; current=rightChild
def load_lexicon(self, filePath=None):
self.process_input(self.read_lexicon, filePath)
#==========================================================================#
#============================= Tokenizer ==================================#
def get_txt(self):
line ="\n"
while self.fileObj is not None and line != "":
line =self.fileObj.readline()
yield line
def tokenize(self, line, i):
self.wordCode=-1
imot=i
self.curr_node =self.tok.root # this can cut the analysis if end of
# line is reached before last character
# (when the word is cut in two)
while i < len(line) and self.curr_node is not None:
if (line[i]==self.curr_node.character or
(line[i]==' ' and self.curr_node.character=='_')): # composed word
if ( self.curr_node.code!=-1 and
(((len(line)>i+1 and line[i+1] in self.sep) or
len(line)==i+1) or line[i] in self.sep)
):
self.wordCode=self.curr_node.code
imot=i
self.curr_node=self.curr_node.rChild
i+=1
else:
self.curr_node=self.curr_node.lChild
return imot
def txt2code(self):
for l in self.get_txt():
i =0
while i < len(l):
if (l[i] not in self.spaces): # escape all spacing characters
i=self.tokenize(l,i)
if (self.wordCode != -1):
print(str(self.wordCode)+" ", end='')
else: # unknown word, print code '0' and get to next word
print("0 ", end='')
j =i
while i<len(l) and l[i] not in self.sep:
i +=1
if i>j:
i-=1 # keep going to next valid character
elif l[i]=="\n":
print()
i +=1
def process_tok(self, filePath):
self.process_input(self.txt2code, filePath)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-lex", help="Path to a lexicon file.",
default=None)
parser.add_argument("-txt", help="Path to a text file.",
default=None)
parser.add_argument("--verbose", "--v", help="If outputs are needed.",
action="store_true")
args = parser.parse_args()
if args.lex is None and args.txt is None:
print("[ERROR] Can't read both inputs at the same time in stdin")
exit(1)
tokenizer =Tokenizer()
if args.verbose:
print("Chargement du vocabulaire...")
tokenizer.load_lexicon(args.lex)
if args.verbose:
print("VOCAB:")
tokenizer.print_vocab()
tokenizer.process_tok(args.txt)