# submission.py
import csv
import json
import random  # needed by KoMRC.split
import uuid    # needed when a sample has no 'guid'
from typing import Any, Dict, List, Tuple

import torch
from tqdm.notebook import tqdm
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

from test import test_model  # local module; not used in this script

model_path = "/data/jw/model/kobigbird_v2_ep1_max1024_lr5e-05_data1,2_54_0"
model_name = "kobigbird_v2_ep1_max1024_lr5e-05_data1,2_54_0"

tokenizer = AutoTokenizer.from_pretrained('monologg/kobigbird-bert-base')
load_model = AutoModelForQuestionAnswering.from_pretrained(model_path)
load_model.cuda()
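# Quick shape check (illustrative only; "질문"/"본문" are placeholder strings):
#   enc = tokenizer("질문", "본문", return_tensors="pt").to("cuda")
#   out = load_model(**enc)
#   out.start_logits.shape, out.end_logits.shape  # each (1, seq_len)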
class KoMRC:
    def __init__(self, data, indices: List[Tuple[int, int, int]]):
        self._data = data
        self._indices = indices

    # Load a single JSON file.
    @classmethod
    def load(cls, file_path: str):
        with open(file_path, 'r', encoding='utf-8') as fd:
            data = json.load(fd)
        indices = []
        for d_id, document in enumerate(data['data']):
            for p_id, paragraph in enumerate(document['paragraphs']):
                for q_id, _ in enumerate(paragraph['qas']):
                    indices.append((d_id, p_id, q_id))
        return cls(data, indices)

    # Load several JSON files and merge their documents.
    @classmethod
    def loads(cls, *file_path: str):
        datas = {'data': []}
        indices = []
        for f in file_path:
            with open(f, 'r', encoding='utf-8') as fd:
                data = json.load(fd)
            datas['data'] += data['data']
        for d_id, document in enumerate(datas['data']):
            for p_id, paragraph in enumerate(document['paragraphs']):
                for q_id, _ in enumerate(paragraph['qas']):
                    indices.append((d_id, p_id, q_id))
        return cls(datas, indices)

    # Split the dataset into train and eval subsets.
    @classmethod
    def split(cls, dataset, eval_ratio: float = .1):
        indices = list(dataset._indices)
        random.shuffle(indices)
        train_indices = indices[int(len(indices) * eval_ratio):]
        eval_indices = indices[:int(len(indices) * eval_ratio)]
        return cls(dataset._data, train_indices), cls(dataset._data, eval_indices)

    def __getitem__(self, index: int) -> Dict[str, Any]:
        d_id, p_id, q_id = self._indices[index]
        paragraph = self._data['data'][d_id]['paragraphs'][p_id]
        qa = paragraph['qas'][q_id]

        guid = qa['guid'] if 'guid' in qa else uuid.uuid4().hex

        # Normalize newline, soft hyphen, non-breaking space, and zero-width
        # space to a plain space; every replacement is one-to-one in length,
        # so answer_start character offsets stay valid.
        context = (paragraph['context'].replace('\n', ' ').replace('\xad', ' ')
                   .replace('\xa0', ' ').replace('\u200b', ' '))
        question = (qa['question'].replace('\n', ' ').replace('\xad', ' ')
                    .replace('\xa0', ' ').replace('\u200b', ' '))
        answers = qa['answers']
        if answers is not None:
            for a in answers:
                a['text'] = (a['text'].replace('\n', ' ').replace('\xad', ' ')
                             .replace('\xa0', ' ').replace('\u200b', ' '))

        return {'guid': guid,
                'context': context,
                'question': question,
                'answers': answers}

    def __len__(self) -> int:
        return len(self._indices)
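# Usage sketch (illustrative; 'train.json' is a hypothetical file):
#   dataset = KoMRC.load('train.json')
#   train_set, eval_set = KoMRC.split(dataset, eval_ratio=0.1)
#   train_set[0]  # -> {'guid': ..., 'context': ..., 'question': ..., 'answers': ...}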
class TokenizedKoMRC(KoMRC):
    def __init__(self, data, indices: List[Tuple[int, int, int]]) -> None:
        super().__init__(data, indices)
        self._tokenizer = tokenizer

    # Tokenize a sentence and record each token's (start, end) character span.
    def _tokenize_with_position(self, sentence: str) -> List[Tuple[str, Tuple[int, int]]]:
        position = 0
        tokens = []
        sentence_tokens = []
        for word in sentence.split():
            # Keep a word whole if the tokenizer would map any part of it to
            # [UNK], so the original characters are not lost.
            if '[UNK]' in self._tokenizer.tokenize(word):
                sentence_tokens.append(word)
            else:
                sentence_tokens += self._tokenizer.tokenize(word)
        for morph in sentence_tokens:
            # Strip the WordPiece continuation prefix before locating the piece.
            if len(morph) > 2 and morph[:2] == '##':
                morph = morph[2:]
            position = sentence.find(morph, position)
            tokens.append((morph, (position, position + len(morph))))
            position += len(morph)
        return tokens
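    # Example (illustrative): "서울은 수도이다" might tokenize to pairs like
    # [('서울', (0, 2)), ('은', (2, 3)), ('수도', (4, 6)), ('이다', (6, 8))],
    # i.e. each piece mapped back to its character span in the sentence.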
    def __getitem__(self, index: int) -> Dict[str, Any]:
        sample = super().__getitem__(index)
        # sample = {'guid': ..., 'context': ..., 'question': ..., 'answers': ...}
        context, position = zip(*self._tokenize_with_position(sample['context']))
        context, position = list(context), list(position)
        question = self._tokenizer.tokenize(sample['question'])

        if sample['answers'] is not None:
            answers = []
            for answer in sample['answers']:
                # Find the token whose character span contains answer_start.
                for start, (position_start, position_end) in enumerate(position):
                    if position_start <= answer['answer_start'] < position_end:
                        break
                else:
                    print(context, answer)
                    print(answer['answer_start'])
                    raise ValueError("No matched start position")

                # Extend the span until the whitespace-stripped answer text is covered.
                target = ''.join(answer['text'].split(' '))
                source = ''
                for end, morph in enumerate(context[start:], start):
                    source += morph
                    if target in source:
                        break
                else:
                    print(context, answer)
                    print(answer['answer_start'])
                    raise ValueError("No matched end position")

                answers.append({'start': start, 'end': end})
            answer_text = sample['answers'][0]['text']
        else:
            answers = None
            answer_text = None

        return {
            'guid': sample['guid'],
            'context_original': sample['context'],
            'context_position': position,
            'question_original': sample['question'],
            'context': context,
            'question': question,
            'answers': answers,
            'answers_text': answer_text,
        }
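# Worked example of the span search above (illustrative, toy data): with
# position = [(0, 2), (2, 3), (4, 6)] and answer_start = 4, the first loop
# selects start = 2; tokens are then concatenated from index 2 onward until
# the whitespace-stripped answer text appears in the running string.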
class Indexer:
    def __init__(self, vocabs: List[str], max_length: int = 1024):
        self.max_length = max_length
        self.vocabs = vocabs

    @property
    def vocab_size(self):
        return len(self.vocabs)

    @property
    def pad_id(self):
        return tokenizer.vocab['[PAD]']

    @property
    def unk_id(self):
        return tokenizer.vocab['[UNK]']

    @property
    def cls_id(self):
        return tokenizer.vocab['[CLS]']

    @property
    def sep_id(self):
        return tokenizer.vocab['[SEP]']

    def sample2ids(self, sample: Dict[str, Any]) -> Dict[str, Any]:
        context = [tokenizer.convert_tokens_to_ids(token) for token in sample['context']]
        question = [tokenizer.convert_tokens_to_ids(token) for token in sample['question']]

        # Truncate the context so question + context + 3 special tokens fit max_length.
        context = context[:self.max_length - len(question) - 3]

        input_ids = [self.cls_id] + question + [self.sep_id] + context + [self.sep_id]
        token_type_ids = [0] * (len(question) + 1) + [1] * (len(context) + 2)

        if sample['answers'] is not None:
            answer = sample['answers'][0]
            # Shift token indices past [CLS] + question + [SEP]; clamp to max_length.
            start = min(len(question) + 2 + answer['start'], self.max_length - 1)
            end = min(len(question) + 2 + answer['end'], self.max_length - 1)
        else:
            start = None
            end = None

        return {
            'guid': sample['guid'],
            'context': sample['context_original'],
            'question': sample['question_original'],
            'position': sample['context_position'],
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'start': start,
            'end': end,
        }
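# Layout produced by sample2ids:
#   input_ids:      [CLS] q_1 ... q_m [SEP] c_1 ... c_n [SEP]
#   token_type_ids:   0    0  ...  0    1    1  ...  1    1
# Answer token indices are shifted by len(question) + 2 ([CLS] + question + [SEP]).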
class IndexerWrappedDataset:
    def __init__(self, dataset: TokenizedKoMRC, indexer: Indexer) -> None:
        self._dataset = dataset
        self._indexer = indexer

    def __len__(self) -> int:
        return len(self._dataset)

    def __getitem__(self, index: int) -> Dict[str, Any]:
        sample = self._indexer.sample2ids(self._dataset[index])
        # No padding is applied here, so every position is a real token.
        sample['attention_mask'] = [1] * len(sample['input_ids'])
        return sample
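# A wrapped sample (illustrative) carries everything the loop below needs:
#   {'guid': ..., 'input_ids': [...], 'token_type_ids': [...],
#    'attention_mask': [1, 1, ...], 'position': [(start, end), ...],
#    'context': ..., 'question': ..., 'start': None, 'end': None}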
test_dataset = TokenizedKoMRC.load('/data/jw/Data/test.json')
indexer_test = Indexer(list(tokenizer.vocab.keys()))
indexed_test_dataset = IndexerWrappedDataset(test_dataset, indexer_test)
start_visualize = []
end_visualize = []

load_model.eval()
with torch.no_grad(), open(f'/data/jw/submission/{model_name}', 'w') as fd:
    writer = csv.writer(fd)
    writer.writerow(['Id', 'Predicted'])
    rows = []
    for sample in tqdm(indexed_test_dataset, "Testing"):
        input_ids, token_type_ids = [
            torch.tensor(sample[key], dtype=torch.long, device="cuda")
            for key in ("input_ids", "token_type_ids")
        ]
        output = load_model(input_ids=input_ids[None, :],
                            token_type_ids=token_type_ids[None, :])
        start_logits = output.start_logits.squeeze(0)
        end_logits = output.end_logits.squeeze(0)

        # Softmax over context tokens only (token_type_ids == 1, minus the two [SEP]s).
        start_prob = start_logits[token_type_ids.bool()][1:-1].softmax(-1)
        end_prob = end_logits[token_type_ids.bool()][1:-1].softmax(-1)

        # probability[i, j] = P(start=i) * P(end=j); triu enforces end >= start.
        probability = torch.triu(start_prob[:, None] @ end_prob[None, :])
        # Keep only spans of at most 8 tokens (j - i <= 7).
        probability = torch.tril(probability, diagonal=7)

        index = torch.argmax(probability).item()
        start = index // len(end_prob)
        end = index % len(end_prob)

        # Emit an empty span when the probability is too low. NOTE: softmax
        # outputs are always >= 0, so with this threshold the first branch is
        # always taken; raise the threshold above 0 to actually filter.
        if start_prob[start] >= 0 or end_prob[end] >= 0:
            start_str = sample['position'][start][0]
            end_str = sample['position'][end][1]
        else:
            start_str = 0
            end_str = 0

        start_visualize.append((list(start_prob.cpu()), (start, end), (start_str, end_str)))
        end_visualize.append((list(end_prob.cpu()), (start, end), (start_str, end_str)))
        rows.append([sample["guid"], sample['context'][start_str:end_str]])

    writer.writerows(rows)
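# Decoding note: torch.argmax flattens the (n, n) probability matrix, so
# index // n recovers the start row and index % n the end column; e.g. with
# n = 5 and index = 7, (start, end) = (1, 2).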