# coding=utf-8
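# Decompose-and-merge long-form QA pipeline: each main question is decomposed
# into sub-questions, each sub-question is answered, and the sub-answers are
# merged into a final long-form answer. All three stages are scored and
# iteratively refined with Llama-2-7b-chat, and the final answers are
# evaluated with ROUGE over three repeated experiments.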
import numpy as np
import json
import re
import string
import argparse
from evaluate import load
from sent_similarity import get_scores
from self_bleu import calculate_selfBleu
from bert_score.utils import model2layers
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

bertscore = load("bertscore/bertscore.py")
# Load the LFQA test set: one question/reference-answer pair per entry.
file_path = "data/domain_lfqa_test.json"
questions1 = []
references1 = []
with open(file_path, "r", encoding="utf-8") as file:
    data = json.load(file)
for entry in data:
    questions1.append(entry.get("question", ""))
    references1.append(entry.get("answer", ""))
# ELI5 references (JSON Lines); loaded here but not used by the loop below.
parser = argparse.ArgumentParser()
parser.add_argument('--reference_file_eli5', type=str, default='data/eli5_test_200_new.json',
                    help='Reference file for the ELI5 test set.')
args, _ = parser.parse_known_args()
with open(args.reference_file_eli5, 'r', encoding='utf-8') as file:
    lines = file.readlines()
data4 = [json.loads(line) for line in lines]
references4 = [ex["answer"] for ex in data4]
questions4 = [ex["question"] for ex in data4]
sys_prompt = "You are an all-around expert with deep insights in various fields."

# Prompt templates for the decomposition and merge stages.
with open("Prompt/DPprompt2.json", "r") as json_file:
    all_prompt = json.load(json_file)
decomposition_goal = all_prompt["decomposition_goal"]
decomposition_rule = all_prompt["decomposition_rule"]
decomposition_example = all_prompt["decomposition_example"]
decomposition_act = all_prompt["decomposition_act"]
prefix = all_prompt["prefix"]
merge_goal = all_prompt["merge_goal"]
merge_rule = all_prompt["merge_rule"]
merge_example = all_prompt["merge_example"]
merge_act = all_prompt["merge_act"]
self_ask_prompt = all_prompt["self_ask_prompt"]
chain_of_thought_prompt = all_prompt["chain_of_thought_prompt"]
def compute_bertscore(predictions, references):
    results = bertscore.compute(
        predictions=predictions,
        references=references,
        model_type="facebook/roberta-large",
        num_layers=model2layers["roberta-large"],
    )
    return results['f1']
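# Usage sketch (hypothetical strings): compute_bertscore(["generated answer"],
# ["reference answer"]) returns the per-example BERTScore F1 list.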
def calculate_rouge(predictions, references):
    rouge = load("rouge")
    results = rouge.compute(predictions=predictions, references=references)
    print(results)
    return results
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', " ", text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(str(s)))))
def extract_answer(generated):
    # Keep the last line of the generation, then the text after the last colon,
    # and normalize it.
    if '\n' not in generated:
        last_line = generated
    else:
        last_line = generated.split('\n')[-1]
    if ':' not in last_line:
        after_colon = last_line
    else:
        after_colon = generated.split(':')[-1]
    after_colon = after_colon.strip()
    if not after_colon:
        return ""
    return normalize_answer(after_colon)
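# Example (hypothetical generation):
# extract_answer("Let us think step by step.\nAnswer: The Eiffel Tower")
# keeps the text after the last colon and normalizes it to "eiffel tower".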
def parse_questions(question_string):
    questions = question_string.split('\n')
    print("questions", questions)
    sub_questions = []
    for question in questions:
        match = re.match(r'^Q\d+: (.+\?)$', question)
        if match:
            sub_question = match.group(1)
            sub_questions.append(sub_question)
            print("sub_question", sub_question)
    print("sub_questions", sub_questions)
    return sub_questions
def extract_questions(text):
    # Pull out every "Qn: ...?" sub-question from the generated text.
    pattern = r'(Q\d+:.+?\?)'
    matches = re.findall(pattern, text)
    print("matches", matches)
    return matches
def check_answer_completeness(answer):
    # An answer counts as complete only if it ends with sentence-final
    # punctuation (endings like ':' or ',' already fail this check).
    return answer.rstrip().endswith(('.', '!', '?'))
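# E.g. check_answer_completeness("It works.") -> True, while truncated text
# such as "It works because" or "The reasons are:" -> False.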
model_path = "model/Llama-2-7b-chat-hf"

def output_7b1(questions):
    # Generator for the decomposition and merge stages (long outputs).
    model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16,
                                                 device_map="auto", offload_buffers=True)
    model = model.eval()
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    tokenizer.pad_token = tokenizer.eos_token
    answers = []
    input_ids = tokenizer(questions, return_tensors="pt", add_special_tokens=False).input_ids.to("cuda")
    outputs = model.generate(input_ids, max_new_tokens=1024, do_sample=True, top_p=0.9, temperature=0.9)
    ans = tokenizer.decode(outputs[0])
    answers.append(ans)
    return answers
def output_7b2(questions):
    # Generator for answering individual sub-questions (shorter outputs).
    model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16,
                                                 device_map="auto", offload_buffers=True)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    answers = []
    input_ids = tokenizer(questions, return_tensors="pt").input_ids.to("cuda")
    outputs = model.generate(input_ids, max_new_tokens=256, do_sample=True, top_p=0.9, temperature=0.9)
    ans = tokenizer.decode(outputs[0])
    answers.append(ans)
    return answers
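# output_7b1/output_7b2 above reload the checkpoint from disk on every call,
# which dominates runtime. A minimal sketch of a cached loader (an optional
# helper, not wired into the pipeline below; it assumes the same model_path
# and settings as above):
from functools import lru_cache

@lru_cache(maxsize=1)
def load_model_and_tokenizer():
    # Loaded once; returned from cache on every later call.
    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.float16, device_map="auto", offload_buffers=True
    ).eval()
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    tokenizer.pad_token = tokenizer.eos_token
    return model, tokenizer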
def remove_sequence(A, B):
    """
    Remove from list B every sequence that contains a sub-question of A.

    Args:
        A (str): Newline-separated text whose middle lines (first and last
            lines excluded) are the sub-questions to filter on.
        B (list): List of sequences.
    Returns:
        list: B without the sequences that contain any of A's sub-questions.
    """
    sub_questions = A.split('\n')[1:-1]
    processed_B = []
    for sequence in B:
        if not any(sub_question in sequence for sub_question in sub_questions):
            processed_B.append(sequence)
    return processed_B
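# Example (hypothetical strings):
# remove_sequence("header\nQ1: A?\nfooter", ["contains Q1: A? here", "clean"])
# -> ["clean"], since only the second item avoids the sub-question "Q1: A?".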
def get_top_sublists_and_removed_elements(array):
    # Score each leave-one-out sublist with self-BLEU; the higher the score
    # after removing an element, the more that element contributed to
    # diversity. Return the (up to) 10 most diversity-contributing elements.
    sublists_and_scores = [(array[:i] + array[i+1:], calculate_selfBleu(array[:i] + array[i+1:]), array[i])
                           for i in range(len(array))]
    sublists_and_scores.sort(key=lambda x: x[1], reverse=True)
    top_10_sublists_and_removed_elements = sublists_and_scores[:10]
    removed_elements = [item[2] for item in top_10_sublists_and_removed_elements]
    return removed_elements
def renumber_and_merge_questions(questions):
    renumbered_questions = []
    for i, question in enumerate(questions, start=1):
        colon_index = question.index(':')
        new_question = f"Q{i}{question[colon_index:]}"
        renumbered_questions.append(new_question)
    result = ' '.join(renumbered_questions)
    return result

def renumber_questions(questions):
    renumbered_questions = []
    for i, question in enumerate(questions, start=1):
        colon_index = question.index(':')
        new_question = f"Q{i}{question[colon_index:]}"
        renumbered_questions.append(new_question)
    return renumbered_questions
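# Example: renumber_questions(["Q3: Why?", "Q7: How?"]) -> ["Q1: Why?", "Q2: How?"],
# and renumber_and_merge_questions joins them into the single string
# "Q1: Why? Q2: How?".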
rouge_results = []
for experiment in range(3):
    answers1 = []
    answers2 = []
    i = 0
    n = len(questions1)
    while i < n:
        # Stage 1: decompose the main question into sub-questions, retrying up
        # to 5 times until the decomposition score clears threshold_d.
        prompt1 = decomposition_goal + '\n' + decomposition_rule + '\n' + decomposition_example + '\n' + "Main question:" + questions1[i] + '\n' + decomposition_act
        max_score1 = 0
        max_output1 = ""
        d_loop_key = 0
        threshold_d = 0.45
        prompt11 = prompt1
        while d_loop_key < 5:
            d_loop_key += 1
            output1 = output_7b1(prompt11)
            x = len(prompt11)  # character length of the echoed prompt
            output1 = output1[0][x:-4]  # strip the echoed prompt and the trailing "</s>"
            output1_arrays = extract_questions(output1)
            output1_arrays = get_top_sublists_and_removed_elements(output1_arrays)
            output1_arrays = renumber_questions(output1_arrays)
            output1 = renumber_and_merge_questions(output1_arrays)
            prompt11 = prompt1 + '\n' + output1
            score1 = calculate_selfBleu(output1_arrays)
            score11 = get_scores(questions1[i], [output1])
            # Harmonic mean of diversity (1 - self-BLEU) and relevance to the main question.
            score_sb = 1 - score1
            score1 = 2 * score_sb * score11[0] / (score_sb + score11[0])
            if score1 > threshold_d and check_answer_completeness(output1):
                max_score1 = score1
                max_output1 = output1
                break
            elif not check_answer_completeness(output1):
                loop_prompt1 = "Your answer is not a complete sentence. Please generate a complete answer."
            else:
                loop_prompt1 = f"The score for problem decomposition is {score1}, which is lower than the threshold of {threshold_d}. This indicates that our decomposition results did not meet our requirements: comprehensiveness, diversity. Refine these sub-questions to meet our requirements.\nMain question:{questions1[i]}{decomposition_act}"
            if max_score1 < score1 and score1 > 0:
                max_score1 = score1
                max_output1 = output1
            prompt11 = prompt11 + '\n' + loop_prompt1
        output1_arrays = extract_questions(max_output1)
        # Stage 2: answer each sub-question, retrying up to 5 times until the
        # answer's similarity to its sub-question clears threshold_a.
        merged_output2 = ""
        for j, output1_array in enumerate(output1_arrays, 1):
            print("subquestion:", output1_array)
            a_loop_key = 0
            prompt_prefix1 = "Please answer the question\n"
            prompt2 = f'Question: {output1_array}\n{prompt_prefix1}Answer:'
            max_score = 0
            max_output = ""
            prompt22 = prompt2
            while a_loop_key < 5:
                a_loop_key += 1
                threshold_a = 0.75
                output2_array = output_7b2(prompt22)
                x = len(prompt22)  # character length of the echoed prompt
                output2_array = output2_array[0][x:-4]  # strip the echoed prompt and the trailing "</s>"
                prompt22 = prompt2 + '\n' + output2_array
                score2 = get_scores(output1_array, [output2_array])
                if score2[0] >= threshold_a and check_answer_completeness(output2_array):
                    max_score = score2[0]
                    max_output = output2_array
                    break
                elif not check_answer_completeness(output2_array):
                    loop_prompt2 = "Your answer is not a complete sentence. Please generate a complete answer."
                else:
                    loop_prompt2 = f"The score for this question's response is {score2[0]}, which is lower than the threshold of {threshold_a}. This indicates that there is too much confusion regarding the answer to this question. Please refine the answer to improve the score."
                if max_score < score2[0]:
                    max_score = score2[0]
                    max_output = output2_array
                prompt22 = prompt22 + '\n' + loop_prompt2
            print("max_score:", max_score, "max_output:", max_output)
            merged_output2 += f"Q{j}:{output1_array}\n"
            merged_output2 += f"A{j}:{max_output}\n\n"
        # Stage 3: merge the sub-answers into one final answer, retrying up to
        # 5 times until its BERTScore against the main question clears threshold_m.
        prompt3 = merge_goal + '\n' + merge_rule + '\n' + merge_example + '\n' + "Main question:" + questions1[i] + '\n' + merged_output2 + merge_act
        m_loop_key = 0
        max_score3 = 0
        max_output3 = ""
        prompt33 = prompt3
        while m_loop_key < 5:
            m_loop_key += 1
            output3 = output_7b1(prompt33)
            x = len(prompt33)  # character length of the echoed prompt
            output3 = output3[0][x:-4]  # strip the echoed prompt and the trailing "</s>"
            if not merged_output2:
                # No sub-answers were produced; keep this direct answer and move on.
                max_score3 = -1024
                max_output3 = output3
                break
            prompt33 = prompt3 + '\n' + output3
            score3 = compute_bertscore([questions1[i]], [output3])
            threshold_m = 0.75
            if score3[0] >= threshold_m and check_answer_completeness(output3):
                max_score3 = score3[0]
                max_output3 = output3
                break
            elif not check_answer_completeness(output3):
                loop_prompt3 = "Your answer is not a complete sentence. Please generate a complete answer."
            else:
                loop_prompt3 = f"The score for the final answer is {score3[0]}, which is lower than the threshold of {threshold_m}. This means the final answer does not meet our set of two rules. Please refine the final answer to improve its effectiveness score."
            if max_score3 < score3[0]:
                max_score3 = score3[0]
                max_output3 = output3
            prompt33 = prompt33 + '\n' + loop_prompt3
        answers1.append(max_output3)
        answers2.append(merged_output2)
        i += 1
    # Save this run's final answers and its sub-question/answer database, then
    # score the run with ROUGE against the references.
    with open(f'results/llama/expertqa_answers{experiment + 1}(0.450.750.75).json', 'w') as file:
        json.dump(answers1, file)
    with open(f'results/llama/QAdatabase/expertqa_answers{experiment + 1}(0.450.750.75)_QAdatabase.json', 'w') as file:
        json.dump(answers2, file)
    print(f"llama expertqa 457575 experiment {experiment + 1}:")
    rouge_score = calculate_rouge(answers1, references1)
    rouge_results.append(rouge_score)
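# Aggregate ROUGE-1/2/L means and standard deviations across the three runs.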
rouge1_scores = [result['rouge1'] for result in rouge_results]
rouge2_scores = [result['rouge2'] for result in rouge_results]
rougeL_scores = [result['rougeL'] for result in rouge_results]
rouge1_mean = np.mean(rouge1_scores)
rouge2_mean = np.mean(rouge2_scores)
rougeL_mean = np.mean(rougeL_scores)
rouge1_std = np.std(rouge1_scores)
rouge2_std = np.std(rouge2_scores)
rougeL_std = np.std(rougeL_scores)
print("Rouge-1 Mean:", rouge1_mean)
print("Rouge-1 Std Dev:", rouge1_std)
print("Rouge-2 Mean:", rouge2_mean)
print("Rouge-2 Std Dev:", rouge2_std)
print("Rouge-L Mean:", rougeL_mean)
print("Rouge-L Std Dev:", rougeL_std)