import sys
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "7"
import fire
import torch
# from peft import PeftModel
import transformers
# import gradio as gr
import json

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # make the program's CUDA device numbering match the physical PCI bus order
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"  # use only GPUs 0 and 1

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
# Pick the best available device: CUDA GPU, Apple MPS, or CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:  # older torch builds have no torch.backends.mps
    pass
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/converted_models/llama2-chat-7b" # "/path/to/WizardLM13B",
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Lunyu-LLM"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/converted_models/llama-1-13b"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/converted_models/llama-2-13b-chat"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/RL_evol_llama1_7b_ms/checkpoint-240"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/converted_models/llama2-chat-7b"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/converted_models/llama-1-7b"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/llama2_7b_gpt_ans_25k_25dot25k_40k_40dot3k/checkpoint-30"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/llama2_7b_gpt_ans_25k_25dot25k/checkpoint-30"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/RL_evol_llama2_chat_7B_new_wizard13b_lr_re/checkpoint-30"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/llama1_7b_gpt_ans_25k_25dot25k_40k_40dot3k/checkpoint-200"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/llama1_7b_gpt_ans_25k_25dot25k/checkpoint-200"
# base_model = "/home/gushangding/MyCode_vt/Lunyu_model/Llama-X/trained_models/RL_evol_llama1_7b_ms/checkpoint-400"
base_model = "/home/gushangding/MyCode_vt/Lunyu_model/wizardlm/wizardLM-7B-HF/wizardLM-7B-HF"
# assert base_model, (
# "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
# )
tokenizer = LlamaTokenizer.from_pretrained(base_model)
load_8bit = False
if device == "cuda":
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=load_8bit,
        torch_dtype=torch.float16,
        device_map="auto",
    )
elif device == "mps":
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    # CPU fallback so `model` is always defined when neither CUDA nor MPS is available.
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        device_map={"": device},
        low_cpu_mem_usage=True,
    )

# unwind broken decapoda-research config
model.config.pad_token_id = tokenizer.pad_token_id = 0  # unk
model.config.bos_token_id = 1
model.config.eos_token_id = 2

if not load_8bit:
    model.half()  # seems to fix bugs for some users.

if torch.__version__ >= "2" and sys.platform != "win32":
    model = torch.compile(model)
# load_8bit: bool = False,
# input_data_path = "/path/to/WizardLM_testset.jsonl",
# output_data_path = "/path/to/WizardLM_testset_output.jsonl",
class Call_model():
    # The module-level model is switched to eval mode once, when the class body runs.
    model.eval()

    def evaluate(self, instruction):
        # print("instruction---------:", instruction)
        # Append the instruction-tuning response marker before generation.
        final_output = self.inference(instruction + "\n\n### Response:")
        # print("final_output-----------------:", final_output)
        return final_output
    def inference(self,
                  batch_data,
                  input=None,
                  temperature=1,
                  top_p=0.95,
                  top_k=40,
                  num_beams=1,
                  max_new_tokens=4096,
                  **kwargs,
                  ):
        # Wrap the request in the Vicuna-style chat prompt used by the WizardLM inference scripts.
        prompts = f"""A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {batch_data} ASSISTANT:"""
        # prompts = batch_data
        inputs = tokenizer(prompts, return_tensors="pt")
        input_ids = inputs["input_ids"].to(device)
        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            **kwargs,
        )
        with torch.no_grad():
            generation_output = model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens,
            )
        s = generation_output.sequences
        output = tokenizer.batch_decode(s, skip_special_tokens=True)
        # Keep only the assistant's reply, dropping the echoed prompt.
        output = output[0].split("ASSISTANT:")[1].strip()
        # print("output------------------------:", output)
        return output
# input_data = open(input_data_path, mode='r', encoding='utf-8')
# output_data = open(output_data_path, mode='w', encoding='utf-8')
# instruction = ""
# final_output = inference(instruction)
# print("final_output-----------------:", final_output)
# for num, line in enumerate(input_data.readlines()):
# print(num)
# print(line)
# one_data = json.loads(line)
# id = one_data["idx"]
# Category = one_data["idx"]
# instruction = one_data["Instruction"]
# final_output = inference(instruction)
# new_data = {
# "id": id,
# "instruction": instruction,
# "wizardlm-13b": final_output
# }
# output_data.write(json.dumps(new_data) + '\n')
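
# A minimal sketch of the batch driver hinted at by the commented-out loop above,
# adapted to the Call_model class. The JSONL paths and the "idx"/"Instruction"
# field names are assumptions taken from those comments, not a confirmed format.
def run_batch_inference(input_data_path, output_data_path, caller):
    with open(input_data_path, mode="r", encoding="utf-8") as input_data, \
         open(output_data_path, mode="w", encoding="utf-8") as output_data:
        for line in input_data:
            one_data = json.loads(line)
            instruction = one_data["Instruction"]
            final_output = caller.evaluate(instruction)
            new_data = {
                "id": one_data["idx"],
                "instruction": instruction,
                "output": final_output,
            }
            output_data.write(json.dumps(new_data) + "\n")
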
if __name__ == "__main__":
    # fire.Fire(main)
    # prompt = "What are the names of some famous actors that started their careers on Broadway?"  # "How are you?"
    # prompt = input("Please input:")
    prompt = "Given a set of shoe size, add up the total size: Size 4, Size 7, Size 9"
    # Other prompts tried during testing:
    # "Step by step, how would you solve this equation? 3x + 6 = 24"
    # "Suppose I have 12 eggs. I drop 2 and eat 5. How many eggs do I have left?"
    # "Step by step, how would you solve this equation? (7x + 7) + (3x + 4) = 15"
    # "Find the 13th root of 1000"
    # "Identify all prime numbers between 50 and 60."
    # "What is the square root of 5929?"
    # "Identify three prime numbers between 1 and 10."
    # "Elaborate on the sequential methodology you would employ to isolate the variable within this intricate second-degree polynomial equation."
    # "Given a set of shoe size, add up the total size: Size 4, Size 7, Size 9"
    # "Given that f(x) = 5x^3 - 2x + 3, find the value of f(2)"
    # "Step by step, how would you solve this equation? (7x + 7)/(3x + 4) = 5"
    # "Describe a task that takes place at a dinner table."
    # "Write a simple guide for uploading the bibliography database on Overleaf."
    prompt = str(prompt)
    model_evaluate = Call_model()
    prompt_state = model_evaluate.evaluate(prompt)
    # print("Output--------------:", prompt_state)
    print(prompt_state)