-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstudent_1B_ft.py
More file actions
135 lines (118 loc) · 3.91 KB
/
student_1B_ft.py
File metadata and controls
135 lines (118 loc) · 3.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import sys
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from peft import get_peft_model, LoraConfig, TaskType
from datasets import load_dataset
import wandb
from huggingface_hub import login

# SECURITY FIX: the original script committed a Weights & Biases API key and
# a Hugging Face access token in plaintext. Those credentials are now public
# and must be revoked/rotated. Read them from the environment instead so
# they never land in version control.
_wandb_key = os.environ.get("WANDB_API_KEY")
if _wandb_key:
    wandb.login(key=_wandb_key)
os.environ["WANDB_PROJECT"] = "ell884-proj"

_hf_token = os.environ.get("HF_TOKEN")
if _hf_token:
    login(_hf_token)
# Local path to the base model checkpoint.
model_name = "/home/models/Llama-3.2-1B"

# Load tokenizer and model; use fp16 on GPU to halve memory, fp32 on CPU.
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    # SECURITY FIX: the original passed a hard-coded HF token here. Rely on
    # the HF_TOKEN environment variable (or a prior `huggingface-cli login`)
    # instead of embedding credentials in source.
    token=os.environ.get("HF_TOKEN"),
)
# Disable any chat template so the raw prompt strings below are tokenized
# exactly as written.
tokenizer.chat_template = None

# Attach LoRA adapters to the attention projections only (rank 8).
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    lora_dropout=0.1,
    bias="none",
    task_type=TaskType.CAUSAL_LM,
)
model = get_peft_model(model, peft_config)

# Llama tokenizers ship without a pad token; reuse EOS so padded batches work.
tokenizer.pad_token = tokenizer.eos_token
# Prompt formatting
EOS_TOKEN = tokenizer.eos_token

def formatting_prompts_func(example):
    """Build an Alpaca-style prompt from one example and tokenize it.

    Expects ``example`` to carry ``instruction`` and ``output`` keys plus an
    optional ``input`` key. Returns the tokenizer output (input_ids,
    attention_mask) extended with a ``labels`` field so the Trainer can
    compute the causal-LM loss.
    """
    instruction = example["instruction"]
    input_text = example.get("input", "")
    output_text = example["output"]
    if input_text:
        prompt = f"""### Instruction:
{instruction}
### Input:
{input_text}
### Response:
{output_text}""" + EOS_TOKEN
    else:
        prompt = f"""### Instruction:
{instruction}
### Response:
{output_text}""" + EOS_TOKEN
    tokens = tokenizer(prompt, padding="max_length", truncation=True, max_length=512)
    # FIX: the original copied input_ids straight into labels, so the loss
    # was also computed over the max_length padding — and because pad_token
    # is set to eos_token above, nothing downstream masks it either. Mask
    # padded positions with -100 (ignored by the CE loss); real tokens
    # (attention_mask == 1), including the terminal EOS, still contribute.
    tokens["labels"] = [
        tok if mask == 1 else -100
        for tok, mask in zip(tokens["input_ids"], tokens["attention_mask"])
    ]
    return tokens
# Load the instruction-tuning corpus, keep a reproducible 15% random slice,
# then carve 10% of that slice off for validation.
raw_train = load_dataset("json", data_files="instruction_cnndm.jsonl")["train"]
subset_size = int(0.15 * len(raw_train))
sampled = raw_train.shuffle(seed=120).select(range(subset_size))
dataset = sampled.train_test_split(test_size=0.1, seed=121)
print(dataset)

# Tokenize both splits with the shared prompt formatter.
train_data = dataset["train"].map(formatting_prompts_func)
val_data = dataset["test"].map(formatting_prompts_func)
print(train_data)
print(train_data[0])
# Training: LoRA fine-tune with step-based eval/checkpointing. eval and save
# cadences match (500 steps) so load_best_model_at_end can restore the
# checkpoint with the lowest eval_loss when training finishes.
# (Dead commented-out SFTTrainer-era arguments removed.)
trainer = Trainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=train_data,
    eval_dataset=val_data,
    args=TrainingArguments(
        label_names=["labels"],
        per_device_train_batch_size=2,
        gradient_accumulation_steps=4,  # effective batch of 8 per device
        warmup_steps=50,
        num_train_epochs=4,
        learning_rate=1e-4,
        fp16=torch.cuda.is_available(),
        logging_steps=50,
        eval_strategy='steps',
        eval_steps=500,
        save_total_limit=5,
        save_strategy='steps',
        save_steps=500,
        load_best_model_at_end=True,
        metric_for_best_model='eval_loss',
        greater_is_better=False,
        optim="adamw_torch_fused",
        weight_decay=0.01,
        lr_scheduler_type="linear",
        seed=3407,
        output_dir="outputs",
        report_to="wandb",
        run_name='student-1B-cnn-r8',
    ),
)
# GPU info — FIX: only query CUDA when a device is actually present; the
# original crashed on CPU-only machines even though the model load above
# deliberately falls back to CPU/float32.
if torch.cuda.is_available():
    gpu_stats = torch.cuda.get_device_properties(0)
    start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
    print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
    print(f"{start_gpu_memory} GB of memory reserved.")
else:
    print("No CUDA device available; training will run on CPU.")

# Train, then save the LoRA adapter weights and tokenizer.
trainer_stats = trainer.train()
model.save_pretrained("student_cnn_r8")
tokenizer.save_pretrained("student_cnn_r8")