train.py
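"""Fine-tune t5-small with an extended tokenizer on a CSV-based seq2seq dataset."""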
import pandas as pd
from transformers import T5Tokenizer, T5ForConditionalGeneration, Trainer, TrainingArguments
from datasets import Dataset
# Load the base model (t5-small) and the tokenizer extended with custom tokens
model_name = "t5-small"
tokenizer = T5Tokenizer.from_pretrained('./extended-tokenizer')
model = T5ForConditionalGeneration.from_pretrained(model_name)
# Resize the embedding matrix to match the extended tokenizer's vocabulary
model.resize_token_embeddings(len(tokenizer))
# Load the training and validation splits, dropping rows with missing values
train_df = pd.read_csv('train_data.csv')
val_df = pd.read_csv('val_data.csv')
train_df.dropna(inplace=True)
val_df.dropna(inplace=True)
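# Both CSVs are assumed (from the column names used in preprocess_data below) to
# contain 'input_text' and 'target_text' columns, e.g.:
#
#   input_text,target_text
#   "some source sequence","its target sequence"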
# Convert pandas DataFrame to Hugging Face Dataset
train_dataset = Dataset.from_pandas(train_df)
val_dataset = Dataset.from_pandas(val_df)
# Tokenize the input and target texts
def preprocess_data(examples):
    inputs = examples['input_text']
    targets = examples['target_text']
    model_inputs = tokenizer(inputs, max_length=128, truncation=True, padding='max_length')
    # Tokenize the targets (newer transformers versions prefer tokenizer(text_target=...))
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=128, truncation=True, padding='max_length')
    model_inputs['labels'] = labels['input_ids']
    return model_inputs
train_dataset = train_dataset.map(preprocess_data, batched=True)
val_dataset = val_dataset.map(preprocess_data, batched=True)
# Training arguments
training_args = TrainingArguments(
    output_dir='./results',
    evaluation_strategy='epoch',     # Renamed to eval_strategy in newer transformers releases
    learning_rate=1e-4,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=30,
    weight_decay=0.01,
    warmup_steps=500,                # Warm up the learning rate over the first 500 steps
    gradient_accumulation_steps=2,   # Accumulate gradients if memory is an issue
    logging_dir='./logs',            # Directory for storing logs
    logging_steps=200,               # Log every 200 steps
    save_steps=1000,                 # Save a checkpoint every 1000 steps
    save_total_limit=5,              # Keep at most 5 checkpoints on disk
)
# Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
)
# Fine-tune the model
trainer.train()
# Save the fine-tuned model and tokenizer
model.save_pretrained('./fine-tuned-t5-small')
tokenizer.save_pretrained('./fine-tuned-t5-small')
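# --- Usage sketch (an addition, not part of the original script) ---
# Reload the saved model and run a single generation to sanity-check the result.
# The input string below is a hypothetical placeholder; use whatever format the
# training data's input_text column actually follows.
model = T5ForConditionalGeneration.from_pretrained('./fine-tuned-t5-small')
tokenizer = T5Tokenizer.from_pretrained('./fine-tuned-t5-small')
sample = tokenizer("example input text", return_tensors='pt', truncation=True, max_length=128)
output_ids = model.generate(**sample, max_length=128, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))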