import os
import torch
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification
from torch.optim import AdamW
from sklearn.model_selection import train_test_split

# =========================
# 1. LOAD DATA
# =========================
# The dataset is expected as semicolon-delimited lines: "<label>;<text>",
# with label "ham" or "spam". Malformed or empty lines are skipped.
labels, texts = [], []
with open("data/spam.csv", encoding="latin-1") as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        parts = line.split(";", 1)
        if len(parts) == 2:
            label, text = parts
            if label in ["ham", "spam"]:
                labels.append(0 if label == "ham" else 1)  # ham -> 0, spam -> 1
                texts.append(text)

df = pd.DataFrame({"text": texts, "label": labels})
print(f"Total data: {len(df)}")

# =========================
# 2. SPLIT DATA
# =========================
# Stratified 80/20 split; X_test/y_test feed the evaluation sketch further below.
X_train, X_test, y_train, y_test = train_test_split(
    df["text"],
    df["label"],
    test_size=0.2,
    random_state=42,
    stratify=df["label"],
)

# =========================
# 3. TOKENIZER
# =========================
tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")


class SpamDataset(Dataset):
    """Tokenizes all texts once up front and serves (encoding, label) items."""

    def __init__(self, texts, labels):
        self.encodings = tokenizer(
            list(texts),
            truncation=True,
            padding=True,
            max_length=128,
        )
        self.labels = labels.values

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        item["labels"] = torch.tensor(self.labels[idx])
        return item


train_dataset = SpamDataset(X_train, y_train)
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)

# =========================
# 4. MODEL
# =========================
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device:", device)

model = DistilBertForSequenceClassification.from_pretrained(
    "distilbert-base-uncased",
    num_labels=2,
)
model.to(device)

optimizer = AdamW(model.parameters(), lr=2e-5)

# =========================
# 5. TRAINING
# =========================
EPOCHS = 3
for epoch in range(EPOCHS):
    model.train()
    total_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)  # the "labels" key makes the model return a loss
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    avg_loss = total_loss / len(train_loader)
    print(f"Epoch {epoch+1}/{EPOCHS} | Loss: {avg_loss:.4f}")

# =========================
# 6. SAVE MODEL (GUARANTEED)
# =========================
SAVE_DIR = "model_distilbert"
os.makedirs(SAVE_DIR, exist_ok=True)
model.save_pretrained(SAVE_DIR)
tokenizer.save_pretrained(SAVE_DIR)
print(f"\n✅ DISTILBERT MODEL SUCCESSFULLY SAVED TO: {SAVE_DIR}/")