-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdataset.py
More file actions
46 lines (37 loc) · 1.28 KB
/
dataset.py
File metadata and controls
46 lines (37 loc) · 1.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import torch
from torch import Tensor, tensor
from typing import Dict, Union
class TextDataset(torch.utils.data.Dataset):
    """Character-level language-modeling dataset.

    Encodes ``text`` into integer token ids using ``characters`` as the
    vocabulary, then serves overlapping ``(x, y)`` windows of length
    ``block_size`` where ``y`` is ``x`` shifted right by one position.
    """

    def __init__(
        self,
        text: str,
        characters: str,  # was mis-annotated `int`: it is enumerated as a char vocabulary
        block_size: int,
        train: bool = True,
        train_frac: float = 0.8,  # generalizes the previously hard-coded 0.8 split
    ):
        """
        Construct character level encoding-decoding.

        Args:
            text: raw corpus to tokenize.
            characters: unique characters forming the vocabulary. Every
                character in ``text`` must appear here, otherwise encoding
                raises ``KeyError``.
            block_size: length of each (input, target) window.
            train: if True keep the leading ``train_frac`` of the data,
                otherwise the remaining tail (validation split).
            train_frac: position of the train/validation cut; default 0.8
                matches the original behavior.
        """
        super().__init__()
        self.text = text
        self.characters = characters
        self.block_size = block_size
        self.train = train
        # char <-> integer-id lookup tables
        self.stoi = {s: i for i, s in enumerate(self.characters)}
        self.itos = {i: s for i, s in enumerate(self.characters)}
        # enc: str -> list[int]; dec: list[int] -> str (raises KeyError on unknown ids/chars)
        self.enc = lambda s: [self.stoi[c] for c in s]
        self.dec = lambda l: ''.join([self.itos[i] for i in l])
        self.data = tensor(self.enc(self.text), dtype=torch.long)
        # deterministic contiguous split: head = train, tail = validation
        chunk_len = int(len(self.data) * train_frac)
        if self.train:
            self.data = self.data[:chunk_len]
        else:
            self.data = self.data[chunk_len:]

    def __getitem__(self, idx: int):
        """Return (x, y): a block_size window and the same window shifted by one."""
        # grab a chunk of (block_size + 1) characters from the data
        chunk = self.data[idx:idx + self.block_size + 1]
        x = chunk[:-1]
        y = chunk[1:]
        return x, y

    def __len__(self) -> int:
        # last valid start index is len(data) - block_size - 1,
        # so every chunk sliced in __getitem__ has the full block_size + 1 length
        return len(self.data) - self.block_size