-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsonar_loader.py
More file actions
60 lines (43 loc) · 1.79 KB
/
sonar_loader.py
File metadata and controls
60 lines (43 loc) · 1.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image
import natsort
import cv2
from torchvision.transforms import transforms
class sonarDataset(torch.utils.data.Dataset):
    """Segmentation dataset of sonar images.

    Expects ``root`` to contain two sibling directories, ``Images`` and
    ``Masks``, whose files pair up one-to-one when natural-sorted (same
    count, same order).  Each sample is a dict with:

    - ``'image'``: float32 ndarray of shape (1, H, W), values scaled to [0, 1]
    - ``'mask'``:  torch.long tensor of shape (H, W) with raw class ids
    """

    def __init__(self, root, classes, transform=None, size=(320, 480)):
        """
        Args:
            root: dataset path containing ``Images/`` and ``Masks/``.
            classes: sequence of class names; only its length is used to
                build ``class_values``.
            transform: optional callable applied to the sample dict.
            size: (width, height) passed to ``cv2.resize`` — note OpenCV's
                (w, h) convention, so the default yields 480x320 arrays.
        """
        self.root = root
        self.CLASSES = classes
        self.transform = transform
        self.size = size
        # Natural sort keeps e.g. img2 before img10 so image/mask pairs align.
        self.imgs = list(natsort.natsorted(os.listdir(os.path.join(root, "Images"))))
        self.masks = list(natsort.natsorted(os.listdir(os.path.join(root, "Masks"))))
        self.class_values = list(range(len(self.CLASSES)))

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        # Load image and mask as single-channel (grayscale) arrays.
        img_path = os.path.join(self.root, "Images", self.imgs[idx])
        mask_path = os.path.join(self.root, "Masks", self.masks[idx])
        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        if img is None:
            # cv2.imread returns None silently on a missing/corrupt file.
            raise FileNotFoundError(f"could not read image: {img_path}")
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        if mask is None:
            raise FileNotFoundError(f"could not read mask: {mask_path}")

        img = cv2.resize(img, self.size)
        # Nearest-neighbour only: bilinear interpolation would blend class
        # ids at region boundaries and invent labels that don't exist.
        mask = cv2.resize(mask, self.size, interpolation=cv2.INTER_NEAREST)

        # Scale to [0, 1] and add a leading channel axis -> (1, H, W).
        img = (img / 255.).astype(np.float32)
        img = np.expand_dims(img, axis=0)
        mask = torch.from_numpy(mask).long()

        sample = {'image': img, 'mask': mask}
        if self.transform:
            sample = self.transform(sample)
        return sample
class ToTensor(object):
    """Convert a ``{'image', 'mask'}`` sample of ndarrays to torch tensors.

    The image is assumed to be an HxWxC uint8-range array: it is scaled by
    1/255 and permuted to CxHxW float32.  The mask may be an ndarray or an
    already-converted torch tensor; either way it comes out as ``long``.

    NOTE(review): ``sonarDataset.__getitem__`` already divides its image by
    255 and emits it as (1, H, W), so applying this transform on top of it
    would rescale twice and mis-permute — confirm which pipeline feeds this
    before composing the two.
    """

    def __call__(self, sample):
        image, mask = sample['image'], sample['mask']
        # Standard scaling (subtract mean, divide by std of the dataset)
        # would likely be preferable to a plain /255.
        image = np.asarray(image) / 255.
        return {
            # HWC -> CHW, the layout torch conv layers expect.
            'image': torch.from_numpy(image).permute(2, 0, 1).float(),
            # as_tensor accepts both ndarrays and existing tensors, so a
            # mask that was already converted upstream no longer raises
            # TypeError the way from_numpy did.
            'mask': torch.as_tensor(mask).long(),
        }