# dataset.py (forked from mrzhu-cool/pix2pix-pytorch)
from os import listdir
from os.path import join
import random

from PIL import Image
import torch
import torch.utils.data as data
import torchvision.transforms as transforms

from utils import is_image_file, load_img


class DatasetFromFolder(data.Dataset):
    """Paired image dataset: for each filename, loads image "a" and image "b"
    from matching subfolders and returns them as an (input, target) pair."""

    def __init__(self, image_dir, direction):
        super(DatasetFromFolder, self).__init__()
        self.direction = direction
        self.a_path = join(image_dir, "a")
        self.b_path = join(image_dir, "b")
        self.image_filenames = [x for x in listdir(self.a_path) if is_image_file(x)]

        # Note: self.transform is built but not used below; __getitem__ applies
        # ToTensor and Normalize step by step so the random crop and flip can be
        # shared between a and b.
        transform_list = [transforms.ToTensor(),
                          transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
        self.transform = transforms.Compose(transform_list)

    def __getitem__(self, index):
        # Load the paired images (same filename in both folders).
        a = Image.open(join(self.a_path, self.image_filenames[index])).convert('RGB')
        b = Image.open(join(self.b_path, self.image_filenames[index])).convert('RGB')

        # Resize to 286x286, then take the same random 256x256 crop from both.
        a = a.resize((286, 286), Image.BICUBIC)
        b = b.resize((286, 286), Image.BICUBIC)
        a = transforms.ToTensor()(a)
        b = transforms.ToTensor()(b)
        w_offset = random.randint(0, max(0, 286 - 256 - 1))
        h_offset = random.randint(0, max(0, 286 - 256 - 1))
        a = a[:, h_offset:h_offset + 256, w_offset:w_offset + 256]
        b = b[:, h_offset:h_offset + 256, w_offset:w_offset + 256]

        # Normalize from [0, 1] to [-1, 1].
        a = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(a)
        b = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(b)

        # Random horizontal flip, applied jointly to a and b.
        if random.random() < 0.5:
            idx = [i for i in range(a.size(2) - 1, -1, -1)]
            idx = torch.LongTensor(idx)
            a = a.index_select(2, idx)
            b = b.index_select(2, idx)

        if self.direction == "a2b":
            return a, b
        else:
            return b, a

    def __len__(self):
        return len(self.image_filenames)
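

# Usage sketch (not part of the original file): wraps the dataset in a
# DataLoader for training. The "dataset/facades/train" path, batch size, and
# worker count are illustrative assumptions; substitute your own data layout,
# which must contain paired "a" and "b" subfolders with identically named images.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    train_set = DatasetFromFolder("dataset/facades/train", direction="a2b")
    loader = DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2)

    # Each batch is a pair of tensors shaped (batch, 3, 256, 256) in [-1, 1].
    inputs, targets = next(iter(loader))
    print(inputs.shape, targets.shape)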