# datasets.py
import os

import torch
import torch.nn.functional as F
from torch.utils import data
import numpy as np

from utils import random_crop

class UniformTrainDataset(data.Dataset):
    """Sharp/blurred patch pairs for spatially uniform blur.

    Blurred observations are synthesized on the fly by convolving a sharp
    patch with a blur kernel stored in the preprocessed .pt file.
    """

    def __init__(self, root, datasize, ps, train=False, transform=False):
        folder = 'tr' if train else 'va'
        datapath = os.path.join(root, 'data_' + folder + '_%05d_ps_%03d.pt' % (datasize, ps))
        self.data = torch.load(datapath)
        self.transform = transform
        self.train = train

    def __getitem__(self, index):
        if self.train:
            x = self.data['x'][index]
            if self.transform:
                # Random left-right flip.
                if np.random.rand() < 0.5:
                    x = x.flip(-1)
                # Random up-down flip.
                if np.random.rand() < 0.5:
                    x = x.flip(-2)
                # Random 90-degree rotation.
                if np.random.rand() < 0.5:
                    x = torch.rot90(x, 1, [-2, -1])
                # Random crop, 10 pixels smaller than the shorter side.
                ps = min(x.shape[-1], x.shape[-2]) - 10
                x = random_crop(x, ps)
            # Pair the patch with a randomly chosen kernel and its 'd' entry.
            idx_ke = np.random.randint(len(self.data['k']))
            k = self.data['k'][idx_ke]
            d = self.data['d'][idx_ke]
            # Reflect-pad by half the kernel size so the blurred image keeps
            # the spatial size of the sharp image after the convolution.
            hks = k.shape[-1] // 2
            padding = (hks, hks, hks, hks)
            y = F.conv2d(F.pad(x, padding, 'reflect'), k.unsqueeze(0)).squeeze(0)
            x = x.squeeze(0)
            return x, y, k, d
        else:
            # Validation: use the kernel and 'd' entry stored at the same index.
            x = self.data['x'][index]
            k = self.data['k'][index]
            d = self.data['d'][index]
            hks = k.shape[-1] // 2
            padding = (hks, hks, hks, hks)
            y = F.conv2d(F.pad(x, padding, 'reflect'), k.unsqueeze(0)).squeeze(0)
            x = x.squeeze(0)
            return x, y, k, d

    def __len__(self):
        return len(self.data['x'])
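
# Usage sketch (illustrative, not from the original file): wrapping
# UniformTrainDataset in a DataLoader. The root path 'data/' and the
# datasize/ps values are hypothetical, and batch_size=1 avoids collation
# errors in case the stored kernels differ in size across samples.
#
#     from torch.utils.data import DataLoader
#     train_set = UniformTrainDataset('data/', datasize=1000, ps=256,
#                                     train=True, transform=True)
#     loader = DataLoader(train_set, batch_size=1, shuffle=True)
#     x, y, k, d = next(iter(loader))  # as returned by __getitem__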

class NonUniformTrainDataset(data.Dataset):
    """Sharp/blurred image pairs for spatially varying (non-uniform) blur.

    Unlike the uniform case, the blurred images are precomputed; each sample
    also carries the stored 'mag' and 'ori' tensors describing the blur.
    """

    def __init__(self, root, datasize, ps, train=False, transform=False):
        folder = 'tr' if train else 'va'
        datapath = os.path.join(root, 'data_' + folder + '_%05d_ps_%03d.pt' % (datasize, ps))
        self.data = torch.load(datapath)
        self.transform = transform
        self.train = train

    def __getitem__(self, index):
        # Add a channel dimension to the stored 2-D images.
        x = self.data['img'][index].unsqueeze(0)
        y = self.data['imblur'][index].unsqueeze(0)
        mag = self.data['mag'][index]
        ori = self.data['ori'][index]
        return x, y, mag, ori

    def __len__(self):
        return len(self.data['img'])
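
# Minimal smoke test (illustrative addition, assuming a preprocessed .pt
# file exists under the hypothetical root 'data/' with the sizes shown).
if __name__ == '__main__':
    val_set = NonUniformTrainDataset('data/', datasize=100, ps=256)
    x, y, mag, ori = val_set[0]
    print('sharp:', tuple(x.shape), 'blurred:', tuple(y.shape))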