-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathdata_dataset.py
172 lines (151 loc) · 6.98 KB
/
data_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
# -*- coding: utf-8 -*-
"""
************************************************************************
Copyright 2020 Institute of Theoretical and Applied Informatics,
Polish Academy of Sciences (ITAI PAS) https://www.iitis.pl
author: K. Książek, P.Głomb, M. Romaszewski
The code in this file is based on the code from library: https://github.com/nshaud/DeepHyperX
for paper
N. Audebert, B. Le Saux and S. Lefevre, "Deep Learning for Classification of Hyperspectral Data: A Comparative Review,"
in IEEE Geoscience and Remote Sensing Magazine, vol. 7, no. 2, pp. 159-173, June 2019.
The code is used for RESEARCH AND NON COMMERCIAL PURPOSES under the licence:
https://github.com/nshaud/DeepHyperX/blob/master/License
Therefore, the original authors license is used for the code in this file.
************************************************************************
Code for experiments in the paper by
K. Książek, M. Romaszewski, P. Głomb, B. Grabowski, M. Cholewa
`Blood Stains Classification with Hyperspectral
Imaging and Deep Neural Networks'
This file contains the PyTorch dataset for hyperspectral images and
related helpers.
"""
import spectral
import numpy as np
import torch
import torch.utils
import torch.utils.data
import os
from tqdm import tqdm
# ----------------------------------------------------------------------------
class TqdmUpTo(tqdm):
    """tqdm subclass exposing `update_to(n)`, which wraps `tqdm.update(delta_n)`."""
    def update_to(self, b=1, bsize=1, tsize=None):
        """Report absolute download progress to tqdm.

        b : int, optional
            Number of blocks transferred so far [default: 1].
        bsize : int, optional
            Size of each block (in tqdm units) [default: 1].
        tsize : int, optional
            Total size (in tqdm units). If [default: None] remains unchanged.
        """
        if tsize is not None:
            self.total = tsize
        transferred = b * bsize
        # tqdm.update() expects a delta, so convert the absolute count;
        # update() itself advances self.n to the new absolute value.
        self.update(transferred - self.n)
# ----------------------------------------------------------------------------
class HyperX(torch.utils.data.Dataset):
    """ Generic class for a hyperspectral scene: yields (patch, label) pairs """
    def __init__(self, data, gt, **hyperparams):
        """
        Args:
            data: 3D hyperspectral image (rows x cols x bands)
            gt: 2D array of labels (rows x cols)
        Required keys in hyperparams:
            dataset: str, name of the dataset
            patch_size: int, size of the spatial neighbourhood
            ignored_labels: iterable of label values excluded from sampling
            flip_augmentation: bool, set to True to perform random flips
            radiation_augmentation: bool, random radiation-like noise
            mixture_augmentation: bool, random same-class spectral mixing
            center_pixel: bool, set to True to consider only the label of the
                center pixel
            supervision: 'full' or 'semi' supervised algorithms
        Raises:
            ValueError: if `supervision` is neither 'full' nor 'semi'.
        """
        super(HyperX, self).__init__()
        self.data = data
        self.label = gt
        self.name = hyperparams['dataset']
        self.patch_size = hyperparams['patch_size']
        self.ignored_labels = set(hyperparams['ignored_labels'])
        self.flip_augmentation = hyperparams['flip_augmentation']
        self.radiation_augmentation = hyperparams['radiation_augmentation']
        self.mixture_augmentation = hyperparams['mixture_augmentation']
        self.center_pixel = hyperparams['center_pixel']
        supervision = hyperparams['supervision']
        # Fully supervised : use all pixels with label not ignored
        if supervision == 'full':
            mask = np.ones_like(gt)
            for l in self.ignored_labels:
                mask[gt == l] = 0
        # Semi-supervised : use all pixels, except padding
        elif supervision == 'semi':
            mask = np.ones_like(gt)
            mask = self.extend_border(mask)
        else:
            # BUGFIX: the original fell through with `mask` undefined
            # (NameError) for any other value; fail fast with a clear error.
            raise ValueError(
                "Unknown supervision mode: {!r} (expected 'full' or 'semi')"
                .format(supervision))
        x_pos, y_pos = np.nonzero(mask)
        p = self.patch_size // 2
        # Keep only positions whose full patch fits inside the image
        self.indices = np.array([(x, y) for x, y in zip(x_pos, y_pos)
                                 if x >= p and x < data.shape[0] - p
                                 and y >= p and y < data.shape[1] - p])
        np.random.shuffle(self.indices)
        # BUGFIX: build labels AFTER shuffling so that self.labels[i] stays
        # aligned with self.indices[i] (mixture_noise relies on that
        # correspondence).  Stored as an ndarray so `self.labels == value`
        # broadcasts element-wise instead of comparing a list to a scalar.
        self.labels = np.asarray([self.label[x, y] for x, y in self.indices])
    def extend_border(self, mask, border_value=255, ignored_value=0):
        """Zero out `mask` in a (patch_size//2)-radius square around every
        pixel labelled `border_value`, so border pixels are never sampled.
        Returns the mask (also modified in place)."""
        p = self.patch_size // 2
        border_indices = np.where(self.label == border_value)
        for x_pos, y_pos in zip(*border_indices):
            x_min = max(0, x_pos-p)
            x_max = x_pos+p+1
            y_min = max(0, y_pos-p)
            y_max = y_pos+p+1
            mask[x_min:x_max, y_min:y_max] = ignored_value
        return mask
    @staticmethod
    def flip(*arrays):
        """Randomly flip all `arrays` left-right and/or up-down; the same
        flips are applied consistently to every array."""
        horizontal = np.random.random() > 0.5
        vertical = np.random.random() > 0.5
        if horizontal:
            arrays = [np.fliplr(arr) for arr in arrays]
        if vertical:
            arrays = [np.flipud(arr) for arr in arrays]
        return arrays
    @staticmethod
    def radiation_noise(data, alpha_range=(0.9, 1.1), beta=1/25):
        """Return `data` scaled by a random alpha plus Gaussian noise."""
        alpha = np.random.uniform(*alpha_range)
        noise = np.random.normal(loc=0., scale=1.0, size=data.shape)
        return alpha * data + beta * noise
    def mixture_noise(self, data, label, beta=1/25):
        """Mix each labelled pixel's spectrum with the spectrum of another
        random training pixel carrying the same label, plus Gaussian noise."""
        alpha1, alpha2 = np.random.uniform(0.01, 1., size=2)
        noise = np.random.normal(loc=0., scale=1.0, size=data.shape)
        data2 = np.zeros_like(data)
        for idx, value in np.ndenumerate(label):
            if value not in self.ignored_labels:
                # self.labels is an ndarray, so this comparison is
                # element-wise (with the original list it was always
                # a scalar False and l_indices came back empty)
                l_indices = np.nonzero(self.labels == value)[0]
                l_indice = np.random.choice(l_indices)
                assert self.labels[l_indice] == value
                x, y = self.indices[l_indice]
                data2[idx] = self.data[x, y]
        return (alpha1 * data + alpha2 * data2) / (alpha1 + alpha2) + beta * noise
    def __len__(self):
        """Number of available patch positions."""
        return len(self.indices)
    def __getitem__(self, i):
        """Return the (data, label) pair for the i-th sampled position.

        data: float32 tensor of shape (bands,) when patch_size == 1,
            otherwise (1, bands, patch_size, patch_size).
        label: int64 scalar when center_pixel is True or patch_size == 1,
            otherwise a 2D patch of labels.
        """
        x, y = self.indices[i]
        x1, y1 = x - self.patch_size // 2, y - self.patch_size // 2
        x2, y2 = x1 + self.patch_size, y1 + self.patch_size
        data = self.data[x1:x2, y1:y2]
        label = self.label[x1:x2, y1:y2]
        if self.flip_augmentation and self.patch_size > 1:
            # Perform data augmentation (only on 2D patches)
            data, label = self.flip(data, label)
        if self.radiation_augmentation and np.random.random() < 0.1:
            data = self.radiation_noise(data)
        if self.mixture_augmentation and np.random.random() < 0.2:
            data = self.mixture_noise(data, label)
        # Copy the data into numpy arrays (PyTorch doesn't like numpy views)
        data = np.asarray(np.copy(data).transpose((2, 0, 1)), dtype='float32')
        label = np.asarray(np.copy(label), dtype='int64')
        # Load the data into PyTorch tensors
        data = torch.from_numpy(data)
        label = torch.from_numpy(label)
        # Extract the center label if needed
        if self.center_pixel and self.patch_size > 1:
            label = label[self.patch_size // 2, self.patch_size // 2]
        # Remove unused dimensions when we work with individual spectra
        elif self.patch_size == 1:
            data = data[:, 0, 0]
            label = label[0, 0]
        # Add a fourth dimension for 3D CNN
        if self.patch_size > 1:
            # Make 4D data ((Batch x) Planes x Channels x Width x Height)
            data = data.unsqueeze(0)
        return data, label