Commit 0.1.5
5. Fix typos and bugs in `data` and `modules`.
6. Add properties `nlayers`, `input_size` for networks in `modules`.
cainmagi committed Mar 14, 2021
1 parent df3a8ac commit 4b8cb65
Showing 12 changed files with 198 additions and 47 deletions.
README.md: 4 changes (3 additions, 1 deletion)
@@ -104,12 +104,14 @@ The compatibility test is shown as below. The checked item means this package pe

## Update reports

### 0.1.5 @ 3/9/2021
### 0.1.5 @ 3/14/2021

1. Add `DecoderNet` to our standard `module` protocol.
2. Fix some bugs of `data.h5py` and `data.preprocs`.
3. Make `draw.setFigure` enhanced by `contextlib`.
4. Add a title in `Readme.md`.
5. Fix typos and bugs in `data` and `modules`.
6. Add properties `nlayers`, `input_size` for networks in `modules`.

### 0.1.2 @ 2/27/2021

mdnc/__init__.py: 5 changes (4 additions, 1 deletion)
@@ -17,10 +17,13 @@
################################################################
# Update reports:
# ---------------
# 0.1.5 @ 3/9/2021
# 0.1.5 @ 3/14/2021
# 1. Add DecoderNet to our standard module protocol.
# 2. Fix some bugs of data.h5py and data.preprocs.
# 3. Make draw.setFigure enhanced by contextlib.
# 4. Add a title in `Readme.md`.
# 5. Fix typos and bugs in `data` and `modules`.
# 6. Add properties `nlayers`, `input_size` for networks in `modules`.
# 0.1.2 @ 2/27/2021
# 1. Fix more feature problems in `contribs.torchsummary`.
# 2. Fix bugs and finish `data.preprocs`.
mdnc/data/__init__.py: 2 changes (2 additions, 0 deletions)
@@ -29,6 +29,8 @@
################################################################
# Update reports:
# ---------------
# 0.1.5 @ 3/14/2021
# 1. Fix typos and bugs.
# 0.1.2 @ 2/27/2021
# 1. Finish preprocs.
# - Update the implementation of preprocs.ProcAbstract.
mdnc/data/__main__.py: 2 changes (1 addition, 1 deletion)
@@ -300,7 +300,7 @@ def test_1d(self):
plt.show()

def test_2d(self):
x = engine.preprocs.ProcNSTScaler(parent=engine.preprocs.ProcLifter(a=1.0, parent=engine.preprocs.ProcPad(pad_width=((0, 0), (10, -10), (-10, 10)), mode='constant', constant_values=0.0)))
x = engine.preprocs.ProcNSTScaler(dim=2, parent=engine.preprocs.ProcLifter(a=1.0, parent=engine.preprocs.ProcPad(pad_width=((0, 0), (10, -10), (-10, 10)), mode='constant', constant_values=0.0)))
with open(self.file_name, 'wb') as f:
pickle.dump(x, f)
with open(self.file_name, 'rb') as f:
mdnc/data/h5py.py: 3 changes (1 addition, 2 deletions)
@@ -783,8 +783,7 @@ def open(self, file_name, enable_read=None):
if self.__in_context:
raise RuntimeError('data.h5py: Should not open a file when the saver is managing a context, because there is already an opened file. Try to exit the context or create a new different saver.')
file_name, file_ext = os.path.splitext(file_name)
if file_ext != '.h5':
file_name += '.h5'
file_name += '.h5'
self.close()
if enable_read is None:
enable_read = self.enable_read
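For context, a minimal sketch (not part of the commit) of why the unconditional suffix is correct: `os.path.splitext` has already split the extension off, so the old `if file_ext != '.h5'` guard skipped re-appending `.h5` exactly when the caller had typed it.

import os

# After splitext the stem carries no extension, so '.h5' can always be re-appended;
# the old guard left 'records.h5' as the bare stem 'records'.
for name in ('records', 'records.h5', 'archive.tar'):
    stem, ext = os.path.splitext(name)
    print(name, '->', stem + '.h5')
# records     -> records.h5
# records.h5  -> records.h5
# archive.tar -> archive.h5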
mdnc/data/preprocs.py: 14 changes (8 additions, 6 deletions)
@@ -404,8 +404,8 @@ def __init__(self, shift=None, scale=None, axis=-1, inds=None, parent=None):
self.axis = axis

def preprocess(self, x):
xmean = np.mean(x, axis=self.axis) if self.shift is None else self.shift
xscale = np.amax(np.abs(x - xmean), axis=self.axis) if self.scale is None else self.scale
xmean = np.mean(x, axis=self.axis, keepdims=True) if self.shift is None else self.shift
xscale = np.amax(np.abs(x - xmean), axis=self.axis, keepdims=True) if self.scale is None else self.scale
self.set_mem('xmean', xmean)
self.set_mem('xscale', xscale)
return (x - xmean) / xscale
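For context, a minimal NumPy sketch (not part of the commit) of what `keepdims=True` buys here: the reduced axis is kept with length 1, so the computed statistics broadcast back over `x` for any choice of `axis`.

import numpy as np

x = np.random.rand(4, 8, 100)                                # batch normalized along axis=-1
xmean = np.mean(x, axis=-1, keepdims=True)                   # shape (4, 8, 1)
xscale = np.amax(np.abs(x - xmean), axis=-1, keepdims=True)  # shape (4, 8, 1)
y = (x - xmean) / xscale                                     # broadcasts back to (4, 8, 100)
# Without keepdims, xmean would have shape (4, 8) and 'x - xmean' would raise a
# broadcasting error (or silently misalign for other axis choices).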
@@ -426,7 +426,7 @@ class ProcNSTScaler(ProcAbstract):
https://stackoverflow.com/a/49317610
'''

def __init__(self, dim=2, kernel_length=9, epsilon=1e-6, inds=None, parent=None):
def __init__(self, dim, kernel_length=9, epsilon=1e-6, inds=None, parent=None):
'''Initialization.
Arguments:
dim: the dimension of the input data (to be normalized).
@@ -439,6 +439,8 @@ def __init__(self, dim=2, kernel_length=9, epsilon=1e-6, inds=None, parent=None)
be used as the parent of the current instance.
'''
super().__init__(inds=inds, parent=parent)
if dim not in (1, 2, 3):
raise ValueError('data.preprocs: The argument "dim" requires to be 1, 2, or 3.')
self.__dim = dim
self.__kernel_length = kernel_length
self.epsilon = epsilon
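A hedged usage sketch (the top-level `mdnc.data` import path is assumed, not shown in this hunk): `dim` no longer has a default and is validated against the supported data dimensions.

from mdnc import data   # assumed import path

proc = data.preprocs.ProcNSTScaler(dim=2)     # "dim" must now be passed explicitly
try:
    data.preprocs.ProcNSTScaler(dim=4)        # anything outside (1, 2, 3) ...
except ValueError as err:
    print(err)                                # ... raises the new validation error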
@@ -888,11 +890,11 @@ def __split_pad_width(pad_width):
return tuple(pad_width_), tuple(crop_width)
else:
raise ValueError('data.preprocs: the crop arguments could not get separated from the pad arguments. The given arguments "pad_width" may be not valid.')

@property
def pad_width(self):
return getattr
return object.__getattribute__(self, '_ProcPad__pad_width_')

@pad_width.setter
def pad_width(self, value):
self.__pad_width, self.__crop_width = self.__split_pad_width(value)
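The removed line returned the built-in `getattr` function itself rather than the stored value; the fix reads the attribute through its name-mangled spelling. A toy illustration of the mangling (not the real `ProcPad`):

class Demo:
    def __init__(self, width):
        self.__width_ = width      # stored under the mangled key '_Demo__width_'

    @property
    def width(self):
        # Equivalent to 'return self.__width_', but names the mangled attribute
        # explicitly and bypasses any customized attribute lookup.
        return object.__getattribute__(self, '_Demo__width_')

print(Demo(5).width)        # 5
print(Demo(5).__dict__)     # {'_Demo__width_': 5}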
mdnc/data/webtools.py: 12 changes (12 additions, 0 deletions)
@@ -332,6 +332,18 @@ def init_set_list(file_name='web-data'):
'dataset_file_name_01.txt',
'dataset_file_name_02.txt'
]
},
{
'tag': 'test',
'asset': 'test-datasets-2.tar.xz',
'items': [
'test_data_h5converter.h5',
'test_data_h5cparser.h5',
'test_data_h5cparser_seq.h5',
'test_data_h5gparser.h5',
'test_data_h5seqconverter1.h5',
'test_data_h5seqconverter2.h5'
]
}
],
'user': 'cainmagi',
mdnc/modules/__init__.py: 3 changes (3 additions, 0 deletions)
@@ -16,6 +16,9 @@
# 0.1.5 @ 3/2/2021
# 1. Add DecoderNet to conv, resnet.
# 2. Rename ConvNet by EncoderNet in conv, resnet.
# 3. Fix typos and bugs.
# 4. Add `nlayers` for all networks. Add `input_size` for
# decoders.
# 0.1.0 @ 2/26/2021
# 1. Create sub-packages: conv, resnet.
################################################################
mdnc/modules/__main__.py: 2 changes (2 additions, 0 deletions)
@@ -69,6 +69,7 @@ def test_networks(self):
print('modules.modules: Test {0}d networks.'.format(order))
for net in self.networks:
test_module = net(order=order, in_planes=input_size[0])
print('{0} with {1} layers along its depth.'.format(type(test_module).__name__, test_module.nlayers))
torchsummary.summary(test_module, input_size=input_size, device='cpu')
del test_module

@@ -77,6 +78,7 @@ def test_decodernets(self):
print('modules.modules: Test {0}d decoders.'.format(order))
for net in self.net_decs:
test_module = net(order=order, in_length=2, out_size=out_size[1:])
print('{0} with {1} layers along its depth.'.format(type(test_module).__name__, test_module.nlayers))
torchsummary.summary(test_module, input_size=(2, ), device='cpu')
del test_module
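A possible follow-up (not part of this commit): the new `input_size` property could replace the hard-coded shape when summarizing the decoders.

# Hypothetical variant of the call above, assuming input_size returns the
# single-sample shape expected by torchsummary:
test_module = net(order=order, in_length=2, out_size=out_size[1:])
torchsummary.summary(test_module, input_size=test_module.input_size, device='cpu')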

mdnc/modules/conv.py: 79 changes (67 additions, 12 deletions)
@@ -2,18 +2,20 @@
# -*- coding: UTF-8 -*-
'''
################################################################
# Modules - 1D convolutional network
# Modules - convolutional network
# @ Modern Deep Network Toolkits for pyTorch
# Yuchen Jin @ cainmagi@gmail.com
# Requirements: (Pay attention to version)
# python 3.5+
# pyTorch 1.0.0+
# This module is the definition of the 1D convolutional network.
# This module is the definition of the convolutional network.
# The network could be initialized here and used for training
# and processing.
################################################################
'''

import functools

import torch
import torch.nn as nn

@@ -71,7 +73,7 @@ def __init__(self, order, in_planes, out_planes, kernel_size=3, stride=1, paddin
'''
super().__init__()
ConvNd = get_convnd(order=order)
is_stride = check_is_stride(stride)
is_stride = check_is_stride(stride, output_size=output_size, scaler=scaler)
seq = []
if normalizer == 'null':
if (not is_stride) or scaler == 'down':
@@ -183,7 +185,7 @@ def __init__(self, order, in_planes, out_planes, hidden_planes=None,
_ConvModernNd(order, (in_planes + ex_planes) if i == 0 else hidden_planes, hidden_planes,
kernel_size=kernel_size, padding=padding, stride=1, scaler='down')
)
self.conv_scale = _ConvModernNd(order, hidden_planes, out_planes,
self.conv_scale = _ConvModernNd(order, hidden_planes if stack_level > 1 else (in_planes + ex_planes), out_planes,
kernel_size=kernel_size, padding=padding, stride=stride, scaler=scaler)

@staticmethod
@@ -235,13 +237,14 @@ def __init__(self, order, channel, layers, kernel_size=3, in_planes=1, out_plane
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_planes: the channel number of the output data.
kernel_size: the kernel size of each block.
'''
super().__init__()
if len(layers) < 2:
raise ValueError('modules.conv: The argument "layers" should contain at least 2 values, but provide "{0}"'.format(layers))
self.__layers = layers
ConvNd = get_convnd(order=order)

ksize_e, psize_e, _ = cal_kernel_padding(kernel_size, ksize_plus=2)
@@ -273,7 +276,16 @@ def __init__(self, order, channel, layers, kernel_size=3, in_planes=1, out_plane
self.conv_up_list.append(
_BlockConvStkNd(order, channel, channel, hidden_planes=channel, kernel_size=ksize, padding=psize,
stride=1, stack_level=layers[0], ex_planes=channel, scaler='down'))
self.conv_final = ConvNd(channel, in_planes, kernel_size=ksize_e, stride=1, padding=psize_e, bias=True)
self.conv_final = ConvNd(channel, out_planes, kernel_size=ksize_e, stride=1, padding=psize_e, bias=True)

@property
def nlayers(self):
'''Return number of convolutional layers along the depth.
'''
if len(self.__layers) == 0:
return 0
n_layers = functools.reduce(lambda x, y: x + 2 * y, self.__layers[:-1], self.__layers[-1]) + 2
return n_layers

def forward(self, x):
x = self.conv_first(x)
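A worked reading of the `nlayers` formula above (hypothetical stage list): presumably each stage except the deepest contributes convolutions to both the contracting and expanding paths, the deepest stage is used once, and the first/final convolutions add two more.

import functools

layers = [2, 2, 3]      # hypothetical stage configuration, depth 3
nlayers = functools.reduce(lambda x, y: x + 2 * y, layers[:-1], layers[-1]) + 2
print(nlayers)          # 3 + 2 * (2 + 2) + 2 = 13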
@@ -314,6 +326,7 @@ def __init__(self, order, channel, layers, kernel_size=3, in_planes=1, out_plane
super().__init__()
if len(layers) < 2:
raise ValueError('modules.conv: The argument "layers" should contain at least 2 values, but provide "{0}"'.format(layers))
self.__layers = layers
ConvNd = get_convnd(order=order)

ksize_e, psize_e, _ = cal_kernel_padding(kernel_size, ksize_plus=2)
@@ -345,7 +358,16 @@ def __init__(self, order, channel, layers, kernel_size=3, in_planes=1, out_plane
self.conv_up_list.append(
_BlockConvStkNd(order, channel, channel, hidden_planes=channel, kernel_size=ksize, padding=psize,
stride=1, stack_level=layers[0], scaler='down'))
self.conv_final = ConvNd(channel, in_planes, kernel_size=ksize_e, stride=1, padding=psize_e, bias=True)
self.conv_final = ConvNd(channel, out_planes, kernel_size=ksize_e, stride=1, padding=psize_e, bias=True)

@property
def nlayers(self):
'''Return number of convolutional layers along the depth.
'''
if len(self.__layers) == 0:
return 0
n_layers = functools.reduce(lambda x, y: x + 2 * y, self.__layers[:-1], self.__layers[-1]) + 2
return n_layers

@staticmethod
def cropping(x, x_ref_s):
Expand Down Expand Up @@ -399,6 +421,7 @@ def __init__(self, order, channel, layers, kernel_size=3, in_planes=1, out_lengt
super().__init__()
if len(layers) < 2:
raise ValueError('modules.conv: The argument "layers" should contain at least 2 values, but provide "{0}"'.format(layers))
self.__layers = layers
ConvNd = get_convnd(order=order)

ksize_e, psize_e, _ = cal_kernel_padding(kernel_size, ksize_plus=2)
@@ -422,12 +445,23 @@ def __init__(self, order, channel, layers, kernel_size=3, in_planes=1, out_lengt
if self.is_out_vector:
self.fc = nn.Linear(channel, out_length, bias=True)

@property
def nlayers(self):
'''Return number of convolutional layers along the depth.
'''
if len(self.__layers) == 0:
return 0
n_layers = functools.reduce(lambda x, y: x + y, self.__layers, 0) + 2
return n_layers

def forward(self, x):
for layer in self.netbody:
x = layer(x)
if self.is_out_vector:
x = torch.flatten(x, 1)
return self.fc(x)
else:
return x


class _DecoderNetNd(nn.Module):
@@ -462,12 +496,17 @@ def __init__(self, order, channel, layers, out_size, kernel_size=3, in_length=2,
super().__init__()
if len(layers) < 2:
raise ValueError('modules.conv: The argument "layers" should contain at least 2 values, but provide "{0}"'.format(layers))
self.__layers = layers
self.__in_length = in_length
ConvNd = get_convnd(order=order)
ksize_e, psize_e, _ = cal_kernel_padding(kernel_size, ksize_plus=2)
ksize, psize, stride = cal_kernel_padding(kernel_size)
self.__order = order
if isinstance(out_size, int):
out_size = (out_size, ) * order
self.shapes = cal_scaled_shapes(out_size, level=len(layers), stride=stride)
channels = tuple(map(lambda n: channel * (2 ** n), range(len(layers) - 1, -1, -1)))
self.__in_channel = channels[0]

# Require to convert the vector into channels
self.is_in_vector = (in_length is not None and in_length > 0)
@@ -489,6 +528,22 @@
self.netbody = netbody
self.conv_last = ConvNd(channels[-1], out_planes, kernel_size=ksize_e, stride=1, padding=psize_e, bias=True)

@property
def input_size(self):
if self.is_in_vector:
return (self.__in_length, )
else:
return (self.__in_channel, *self.shapes[-1])

@property
def nlayers(self):
'''Return number of convolutional layers along the depth.
'''
if len(self.__layers) == 0:
return 0
n_layers = functools.reduce(lambda x, y: x + y, self.__layers, 0) + (3 if self.is_in_vector else 2)
return n_layers

@staticmethod
def cropping(x, x_ref_s):
x_size = x.shape[2:]
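A worked reading of the two decoder properties (hypothetical values): `nlayers` sums the per-stage stacks and adds an input stem, and `input_size` reports either the vector length or the channel-plus-smallest-map shape.

layers, in_length = [3, 3, 2, 2, 2], 2           # hypothetical decoder configuration
is_in_vector = in_length is not None and in_length > 0

# 12 stage convolutions, plus 3 when decoding from a vector (presumably the
# vector-to-map stem and the first/final convolutions), plus 2 otherwise.
nlayers = sum(layers) + (3 if is_in_vector else 2)
print(nlayers)                                   # 15

# With a vector input the expected single-sample shape is just (in_length,);
# otherwise it is (channels[0], *shapes[-1]) as returned by the property above.
print((in_length,) if is_in_vector else None)    # (2,)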
@@ -672,9 +727,9 @@ def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_planes=1):
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_planes: the channel number of the output data.
kernel_size: the kernel size of each block.
'''
super().__init__(1, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_planes=out_planes)
@@ -698,9 +753,9 @@ def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_planes=1):
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_planes: the channel number of the output data.
kernel_size: the kernel size of each block.
'''
super().__init__(order=2, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_planes=out_planes)
@@ -724,9 +779,9 @@ def __init__(self, channel, layers, kernel_size=3, in_planes=1, out_planes=1):
of convolutional layers of a stage. The stage number, i.e.
the depth of the network, is the length of this list.
Arguments (optional):
kernel_size: the kernel size of each block.
in_planes: the channel number of the input data.
out_planes: the channel number of the output data.
kernel_size: the kernel size of each block.
'''
super().__init__(order=3, channel=channel, layers=layers, kernel_size=kernel_size,
in_planes=in_planes, out_planes=out_planes)
@@ -1267,7 +1322,7 @@ def decnet16(out_size, order=2, **kwargs):
'''Constructs a conv.DecoderNet-16 model.
Configurations:
Network depth: 5
Stage details: [2, 2, 3, 3, 3]
Stage details: [3, 3, 2, 2, 2]
First channel number: 64
Arguments:
out_size: the output shape of the network.
@@ -1276,7 +1331,7 @@
Other Arguments (see mdnc.modules.conv.DecoderNet*d):
in_length, out_planes, kernel_size
'''
model = __get_decnet_nd(order)(64, [2, 2, 3, 3, 3], out_size=out_size, **kwargs)
model = __get_decnet_nd(order)(64, [3, 3, 2, 2, 2], out_size=out_size, **kwargs)
return model
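Presumably the corrected stage list mirrors the VGG-16-style encoder configuration in reverse, since a decoder widens from the deepest stage outwards; a one-line check of that reading:

vgg16_stages = [2, 2, 3, 3, 3]                   # VGG-16 convolution counts per stage
print(list(reversed(vgg16_stages)))              # [3, 3, 2, 2, 2] -- the corrected config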

