
Commit

Add files via upload
cabooster authored Mar 14, 2022
1 parent 7de006b commit e65f1cf
Showing 17 changed files with 3,778 additions and 0 deletions.
75 changes: 75 additions & 0 deletions DeepCAD_RT_pytorch/convert_pth_to_onnx.py
@@ -0,0 +1,75 @@
import os
import torch
import torch.nn as nn
import argparse
import time
from deepcad.network import Network_3D_Unet
#############################################################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--GPU', type=str, default='0', help="the index of GPU you will use for computation")
parser.add_argument('--pth_path', type=str, default='pth', help="pth file root path")
parser.add_argument('--denoise_model', type=str, default='calcium-mouse-neuron-full_202201111604_200_40', help='A folder containing models to be tested')
parser.add_argument('--patch_x', type=int, default=200, help="the width of 3D patches (patch size in x)")
parser.add_argument('--patch_y', type=int, default=200, help="the width of 3D patches (patch size in y)")
parser.add_argument('--patch_t', type=int, default=40, help="the width of 3D patches (patch size in t)")

opt = parser.parse_args()
opt.ngpu = str(opt.GPU).count(',') + 1
print('\033[1;31mParameters -----> \033[0m')
print(opt)
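# Example invocation (illustrative; assumes the default 'pth/<model_folder>' layout produced by DeepCAD-RT training):
#   python convert_pth_to_onnx.py --GPU 0 --denoise_model calcium-mouse-neuron-full_202201111604_200_40 \
#          --patch_x 200 --patch_y 200 --patch_t 40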

########################################################################################################################
os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.GPU)
model_path = opt.pth_path + '//' + opt.denoise_model
model_list = list(os.walk(model_path, topdown=False))[-1][-1]
model_list.sort()

# move the training config (.yaml) out of the checkpoint list; iterate over a copy so that
# removing an element does not skip entries or index past the end of the shrinking list
for file_name in list(model_list):
    if '.yaml' in file_name:
        yaml_name = file_name
        model_list.remove(file_name)

##############################################################################################################################################################
# network architecture and GPU access
denoise_generator = Network_3D_Unet(in_channels=1,
                                    out_channels=1,
                                    f_maps=16,
                                    final_sigmoid=True)
if torch.cuda.is_available():
    print('\033[1;31mUsing {} GPU for testing -----> \033[0m'.format(torch.cuda.device_count()))
    denoise_generator = denoise_generator.cuda()
    denoise_generator = nn.DataParallel(denoise_generator, device_ids=range(opt.ngpu))
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
##############################################################################################################################################################
time_start = time.time()
# Start processing
for pth_index in range(len(model_list)):
    file_name = model_list[pth_index]
    if '.pth' in file_name:
        pth_name = file_name

        # load model
        model_name = opt.pth_path + '//' + opt.denoise_model + '//' + pth_name
        if isinstance(denoise_generator, nn.DataParallel):
            denoise_generator.module.load_state_dict(torch.load(model_name))  # parallel
            denoise_generator.eval()
        else:
            denoise_generator.load_state_dict(torch.load(model_name))  # not parallel
            denoise_generator.eval()

        model = denoise_generator.cuda()
        input_name = ['input']
        output_name = ['output']

        # input = torch.randn(1, 1, 80, 200, 200, requires_grad=True).cuda()
        # input = torch.randn(1, 1, 150, 150, 150, requires_grad=True).cuda()

        # trace the network with a dummy input of the configured patch size and export to ONNX
        input = torch.randn(1, 1, opt.patch_t, opt.patch_x, opt.patch_y, requires_grad=True).cuda()
        torch.onnx.export(model.module, input, pth_name.replace('.pth', '.onnx'), export_params=True,
                          input_names=input_name, output_names=output_name, opset_version=11, verbose=True)
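        # Optional sanity check (sketch, not executed here; assumes the 'onnxruntime' package and a CUDA
        # device, which this script already requires for the .cuda() calls above):
        #   import onnxruntime as ort
        #   sess = ort.InferenceSession(pth_name.replace('.pth', '.onnx'))
        #   onnx_out = sess.run(None, {'input': input.detach().cpu().numpy()})[0]
        #   torch_out = model(input).detach().cpu().numpy()  # compare, e.g. with np.allclose at a loose tolerance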

time_end = time.time()
print('Using time--->',time_end - time_start,'s')

1 change: 1 addition & 0 deletions DeepCAD_RT_pytorch/datasets/DataForPytorch/DownloadedData
@@ -0,0 +1 @@

312 changes: 312 additions & 0 deletions DeepCAD_RT_pytorch/deepcad/buildingblocks.py
@@ -0,0 +1,312 @@
import torch
from torch import nn as nn
from torch.nn import functional as F


def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)


def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, "Conv layer MUST be present"
assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'

modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
            # add learnable bias only in the absence of batchnorm/groupnorm
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
            # the number of groups must be less than or equal to the number of channels
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']")

return modules

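# Illustrative example (not part of the original file): create_conv(16, 32, 3, 'crg', 8) yields
# [('conv', Conv3d(16, 32, kernel_size=3, padding=1, bias=False)), ('ReLU', ReLU(inplace=True)), ('groupnorm', GroupNorm(8, 32))];
# the conv is created without a learnable bias because 'g' (GroupNorm) appears in the order string.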

class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""

def __init__(self, in_channels, out_channels, kernel_size=3, order='cr', num_groups=8, padding=1):
super(SingleConv, self).__init__()

for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)


class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
    We use (Conv3d+ReLU) by default (order='cr').
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""

def __init__(self, in_channels, out_channels, encoder, kernel_size=3, order='cr', num_groups=8):
super(DoubleConv, self).__init__()
if encoder:
# we're in the encoder path
conv1_in_channels = in_channels
conv1_out_channels = out_channels // 2
if conv1_out_channels < in_channels:
conv1_out_channels = in_channels
conv2_in_channels, conv2_out_channels = conv1_out_channels, out_channels
else:
# we're in the decoder path, decrease the number of channels in the 1st convolution
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels

# conv1
self.add_module('SingleConv1',
SingleConv(conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups))
# conv2
self.add_module('SingleConv2',
SingleConv(conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups))

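# Illustrative channel arithmetic (not part of the original file): in the encoder path,
# DoubleConv(16, 32, encoder=True) gives conv1: 16 -> 16 (out_channels // 2, but never below
# in_channels) and conv2: 16 -> 32; in the decoder path, DoubleConv(48, 16, encoder=False)
# gives conv1: 48 -> 16 and conv2: 16 -> 16.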

class ExtResNetBlock(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""

def __init__(self, in_channels, out_channels, kernel_size=3, order='cge', num_groups=8, **kwargs):
super(ExtResNetBlock, self).__init__()

# first convolution
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=kernel_size, order=order, num_groups=num_groups)
# residual block
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=kernel_size, order=order, num_groups=num_groups)
# remove non-linearity from the 3rd convolution since it's going to be applied after adding the residual
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=kernel_size, order=n_order,
num_groups=num_groups)

# create non-linearity separately
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)

def forward(self, x):
# apply first convolution and save the output as a residual
out = self.conv1(x)
residual = out

# residual block
out = self.conv2(out)
out = self.conv3(out)

out += residual
out = self.non_linearity(out)

return out

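# Illustrative note (not part of the original file): with the default order='cge', conv1 and conv2 are
# Conv3d(bias=False) + GroupNorm + ELU, while conv3 keeps only 'cg', so the ELU is applied once, after
# the residual addition; e.g. ExtResNetBlock(16, 32)(torch.randn(1, 16, 8, 32, 32)) -> (1, 32, 8, 32, 32).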

class Encoder(nn.Module):
"""
    A single module from the encoder path consisting of an optional max pooling layer
    (one may specify a MaxPool kernel_size different from the standard (2, 2, 2), e.g. if the
    volumetric data is anisotropic; make sure to use a complementary scale_factor in the
    decoder path), followed by a DoubleConv module.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
conv_kernel_size (int): size of the convolving kernel
apply_pooling (bool): if True use MaxPool3d before DoubleConv
pool_kernel_size (tuple): the size of the window to take a max over
pool_type (str): pooling layer: 'max' or 'avg'
basic_module(nn.Module): either ResNetBlock or DoubleConv
conv_layer_order (string): determines the order of layers
in `DoubleConv` module. See `DoubleConv` for more info.
num_groups (int): number of groups for the GroupNorm
"""

def __init__(self, in_channels, out_channels, conv_kernel_size=3, apply_pooling=True,
pool_kernel_size=(2, 2, 2), pool_type='max', basic_module=DoubleConv, conv_layer_order='cr',
num_groups=8):
super(Encoder, self).__init__()
assert pool_type in ['max', 'avg']
if apply_pooling:
if pool_type == 'max':
self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = None

self.basic_module = basic_module(in_channels, out_channels,
encoder=True,
kernel_size=conv_kernel_size,
order=conv_layer_order,
num_groups=num_groups)

def forward(self, x):
if self.pooling is not None:
x = self.pooling(x)
x = self.basic_module(x)
return x


class Decoder(nn.Module):
"""
    A single module from the decoder path consisting of an upsampling layer (either a learned
    ConvTranspose3d or nearest-neighbor interpolation) followed by a DoubleConv module.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
scale_factor (tuple): used as the multiplier for the image H/W/D in
case of nn.Upsample or as stride in case of ConvTranspose3d, must reverse the MaxPool3d operation
from the corresponding encoder
basic_module(nn.Module): either ResNetBlock or DoubleConv
conv_layer_order (string): determines the order of layers
in `DoubleConv` module. See `DoubleConv` for more info.
num_groups (int): number of groups for the GroupNorm
"""

def __init__(self, in_channels, out_channels, kernel_size=3,
scale_factor=(2, 2, 2), basic_module=DoubleConv, conv_layer_order='cr', num_groups=8):
super(Decoder, self).__init__()
if basic_module == DoubleConv:
# if DoubleConv is the basic_module use nearest neighbor interpolation for upsampling
self.upsample = None
else:
# otherwise use ConvTranspose3d (bear in mind your GPU memory)
# make sure that the output size reverses the MaxPool3d from the corresponding encoder
# (D_out = (D_in − 1) ×  stride[0] − 2 ×  padding[0] +  kernel_size[0] +  output_padding[0])
# also scale the number of channels from in_channels to out_channels so that summation joining
# works correctly
self.upsample = nn.ConvTranspose3d(in_channels,
out_channels,
kernel_size=kernel_size,
stride=scale_factor,
padding=1,
output_padding=1)
# adapt the number of in_channels for the ExtResNetBlock
in_channels = out_channels

self.basic_module = basic_module(in_channels, out_channels,
encoder=False,
kernel_size=kernel_size,
order=conv_layer_order,
num_groups=num_groups)

def forward(self, encoder_features, x):
if self.upsample is None:
# use nearest neighbor interpolation and concatenation joining
output_size = encoder_features.size()[2:]
x = F.interpolate(x, size=output_size, mode='nearest')
# concatenate encoder_features (encoder path) with the upsampled input across channel dimension
x = torch.cat((encoder_features, x), dim=1)
else:
# use ConvTranspose3d and summation joining
x = self.upsample(x)
x += encoder_features

x = self.basic_module(x)
return x

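# Illustrative note (not part of the original file): with DoubleConv as the basic_module the decoder
# concatenates, so its in_channels must equal the encoder feature channels plus the upsampled channels
# (e.g. Decoder(32 + 16, 16)); with ExtResNetBlock it uses ConvTranspose3d and summation joining instead,
# and the transposed conv already maps in_channels to out_channels to match the encoder features.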

class FinalConv(nn.Sequential):
"""
    A module consisting of a convolution layer (e.g. Conv3d+ReLU+GroupNorm) and a final 1x1 convolution
    which reduces the number of channels to 'out_channels'.
    We use (Conv3d+ReLU) by default (order='cr').
    This can be changed however by providing the 'order' argument, e.g. in order
    to change to Conv3d+BatchNorm3d+ReLU use order='cbr'.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
num_groups (int): number of groups for the GroupNorm
"""

def __init__(self, in_channels, out_channels, kernel_size=3, order='cr', num_groups=8):
super(FinalConv, self).__init__()

# conv1
self.add_module('SingleConv', SingleConv(in_channels, in_channels, kernel_size, order, num_groups))

# in the last layer a 1×1 convolution reduces the number of output channels to out_channels
final_conv = nn.Conv3d(in_channels, out_channels, 1)
self.add_module('final_conv', final_conv)
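
# Minimal shape check (illustrative sketch, not part of the committed file): with the defaults above,
# an Encoder halves each spatial dimension via (2, 2, 2) max pooling and the matching Decoder restores
# it by interpolating to the encoder feature size before concatenation, e.g.
#   enc = Encoder(1, 16, apply_pooling=False)
#   down = Encoder(16, 32)                    # pools (2, 2, 2), then DoubleConv
#   dec = Decoder(32 + 16, 16)                # in_channels = concatenated channels
#   f1 = enc(torch.randn(1, 1, 16, 64, 64))   # -> (1, 16, 16, 64, 64)
#   f2 = down(f1)                             # -> (1, 32, 8, 32, 32)
#   out = dec(f1, f2)                         # -> (1, 16, 16, 64, 64)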
