diff --git a/DeepCAD_RT_pytorch/convert_pth_to_onnx.py b/DeepCAD_RT_pytorch/convert_pth_to_onnx.py new file mode 100644 index 0000000..11f410c --- /dev/null +++ b/DeepCAD_RT_pytorch/convert_pth_to_onnx.py @@ -0,0 +1,75 @@ +import os +import torch +import torch.nn as nn +import argparse +import time +from deepcad.network import Network_3D_Unet +############################################################################################################################################# +parser = argparse.ArgumentParser() +parser.add_argument('--GPU', type=str, default='0', help="the index of GPU you will use for computation") +parser.add_argument('--pth_path', type=str, default='pth', help="pth file root path") +parser.add_argument('--denoise_model', type=str, default='calcium-mouse-neuron-full_202201111604_200_40', help='A folder containing models to be tested') +parser.add_argument('--patch_x', type=int, default=200, help="the width of 3D patches (patch size in x)") +parser.add_argument('--patch_y', type=int, default=200, help="the width of 3D patches (patch size in y)") +parser.add_argument('--patch_t', type=int, default=40, help="the width of 3D patches (patch size in t)") + +opt = parser.parse_args() +opt.ngpu=str(opt.GPU).count(',')+1 +print('\033[1;31mParameters -----> \033[0m') +print(opt) + +######################################################################################################################## +os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.GPU) +model_path = opt.pth_path + '//' + opt.denoise_model +model_list = list(os.walk(model_path, topdown=False))[-1][-1] +model_list.sort() + +for i in range(len(model_list)): + aaa = model_list[i] + if '.yaml' in aaa: + yaml_name = model_list[i] + del model_list[i] + +############################################################################################################################################################## +# network architecture and GPU access +denoise_generator = Network_3D_Unet(in_channels=1, + out_channels=1, + f_maps=16, + final_sigmoid=True) +if torch.cuda.is_available(): + print('\033[1;31mUsing {} GPU for testing -----> \033[0m'.format(torch.cuda.device_count())) + denoise_generator = denoise_generator.cuda() + denoise_generator = nn.DataParallel(denoise_generator, device_ids=range(opt.ngpu)) +cuda = True if torch.cuda.is_available() else False +Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor +############################################################################################################################################################## +time_start = time.time() +# Start processing +for pth_index in range(len(model_list)): + aaa = model_list[pth_index] + if '.pth' in aaa: + pth_name = model_list[pth_index] + + # load model + model_name = opt.pth_path + '//' + opt.denoise_model + '//' + pth_name + if isinstance(denoise_generator, nn.DataParallel): + denoise_generator.module.load_state_dict(torch.load(model_name)) # parallel + denoise_generator.eval() + else: + denoise_generator.load_state_dict(torch.load(model_name)) # not parallel + denoise_generator.eval() + + model = denoise_generator.cuda() + input_name = ['input'] + output_name = ['output'] + + # input = torch.randn(1, 1, 80, 200, 200, requires_grad=True).cuda() + + # input = torch.randn(1, 1, 150, 150, 150, requires_grad=True).cuda() + + input = torch.randn(1, 1, opt.patch_t, opt.patch_x, opt.patch_y, requires_grad=True).cuda() + torch.onnx.export(model.module, input, pth_name.replace('.pth', '.onnx'), 
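+                     # the exporter traces the graph with the fixed (1, 1, patch_t, patch_x, patch_y)
+                     # tensor created above, so the resulting ONNX model accepts only that input shape;
+                     # pass a dynamic_axes argument to torch.onnx.export if variable patch sizes are needed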
export_params=True,input_names=input_name, output_names=output_name,opset_version=11, verbose=True) + +time_end = time.time() +print('Using time--->',time_end - time_start,'s') + diff --git a/DeepCAD_RT_pytorch/datasets/DataForPytorch/DownloadedData b/DeepCAD_RT_pytorch/datasets/DataForPytorch/DownloadedData new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/DeepCAD_RT_pytorch/datasets/DataForPytorch/DownloadedData @@ -0,0 +1 @@ + diff --git a/DeepCAD_RT_pytorch/deepcad/buildingblocks.py b/DeepCAD_RT_pytorch/deepcad/buildingblocks.py new file mode 100644 index 0000000..c7f1659 --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/buildingblocks.py @@ -0,0 +1,312 @@ +import torch +from torch import nn as nn +from torch.nn import functional as F + + +def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): + return nn.Conv3d(in_channels, out_channels, kernel_size, padding=padding, bias=bias) + + +def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): + """ + Create a list of modules with together constitute a single conv layer with non-linearity + and optional batchnorm/groupnorm. + + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + order (string): order of things, e.g. + 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + padding (int): add zero-padding to the input + + Return: + list of tuple (name, module) + """ + assert 'c' in order, "Conv layer MUST be present" + assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer' + + modules = [] + for i, char in enumerate(order): + if char == 'r': + modules.append(('ReLU', nn.ReLU(inplace=True))) + elif char == 'l': + modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True))) + elif char == 'e': + modules.append(('ELU', nn.ELU(inplace=True))) + elif char == 'c': + # add learnable bias only in the absence of gatchnorm/groupnorm + bias = not ('g' in order or 'b' in order) + modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding))) + elif char == 'g': + is_before_conv = i < order.index('c') + assert not is_before_conv, 'GroupNorm MUST go after the Conv3d' + # number of groups must be less or equal the number of channels + if out_channels < num_groups: + num_groups = out_channels + modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels))) + elif char == 'b': + is_before_conv = i < order.index('c') + if is_before_conv: + modules.append(('batchnorm', nn.BatchNorm3d(in_channels))) + else: + modules.append(('batchnorm', nn.BatchNorm3d(out_channels))) + else: + raise ValueError(f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']") + + return modules + + +class SingleConv(nn.Sequential): + """ + Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order + of operations can be specified via the `order` parameter + + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int): size of the convolving kernel + order (string): determines the order of layers, e.g. 
+ 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, kernel_size=3, order='cr', num_groups=8, padding=1): + super(SingleConv, self).__init__() + + for name, module in create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=padding): + self.add_module(name, module) + + +class DoubleConv(nn.Sequential): + """ + A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d). + We use (Conv3d+ReLU+GroupNorm3d) by default. + This can be changed however by providing the 'order' argument, e.g. in order + to change to Conv3d+BatchNorm3d+ELU use order='cbe'. + Use padded convolutions to make sure that the output (H_out, W_out) is the same + as (H_in, W_in), so that you don't have to crop in the decoder path. + + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + encoder (bool): if True we're in the encoder path, otherwise we're in the decoder + kernel_size (int): size of the convolving kernel + order (string): determines the order of layers, e.g. + 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, encoder, kernel_size=3, order='cr', num_groups=8): + super(DoubleConv, self).__init__() + if encoder: + # we're in the encoder path + conv1_in_channels = in_channels + conv1_out_channels = out_channels // 2 + if conv1_out_channels < in_channels: + conv1_out_channels = in_channels + conv2_in_channels, conv2_out_channels = conv1_out_channels, out_channels + else: + # we're in the decoder path, decrease the number of channels in the 1st convolution + conv1_in_channels, conv1_out_channels = in_channels, out_channels + conv2_in_channels, conv2_out_channels = out_channels, out_channels + + # conv1 + self.add_module('SingleConv1', + SingleConv(conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups)) + # conv2 + self.add_module('SingleConv2', + SingleConv(conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups)) + + +class ExtResNetBlock(nn.Module): + """ + Basic UNet block consisting of a SingleConv followed by the residual block. + The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number + of output channels is compatible with the residual block that follows. + This block can be used instead of standard DoubleConv in the Encoder module. + Motivated by: https://arxiv.org/pdf/1706.00120.pdf + + Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. 
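+
+    A minimal usage sketch (illustrative only; the tensor sizes below are assumptions, not
+    values used elsewhere in this repository):
+
+        block = ExtResNetBlock(in_channels=16, out_channels=32)
+        x = torch.randn(1, 16, 8, 64, 64)   # (N, C, D, H, W)
+        y = block(x)                         # -> (1, 32, 8, 64, 64); spatial size is preserved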
+ """ + + def __init__(self, in_channels, out_channels, kernel_size=3, order='cge', num_groups=8, **kwargs): + super(ExtResNetBlock, self).__init__() + + # first convolution + self.conv1 = SingleConv(in_channels, out_channels, kernel_size=kernel_size, order=order, num_groups=num_groups) + # residual block + self.conv2 = SingleConv(out_channels, out_channels, kernel_size=kernel_size, order=order, num_groups=num_groups) + # remove non-linearity from the 3rd convolution since it's going to be applied after adding the residual + n_order = order + for c in 'rel': + n_order = n_order.replace(c, '') + self.conv3 = SingleConv(out_channels, out_channels, kernel_size=kernel_size, order=n_order, + num_groups=num_groups) + + # create non-linearity separately + if 'l' in order: + self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True) + elif 'e' in order: + self.non_linearity = nn.ELU(inplace=True) + else: + self.non_linearity = nn.ReLU(inplace=True) + + def forward(self, x): + # apply first convolution and save the output as a residual + out = self.conv1(x) + residual = out + + # residual block + out = self.conv2(out) + out = self.conv3(out) + + out += residual + out = self.non_linearity(out) + + return out + + +class Encoder(nn.Module): + """ + A single module from the encoder path consisting of the optional max + pooling layer (one may specify the MaxPool kernel_size to be different + than the standard (2,2,2), e.g. if the volumetric data is anisotropic + (make sure to use complementary scale_factor in the decoder path) followed by + a DoubleConv module. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + conv_kernel_size (int): size of the convolving kernel + apply_pooling (bool): if True use MaxPool3d before DoubleConv + pool_kernel_size (tuple): the size of the window to take a max over + pool_type (str): pooling layer: 'max' or 'avg' + basic_module(nn.Module): either ResNetBlock or DoubleConv + conv_layer_order (string): determines the order of layers + in `DoubleConv` module. See `DoubleConv` for more info. + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, conv_kernel_size=3, apply_pooling=True, + pool_kernel_size=(2, 2, 2), pool_type='max', basic_module=DoubleConv, conv_layer_order='cr', + num_groups=8): + super(Encoder, self).__init__() + assert pool_type in ['max', 'avg'] + if apply_pooling: + if pool_type == 'max': + self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size) + else: + self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size) + else: + self.pooling = None + + self.basic_module = basic_module(in_channels, out_channels, + encoder=True, + kernel_size=conv_kernel_size, + order=conv_layer_order, + num_groups=num_groups) + + def forward(self, x): + if self.pooling is not None: + x = self.pooling(x) + x = self.basic_module(x) + return x + + +class Decoder(nn.Module): + """ + A single module for decoder path consisting of the upsample layer + (either learned ConvTranspose3d or interpolation) followed by a DoubleConv + module. 
+ Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int): size of the convolving kernel + scale_factor (tuple): used as the multiplier for the image H/W/D in + case of nn.Upsample or as stride in case of ConvTranspose3d, must reverse the MaxPool3d operation + from the corresponding encoder + basic_module(nn.Module): either ResNetBlock or DoubleConv + conv_layer_order (string): determines the order of layers + in `DoubleConv` module. See `DoubleConv` for more info. + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, kernel_size=3, + scale_factor=(2, 2, 2), basic_module=DoubleConv, conv_layer_order='cr', num_groups=8): + super(Decoder, self).__init__() + if basic_module == DoubleConv: + # if DoubleConv is the basic_module use nearest neighbor interpolation for upsampling + self.upsample = None + else: + # otherwise use ConvTranspose3d (bear in mind your GPU memory) + # make sure that the output size reverses the MaxPool3d from the corresponding encoder + # (D_out = (D_in − 1) ×  stride[0] − 2 ×  padding[0] +  kernel_size[0] +  output_padding[0]) + # also scale the number of channels from in_channels to out_channels so that summation joining + # works correctly + self.upsample = nn.ConvTranspose3d(in_channels, + out_channels, + kernel_size=kernel_size, + stride=scale_factor, + padding=1, + output_padding=1) + # adapt the number of in_channels for the ExtResNetBlock + in_channels = out_channels + + self.basic_module = basic_module(in_channels, out_channels, + encoder=False, + kernel_size=kernel_size, + order=conv_layer_order, + num_groups=num_groups) + + def forward(self, encoder_features, x): + if self.upsample is None: + # use nearest neighbor interpolation and concatenation joining + output_size = encoder_features.size()[2:] + x = F.interpolate(x, size=output_size, mode='nearest') + # concatenate encoder_features (encoder path) with the upsampled input across channel dimension + x = torch.cat((encoder_features, x), dim=1) + else: + # use ConvTranspose3d and summation joining + x = self.upsample(x) + x += encoder_features + + x = self.basic_module(x) + return x + + +class FinalConv(nn.Sequential): + """ + A module consisting of a convolution layer (e.g. Conv3d+ReLU+GroupNorm3d) and the final 1x1 convolution + which reduces the number of channels to 'out_channels'. + with the number of output channels 'out_channels // 2' and 'out_channels' respectively. + We use (Conv3d+ReLU+GroupNorm3d) by default. + This can be change however by providing the 'order' argument, e.g. in order + to change to Conv3d+BatchNorm3d+ReLU use order='cbr'. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int): size of the convolving kernel + order (string): determines the order of layers, e.g. 
+ 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, kernel_size=3, order='cr', num_groups=8): + super(FinalConv, self).__init__() + + # conv1 + self.add_module('SingleConv', SingleConv(in_channels, in_channels, kernel_size, order, num_groups)) + + # in the last layer a 1×1 convolution reduces the number of output channels to out_channels + final_conv = nn.Conv3d(in_channels, out_channels, 1) + self.add_module('final_conv', final_conv) diff --git a/DeepCAD_RT_pytorch/deepcad/data_process.py b/DeepCAD_RT_pytorch/deepcad/data_process.py new file mode 100644 index 0000000..6802783 --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/data_process.py @@ -0,0 +1,577 @@ +import numpy as np +import os +import tifffile as tiff +import random +import math +import torch +from torch.utils.data import Dataset +from skimage import io + + + +def random_transform(input, target): + """ + The function for data augmentation. Randomly select one method among five + transformation methods (including rotation and flip) or do not use data + augmentation. + + Args: + input, target : the input and target patch before data augmentation + Return: + input, target : the input and target patch after data augmentation + """ + p_trans = random.randrange(6) + if p_trans == 0: # no transformation + input = input + target = target + elif p_trans == 1: # left rotate 90 + input = np.rot90(input, k=1, axes=(1, 2)) + target = np.rot90(target, k=1, axes=(1, 2)) + elif p_trans == 2: # left rotate 180 + input = np.rot90(input, k=2, axes=(1, 2)) + target = np.rot90(target, k=2, axes=(1, 2)) + elif p_trans == 3: # left rotate 270 + input = np.rot90(input, k=3, axes=(1, 2)) + target = np.rot90(target, k=3, axes=(1, 2)) + elif p_trans == 4: # horizontal flip + input = input[:, :, ::-1] + target = target[:, :, ::-1] + elif p_trans == 5: # vertical flip + input = input[:, ::-1, :] + target = target[:, ::-1, :] + return input, target + + +class trainset(Dataset): + """ + Train set generator for pytorch training + + """ + + def __init__(self, name_list, coordinate_list, noise_img_all, stack_index): + self.name_list = name_list + self.coordinate_list = coordinate_list + self.noise_img_all = noise_img_all + self.stack_index = stack_index + + def __getitem__(self, index): + """ + For temporal stacks with a small lateral size or short recording period, sub-stacks can be + randomly cropped from the original stack to augment the training set according to the record + coordinate. Then, interlaced frames of each sub-stack are extracted to form two 3D tiles. 
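+        For example, with patch_t = 40 the cropped sub-stack spans 80 consecutive frames: the
+        even frames (0, 2, ..., 78, counted from the crop start) form one tile and the odd
+        frames (1, 3, ..., 79) form the other.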
+ One of them serves as the input and the other serves as the target for network training + Args: + index : the index of 3D patchs used for training + Return: + input, target : the consecutive frames of the 3D noisy patch serve as the input and target of the network + """ + stack_index = self.stack_index[index] + noise_img = self.noise_img_all[stack_index] + single_coordinate = self.coordinate_list[self.name_list[index]] + init_h = single_coordinate['init_h'] + end_h = single_coordinate['end_h'] + init_w = single_coordinate['init_w'] + end_w = single_coordinate['end_w'] + init_s = single_coordinate['init_s'] + end_s = single_coordinate['end_s'] + input = noise_img[init_s:end_s:2, init_h:end_h, init_w:end_w] + target = noise_img[init_s + 1:end_s:2, init_h:end_h, init_w:end_w] + p_exc = random.random() # generate a random number determinate whether swap input and target + if p_exc < 0.5: + input, target = random_transform(input, target) + else: + temp = input + input = target + target = temp # Swap input and target + input, target = random_transform(input, target) + + input = torch.from_numpy(np.expand_dims(input, 0).copy()) + target = torch.from_numpy(np.expand_dims(target, 0).copy()) + return input, target + + def __len__(self): + return len(self.name_list) + + +class testset(Dataset): + """ + Test set generator for pytorch inference + + """ + + def __init__(self, name_list, coordinate_list, noise_img): + self.name_list = name_list + self.coordinate_list = coordinate_list + self.noise_img = noise_img + + def __getitem__(self, index): + """ + Generate the sub-stacks of the noisy image. + Args: + index : the index of 3D patch used for testing + Return: + noise_patch : the sub-stacks of the noisy image + single_coordinate : the specific coordinate of sub-stacks in the noisy image for stitching all sub-stacks + """ + single_coordinate = self.coordinate_list[self.name_list[index]] + init_h = single_coordinate['init_h'] + end_h = single_coordinate['end_h'] + init_w = single_coordinate['init_w'] + end_w = single_coordinate['end_w'] + init_s = single_coordinate['init_s'] + end_s = single_coordinate['end_s'] + noise_patch = self.noise_img[init_s:end_s, init_h:end_h, init_w:end_w] + noise_patch = torch.from_numpy(np.expand_dims(noise_patch, 0)) + return noise_patch, single_coordinate + + def __len__(self): + return len(self.name_list) + + +def get_gap_t(args, img, stack_num): + whole_x = img.shape[2] + whole_y = img.shape[1] + whole_t = img.shape[0] + print('whole_x -----> ', whole_x) + print('whole_y -----> ', whole_y) + print('whole_t -----> ', whole_t) + w_num = math.floor((whole_x - args.patch_x) / args.gap_x) + 1 + h_num = math.floor((whole_y - args.patch_y) / args.gap_y) + 1 + s_num = math.ceil(args.train_datasets_size / w_num / h_num / stack_num) + # print('w_num -----> ',w_num) + # print('h_num -----> ',h_num) + # print('s_num -----> ',s_num) + gap_t = math.floor((whole_t - args.patch_t * 2) / (s_num - 1)) + # print('gap_t -----> ',gap_t) + return gap_t + + +def train_preprocess_lessMemoryMulStacks(args): + patch_y = args.patch_y + patch_x = args.patch_x + patch_t2 = args.patch_t * 2 + gap_y = args.gap_y + gap_x = args.gap_x + im_folder = args.datasets_path + '//' + args.datasets_folder + + name_list = [] + coordinate_list = {} + stack_index = [] + noise_im_all = [] + ind = 0; + print('\033[1;31mImage list for training -----> \033[0m') + stack_num = len(list(os.walk(im_folder, topdown=False))[-1][-1]) + print('Total stack number -----> ', stack_num) + + for im_name in list(os.walk(im_folder, 
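+                                 # with topdown=False the walked root directory comes last, so
+                                 # [-1][-1] is the list of file names directly inside im_folder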
topdown=False))[-1][-1]: + print(im_name) + im_dir = im_folder + '//' + im_name + noise_im = tiff.imread(im_dir) + if noise_im.shape[0] > args.select_img_num: + noise_im = noise_im[0:args.select_img_num, :, :] + gap_t2 = get_gap_t(args, noise_im, stack_num) + args.gap_t = gap_t2 + # print('gap_t2 -----> ',gap_t2) + # print('noise_im shape -----> ',noise_im.shape) + # print('noise_im max -----> ',noise_im.max()) + # print('noise_im min -----> ',noise_im.min()) + noise_im = noise_im.astype(np.float32) / args.scale_factor # no preprocessing + # noise_im = (noise_im-noise_im.min()).astype(np.float32)/args.scale_factor + noise_im_all.append(noise_im) + + whole_x = noise_im.shape[2] + whole_y = noise_im.shape[1] + whole_t = noise_im.shape[0] + # print('int((whole_y-patch_y+gap_y)/gap_y) -----> ',int((whole_y-patch_y+gap_y)/gap_y)) + # print('int((whole_x-patch_x+gap_x)/gap_x) -----> ',int((whole_x-patch_x+gap_x)/gap_x)) + # print('int((whole_t-patch_t2+gap_t2)/gap_t2) -----> ',int((whole_t-patch_t2+gap_t2)/gap_t2)) + for x in range(0, int((whole_y - patch_y + gap_y) / gap_y)): + for y in range(0, int((whole_x - patch_x + gap_x) / gap_x)): + for z in range(0, int((whole_t - patch_t2 + gap_t2) / gap_t2)): + single_coordinate = {'init_h': 0, 'end_h': 0, 'init_w': 0, 'end_w': 0, 'init_s': 0, 'end_s': 0} + init_h = gap_y * x + end_h = gap_y * x + patch_y + init_w = gap_x * y + end_w = gap_x * y + patch_x + init_s = gap_t2 * z + end_s = gap_t2 * z + patch_t2 + single_coordinate['init_h'] = init_h + single_coordinate['end_h'] = end_h + single_coordinate['init_w'] = init_w + single_coordinate['end_w'] = end_w + single_coordinate['init_s'] = init_s + single_coordinate['end_s'] = end_s + # noise_patch1 = noise_im[init_s:end_s,init_h:end_h,init_w:end_w] + patch_name = args.datasets_folder + '_' + im_name.replace('.tif', '') + '_x' + str(x) + '_y' + str( + y) + '_z' + str(z) + # train_raw.append(noise_patch1.transpose(1,2,0)) + name_list.append(patch_name) + # print(' single_coordinate -----> ',single_coordinate) + coordinate_list[patch_name] = single_coordinate + stack_index.append(ind) + ind = ind + 1; + return name_list, noise_im_all, coordinate_list, stack_index + + +def singlebatch_test_save(single_coordinate, output_image, raw_image): + """ + Subtract overlapping regions (both the lateral and temporal overlaps) from the output sub-stacks (if the batch size equal to 1). 
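+    Each sub-stack overlaps its neighbours by (patch size - gap size) voxels per dimension; this
+    function trims half of that overlap from every interior face and returns the trimmed patch
+    together with the coordinates at which it should be written into the whole output stack.
+    A minimal stitching sketch (the variable names here are illustrative, not taken from this
+    repository):
+
+        denoised = np.zeros(noise_im.shape, dtype=np.float32)
+        for noise_patch, coord in test_loader:          # DataLoader with batch_size=1
+            out = network(noise_patch.cuda()).squeeze().detach().cpu().numpy()
+            raw = noise_patch.squeeze().numpy()
+            (o, r, sw0, sw1, sh0, sh1, ss0, ss1) = singlebatch_test_save(coord, out, raw)
+            denoised[ss0:ss1, sh0:sh1, sw0:sw1] = o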
+ + Args: + single_coordinate : the coordinate dict of the image + output_image : the output sub-stack of the network + raw_image : the noisy sub-stack + Returns: + output_patch : the output patch after subtract the overlapping regions + raw_patch : the raw patch after subtract the overlapping regions + stack_start_ : the start coordinate of the patch in whole stack + stack_end_ : the end coordinate of the patch in whole stack + """ + stack_start_w = int(single_coordinate['stack_start_w']) + stack_end_w = int(single_coordinate['stack_end_w']) + patch_start_w = int(single_coordinate['patch_start_w']) + patch_end_w = int(single_coordinate['patch_end_w']) + + stack_start_h = int(single_coordinate['stack_start_h']) + stack_end_h = int(single_coordinate['stack_end_h']) + patch_start_h = int(single_coordinate['patch_start_h']) + patch_end_h = int(single_coordinate['patch_end_h']) + + stack_start_s = int(single_coordinate['stack_start_s']) + stack_end_s = int(single_coordinate['stack_end_s']) + patch_start_s = int(single_coordinate['patch_start_s']) + patch_end_s = int(single_coordinate['patch_end_s']) + + output_patch = output_image[patch_start_s:patch_end_s, patch_start_h:patch_end_h, patch_start_w:patch_end_w] + raw_patch = raw_image[patch_start_s:patch_end_s, patch_start_h:patch_end_h, patch_start_w:patch_end_w] + return output_patch, raw_patch, stack_start_w, stack_end_w, stack_start_h, stack_end_h, stack_start_s, stack_end_s + + +def multibatch_test_save(single_coordinate, id, output_image, raw_image): + """ + Subtract overlapping regions (both the lateral and temporal overlaps) from the output sub-stacks. (if the batch size larger than 1). + + Args: + single_coordinate : the coordinate dict of the image + output_image : the output sub-stack of the network + raw_image : the noisy sub-stack + Returns: + output_patch : the output patch after subtract the overlapping regions + raw_patch : the raw patch after subtract the overlapping regions + stack_start_ : the start coordinate of the patch in whole stack + stack_end_ : the end coordinate of the patch in whole stack + """ + stack_start_w_id = single_coordinate['stack_start_w'].numpy() + stack_start_w = int(stack_start_w_id[id]) + stack_end_w_id = single_coordinate['stack_end_w'].numpy() + stack_end_w = int(stack_end_w_id[id]) + patch_start_w_id = single_coordinate['patch_start_w'].numpy() + patch_start_w = int(patch_start_w_id[id]) + patch_end_w_id = single_coordinate['patch_end_w'].numpy() + patch_end_w = int(patch_end_w_id[id]) + + stack_start_h_id = single_coordinate['stack_start_h'].numpy() + stack_start_h = int(stack_start_h_id[id]) + stack_end_h_id = single_coordinate['stack_end_h'].numpy() + stack_end_h = int(stack_end_h_id[id]) + patch_start_h_id = single_coordinate['patch_start_h'].numpy() + patch_start_h = int(patch_start_h_id[id]) + patch_end_h_id = single_coordinate['patch_end_h'].numpy() + patch_end_h = int(patch_end_h_id[id]) + + stack_start_s_id = single_coordinate['stack_start_s'].numpy() + stack_start_s = int(stack_start_s_id[id]) + stack_end_s_id = single_coordinate['stack_end_s'].numpy() + stack_end_s = int(stack_end_s_id[id]) + patch_start_s_id = single_coordinate['patch_start_s'].numpy() + patch_start_s = int(patch_start_s_id[id]) + patch_end_s_id = single_coordinate['patch_end_s'].numpy() + patch_end_s = int(patch_end_s_id[id]) + + output_image_id = output_image[id] + raw_image_id = raw_image[id] + output_patch = output_image_id[patch_start_s:patch_end_s, patch_start_h:patch_end_h, patch_start_w:patch_end_w] + raw_patch = 
raw_image_id[patch_start_s:patch_end_s, patch_start_h:patch_end_h, patch_start_w:patch_end_w] + + return output_patch, raw_patch, stack_start_w, stack_end_w, stack_start_h, stack_end_h, stack_start_s, stack_end_s + + +def test_preprocess_lessMemoryNoTail_chooseOne(args, N): + patch_y = args.patch_y + patch_x = args.patch_x + patch_t2 = args.patch_t + gap_y = args.gap_y + gap_x = args.gap_x + gap_t2 = args.gap_t + cut_w = (patch_x - gap_x) / 2 + cut_h = (patch_y - gap_y) / 2 + cut_s = (patch_t2 - gap_t2) / 2 + im_folder = args.datasets_path + '//' + args.datasets_folder + + name_list = [] + # train_raw = [] + coordinate_list = {} + img_list = list(os.walk(im_folder, topdown=False))[-1][-1] + img_list.sort() + # print(img_list) + + im_name = img_list[N] + + im_dir = im_folder + '//' + im_name + noise_im = tiff.imread(im_dir) + # print('noise_im shape -----> ',noise_im.shape) + # print('noise_im max -----> ',noise_im.max()) + # print('noise_im min -----> ',noise_im.min()) + if noise_im.shape[0] > args.test_datasize: + noise_im = noise_im[0:args.test_datasize, :, :] + noise_im = noise_im.astype(np.float32) / args.scale_factor + # noise_im = (noise_im-noise_im.min()).astype(np.float32)/args.scale_factor + + whole_x = noise_im.shape[2] + whole_y = noise_im.shape[1] + whole_t = noise_im.shape[0] + + num_w = math.ceil((whole_x - patch_x + gap_x) / gap_x) + num_h = math.ceil((whole_y - patch_y + gap_y) / gap_y) + num_s = math.ceil((whole_t - patch_t2 + gap_t2) / gap_t2) + # print('int((whole_y-patch_y+gap_y)/gap_y) -----> ',int((whole_y-patch_y+gap_y)/gap_y)) + # print('int((whole_x-patch_x+gap_x)/gap_x) -----> ',int((whole_x-patch_x+gap_x)/gap_x)) + # print('int((whole_t-patch_t2+gap_t2)/gap_t2) -----> ',int((whole_t-patch_t2+gap_t2)/gap_t2)) + for x in range(0, num_h): + for y in range(0, num_w): + for z in range(0, num_s): + single_coordinate = {'init_h': 0, 'end_h': 0, 'init_w': 0, 'end_w': 0, 'init_s': 0, 'end_s': 0} + if x != (num_h - 1): + init_h = gap_y * x + end_h = gap_y * x + patch_y + elif x == (num_h - 1): + init_h = whole_y - patch_y + end_h = whole_y + + if y != (num_w - 1): + init_w = gap_x * y + end_w = gap_x * y + patch_x + elif y == (num_w - 1): + init_w = whole_x - patch_x + end_w = whole_x + + if z != (num_s - 1): + init_s = gap_t2 * z + end_s = gap_t2 * z + patch_t2 + elif z == (num_s - 1): + init_s = whole_t - patch_t2 + end_s = whole_t + single_coordinate['init_h'] = init_h + single_coordinate['end_h'] = end_h + single_coordinate['init_w'] = init_w + single_coordinate['end_w'] = end_w + single_coordinate['init_s'] = init_s + single_coordinate['end_s'] = end_s + + if y == 0: + single_coordinate['stack_start_w'] = y * gap_x + single_coordinate['stack_end_w'] = y * gap_x + patch_x - cut_w + single_coordinate['patch_start_w'] = 0 + single_coordinate['patch_end_w'] = patch_x - cut_w + elif y == num_w - 1: + single_coordinate['stack_start_w'] = whole_x - patch_x + cut_w + single_coordinate['stack_end_w'] = whole_x + single_coordinate['patch_start_w'] = cut_w + single_coordinate['patch_end_w'] = patch_x + else: + single_coordinate['stack_start_w'] = y * gap_x + cut_w + single_coordinate['stack_end_w'] = y * gap_x + patch_x - cut_w + single_coordinate['patch_start_w'] = cut_w + single_coordinate['patch_end_w'] = patch_x - cut_w + + if x == 0: + single_coordinate['stack_start_h'] = x * gap_y + single_coordinate['stack_end_h'] = x * gap_y + patch_y - cut_h + single_coordinate['patch_start_h'] = 0 + single_coordinate['patch_end_h'] = patch_y - cut_h + elif x == num_h - 1: + 
single_coordinate['stack_start_h'] = whole_y - patch_y + cut_h + single_coordinate['stack_end_h'] = whole_y + single_coordinate['patch_start_h'] = cut_h + single_coordinate['patch_end_h'] = patch_y + else: + single_coordinate['stack_start_h'] = x * gap_y + cut_h + single_coordinate['stack_end_h'] = x * gap_y + patch_y - cut_h + single_coordinate['patch_start_h'] = cut_h + single_coordinate['patch_end_h'] = patch_y - cut_h + + if z == 0: + single_coordinate['stack_start_s'] = z * gap_t2 + single_coordinate['stack_end_s'] = z * gap_t2 + patch_t2 - cut_s + single_coordinate['patch_start_s'] = 0 + single_coordinate['patch_end_s'] = patch_t2 - cut_s + elif z == num_s - 1: + single_coordinate['stack_start_s'] = whole_t - patch_t2 + cut_s + single_coordinate['stack_end_s'] = whole_t + single_coordinate['patch_start_s'] = cut_s + single_coordinate['patch_end_s'] = patch_t2 + else: + single_coordinate['stack_start_s'] = z * gap_t2 + cut_s + single_coordinate['stack_end_s'] = z * gap_t2 + patch_t2 - cut_s + single_coordinate['patch_start_s'] = cut_s + single_coordinate['patch_end_s'] = patch_t2 - cut_s + + # noise_patch1 = noise_im[init_s:end_s,init_h:end_h,init_w:end_w] + patch_name = args.datasets_folder + '_x' + str(x) + '_y' + str(y) + '_z' + str(z) + # train_raw.append(noise_patch1.transpose(1,2,0)) + name_list.append(patch_name) + # print(' single_coordinate -----> ',single_coordinate) + coordinate_list[patch_name] = single_coordinate + + return name_list, noise_im, coordinate_list + + +def test_preprocess_chooseOne(args, img_id): + """ + Choose one original noisy stack and partition it into thousands of 3D sub-stacks (patch) with the setting + overlap factor in each dimension. + + Args: + args : the train object containing input params for partition + img_id : the id of the test image + Returns: + name_list : the coordinates of 3D patch are indexed by the patch name in name_list + noise_im : the original noisy stacks + coordinate_list : record the coordinate of 3D patch preparing for partition in whole stack + im_name : the file name of the noisy stacks + + """ + + patch_y = args.patch_y + patch_x = args.patch_x + patch_t2 = args.patch_t + gap_y = args.gap_y + gap_x = args.gap_x + gap_t2 = int(args.patch_t * (1 - args.overlap_factor)) + cut_w = (patch_x - gap_x) / 2 + cut_h = (patch_y - gap_y) / 2 + cut_s = (patch_t2 - gap_t2) / 2 + im_folder = args.datasets_path + + name_list = [] + coordinate_list = {} + img_list = list(os.walk(im_folder, topdown=False))[-1][-1] + img_list.sort() + + im_name = img_list[img_id] + + + im_dir = im_folder + '//' + im_name + noise_im = tiff.imread(im_dir) + img_mean = noise_im.mean() + # print('noise_im max -----> ',noise_im.max()) + # print('noise_im min -----> ',noise_im.min()) + if noise_im.shape[0] > args.test_datasize: + noise_im = noise_im[0:args.test_datasize, :, :] + if args.print_img_name: + print('Testing image name -----> ', im_name) + print('Testing image shape -----> ', noise_im.shape) + # Minus mean before training + noise_im = noise_im.astype(np.float32)/args.scale_factor + noise_im = noise_im-img_mean + # No preprocessing + # noise_im = noise_im.astype(np.float32) / args.scale_factor + # noise_im = (noise_im-noise_im.min()).astype(np.float32)/args.scale_factor + + whole_x = noise_im.shape[2] + whole_y = noise_im.shape[1] + whole_t = noise_im.shape[0] + + num_w = math.ceil((whole_x - patch_x + gap_x) / gap_x) + num_h = math.ceil((whole_y - patch_y + gap_y) / gap_y) + num_s = math.ceil((whole_t - patch_t2 + gap_t2) / gap_t2) + # 
print('int((whole_y-patch_y+gap_y)/gap_y) -----> ',int((whole_y-patch_y+gap_y)/gap_y)) + # print('int((whole_x-patch_x+gap_x)/gap_x) -----> ',int((whole_x-patch_x+gap_x)/gap_x)) + # print('int((whole_t-patch_t2+gap_t2)/gap_t2) -----> ',int((whole_t-patch_t2+gap_t2)/gap_t2)) + for x in range(0, num_h): + for y in range(0, num_w): + for z in range(0, num_s): + single_coordinate = {'init_h': 0, 'end_h': 0, 'init_w': 0, 'end_w': 0, 'init_s': 0, 'end_s': 0} + if x != (num_h - 1): + init_h = gap_y * x + end_h = gap_y * x + patch_y + elif x == (num_h - 1): + init_h = whole_y - patch_y + end_h = whole_y + + if y != (num_w - 1): + init_w = gap_x * y + end_w = gap_x * y + patch_x + elif y == (num_w - 1): + init_w = whole_x - patch_x + end_w = whole_x + + if z != (num_s - 1): + init_s = gap_t2 * z + end_s = gap_t2 * z + patch_t2 + elif z == (num_s - 1): + init_s = whole_t - patch_t2 + end_s = whole_t + single_coordinate['init_h'] = init_h + single_coordinate['end_h'] = end_h + single_coordinate['init_w'] = init_w + single_coordinate['end_w'] = end_w + single_coordinate['init_s'] = init_s + single_coordinate['end_s'] = end_s + + if y == 0: + single_coordinate['stack_start_w'] = y * gap_x + single_coordinate['stack_end_w'] = y * gap_x + patch_x - cut_w + single_coordinate['patch_start_w'] = 0 + single_coordinate['patch_end_w'] = patch_x - cut_w + elif y == num_w - 1: + single_coordinate['stack_start_w'] = whole_x - patch_x + cut_w + single_coordinate['stack_end_w'] = whole_x + single_coordinate['patch_start_w'] = cut_w + single_coordinate['patch_end_w'] = patch_x + else: + single_coordinate['stack_start_w'] = y * gap_x + cut_w + single_coordinate['stack_end_w'] = y * gap_x + patch_x - cut_w + single_coordinate['patch_start_w'] = cut_w + single_coordinate['patch_end_w'] = patch_x - cut_w + + if x == 0: + single_coordinate['stack_start_h'] = x * gap_y + single_coordinate['stack_end_h'] = x * gap_y + patch_y - cut_h + single_coordinate['patch_start_h'] = 0 + single_coordinate['patch_end_h'] = patch_y - cut_h + elif x == num_h - 1: + single_coordinate['stack_start_h'] = whole_y - patch_y + cut_h + single_coordinate['stack_end_h'] = whole_y + single_coordinate['patch_start_h'] = cut_h + single_coordinate['patch_end_h'] = patch_y + else: + single_coordinate['stack_start_h'] = x * gap_y + cut_h + single_coordinate['stack_end_h'] = x * gap_y + patch_y - cut_h + single_coordinate['patch_start_h'] = cut_h + single_coordinate['patch_end_h'] = patch_y - cut_h + + if z == 0: + single_coordinate['stack_start_s'] = z * gap_t2 + single_coordinate['stack_end_s'] = z * gap_t2 + patch_t2 - cut_s + single_coordinate['patch_start_s'] = 0 + single_coordinate['patch_end_s'] = patch_t2 - cut_s + elif z == num_s - 1: + single_coordinate['stack_start_s'] = whole_t - patch_t2 + cut_s + single_coordinate['stack_end_s'] = whole_t + single_coordinate['patch_start_s'] = cut_s + single_coordinate['patch_end_s'] = patch_t2 + else: + single_coordinate['stack_start_s'] = z * gap_t2 + cut_s + single_coordinate['stack_end_s'] = z * gap_t2 + patch_t2 - cut_s + single_coordinate['patch_start_s'] = cut_s + single_coordinate['patch_end_s'] = patch_t2 - cut_s + + # noise_patch1 = noise_im[init_s:end_s,init_h:end_h,init_w:end_w] + patch_name = args.datasets_name + '_x' + str(x) + '_y' + str(y) + '_z' + str(z) + # train_raw.append(noise_patch1.transpose(1,2,0)) + name_list.append(patch_name) + # print(' single_coordinate -----> ',single_coordinate) + coordinate_list[patch_name] = single_coordinate + + return name_list, noise_im, coordinate_list, 
im_name, img_mean diff --git a/DeepCAD_RT_pytorch/deepcad/model_3DUnet.py b/DeepCAD_RT_pytorch/deepcad/model_3DUnet.py new file mode 100644 index 0000000..5b4d47e --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/model_3DUnet.py @@ -0,0 +1,487 @@ +import importlib + +import torch +import torch.nn as nn + +from .buildingblocks import Encoder, Decoder, FinalConv, DoubleConv, ExtResNetBlock, SingleConv +from .utils import create_feature_maps + + +class UNet3D(nn.Module): + """ + 3DUnet model from + `"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation" + `. + + Args: + in_channels (int): number of input channels + out_channels (int): number of output segmentation masks; + Note that that the of out_channels might correspond to either + different semantic classes or to different binary segmentation mask. + It's up to the user of the class to interpret the out_channels and + use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class) + or BCEWithLogitsLoss (two-class) respectively) + f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number + of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4 + final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the + final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used + to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model. + layer_order (string): determines the order of layers + in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d. + See `SingleConv` for more info + init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, final_sigmoid, f_maps=64, layer_order='cr', num_groups=8, + **kwargs): + super(UNet3D, self).__init__() + + if isinstance(f_maps, int): + # use 4 levels in the encoder path as suggested in the paper + f_maps = create_feature_maps(f_maps, number_of_fmaps=4) + + # create encoder path consisting of Encoder modules. The length of the encoder is equal to `len(f_maps)` + # uses DoubleConv as a basic_module for the Encoder + encoders = [] + for i, out_feature_num in enumerate(f_maps): + if i == 0: + encoder = Encoder(in_channels, out_feature_num, apply_pooling=False, basic_module=DoubleConv, + conv_layer_order=layer_order, num_groups=num_groups) + else: + encoder = Encoder(f_maps[i - 1], out_feature_num, basic_module=DoubleConv, + conv_layer_order=layer_order, num_groups=num_groups) + encoders.append(encoder) + + self.encoders = nn.ModuleList(encoders) + + # create decoder path consisting of the Decoder modules. 
The length of the decoder is equal to `len(f_maps) - 1` + # uses DoubleConv as a basic_module for the Decoder + decoders = [] + reversed_f_maps = list(reversed(f_maps)) + for i in range(len(reversed_f_maps) - 1): + in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1] + out_feature_num = reversed_f_maps[i + 1] + decoder = Decoder(in_feature_num, out_feature_num, basic_module=DoubleConv, + conv_layer_order=layer_order, num_groups=num_groups) + decoders.append(decoder) + + self.decoders = nn.ModuleList(decoders) + + # in the last layer a 1×1 convolution reduces the number of output + # channels to the number of labels + self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1) + + if final_sigmoid: + self.final_activation = nn.Sigmoid() + else: + self.final_activation = nn.Softmax(dim=1) + + def forward(self, x): + # encoder part + encoders_features = [] + for encoder in self.encoders: + x = encoder(x) + # reverse the encoder outputs to be aligned with the decoder + encoders_features.insert(0, x) + + # remove the last encoder's output from the list + # !!remember: it's the 1st in the list + encoders_features = encoders_features[1:] + + # decoder part + for decoder, encoder_features in zip(self.decoders, encoders_features): + # pass the output from the corresponding encoder and the output + # of the previous decoder + x = decoder(encoder_features, x) + + x = self.final_conv(x) + + # apply final_activation (i.e. Sigmoid or Softmax) only for prediction. During training the network outputs + # logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric + # if not self.training: + # x = self.final_activation(x) + + return x + + +class ResidualUNet3D(nn.Module): + """ + Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf. + Uses ExtResNetBlock instead of DoubleConv as a basic building block as well as summation joining instead + of concatenation joining. Since the model effectively becomes a residual net, in theory it allows for deeper UNet. + + Args: + in_channels (int): number of input channels + out_channels (int): number of output segmentation masks; + Note that that the of out_channels might correspond to either + different semantic classes or to different binary segmentation mask. + It's up to the user of the class to interpret the out_channels and + use the proper loss criterion during training (i.e. NLLLoss (multi-class) + or BCELoss (two-class) respectively) + f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number + of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4,5 + final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the + final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used + to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model. + conv_layer_order (string): determines the order of layers + in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d. 
+ See `SingleConv` for more info + init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 + num_groups (int): number of groups for the GroupNorm + skip_final_activation (bool): if True, skips the final normalization layer (sigmoid/softmax) and returns the + logits directly + """ + + def __init__(self, in_channels, out_channels, final_sigmoid, f_maps=32, conv_layer_order='cge', num_groups=8, + skip_final_activation=False, **kwargs): + super(ResidualUNet3D, self).__init__() + + if isinstance(f_maps, int): + # use 5 levels in the encoder path as suggested in the paper + f_maps = create_feature_maps(f_maps, number_of_fmaps=5) + + # create encoder path consisting of Encoder modules. The length of the encoder is equal to `len(f_maps)` + # uses ExtResNetBlock as a basic_module for the Encoder + encoders = [] + for i, out_feature_num in enumerate(f_maps): + if i == 0: + encoder = Encoder(in_channels, out_feature_num, apply_pooling=False, basic_module=ExtResNetBlock, + conv_layer_order=conv_layer_order, num_groups=num_groups) + else: + encoder = Encoder(f_maps[i - 1], out_feature_num, basic_module=ExtResNetBlock, + conv_layer_order=conv_layer_order, num_groups=num_groups) + encoders.append(encoder) + + self.encoders = nn.ModuleList(encoders) + + # create decoder path consisting of the Decoder modules. The length of the decoder is equal to `len(f_maps) - 1` + # uses ExtResNetBlock as a basic_module for the Decoder + decoders = [] + reversed_f_maps = list(reversed(f_maps)) + for i in range(len(reversed_f_maps) - 1): + decoder = Decoder(reversed_f_maps[i], reversed_f_maps[i + 1], basic_module=ExtResNetBlock, + conv_layer_order=conv_layer_order, num_groups=num_groups) + decoders.append(decoder) + + self.decoders = nn.ModuleList(decoders) + + # in the last layer a 1×1 convolution reduces the number of output + # channels to the number of labels + self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1) + + if not skip_final_activation: + if final_sigmoid: + self.final_activation = nn.Sigmoid() + else: + self.final_activation = nn.Softmax(dim=1) + else: + self.final_activation = None + + def forward(self, x): + # encoder part + encoders_features = [] + for encoder in self.encoders: + x = encoder(x) + # reverse the encoder outputs to be aligned with the decoder + encoders_features.insert(0, x) + + # remove the last encoder's output from the list + # !!remember: it's the 1st in the list + encoders_features = encoders_features[1:] + + # decoder part + for decoder, encoder_features in zip(self.decoders, encoders_features): + # pass the output from the corresponding encoder and the output + # of the previous decoder + x = decoder(encoder_features, x) + + x = self.final_conv(x) + + # apply final_activation (i.e. Sigmoid or Softmax) only for prediction. During training the network outputs + # logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric + if not self.training and self.final_activation is not None: + x = self.final_activation(x) + + return x + + +class Noise2NoiseUNet3D(nn.Module): + """ + Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf. + Uses ExtResNetBlock instead of DoubleConv as a basic building block as well as summation joining instead + of concatenation joining. Since the model effectively becomes a residual net, in theory it allows for deeper UNet. 
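+    In this implementation, however, the network is assembled from DoubleConv blocks with
+    Conv3d + LeakyReLU + GroupNorm ('clg' ordering) and concatenation joining, and the final
+    layer is a 1x1x1 SingleConv ('cr') with no sigmoid/softmax applied to the output.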
+ + Args: + in_channels (int): number of input channels + out_channels (int): number of output segmentation masks; + Note that that the of out_channels might correspond to either + different semantic classes or to different binary segmentation mask. + It's up to the user of the class to interpret the out_channels and + use the proper loss criterion during training (i.e. NLLLoss (multi-class) + or BCELoss (two-class) respectively) + f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number + of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4,5 + init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 + num_groups (int): number of groups for the GroupNorm + """ + + def __init__(self, in_channels, out_channels, f_maps=16, num_groups=8, **kwargs): + super(Noise2NoiseUNet3D, self).__init__() + + # Use LeakyReLU activation everywhere except the last layer + conv_layer_order = 'clg' + + if isinstance(f_maps, int): + # use 5 levels in the encoder path as suggested in the paper + f_maps = create_feature_maps(f_maps, number_of_fmaps=5) + + # create encoder path consisting of Encoder modules. The length of the encoder is equal to `len(f_maps)` + # uses DoubleConv as a basic_module for the Encoder + encoders = [] + for i, out_feature_num in enumerate(f_maps): + if i == 0: + encoder = Encoder(in_channels, out_feature_num, apply_pooling=False, basic_module=DoubleConv, + conv_layer_order=conv_layer_order, num_groups=num_groups) + else: + encoder = Encoder(f_maps[i - 1], out_feature_num, basic_module=DoubleConv, + conv_layer_order=conv_layer_order, num_groups=num_groups) + encoders.append(encoder) + + self.encoders = nn.ModuleList(encoders) + + # create decoder path consisting of the Decoder modules. 
The length of the decoder is equal to `len(f_maps) - 1` + # uses DoubleConv as a basic_module for the Decoder + decoders = [] + reversed_f_maps = list(reversed(f_maps)) + for i in range(len(reversed_f_maps) - 1): + in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1] + out_feature_num = reversed_f_maps[i + 1] + decoder = Decoder(in_feature_num, out_feature_num, basic_module=DoubleConv, + conv_layer_order=conv_layer_order, num_groups=num_groups) + decoders.append(decoder) + + self.decoders = nn.ModuleList(decoders) + + # 1x1x1 conv + simple ReLU in the final convolution + self.final_conv = SingleConv(f_maps[0], out_channels, kernel_size=1, order='cr', padding=0) + + def forward(self, x): + # encoder part + encoders_features = [] + for encoder in self.encoders: + x = encoder(x) + # reverse the encoder outputs to be aligned with the decoder + encoders_features.insert(0, x) + + # remove the last encoder's output from the list + # !!remember: it's the 1st in the list + encoders_features = encoders_features[1:] + + # decoder part + for decoder, encoder_features in zip(self.decoders, encoders_features): + # pass the output from the corresponding encoder and the output + # of the previous decoder + x = decoder(encoder_features, x) + + x = self.final_conv(x) + + return x + + +def get_model(config): + def _model_class(class_name): + m = importlib.import_module('unet3d.model') #向a模块中导入c.py中的对象 + clazz = getattr(m, class_name) #getattr() 函数用于返回一个对象属性值。 + return clazz + + assert 'model' in config, 'Could not find model configuration' + model_config = config['model'] + model_class = _model_class(model_config['name']) + return model_class(**model_config) + + +###############################################Supervised Tags 3DUnet################################################### + +class TagsUNet3D(nn.Module): + """ + Supervised tags 3DUnet + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels; since most often we're trying to learn + 3D unit vectors we use 3 as a default value + output_heads (int): number of output heads from the network, each head corresponds to different + semantic tag/direction to be learned + conv_layer_order (string): determines the order of layers + in `DoubleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d. 
+ See `DoubleConv` for more info + init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 + """ + + def __init__(self, in_channels, out_channels=3, output_heads=1, conv_layer_order='crg', init_channel_number=32, + **kwargs): + super(TagsUNet3D, self).__init__() + + # number of groups for the GroupNorm + num_groups = min(init_channel_number // 2, 32) + + # encoder path consist of 4 subsequent Encoder modules + # the number of features maps is the same as in the paper + self.encoders = nn.ModuleList([ + Encoder(in_channels, init_channel_number, apply_pooling=False, conv_layer_order=conv_layer_order, + num_groups=num_groups), + Encoder(init_channel_number, 2 * init_channel_number, conv_layer_order=conv_layer_order, + num_groups=num_groups), + Encoder(2 * init_channel_number, 4 * init_channel_number, conv_layer_order=conv_layer_order, + num_groups=num_groups), + Encoder(4 * init_channel_number, 8 * init_channel_number, conv_layer_order=conv_layer_order, + num_groups=num_groups) + ]) + + self.decoders = nn.ModuleList([ + Decoder(4 * init_channel_number + 8 * init_channel_number, 4 * init_channel_number, + conv_layer_order=conv_layer_order, num_groups=num_groups), + Decoder(2 * init_channel_number + 4 * init_channel_number, 2 * init_channel_number, + conv_layer_order=conv_layer_order, num_groups=num_groups), + Decoder(init_channel_number + 2 * init_channel_number, init_channel_number, + conv_layer_order=conv_layer_order, num_groups=num_groups) + ]) + + self.final_heads = nn.ModuleList( + [FinalConv(init_channel_number, out_channels, num_groups=num_groups) for _ in + range(output_heads)]) + + def forward(self, x): + # encoder part + encoders_features = [] + for encoder in self.encoders: + x = encoder(x) + # reverse the encoder outputs to be aligned with the decoder + encoders_features.insert(0, x) + + # remove the last encoder's output from the list + # !!remember: it's the 1st in the list + encoders_features = encoders_features[1:] + + # decoder part + for decoder, encoder_features in zip(self.decoders, encoders_features): + # pass the output from the corresponding encoder and the output + # of the previous decoder + x = decoder(encoder_features, x) + + # apply final layer per each output head + tags = [final_head(x) for final_head in self.final_heads] + + # normalize directions with L2 norm + return [tag / torch.norm(tag, p=2, dim=1).detach().clamp(min=1e-8) for tag in tags] + + +################################################Distance transform 3DUNet############################################## +class DistanceTransformUNet3D(nn.Module): + """ + Predict Distance Transform to the boundary signal based on the output from the Tags3DUnet. Fore training use either: + 1. PixelWiseCrossEntropyLoss if the distance transform is quantized (classification) + 2. MSELoss if the distance transform is continuous (regression) + Args: + in_channels (int): number of input channels + out_channels (int): number of output segmentation masks; + Note that that the of out_channels might correspond to either + different semantic classes or to different binary segmentation mask. + It's up to the user of the class to interpret the out_channels and + use the proper loss criterion during training (i.e. 
NLLLoss (multi-class) + or BCELoss (two-class) respectively) + final_sigmoid (bool): 'sigmoid'/'softmax' whether element-wise nn.Sigmoid or nn.Softmax should be applied after + the final 1x1 convolution + init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 + """ + + def __init__(self, in_channels, out_channels, final_sigmoid, init_channel_number=32, **kwargs): + super(DistanceTransformUNet3D, self).__init__() + + # number of groups for the GroupNorm + num_groups = min(init_channel_number // 2, 32) + + # encoder path consist of 4 subsequent Encoder modules + # the number of features maps is the same as in the paper + self.encoders = nn.ModuleList([ + Encoder(in_channels, init_channel_number, apply_pooling=False, conv_layer_order='crg', + num_groups=num_groups), + Encoder(init_channel_number, 2 * init_channel_number, pool_type='avg', conv_layer_order='crg', + num_groups=num_groups) + ]) + + self.decoders = nn.ModuleList([ + Decoder(3 * init_channel_number, init_channel_number, conv_layer_order='crg', num_groups=num_groups) + ]) + + # in the last layer a 1×1 convolution reduces the number of output + # channels to the number of labels + self.final_conv = nn.Conv3d(init_channel_number, out_channels, 1) + + if final_sigmoid: + self.final_activation = nn.Sigmoid() + else: + self.final_activation = nn.Softmax(dim=1) + + def forward(self, inputs): + # allow multiple heads + if isinstance(inputs, list) or isinstance(inputs, tuple): + x = torch.cat(inputs, dim=1) + else: + x = inputs + + # encoder part + encoders_features = [] + for encoder in self.encoders: + x = encoder(x) + # reverse the encoder outputs to be aligned with the decoder + encoders_features.insert(0, x) + + # remove the last encoder's output from the list + # !!remember: it's the 1st in the list + encoders_features = encoders_features[1:] + + # decoder part + for decoder, encoder_features in zip(self.decoders, encoders_features): + # pass the output from the corresponding encoder and the output + # of the previous decoder + x = decoder(encoder_features, x) + + # apply final 1x1 convolution + x = self.final_conv(x) + + # apply final_activation (i.e. Sigmoid or Softmax) only for prediction. 
During training the network outputs + # logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric + if not self.training: + x = self.final_activation(x) + + return x + + +class EndToEndDTUNet3D(nn.Module): + def __init__(self, tags_in_channels, tags_out_channels, tags_output_heads, tags_init_channel_number, + dt_in_channels, dt_out_channels, dt_final_sigmoid, dt_init_channel_number, + tags_net_path=None, dt_net_path=None, **kwargs): + super(EndToEndDTUNet3D, self).__init__() + + self.tags_net = TagsUNet3D(tags_in_channels, tags_out_channels, tags_output_heads, + init_channel_number=tags_init_channel_number) + if tags_net_path is not None: + # load pre-trained TagsUNet3D + self.tags_net = self._load_net(tags_net_path, self.tags_net) + + self.dt_net = DistanceTransformUNet3D(dt_in_channels, dt_out_channels, dt_final_sigmoid, + init_channel_number=dt_init_channel_number) + if dt_net_path is not None: + # load pre-trained DistanceTransformUNet3D + self.dt_net = self._load_net(dt_net_path, self.dt_net) + + @staticmethod + def _load_net(checkpoint_path, model): + state = torch.load(checkpoint_path) + model.load_state_dict(state['model_state_dict']) + return model + + def forward(self, x): + x = self.tags_net(x) + return self.dt_net(x) diff --git a/DeepCAD_RT_pytorch/deepcad/movie_display.py b/DeepCAD_RT_pytorch/deepcad/movie_display.py new file mode 100644 index 0000000..fe3435f --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/movie_display.py @@ -0,0 +1,50 @@ +""" +Suite of functions that help display movie + +""" +import tifffile as tiff +import cv2 +import numpy as np +from csbdeep.utils import normalize + + +def display(filename, display_length, norm_min_percent, norm_max_percent): + """ + Display movie using opencv lib + + Args: + filename : display image file name + display_length : display frames number + norm_min_percent : minimum percentile of the image you want to retain + norm_max_percent : maximum percentile of the image you want to retain + """ + img = tiff.imread(filename) + img = img.astype(np.float32) + img = img[0:display_length, :, :] + img = normalize(img, norm_min_percent, norm_max_percent) + cv2.namedWindow('Raw video') + for i in range(display_length): + tempimg = img[i, :, :] + cv2.imshow('Raw video', tempimg) + cv2.waitKey(33) + cv2.destroyWindow('Raw video') + + +def test_img_display(img, display_length, norm_min_percent, norm_max_percent): + """ + Display movie using opencv lib + + Args: + img : display image file + display_length : display frames number + norm_min_percent : minimum percentile of the image you want to retain + norm_max_percent : maximum percentile of the image you want to retain + """ + img = img[50:display_length-50, :, :] # display the middle frames + img = normalize(img, norm_min_percent, norm_max_percent) + cv2.namedWindow('Denoised video') + for i in range(display_length - 100): + tempimg = img[i, :, :] + cv2.imshow('Denoised video', tempimg) + cv2.waitKey(33) + cv2.destroyWindow('Denoised video') diff --git a/DeepCAD_RT_pytorch/deepcad/network.py b/DeepCAD_RT_pytorch/deepcad/network.py new file mode 100644 index 0000000..bc72cd8 --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/network.py @@ -0,0 +1,20 @@ +from .model_3DUnet import UNet3D +import torch.nn as nn + +class Network_3D_Unet(nn.Module): + def __init__(self, UNet_type = '3DUNet', in_channels=1, out_channels=1, f_maps=64, final_sigmoid = True): + super(Network_3D_Unet, self).__init__() + + self.in_channels = in_channels + 
self.out_channels = out_channels + self.final_sigmoid = final_sigmoid + + if UNet_type == '3DUNet': + self.Generator = UNet3D( in_channels = in_channels, + out_channels = out_channels, + f_maps = f_maps, + final_sigmoid = final_sigmoid) + + def forward(self, x): + fake_x = self.Generator(x) + return fake_x diff --git a/DeepCAD_RT_pytorch/deepcad/test_collection.py b/DeepCAD_RT_pytorch/deepcad/test_collection.py new file mode 100644 index 0000000..f367865 --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/test_collection.py @@ -0,0 +1,319 @@ +import os +import numpy as np +import yaml +from .network import Network_3D_Unet +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader +import time +import datetime +from .data_process import test_preprocess_chooseOne, testset, multibatch_test_save, singlebatch_test_save +from skimage import io +from deepcad.movie_display import test_img_display + + +class testing_class(): + """ + Class implementing testing process + """ + + def __init__(self, params_dict): + """ + Constructor class for testing process + + Args: + params_dict: dict + The collection of testing params set by users + Returns: + self + + """ + self.overlap_factor = 0.5 + self.datasets_path = '' + self.fmap = 16 + self.output_dir = './results' + self.pth_dir = '' + self.batch_size = 1 + self.patch_t = 150 + self.patch_x = 150 + self.patch_y = 150 + self.gap_y = 115 + self.gap_x = 115 + self.gap_t = 115 + self.GPU = '0' + self.ngpu = 1 + self.num_workers = 0 + self.scale_factor = 1 + self.test_datasize = 400 + self.denoise_model = '' + self.visualize_images_per_epoch = False + self.save_test_images_per_epoch = False + self.set_params(params_dict) + + def run(self): + """ + General function for testing DeepCAD network. + + """ + # create some essential file for result storage + self.prepare_file() + # get models for processing + self.read_modellist() + # get stacks for processing + self.read_imglist() + # save some essential testing parameters in para.yaml + self.save_yaml_test() + # initialize denoise network with testing parameters. + self.initialize_network() + # specifies the GPU for the testing program. 
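# Editorial sketch (illustrative, not part of the patch): initialize_network() above builds the
# Network_3D_Unet generator from deepcad/network.py, which maps a 5-D tensor of shape
# (batch, channel=1, t, y, x) to a denoised tensor of the same shape. A quick CPU-side shape
# check, using a small patch for speed (the class default is 150 x 150 x 150):

import torch
from deepcad.network import Network_3D_Unet

net = Network_3D_Unet(in_channels=1, out_channels=1, f_maps=16, final_sigmoid=True)
patch = torch.randn(1, 1, 32, 32, 32)        # a small (N, C, T, Y, X) volume for speed
with torch.no_grad():
    out = net(patch)
print(out.shape)                             # expected to match patch.shape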
+ self.distribute_GPU() + # start testing and result visualization during testing period (optional) + self.test() + + def prepare_file(self): + """ + Make data folder to store testing results + Important Fields: + self.datasets_name: the sub folder of the dataset + self.pth_path: the folder for pth file storage + + """ + + self.datasets_name = self.datasets_path.split("/", 1)[1] + # pth_name = self.datasets_name + '_' + datetime.datetime.now().strftime("%Y%m%d%H%M") + # self.pth_path = self.pth_dir + '/' + pth_name + # if not os.path.exists(self.pth_path): + # os.makedirs(self.pth_path) + + if not os.path.exists(self.output_dir): + os.mkdir(self.output_dir) + current_time = datetime.datetime.now().strftime("%Y%m%d%H%M") + self.output_path = self.output_dir + '//' + 'DataFolderIs_' + self.datasets_name + '_' + current_time + '_ModelFolderIs_' + self.denoise_model + if not os.path.exists(self.output_path): + os.mkdir(self.output_path) + + def set_params(self, params_dict): + """ + Set the params set by user to the testing class object and calculate some default parameters for testing + + """ + for key, value in params_dict.items(): + if hasattr(self, key): + setattr(self, key, value) + + self.gap_x = int(self.patch_x * (1 - self.overlap_factor)) # patch gap in x + self.gap_y = int(self.patch_y * (1 - self.overlap_factor)) # patch gap in y + self.gap_t = int(self.patch_t * (1 - self.overlap_factor)) # patch gap in t + self.ngpu = str(self.GPU).count(',') + 1 # check the number of GPU used for testing + self.batch_size = self.ngpu # By default, the batch size is equal to the number of GPU for minimal memory consumption + print('\033[1;31mTesting parameters -----> \033[0m') + print(self.__dict__) + + def read_imglist(self): + im_folder = self.datasets_path + self.img_list = list(os.walk(im_folder, topdown=False))[-1][-1] + self.img_list.sort() + print('\033[1;31mStacks for processing -----> \033[0m') + print('Total stack number -----> ', len(self.img_list)) + for img in self.img_list: print(img) + + def read_modellist(self): + + model_path = self.pth_dir + '//' + self.denoise_model + model_list = list(os.walk(model_path, topdown=False))[-1][-1] + model_list.sort() + + # calculate the number of model file + count_pth = 0 + for i in range(len(model_list)): + aaa = model_list[i] + if '.pth' in aaa: + count_pth = count_pth + 1 + self.model_list = model_list + self.model_list_length = count_pth + + def initialize_network(self): + """ + Initialize U-Net 3D network, which is the main network architecture of DeepCAD + + Important Fields: + self.fmap: the number of the feature map in U-Net 3D network. + self.local_model: the denoise network + + """ + denoise_generator = Network_3D_Unet(in_channels=1, + out_channels=1, + f_maps=self.fmap, + final_sigmoid=True) + self.local_model = denoise_generator + + def save_yaml_test(self): + """ + Save some essential params in para.yaml. 
+ + """ + yaml_name = self.output_path + '//para.yaml' + para = {'datasets_path': 0, 'test_datasize': 0, 'denoise_model': 0, + 'output_dir': 0, 'pth_dir': 0, 'GPU': 0, 'batch_size': 0, + 'patch_x': 0, 'patch_y': 0, 'patch_t': 0, 'gap_y': 0, 'gap_x': 0, + 'gap_t': 0, 'fmap': 0, 'scale_factor': 0, 'overlap_factor': 0} + para["datasets_path"] = self.datasets_path + para["denoise_model"] = self.denoise_model + para["test_datasize"] = self.test_datasize + para["output_dir"] = self.output_dir + para["pth_dir"] = self.pth_dir + para["GPU"] = self.GPU + para["batch_size"] = self.batch_size + para["patch_x"] = self.patch_x + para["patch_y"] = self.patch_y + para["patch_t"] = self.patch_t + para["gap_x"] = self.gap_x + para["gap_y"] = self.gap_y + para["gap_t"] = self.gap_t + para["fmap"] = self.fmap + para["scale_factor"] = self.scale_factor + para["overlap_factor"] = self.overlap_factor + with open(yaml_name, 'w') as f: + yaml.dump(para, f) + + def distribute_GPU(self): + """ + Allocate the GPU for the testing program. Print the using GPU information to the screen. + For acceleration, multiple GPUs parallel testing is recommended. + + """ + os.environ["CUDA_VISIBLE_DEVICES"] = str(self.GPU) + if torch.cuda.is_available(): + self.local_model = self.local_model.cuda() + self.local_model = nn.DataParallel(self.local_model, device_ids=range(self.ngpu)) + print('\033[1;31mUsing {} GPU(s) for testing -----> \033[0m'.format(torch.cuda.device_count())) + cuda = True if torch.cuda.is_available() else False + Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor + + def test(self): + """ + Pytorch testing workflow + + """ + pth_count=0 + for pth_index in range(len(self.model_list)): + aaa = self.model_list[pth_index] + if '.pth' in aaa: + pth_count=pth_count+1 + pth_name = self.model_list[pth_index] + output_path_name = self.output_path + '//' + pth_name.replace('.pth', '') + if not os.path.exists(output_path_name): + os.mkdir(output_path_name) + + # load model + model_name = self.pth_dir + '//' + self.denoise_model + '//' + pth_name + if isinstance(self.local_model, nn.DataParallel): + self.local_model.module.load_state_dict(torch.load(model_name)) # parallel + self.local_model.eval() + else: + self.local_model.load_state_dict(torch.load(model_name)) # not parallel + self.local_model.eval() + self.local_model.cuda() + self.print_img_name = False + # test all stacks + for N in range(len(self.img_list)): + name_list, noise_img, coordinate_list,test_im_name, img_mean = test_preprocess_chooseOne(self, N) + # print(len(name_list)) + prev_time = time.time() + time_start = time.time() + denoise_img = np.zeros(noise_img.shape) + input_img = np.zeros(noise_img.shape) + + test_data = testset(name_list, coordinate_list, noise_img) + testloader = DataLoader(test_data, batch_size=self.batch_size, shuffle=False, + num_workers=self.num_workers) + for iteration, (noise_patch, single_coordinate) in enumerate(testloader): + noise_patch = noise_patch.cuda() + real_A = noise_patch + + real_A = Variable(real_A) + fake_B = self.local_model(real_A) + + # Determine approximate time left + batches_done = iteration + batches_left = 1 * len(testloader) - batches_done + time_left_seconds = int(batches_left * (time.time() - prev_time)) + time_left = datetime.timedelta(seconds=time_left_seconds) + prev_time = time.time() + + if iteration % 1 == 0: + time_end = time.time() + time_cost = time_end - time_start # datetime.timedelta(seconds= (time_end - time_start)) + print( + '\r[Model %d/%d, %s] [Stack %d/%d, %s] [Patch %d/%d] 
[Time Cost: %.0d s] [ETA: %s s] ' + % ( + pth_count, + self.model_list_length, + pth_name, + N + 1, + len(self.img_list), + self.img_list[N], + iteration + 1, + len(testloader), + time_cost, + time_left_seconds + ), end=' ') + + if (iteration + 1) % len(testloader) == 0: + print('\n', end=' ') + + # Enhanced sub-stacks are sequentially output from the network + output_image = np.squeeze(fake_B.cpu().detach().numpy()) + raw_image = np.squeeze(real_A.cpu().detach().numpy()) + if (output_image.ndim == 3): + postprocess_turn = 1 + else: + postprocess_turn = output_image.shape[0] + + # The final enhanced stack can be obtained by stitching all sub-stacks. + if (postprocess_turn > 1): + for id in range(postprocess_turn): + output_patch, raw_patch, stack_start_w, stack_end_w, stack_start_h, stack_end_h, stack_start_s, stack_end_s = multibatch_test_save( + single_coordinate, id, output_image, raw_image) + output_patch=output_patch+img_mean + raw_patch=raw_patch+img_mean + denoise_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, + stack_start_w:stack_end_w] \ + = output_patch * (np.sum(raw_patch) / np.sum(output_patch)) ** 0.5 + input_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, + stack_start_w:stack_end_w] \ + = raw_patch + else: + output_patch, raw_patch, stack_start_w, stack_end_w, stack_start_h, stack_end_h, stack_start_s, stack_end_s = singlebatch_test_save( + single_coordinate, output_image, raw_image) + output_patch=output_patch+img_mean + raw_patch=raw_patch+img_mean + denoise_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, stack_start_w:stack_end_w] \ + = output_patch * (np.sum(raw_patch) / np.sum(output_patch)) ** 0.5 + input_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, stack_start_w:stack_end_w] \ + = raw_patch + + # Stitching finish + output_img = denoise_img.squeeze().astype(np.float32) * self.scale_factor + del denoise_img + + + # Normalize and display inference image + if (self.visualize_images_per_epoch): + print('Displaying the denoised file ----->') + display_length = 200 + test_img_display(output_img, display_length=display_length, norm_min_percent=1, + norm_max_percent=99) + + # Save inference image + if (self.save_test_images_per_epoch): + if output_img.min()<0: + output_img = output_img.astype('int16') + else: + output_img = output_img.astype('uint16') + # output_img = output_img.astype('int16') + result_name = output_path_name + '//' + self.img_list[N].replace('.tif','') + '_' + pth_name.replace( + '.pth', '') + '_output.tif' + io.imsave(result_name, output_img, check_contrast=False) + print('Test finished. 
Save all results to disk.') diff --git a/DeepCAD_RT_pytorch/deepcad/train_collection.py b/DeepCAD_RT_pytorch/deepcad/train_collection.py new file mode 100644 index 0000000..7ec833d --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/train_collection.py @@ -0,0 +1,446 @@ +import os +import datetime + +import numpy as np +import yaml + +from .network import Network_3D_Unet +import tifffile as tiff +import random +import math +import torch +from torch.utils.data import Dataset +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader +import time +import datetime +from .data_process import trainset, test_preprocess_chooseOne, testset, multibatch_test_save, singlebatch_test_save +from skimage import io +from deepcad.movie_display import test_img_display + + +class training_class(): + """ + Class implementing training process + """ + + + def __init__(self, params_dict): + """ + Constructor class for training process + + Args: + params_dict: dict + The collection of training params set by users + Returns: + self + + """ + self.overlap_factor = 0.5 + self.datasets_path = '' + self.n_epochs = 20 + self.fmap = 16 + self.output_dir = './results' + self.pth_dir = '' + self.batch_size = 1 + self.patch_t = 150 + self.patch_x = 150 + self.patch_y = 150 + self.gap_y = 115 + self.gap_x = 115 + self.gap_t = 115 + self.lr = 0.00001 + self.b1 = 0.5 + self.b2 = 0.999 + self.GPU = '0' + self.ngpu = 1 + self.num_workers = 0 + self.scale_factor = 1 + self.train_datasets_size = 2000 + self.select_img_num = 1000 + self.test_datasize = 400 # how many slices to be tested (use the first image in the folder by default) + self.visualize_images_per_epoch = False + self.save_test_images_per_epoch = False + self.set_params(params_dict) + def print_params(self): + print("{:<20} {:<20} ".format('datasets_path:', self.datasets_path)) + print("{:<20} {:<10} {:<10} {:<10} {:<10} {:<10}".format('train_datasets_size:', self.train_datasets_size, 'pth_dir:', + self.pth_dir,'batch_size:', self.batch_size)) + print("{:<20} {:<10} {:<10} {:<10} {:<10} {:<10}".format('overlap_factor:', self.overlap_factor,'fmap:',self.fmap, 'lr:',self.lr)) + print("{:<20} {:<10} {:<10} {:<10} {:<10} {:<10}".format('n_epochs:', self.n_epochs, 'GPU:',self.GPU, 'ngpu:',self.ngpu)) + print("{:<10} {:<10} {:<10} {:<10} {:<10} {:<10}".format('patch_t:', self.patch_t, 'patch_x:',self.patch_x, 'patch_y:',self.patch_y)) + print("{:<10} {:<10} {:<10} {:<10} {:<10} {:<10}".format('patch_t:', self.gap_t, 'gap_x:',self.gap_x, 'gap_y:',self.gap_y)) + print("{:<10} {:<10} {:<10} {:<10} {:<10} {:<10}".format('Adam b1:', self.b1, 'Adam b2:',self.b2, 'num_workers:',self.num_workers)) + print("{:<10} {:<7} {:<15} {:<6} {:<7} {:<10}".format('scale_factor:', self.scale_factor, 'select_img_num:',self.select_img_num, 'test_datasize:',self.test_datasize)) + print('visualize_images_per_epoch:',self.visualize_images_per_epoch,end="\t") + print('save_test_images_per_epoch:',self.save_test_images_per_epoch,end="\t") + def run(self): + """ + General function for training DeepCAD network. + + """ + # create some essential file for result storage + self.prepare_file() + # crop input tiff file into 3D patches + self.train_preprocess_lessMemoryMulStacks() + # save some essential training parameters in para.yaml + self.save_yaml_train() + # initialize denoise network with training parameters. + self.initialize_network() + # specifies the GPU for the training program. 
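# Editorial sketch (illustrative, not part of the patch): the user-supplied GPU string handled in
# set_params() controls both device visibility and the default batch size, e.g. when two cards
# are requested:

gpu = '0,1'
ngpu = str(gpu).count(',') + 1      # -> 2 devices, mirroring set_params()
batch_size = ngpu                   # one 3D patch per GPU by default
# distribute_GPU() then sets CUDA_VISIBLE_DEVICES to '0,1' and wraps the model in
# nn.DataParallel(model, device_ids=range(ngpu)) so each batch is split across the cards.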
+ self.distribute_GPU() + # start training and result visualization during training period (optional) + self.train() + + def prepare_file(self): + """ + Make data folder to store training results + Important Fields: + self.datasets_name: the sub folder of the dataset + self.pth_path: the folder for pth file storage + + """ + + self.datasets_name = self.datasets_path.split("/", 1)[1] + pth_name = self.datasets_name + '_' + datetime.datetime.now().strftime("%Y%m%d%H%M") + self.pth_path = self.pth_dir + '/' + pth_name + if not os.path.exists(self.pth_path): + os.makedirs(self.pth_path) + if not os.path.exists(self.output_dir): + os.mkdir(self.output_dir) + + def set_params(self, params_dict): + """ + Set the params set by user to the training class object and calculate some default parameters for training + + """ + for key, value in params_dict.items(): + if hasattr(self, key): + setattr(self, key, value) + + self.gap_x = int(self.patch_x * (1 - self.overlap_factor)) # patch gap in x + self.gap_y = int(self.patch_y * (1 - self.overlap_factor)) # patch gap in y + self.gap_t = int(self.patch_t * (1 - self.overlap_factor)) # patch gap in t + self.ngpu = str(self.GPU).count(',') + 1 # check the number of GPU used for training + self.batch_size = self.ngpu # By default, the batch size is equal to the number of GPU for minimal memory consumption + print('\033[1;31mTraining parameters -----> \033[0m') + print(self.__dict__) + # self.print_params() + + def initialize_network(self): + """ + Initialize U-Net 3D network, which is the main network architecture of DeepCAD + + Important Fields: + self.fmap: the number of the feature map in U-Net 3D network. + self.local_model: the denoise network + + """ + denoise_generator = Network_3D_Unet(in_channels=1, + out_channels=1, + f_maps=self.fmap, + final_sigmoid=True) + self.local_model = denoise_generator + + def get_gap_t(self): + """ + Calculate the patch gap in t according to the size of input data and the patch gap in x and y + + Important Fields: + self.gap_t: the patch gap in t. + + """ + w_num = math.floor((self.whole_x - self.patch_x) / self.gap_x) + 1 + h_num = math.floor((self.whole_y - self.patch_y) / self.gap_y) + 1 + s_num = math.ceil(self.train_datasets_size / w_num / h_num / self.stack_num) + self.gap_t = math.floor((self.whole_t - self.patch_t * 2) / (s_num - 1)) + + def train_preprocess_lessMemoryMulStacks(self): + """ + The original noisy stack is partitioned into thousands of 3D sub-stacks (patch) with the setting + overlap factor in each dimension. + + Important Fields: + self.name_list : the coordinates of 3D patch are indexed by the patch name in name_list. + self.coordinate_list : record the coordinate of 3D patch preparing for partition in whole stack. + self.stack_index : the index of the noisy stacks. + self.noise_im_all : the collection of all noisy stacks. 
+ + """ + self.name_list = [] + self.coordinate_list = {} + self.stack_index = [] + self.noise_im_all = [] + ind = 0 + print('\033[1;31mImage list for training -----> \033[0m') + self.stack_num = len(list(os.walk(self.datasets_path, topdown=False))[-1][-1]) + print('Total stack number -----> ', self.stack_num) + + for im_name in list(os.walk(self.datasets_path, topdown=False))[-1][-1]: + print('Noise image name -----> ', im_name) + im_dir = self.datasets_path + '//' + im_name + noise_im = tiff.imread(im_dir) + if noise_im.shape[0] > self.select_img_num: + noise_im = noise_im[0:self.select_img_num, :, :] + self.whole_x = noise_im.shape[2] + self.whole_y = noise_im.shape[1] + self.whole_t = noise_im.shape[0] + print('Noise image shape -----> ', noise_im.shape) + # Calculate real gap_t + self.get_gap_t() + # No preprocessing + # noise_im = noise_im.astype(np.float32) / self.scale_factor + # Minus mean before training + noise_im = noise_im.astype(np.float32)/self.scale_factor + noise_im = noise_im-noise_im.mean() + + self.noise_im_all.append(noise_im) + patch_t2 = self.patch_t * 2 + for x in range(0, int((self.whole_y - self.patch_y + self.gap_y) / self.gap_y)): + for y in range(0, int((self.whole_x - self.patch_x + self.gap_x) / self.gap_x)): + for z in range(0, int((self.whole_t - patch_t2 + self.gap_t) / self.gap_t)): + single_coordinate = {'init_h': 0, 'end_h': 0, 'init_w': 0, 'end_w': 0, 'init_s': 0, 'end_s': 0} + init_h = self.gap_y * x + end_h = self.gap_y * x + self.patch_y + init_w = self.gap_x * y + end_w = self.gap_x * y + self.patch_x + init_s = self.gap_t * z + end_s = self.gap_t * z + patch_t2 + single_coordinate['init_h'] = init_h + single_coordinate['end_h'] = end_h + single_coordinate['init_w'] = init_w + single_coordinate['end_w'] = end_w + single_coordinate['init_s'] = init_s + single_coordinate['end_s'] = end_s + patch_name = self.datasets_name + '_' + im_name.replace('.tif', '') + '_x' + str( + x) + '_y' + str(y) + '_z' + str(z) + self.name_list.append(patch_name) + self.coordinate_list[patch_name] = single_coordinate + self.stack_index.append(ind) + ind = ind + 1 + + def save_yaml_train(self): + """ + Save some essential params in para.yaml. + + """ + yaml_name = self.pth_path + '//para.yaml' + para = {'n_epochs': 0, 'datasets_path': 0, 'overlap_factor': 0, + 'output_dir': 0, 'pth_path': 0, 'GPU': 0, 'batch_size': 0, + 'patch_x': 0, 'patch_y': 0, 'patch_t': 0, 'gap_y': 0, 'gap_x': 0, + 'gap_t': 0, 'lr': 0, 'b1': 0, 'b2': 0, 'fmap': 0, 'scale_factor': 0, + 'select_img_num': 0, 'train_datasets_size': 0} + para["n_epochs"] = self.n_epochs + para["datasets_path"] = self.datasets_path + para["output_dir"] = self.output_dir + para["pth_path"] = self.pth_path + para["GPU"] = self.GPU + para["batch_size"] = self.batch_size + para["patch_x"] = self.patch_x + para["patch_y"] = self.patch_y + para["patch_t"] = self.patch_t + para["gap_x"] = self.gap_x + para["gap_y"] = self.gap_y + para["gap_t"] = self.gap_t + para["lr"] = self.lr + para["b1"] = self.b1 + para["b2"] = self.b2 + para["fmap"] = self.fmap + para["scale_factor"] = self.scale_factor + para["select_img_num"] = self.select_img_num + para["train_datasets_size"] = self.train_datasets_size + para["overlap_factor"] = self.overlap_factor + with open(yaml_name, 'w') as f: + yaml.dump(para, f) + + def distribute_GPU(self): + """ + Allocate the GPU for the training program. Print the using GPU information to the screen. + For acceleration, multiple GPUs parallel training is recommended. 
+ + """ + os.environ["CUDA_VISIBLE_DEVICES"] = str(self.GPU) + if torch.cuda.is_available(): + self.local_model = self.local_model.cuda() + self.local_model = nn.DataParallel(self.local_model, device_ids=range(self.ngpu)) + print('\033[1;31mUsing {} GPU(s) for training -----> \033[0m'.format(torch.cuda.device_count())) + + def train(self): + """ + Pytorch training workflow + + """ + optimizer_G = torch.optim.Adam(self.local_model.parameters(), lr=self.lr, betas=(self.b1, self.b2)) + cuda = True if torch.cuda.is_available() else False + Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor + prev_time = time.time() + time_start = time.time() + L1_pixelwise = torch.nn.L1Loss() + L2_pixelwise = torch.nn.MSELoss() + L2_pixelwise.cuda() + L1_pixelwise.cuda() + + for epoch in range(0, self.n_epochs): + train_data = trainset(self.name_list, self.coordinate_list, self.noise_im_all, self.stack_index) + trainloader = DataLoader(train_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers) + for iteration, (input, target) in enumerate(trainloader): + # The input volume and corresponding target volume from data loader to train the deep neural network + input = input.cuda() + target = target.cuda() + real_A = input + real_B = target + real_A = Variable(real_A) + fake_B = self.local_model(real_A) + L1_loss = L1_pixelwise(fake_B, real_B) + L2_loss = L2_pixelwise(fake_B, real_B) + optimizer_G.zero_grad() + + # Calculate total loss + Total_loss = 0.5 * L1_loss + 0.5 * L2_loss + Total_loss.backward() + optimizer_G.step() + # Record and estimate the remaining time + batches_done = epoch * len(trainloader) + iteration + batches_left = self.n_epochs * len(trainloader) - batches_done + time_left = datetime.timedelta(seconds=int(batches_left * (time.time() - prev_time))) + prev_time = time.time() + + if iteration % 1 == 0: + time_end = time.time() + print( + '\r[Epoch %d/%d] [Batch %d/%d] [Total loss: %.2f, L1 Loss: %.2f, L2 Loss: %.2f] [ETA: %s] [Time cost: %.2d s] ' + % ( + epoch + 1, + self.n_epochs, + iteration + 1, + len(trainloader), + Total_loss.item(), + L1_loss.item(), + L2_loss.item(), + time_left, + time_end - time_start + ), end=' ') + + + + if (iteration + 1) % (len(trainloader)) == 0: + print('\n', end=' ') + # Save model at the end of every epoch + self.save_model(epoch, iteration) + # Start inference using the denoise model at the end of every epoch (optional) + if (self.visualize_images_per_epoch | self.save_test_images_per_epoch): + print('Testing model of epoch {} on the first noisy file ----->'.format(epoch + 1)) + self.test(epoch, iteration) + print('\n', end=' ') + print('Train finished. Save all models to disk.') + + def save_model(self, epoch, iteration): + """ + Model storage. 
+ Args: + train_epoch : current train epoch number + train_iteration : current train_iteration number + """ + model_save_name = self.pth_path + '//E_' + str(epoch + 1).zfill(2) + '_Iter_' + str(iteration + 1).zfill( + 4) + '.pth' + if isinstance(self.local_model, nn.DataParallel): + torch.save(self.local_model.module.state_dict(), model_save_name) # parallel + else: + torch.save(self.local_model.state_dict(), model_save_name) # not parallel + + def test(self, train_epoch, train_iteration): + """ + Pytorch testing workflow + Args: + train_epoch : current train epoch number + train_iteration : current train_iteration number + """ + # Crop test file into 3D patches for inference + self.print_img_name = True + name_list, noise_img, coordinate_list, test_im_name, img_mean = test_preprocess_chooseOne(self, img_id=0) + # Record the inference time + prev_time = time.time() + time_start = time.time() + denoise_img = np.zeros(noise_img.shape) + input_img = np.zeros(noise_img.shape) + test_data = testset(name_list, coordinate_list, noise_img) + testloader = DataLoader(test_data, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers) + for iteration, (noise_patch, single_coordinate) in enumerate(testloader): + # Pre-trained models are loaded into memory and the sub-stacks are directly fed into the model. + noise_patch = noise_patch.cuda() + real_A = noise_patch + real_A = Variable(real_A) + fake_B = self.local_model(real_A) + + # Determine approximate time left + batches_done = iteration + batches_left = 1 * len(testloader) - batches_done + time_left_seconds = int(batches_left * (time.time() - prev_time)) + time_left = datetime.timedelta(seconds=time_left_seconds) + prev_time = time.time() + if iteration % 1 == 0: + time_end = time.time() + time_cost = time_end - time_start # datetime.timedelta(seconds= (time_end - time_start)) + print( + '\r [Patch %d/%d] [Time Cost: %.0d s] [ETA: %s s] ' + % ( + iteration + 1, + len(testloader), + time_cost, + time_left_seconds + ), end=' ') + + if (iteration + 1) % len(testloader) == 0: + print('\n', end=' ') + + # Enhanced sub-stacks are sequentially output from the network + output_image = np.squeeze(fake_B.cpu().detach().numpy()) + raw_image = np.squeeze(real_A.cpu().detach().numpy()) + if (output_image.ndim == 3): + postprocess_turn = 1 + else: + postprocess_turn = output_image.shape[0] + + # The final enhanced stack can be obtained by stitching all sub-stacks. 
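# Editorial note (illustrative, not part of the patch): before being written into the stitched
# volume, each sub-stack below has the image mean added back and is then multiplied by
# (np.sum(raw_patch) / np.sum(output_patch)) ** 0.5, which pulls its total intensity to the
# geometric mean of the raw and denoised sums. For example, with hypothetical sums
# sum(raw_patch) = 4e6 and sum(output_patch) = 1e6, the scale factor is (4e6 / 1e6) ** 0.5 = 2.0
# and the stitched patch sum becomes 2e6 = (4e6 * 1e6) ** 0.5.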
+ if (postprocess_turn > 1): + for id in range(postprocess_turn): + output_patch, raw_patch, stack_start_w, stack_end_w, stack_start_h, stack_end_h, stack_start_s, stack_end_s = multibatch_test_save( + single_coordinate, id, output_image, raw_image) + + output_patch=output_patch+img_mean + raw_patch=raw_patch+img_mean + + denoise_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, stack_start_w:stack_end_w] \ + = output_patch * (np.sum(raw_patch) / np.sum(output_patch)) ** 0.5 + input_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, stack_start_w:stack_end_w] \ + = raw_patch + else: + output_patch, raw_patch, stack_start_w, stack_end_w, stack_start_h, stack_end_h, stack_start_s, stack_end_s = singlebatch_test_save( + single_coordinate, output_image, raw_image) + + output_patch=output_patch+img_mean + raw_patch=raw_patch+img_mean + + denoise_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, stack_start_w:stack_end_w] \ + = output_patch * (np.sum(raw_patch) / np.sum(output_patch)) ** 0.5 + input_img[stack_start_s:stack_end_s, stack_start_h:stack_end_h, stack_start_w:stack_end_w] \ + = raw_patch + + # Stitching finish + output_img = denoise_img.squeeze().astype(np.float32) * self.scale_factor + del denoise_img + + # Normalize and display inference image + if (self.visualize_images_per_epoch): + print('Displaying the first denoised file ----->') + display_length = self.test_datasize + test_img_display(output_img, display_length=display_length, norm_min_percent=1, norm_max_percent=98) + + # Save inference image + if (self.save_test_images_per_epoch): + output_img = output_img[50:self.test_datasize-50, :, :] + output_img = output_img.astype('int16') + result_name = self.pth_path + '//' + test_im_name.replace('.tif', '') + '_' + 'E_' + str( + train_epoch + 1).zfill(2) + '_Iter_' + str(train_iteration + 1).zfill(4) + '.tif' + io.imsave(result_name, output_img, check_contrast=False) diff --git a/DeepCAD_RT_pytorch/deepcad/utils.py b/DeepCAD_RT_pytorch/deepcad/utils.py new file mode 100644 index 0000000..3c2efda --- /dev/null +++ b/DeepCAD_RT_pytorch/deepcad/utils.py @@ -0,0 +1,270 @@ +import os + +import matplotlib.pyplot as plt +import yaml +import gdown +import zipfile + +plt.ioff() +plt.switch_backend('agg') + + +######################################################################################################################## +def create_feature_maps(init_channel_number, number_of_fmaps): + return [init_channel_number * 2 ** k for k in range(number_of_fmaps)] + + +def save_yaml_train(opt, yaml_name): + para = {'n_epochs': 0, + 'datasets_folder': 0, + 'datasets_path': 0, + 'output_dir': 0, + 'pth_path': 0, + 'GPU': 0, + 'batch_size': 0, + 'patch_x': 0, + 'patch_y': 0, + 'patch_t': 0, + 'gap_y': 0, + 'gap_x': 0, + 'gap_t': 0, + 'lr': 0, + 'b1': 0, + 'b2': 0, + 'fmap': 0, + 'scale_factor': 0, + 'select_img_num': 0, + 'train_datasets_size': 0} + para["n_epochs"] = opt.n_epochs + para["datasets_folder"] = opt.datasets_folder + para["datasets_path"] = opt.datasets_path + para["output_dir"] = opt.output_dir + para["pth_path"] = opt.pth_path + para["GPU"] = opt.GPU + para["batch_size"] = opt.batch_size + para["patch_x"] = opt.patch_x + para["patch_y"] = opt.patch_y + para["patch_t"] = opt.patch_t + para["gap_x"] = opt.gap_x + para["gap_y"] = opt.gap_y + para["gap_t"] = opt.gap_t + para["lr"] = opt.lr + para["b1"] = opt.b1 + para["b2"] = opt.b2 + para["fmap"] = opt.fmap + para["scale_factor"] = opt.scale_factor + para["select_img_num"] = opt.select_img_num + 
para["train_datasets_size"] = opt.train_datasets_size + with open(yaml_name, 'w') as f: + data = yaml.dump(para, f) + + +def save_yaml_test(opt, yaml_name): + para = {'n_epochs': 0, + 'datasets_folder': 0, + 'datasets_path': 0, + 'output_dir': 0, + 'pth_path': 0, + 'GPU': 0, + 'batch_size': 0, + 'patch_x': 0, + 'patch_y': 0, + 'patch_t': 0, + 'gap_x': 0, + 'gap_y': 0, + 'gap_t': 0, + 'lr': 0, + 'b1': 0, + 'b2': 0, + 'fmap': 0, + 'scale_factor': 0, + 'denoise_model': 0, + 'test_datasize': 0} + para["n_epochs"] = opt.n_epochs + para["datasets_folder"] = opt.datasets_folder + para["datasets_path"] = opt.datasets_path + para["output_dir"] = opt.output_dir + para["pth_path"] = opt.pth_path + para["GPU"] = opt.GPU + para["batch_size"] = opt.batch_size + para["patch_x"] = opt.patch_x + para["patch_y"] = opt.patch_y + para["patch_t"] = opt.patch_t + para["gap_x"] = opt.gap_x + para["gap_y"] = opt.gap_y + para["gap_t"] = opt.gap_t + para["lr"] = opt.lr + para["b1"] = opt.b1 + para["b2"] = opt.b2 + para["fmap"] = opt.fmap + para["scale_factor"] = opt.scale_factor + para["denoise_model"] = opt.denoise_model + para["test_datasize"] = opt.test_datasize + with open(yaml_name, 'w') as f: + data = yaml.dump(para, f) + + +def read_yaml(opt, yaml_name): + with open(yaml_name) as f: + para = yaml.load(f, Loader=yaml.FullLoader) + print(para) + opt.n_epochspara = ["n_epochs"] + # opt.datasets_folder = para["datasets_folder"] + opt.output_dir = para["output_dir"] + opt.batch_size = para["batch_size"] + # opt.patch_t = para["patch_t"] + # opt.patch_x = para["patch_x"] + # opt.patch_y = para["patch_y"] + # opt.gap_y = para["gap_y"] + # opt.gap_x = para["gap_x"] + # opt.gap_t = para["gap_t"] + opt.lr = para["lr"] + opt.fmap = para["fmap"] + opt.b1 = para["b1"] + para["b2"] = opt.b2 + para["scale_factor"] = opt.scale_factor + + +def name2index(opt, input_name, num_h, num_w, num_s): + # print(input_name) + name_list = input_name.split('_') + # print(name_list) + z_part = name_list[-1] + # print(z_part) + y_part = name_list[-2] + # print(y_part) + x_part = name_list[-3] + # print(x_part) + z_index = int(z_part.replace('z', '')) + y_index = int(y_part.replace('y', '')) + x_index = int(x_part.replace('x', '')) + # print("x_index ---> ",x_index,"y_index ---> ", y_index,"z_index ---> ", z_index) + + cut_w = (opt.patch_x - opt.gap_x) / 2 + cut_h = (opt.patch_y - opt.gap_y) / 2 + cut_s = (opt.patch_t - opt.gap_t) / 2 + # print("z_index ---> ",cut_w, "cut_h ---> ",cut_h, "cut_s ---> ",cut_s) + if x_index == 0: + stack_start_w = x_index * opt.gap_x + stack_end_w = x_index * opt.gap_x + opt.patch_x - cut_w + patch_start_w = 0 + patch_end_w = opt.patch_x - cut_w + elif x_index == num_w - 1: + stack_start_w = x_index * opt.gap_x + cut_w + stack_end_w = x_index * opt.gap_x + opt.patch_x + patch_start_w = cut_w + patch_end_w = opt.patch_x + else: + stack_start_w = x_index * opt.gap_x + cut_w + stack_end_w = x_index * opt.gap_x + opt.patch_x - cut_w + patch_start_w = cut_w + patch_end_w = opt.patch_x - cut_w + + if y_index == 0: + stack_start_h = y_index * opt.gap_y + stack_end_h = y_index * opt.gap_y + opt.patch_y - cut_h + patch_start_h = 0 + patch_end_h = opt.patch_y - cut_h + elif y_index == num_h - 1: + stack_start_h = y_index * opt.gap_y + cut_h + stack_end_h = y_index * opt.gap_y + opt.patch_y + patch_start_h = cut_h + patch_end_h = opt.patch_y + else: + stack_start_h = y_index * opt.gap_y + cut_h + stack_end_h = y_index * opt.gap_y + opt.patch_y - cut_h + patch_start_h = cut_h + patch_end_h = opt.patch_y - cut_h + + if 
z_index == 0: + stack_start_s = z_index * opt.gap_t + stack_end_s = z_index * opt.gap_t + opt.patch_t - cut_s + patch_start_s = 0 + patch_end_s = opt.patch_t - cut_s + elif z_index == num_s - 1: + stack_start_s = z_index * opt.gap_t + cut_s + stack_end_s = z_index * opt.gap_t + opt.patch_t + patch_start_s = cut_s + patch_end_s = opt.patch_t + else: + stack_start_s = z_index * opt.gap_t + cut_s + stack_end_s = z_index * opt.gap_t + opt.patch_t - cut_s + patch_start_s = cut_s + patch_end_s = opt.patch_t - cut_s + return int(stack_start_w), int(stack_end_w), int(patch_start_w), int(patch_end_w), \ + int(stack_start_h), int(stack_end_h), int(patch_start_h), int(patch_end_h), \ + int(stack_start_s), int(stack_end_s), int(patch_start_s), int(patch_end_s) + + +def get_first_filename(img_dir): + """ + Acquire the the first filename of the image directory + Args: + img_dir : image directory name + Return: + first_filename : the first filename of the image directory + """ + train_list = list(os.walk(img_dir, topdown=False))[-1][-1] + first_filename = img_dir + '/' + train_list[0] + return first_filename + + +def download_demo(download_filename): + save_img_folder='datasets/'+download_filename+'_demo' + save_pth_name = download_filename + '_best_model_demo' + save_pth_folder='pth/'+save_pth_name + + if not os.path.exists(save_img_folder): + os.makedirs(save_img_folder) + if not os.path.exists(save_pth_folder): + os.makedirs(save_pth_folder) + file_dict = {'simulate_-2.51dBSNR_1000frames': [ + 'https://zenodo.org/record/5790790/files/noise_2RPN_-2.51dBSNR_1000frames_.tif?download=1', + 'https://zenodo.org/record/5790790/files/E_13_Iter_6198.pth?download=1', + 'https://zenodo.org/record/5790790/files/para.yaml?download=1'], + 'ATP_2D': [ + 'https://zenodo.org/record/5808571/files/zoom3.0_15Hz_0.65power_20umUnderrInjury_lowSNR_MC_corrBidir.tif?download=1', + 'https://zenodo.org/record/5808571/files/ATP_2D_E_20_Iter_6075.pth?download=1', + 'https://zenodo.org/record/5808571/files/ATP_2D_para.yaml?download=1'], + 'ATP_3D':['https://zenodo.org/record/5808571/files/zoom2.0_3DATP_30Hz_2umStep_60umRange_30Slice_0.1power_channel1_slice22.tif?download=1', + 'https://zenodo.org/record/5808571/files/ATP_3D_E_20_Iter_6350.pth?download=1', + 'https://zenodo.org/record/5808571/files/ATP_3D_para.yaml?download=1'], + 'NP_2D':['https://zenodo.org/record/5808571/files/02_neutrophil_0.25power_zoom4.0_10Hz_lowSNR.tif?download=1', + 'https://zenodo.org/record/5808571/files/NP_2D_E_20_Iter_6150.pth?download=1', + 'https://zenodo.org/record/5808571/files/NP_2D_para.yaml?download=1'], + 'NP_3D':['https://zenodo.org/record/5808571/files/zoom4.5_dz2um_slice15_power0.07_gan0.7_channel23_channel1_slice5.tif?download=1', + 'https://zenodo.org/record/5808571/files/NP_3D_E_20_Iter_6075.pth?download=1', + 'https://zenodo.org/record/5808571/files/NP_3D_para.yaml?download=1'], + 'mouse_spine':['https://zenodo.org/record/5808571/files/1_GCaMP6f_zoom4.5_50mWpower_40umdepth_30Hz_978x492x6500_lowSNR.tif?download=1', + 'https://zenodo.org/record/5808571/files/mouse_spine_E_30_Iter_6060.pth?download=1', + 'https://zenodo.org/record/5808571/files/mouse_spine_para.yaml?download=1'], + 'fish_localbrain': [ + 'https://zenodo.org/record/5808571/files/fish_localbrain_demo.zip?download=1', + 'https://zenodo.org/record/5808571/files/fish_localbrain_E_20_Iter_6175.pth?download=1', + 'https://zenodo.org/record/5808571/files/fish_localbrain_para.yaml?download=1'], + 'fish_wholebrain': [ + 
'https://zenodo.org/record/5808571/files/fish_wholebrain_demo.zip?download=1', + 'https://zenodo.org/record/5808571/files/fish_wholebrain_E_20_Iter_6140.pth?download=1', + 'https://zenodo.org/record/5808571/files/fish_wholebrain_para.yaml?download=1'], + 'drosophila': [ + 'https://zenodo.org/record/5808571/files/drosophila_demo.zip?download=1', + 'https://zenodo.org/record/5808571/files/drosophila_E_30_Iter_6016.pth?download=1', + 'https://zenodo.org/record/5808571/files/drosophila_para.yaml?download=1'], + } + + url_sum = file_dict[download_filename] + + if not os.path.exists(save_img_folder+ '/' + download_filename +'.tif'): + if (download_filename == 'fish_localbrain') | (download_filename == 'fish_wholebrain') | (download_filename == 'drosophila'): + zip_path='datasets/' + download_filename + '_demo.zip' + gdown.download(url_sum[0], zip_path , quiet=False,resume=True) + with zipfile.ZipFile(zip_path, 'r') as zip_ref: + zip_ref.extractall('datasets') + os.remove(zip_path) + else: + gdown.download(url_sum[0], save_img_folder+ '/' + download_filename +'.tif', quiet=False,resume=True) + if not os.path.exists(save_pth_folder+ '/' + 'best_model.pth'): + gdown.download(url_sum[1], save_pth_folder+ '/' + 'best_model.pth', quiet=False,resume=True) + if not os.path.exists(save_pth_folder+ '/' + 'best_model.yaml'): + gdown.download(url_sum[2], save_pth_folder+ '/' + 'best_model.yaml', quiet=False,resume=True) + return save_img_folder,save_pth_name diff --git a/DeepCAD_RT_pytorch/demo_test_pipeline.py b/DeepCAD_RT_pytorch/demo_test_pipeline.py new file mode 100644 index 0000000..5672562 --- /dev/null +++ b/DeepCAD_RT_pytorch/demo_test_pipeline.py @@ -0,0 +1,72 @@ +""" +This file will help to demonstrate pipeline for testing microscopy data using the DeepCAD-RT algorithm. +The demo shows how to construct the params and call the relevant functions for testing DeepCAD-RT network. +The demo will automatically download tif file and corresponding model file for demo testing. +See inside for details. + +* This demo is also available as a jupyter notebook (see demo_test_pipeline.ipynb) and Colab notebook (see +DeepCAD_RT_demo_colab.ipynb) + +More information can be found in the companion paper. +""" +from deepcad.test_collection import testing_class +from deepcad.movie_display import display +from deepcad.utils import get_first_filename,download_demo + +# %% Select file(s) to be processed (download if not present) +download_demo_file = True +if download_demo_file: + file_name='fish_localbrain' # select the demo file you want to test (e.g. 'ATP_3D', 'fish_localbrain', 'NP_3D', ...) + datasets_path, denoise_model = download_demo(download_filename=file_name) +else: + datasets_path = 'datasets/fish_localbrain_demo' # folder containing tif files for testing + denoise_model = 'fish_localbrain_demo_202203051620' # A folder containing pth models to be tested + +# %% First setup some parameters for testing +test_datasize = 300 # the number of slices to be tested +GPU = '1' # the index of GPU you will use for computation (e.g. '0', '0,1', '0,1,2') +patch_xy = 150 # the width and height of 3D patches +patch_t = 150 # the time dimension of 3D patches +overlap_factor = 0.4 # the overlap factor between two adjacent patches +num_workers = 4 # if you use Windows system, set this to 0. 
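# Editorial sketch (illustrative, not part of the patch): how the patch settings above are
# turned into the tiling grid by testing_class.set_params(); the (patch - gap) / 2 margin is
# the interior border shared by neighbouring patches (cf. cut_w in deepcad/utils.py):

gap_xy = int(patch_xy * (1 - overlap_factor))   # int(150 * 0.6) = 90 pixels between patch origins
gap_t = int(patch_t * (1 - overlap_factor))     # 90 frames between patch origins in time
margin = (patch_xy - gap_xy) / 2                # 30-pixel overlap trimmed on interior borders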
+ +# %% Setup some parameters for result visualization during testing period (optional) +visualize_images_per_epoch = True # choose whether to display inference performance after each epoch +save_test_images_per_epoch = True # choose whether to save inference image after each epoch in pth path + +# %% Play the demo noise movie (optional) +# playing the first noise movie using opencv. +display_images = False + +if display_images: + display_filename = get_first_filename(datasets_path) + print('\033[1;31mDisplaying the first raw file -----> \033[0m') + print(display_filename) + display_length = 500 # the frames number of the noise movie + # normalize the image and display + display(display_filename, display_length=display_length, norm_min_percent=0.5, norm_max_percent=99.8) + +test_dict = { + # dataset dependent parameters + 'patch_x': patch_xy, + 'patch_y': patch_xy, + 'patch_t': patch_t, + 'overlap_factor':overlap_factor, + 'scale_factor': 1, # the factor for image intensity scaling + 'test_datasize': test_datasize, + 'datasets_path': datasets_path, + 'pth_dir': './pth', # pth file root path + 'denoise_model' : denoise_model, + 'output_dir' : './results', # result file root path + # network related parameters + 'fmap': 16, # the number of feature maps + 'GPU': GPU, + 'num_workers': num_workers, + 'visualize_images_per_epoch': visualize_images_per_epoch, + 'save_test_images_per_epoch': save_test_images_per_epoch +} +# %%% Testing preparation +# first we create a testing class object with the specified parameters +tc = testing_class(test_dict) +# start the testing process +tc.run() diff --git a/DeepCAD_RT_pytorch/demo_train_pipeline.py b/DeepCAD_RT_pytorch/demo_train_pipeline.py new file mode 100644 index 0000000..4311cf3 --- /dev/null +++ b/DeepCAD_RT_pytorch/demo_train_pipeline.py @@ -0,0 +1,77 @@ +""" +This file will demonstrate pipeline for training microscopy data using the DeepCAD-RT algorithm. +The demo shows how to construct the params and call the relevant functions for training DeepCAD-RT network. +The demo will automatically download tif file for demo training. +See inside for details. + +* This demo is also available as a jupyter notebook (see demo_train_pipeline.ipynb) and Colab notebook (see +DeepCAD_RT_demo_colab.ipynb) + +More information can be found in the companion paper. +""" + +from deepcad.train_collection import training_class +from deepcad.movie_display import display +from deepcad.utils import get_first_filename,download_demo + +# %% Select file(s) to be processed (download if not present) +download_demo_file = True +if download_demo_file: + file_name='fish_localbrain' # select the demo file to be trained (e.g. 'ATP_3D', 'fish_localbrain', 'NP_3D', ...) + datasets_path, _ = download_demo(download_filename=file_name) +else: + datasets_path = 'datasets/fish_localbrain_demo' # folder containing tif files for training + +# %% First setup some parameters for training +n_epochs = 10 # the number of training epochs +GPU = '1' # the index of GPU used for computation (e.g. '0', '0,1', '0,1,2') +train_datasets_size = 6000 # dataset size for training (the number of patches) +patch_xy = 150 # the width and height of 3D patches +patch_t = 150 # the time dimension of 3D patches +overlap_factor = 0.25 # the overlap factor between two adjacent patches +pth_dir = './pth' # pth file and visualization result file path +num_workers = 4 # if you use Windows system, set this to 0. 
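# Editorial sketch (illustrative, not part of the patch): how train_datasets_size above is
# converted into a temporal sampling step by training_class.get_gap_t(), assuming a
# hypothetical single stack of 6000 frames at 490 x 490 pixels:

import math
gap_xy = int(patch_xy * (1 - overlap_factor))                 # int(150 * 0.75) = 112
w_num = math.floor((490 - patch_xy) / gap_xy) + 1             # 4 patch columns
h_num = math.floor((490 - patch_xy) / gap_xy) + 1             # 4 patch rows
s_num = math.ceil(train_datasets_size / w_num / h_num / 1)    # 375 temporal positions needed (1 stack)
gap_t = math.floor((6000 - patch_t * 2) / (s_num - 1))        # 15-frame step (training patches span 2 * patch_t frames)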
+ +# %% Setup some parameters for result visualization during training period (optional) +visualize_images_per_epoch = True # choose whether to show inference performance after each epoch +save_test_images_per_epoch = True # choose whether to save inference image after each epoch in pth path + +# %% Play the demo noise movie (optional) +# playing the first noise movie using opencv. +display_images = True + +if display_images: + display_filename = get_first_filename(datasets_path) + print('\033[1;31mDisplaying the first raw file -----> \033[0m') + print(display_filename) + display_length = 300 # the frames number of the noise movie + # normalize the image and display + display(display_filename, display_length=display_length, norm_min_percent=1, norm_max_percent=98) + +train_dict = { + # dataset dependent parameters + 'patch_x': patch_xy, + 'patch_y': patch_xy, + 'patch_t': patch_t, + 'overlap_factor':overlap_factor, + 'scale_factor': 1, # the factor for image intensity scaling + 'select_img_num': 100000, # select the number of images used for training (use all frames by default) + 'train_datasets_size': train_datasets_size, + 'datasets_path': datasets_path, + 'pth_dir': pth_dir, + # network related parameters + 'n_epochs': n_epochs, + 'lr': 0.00005, # initial learning rate + 'b1': 0.5, # Adam: bata1 + 'b2': 0.999, # Adam: bata2 + 'fmap': 16, # the number of feature maps + 'GPU': GPU, + 'num_workers': num_workers, + 'visualize_images_per_epoch': visualize_images_per_epoch, + 'save_test_images_per_epoch': save_test_images_per_epoch +} +# %%% Training preparation +# first we create a training class object with the specified parameters +tc = training_class(train_dict) +# start the training process +tc.run() diff --git a/DeepCAD_RT_pytorch/notebooks/DeepCAD_RT_demo_colab.ipynb b/DeepCAD_RT_pytorch/notebooks/DeepCAD_RT_demo_colab.ipynb new file mode 100644 index 0000000..f7d65a3 --- /dev/null +++ b/DeepCAD_RT_pytorch/notebooks/DeepCAD_RT_demo_colab.ipynb @@ -0,0 +1,586 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "DeepCAD_RT_demo_colab.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# **DeepCAD-RT: A versatile toolbox for microscopy imaging denoising**\n", + "\n", + "\n", + "---" + ], + "metadata": { + "id": "yFtyDjHJucBd" + } + }, + { + "cell_type": "markdown", + "source": [ + "## **1. 
Initialization**" + ], + "metadata": { + "id": "bhvus4aAqBl7" + } + }, + { + "cell_type": "markdown", + "source": [ + "Install DeepCAD-RT (by default the torch GPU version is installed in COLAB notebook)" + ], + "metadata": { + "id": "YMo5KX_0qhkR" + } + }, + { + "cell_type": "code", + "source": [ + "!pip install deepcad\n" + ], + "metadata": { + "id": "UwcoNRB4UmWx", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "b639b4f9-bf69-4f9d-ce3b-878384ea92fb" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Collecting deepcad\n", + " Downloading deepcad-0.4.5.tar.gz (43 kB)\n", + "\u001b[K |████████████████████████████████| 43 kB 899 kB/s \n", + "\u001b[?25hRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from deepcad) (3.2.2)\n", + "Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from deepcad) (3.13)\n", + "Requirement already satisfied: tifffile in /usr/local/lib/python3.7/dist-packages (from deepcad) (2021.11.2)\n", + "Requirement already satisfied: scikit-image in /usr/local/lib/python3.7/dist-packages (from deepcad) (0.18.3)\n", + "Requirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (from deepcad) (4.1.2.30)\n", + "Collecting csbdeep\n", + " Downloading csbdeep-0.6.3-py2.py3-none-any.whl (73 kB)\n", + "\u001b[K |████████████████████████████████| 73 kB 1.7 MB/s \n", + "\u001b[?25hCollecting gdown==4.2.0\n", + " Downloading gdown-4.2.0.tar.gz (13 kB)\n", + " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", + " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", + " Preparing wheel metadata ... \u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from gdown==4.2.0->deepcad) (1.15.0)\n", + "Requirement already satisfied: requests[socks] in /usr/local/lib/python3.7/dist-packages (from gdown==4.2.0->deepcad) (2.23.0)\n", + "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.7/dist-packages (from gdown==4.2.0->deepcad) (4.6.3)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from gdown==4.2.0->deepcad) (3.6.0)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from gdown==4.2.0->deepcad) (4.63.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from csbdeep->deepcad) (1.21.5)\n", + "Collecting h5py<3\n", + " Downloading h5py-2.10.0-cp37-cp37m-manylinux1_x86_64.whl (2.9 MB)\n", + "\u001b[K |████████████████████████████████| 2.9 MB 31.6 MB/s \n", + "\u001b[?25hRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from csbdeep->deepcad) (1.4.1)\n", + "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->deepcad) (3.0.7)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->deepcad) (0.11.0)\n", + "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->deepcad) (2.8.2)\n", + "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->deepcad) (1.3.2)\n", + "Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests[socks]->gdown==4.2.0->deepcad) (3.0.4)\n", + "Requirement 
already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests[socks]->gdown==4.2.0->deepcad) (1.24.3)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests[socks]->gdown==4.2.0->deepcad) (2021.10.8)\n", + "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests[socks]->gdown==4.2.0->deepcad) (2.10)\n", + "Requirement already satisfied: PySocks!=1.5.7,>=1.5.6 in /usr/local/lib/python3.7/dist-packages (from requests[socks]->gdown==4.2.0->deepcad) (1.7.1)\n", + "Requirement already satisfied: pillow!=7.1.0,!=7.1.1,>=4.3.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->deepcad) (7.1.2)\n", + "Requirement already satisfied: imageio>=2.3.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->deepcad) (2.4.1)\n", + "Requirement already satisfied: PyWavelets>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from scikit-image->deepcad) (1.2.0)\n", + "Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->deepcad) (2.6.3)\n", + "Building wheels for collected packages: deepcad, gdown\n", + " Building wheel for deepcad (setup.py) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for deepcad: filename=deepcad-0.4.5-py3-none-any.whl size=41746 sha256=db743e0757bfd0fddc98a45e2cf67967f4378a4a45d47490007a7f0d9ccc4123\n", + " Stored in directory: /root/.cache/pip/wheels/2f/24/56/5c8484e59c414268954e4caccc263d20d633f98f856afd8601\n", + " Building wheel for gdown (PEP 517) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for gdown: filename=gdown-4.2.0-py3-none-any.whl size=14262 sha256=b5491f551c69b56ca7b3d96b0de0e2d775107ff8579b57d362c73d4f28b1a1e3\n", + " Stored in directory: /root/.cache/pip/wheels/8c/17/ff/58721d1fabdb87c21a0529948cf39e2be9af90ddbe4ad65944\n", + "Successfully built deepcad gdown\n", + "Installing collected packages: h5py, gdown, csbdeep, deepcad\n", + " Attempting uninstall: h5py\n", + " Found existing installation: h5py 3.1.0\n", + " Uninstalling h5py-3.1.0:\n", + " Successfully uninstalled h5py-3.1.0\n", + " Attempting uninstall: gdown\n", + " Found existing installation: gdown 4.2.2\n", + " Uninstalling gdown-4.2.2:\n", + " Successfully uninstalled gdown-4.2.2\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "tensorflow 2.8.0 requires tf-estimator-nightly==2.8.0.dev2021122109, which is not installed.\u001b[0m\n", + "Successfully installed csbdeep-0.6.3 deepcad-0.4.5 gdown-4.2.0 h5py-2.10.0\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "## **2. Check GPU support**" + ], + "metadata": { + "id": "QKqVmDcFuFxQ" + } + }, + { + "cell_type": "markdown", + "source": [ + "The implement of DeepCAD-RT need GPU support. If you do not have GPU support, please follow the following instruction." + ], + "metadata": { + "id": "HNEc07r4ZIu_" + } + }, + { + "cell_type": "code", + "source": [ + "import torch\n", + "\n", + "if torch.cuda.is_available():\n", + " print('\\033[1;31mGPU accessiable. 
Use GPU for computation.\\033[0m')\n", + " gpu_id = torch.cuda.current_device()\n", + " total_memory = torch.cuda.get_device_properties(gpu_id).total_memory/1024/1024\n", + " alloc_memory = torch.cuda.memory_allocated(0)/1024/1024\n", + " print('GPU ID: ', gpu_id, '|', torch.cuda.get_device_name(), \\\n", + " '| Memory: {:.0f} MB'.format(total_memory))\n", + " ! nvcc --version\n", + " print('PyTorch version: ', torch.__version__)\n", + "else:\n", + " print('\\033[1;31mNo GPU support. Please enable GPUs for the notebook:\\033[0m')\n", + " print(' 1. Navigate to Edit → Notebook Settings')\n", + " print(' 2. Select GPU from the Hardware Accelerator drop-down')" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "odTKv1zeWub0", + "outputId": "cd97848b-09a1-40fc-eecb-d4c069e77d16" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;31mGPU accessiable. Use GPU for computation.\u001b[0m\n", + "GPU ID: 0 | Tesla K80 | Memory: 11441 MB\n", + "nvcc: NVIDIA (R) Cuda compiler driver\n", + "Copyright (c) 2005-2020 NVIDIA Corporation\n", + "Built on Mon_Oct_12_20:09:46_PDT_2020\n", + "Cuda compilation tools, release 11.1, V11.1.105\n", + "Build cuda_11.1.TC455_06.29190527_0\n", + "PyTorch version: 1.10.0+cu111\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "## **Option 1: Start the training process**" + ], + "metadata": { + "id": "06EUtJn60zu3" + } + }, + { + "cell_type": "code", + "source": [ + "from deepcad.train_collection import training_class\n", + "from deepcad.movie_display import display\n", + "from deepcad.utils import get_first_filename, download_demo" + ], + "metadata": { + "id": "RyuU3Amg2TQg" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Select file(s) to be processed (download if not present)\n", + "\n", + "* The `download_demo` function will download the specific file for you and return the complete path to the image and the best pretrained model files, which will be stored in your `/content/datasets` and `/content/pth` directory, respectively. \n", + "\n", + "* If you adapt this demo for your own data, make sure to pass the datasets folder name of your file(s)." + ], + "metadata": { + "id": "_3gC347DEmyZ" + } + }, + { + "cell_type": "code", + "source": [ + "download_demo_file = True\n", + "if download_demo_file:\n", + " file_name='fish_localbrain' # select the demo file you want to train (e.g. 
'ATP_3D', 'fish_localbrain', 'NP_3D', ...)\n", + " datasets_path, _ =download_demo(download_filename=file_name)\n", + "else:\n", + " datasets_path = 'datasets/2RPN-1000' # folder containing files for training" + ], + "metadata": { + "id": "niNqCREo_-uo", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "223f0780-646f-45fb-efa6-a6182e7def17" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "Downloading...\n", + "From: https://zenodo.org/record/5808571/files/fish_localbrain_demo.zip?download=1\n", + "To: /content/datasets/fish_localbrain_demo.zip\n", + "100%|██████████| 2.54G/2.54G [04:12<00:00, 10.1MB/s]\n", + "Downloading...\n", + "From: https://zenodo.org/record/5808571/files/fish_localbrain_E_20_Iter_6175.pth?download=1\n", + "To: /content/pth/fish_localbrain_best_model_demo/best_model.pth\n", + "100%|██████████| 4.10M/4.10M [00:06<00:00, 605kB/s]\n", + "Downloading...\n", + "From: https://zenodo.org/record/5808571/files/fish_localbrain_para.yaml?download=1\n", + "To: /content/pth/fish_localbrain_best_model_demo/best_model.yaml\n", + "100%|██████████| 337/337 [00:00<00:00, 689kB/s]\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Setup some parameters for training\n", + "Default setting shows the parameters suitable for demo file. You can change the training parameters accroding to your training data and device. For supervising the training process, you can set the flag for result storage to `True`." + ], + "metadata": { + "id": "2zh3EGyxFEOy" + } + }, + { + "cell_type": "code", + "source": [ + "n_epochs = 10 # number of training epochs\n", + "GPU = '0' # the index of GPU you will use for computation (e.g. '0', '0,1', '0,1,2')\n", + "train_datasets_size = 6000 # datasets size for training (how many patches)\n", + "patch_xyt = 150 # the width, height, length of 3D patches (use isotropic patch size by default)\n", + "overlap_factor = 0.4 # the overlap factor between two adjacent patches\n", + "num_workers = 2 # if you use Windows system, set this to 0.\n", + "\n", + "# Setup some parameters for result visualization during training period (optional)\n", + "save_test_images_per_epoch = True # flag to select whether save inference image after each epoch in pth path" + ], + "metadata": { + "id": "t_dBgQa7B86l" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Create a training class object with the specified parameters\n", + "You can creating a parameters object by passing all the parameters as a single dictionary. Parameters not defined in the dictionary will assume their default values. If you want to change these parameters, please refer to DeepCAD-RT Development mode." 
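+ "\n",
+ "Note that `overlap_factor` sets the stride between adjacent 3D patches: gap = patch size * (1 - overlap_factor). With the values above (patch size 150, overlap_factor 0.4) this gives gap_x = gap_y = gap_t = 150 * 0.6 = 90, which is what the printed training parameters below report."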
+ ], + "metadata": { + "id": "_tKOdVTsFMHY" + } + }, + { + "cell_type": "code", + "source": [ + "train_dict = {\n", + " # dataset dependent parameters\n", + " 'patch_x': patch_xyt, # you can change these params if use anisotropy patch size\n", + " 'patch_y': patch_xyt,\n", + " 'patch_t': patch_xyt,\n", + " 'overlap_factor':overlap_factor,\n", + " 'scale_factor': 1, # the factor for image intensity scaling\n", + " 'select_img_num': 2000, # select the number of images used for training (use 2000 frames in colab)\n", + " 'train_datasets_size': train_datasets_size,\n", + " 'datasets_path': datasets_path,\n", + " 'pth_dir': './pth',\n", + " \n", + " # network related parameters\n", + " 'n_epochs': n_epochs,\n", + " 'lr': 0.00005, # initial learning rate\n", + " 'b1': 0.5, # Adam: bata1\n", + " 'b2': 0.999, # Adam: bata2\n", + " 'fmap': 16, # number of feature maps\n", + " 'GPU': GPU,\n", + " 'num_workers': num_workers,\n", + " 'visualize_images_per_epoch': False,\n", + " 'save_test_images_per_epoch': save_test_images_per_epoch\n", + "}\n", + "tc = training_class(train_dict)" + ], + "metadata": { + "id": "GdLk5BXiCCY3", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "66b7349b-db50-487f-92e8-e119ba539131" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;31mTraining parameters -----> \u001b[0m\n", + "{'overlap_factor': 0.4, 'datasets_path': 'datasets/fish_localbrain_demo', 'n_epochs': 3, 'fmap': 16, 'output_dir': './results', 'pth_dir': './pth', 'batch_size': 1, 'patch_t': 150, 'patch_x': 150, 'patch_y': 150, 'gap_y': 90, 'gap_x': 90, 'gap_t': 90, 'lr': 5e-05, 'b1': 0.5, 'b2': 0.999, 'GPU': '0', 'ngpu': 1, 'num_workers': 2, 'scale_factor': 1, 'train_datasets_size': 200, 'select_img_num': 2000, 'test_datasize': 400, 'visualize_images_per_epoch': False, 'save_test_images_per_epoch': True}\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Start training\n", + "The training models and visual results can be downloaded in /content/pth folder." 
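+ "\n",
+ "If you want to pull the saved models and test images from Colab to your local machine, one possible way (a sketch assuming the default Colab environment and the `./pth` directory used above) is to zip the folder and use the Colab file API:\n",
+ "\n",
+ "```python\n",
+ "# sketch: archive the pth folder and download it; adjust the path if you changed pth_dir\n",
+ "!zip -r -q pth.zip /content/pth\n",
+ "from google.colab import files\n",
+ "files.download('pth.zip')\n",
+ "```"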
+ ], + "metadata": { + "id": "gnEGOjTOFQPI" + } + }, + { + "cell_type": "code", + "source": [ + "tc.run()" + ], + "metadata": { + "id": "kkIdGXkHCZkD", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "dd2eccaf-4664-4246-fe24-83a31eaf53f4" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;31mImage list for training -----> \u001b[0m\n", + "Total stack number -----> 1\n", + "Noise image name -----> fish_localbrain.tif\n", + "Noise images shape -----> (2000, 492, 492)\n", + "\u001b[1;31mUsing 1 GPU(s) for training -----> \u001b[0m\n", + "[Epoch 1/3] [Batch 208/208] [Total loss: 29865.46, L1 Loss: 170.30, L2 Loss: 59560.63] [ETA: 0:22:58] [Time cost: 685 s] \n", + " Testing model of epoch 1 on the first noisy file ----->\n", + "Testing image name -----> fish_localbrain.tif\n", + "Noise images shape -----> (400, 492, 492)\n", + " [Patch 100/100] [Time Cost: 78 s] [ETA: 0 s] \n", + "[Epoch 2/3] [Batch 208/208] [Total loss: 43497.01, L1 Loss: 201.03, L2 Loss: 86792.98] [ETA: 0:11:33] [Time cost: 1470 s] \n", + " Testing model of epoch 2 on the first noisy file ----->\n", + "Testing image name -----> fish_localbrain.tif\n", + "Noise images shape -----> (400, 492, 492)\n", + " [Patch 100/100] [Time Cost: 78 s] [ETA: 0 s] \n", + "[Epoch 3/3] [Batch 208/208] [Total loss: 37577.43, L1 Loss: 188.65, L2 Loss: 74966.22] [ETA: 0:00:03] [Time cost: 2256 s] \n", + " Testing model of epoch 3 on the first noisy file ----->\n", + "Testing image name -----> fish_localbrain.tif\n", + "Noise images shape -----> (400, 492, 492)\n", + " [Patch 100/100] [Time Cost: 78 s] [ETA: 0 s] \n", + " Train finished. Save all models to disk.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "## **Option 2: Start the testing process**" + ], + "metadata": { + "id": "ltA6wbPeCtFt" + } + }, + { + "cell_type": "code", + "source": [ + "from deepcad.test_collection import testing_class\n", + "from deepcad.movie_display import display\n", + "from deepcad.utils import get_first_filename, download_demo" + ], + "metadata": { + "id": "p96ZCZ6wCzPL" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Select file(s) to be processed (download if not present)\n", + "* The `download_demo` function will download the specific file for you and return the complete path to the image and the best pretrained model files, which will be stored in your `/content/datasets` and `/content/pth` directory, respectively. \n", + "\n", + "* If you adapt this demo for your own data, make sure to pass the datasets folder name of your file(s)." + ], + "metadata": { + "id": "iZpYyVojFVu9" + } + }, + { + "cell_type": "code", + "source": [ + "download_demo_file = True\n", + "if download_demo_file:\n", + " file_name='fish_localbrain' # select the demo file you want to test (e.g. 'ATP_3D', 'fish_localbrain', 'NP_3D', ...)\n", + " datasets_path, denoise_model =download_demo(download_filename=file_name)\n", + "else:\n", + " datasets_path = 'datasets/2RPN-1000' # folder containing files for testing\n", + " denoise_model = '2RPN_202112101128_ov_0.5' # A folder containing models to be tested" + ], + "metadata": { + "id": "2iPngYrODd9_" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Setup some parameters for testing\n", + "Default setting shows the parameters suitable for demo file. 
You can change the testing parameters accroding to your testing data and device." + ], + "metadata": { + "id": "XrJ5Nm1EFaSW" + } + }, + { + "cell_type": "code", + "source": [ + "test_datasize = 300 # the number of slices to be tested\n", + "GPU = '0' # the index of GPU you will use for computation (e.g. '0', '0,1', '0,1,2')\n", + "patch_xyt = 150 # the width, height, length of 3D patches (use isotropic patch size by default)\n", + "overlap_factor = 0.4 # the overlap factor between two adjacent patches\n", + "num_workers = 2 # if you use Windows system, set this to 0." + ], + "metadata": { + "id": "tlRdRHgLDice" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "### Create a testing class object with the specified parameters\n", + "You can create a parameters object by passing all the parameters as a single dictionary. Parameters not defined in the dictionary will assume their default values. If you want to change these parameters, please refer to DeepCAD-RT Development mode." + ], + "metadata": { + "id": "xwuEGn0BFdma" + } + }, + { + "cell_type": "code", + "source": [ + "test_dict = {\n", + " # dataset dependent parameters\n", + " 'patch_x': patch_xyt, # you can change these params if use anisotropy patch size\n", + " 'patch_y': patch_xyt,\n", + " 'patch_t': patch_xyt,\n", + " 'overlap_factor':overlap_factor,\n", + " 'scale_factor': 1, # the factor for image intensity scaling\n", + " 'test_datasize': test_datasize,\n", + " 'datasets_path': datasets_path,\n", + " 'pth_dir': './pth', # pth file root path\n", + " 'denoise_model' : denoise_model,\n", + " 'output_dir' : './results', # result file root path\n", + " # network related parameters\n", + " 'fmap': 16, # number of feature maps\n", + " 'GPU': GPU,\n", + " 'num_workers': num_workers,\n", + " 'visualize_images_per_epoch': False,\n", + " 'save_test_images_per_epoch': True\n", + "}\n", + "\n", + "tc = testing_class(test_dict)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ooKSzMBTDnwp", + "outputId": "52eff028-f544-49db-f84f-401f514c34f6" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;31mTesting parameters -----> \u001b[0m\n", + "{'overlap_factor': 0.4, 'datasets_path': 'datasets/fish_localbrain_demo', 'fmap': 16, 'output_dir': './results', 'pth_dir': './pth', 'batch_size': 1, 'patch_t': 150, 'patch_x': 150, 'patch_y': 150, 'gap_y': 90, 'gap_x': 90, 'gap_t': 90, 'GPU': '0', 'ngpu': 1, 'num_workers': 2, 'scale_factor': 1, 'test_datasize': 300, 'denoise_model': 'fish_localbrain_best_model_demo', 'visualize_images_per_epoch': False, 'save_test_images_per_epoch': True}\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "### Start testing\n", + "The testing result can be downloaded in `/content/results` folder." 
+ ], + "metadata": { + "id": "eIYYoOrlFh_a" + } + }, + { + "cell_type": "code", + "source": [ + "tc.run()" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ives3zjkDsyY", + "outputId": "337290e5-c046-4f85-d3bb-f021b06577d9" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1;31mStacks for processing -----> \u001b[0m\n", + "Total stack number -----> 1\n", + "fish_localbrain.tif\n", + "\u001b[1;31mUsing 1 GPU(s) for testing -----> \u001b[0m\n", + "[Model 1/1, best_model.pth] [Stack 1/1, fish_localbrain.tif] [Patch 75/75] [Time Cost: 57 s] [ETA: 0 s] \n", + " Test finished. Save all results to disk.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "source": [ + "---\n", + "\n", + "## Thank you for using DeepCAD-RT!" + ], + "metadata": { + "id": "Ka4QXIXuUmER" + } + } + ] +} \ No newline at end of file diff --git a/DeepCAD_RT_pytorch/notebooks/demo_test_pipeline.ipynb b/DeepCAD_RT_pytorch/notebooks/demo_test_pipeline.ipynb new file mode 100644 index 0000000..ff9839c --- /dev/null +++ b/DeepCAD_RT_pytorch/notebooks/demo_test_pipeline.ipynb @@ -0,0 +1,230 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DeepCAD-RT testing pipeline \n", + "\n", + "This file will demonstrate pipeline for testing microscopy data using the DeepCAD-RT algorithm.
\n", + "The demo shows how to construct the params and call the relevant functions for testing DeepCAD-RT network. In addition, it will automatically download tif file and corresponding model file for demo testing.
\n", + "\n", + "More information can be found in the companion paper.\n", + "See inside for details." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from deepcad.test_collection import testing_class\n", + "from deepcad.movie_display import display\n", + "from deepcad.utils import get_first_filename, download_demo" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Select file(s) to be processed (download if not present)\n", + "The `download_demo` function will download the specific file for you and return the complete path to the file which will be stored in your `datasets` directory. If you adapt this demo for your data make sure to pass the datasets folder name of your file(s)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Downloading...\n", + "From: https://zenodo.org/record/5790790/files/noise_2RPN_-2.51dBSNR_1000frames_.tif?download=1\n", + "To: E:\\01-LYX\\pipPackage\\DeepCAD_RT_pytorch\\notebooks\\datasets\\simulate_-2.51dBSNR_1000frames_demo\\simulate_-2.51dBSNR_1000frames.tif\n", + "100%|████████████████████████████████████████████████████████████████████████████████| 486M/486M [08:07<00:00, 998kB/s]\n", + "Downloading...\n", + "From: https://zenodo.org/record/5790790/files/E_13_Iter_6198.pth?download=1\n", + "To: E:\\01-LYX\\pipPackage\\DeepCAD_RT_pytorch\\notebooks\\pth\\simulate_-2.51dBSNR_1000frames_best_model_demo\\best_model.pth\n", + "100%|█████████████████████████████████████████████████████████████████████████████| 4.10M/4.10M [00:02<00:00, 1.56MB/s]\n", + "Downloading...\n", + "From: https://zenodo.org/record/5790790/files/para.yaml?download=1\n", + "To: E:\\01-LYX\\pipPackage\\DeepCAD_RT_pytorch\\notebooks\\pth\\simulate_-2.51dBSNR_1000frames_best_model_demo\\best_model.yaml\n", + "100%|██████████████████████████████████████████████████████████████████████████████████| 292/292 [00:00<00:00, 293kB/s]\n" + ] + } + ], + "source": [ + "download_demo_file = True\n", + "if download_demo_file:\n", + " file_name='simulate_-2.51dBSNR_1000frames' # select the demo file you want to test (e.g. 'ATP_3D', 'fish_localbrain', 'NP_3D', ...)\n", + " datasets_path, denoise_model =download_demo(download_filename=file_name)\n", + "else:\n", + " datasets_path = 'datasets/2RPN-1000' # folder containing files for testing\n", + " denoise_model = '2RPN_202112101128_ov_0.5' # A folder containing models to be tested" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up some parameters for testing\n", + "Default setting shows the parameters suitable for demo file. You can change the testing parameters accroding to your testing data and device. For supervising the testing process, you can set the flag for visualization and result storage to `True`." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "test_datasize = 300 # the number of slices to be tested\n", + "GPU = '0' # the index of GPU you will use for computation (e.g. 
'0', '0,1', '0,1,2')\n", + "patch_xy = 150 # the width and height of 3D patches\n", + "patch_t = 150 # the time dimension of 3D patches\n", + "overlap_factor = 0.4 # the overlap factor between two adjacent patches\n", + "num_workers = 0 # if you use Windows system, set this to 0.\n", + "\n", + "# Setup some parameters for result visualization during testing period (optional)\n", + "visualize_images_per_epoch = False # choose whether to display inference performance after each epoch\n", + "save_test_images_per_epoch = True # choose whether to save inference image after each epoch in pth path" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Play the demo noisy movie (optional)\n", + "Play the first noisy movie (optional). This will require loading the movie in memory which in general is not needed by the pipeline. Displaying the movie uses the OpenCV library. " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1;31mDisplaying the first raw file -----> \u001b[0m\n", + "datasets/simulate_-2.51dBSNR_1000frames_demo/simulate_-2.51dBSNR_1000frames.tif\n" + ] + } + ], + "source": [ + "display_images = True\n", + "\n", + "if display_images:\n", + " display_filename = get_first_filename(datasets_path)\n", + " print('\\033[1;31mDisplaying the first raw file -----> \\033[0m')\n", + " print(display_filename)\n", + " display_length = 300 # the frames number of the noise movie\n", + " # normalize the image and display\n", + " display(display_filename, display_length=display_length, norm_min_percent=1, norm_max_percent=98)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create a testing class object with the specified parameters\n", + "You will creat a parameters object by passing all the parameters as a single dictionary. Parameters not defined in the dictionary will assume their default values." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1;31mTesting parameters -----> \u001b[0m\n", + "{'overlap_factor': 0.5, 'datasets_path': 'datasets/simulate_-2.51dBSNR_1000frames_demo', 'fmap': 16, 'output_dir': './results', 'pth_dir': './pth', 'batch_size': 1, 'patch_t': 150, 'patch_x': 150, 'patch_y': 150, 'gap_y': 75, 'gap_x': 75, 'gap_t': 75, 'GPU': '0', 'ngpu': 1, 'num_workers': 0, 'scale_factor': 1, 'test_datasize': 300, 'denoise_model': 'simulate_-2.51dBSNR_1000frames_best_model_demo', 'visualize_images_per_epoch': False, 'save_test_images_per_epoch': True}\n" + ] + } + ], + "source": [ + "test_dict = {\n", + " # dataset dependent parameters\n", + " 'patch_x': patch_xy, # you can change these params if use anisotropy patch size\n", + " 'patch_y': patch_xy,\n", + " 'patch_t': patch_t,\n", + " 'overlap_factor':overlap_factor,\n", + " 'scale_factor': 1, # the factor for image intensity scaling\n", + " 'test_datasize': test_datasize,\n", + " 'datasets_path': datasets_path,\n", + " 'pth_dir': './pth', # pth file root path\n", + " 'denoise_model' : denoise_model,\n", + " 'output_dir' : './results', # result file root path\n", + " # network related parameters\n", + " 'fmap': 16, # number of feature maps\n", + " 'GPU': GPU,\n", + " 'num_workers': num_workers,\n", + " 'visualize_images_per_epoch': visualize_images_per_epoch,\n", + " 'save_test_images_per_epoch': save_test_images_per_epoch\n", + "}\n", + "\n", + "tc = testing_class(test_dict)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Start the testing process" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1;31mStacks for processing -----> \u001b[0m\n", + "Total stack number -----> 1\n", + "simulate_-2.51dBSNR_1000frames.tif\n", + "\u001b[1;31mUsing 1 GPU(s) for testing -----> \u001b[0m\n", + "[Model 1/1, best_model.pth] [Stack 1/1, simulate_-2.51dBSNR_1000frames.tif] [Patch 108/108] [Time Cost: 15 s] [ETA: 0 s] \n", + " Test finished. Save all results to disk.\n" + ] + } + ], + "source": [ + "tc.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:deepcadpip2]", + "language": "python", + "name": "conda-env-deepcadpip2-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/DeepCAD_RT_pytorch/notebooks/demo_train_pipeline.ipynb b/DeepCAD_RT_pytorch/notebooks/demo_train_pipeline.ipynb new file mode 100644 index 0000000..01423da --- /dev/null +++ b/DeepCAD_RT_pytorch/notebooks/demo_train_pipeline.ipynb @@ -0,0 +1,254 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "# DeepCAD-RT training pipeline \n", + "\n", + "This file will demonstrate pipeline for training microscopy data using the DeepCAD-RT algorithm.
\n", + "The demo shows how to construct the params and call the relevant functions for training DeepCAD-RT network. In addition, it will automatically download tif file for demo training.
\n", + "\n", + "More information can be found in the companion paper.\n", + "See inside for details." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from deepcad.train_collection import training_class\n", + "from deepcad.movie_display import display\n", + "from deepcad.utils import get_first_filename,download_demo" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Select file(s) to be processed (download if not present)\n", + "The `download_demo` function will download the specific file for you and return the complete path to the file which will be stored in your `datasets` directory. If you adapt this demo for your own data make sure to pass the datasets folder name of your file(s)." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "download_demo_file = False\n", + "if download_demo_file:\n", + " file_name='fish_localbrain' # select the demo file to be trained (e.g. 'ATP_3D', 'fish_localbrain', 'NP_3D', ...)\n", + " datasets_path, _ =download_demo(download_filename=file_name)\n", + "else:\n", + " datasets_path = 'datasets/simulate_-2.51dBSNR_1000frames_demo' # folder containing files for training" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Set up some parameters for training\n", + "Default setting shows the parameters suitable for demo file. You can change the training parameters accroding to your training data and device. For supervising the training process, you can set the flag for visualization and result storage to `True`." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "n_epochs = 5 # number of training epochs\n", + "GPU = '0' # the index of GPU you will use for computation (e.g. '0', '0,1', '0,1,2')\n", + "train_datasets_size = 6000 # datasets size for training (how many patches)\n", + "patch_xy = 150 # the width and height of 3D patches\n", + "patch_t = 150 # the time dimension of 3D patches\n", + "overlap_factor = 0.4 # the overlap factor between two adjacent patches\n", + "pth_dir = './pth' # pth file and result visualization path\n", + "num_workers = 0 # if you use Windows system, set this to 0.\n", + "\n", + "# Setup some parameters for result visualization during training period (optional)\n", + "visualize_images_per_epoch = True # choose whether to show inference performance after each epoch\n", + "save_test_images_per_epoch = True # choose whether to save inference image after each epoch in pth path" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Play the demo noisy movie (optional)\n", + "Play the first noisy movie (optional). This will require loading the movie in memory which in general is not needed by the pipeline. Displaying the movie uses the OpenCV library. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1;31mDisplaying the first raw file -----> \u001b[0m\n", + "datasets/simulate_-2.51dBSNR_1000frames_demo/simulate_-2.51dBSNR_1000frames.tif\n" + ] + } + ], + "source": [ + "display_images = True\n", + "\n", + "if display_images:\n", + " display_filename = get_first_filename(datasets_path)\n", + " print('\\033[1;31mDisplaying the first raw file -----> \\033[0m')\n", + " print(display_filename)\n", + " display_length = 300 # the frames number of the noise movie\n", + " # normalize the image and display\n", + " display(display_filename, display_length=display_length, norm_min_percent=1, norm_max_percent=98)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create a training class object with the specified parameters\n", + "You will creat a parameters object by passing all the parameters as a single dictionary. Parameters not defined in the dictionary will assume their default values." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1;31mTraining parameters -----> \u001b[0m\n", + "{'overlap_factor': 0.4, 'datasets_path': 'datasets/simulate_-2.51dBSNR_1000frames_demo', 'n_epochs': 5, 'fmap': 16, 'output_dir': './results', 'pth_dir': './pth', 'batch_size': 1, 'patch_t': 150, 'patch_x': 150, 'patch_y': 150, 'gap_y': 90, 'gap_x': 90, 'gap_t': 90, 'lr': 5e-05, 'b1': 0.5, 'b2': 0.999, 'GPU': '0', 'ngpu': 1, 'num_workers': 0, 'scale_factor': 1, 'train_datasets_size': 200, 'select_img_num': 1000000, 'test_datasize': 400, 'visualize_images_per_epoch': True, 'save_test_images_per_epoch': True}\n" + ] + } + ], + "source": [ + "train_dict = {\n", + " # dataset dependent parameters\n", + " 'patch_x': patch_xy, # you can change these params if use anisotropy patch size\n", + " 'patch_y': patch_xy,\n", + " 'patch_t': patch_t,\n", + " 'overlap_factor':overlap_factor,\n", + " 'scale_factor': 1, # the factor for image intensity scaling\n", + " 'select_img_num': 1000000, # select the number of images used for training (use all frames by default)\n", + " 'train_datasets_size': train_datasets_size,\n", + " 'datasets_path': datasets_path,\n", + " 'pth_dir': pth_dir,\n", + " \n", + " # network related parameters\n", + " 'n_epochs': n_epochs,\n", + " 'lr': 0.00005, # initial learning rate\n", + " 'b1': 0.5, # Adam: bata1\n", + " 'b2': 0.999, # Adam: bata2\n", + " 'fmap': 16, # number of feature maps\n", + " 'GPU': GPU,\n", + " 'num_workers': num_workers,\n", + " 'visualize_images_per_epoch': visualize_images_per_epoch,\n", + " 'save_test_images_per_epoch': save_test_images_per_epoch\n", + "}\n", + "\n", + "tc = training_class(train_dict)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Start the training process" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[1;31mImage list for training -----> \u001b[0m\n", + "Total stack number -----> 1\n", + "Noise image name -----> simulate_-2.51dBSNR_1000frames.tif\n", + "Noise images shape -----> (1000, 490, 490)\n", + "\u001b[1;31mUsing 1 GPU(s) for training -----> \u001b[0m\n", + "[Epoch 1/5] [Batch 208/208] [Total loss: 133879.38, L1 Loss: 431.95, L2 Loss: 267326.81] [ETA: 0:08:59] [Time cost: 134 s] \n", + " Testing 
model of epoch 1 on the first noisy file ----->\n", + "Testing image name -----> simulate_-2.51dBSNR_1000frames.tif\n", + "Noise images shape -----> (400, 490, 490)\n", + " [Patch 100/100] [Time Cost: 15 s] [ETA: 0 s] \n", + " Displaying the first denoised file ----->\n", + "\n", + "[Epoch 2/5] [Batch 208/208] [Total loss: 146795.33, L1 Loss: 447.59, L2 Loss: 293143.06] [ETA: 0:06:48] [Time cost: 307 s] \n", + " Testing model of epoch 2 on the first noisy file ----->\n", + "Testing image name -----> simulate_-2.51dBSNR_1000frames.tif\n", + "Noise images shape -----> (400, 490, 490)\n", + " [Patch 100/100] [Time Cost: 14 s] [ETA: 0 s] \n", + " Displaying the first denoised file ----->\n", + "\n", + "[Epoch 3/5] [Batch 208/208] [Total loss: 166972.73, L1 Loss: 478.81, L2 Loss: 333466.66] [ETA: 0:04:26] [Time cost: 479 s] \n", + " Testing model of epoch 3 on the first noisy file ----->\n", + "Testing image name -----> simulate_-2.51dBSNR_1000frames.tif\n", + "Noise images shape -----> (400, 490, 490)\n", + " [Patch 100/100] [Time Cost: 15 s] [ETA: 0 s] \n", + " Displaying the first denoised file ----->\n", + "\n", + "[Epoch 4/5] [Batch 208/208] [Total loss: 139002.17, L1 Loss: 430.49, L2 Loss: 277573.84] [ETA: 0:02:30] [Time cost: 652 s] \n", + " Testing model of epoch 4 on the first noisy file ----->\n", + "Testing image name -----> simulate_-2.51dBSNR_1000frames.tif\n", + "Noise images shape -----> (400, 490, 490)\n", + " [Patch 100/100] [Time Cost: 15 s] [ETA: 0 s] \n", + " Displaying the first denoised file ----->\n", + "\n", + "[Epoch 5/5] [Batch 208/208] [Total loss: 166888.28, L1 Loss: 479.46, L2 Loss: 333297.09] [ETA: 0:00:00] [Time cost: 827 s] \n", + " Testing model of epoch 5 on the first noisy file ----->\n", + "Testing image name -----> simulate_-2.51dBSNR_1000frames.tif\n", + "Noise images shape -----> (400, 490, 490)\n", + " [Patch 100/100] [Time Cost: 15 s] [ETA: 0 s] \n", + " Displaying the first denoised file ----->\n", + "\n", + " Train finished. Save all models to disk.\n" + ] + } + ], + "source": [ + "tc.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:deepcadpip2]", + "language": "python", + "name": "conda-env-deepcadpip2-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/DeepCAD_RT_pytorch/pth/ModelForPytorch/DownloadedModel b/DeepCAD_RT_pytorch/pth/ModelForPytorch/DownloadedModel new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/DeepCAD_RT_pytorch/pth/ModelForPytorch/DownloadedModel @@ -0,0 +1 @@ + diff --git a/DeepCAD_RT_pytorch/results/results.txt b/DeepCAD_RT_pytorch/results/results.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/DeepCAD_RT_pytorch/results/results.txt @@ -0,0 +1 @@ +