Commit `7706598` (0 parents): showing 29 changed files with 4,884 additions and 0 deletions.
@@ -0,0 +1,5 @@
```
/**/*~
/**/*.pyc
/**/*.out
/**/*.ini
.ipynb_checkpoints
```
@@ -0,0 +1,11 @@ | ||
BSD 2-Clause License | ||
|
||
Copyright 2020-present Ferdia Sherry | ||
|
||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: | ||
|
||
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. | ||
|
||
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. | ||
|
||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@@ -0,0 +1,19 @@

# bilevelmri

This package implements the methods described in ["Learning the Sampling Pattern for MRI"](https://arxiv.org/abs/1906.08754).

## Installation

The package can be installed by running

```
git clone https://github.com/fsherry/bilevelmri.git && cd bilevelmri && pip install -e . && rm -r build
```

## Usage

For a basic example of how the various parts of this package can be combined to learn a sampling pattern, see the notebook `learn_example_square.ipynb`.

## License

This project is licensed under the BSD 2-Clause license (see LICENSE.txt).
Empty file.
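The first source file below implements the upper-level learning loop. Schematically, the problem being solved has the bilevel form sketched here (pieced together from the code's structure and the paper's abstract, not a verbatim statement of the paper's formulation):

$$
\min_{p}\;\; \mathrm{loss}\bigl(x^{\ast}(p)\bigr) + \mathrm{penalty}(p)
\quad\text{s.t.}\quad
x^{\ast}(p) \in \operatorname*{arg\,min}_{x}\; \tfrac{1}{2}\bigl\lVert S_p\,(A x - y)\bigr\rVert_2^2 + \alpha_p\, R_{\varepsilon_p}(x),
$$

where $p$ parametrises the sampling pattern $S$, the regularisation weight $\alpha$ and the smoothing parameter $\varepsilon$ (via `parametrisation`), $A$ is the measurement operator, and $R_\varepsilon$ is a smoothed regulariser such as the `Smoothed1Norm` defined further down. `learn` runs L-BFGS-B on $p$, and `compute_statistics` evaluates a learned pattern by reconstructing held-out data and reporting SSIM and PSNR.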
@@ -0,0 +1,99 @@
```python
from .lower_level import lower_level_solver
from .objective_function import obj_func_general_parametrisation, ObjectiveTracker

import numpy as np
import torch
from scipy.optimize import fmin_l_bfgs_b
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim

import datetime


def learn(data,
          p_init,
          p_bounds,
          parametrisation,
          A,
          reg_func,
          loss,
          penalty,
          params,
          track=False):
    if track:
        tracker = ObjectiveTracker(
            params, parametrisation, print_to_stdout=True)

        @tracker
        def obj(p, data, params):
            f_data, f_pen, g = obj_func_general_parametrisation(
                p, data, parametrisation, A, reg_func, loss, penalty, params)
            return f_data, f_pen, g
    else:
        counter = 0

        def callback(p):
            nonlocal counter
            counter += 1
            S, alpha, eps = parametrisation(torch.tensor(p), params)
            S = S.reshape(-1).cpu().numpy()
            alpha = alpha.cpu().numpy()
            print(
                '\nIteration #{}: Current sampling rate {:.1f}%, alpha {:.2e}, eps {:.2e}'
                .format(counter,
                        np.mean(S > 0) * 100, alpha.item(), eps.item()))

        def obj(p, data, params):
            f_data, f_pen, g = obj_func_general_parametrisation(
                p, data, parametrisation, A, reg_func, loss, penalty, params)
            return f_data + f_pen, g

    start_time = datetime.datetime.now()
    if 'pgtol' in params['alg_params']['LBFGSB']:
        pgtol = params['alg_params']['LBFGSB']['pgtol']
    else:
        pgtol = 1e-10
    if 'maxit' in params['alg_params']['LBFGSB']:
        maxiter = params['alg_params']['LBFGSB']['maxit']
    else:
        maxiter = 1000
    print('Learning sampling pattern:')
    p, _, info = fmin_l_bfgs_b(
        lambda p: obj(p, data, params),
        p_init,
        bounds=p_bounds,
        pgtol=pgtol,
        factr=0,
        maxiter=maxiter,
        callback=tracker.callback if track else callback)
    end_time = datetime.datetime.now()
    elapsed_time = end_time - start_time
    results = {'elapsed_time': elapsed_time, 'p': p, 'info': info}
    if track:
        results['tracker'] = tracker
    return results


def compute_statistics(data, p, A, reg_func, parametrisation, params):
    recons = []
    # reconstruct in chunks of 10 samples to limit memory use
    chunks = data['y'].split(10)
    for chunk in chunks:
        S, alpha, eps = parametrisation(
            torch.tensor(p, device=chunk.device), params)
        if 'xinit' in params['alg_params']['ll_sol'] and params['alg_params'][
                'll_sol']['xinit'].shape[0] != chunk.shape[0]:
            del params['alg_params']['ll_sol']['xinit']
        chunk_recon = lower_level_solver(chunk, S, alpha, eps, A, reg_func,
                                         params['alg_params'])
        recons.append(chunk_recon.to('cpu'))
    recons = torch.cat(recons, dim=0)
    ssims = []
    psnrs = []
    for i in range(recons.shape[0]):
        # compare magnitude images (the last axis holds real and imaginary parts)
        abs_recon = torch.sqrt(torch.sum(recons[i, :, :, :]**2, dim=2)).numpy()
        abs_clean = torch.sqrt(torch.sum(data['x'][i, :, :, :]**2,
                                         dim=2)).cpu().numpy()
        ssims.append(ssim(abs_clean, abs_recon))
        psnrs.append(psnr(abs_clean, abs_recon))
    results = {'recons': recons, 'ssims': ssims, 'psnrs': psnrs}
    return results
```
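`learn` hands the upper-level problem to SciPy's L-BFGS-B. As a point of reference, here is a self-contained toy sketch (not from this commit) of the calling convention it relies on: the objective returns a `(value, gradient)` pair, and box bounds constrain each entry of the variable, just as `p_bounds` constrains the pattern parameters above.

```python
import numpy as np
from scipy.optimize import fmin_l_bfgs_b


def toy_obj(p):
    # stand-in for data fit + penalty: 0.5 * ||p - 0.3||^2, with gradient p - 0.3
    return 0.5 * np.sum((p - 0.3)**2), p - 0.3


p0 = np.zeros(8)
bounds = [(0., 1.)] * 8  # box constraints, analogous to p_bounds
p_opt, f_opt, info = fmin_l_bfgs_b(
    toy_obj, p0, bounds=bounds, pgtol=1e-10, factr=0)
print(p_opt, info['warnflag'])  # p_opt is ~0.3 everywhere; warnflag 0 signals convergence
```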
@@ -0,0 +1,5 @@
```python
from .functionals import Functional, Squared2Norm
from .smoothed1norm import Smoothed1Norm

__all__ = ['Functional', 'Squared2Norm', 'Smoothed1Norm']
```
@@ -0,0 +1,36 @@ | ||
import torch | ||
|
||
|
||
class Functional: | ||
def __init__(self): | ||
pass | ||
|
||
def __call__(self, x): | ||
raise NotImplementedError() | ||
|
||
def prox(self, x, tau): | ||
raise NotImplementedError() | ||
|
||
def grad(self, x): | ||
raise NotImplementedError() | ||
|
||
def hess(self, x, w): | ||
raise NotImplementedError() | ||
|
||
|
||
class Squared2Norm(Functional): | ||
def __init__(self): | ||
super().__init__() | ||
self.smoothness_bound = 1. | ||
|
||
def __call__(self, x): | ||
return 0.5 * torch.sum(x**2) | ||
|
||
def prox(self, x, tau): | ||
return x / (tau + 1.) | ||
|
||
def grad(self, x): | ||
return x | ||
|
||
def hess(self, x, w): | ||
return w |
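For $f(x) = \tfrac12\lVert x\rVert_2^2$, the proximal map $\operatorname{prox}_{\tau f}(x) = \operatorname*{arg\,min}_u \tfrac12\lVert u\rVert_2^2 + \tfrac{1}{2\tau}\lVert u - x\rVert_2^2$ has the closed form $x/(1+\tau)$, which is exactly what `prox` returns. A quick numerical sanity check (not part of the commit; the import path is assumed from the package layout):

```python
import torch
from bilevelmri.functionals import Squared2Norm

f = Squared2Norm()
x = torch.randn(5)
tau = 0.7
assert torch.isclose(f(x), 0.5 * x.pow(2).sum())       # function value
assert torch.allclose(f.prox(x, tau), x / (1. + tau))  # closed-form prox
assert torch.allclose(f.grad(x), x)                    # gradient of 0.5 * ||x||^2
```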
@@ -0,0 +1,147 @@
```python
from .functionals import Functional
from ..linear_ops.wavelet_coefficients import WaveletCoefficients
import torch


def rho(x, gamma):
    out = torch.zeros_like(x)
    ind = x <= gamma
    out[ind] = -x[ind]**3 / (3 * gamma**2) + x[ind]**2 / gamma
    ind = x > gamma
    out[ind] = x[ind] - gamma / 3
    return out


def phi(x, gamma):
    out = torch.zeros_like(x)
    ind = x <= gamma
    out[ind] = -x[ind] / gamma**2 + 2 / gamma
    ind = x > gamma
    out[ind] = 1. / x[ind]
    return out


def prox_rho(norm_x, gamma, tau):
    if tau == 0.:
        return norm_x
    else:
        C = torch.zeros_like(norm_x)
        ind = norm_x <= gamma + tau
        C[ind] = (gamma / tau) * ((tau + 0.5 * gamma) - torch.sqrt(
            (tau + 0.5 * gamma)**2 - tau * norm_x[ind]))
        ind = norm_x > gamma + tau
        C[ind] = norm_x[ind] - tau
        return C


def psi(x, gamma):
    out = torch.zeros_like(x)
    ind = (x > 0) * (x <= gamma)
    out[ind] = -1. / (gamma**2 * x[ind])
    ind = x > gamma
    out[ind] = -1. / x[ind]**3
    return out


class Smoothed1Norm(Functional):
    def __init__(self, gamma=1e-2):
        super().__init__()
        self.gamma = gamma
        self.smoothness_bound = 4. / gamma

    def __call__(self, z):
        # wavelet coefficients are given in the form of a tuple of a torch
        # tensor and a list of torch tensors
        if isinstance(z, WaveletCoefficients):
            z = z.coefs
            norm = torch.sum(
                rho(torch.sqrt(torch.sum(z[0]**2, dim=1)), self.gamma))
            for coefs in z[1]:
                norm += torch.sum(
                    rho(torch.sqrt(torch.sum(coefs**2, dim=1)), self.gamma))
        # image gradient
        elif isinstance(z, torch.Tensor):
            norm = torch.sum(
                rho(torch.sqrt(torch.sum(z**2, dim=(3, 4))), self.gamma))
        else:
            raise NotImplementedError()
        return norm

    def prox(self, z, tau):
        # wavelet coefficients are given in the form of a tuple of a torch
        # tensor and a list of torch tensors
        if isinstance(z, WaveletCoefficients):
            norms_l = torch.sqrt(torch.sum(z[0]**2, dim=1,
                                           keepdim=True)).expand(*z[0].shape)
            prox_l = torch.zeros_like(z[0])
            ind = norms_l > 0
            prox_l[ind] = prox_rho(norms_l[ind], self.gamma,
                                   tau) * z[0][ind] / norms_l[ind]
            prox_h = []
            for coefs in z[1]:
                norms = torch.sqrt(torch.sum(
                    coefs**2, dim=1, keepdim=True)).expand(*coefs.shape)
                prox = torch.zeros_like(coefs)
                ind = norms > 0.
                prox[ind] = prox_rho(norms[ind], self.gamma,
                                     tau) * coefs[ind] / norms[ind]
                prox_h.append(prox)
            prox = WaveletCoefficients((prox_l, prox_h))
        # image gradient
        elif isinstance(z, torch.Tensor):
            norms = torch.sqrt(torch.sum(z**2, dim=(3, 4),
                                         keepdim=True)).expand(*z.shape)
            prox = torch.zeros_like(z)
            ind = norms > 0.
            prox[ind] = prox_rho(norms[ind], self.gamma,
                                 tau) * z[ind] / norms[ind]
        else:
            raise NotImplementedError()
        return prox

    def grad(self, z):
        # wavelet coefficients are given in the form of a tuple of a torch
        # tensor and a list of torch tensors
        if isinstance(z, WaveletCoefficients):
            norms_l = torch.sqrt(torch.sum(z[0]**2, dim=1,
                                           keepdim=True)).expand(*z[0].shape)
            grad_l = phi(norms_l, self.gamma) * z[0]
            grad_h = []
            for coefs in z[1]:
                norms = torch.sqrt(torch.sum(
                    coefs**2, dim=1, keepdim=True)).expand(*coefs.shape)
                grad_ = phi(norms, self.gamma) * coefs
                grad_h.append(grad_)
            grad = WaveletCoefficients((grad_l, grad_h))
        # image gradient
        elif isinstance(z, torch.Tensor):
            norms = torch.sqrt(torch.sum(z**2, dim=(3, 4),
                                         keepdim=True)).expand(*z.shape)
            grad = phi(norms, self.gamma) * z
        else:
            raise NotImplementedError()
        return grad

    def hess(self, z, w):
        # wavelet coefficients are given in the form of a tuple of a torch
        # tensor and a list of torch tensors
        if isinstance(z, WaveletCoefficients):
            norms_l = torch.sqrt(torch.sum(z[0]**2, dim=1,
                                           keepdim=True)).expand(*z[0].shape)
            hess_l = psi(norms_l, self.gamma) * z[0] * torch.sum(
                z[0] * w[0], dim=1, keepdim=True) + phi(norms_l,
                                                        self.gamma) * w[0]
            hess_h = []
            for z_coefs, w_coefs in zip(z[1], w[1]):
                norms = torch.sqrt(torch.sum(
                    z_coefs**2, dim=1, keepdim=True)).expand(*z_coefs.shape)
                hess_ = psi(norms, self.gamma) * z_coefs * torch.sum(
                    z_coefs * w_coefs, dim=1, keepdim=True) + phi(
                        norms, self.gamma) * w_coefs
                hess_h.append(hess_)
            hess_w = WaveletCoefficients((hess_l, hess_h))
        # image gradient
        elif isinstance(z, torch.Tensor):
            norms = torch.sqrt(torch.sum(z**2, dim=(3, 4),
                                         keepdim=True)).expand(*z.shape)
            hess_w = psi(norms, self.gamma) * z * torch.sum(
                z * w, dim=(3, 4), keepdim=True) + phi(norms, self.gamma) * w
        else:
            raise NotImplementedError()
        return hess_w
```
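The helper functions at the top of this file encode a $C^2$ smoothing of the absolute value, applied to the pointwise norms of gradients or wavelet coefficients; `phi` and `psi` are the quotients that appear in its first and second derivatives. Reading the definitions off the code:

$$
\rho_\gamma(t) =
\begin{cases}
-\dfrac{t^3}{3\gamma^2} + \dfrac{t^2}{\gamma}, & 0 \le t \le \gamma,\\[2ex]
t - \dfrac{\gamma}{3}, & t > \gamma,
\end{cases}
\qquad
\varphi_\gamma(t) = \frac{\rho_\gamma'(t)}{t},
\qquad
\psi_\gamma(t) = \frac{\varphi_\gamma'(t)}{t},
$$

so the gradient of $z \mapsto \rho_\gamma(\lVert z\rVert)$ is $\varphi_\gamma(\lVert z\rVert)\,z$, and its Hessian applied to $w$ is $\psi_\gamma(\lVert z\rVert)\,\langle z, w\rangle\, z + \varphi_\gamma(\lVert z\rVert)\, w$, exactly the expressions used in `grad` and `hess` above. `prox_rho` evaluates the proximal map of $\rho_\gamma$ on the norms, from which the vector-valued prox is recovered by rescaling $z$ along its own direction.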
@@ -0,0 +1,4 @@
```python
from .linear_operator import LinearOperator

__all__ = ['LinearOperator']
```
@@ -0,0 +1,50 @@
```python
from .linear_operator import LinearOperator
from ..utils.finite_diff import grad, div
from math import sqrt
import torch


class Grad(LinearOperator):
    def __init__(self):
        super().__init__()
        self.norm_bound = sqrt(8.)

    def __call__(self, x):
        return grad(x)

    def T(self, z):
        return -div(z)


class ProjectedGrad(LinearOperator):
    def __init__(self, vector_field):
        super().__init__()
        self.norm_bound = sqrt(8.)
        self.vector_field = vector_field

    def __call__(self, x):
        grad_x = grad(x)
        return grad_x - torch.sum(
            self.vector_field * grad_x, dim=(3, 4),
            keepdim=True) * self.vector_field

    def T(self, z):
        return -div(
            z - torch.sum(self.vector_field * z, dim=(3, 4), keepdim=True) *
            self.vector_field)


class DTV(LinearOperator):
    def __init__(self, ref_ims, eta=1e-2):
        super().__init__()
        self.norm_bound = sqrt(8.)
        grad_refs = grad(ref_ims)
        self.normalised_refs = grad_refs / torch.sqrt(
            torch.sum(eta**2 + grad_refs**2, dim=(3, 4), keepdim=True))
        self.proj_grad = ProjectedGrad(self.normalised_refs)

    def __call__(self, x):
        return self.proj_grad(x)

    def T(self, z):
        return self.proj_grad.T(z)
```
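`DTV` is the linear part of directional total variation: $\xi$ (`normalised_refs`) is a smoothly normalised gradient field of the reference images, where the $\eta$ term keeps the normalisation differentiable and guarantees $\lVert\xi\rVert \le 1$, and `ProjectedGrad` applies

$$
x \mapsto \nabla x - \langle \xi, \nabla x \rangle\,\xi,
\qquad
z \mapsto -\operatorname{div}\bigl(z - \langle \xi, z \rangle\,\xi\bigr),
$$

the second map being the adjoint implemented by `T`. The constant $\sqrt{8}$ is the standard operator-norm bound for the discrete 2D gradient; since $\lVert\xi\rVert \le 1$, the pointwise map $I - \xi\xi^\top$ has norm at most one, so the same bound remains valid for the projected gradient.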
@@ -0,0 +1,9 @@
```python
class LinearOperator:
    def __init__(self):
        pass

    def __call__(self, x):
        raise NotImplementedError()

    def T(self, z):
        raise NotImplementedError()
```
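This base class fixes the interface used throughout the package: `__call__` applies the operator and `T` applies its adjoint. As a hypothetical illustration (not part of this commit; the import path is assumed from the package layout), a new operator only needs those two methods, plus a `norm_bound` attribute if downstream solvers rely on it, as the gradient operators above do:

```python
from bilevelmri.linear_ops import LinearOperator


class Scale(LinearOperator):
    """Multiplication by a fixed real scalar; self-adjoint, with exact norm |c|."""

    def __init__(self, c):
        super().__init__()
        self.c = c
        self.norm_bound = abs(c)

    def __call__(self, x):
        return self.c * x

    def T(self, z):
        # the adjoint of multiplication by a real scalar is multiplication by the same scalar
        return self.c * z
```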
(The remaining changed files in this commit failed to load.)