Solving inverse problems with iterative algorithms is popular, especially for
large data. Due to time constraints, the number of possible iterations is
usually limited, potentially affecting the achievable accuracy. Given an error
one is willing to tolerate, an important question is whether it is possible to
modify the original iterations to obtain faster convergence to a minimizer
achieving the allowed error without increasing the computational cost of each
iteration considerably. Relying on recent recovery techniques developed for
settings in which the desired signal belongs to some low-dimensional set, we
show that using a coarse estimate of this set may lead to faster convergence at
the cost of an additional reconstruction error related to the accuracy of the
set approximation. Our theory ties to recent advances in sparse recovery,
compressed sensing, and deep learning. Particularly, it may provide a possible
explanation to the successful approximation of the l1-minimization solution by
neural networks with layers representing iterations, as practiced in the
learned iterative shrinkage-thresholding algorithm (LISTA).
Description
[1605.09232] Tradeoffs between Convergence Speed and Reconstruction Accuracy in Inverse Problems
%0 Journal Article
%1 giryes2016tradeoffs
%A Giryes, Raja
%A Eldar, Yonina C.
%A Bronstein, Alex M.
%A Sapiro, Guillermo
%D 2016
%K convergence information readings reconstruction sparsity
%T Tradeoffs between Convergence Speed and Reconstruction Accuracy in
Inverse Problems
%U http://arxiv.org/abs/1605.09232
%X Solving inverse problems with iterative algorithms is popular, especially for
large data. Due to time constraints, the number of possible iterations is
usually limited, potentially affecting the achievable accuracy. Given an error
one is willing to tolerate, an important question is whether it is possible to
modify the original iterations to obtain faster convergence to a minimizer
achieving the allowed error without increasing the computational cost of each
iteration considerably. Relying on recent recovery techniques developed for
settings in which the desired signal belongs to some low-dimensional set, we
show that using a coarse estimate of this set may lead to faster convergence at
the cost of an additional reconstruction error related to the accuracy of the
set approximation. Our theory ties to recent advances in sparse recovery,
compressed sensing, and deep learning. Particularly, it may provide a possible
explanation to the successful approximation of the l1-minimization solution by
neural networks with layers representing iterations, as practiced in the
learned iterative shrinkage-thresholding algorithm (LISTA).
@article{giryes2016tradeoffs,
  abstract      = {Solving inverse problems with iterative algorithms is popular, especially for
large data. Due to time constraints, the number of possible iterations is
usually limited, potentially affecting the achievable accuracy. Given an error
one is willing to tolerate, an important question is whether it is possible to
modify the original iterations to obtain faster convergence to a minimizer
achieving the allowed error without increasing the computational cost of each
iteration considerably. Relying on recent recovery techniques developed for
settings in which the desired signal belongs to some low-dimensional set, we
show that using a coarse estimate of this set may lead to faster convergence at
the cost of an additional reconstruction error related to the accuracy of the
set approximation. Our theory ties to recent advances in sparse recovery,
compressed sensing, and deep learning. Particularly, it may provide a possible
explanation to the successful approximation of the l1-minimization solution by
neural networks with layers representing iterations, as practiced in the
learned iterative shrinkage-thresholding algorithm (LISTA).},
  added-at      = {2019-10-22T14:35:22.000+0200},
  author        = {Giryes, Raja and Eldar, Yonina C. and Bronstein, Alex M. and Sapiro, Guillermo},
  biburl        = {https://www.bibsonomy.org/bibtex/2a8649d3f65ab3fe9949918baba1135ee/kirk86},
  description   = {[1605.09232] Tradeoffs between Convergence Speed and Reconstruction Accuracy in Inverse Problems},
  eprint        = {1605.09232},
  eprinttype    = {arXiv},
  interhash     = {2ef9a84c2276a1a272f1f45ee4923c81},
  intrahash     = {a8649d3f65ab3fe9949918baba1135ee},
  keywords      = {convergence information readings reconstruction sparsity},
  note          = {To appear in IEEE Transactions on Signal Processing},
  timestamp     = {2019-10-22T14:41:44.000+0200},
  title         = {Tradeoffs between Convergence Speed and Reconstruction Accuracy in Inverse Problems},
  url           = {http://arxiv.org/abs/1605.09232},
  year          = {2016},
}