Recently, a residual network (ResNet) with a single residual block has been
shown to outperform linear predictors, in the sense that all its local minima
are at least as good as the best linear predictor. We take a step towards
extending this result to deep ResNets. As motivation, we first show that there
exist datasets for which all local minima of a fully-connected ReLU network are
no better than the best linear predictor, while a ResNet can have strictly
better local minima. Second, we show that even at its global minimum, the
representation obtained from the residual blocks of a 2-block ResNet does not
necessarily improve monotonically over successive blocks, highlighting a
fundamental difficulty in analyzing deep ResNets. Our main result on deep
ResNets shows that (under some geometric conditions) any critical point is
either (i) at least as good as the best linear predictor; or (ii) the Hessian
at this critical point has a strictly negative eigenvalue. Finally, we
complement our results by analyzing near-identity regions of deep ResNets,
obtaining size-independent upper bounds on both the risk attained at critical
points and the Rademacher complexity.
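For intuition, here is a minimal NumPy sketch of the model class the abstract refers to, assuming single-hidden-layer residual blocks of the form h + V relu(U h) followed by a linear output layer; the shapes, names, and small near-zero initialization (which places the network near the identity map, as in the final result) are illustrative choices, not the paper's exact construction:

import numpy as np

def relu(z):
    return np.maximum(z, 0.0)

def deep_resnet(x, blocks, w):
    """Forward pass of an L-block ResNet of the kind analyzed here:
    each block adds a learned perturbation to the identity map, and a
    final linear layer produces the prediction."""
    h = x
    for U, V in blocks:          # L residual blocks
        h = h + V @ relu(U @ h)  # skip connection plus residual branch
    return w @ h                 # linear predictor on the final features

# With every (U, V) set to zero, each block is the identity map and the
# network computes exactly w @ x; the model class therefore contains all
# linear predictors, which is the baseline the results above compare against.
rng = np.random.default_rng(0)
d, m, L = 4, 8, 3
blocks = [(0.01 * rng.standard_normal((m, d)),
           0.01 * rng.standard_normal((d, m))) for _ in range(L)]
w = rng.standard_normal(d)
x = rng.standard_normal(d)
print(deep_resnet(x, blocks, w))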
Description
[1907.03922] Are deep ResNets provably better than linear predictors?
%0 Journal Article
%1 yun2019resnets
%A Yun, Chulhee
%A Sra, Suvrit
%A Jadbabaie, Ali
%D 2019
%K generalization objectives optimization readings theory
%T Are deep ResNets provably better than linear predictors?
%U http://arxiv.org/abs/1907.03922
@article{yun2019resnets,
author = {Yun, Chulhee and Sra, Suvrit and Jadbabaie, Ali},
biburl = {https://www.bibsonomy.org/bibtex/2f2a4b9ae2f366465e5819116533dcea1/kirk86},
description = {[1907.03922] Are deep ResNets provably better than linear predictors?},
keywords = {generalization objectives optimization readings theory},
note = {cite arxiv:1907.03922. Comment: 17 pages},
title = {Are deep ResNets provably better than linear predictors?},
url = {http://arxiv.org/abs/1907.03922},
year = 2019
}