Modern neural networks are highly overparameterized, with capacity to
substantially overfit to training data. Nevertheless, these networks often
generalize well in practice. It has also been observed that trained networks
can often be "compressed" to much smaller representations. The purpose of this
paper is to connect these two empirical observations. Our main technical result
is a generalization bound for compressed networks based on the compressed size.
Combined with off-the-shelf compression algorithms, the bound leads to
state-of-the-art generalization guarantees; in particular, we provide the first
non-vacuous generalization guarantees for realistic architectures applied to
the ImageNet classification problem. As additional evidence connecting
compression and generalization, we show that compressibility of models that
tend to overfit is limited: We establish an absolute limit on expected
compressibility as a function of expected generalization error, where the
expectations are over the random choice of training examples. The bounds are
complemented by empirical results showing that an increase in overfitting implies
an increase in the number of bits required to describe a trained network.
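
The bound described in the abstract is, in spirit, an Occam-style
(description-length) guarantee. The following is a rough sketch of that
generic form only, not a transcription of the paper's actual theorem: if a
trained classifier h can be encoded with |h| bits under a prefix-free code
fixed before seeing the data, and the loss is bounded in [0, 1] (e.g. 0-1
classification error), then for m i.i.d. training examples and any
\delta \in (0, 1), with probability at least 1 - \delta,

\[
  L(h) \;\le\; \widehat{L}(h) + \sqrt{\frac{|h|\,\ln 2 + \ln(1/\delta)}{2m}},
\]

where L(h) is the population risk and \widehat{L}(h) the empirical risk.
Shorter compressed descriptions give a tighter gap, which is the sense in
which compressibility controls generalization.

To make "compressed size" concrete, the Python fragment below is a minimal,
hypothetical sketch of how one might count the bits needed to describe a
trained network with an off-the-shelf compressor (uniform 8-bit quantization
followed by gzip); the function names and the coding scheme are illustrative
assumptions, not the procedure used in the paper.

import gzip
import numpy as np

def compressed_size_bits(weight_arrays, n_levels=256):
    # Hypothetical description-length estimate: uniformly quantize each
    # weight tensor to n_levels integer codes (8-bit here) and gzip the codes.
    payload = bytearray()
    for w in weight_arrays:
        lo, hi = float(w.min()), float(w.max())
        q = np.round((w - lo) / max(hi - lo, 1e-12) * (n_levels - 1))
        payload += q.astype(np.uint8).tobytes()
    return 8 * len(gzip.compress(bytes(payload)))

def occam_gap(bits, m, delta=0.05):
    # Generic Occam-style generalization gap from the displayed bound above.
    return float(np.sqrt((bits * np.log(2) + np.log(1.0 / delta)) / (2 * m)))

Plugging the resulting bit count into the gap expression reproduces the
qualitative message of the abstract: the smaller the compressed
representation, the stronger the generalization guarantee.
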
@misc{zhou2018compressibility,
author = {Zhou, Wenda and Veitch, Victor and Austern, Morgane and Adams, Ryan P. and Orbanz, Peter},
keywords = {compression generalization overfitting},
note = {arXiv:1804.05862; 20 pages, 1 figure},
title = {Compressibility and Generalization in Large-Scale Deep Learning},
url = {http://arxiv.org/abs/1804.05862},
year = 2018
}