Generalization of deep networks has been of great interest in recent years,
resulting in a number of theoretically and empirically motivated complexity
measures. However, most papers proposing such measures study only a small set
of models, leaving open the question of whether the conclusion drawn from those
experiments would remain valid in other settings. We present the first large
scale study of generalization in deep networks. We investigate more then 40
complexity measures taken from both theoretical bounds and empirical studies.
We train over 10,000 convolutional networks by systematically varying commonly
used hyperparameters. Hoping to uncover potentially causal relationships
between each measure and generalization, we analyze carefully controlled
experiments and show surprising failures of some measures as well as promising
measures for further research.
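The abstract describes scoring each trained model with a complexity measure and then relating that score to the model's observed generalization. As a loose, hypothetical sketch (not the authors' code or experimental protocol), the snippet below evaluates one norm-based measure, the product of spectral norms of the weight matrices, on synthetic stand-in "models" and rank-correlates it with a synthetic generalization gap; the function name `spectral_complexity` and all data here are assumptions for illustration only.

```python
# Hypothetical sketch: correlate a norm-based complexity measure with a
# (synthetic) generalization gap across a population of "models".
import numpy as np
from scipy.stats import kendalltau

rng = np.random.default_rng(0)

def spectral_complexity(weights):
    # Product of spectral norms (largest singular values) of the weight
    # matrices -- one common family of norm-based measures.
    return float(np.prod([np.linalg.norm(w, ord=2) for w in weights]))

# Stand-in for thousands of trained networks: random weight matrices at
# varying scales; the "gap" is synthetic and loosely tied to the measure.
models = [[rng.normal(size=(64, 64)) * s for _ in range(3)]
          for s in rng.uniform(0.05, 0.25, size=200)]
scores = [spectral_complexity(ws) for ws in models]
gaps = [0.1 * c ** 0.1 + rng.normal(scale=0.01) for c in scores]

tau, _ = kendalltau(scores, gaps)  # rank correlation: measure vs. gap
print(f"Kendall's tau: {tau:.3f}")
```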
Description: [1912.02178] Fantastic Generalization Measures and Where to Find Them
@article{jiang2019fantastic,
abstract = {Generalization of deep networks has been of great interest in recent years,
resulting in a number of theoretically and empirically motivated complexity
measures. However, most papers proposing such measures study only a small set
of models, leaving open the question of whether the conclusion drawn from those
experiments would remain valid in other settings. We present the first large-scale
study of generalization in deep networks. We investigate more than 40
complexity measures taken from both theoretical bounds and empirical studies.
We train over 10,000 convolutional networks by systematically varying commonly
used hyperparameters. Hoping to uncover potentially causal relationships
between each measure and generalization, we analyze carefully controlled
experiments and show surprising failures of some measures as well as promising
measures for further research.},
added-at = {2019-12-05T13:59:35.000+0100},
author = {Jiang, Yiding and Neyshabur, Behnam and Mobahi, Hossein and Krishnan, Dilip and Bengio, Samy},
biburl = {https://www.bibsonomy.org/bibtex/29f3faafe709b661e3c715d55dc04dd8b/kirk86},
description = {[1912.02178] Fantastic Generalization Measures and Where to Find Them},
interhash = {9f39c1d5d2909ef22d09bbfaef72bb37},
intrahash = {9f3faafe709b661e3c715d55dc04dd8b},
keywords = {bounds deep-learning generalization readings theory},
note = {cite arxiv:1912.02178},
timestamp = {2019-12-05T13:59:35.000+0100},
title = {Fantastic Generalization Measures and Where to Find Them},
url = {http://arxiv.org/abs/1912.02178},
year = 2019
}