In practice it is often found that large over-parameterized neural networks
generalize better than their smaller counterparts, an observation that appears
to conflict with classical notions of function complexity, which typically
favor smaller models. In this work, we investigate this tension between
complexity and generalization through an extensive empirical exploration of two
natural metrics of complexity related to sensitivity to input perturbations.
Our experiments survey thousands of models with various fully-connected
architectures, optimizers, and other hyper-parameters, as well as four
different image classification datasets.
We find that trained neural networks are more robust to input perturbations
in the vicinity of the training data manifold, as measured by the norm of the
input-output Jacobian of the network, and that it correlates well with
generalization. We further establish that factors associated with poor
generalization -- such as full-batch training or using random labels --
correspond to lower robustness, while factors associated with good
generalization -- such as data augmentation and ReLU non-linearities -- give
rise to more robust functions. Finally, we demonstrate how the input-output
Jacobian norm can be predictive of generalization at the level of individual
test points.
Description
[1802.08760] Sensitivity and Generalization in Neural Networks: an Empirical Study
%0 Journal Article
%1 novak2018sensitivity
%A Novak, Roman
%A Bahri, Yasaman
%A Abolafia, Daniel A.
%A Pennington, Jeffrey
%A Sohl-Dickstein, Jascha
%D 2018
%K generalization readings robustness uncertainty
%T Sensitivity and Generalization in Neural Networks: an Empirical Study
%U http://arxiv.org/abs/1802.08760
%X In practice it is often found that large over-parameterized neural networks
generalize better than their smaller counterparts, an observation that appears
to conflict with classical notions of function complexity, which typically
favor smaller models. In this work, we investigate this tension between
complexity and generalization through an extensive empirical exploration of two
natural metrics of complexity related to sensitivity to input perturbations.
Our experiments survey thousands of models with various fully-connected
architectures, optimizers, and other hyper-parameters, as well as four
different image classification datasets.
We find that trained neural networks are more robust to input perturbations
in the vicinity of the training data manifold, as measured by the norm of the
input-output Jacobian of the network, and that it correlates well with
generalization. We further establish that factors associated with poor
generalization -- such as full-batch training or using random labels --
correspond to lower robustness, while factors associated with good
generalization -- such as data augmentation and ReLU non-linearities -- give
rise to more robust functions. Finally, we demonstrate how the input-output
Jacobian norm can be predictive of generalization at the level of individual
test points.
@article{novak2018sensitivity,
  abstract      = {In practice it is often found that large over-parameterized neural networks
generalize better than their smaller counterparts, an observation that appears
to conflict with classical notions of function complexity, which typically
favor smaller models. In this work, we investigate this tension between
complexity and generalization through an extensive empirical exploration of two
natural metrics of complexity related to sensitivity to input perturbations.
Our experiments survey thousands of models with various fully-connected
architectures, optimizers, and other hyper-parameters, as well as four
different image classification datasets.
We find that trained neural networks are more robust to input perturbations
in the vicinity of the training data manifold, as measured by the norm of the
input-output Jacobian of the network, and that it correlates well with
generalization. We further establish that factors associated with poor
generalization -- such as full-batch training or using random labels --
correspond to lower robustness, while factors associated with good
generalization -- such as data augmentation and ReLU non-linearities -- give
rise to more robust functions. Finally, we demonstrate how the input-output
Jacobian norm can be predictive of generalization at the level of individual
test points.},
  added-at      = {2020-03-13T19:10:27.000+0100},
  archiveprefix = {arXiv},
  author        = {Novak, Roman and Bahri, Yasaman and Abolafia, Daniel A. and Pennington, Jeffrey and Sohl-Dickstein, Jascha},
  biburl        = {https://www.bibsonomy.org/bibtex/28a27f19d19c8feaa1f07a2e0aa8d33ff/kirk86},
  description   = {[1802.08760] Sensitivity and Generalization in Neural Networks: an Empirical Study},
  eprint        = {1802.08760},
  interhash     = {cff38becb9b0d5f56130a33197c4429f},
  intrahash     = {8a27f19d19c8feaa1f07a2e0aa8d33ff},
  keywords      = {generalization readings robustness uncertainty},
  note          = {Published as a conference paper at ICLR 2018},
  timestamp     = {2020-03-13T19:10:27.000+0100},
  title         = {Sensitivity and Generalization in Neural Networks: an Empirical Study},
  url           = {http://arxiv.org/abs/1802.08760},
  year          = {2018}
}