Deep neural networks are typically trained by optimizing a loss function with
an SGD variant, in conjunction with a decaying learning rate, until
convergence. We show that simple averaging of multiple points along the
trajectory of SGD, with a cyclical or constant learning rate, leads to better
generalization than conventional training. We also show that this Stochastic
Weight Averaging (SWA) procedure finds much broader optima than SGD, and
approximates the recent Fast Geometric Ensembling (FGE) approach with a single
model. Using SWA we achieve notable improvement in test accuracy over
conventional SGD training on a range of state-of-the-art residual networks,
PyramidNets, DenseNets, and Shake-Shake networks on CIFAR-10, CIFAR-100, and
ImageNet. In short, SWA is extremely easy to implement, improves
generalization, and has almost no computational overhead.
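A minimal sketch of the averaging procedure described above, assuming PyTorch's torch.optim.swa_utils (this utility is not the paper's own code); the toy model, the synthetic data loader, and all hyperparameter values are illustrative placeholders rather than the settings used in the paper:

import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn

# Toy model and random data stand in for the residual networks and
# CIFAR/ImageNet setups evaluated in the paper.
model = torch.nn.Sequential(torch.nn.Linear(20, 64), torch.nn.ReLU(), torch.nn.Linear(64, 2))
train_loader = DataLoader(TensorDataset(torch.randn(512, 20), torch.randint(0, 2, (512,))), batch_size=32)

optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
swa_model = AveragedModel(model)               # maintains the running average of weights
swa_scheduler = SWALR(optimizer, swa_lr=0.05)  # constant learning rate during the averaging phase

swa_start, epochs = 15, 20                     # assumed schedule, not the paper's settings
for epoch in range(epochs):
    for x, y in train_loader:
        optimizer.zero_grad()
        loss = torch.nn.functional.cross_entropy(model(x), y)
        loss.backward()
        optimizer.step()
    if epoch >= swa_start:
        swa_model.update_parameters(model)     # fold the current SGD iterate into the average
        swa_scheduler.step()

update_bn(train_loader, swa_model)             # recompute batch-norm statistics for the averaged weights

At test time, predictions come from swa_model rather than model; the averaging adds only a second copy of the weights, which is the "almost no computational overhead" claimed in the abstract.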
@misc{citeulike:14571803,
archiveprefix = {arXiv},
author = {xxx},
day = 8,
eprint = {1803.05407},
keywords = {augmentation},
month = aug,
title = {{Averaging Weights Leads to Wider Optima and Better Generalization}},
url = {http://arxiv.org/abs/1803.05407},
year = 2018
}