The early phase of training of deep neural networks is critical for their
final performance. In this work, we study how the hyperparameters of stochastic
gradient descent (SGD) used in the early phase of training affect the rest of
the optimization trajectory. We argue for the existence of the "break-even"
point on this trajectory, beyond which the curvature of the loss surface and
noise in the gradient are implicitly regularized by SGD. In particular, we
demonstrate on multiple classification tasks that using a large learning rate
in the initial phase of training reduces the variance of the gradient and
improves the conditioning of the covariance of gradients. These effects are
beneficial from the optimization perspective and become visible after the
break-even point. Complementing prior work, we also show that using a low
learning rate results in poor conditioning of the loss surface even for a neural
network with batch normalization layers. In short, our work shows that key
properties of the loss surface are strongly influenced by SGD in the early
phase of training. We argue that studying the impact of the identified effects
on generalization is a promising future direction.
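The abstract refers to two quantities measured along the optimization trajectory: the variance of the mini-batch gradient and the conditioning of the covariance of gradients. As a rough, self-contained illustration of what these quantities are (this is not the paper's code; the least-squares model, data, and sample sizes below are made-up assumptions), the following Python/NumPy sketch estimates the gradient covariance K at a fixed parameter vector from many sampled mini-batch gradients, then reports its trace (the total gradient variance) and the ratio of its largest to smallest eigenvalue (its conditioning).

import numpy as np

rng = np.random.default_rng(0)

# Toy least-squares problem (illustrative assumption, not the paper's setup):
# loss(w) = mean_i 0.5 * (x_i . w - y_i)^2
n_samples, n_params, batch_size = 2048, 10, 32
X = rng.normal(size=(n_samples, n_params))
y = X @ rng.normal(size=n_params) + 0.1 * rng.normal(size=n_samples)
w = rng.normal(size=n_params)  # a fixed point on the optimization trajectory

def minibatch_grad(idx):
    """Gradient of the mini-batch loss at the current parameters w."""
    Xb, yb = X[idx], y[idx]
    return Xb.T @ (Xb @ w - yb) / len(idx)

# Sample many mini-batch gradients and form the empirical covariance K.
grads = np.stack([
    minibatch_grad(rng.choice(n_samples, size=batch_size, replace=False))
    for _ in range(512)
])
K = np.cov(grads, rowvar=False)          # (n_params, n_params) covariance of gradients

eigvals = np.linalg.eigvalsh(K)          # eigenvalues of K in ascending order
print("gradient variance   tr(K)       =", eigvals.sum())
print("conditioning of K   lmax / lmin =", eigvals[-1] / eigvals[0])

In the paper's terms, trajectories that pass the break-even point at a larger learning rate end up with a smaller gradient variance and a better-conditioned covariance of gradients; the sketch only shows how such quantities can be computed at a single point, not how they evolve during training.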
@article{jastrzebski2020breakeven,
author = {Jastrzebski, Stanislaw and Szymczak, Maciej and Fort, Stanislav and Arpit, Devansh and Tabor, Jacek and Cho, Kyunghyun and Geras, Krzysztof},
keywords = {deep-learning optimization},
note = {arXiv:2002.09572. Accepted as a spotlight at ICLR 2020. The last two authors contributed equally.},
title = {The Break-Even Point on Optimization Trajectories of Deep Neural Networks},
url = {http://arxiv.org/abs/2002.09572},
year = 2020
}