In an increasing number of domains it has been demonstrated that deep
learning models can be trained using relatively large batch sizes without
sacrificing data efficiency. However, the limits of this massive data
parallelism seem to differ from domain to domain, ranging from batches of tens
of thousands in ImageNet to batches of millions in RL agents that play the game
Dota 2. To our knowledge, there is limited conceptual understanding of why these
limits to batch size differ or how we might choose the correct batch size in a
new domain. In this paper, we demonstrate that a simple and easy-to-measure
statistic called the gradient noise scale predicts the largest useful batch
size across many domains and applications, including a number of supervised
learning datasets (MNIST, SVHN, CIFAR-10, ImageNet, Billion Word),
reinforcement learning domains (Atari and Dota), and even generative model
training (autoencoders on SVHN). We find that the noise scale increases as the
loss decreases over a training run and depends on the model size primarily
through improved model performance. Our empirically-motivated theory also
describes the tradeoff between compute-efficiency and time-efficiency, and
provides a rough model of the benefits of adaptive batch-size training.
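For orientation, the "simple" gradient noise scale the abstract refers to is, per the paper, the ratio B_simple = tr(Sigma) / |G|^2, where Sigma is the per-example gradient covariance and G is the true gradient. The sketch below illustrates one way to estimate it from gradients averaged over two different batch sizes. The function name and the synthetic numbers are my own, and the two-batch estimator reflects my reading of the paper's appendix rather than a verbatim reproduction, so treat it as an assumption and check the paper before relying on it.

import numpy as np

def noise_scale_estimate(grad_small, grad_big, b_small, b_big):
    # Estimate the "simple" gradient noise scale B_simple = tr(Sigma) / |G|^2
    # from two gradient vectors averaged over batches of size b_small < b_big.
    # In expectation, |G_B|^2 = |G|^2 + tr(Sigma) / B for a batch of size B.
    g_small_sq = np.dot(grad_small, grad_small)
    g_big_sq = np.dot(grad_big, grad_big)
    # Solve the two expectations above for |G|^2 and tr(Sigma).
    g_sq = (b_big * g_big_sq - b_small * g_small_sq) / (b_big - b_small)
    trace_sigma = (g_small_sq - g_big_sq) / (1.0 / b_small - 1.0 / b_big)
    return trace_sigma / g_sq

# Toy check with synthetic gradients (a real measurement would average
# gradients collected during training at the two batch sizes).
rng = np.random.default_rng(0)
true_grad = rng.normal(size=1000)   # stand-in for the true gradient G
per_example_std = 5.0               # stand-in for per-example gradient noise
b_small, b_big = 32, 512
grad_small = true_grad + rng.normal(scale=per_example_std / np.sqrt(b_small), size=1000)
grad_big = true_grad + rng.normal(scale=per_example_std / np.sqrt(b_big), size=1000)
print(noise_scale_estimate(grad_small, grad_big, b_small, b_big))  # roughly tr(Sigma)/|G|^2, about 25 here

Under the paper's model, training at batch sizes well below this noise scale is time-inefficient and well above it is compute-inefficient, which is the compute/time tradeoff the abstract mentions.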
@misc{openai2018empirical,
author = {McCandlish, Sam and Kaplan, Jared and Amodei, Dario and {OpenAI Dota Team}},
keywords = {deep dl large-scale networks neural},
note = {arXiv:1812.06162},
title = {An Empirical Model of Large-Batch Training},
url = {http://arxiv.org/abs/1812.06162},
year = 2018
}