A common way to speed up the training of large convolutional networks is to add
computational units. Training is then performed using data-parallel synchronous
Stochastic Gradient Descent (SGD), with the mini-batch divided between the
computational units. As the number of nodes increases, the batch size grows. But
training with a large batch size often results in lower model accuracy. We
argue that the current recipe for large-batch training (linear learning rate
scaling with warm-up) is not general enough and training may diverge. To
overcome these optimization difficulties we propose a new training algorithm
based on Layer-wise Adaptive Rate Scaling (LARS). Using LARS, we scaled AlexNet
up to a batch size of 8K, and ResNet-50 to a batch size of 32K, without loss in
accuracy.
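
The core idea named in the abstract, giving each layer its own learning rate scaled by the ratio of its weight norm to its gradient norm, can be sketched in a few lines. The Python/NumPy snippet below is only an illustrative sketch of a LARS-style SGD step: the function name lars_sgd_step, the default values for trust_coef, weight_decay, and momentum, and the exact placement of weight decay are assumptions for illustration, not details taken from the paper.

import numpy as np

def lars_sgd_step(weights, grads, velocities, global_lr,
                  trust_coef=0.001, weight_decay=0.0005, momentum=0.9):
    # One SGD-with-momentum step in which every layer gets its own "local"
    # learning rate proportional to ||w|| / ||grad||. Layers whose gradients
    # are large relative to their weights take smaller steps; this is the
    # mechanism LARS uses to keep large-batch training from diverging.
    new_weights, new_velocities = [], []
    for w, g, v in zip(weights, grads, velocities):
        w_norm = np.linalg.norm(w)
        g_norm = np.linalg.norm(g)
        if w_norm > 0 and g_norm > 0:
            # Illustrative local-rate formula; constants are assumed defaults.
            local_lr = trust_coef * w_norm / (g_norm + weight_decay * w_norm)
        else:
            local_lr = 1.0
        v = momentum * v + global_lr * local_lr * (g + weight_decay * w)
        new_weights.append(w - v)
        new_velocities.append(v)
    return new_weights, new_velocities

# Toy usage: two "layers" with random weights and gradients.
weights = [np.random.randn(4, 4), np.random.randn(4)]
grads = [np.random.randn(4, 4), np.random.randn(4)]
velocities = [np.zeros_like(w) for w in weights]
weights, velocities = lars_sgd_step(weights, grads, velocities, global_lr=0.1)

Dividing by the gradient norm decouples the size of each layer's update from the magnitude of its gradient, which is why the global learning rate can be raised with the batch size without individual layers diverging.
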
@misc{YouGit17Large,
author = {You, Yang and Gitman, Igor and Ginsburg, Boris},
keywords = {deep_learning distributed large_batch optimizer},
note = {cite arxiv:1708.03888},
title = {Large Batch Training of Convolutional Networks},
url = {http://arxiv.org/abs/1708.03888},
year = 2017
}