The creation of practical deep learning data-products often requires
parallelization across processors and computers to make deep learning feasible
on large data sets, but bottlenecks in communication bandwidth make it
difficult to attain good speedups through parallelism. Here we develop and test
8-bit approximation algorithms which make better use of the available bandwidth
by compressing 32-bit gradients and nonlinear activations to 8-bit
approximations. We show that these approximations do not decrease predictive
performance on MNIST, CIFAR10, and ImageNet for both model and data parallelism
and provide a data transfer speedup of 2x relative to 32-bit parallelism. We
build a predictive model for speedups based on our experimental data, verify
its validity on known speedup data, and show that we can obtain a speedup of
50x and more on a system of 96 GPUs compared to a speedup of 23x for 32-bit. We
compare our data types with other methods and show that 8-bit approximations
achieve state-of-the-art speedups for model parallelism. Thus 8-bit
approximation is an efficient method to parallelize convolutional networks on
very large systems of GPUs.
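The abstract itself contains no code, but the core idea it describes — quantizing 32-bit gradients and activations down to 8 bits before they are sent between GPUs, then expanding them back on the receiving side — can be sketched in a few lines. The snippet below is only a minimal illustration using a simple per-tensor linear scale; the paper proposes more elaborate 8-bit data types, and the function names and scaling scheme here are assumptions rather than the authors' implementation.

```python
import numpy as np

def compress_8bit(grad: np.ndarray):
    """Quantize a float32 tensor to uint8 with one per-tensor scale factor.

    Illustrative linear scheme only; the paper's 8-bit data types allocate
    bits differently to cover a wider dynamic range.
    """
    scale = float(np.abs(grad).max())
    if scale == 0.0:
        return np.zeros(grad.shape, dtype=np.uint8), 0.0
    # Map the range [-scale, scale] onto the 256 available uint8 levels.
    q = np.round((grad / scale) * 127.0) + 128.0
    return q.astype(np.uint8), scale

def decompress_8bit(q: np.ndarray, scale: float) -> np.ndarray:
    """Recover an approximate float32 tensor from its 8-bit encoding."""
    return ((q.astype(np.float32) - 128.0) / 127.0) * scale

# Example: gradients are compressed before the (simulated) transfer,
# then decompressed on the receiving worker.
grad = np.random.randn(1024).astype(np.float32) * 1e-3
q, s = compress_8bit(grad)
approx = decompress_8bit(q, s)
print("max abs error:", np.abs(grad - approx).max())
print("bytes sent: %d vs %d" % (q.nbytes + 4, grad.nbytes))
```

Sending the uint8 buffer plus one scale factor moves roughly a quarter of the data of the 32-bit tensor, which is the bandwidth saving the abstract refers to; the 2x transfer speedup reported in the paper accounts for the additional compression and decompression work.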
Description
8-Bit Approximations for Parallelism in Deep Learning
@misc{dettmers2015approximations,
author = {Dettmers, Tim},
description = {8-Bit Approximations for Parallelism in Deep Learning},
keywords = {deep dl large-scale networks neural},
note = {cite arxiv:1511.04561},
title = {8-Bit Approximations for Parallelism in Deep Learning},
url = {http://arxiv.org/abs/1511.04561},
year = 2015
}