I present a new way to parallelize the training of convolutional neural
networks across multiple GPUs. The method scales significantly better than all
alternatives when applied to modern convolutional neural networks.
Description
One weird trick for parallelizing convolutional neural networks
%0 Generic
%1 krizhevsky2014weird
%A Krizhevsky, Alex
%D 2014
%K deep dl large-scale networks neural
%T One weird trick for parallelizing convolutional neural networks
%U http://arxiv.org/abs/1404.5997
%X I present a new way to parallelize the training of convolutional neural
networks across multiple GPUs. The method scales significantly better than all
alternatives when applied to modern convolutional neural networks.
@misc{krizhevsky2014weird,
  abstract      = {I present a new way to parallelize the training of convolutional neural
networks across multiple GPUs. The method scales significantly better than all
alternatives when applied to modern convolutional neural networks.},
  added-at      = {2019-06-04T16:23:46.000+0200},
  archiveprefix = {arXiv},
  author        = {Krizhevsky, Alex},
  biburl        = {https://www.bibsonomy.org/bibtex/24ab1584911dac28a8285d1961e84a7ab/alrigazzi},
  description   = {One weird trick for parallelizing convolutional neural networks},
  eprint        = {1404.5997},
  interhash     = {e3c374134d4510caa66e406f46a445ec},
  intrahash     = {4ab1584911dac28a8285d1961e84a7ab},
  keywords      = {deep dl large-scale networks neural},
  timestamp     = {2019-06-04T16:23:46.000+0200},
  title         = {One weird trick for parallelizing convolutional neural networks},
  url           = {http://arxiv.org/abs/1404.5997},
  year          = {2014},
}