The prevalent approach to sequence-to-sequence learning maps an input
sequence to a variable-length output sequence via recurrent neural networks. We
introduce an architecture based entirely on convolutional neural networks.
Compared to recurrent models, computations over all elements can be fully
parallelized during training, and optimization is easier since the number of
non-linearities is fixed and independent of the input length. Our use of gated
linear units eases gradient propagation, and we equip each decoder layer with a
separate attention module. We outperform the accuracy of the deep LSTM setup of
Wu et al. (2016) on both WMT'14 English-German and WMT'14 English-French
translation while running an order of magnitude faster, on both GPU and CPU.
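To make the gating mechanism in the abstract concrete, below is a minimal sketch of one gated convolutional block in the spirit of the paper: a causal 1-D convolution produces twice the model dimension, a gated linear unit (GLU) combines the two halves, and a residual connection is added. This is an illustrative reconstruction under stated assumptions, not the authors' fairseq implementation; the class name GatedConvBlock, the layer sizes, and the use of PyTorch are all assumptions made for brevity.

import torch
import torch.nn as nn
import torch.nn.functional as F

class GatedConvBlock(nn.Module):
    """One causal, GLU-gated convolutional layer (illustrative sizes)."""
    def __init__(self, d_model: int = 8, kernel_size: int = 3):
        super().__init__()
        # 2*d_model output channels: one half is the candidate output,
        # the other half is the gate.
        self.conv = nn.Conv1d(d_model, 2 * d_model, kernel_size)
        self.kernel_size = kernel_size

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, d_model, time). Left-pad so position t never sees
        # positions > t, as a decoder requires.
        residual = x
        x = F.pad(x, (self.kernel_size - 1, 0))
        a, b = self.conv(x).chunk(2, dim=1)
        x = a * torch.sigmoid(b)          # gated linear unit
        return x + residual               # residual connection

# No recurrence: every time step is computed in parallel during training.
block = GatedConvBlock(d_model=8)
out = block(torch.randn(2, 8, 16))        # -> shape (2, 8, 16)

Stacking such blocks keeps the number of non-linearities fixed by network depth rather than by sequence length, which is the property the abstract credits for easier optimization.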
@article{gehring2017convolutional,
author = {Gehring, Jonas and Auli, Michael and Grangier, David and Yarats, Denis and Dauphin, Yann N.},
journal = {CoRR},
keywords = {cnn deep_learning nntags seminar seq2seq thema thema:seq2seq},
note = {cite arxiv:1705.03122},
title = {Convolutional Sequence to Sequence Learning},
url = {http://arxiv.org/abs/1705.03122},
year = 2017
}