We introduce the Block-Recurrent Transformer, which applies a transformer
layer in a recurrent fashion along a sequence, and has linear complexity with
respect to sequence length. Our recurrent cell operates on blocks of tokens
rather than single tokens during training, and leverages parallel computation
within a block in order to make efficient use of accelerator hardware. The cell
itself is strikingly simple. It is merely a transformer layer: it uses
self-attention and cross-attention to efficiently compute a recurrent function
over a large set of state vectors and tokens. Our design was inspired in part
by LSTM cells, and it uses LSTM-style gates, but it scales the typical LSTM
cell up by several orders of magnitude. Our implementation of recurrence has
the same cost in both computation time and parameter count as a conventional
transformer layer, but offers dramatically improved perplexity in language
modeling tasks over very long sequences. Our model outperforms a long-range
Transformer-XL baseline by a wide margin, while running twice as fast. We
demonstrate its effectiveness on PG19 (books), arXiv papers, and GitHub source
code. Our code has been released as open source.
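For concreteness, below is a minimal NumPy sketch of the idea described in the abstract. It is not the authors' released implementation: the dimension sizes, parameter names, and the exact gate formulation are illustrative assumptions, the attention is single-headed, and the feed-forward sublayers and causal masking are omitted. What it does show is the structure of the cell: tokens in the current block attend both to the block and to a fixed-size set of recurrent state vectors, the state is updated through an LSTM-style gate rather than a residual connection, and the cell runs block by block so the total cost grows linearly with sequence length.

import numpy as np

rng = np.random.default_rng(0)

D = 64   # model width (illustrative)
W = 8    # block length: tokens processed per recurrent step (illustrative)
S = 16   # number of recurrent state vectors (illustrative)

def softmax(x):
    x = x - x.max(axis=-1, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=-1, keepdims=True)

def attend(q, k, v):
    # Single-head scaled dot-product attention (causal masking omitted).
    return softmax(q @ k.T / np.sqrt(q.shape[-1])) @ v

def proj():
    return rng.normal(scale=D ** -0.5, size=(D, D))

# Hypothetical parameters: query/key/value/output projections plus gate weights.
P = {name: proj() for name in ["tq", "sq", "k", "v", "to", "so", "Wg", "Ug"]}
b_g = np.ones(D)  # positive gate bias nudges the cell toward retaining state

def block_recurrent_cell(tokens, state):
    # tokens: (W, D) embeddings of the current block.
    # state:  (S, D) recurrent state carried over from the previous block.
    kv = np.concatenate([tokens, state])   # shared keys/values: tokens + state
    k, v = kv @ P["k"], kv @ P["v"]

    # Vertical direction: tokens self-attend within the block and
    # cross-attend to the state, with the usual residual connection.
    out_tokens = tokens + attend(tokens @ P["tq"], k, v) @ P["to"]

    # Horizontal direction: state vectors attend to themselves and to the
    # block's tokens; an LSTM-style gate replaces the residual connection.
    h = attend(state @ P["sq"], k, v) @ P["so"]
    g = 1.0 / (1.0 + np.exp(-(state @ P["Wg"] + h @ P["Ug"] + b_g)))  # sigmoid
    next_state = g * state + (1.0 - g) * np.tanh(h)
    return out_tokens, next_state

# Process a long sequence block by block: the work per block is constant,
# so the total cost grows linearly with sequence length.
seq = rng.normal(size=(4 * W, D))
state = np.zeros((S, D))
for i in range(0, len(seq), W):
    out, state = block_recurrent_cell(seq[i:i + W], state)

Because the state has a fixed size S, each step attends over at most W + S vectors no matter how many blocks came before, which is where the linear overall complexity comes from.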
@misc{hutchins2022blockrecurrent,
author = {Hutchins, DeLesley and Schlag, Imanol and Wu, Yuhuai and Dyer, Ethan and Neyshabur, Behnam},
keywords = {rnn transformers},
note = {arXiv:2203.07852. Comment: Update to NeurIPS camera-ready version},
title = {Block-Recurrent Transformers},
url = {http://arxiv.org/abs/2203.07852},
year = 2022
}