We show the formal equivalence of linearised self-attention mechanisms and
fast weight controllers from the early '90s, where a "slow" neural net learns
by gradient descent to program the "fast weights" of another net through
sequences of elementary programming instructions which are additive outer
products of self-invented activation patterns (today called keys and values).
Such Fast Weight Programmers (FWPs) learn to manipulate the contents of a
finite memory and dynamically interact with it. We infer a memory capacity
limitation of recent linearised softmax attention variants, and replace the
purely additive outer products by a delta rule-like programming instruction,
such that the FWP can more easily learn to correct the current mapping from
keys to values. The FWP also learns to compute dynamically changing learning
rates. We also propose a new kernel function to linearise attention which
balances simplicity and effectiveness. We conduct experiments on synthetic
retrieval problems as well as standard machine translation and language
modelling tasks which demonstrate the benefits of our methods.
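
As a rough sketch of the programming instructions summarised above (the notation is ours, not taken verbatim from the paper, and the kernel feature map applied to keys and queries is omitted: $W_t$ is the fast weight matrix, $k_t$, $v_t$, $q_t$ are the generated key, value and query, and $\beta_t$ is the dynamically predicted learning rate), the purely additive update of linearised attention and its delta rule-like replacement can be written as

\[ W_t = W_{t-1} + v_t k_t^\top, \qquad y_t = W_t q_t \quad \text{(additive outer product)} \]
\[ W_t = W_{t-1} + \beta_t \, (v_t - W_{t-1} k_t) \, k_t^\top \quad \text{(delta rule-like update)} \]

where the second rule first retrieves the value currently associated with $k_t$ and writes back only the correction towards the new target $v_t$.
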
@misc{schlag2021linear,
author = {Schlag, Imanol and Irie, Kazuki and Schmidhuber, Jürgen},
keywords = {fast transformer weight},
note = {cite arxiv:2102.11174},
title = {Linear Transformers Are Secretly Fast Weight Programmers},
url = {http://arxiv.org/abs/2102.11174},
year = 2021
}