Increasing model size when pretraining natural language representations often
results in improved performance on downstream tasks. However, at some point
further model increases become harder due to GPU/TPU memory limitations and
longer training times. To address these problems, we present two
parameter-reduction techniques to lower memory consumption and increase the
training speed of BERT. Comprehensive empirical evidence shows that our
proposed methods lead to models that scale much better compared to the original
BERT. We also use a self-supervised loss that focuses on modeling
inter-sentence coherence, and show it consistently helps downstream tasks with
multi-sentence inputs. As a result, our best model establishes new
state-of-the-art results on the GLUE, RACE, and SQuAD benchmarks while having
fewer parameters compared to BERT-large. The code and the pretrained models are
available at https://github.com/google-research/ALBERT.
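
The abstract does not name the two parameter-reduction techniques; in the full paper they are a factorized embedding parameterization and cross-layer parameter sharing. As a rough illustration of the first idea, the sketch below decomposes the V x H token-embedding table into a V x E lookup followed by an E x H projection, cutting the parameter cost from V*H to V*E + E*H when E << H. This is a minimal PyTorch sketch with illustrative names and dimensions, not the authors' implementation:

import torch
import torch.nn as nn

# Illustrative sizes; ALBERT-base uses roughly V=30000, E=128, H=768.
V, E, H = 30000, 128, 768

class FactorizedEmbedding(nn.Module):
    """Embed tokens into a small space of size E, then project up to H.

    Parameters: V*E + E*H (+ bias) instead of V*H for a plain embedding.
    """
    def __init__(self, vocab_size: int, embed_dim: int, hidden_dim: int):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)  # V x E lookup
        self.project = nn.Linear(embed_dim, hidden_dim)   # E -> H projection

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        return self.project(self.embed(token_ids))

def count_params(m: nn.Module) -> int:
    return sum(p.numel() for p in m.parameters())

factorized = FactorizedEmbedding(V, E, H)
standard = nn.Embedding(V, H)
print(f"factorized: {count_params(factorized):,} parameters")  # ~3.9M
print(f"standard:   {count_params(standard):,} parameters")    # ~23.0M

The second technique, cross-layer parameter sharing, reuses a single set of transformer-layer weights across all layers, which is why ALBERT's parameter count stays nearly constant as depth grows.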
@misc{lan2019albert,
author = {Lan, Zhenzhong and Chen, Mingda and Goodman, Sebastian and Gimpel, Kevin and Sharma, Piyush and Soricut, Radu},
keywords = {BERT lite nlp thema:ba thema:lm},
note = {cite arxiv:1909.11942},
  title = {{ALBERT}: A Lite {BERT} for Self-supervised Learning of Language Representations},
url = {http://arxiv.org/abs/1909.11942},
year = 2019
}