While the general idea of self-supervised learning is identical across
modalities, the actual algorithms and objectives differ widely because they
were developed with a single modality in mind. To get us closer to general
self-supervised learning, we present data2vec, a framework that uses the same
learning method for speech, NLP, and computer vision. The core idea is to
predict latent representations of the full input data based on a masked view
of the input, in a self-distillation setup using a standard Transformer
architecture. Instead of predicting modality-specific targets such as words,
visual tokens, or units of human speech, which are local in nature, data2vec
predicts contextualized latent representations that contain information from
the entire input. Experiments on the major benchmarks of speech recognition,
image classification, and natural language understanding demonstrate a new
state of the art or performance competitive with predominant approaches.
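In outline, the recipe in the abstract is a masked student network regressing
the contextualized latents of an exponential-moving-average (EMA) teacher. The
sketch below is a minimal PyTorch illustration of that idea under assumed
choices; the toy encoder, the mask rate, top_k, and tau are all illustrative,
not the authors' released implementation, which differs in details such as
modality-specific feature encoders, masking strategies, and target
normalization.

import copy
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyEncoder(nn.Module):
    """Stand-in Transformer encoder that returns every layer's output."""
    def __init__(self, dim=64, depth=4, heads=4):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.TransformerEncoderLayer(dim, heads, batch_first=True)
            for _ in range(depth)
        )

    def forward(self, x):
        outs = []
        for layer in self.layers:
            x = layer(x)
            outs.append(x)
        return outs  # one (batch, time, dim) tensor per layer

def data2vec_step(student, teacher, x, mask, mask_emb, top_k=2):
    # Teacher sees the full input and produces the training targets:
    # the average of its top-k layer outputs, normalized, no gradient.
    with torch.no_grad():
        target = torch.stack(teacher(x)[-top_k:]).mean(0)
        target = F.layer_norm(target, target.shape[-1:])

    # Student sees a masked view of the same input.
    x_masked = torch.where(mask.unsqueeze(-1), mask_emb, x)
    pred = student(x_masked)[-1]

    # Regress the teacher's contextualized latents at masked positions.
    return F.smooth_l1_loss(pred[mask], target[mask])

# Toy usage; random features stand in for modality-specific embeddings.
B, T, D = 2, 16, 64
student = TinyEncoder(dim=D)
teacher = copy.deepcopy(student).eval()   # teacher starts as a copy of student
mask_emb = nn.Parameter(torch.zeros(D))   # learned mask embedding
x = torch.randn(B, T, D)
mask = torch.rand(B, T) < 0.5             # mask half the time steps (illustrative)
loss = data2vec_step(student, teacher, x, mask, mask_emb)
loss.backward()

# After each optimizer step, track the student with an exponential
# moving average (tau close to 1 keeps the teacher stable).
tau = 0.999
with torch.no_grad():
    for p_t, p_s in zip(teacher.parameters(), student.parameters()):
        p_t.mul_(tau).add_(p_s, alpha=1 - tau)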
Description
data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language
@misc{baevski2022data2vec,
author = {Baevski, Alexei and Hsu, Wei-Ning and Xu, Qiantong and Babu, Arun and Gu, Jiatao and Auli, Michael},
keywords = {AI Meta},
note = {arXiv:2202.03555},
title = {data2vec: A General Framework for Self-supervised Learning in Speech, Vision and Language},
url = {http://arxiv.org/abs/2202.03555},
year = 2022
}