The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several improvements that make the Skip-gram model more expressive and enable it to learn higher quality vectors more rapidly. We show that by subsampling frequent words we obtain significant speedup, and also learn higher quality representations as measured by our tasks. We also introduce Negative Sampling, a simplified variant of Noise Contrastive Estimation (NCE) that learns more accurate vectors for frequent words compared to the hierarchical softmax. An inherent limitation of word representations is their indifference to word order and their inability to represent idiomatic phrases. For example, the meanings of "Canada" and "Air" cannot be easily combined to obtain "Air Canada". Motivated by this example, we present a simple and efficient method for finding phrases, and show that their vector representations can be accurately learned by the Skip-gram model.
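For quick reference, the techniques named in the abstract correspond to the following formulas from the paper; this is only a brief sketch of the notation (v_{w_I} and v'_{w_O} denote the input and output vector representations, f(w) a word's corpus frequency, t a chosen threshold, k the number of negative samples, P_n(w) the noise distribution, and \delta a discounting coefficient), not a substitute for the paper itself.

Negative Sampling objective, used in place of \log p(w_O \mid w_I) in the Skip-gram objective:
\log \sigma\left({v'_{w_O}}^{\top} v_{w_I}\right) + \sum_{i=1}^{k} \mathbb{E}_{w_i \sim P_n(w)}\left[\log \sigma\left(-{v'_{w_i}}^{\top} v_{w_I}\right)\right]

Subsampling of frequent words: each word w_i in the training set is discarded with probability
P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}}

Phrase detection score for a candidate bigram w_i w_j:
\mathrm{score}(w_i, w_j) = \frac{\mathrm{count}(w_i w_j) - \delta}{\mathrm{count}(w_i) \times \mathrm{count}(w_j)}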
%0 Book Section
%1 mikolov2013distributed
%A Mikolov, Tomas
%A Sutskever, Ilya
%A Chen, Kai
%A Corrado, Greg S
%A Dean, Jeff
%B Advances in Neural Information Processing Systems 26
%D 2013
%E Burges, C.J.C.
%E Bottou, L.
%E Welling, M.
%E Ghahramani, Z.
%E Weinberger, K.Q.
%K final thema:kg_embeddings
%P 3111--3119
%T Distributed Representations of Words and Phrases and their Compositionality
%U http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality
%X The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several improvements that make the Skip-gram model more expressive and enable it to learn higher quality vectors more rapidly. We show that by subsampling frequent words we obtain significant speedup, and also learn higher quality representations as measured by our tasks. We also introduce Negative Sampling, a simplified variant of Noise Contrastive Estimation (NCE) that learns more accurate vectors for frequent words compared to the hierarchical softmax. An inherent limitation of word representations is their indifference to word order and their inability to represent idiomatic phrases. For example, the meanings of "Canada" and "Air" cannot be easily combined to obtain "Air Canada". Motivated by this example, we present a simple and efficient method for finding phrases, and show that their vector representations can be accurately learned by the Skip-gram model.
@incollection{mikolov2013distributed,
abstract = {The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several improvements that make the Skip-gram model more expressive and enable it to learn higher quality vectors more rapidly. We show that by subsampling frequent words we obtain significant speedup, and also learn higher quality representations as measured by our tasks. We also introduce Negative Sampling, a simplified variant of Noise Contrastive Estimation (NCE) that learns more accurate vectors for frequent words compared to the hierarchical softmax. An inherent limitation of word representations is their indifference to word order and their inability to represent idiomatic phrases. For example, the meanings of ``Canada'' and ``Air'' cannot be easily combined to obtain ``Air Canada''. Motivated by this example, we present a simple and efficient method for finding phrases, and show that their vector representations can be accurately learned by the Skip-gram model.},
added-at = {2021-05-13T15:11:27.000+0200},
author = {Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff},
biburl = {https://www.bibsonomy.org/bibtex/2f310b01a7363a94322e180ecd249bec0/simonh},
booktitle = {Advances in Neural Information Processing Systems 26},
editor = {Burges, C.J.C. and Bottou, L. and Welling, M. and Ghahramani, Z. and Weinberger, K.Q.},
interhash = {4d7ff49f008ec05928f11e50f2db1cf9},
intrahash = {f310b01a7363a94322e180ecd249bec0},
keywords = {final thema:kg_embeddings},
pages = {3111--3119},
timestamp = {2021-06-14T12:07:24.000+0200},
title = {Distributed Representations of Words and Phrases and their Compositionality},
url = {http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality},
year = 2013
}