The recently introduced continuous Skip-gram model is an efficient method for
learning high-quality distributed vector representations that capture a large
number of precise syntactic and semantic word relationships. In this paper we
present several extensions that improve both the quality of the vectors and the
training speed. By subsampling of the frequent words we obtain significant
speedup and also learn more regular word representations. We also describe a
simple alternative to the hierarchical softmax called negative sampling. An
inherent limitation of word representations is their indifference to word order
and their inability to represent idiomatic phrases. For example, the meanings
of "Canada" and Äir" cannot be easily combined to obtain Äir Canada".
Motivated by this example, we present a simple method for finding phrases in
text, and show that learning good vector representations for millions of
phrases is possible.
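
The abstract names three concrete techniques: subsampling of frequent words, negative sampling as a replacement for the hierarchical softmax, and a data-driven score for detecting phrases such as "Air Canada". Below is a minimal Python sketch of the corresponding formulas as given in the paper; the function names, the subsampling threshold of 1e-5, and the discount delta=5 echo values discussed in the paper, while the surrounding scaffolding (tokenized input, vector arguments) is an illustrative assumption, not the authors' implementation.

import math
import random
from collections import Counter

import numpy as np

def keep_probability(word_count, total_count, t=1e-5):
    # Subsampling heuristic: an occurrence of word w is kept with probability
    # sqrt(t / f(w)), i.e. discarded with probability 1 - sqrt(t / f(w)),
    # where f(w) is the word's relative frequency and t is a small threshold.
    f = word_count / total_count
    return min(1.0, math.sqrt(t / f))

def subsample(tokens, t=1e-5):
    # Randomly drop occurrences of very frequent words before training,
    # which speeds up training and improves representations of rarer words.
    counts = Counter(tokens)
    total = len(tokens)
    return [w for w in tokens if random.random() < keep_probability(counts[w], total, t)]

def negative_sampling_objective(v_in, v_out_pos, v_out_negs):
    # Negative-sampling objective for one (input, context) pair:
    # log sigma(v_pos . v_in) + sum_k log sigma(-v_neg_k . v_in),
    # where the k negative words are drawn from a noise distribution.
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    score = np.log(sigmoid(np.dot(v_out_pos, v_in)))
    score += sum(np.log(sigmoid(-np.dot(v_neg, v_in))) for v_neg in v_out_negs)
    return score  # maximized during training

def phrase_score(unigram_counts, bigram_counts, w1, w2, delta=5):
    # Phrase-detection score: (count(w1 w2) - delta) / (count(w1) * count(w2)).
    # Bigrams scoring above a chosen threshold are merged into single tokens
    # such as "Air_Canada"; delta discounts infrequent word pairs.
    return (bigram_counts[(w1, w2)] - delta) / (unigram_counts[w1] * unigram_counts[w2])

In the paper, phrase detection is run for a few passes over the corpus with a decreasing threshold, so that longer phrases can be built out of shorter ones.
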
@misc{mikolov2013distributed,
abstract = {The recently introduced continuous Skip-gram model is an efficient method for
learning high-quality distributed vector representations that capture a large
number of precise syntactic and semantic word relationships. In this paper we
present several extensions that improve both the quality of the vectors and the
training speed. By subsampling of the frequent words we obtain significant
speedup and also learn more regular word representations. We also describe a
simple alternative to the hierarchical softmax called negative sampling. An
inherent limitation of word representations is their indifference to word order
and their inability to represent idiomatic phrases. For example, the meanings
of "Canada" and "Air" cannot be easily combined to obtain "Air Canada".
Motivated by this example, we present a simple method for finding phrases in
text, and show that learning good vector representations for millions of
phrases is possible.},
author = {Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg and Dean, Jeffrey},
note = {cite arxiv:1310.4546},
title = {Distributed Representations of Words and Phrases and their
Compositionality},
url = {http://arxiv.org/abs/1310.4546},
year = 2013
}