Prior work on computing semantic relatedness of words focused on representing their meaning in isolation, effectively disregarding inter-word affinities. We propose a large-scale data mining approach to learning word-word relatedness, where known pairs of related words impose constraints on the learning process. We learn for each word a low-dimensional representation, which strives to maximize the likelihood of a word given the contexts in which it appears. Our method, called CLEAR, is shown to significantly outperform previously published approaches. The proposed method is based on first principles, and is generic enough to exploit diverse types of text corpora, while having the flexibility to impose constraints on the derived word similarities. We also make publicly available a new labeled dataset for evaluating word relatedness algorithms, which we believe to be the largest such dataset to date.
%0 Conference Paper
%1 halawi2012largescale
%A Halawi, Guy
%A Dror, Gideon
%A Gabrilovich, Evgeniy
%A Koren, Yehuda
%B KDD
%C New York, NY, USA
%D 2012
%I ACM
%K constrained learning mturk relatedness word
%P 1406--1414
%R 10.1145/2339530.2339751
%T Large-scale Learning of Word Relatedness with Constraints
%U http://doi.acm.org/10.1145/2339530.2339751
%X Prior work on computing semantic relatedness of words focused on representing their meaning in isolation, effectively disregarding inter-word affinities. We propose a large-scale data mining approach to learning word-word relatedness, where known pairs of related words impose constraints on the learning process. We learn for each word a low-dimensional representation, which strives to maximize the likelihood of a word given the contexts in which it appears. Our method, called CLEAR, is shown to significantly outperform previously published approaches. The proposed method is based on first principles, and is generic enough to exploit diverse types of text corpora, while having the flexibility to impose constraints on the derived word similarities. We also make publicly available a new labeled dataset for evaluating word relatedness algorithms, which we believe to be the largest such dataset to date.
%ISBN 978-1-4503-1462-6
@inproceedings{halawi2012largescale,
  abstract  = {Prior work on computing semantic relatedness of words focused on representing their meaning in isolation, effectively disregarding inter-word affinities. We propose a large-scale data mining approach to learning word-word relatedness, where known pairs of related words impose constraints on the learning process. We learn for each word a low-dimensional representation, which strives to maximize the likelihood of a word given the contexts in which it appears. Our method, called CLEAR, is shown to significantly outperform previously published approaches. The proposed method is based on first principles, and is generic enough to exploit diverse types of text corpora, while having the flexibility to impose constraints on the derived word similarities. We also make publicly available a new labeled dataset for evaluating word relatedness algorithms, which we believe to be the largest such dataset to date.},
  acmid     = {2339751},
  added-at  = {2017-04-12T12:17:17.000+0200},
  address   = {New York, NY, USA},
  author    = {Halawi, Guy and Dror, Gideon and Gabrilovich, Evgeniy and Koren, Yehuda},
  biburl    = {https://www.bibsonomy.org/bibtex/2aa3d5bd9d00732aa81e5d8ce9d6d7491/thoni},
  booktitle = {KDD},
  doi       = {10.1145/2339530.2339751},
  interhash = {aba863240e36b7b0b5542e20fe17a3ee},
  intrahash = {aa3d5bd9d00732aa81e5d8ce9d6d7491},
  isbn      = {978-1-4503-1462-6},
  keywords  = {constrained learning mturk relatedness word},
  location  = {Beijing, China},
  numpages  = {9},
  pages     = {1406--1414},
  publisher = {ACM},
  timestamp = {2017-05-14T13:51:57.000+0200},
  title     = {Large-scale Learning of Word Relatedness with Constraints},
  url       = {https://doi.org/10.1145/2339530.2339751},
  year      = {2012},
}