Evaluation measures act as objective functions to be optimized by information retrieval systems. Such objective functions must accurately reflect user requirements, particularly when tuning IR systems and learning ranking functions. Ambiguity in queries and redundancy in retrieved documents are poorly reflected by current evaluation measures. In this paper, we present a framework for evaluation that systematically rewards novelty and diversity. We develop this framework into a specific evaluation measure, based on cumulative gain. We demonstrate the feasibility of our approach using a test collection based on the TREC question answering track.
Description
Novelty and diversity in information retrieval evaluation
%0 Conference Paper
%1 1390446
%A Clarke, Charles L.A.
%A Kolla, Maheedhar
%A Cormack, Gordon V.
%A Vechtomova, Olga
%A Ashkan, Azin
%A Büttcher, Stefan
%A MacKinnon, Ian
%B SIGIR '08: Proceedings of the 31st annual international ACM SIGIR conference on Research and development in information retrieval
%C New York, NY, USA
%D 2008
%I ACM
%K evaluation lesen suchkiste
%P 659--666
%R 10.1145/1390334.1390446
%T Novelty and diversity in information retrieval evaluation
%U http://portal.acm.org/citation.cfm?id=1390446&dl=GUIDE&coll=GUIDE&CFID=23275471&CFTOKEN=23397023
%X Evaluation measures act as objective functions to be optimized by information retrieval systems. Such objective functions must accurately reflect user requirements, particularly when tuning IR systems and learning ranking functions. Ambiguity in queries and redundancy in retrieved documents are poorly reflected by current evaluation measures. In this paper, we present a framework for evaluation that systematically rewards novelty and diversity. We develop this framework into a specific evaluation measure, based on cumulative gain. We demonstrate the feasibility of our approach using a test collection based on the TREC question answering track.
%@ 978-1-60558-164-4
@inproceedings{1390446,
  abstract    = {Evaluation measures act as objective functions to be optimized by information retrieval systems. Such objective functions must accurately reflect user requirements, particularly when tuning IR systems and learning ranking functions. Ambiguity in queries and redundancy in retrieved documents are poorly reflected by current evaluation measures. In this paper, we present a framework for evaluation that systematically rewards novelty and diversity. We develop this framework into a specific evaluation measure, based on cumulative gain. We demonstrate the feasibility of our approach using a test collection based on the TREC question answering track.},
  added-at    = {2009-02-23T20:06:33.000+0100},
  address     = {New York, NY, USA},
  author      = {Clarke, Charles L. A. and Kolla, Maheedhar and Cormack, Gordon V. and Vechtomova, Olga and Ashkan, Azin and B{\"u}ttcher, Stefan and MacKinnon, Ian},
  biburl      = {https://www.bibsonomy.org/bibtex/29b1a9f5de1d59ec66aa06a66d8d751b6/tillk},
  booktitle   = {{SIGIR} '08: Proceedings of the 31st Annual International {ACM} {SIGIR} Conference on Research and Development in Information Retrieval},
  description = {Novelty and diversity in information retrieval evaluation},
  doi         = {10.1145/1390334.1390446},
  interhash   = {91453c13a6ad38f18a71c0825c2dc9f1},
  intrahash   = {9b1a9f5de1d59ec66aa06a66d8d751b6},
  isbn        = {978-1-60558-164-4},
  keywords    = {evaluation lesen suchkiste},
  location    = {Singapore, Singapore},
  pages       = {659--666},
  publisher   = {ACM},
  timestamp   = {2009-02-23T20:06:33.000+0100},
  title       = {Novelty and Diversity in Information Retrieval Evaluation},
  url         = {http://portal.acm.org/citation.cfm?id=1390446&dl=GUIDE&coll=GUIDE&CFID=23275471&CFTOKEN=23397023},
  year        = {2008},
}