Recommender systems learn from past user preferences in order to predict future user interests and provide users with personalized suggestions. Previous research has demonstrated that biases in user profiles in the aggregate can influence the recommendations to users who do not share the majority preference. One consequence of this bias propagation effect is miscalibration, a mismatch between the types or categories of items that a user prefers and the items provided in recommendations. In this paper, we conduct a systematic analysis aimed at identifying key characteristics in user profiles that might lead to miscalibrated recommendations. We consider several categories of profile characteristics, including similarity to the average user, propensity towards popularity, profile diversity, and preference intensity. We develop predictive models of miscalibration and use these models to identify the most important features correlated with miscalibration, given different algorithms and dataset characteristics. Our analysis is intended to help system designers predict miscalibration effects and to develop recommendation algorithms with improved calibration properties.
Описание
Calibration in Collaborative Filtering Recommender Systems | Proceedings of the 31st ACM Conference on Hypertext and Social Media
%0 Conference Paper
%1 Lin_2020
%A Lin, Kun
%A Sonboli, Nasim
%A Mobasher, Bamshad
%A Burke, Robin
%B Proceedings of the 31st ACM Conference on Hypertext and Social Media
%D 2020
%I ACM
%K bias diversity ht2020 recommender
%P 187-206
%R 10.1145/3372923.3404793
%T Calibration in Collaborative Filtering Recommender Systems
%U https://doi.org/10.1145/3372923.3404793
%X Recommender systems learn from past user preferences in order to predict future user interests and provide users with personalized suggestions. Previous research has demonstrated that biases in user profiles in the aggregate can influence the recommendations to users who do not share the majority preference. One consequence of this bias propagation effect is miscalibration, a mismatch between the types or categories of items that a user prefers and the items provided in recommendations. In this paper, we conduct a systematic analysis aimed at identifying key characteristics in user profiles that might lead to miscalibrated recommendations. We consider several categories of profile characteristics, including similarity to the average user, propensity towards popularity, profile diversity, and preference intensity. We develop predictive models of miscalibration and use these models to identify the most important features correlated with miscalibration, given different algorithms and dataset characteristics. Our analysis is intended to help system designers predict miscalibration effects and to develop recommendation algorithms with improved calibration properties.
@inproceedings{Lin_2020,
  abstract    = {Recommender systems learn from past user preferences in order to predict future user interests and provide users with personalized suggestions. Previous research has demonstrated that biases in user profiles in the aggregate can influence the recommendations to users who do not share the majority preference. One consequence of this bias propagation effect is miscalibration, a mismatch between the types or categories of items that a user prefers and the items provided in recommendations. In this paper, we conduct a systematic analysis aimed at identifying key characteristics in user profiles that might lead to miscalibrated recommendations. We consider several categories of profile characteristics, including similarity to the average user, propensity towards popularity, profile diversity, and preference intensity. We develop predictive models of miscalibration and use these models to identify the most important features correlated with miscalibration, given different algorithms and dataset characteristics. Our analysis is intended to help system designers predict miscalibration effects and to develop recommendation algorithms with improved calibration properties.},
  added-at    = {2020-07-14T20:36:49.000+0200},
  author      = {Lin, Kun and Sonboli, Nasim and Mobasher, Bamshad and Burke, Robin},
  biburl      = {https://www.bibsonomy.org/bibtex/2a09b19fa78e218748e664ef78c60c82d/brusilovsky},
  booktitle   = {Proceedings of the 31st {ACM} Conference on Hypertext and Social Media},
  description = {Calibration in Collaborative Filtering Recommender Systems | Proceedings of the 31st ACM Conference on Hypertext and Social Media},
  doi         = {10.1145/3372923.3404793},
  interhash   = {6ceec2fab4b173800ce8c8fcf4dd0e6e},
  intrahash   = {a09b19fa78e218748e664ef78c60c82d},
  keywords    = {bias diversity ht2020 recommender},
  month       = jul,
  pages       = {187--206},
  publisher   = {ACM},
  timestamp   = {2020-11-22T23:59:18.000+0100},
  title       = {Calibration in Collaborative Filtering Recommender Systems},
  url         = {https://doi.org/10.1145/3372923.3404793},
  year        = {2020},
}