Data-driven decision-making consequential to individuals raises important
questions of accountability and justice. Indeed, European law provides
individuals limited rights to 'meaningful information about the logic' behind
significant, autonomous decisions such as loan approvals, insurance quotes, and
CV filtering. We undertake three experimental studies examining people's
perceptions of justice in algorithmic decision-making under different scenarios
and explanation styles. Dimensions of justice previously observed in response
to human decision-making appear similarly engaged in response to algorithmic
decisions. Qualitative analysis identified several concerns and heuristics
involved in justice perceptions including arbitrariness, generalisation, and
(in)dignity. Quantitative analysis indicates that explanation styles primarily
matter to justice perceptions only when subjects are exposed to multiple
different styles---under repeated exposure of one style, scenario effects
obscure any explanation effects. Our results suggest there may be no 'best'
approach to explaining algorithmic decisions, and that reflection on their
automated nature both implicates and mitigates justice dimensions.
@misc{binns2018reducing,
author = {Binns, Reuben and Van Kleek, Max and Veale, Michael and Lyngs, Ulrik and Zhao, Jun and Shadbolt, Nigel},
doi = {10.1145/3173574.3173951},
keywords = {algorithms decision-support},
note = {arXiv:1801.10408. Comment: 14 pages, 3 figures, ACM Conference on Human Factors in Computing Systems (CHI'18), April 21--26, Montreal, Canada},
title = {'It's Reducing a Human Being to a Percentage': Perceptions of Justice in Algorithmic Decisions},
url = {http://arxiv.org/abs/1801.10408},
year = 2018
}