We introduce instancewise feature selection as a methodology for model
interpretation. Our method is based on learning a function to extract a subset
of features that are most informative for each given example. This feature
selector is trained to maximize the mutual information between selected
features and the response variable, where the conditional distribution of the
response variable given the input is the model to be explained. We develop an
efficient variational approximation to the mutual information, and show the
effectiveness of our method on a variety of synthetic and real data sets using
both quantitative metrics and human evaluation.
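A minimal sketch of the training setup the abstract describes, assuming a PyTorch implementation: a selector network proposes a per-example feature subset, and a variational approximator is trained on the selected features to maximize a lower bound on the mutual information with the response. The k-hot relaxation via repeated Gumbel-softmax samples, and all names here (InstancewiseSelector, train_step, layer sizes), are illustrative assumptions, not the authors' code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class InstancewiseSelector(nn.Module):
    """Sketch of instancewise feature selection trained against a variational
    bound on I(X_S; Y); the subset-sampling mechanism is an assumption."""

    def __init__(self, d, k, hidden=64, n_classes=2):
        super().__init__()
        self.k = k
        # Selector: one logit per input feature, computed per example.
        self.selector = nn.Sequential(
            nn.Linear(d, hidden), nn.ReLU(), nn.Linear(hidden, d))
        # Variational approximator q(y | x_S): sees only the selected features.
        self.approximator = nn.Sequential(
            nn.Linear(d, hidden), nn.ReLU(), nn.Linear(hidden, n_classes))

    def sample_mask(self, logits, tau=0.5):
        # Relaxed (approximately k-hot) mask: elementwise max of k Gumbel-softmax samples.
        samples = torch.stack(
            [F.gumbel_softmax(logits, tau=tau, dim=-1) for _ in range(self.k)])
        return samples.max(dim=0).values

    def forward(self, x):
        logits = self.selector(x)
        mask = self.sample_mask(logits)
        return self.approximator(x * mask), mask

def train_step(explainer, optimizer, x, y):
    # y is the response of the model being explained (e.g. its predicted class),
    # so minimizing cross-entropy maximizes E[log q(y | x_S)], a variational
    # lower bound on the mutual information up to a constant.
    optimizer.zero_grad()
    scores, _ = explainer(x)
    loss = F.cross_entropy(scores, y)
    loss.backward()
    optimizer.step()
    return loss.item()
```

At explanation time, the selector's logits (or the sampled mask) rank the features chosen for that particular example, which is what distinguishes this instancewise approach from a single global feature ranking.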
@misc{chen2018learning,
author = {Chen, Jianbo and Song, Le and Wainwright, Martin J. and Jordan, Michael I.},
keywords = {interpretation, machine learning, neural network},
note = {arXiv:1802.07814. Accepted to ICML 2018 as a long oral},
title = {Learning to Explain: An Information-Theoretic Perspective on Model Interpretation},
url = {http://arxiv.org/abs/1802.07814},
year = 2018
}