Recent years have seen important advances in the quality of state-of-the-art
models, but this has come at the expense of models becoming less interpretable.
This survey presents an overview of the current state of Explainable AI (XAI),
considered within the domain of Natural Language Processing (NLP). We discuss
the main categorization of explanations, as well as the various ways
explanations can be arrived at and visualized. We detail the operations and
explainability techniques currently available for generating explanations for
NLP model predictions, to serve as a resource for model developers in the
community. Finally, we point out the current gaps and encourage directions for
future work in this important research area.
Description
A Survey of the State of Explainable AI for Natural Language Processing
%0 Generic
%1 danilevsky2020survey
%A Danilevsky, Marina
%A Qian, Kun
%A Aharonov, Ranit
%A Katsis, Yannis
%A Kawas, Ban
%A Sen, Prithviraj
%D 2020
%K nlp survey xai
%T A Survey of the State of Explainable AI for Natural Language Processing
%U http://arxiv.org/abs/2010.00711
%X Recent years have seen important advances in the quality of state-of-the-art
models, but this has come at the expense of models becoming less interpretable.
This survey presents an overview of the current state of Explainable AI (XAI),
considered within the domain of Natural Language Processing (NLP). We discuss
the main categorization of explanations, as well as the various ways
explanations can be arrived at and visualized. We detail the operations and
explainability techniques currently available for generating explanations for
NLP model predictions, to serve as a resource for model developers in the
community. Finally, we point out the current gaps and encourage directions for
future work in this important research area.
@misc{danilevsky2020survey,
  abstract      = {Recent years have seen important advances in the quality of state-of-the-art
models, but this has come at the expense of models becoming less interpretable.
This survey presents an overview of the current state of Explainable AI (XAI),
considered within the domain of Natural Language Processing (NLP). We discuss
the main categorization of explanations, as well as the various ways
explanations can be arrived at and visualized. We detail the operations and
explainability techniques currently available for generating explanations for
NLP model predictions, to serve as a resource for model developers in the
community. Finally, we point out the current gaps and encourage directions for
future work in this important research area.},
  added-at      = {2021-04-10T11:49:07.000+0200},
  archiveprefix = {arXiv},
  author        = {Danilevsky, Marina and Qian, Kun and Aharonov, Ranit and Katsis, Yannis and Kawas, Ban and Sen, Prithviraj},
  biburl        = {https://www.bibsonomy.org/bibtex/221db6f1d1a410750b00a9daa7815c0b8/hotho},
  description   = {A Survey of the State of Explainable AI for Natural Language Processing},
  eprint        = {2010.00711},
  interhash     = {60ce097ac62c228d9a32e7866c9b607b},
  intrahash     = {21db6f1d1a410750b00a9daa7815c0b8},
  keywords      = {nlp survey xai},
  note          = {To appear in AACL-IJCNLP 2020},
  timestamp     = {2021-04-10T11:49:07.000+0200},
  title         = {A Survey of the State of Explainable {AI} for Natural Language Processing},
  url           = {http://arxiv.org/abs/2010.00711},
  year          = {2020}
}