Conversational question answering (ConvQA) is a simplified but concrete
setting of conversational search. One of its major challenges is to leverage
the conversation history to understand and answer the current question. In this
work, we propose a novel solution for ConvQA that involves three aspects.
First, we propose a positional history answer embedding method to encode
conversation history with position information using BERT in a natural way.
BERT is a powerful technique for text representation. Second, we design a
history attention mechanism (HAM) to conduct a "soft selection" for
conversation histories. This method attends to history turns with different
weights based on how helpful they are on answering the current question. Third,
in addition to handling conversation history, we take advantage of multi-task
learning (MTL) to do answer prediction along with another essential
conversation task (dialog act prediction) using a uniform model architecture.
MTL is able to learn more expressive and generic representations to improve the
performance of ConvQA. We demonstrate the effectiveness of our model with
extensive experimental evaluations on QuAC, a large-scale ConvQA dataset. We
show that position information plays an important role in conversation history
modeling. We also visualize the history attention and provide new insights into
conversation history understanding.
Description
[1908.09456] Attentive History Selection for Conversational Question Answering
%0 Generic
%1 qu2019attentive
%A Qu, Chen
%A Yang, Liu
%A Qiu, Minghui
%A Zhang, Yongfeng
%A Chen, Cen
%A Croft, W. Bruce
%A Iyyer, Mohit
%D 2019
%K bert masterthesis qna
%R 10.1145/3357384.3357905
%T Attentive History Selection for Conversational Question Answering
%U http://arxiv.org/abs/1908.09456
%X Conversational question answering (ConvQA) is a simplified but concrete
setting of conversational search. One of its major challenges is to leverage
the conversation history to understand and answer the current question. In this
work, we propose a novel solution for ConvQA that involves three aspects.
First, we propose a positional history answer embedding method to encode
conversation history with position information using BERT in a natural way.
BERT is a powerful technique for text representation. Second, we design a
history attention mechanism (HAM) to conduct a "soft selection" for
conversation histories. This method attends to history turns with different
weights based on how helpful they are on answering the current question. Third,
in addition to handling conversation history, we take advantage of multi-task
learning (MTL) to do answer prediction along with another essential
conversation task (dialog act prediction) using a uniform model architecture.
MTL is able to learn more expressive and generic representations to improve the
performance of ConvQA. We demonstrate the effectiveness of our model with
extensive experimental evaluations on QuAC, a large-scale ConvQA dataset. We
show that position information plays an important role in conversation history
modeling. We also visualize the history attention and provide new insights into
conversation history understanding.
@comment{Entry type corrected from @misc: the DOI (10.1145/3357384.3357905) is the
  ACM CIKM 2019 proceedings record, matching the exporter's note "Accepted to CIKM
  2019", so @inproceedings with booktitle/publisher is the accurate type. The arXiv
  identifier previously garbled inside `note` now lives in the standard
  eprint/archiveprefix fields. BibSonomy housekeeping fields (added-at, biburl,
  interhash, intrahash, timestamp) are ignored by BibTeX styles and kept for
  provenance.}
@inproceedings{qu2019attentive,
  author        = {Qu, Chen and Yang, Liu and Qiu, Minghui and Zhang, Yongfeng and Chen, Cen and Croft, W. Bruce and Iyyer, Mohit},
  title         = {Attentive History Selection for Conversational Question Answering},
  booktitle     = {Proceedings of the 28th {ACM} International Conference on Information and Knowledge Management ({CIKM} '19)},
  publisher     = {ACM},
  year          = {2019},
  doi           = {10.1145/3357384.3357905},
  eprint        = {1908.09456},
  archiveprefix = {arXiv},
  url           = {http://arxiv.org/abs/1908.09456},
  abstract      = {Conversational question answering (ConvQA) is a simplified but concrete
setting of conversational search. One of its major challenges is to leverage
the conversation history to understand and answer the current question. In this
work, we propose a novel solution for ConvQA that involves three aspects.
First, we propose a positional history answer embedding method to encode
conversation history with position information using BERT in a natural way.
BERT is a powerful technique for text representation. Second, we design a
history attention mechanism (HAM) to conduct a ``soft selection'' for
conversation histories. This method attends to history turns with different
weights based on how helpful they are on answering the current question. Third,
in addition to handling conversation history, we take advantage of multi-task
learning (MTL) to do answer prediction along with another essential
conversation task (dialog act prediction) using a uniform model architecture.
MTL is able to learn more expressive and generic representations to improve the
performance of ConvQA. We demonstrate the effectiveness of our model with
extensive experimental evaluations on QuAC, a large-scale ConvQA dataset. We
show that position information plays an important role in conversation history
modeling. We also visualize the history attention and provide new insights into
conversation history understanding.},
  keywords      = {bert masterthesis qna},
  added-at      = {2020-12-10T16:41:18.000+0100},
  biburl        = {https://www.bibsonomy.org/bibtex/2ceddbd5505b46bdf13a0301608f2eab2/festplatte},
  description   = {[1908.09456] Attentive History Selection for Conversational Question Answering},
  interhash     = {4a705ec81d3d0f5205ae36afa6dbb0e1},
  intrahash     = {ceddbd5505b46bdf13a0301608f2eab2},
  timestamp     = {2020-12-10T16:41:18.000+0100},
}