Automatic question generation aims to generate questions from a context, with
the corresponding answers being sub-spans of the given passage. While most
existing methods rely on heuristic rules to generate questions, neural network
approaches have been proposed more recently. In this work, we propose a variant
of the self-attention Transformer architecture to generate meaningful and
diverse questions. To this end, we propose an easy-to-use model that combines
the Transformer decoder GPT-2 with the Transformer encoder BERT for the
downstream task of question answering. The model is trained in an end-to-end
fashion, where the language model learns to produce a question-answer-aware
input representation that facilitates generating an answer-focused question.
Our results for neural question generation from text on the SQuAD 1.1 dataset
suggest that our method can produce semantically correct and diverse questions.
Additionally, we assess the performance of our proposed method on the
downstream task of question answering. The analysis shows that our proposed
generation-and-answering collaboration framework yields relative improvements
on both tasks and is particularly powerful in the semi-supervised setup. The
results further suggest a robust and comparatively lean pipeline that
facilitates question generation in the small-data regime.
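
The abstract describes a two-model pipeline: a GPT-2 decoder that generates an
answer-aware question from a passage, and a BERT encoder that answers it. The
following is a minimal sketch of that pipeline using the Hugging Face
transformers library. The prompt format, sampling settings, and checkpoint
names are illustrative assumptions, not the paper's setup: the paper trains the
language model end to end to produce the question-answer-aware conditioning, so
an off-the-shelf GPT-2 will not match its output quality.

from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# Passage and answer span (the answer is a sub-span of the passage).
passage = ("The Transformer architecture was introduced in 2017 and relies "
           "entirely on self-attention, dispensing with recurrence.")
answer = "2017"

# --- Question generation with the GPT-2 decoder ---
gen_tok = GPT2Tokenizer.from_pretrained("gpt2")
gen_model = GPT2LMHeadModel.from_pretrained("gpt2")

# Hypothetical answer-aware prompt; the paper learns this conditioning
# representation end to end rather than hand-crafting it.
prompt = f"context: {passage} answer: {answer} question:"
input_ids = gen_tok.encode(prompt, return_tensors="pt")
output_ids = gen_model.generate(
    input_ids,
    max_new_tokens=30,
    do_sample=True,   # sampling promotes question diversity
    top_p=0.9,
    pad_token_id=gen_tok.eos_token_id,
)
# Strip the prompt tokens, keeping only the generated question.
question = gen_tok.decode(output_ids[0][input_ids.shape[1]:],
                          skip_special_tokens=True).strip()

# --- Question answering with the BERT encoder ---
# Any SQuAD-fine-tuned BERT checkpoint can stand in here.
qa = pipeline("question-answering",
              model="bert-large-uncased-whole-word-masking-finetuned-squad")
result = qa(question=question, context=passage)
print(question, "->", result["answer"])

Consistent with the semi-supervised setup mentioned above, such a generator can
be used to synthesize additional question-answer pairs from unlabeled passages,
augmenting a small labeled training set before fine-tuning the QA model.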
Description
[1911.02365] Learning to Answer by Learning to Ask: Getting the Best of GPT-2 and BERT Worlds
%0 Generic
%1 klein2019learning
%A Klein, Tassilo
%A Nabi, Moin
%D 2019
%K bert gpt2 masterthesis qg
%T Learning to Answer by Learning to Ask: Getting the Best of GPT-2 and BERT Worlds
%U http://arxiv.org/abs/1911.02365
@misc{klein2019learning,
author = {Klein, Tassilo and Nabi, Moin},
keywords = {bert gpt2 masterthesis qg},
note = {cite arxiv:1911.02365},
title = {Learning to Answer by Learning to Ask: Getting the Best of GPT-2 and BERT Worlds},
url = {http://arxiv.org/abs/1911.02365},
year = 2019
}