Software developers often resort to Stack Overflow (SO) to meet their
programming needs. Given the abundance of relevant posts, navigating them and
comparing different solutions is tedious and time-consuming. Recent work has
proposed automatically summarizing SO posts into concise text to ease post
navigation. However, these techniques rely only on information retrieval
methods or heuristics for text summarization, which are insufficient to handle
the ambiguity and sophistication of natural language. This paper presents a
deep-learning-based framework called ASSORT for SO post summarization. ASSORT
includes two complementary learning methods, ASSORT_S and ASSORT_IS, to
address the lack of labeled training data for SO post summarization. ASSORT_S
directly trains a novel ensemble learning model with BERT embeddings and
domain-specific features to account for the unique characteristics of SO
posts. By contrast, ASSORT_IS reuses pre-trained models while addressing the
domain shift challenge when no training data is available (i.e., zero-shot
learning). Both ASSORT_S and ASSORT_IS outperform six existing techniques by
at least 13% and 7%, respectively, in F1 score. Furthermore, a human study
shows that participants significantly preferred summaries generated by
ASSORT_S and ASSORT_IS over the best baseline, while the preference difference
between ASSORT_S and ASSORT_IS was small.
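
Since the abstract describes ASSORT_S only at a high level, a minimal
illustrative sketch may help. It assumes an extractive, per-sentence
classification formulation using Hugging Face Transformers and PyTorch; every
name and feature below (embed, domain_features, the linear probe) is
hypothetical, not the authors' actual ensemble.

import torch
from transformers import AutoTokenizer, AutoModel

# Hypothetical sketch in the spirit of ASSORT_S: score each answer sentence
# with a classifier over a BERT [CLS] embedding concatenated with simple
# hand-crafted, SO-flavored features. Not the paper's implementation.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
bert = AutoModel.from_pretrained("bert-base-uncased")

def embed(sentence: str) -> torch.Tensor:
    # [CLS] token embedding; 768-dim for bert-base.
    inputs = tokenizer(sentence, return_tensors="pt", truncation=True)
    with torch.no_grad():
        out = bert(**inputs)
    return out.last_hidden_state[:, 0, :].squeeze(0)

def domain_features(sentence: str) -> torch.Tensor:
    # Toy stand-ins for domain-specific cues (code mentions, length).
    return torch.tensor([
        float("`" in sentence or "()" in sentence),  # looks code-related
        float(len(sentence.split())),                # sentence length
    ])

# A single linear probe stands in for the paper's ensemble; ASSORT_S would
# train it on labeled (sentence, keep-or-drop) pairs.
classifier = torch.nn.Linear(768 + 2, 1)

def keep(sentence: str) -> bool:
    feats = torch.cat([embed(sentence), domain_features(sentence)])
    return torch.sigmoid(classifier(feats)).item() > 0.5

answer = [
    "Use Array.prototype.map() to transform each element.",
    "I ran into the same problem last year on a different project.",
]
print([s for s in answer if keep(s)])

With an untrained probe the scores are arbitrary; the point is only how BERT
embeddings and domain-specific features would be combined per sentence.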
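The zero-shot direction can likewise be illustrated only as a hedged sketch:
reuse an off-the-shelf pre-trained abstractive summarizer with no SO training
data. The model choice (facebook/bart-large-cnn) and the example post are
placeholders, and ASSORT_IS's actual handling of domain shift is not
reproduced here.

from transformers import pipeline

# Hypothetical zero-shot baseline in the spirit of ASSORT_IS: a general
# pre-trained summarizer applied directly to an SO answer, without any
# labeled SO data. Domain-shift mitigation is omitted.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

post = (
    "You can use Array.prototype.map() to transform each element. "
    "map() returns a new array and does not mutate the original, "
    "so it is the right choice when you need the transformed values; "
    "forEach() is a better fit when you only want side effects."
)
print(summarizer(post, max_length=40, min_length=10)[0]["summary_text"])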
@misc{kou2023automated,
author = {Kou, Bonan and Chen, Muhao and Zhang, Tianyi},
keywords = {javascript},
note = {arXiv:2305.16680, ICSE 2023},
title = {Automated Summarization of Stack Overflow Posts},
url = {http://arxiv.org/abs/2305.16680},
year = 2023
}