Conversational agents (CAs) provide opportunities for improving the interaction in evaluation surveys. To investigate if and how a user-centered conversational evaluation tool impacts users' response quality and their experience, we built EVA - a novel conversational course evaluation tool for educational scenarios. In a field experiment with 128 students, we compared EVA against a static web survey. Our results confirm prior findings from the literature about the positive effect of conversational evaluation tools in the domain of education. Second, we investigated the differences between a voice-based and text-based conversational human-computer interaction of EVA in the same experimental set-up. Against our prior expectation, the students of the voice-based interaction answered with higher information quality but with lower quantity of information compared to the text-based modality. Our findings indicate that using a conversational CA (voice and text-based) results in a higher response quality and user experience compared to a static web survey interface.
%0 Journal Article
%1 ls_leimeister
%A Wambsganss, Thiemo
%A Zierau, Naim
%A Söllner, Matthias
%A Käser, Tanja
%A Koedinger, Kenneth R.
%A Leimeister, Jan Marco
%C New York, NY, USA
%D 2022
%I Association for Computing Machinery
%J Proceedings of the ACM on Human-Computer Interaction (PACMHCI)
%K conversational_agents course_evaluations educational_applications itegpub pub_jml pub_msö voice_interfaces
%N CSCW2
%P 1-27
%R 10.1145/3555619
%T Designing Conversational Evaluation Tools: A Comparison of Text and Voice Modalities to Improve Response Quality in Course Evaluations
%U https://pubs.wi-kassel.de/wp-content/uploads/2022/12/JML_901.pdf
%V 6
%X Conversational agents (CAs) provide opportunities for improving the interaction in evaluation surveys. To investigate if and how a user-centered conversational evaluation tool impacts users' response quality and their experience, we build EVA - a novel conversational course evaluation tool for educational scenarios. In a field experiment with 128 students, we compared EVA against a static web survey. Our results confirm prior findings from literature about the positive effect of conversational evaluation tools in the domain of education. Second, we then investigate the differences between a voice-based and text-based conversational human-computer interaction of EVA in the same experimental set-up. Against our prior expectation, the students of the voice-based interaction answered with higher information quality but with lower quantity of information compared to the text-based modality. Our findings indicate that using a conversational CA (voice and text-based) results in a higher response quality and user experience compared to a static web survey interface.
@article{ls_leimeister,
  abstract   = {Conversational agents (CAs) provide opportunities for improving the interaction in evaluation surveys. To investigate if and how a user-centered conversational evaluation tool impacts users' response quality and their experience, we build EVA - a novel conversational course evaluation tool for educational scenarios. In a field experiment with 128 students, we compared EVA against a static web survey. Our results confirm prior findings from literature about the positive effect of conversational evaluation tools in the domain of education. Second, we then investigate the differences between a voice-based and text-based conversational human-computer interaction of EVA in the same experimental set-up. Against our prior expectation, the students of the voice-based interaction answered with higher information quality but with lower quantity of information compared to the text-based modality. Our findings indicate that using a conversational CA (voice and text-based) results in a higher response quality and user experience compared to a static web survey interface.},
  added-at   = {2022-11-19T18:58:07.000+0100},
  address    = {New York, NY, USA},
  articleno  = {506},
  author     = {Wambsganss, Thiemo and Zierau, Naim and Söllner, Matthias and Käser, Tanja and Koedinger, Kenneth R. and Leimeister, Jan Marco},
  biburl     = {https://www.bibsonomy.org/bibtex/262cd0cbae3e7fdad360e0552378f5b14/ls_leimeister},
  doi        = {10.1145/3555619},
  interhash  = {8e197d8cc0750b9da6c0037e3eeb95c4},
  intrahash  = {62cd0cbae3e7fdad360e0552378f5b14},
  issn       = {2573-0142},
  issue_date = {November 2022},
  journal    = {Proceedings of the ACM on Human-Computer Interaction (PACMHCI)},
  keywords   = {conversational_agents course_evaluations educational_applications itegpub pub_jml pub_msö voice_interfaces},
  month      = nov,
  number     = {CSCW2},
  numpages   = {27},
  pages      = {1--27},
  publisher  = {Association for Computing Machinery},
  timestamp  = {2023-03-20T13:01:28.000+0100},
  title      = {Designing Conversational Evaluation Tools: A Comparison of Text and Voice Modalities to Improve Response Quality in Course Evaluations},
  url        = {https://pubs.wi-kassel.de/wp-content/uploads/2022/12/JML_901.pdf},
  volume     = {6},
  year       = {2022},
}