Ever since the emergence of large language models (LLMs) and related
applications, such as ChatGPT, its performance and error analysis for
programming tasks have been subject to research. In this work-in-progress
paper, we explore the potential of such LLMs for computing educators and
learners, as we analyze the feedback it generates to a given input containing
program code. In particular, we aim at (1) exploring how an LLM like ChatGPT
responds to students seeking help with their introductory programming tasks,
and (2) identifying feedback types in its responses. To achieve these goals, we
used students' programming sequences from a dataset gathered within a CS1
course as input for ChatGPT along with questions required to elicit feedback
and correct solutions. The results show that ChatGPT performs reasonably well
for some of the introductory programming tasks and student errors, which means
that students can potentially benefit. However, educators should provide
guidance on how to use the provided feedback, as it can contain misleading
information for novices.
Description
[2309.00029] Exploring the Potential of Large Language Models to Generate Formative Programming Feedback
%0 Generic
%1 kiesler2023exploring
%A Kiesler, Natalie
%A Lohr, Dominic
%A Keuning, Hieke
%D 2023
%K feedback llm programming progtutor
%T Exploring the Potential of Large Language Models to Generate Formative
Programming Feedback
%U http://arxiv.org/abs/2309.00029
%X Ever since the emergence of large language models (LLMs) and related
applications, such as ChatGPT, its performance and error analysis for
programming tasks have been subject to research. In this work-in-progress
paper, we explore the potential of such LLMs for computing educators and
learners, as we analyze the feedback it generates to a given input containing
program code. In particular, we aim at (1) exploring how an LLM like ChatGPT
responds to students seeking help with their introductory programming tasks,
and (2) identifying feedback types in its responses. To achieve these goals, we
used students' programming sequences from a dataset gathered within a CS1
course as input for ChatGPT along with questions required to elicit feedback
and correct solutions. The results show that ChatGPT performs reasonably well
for some of the introductory programming tasks and student errors, which means
that students can potentially benefit. However, educators should provide
guidance on how to use the provided feedback, as it can contain misleading
information for novices.
@misc{kiesler2023exploring,
  abstract      = {Ever since the emergence of large language models (LLMs) and related
applications, such as ChatGPT, its performance and error analysis for
programming tasks have been subject to research. In this work-in-progress
paper, we explore the potential of such LLMs for computing educators and
learners, as we analyze the feedback it generates to a given input containing
program code. In particular, we aim at (1) exploring how an LLM like ChatGPT
responds to students seeking help with their introductory programming tasks,
and (2) identifying feedback types in its responses. To achieve these goals, we
used students' programming sequences from a dataset gathered within a CS1
course as input for ChatGPT along with questions required to elicit feedback
and correct solutions. The results show that ChatGPT performs reasonably well
for some of the introductory programming tasks and student errors, which means
that students can potentially benefit. However, educators should provide
guidance on how to use the provided feedback, as it can contain misleading
information for novices.},
  added-at      = {2023-12-08T19:47:17.000+0100},
  archiveprefix = {arXiv},
  author        = {Kiesler, Natalie and Lohr, Dominic and Keuning, Hieke},
  biburl        = {https://www.bibsonomy.org/bibtex/2b8da11a584f84f119b4eb9202ec674d6/brusilovsky},
  description   = {[2309.00029] Exploring the Potential of Large Language Models to Generate Formative Programming Feedback},
  doi           = {10.48550/arXiv.2309.00029},
  eprint        = {2309.00029},
  interhash     = {f24fa8432d5e224bf3aa34fb54a2d12e},
  intrahash     = {b8da11a584f84f119b4eb9202ec674d6},
  keywords      = {feedback llm programming progtutor},
  note          = {Accepted to FIE 2023},
  timestamp     = {2023-12-08T19:47:17.000+0100},
  title         = {Exploring the Potential of Large Language Models to Generate Formative Programming Feedback},
  url           = {http://arxiv.org/abs/2309.00029},
  year          = {2023},
}