Most work on learning curves for ITSs has focused on the knowledge components (or skills) included in the curves, aggregated across students. But an aggregate learning curve need not have the same form as subsets of its underlying data, so learning curves for subpopulations of students may take different forms. We show that disaggregating a skill's aggregate learning curve into separate learning curves for different student subpopulations can reveal learning: 70\% of the skills that did not show learning and were identified as candidates for improvement did show learning when disaggregated. This phenomenon appears to be in part a characteristic of mastery learning. Disaggregated learning curves can reconcile an apparent mismatch between the tutor's runtime assessment of student knowledge and the post hoc assessment provided by the aggregate learning curve. More precise learning curves can be used to refine Bayesian knowledge tracing parameters and to improve skill model assessment metrics.
The point is that aggregating learning curves is problematic because students differ. While strong students graduate to other topics, weak students stay, which creates the impression that no learning is going on. The suggestion is to separate the curves by the number of opportunities students needed to reach mastery, and to lower the expectation of what it means for a curve to show learning.
After that process, 70\% of the skills that were not showing learning do show learning.
%0 Book Section
%1 citeulike:12476803
%A Murray, R. Charles
%A Ritter, Steven
%A Nixon, Tristan
%A Schwiebert, Ryan
%A Hausmann, Robert G. M.
%A Towle, Brendon
%A Fancsali, Stephen E.
%A Vuong, Annalies
%B Artificial Intelligence in Education
%D 2013
%E Lane, H. Chad
%E Yacef, Kalina
%E Mostow, Jack
%E Pavlik, Philip
%I Springer Berlin Heidelberg
%K its learning-curve
%P 473--482
%R 10.1007/978-3-642-39112-5_48
%T Revealing the Learning in Learning Curves
%U http://dx.doi.org/10.1007/978-3-642-39112-5_48
%V 7926
%X Most work on learning curves for ITSs has focused on the knowledge components (or skills) included in the curves, aggregated across students. But an aggregate learning curve need not have the same form as subsets of its underlying data, so learning curves for subpopulations of students may take different forms. We show that disaggregating a skill's aggregate learning curve into separate learning curves for different student subpopulations can reveal learning: 70\% of the skills that did not show learning and were identified as candidates for improvement did show learning when disaggregated. This phenomenon appears to be in part a characteristic of mastery learning. Disaggregated learning curves can reconcile an apparent mismatch between the tutor's runtime assessment of student knowledge and the post hoc assessment provided by the aggregate learning curve. More precise learning curves can be used to refine Bayesian knowledge tracing parameters and to improve skill model assessment metrics.
@incollection{citeulike:12476803,
  author               = {Murray, R. Charles and Ritter, Steven and Nixon, Tristan and Schwiebert, Ryan and Hausmann, Robert G. M. and Towle, Brendon and Fancsali, Stephen E. and Vuong, Annalies},
  title                = {Revealing the Learning in Learning Curves},
  booktitle            = {Artificial Intelligence in Education},
  editor               = {Lane, H. Chad and Yacef, Kalina and Mostow, Jack and Pavlik, Philip},
  series               = {Lecture Notes in Computer Science},
  volume               = {7926},
  pages                = {473--482},
  publisher            = {Springer Berlin Heidelberg},
  address              = {Berlin, Heidelberg},
  year                 = {2013},
  doi                  = {10.1007/978-3-642-39112-5_48},
  url                  = {http://dx.doi.org/10.1007/978-3-642-39112-5_48},
  keywords             = {its learning-curve},
  abstract             = {Most work on learning curves for ITSs has focused on the knowledge components (or skills) included in the curves, aggregated across students. But an aggregate learning curve need not have the same form as subsets of its underlying data, so learning curves for subpopulations of students may take different forms. We show that disaggregating a skill's aggregate learning curve into separate learning curves for different student subpopulations can reveal learning: 70\% of the skills that did not show learning and were identified as candidates for improvement did show learning when disaggregated. This phenomenon appears to be in part a characteristic of mastery learning. Disaggregated learning curves can reconcile an apparent mismatch between the tutor's runtime assessment of student knowledge and the post hoc assessment provided by the aggregate learning curve. More precise learning curves can be used to refine Bayesian knowledge tracing parameters and to improve skill model assessment metrics.},
  comment              = {The point is that aggregating learning curves is problematic because students differ. While strong students graduate to other topics, weak students stay, which creates the impression that no learning is going on. The suggestion is to separate the curves by the number of opportunities students needed to reach mastery, and to lower the expectation of what it means for a curve to show learning.
After that process, 70\% of the skills that were not showing learning do show learning.},
  added-at             = {2018-03-19T12:24:51.000+0100},
  biburl               = {https://www.bibsonomy.org/bibtex/2158fee6b9cad4da6da86d5e2e02c1ce3/aho},
  citeulike-article-id = {12476803},
  citeulike-linkout-0  = {http://dx.doi.org/10.1007/978-3-642-39112-5_48},
  citeulike-linkout-1  = {http://link.springer.com/chapter/10.1007/978-3-642-39112-5_48},
  interhash            = {85b586d59f026f95605f7de2fca7d966},
  intrahash            = {158fee6b9cad4da6da86d5e2e02c1ce3},
  posted-at            = {2013-07-12 17:55:36},
  priority             = {2},
  timestamp            = {2018-03-19T12:24:51.000+0100},
}