Massive Open Online Courses (MOOCs) use peer assessment to grade open-ended questions at scale, allowing students to provide feedback. Relative to teacher-based grading, peer assessment on MOOCs traditionally delivers lower quality feedback and fewer learner interactions. We present the identified peer review (IPR) framework, which provides non-blind peer assessment and incentives driving high-quality feedback. We show that, compared to traditional peer assessment methods, IPR leads to significantly longer and more useful feedback as well as more discussion between peers.
%0 Conference Paper
%1 citeulike:14346942
%A Gamage, Dilrukshi
%A Whiting, Mark E.
%A Rajapakshe, Thejan
%A Thilakarathne, Haritha
%A Perera, Indika
%A Fernando, Shantha
%B Proceedings of the Fourth (2017) ACM Conference on Learning @ Scale
%C New York, NY, USA
%D 2017
%I ACM
%K assessment las2017 mooc peer-review
%P 315--318
%R 10.1145/3051457.3054013
%T Improving Assessment on MOOCs Through Peer Identification and Aligned Incentives
%U http://dx.doi.org/10.1145/3051457.3054013
%X Massive Open Online Courses (MOOCs) use peer assessment to grade open ended questions at scale, allowing students to provide feedback. Relative to teacher based grading, peer assessment on MOOCs traditionally delivers lower quality feedback and fewer learner interactions. We present the identified peer review (IPR) framework, which provides non-blind peer assessment and incentives driving high quality feedback. We show that, compared to traditional peer assessment methods, IPR leads to significantly longer and more useful feedback as well as more discussion between peers.
%@ 978-1-4503-4450-0
@inproceedings{citeulike:14346942,
  abstract             = {Massive Open Online Courses (MOOCs) use peer assessment to grade open ended questions at scale, allowing students to provide feedback. Relative to teacher based grading, peer assessment on MOOCs traditionally delivers lower quality feedback and fewer learner interactions. We present the identified peer review (IPR) framework, which provides non-blind peer assessment and incentives driving high quality feedback. We show that, compared to traditional peer assessment methods, IPR leads to significantly longer and more useful feedback as well as more discussion between peers.},
  added-at             = {2017-11-15T17:02:25.000+0100},
  address              = {New York, NY, USA},
  author               = {Gamage, Dilrukshi and Whiting, Mark E. and Rajapakshe, Thejan and Thilakarathne, Haritha and Perera, Indika and Fernando, Shantha},
  biburl               = {https://www.bibsonomy.org/bibtex/26220d0cd2eec7ce2e911c513d17b0a78/brusilovsky},
  booktitle            = {Proceedings of the Fourth (2017) {ACM} Conference on Learning @ Scale},
  citeulike-article-id = {14346942},
  citeulike-linkout-0  = {http://portal.acm.org/citation.cfm?id=3054013},
  citeulike-linkout-1  = {http://dx.doi.org/10.1145/3051457.3054013},
  doi                  = {10.1145/3051457.3054013},
  interhash            = {c6aec61f9145d73f1e425d6e5e2684eb},
  intrahash            = {6220d0cd2eec7ce2e911c513d17b0a78},
  isbn                 = {978-1-4503-4450-0},
  keywords             = {assessment las2017 mooc peer-review},
  location             = {Cambridge, Massachusetts, USA},
  pages                = {315--318},
  posted-at            = {2017-04-29 18:57:54},
  priority             = {2},
  publisher            = {ACM},
  series               = {L@S '17},
  timestamp            = {2020-11-03T18:40:45.000+0100},
  title                = {Improving Assessment on {MOOCs} Through Peer Identification and Aligned Incentives},
  url                  = {https://doi.org/10.1145/3051457.3054013},
  year                 = {2017}
}