We introduce a three-phase, nine-step methodology for specification of clinical guidelines (GLs) by expert physicians, clinical editors, and knowledge engineers and for quantitative evaluation of the specification’s quality. We applied this methodology to a particular framework for incremental GL structuring (mark-up) and to GLs in three clinical domains. A gold-standard mark-up was created, including 196 plans and subplans, and 326 instances of ontological knowledge roles (KRs). A completeness measure of the acquired knowledge revealed that 97% of the plans and 91% of the KR instances of the GLs were recreated by the clinical editors. A correctness measure often revealed high variability within clinical editor pairs structuring each GL, but for all GLs and clinical editors the specification quality was significantly higher than random (p < 0.01). Procedural KRs were more difficult to mark-up than declarative KRs. We conclude that given an ontology-specific consensus, clinical editors with mark-up training can structure GL knowledge with high completeness, whereas the main demand for correct structuring is training in the ontology’s semantics.
Description
ScienceDirect.com - Journal of Biomedical Informatics - A quantitative assessment of a methodology for collaborative specification and evaluation of clinical guidelines
%0 Journal Article
%1 Shalom2008
%A Shalom, Erez
%A Shahar, Yuval
%A Taieb-Maimon, Meirav
%A Bar, Guy
%A Yarkoni, Avi
%A Young, Ohad
%A Martins, Susana B.
%A Vaszar, Laszlo
%A Goldstein, Mary K.
%A Liel, Yair
%A Leibowitz, Akiva
%A Marom, Tal
%A Lunenfeld, Eitan
%D 2008
%J Journal of Biomedical Informatics
%K collaboration evaluation guideline
%N 6
%P 889-903
%R 10.1016/j.jbi.2008.04.009
%T A quantitative assessment of a methodology for collaborative specification and evaluation of clinical guidelines
%U http://www.sciencedirect.com/science/article/pii/S1532046408000609
%V 41
%X We introduce a three-phase, nine-step methodology for specification of clinical guidelines (GLs) by expert physicians, clinical editors, and knowledge engineers and for quantitative evaluation of the specification’s quality. We applied this methodology to a particular framework for incremental GL structuring (mark-up) and to GLs in three clinical domains. A gold-standard mark-up was created, including 196 plans and subplans, and 326 instances of ontological knowledge roles (KRs). A completeness measure of the acquired knowledge revealed that 97% of the plans and 91% of the KR instances of the GLs were recreated by the clinical editors. A correctness measure often revealed high variability within clinical editor pairs structuring each GL, but for all GLs and clinical editors the specification quality was significantly higher than random (p < 0.01). Procedural KRs were more difficult to mark-up than declarative KRs. We conclude that given an ontology-specific consensus, clinical editors with mark-up training can structure GL knowledge with high completeness, whereas the main demand for correct structuring is training in the ontology’s semantics.
@article{Shalom2008,
  abstract  = {We introduce a three-phase, nine-step methodology for specification of clinical guidelines (GLs) by expert physicians, clinical editors, and knowledge engineers and for quantitative evaluation of the specification’s quality. We applied this methodology to a particular framework for incremental GL structuring (mark-up) and to GLs in three clinical domains. A gold-standard mark-up was created, including 196 plans and subplans, and 326 instances of ontological knowledge roles (KRs). A completeness measure of the acquired knowledge revealed that 97% of the plans and 91% of the KR instances of the GLs were recreated by the clinical editors. A correctness measure often revealed high variability within clinical editor pairs structuring each GL, but for all GLs and clinical editors the specification quality was significantly higher than random (p < 0.01). Procedural KRs were more difficult to mark-up than declarative KRs. We conclude that given an ontology-specific consensus, clinical editors with mark-up training can structure GL knowledge with high completeness, whereas the main demand for correct structuring is training in the ontology’s semantics.},
  added-at  = {2012-11-28T17:47:09.000+0100},
  author    = {Shalom, Erez and Shahar, Yuval and Taieb-Maimon, Meirav and Bar, Guy and Yarkoni, Avi and Young, Ohad and Martins, Susana B. and Vaszar, Laszlo and Goldstein, Mary K. and Liel, Yair and Leibowitz, Akiva and Marom, Tal and Lunenfeld, Eitan},
  biburl    = {https://www.bibsonomy.org/bibtex/24826dca9737140632144fd97075d4bed/rhatko},
  description = {ScienceDirect.com - Journal of Biomedical Informatics - A quantitative assessment of a methodology for collaborative specification and evaluation of clinical guidelines},
  doi       = {10.1016/j.jbi.2008.04.009},
  groups    = {public},
  interhash = {1de62b7d866960e9a67ebed8a9ff32fa},
  intrahash = {4826dca9737140632144fd97075d4bed},
  issn      = {1532-0464},
  journal   = {Journal of Biomedical Informatics},
  keywords  = {collaboration evaluation guideline},
  number    = {6},
  pages     = {889--903},
  timestamp = {2012-12-06T10:42:18.000+0100},
  title     = {A quantitative assessment of a methodology for collaborative specification and evaluation of clinical guidelines},
  url       = {http://www.sciencedirect.com/science/article/pii/S1532046408000609},
  username  = {rhatko},
  volume    = {41},
  year      = {2008}
}