Supervised learning approaches to text classification are in practice often required to work with small and unsystematically collected training sets. The alternative to supervised learning is usually viewed to be building classifiers by hand, using a domain expert's understanding of which features of the text are related to the class of interest. This is expensive, requires a degree of sophistication about linguistics and classification, and makes it difficult to use combinations of weak predictors. We propose instead combining domain knowledge with training examples in a Bayesian framework. Domain knowledge is used to specify a prior distribution for the parameters of a logistic regression model, and labeled training data is used to produce a posterior distribution, whose mode we take as the final classifier. We show on three text categorization data sets that this approach can rescue what would otherwise be disastrously bad training situations, producing much more effective classifiers.
%0 Conference Paper
%1 conf/sigir/DayanikLMMG06
%A Dayanik, Aynur A.
%A Lewis, David D.
%A Madigan, David
%A Menkov, Vladimir
%A Genkin, Alexander
%B SIGIR
%D 2006
%E Efthimiadis, Efthimis N.
%E Dumais, Susan T.
%E Hawking, David
%E Järvelin, Kalervo
%I ACM
%K classification priors
%P 493-500
%T Constructing informative prior distributions from domain knowledge in text classification
%U http://dblp.uni-trier.de/db/conf/sigir/sigir2006.html#DayanikLMMG06
%X Supervised learning approaches to text classification are in practice often required to work with small and unsystematically collected training sets. The alternative to supervised learning is usually viewed to be building classifiers by hand, using a domain expert's understanding of which features of the text are related to the class of interest. This is expensive, requires a degree of sophistication about linguistics and classification, and makes it difficult to use combinations of weak predictors. We propose instead combining domain knowledge with training examples in a Bayesian framework. Domain knowledge is used to specify a prior distribution for the parameters of a logistic regression model, and labeled training data is used to produce a posterior distribution, whose mode we take as the final classifier. We show on three text categorization data sets that this approach can rescue what would otherwise be disastrously bad training situations, producing much more effective classifiers.
%@ 1-59593-369-7
@comment{Cleaned BibSonomy/DBLP auto-export: page range uses "--", DOI lifted
  out of the legacy ee URL into a bare doi field, spurious trailing period
  dropped from the title, abstract leading space removed, and the accented
  editor name written as a BibTeX special character ({\"a}) so sorting and
  labels stay correct under classic 8-bit BibTeX as well as Biber.}
@inproceedings{conf/sigir/DayanikLMMG06,
  abstract  = {Supervised learning approaches to text classification are in practice often required to work with small and unsystematically collected training sets. The alternative to supervised learning is usually viewed to be building classifiers by hand, using a domain expert's understanding of which features of the text are related to the class of interest. This is expensive, requires a degree of sophistication about linguistics and classification, and makes it difficult to use combinations of weak predictors. We propose instead combining domain knowledge with training examples in a Bayesian framework. Domain knowledge is used to specify a prior distribution for the parameters of a logistic regression model, and labeled training data is used to produce a posterior distribution, whose mode we take as the final classifier. We show on three text categorization data sets that this approach can rescue what would otherwise be disastrously bad training situations, producing much more effective classifiers.},
  added-at  = {2007-05-30T18:43:30.000+0200},
  author    = {Dayanik, Aynur A. and Lewis, David D. and Madigan, David and Menkov, Vladimir and Genkin, Alexander},
  biburl    = {https://www.bibsonomy.org/bibtex/2635997d34b77c2dde18c229867257017/flawed},
  booktitle = {SIGIR},
  crossref  = {conf/sigir/2006},
  date      = {2006-08-30},
  doi       = {10.1145/1148170.1148255},
  editor    = {Efthimiadis, Efthimis N. and Dumais, Susan T. and Hawking, David and J{\"a}rvelin, Kalervo},
  ee        = {http://doi.acm.org/10.1145/1148170.1148255},
  interhash = {ebf5d3c43767b002cb609672a781bc77},
  intrahash = {635997d34b77c2dde18c229867257017},
  isbn      = {1-59593-369-7},
  keywords  = {classification priors},
  pages     = {493--500},
  publisher = {ACM},
  timestamp = {2007-05-30T18:43:30.000+0200},
  title     = {Constructing informative prior distributions from domain knowledge in text classification},
  url       = {http://dblp.uni-trier.de/db/conf/sigir/sigir2006.html#DayanikLMMG06},
  year      = 2006
}