D. Klein and C. Manning. Accurate Unlexicalized Parsing. Annual Meeting of the Association for Computational Linguistics, 41, pages 423-430. (2003)
Abstract
We demonstrate that an unlexicalized PCFG can parse much more accurately than previously shown, by making use of simple, linguistically motivated state splits, which break down false independence assumptions latent in a vanilla treebank grammar. Indeed, its performance of 86.36% (LP/LR F1) is better than that of early lexicalized PCFG models, and surprisingly close to the current state-of-the-art. This result has potential uses beyond establishing a strong lower bound on the maximum possible accuracy of unlexicalized models: an unlexicalized PCFG is much more compact, easier to replicate, and easier to interpret than more complex lexical models, and the parsing algorithms are simpler, more widely understood, of lower asymptotic complexity, and easier to optimize.
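To make the idea concrete, here is a small illustrative sketch (not the paper's implementation): a toy Python example of parent annotation, one simple kind of state split the paper builds on, applied to two made-up treebank-style trees. Splitting NP into NP^S and NP^VP lets the estimated grammar keep subject and object NP expansions apart, which a vanilla treebank PCFG pools into a single NP distribution.

from collections import defaultdict

# Two toy bracketed trees (made up for illustration, not from the Penn Treebank).
# A node is (label, children); a preterminal's children list holds the word.
trees = [
    ("S", [("NP", [("PRP", ["She"])]),
           ("VP", [("VBZ", ["reads"]),
                   ("NP", [("DT", ["the"]), ("NN", ["paper"])])])]),
    ("S", [("NP", [("DT", ["The"]), ("NN", ["parser"])]),
           ("VP", [("VBZ", ["works"])])]),
]

def annotate(node, parent="ROOT"):
    """Parent annotation: split each category by its parent, e.g. NP^S vs NP^VP."""
    label, children = node
    if isinstance(children[0], str):               # preterminal: keep the word
        return (f"{label}^{parent}", children)
    return (f"{label}^{parent}", [annotate(child, label) for child in children])

def count_rules(node, counts):
    """Read off (lhs -> rhs) rule counts from a tree."""
    label, children = node
    if isinstance(children[0], str):               # lexical rule, e.g. NN -> paper
        counts[(label, tuple(children))] += 1
        return
    counts[(label, tuple(child[0] for child in children))] += 1
    for child in children:
        count_rules(child, counts)

def estimate(treebank):
    """Maximum-likelihood (relative-frequency) PCFG rule probabilities."""
    counts = defaultdict(int)
    for tree in treebank:
        count_rules(tree, counts)
    totals = defaultdict(int)
    for (lhs, _), n in counts.items():
        totals[lhs] += n
    return {(lhs, rhs): n / totals[lhs] for (lhs, rhs), n in counts.items()}

vanilla = estimate(trees)                          # one pooled NP distribution
split = estimate([annotate(t) for t in trees])     # NP^S and NP^VP kept distinct

for (lhs, rhs), p in sorted(split.items()):
    print(f"{lhs} -> {' '.join(rhs)}   {p:.2f}")

Running the sketch prints separate expansion distributions for NP^S and NP^VP, whereas the unsplit grammar assigns one distribution to NP regardless of context; this is the sense in which state splits break down false independence assumptions.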
@inproceedings{klein03accurate,
author = {Klein, Dan and Manning, Christopher D.},
booktitle = {Annual Meeting of the Association for Computational Linguistics},
pages = {423--430},
title = {Accurate Unlexicalized Parsing},
url = {http://nlp.stanford.edu/~manning/papers/unlexicalized-parsing.pdf},
volume = 41,
year = 2003
}