Random forests are a combination of tree predictors such that each tree depends on the values of a random vector sampled independently and with the same distribution for all trees in the forest. The generalization error for forests converges a.s. to a limit as the number of trees in the forest becomes large. The generalization error of a forest of tree classifiers depends on the strength of the individual trees in the forest and the correlation between them. Using a random selection of features to split each node yields error rates that compare favorably to Adaboost, but are more robust with respect to noise. Internal estimates monitor error, strength, and correlation and these are used to show the response to increasing the number of features used in the splitting. Internal estimates are also used to measure variable importance. These ideas are also applicable to regression.
%0 Journal Article
%1 breiman2001random
%A Breiman, Leo
%D 2001
%I Kluwer Academic Publishers
%J Machine Learning
%K classification classifier dblp decision ensemble final forest forests imported kde learning machine ml mykopie origin random text-detection the_youtube_social_network thema:exploiting_place_features_in_link_prediction_on_location-based_social_networks trees uw_ss14_web2.0
%N 1
%P 5-32
%R 10.1023/A:1010933404324
%T Random Forests
%U http://dx.doi.org/10.1023/A%3A1010933404324
%V 45
%X Random forests are a combination of tree predictors such that each tree depends on the values of a random vector sampled independently and with the same distribution for all trees in the forest. The generalization error for forests converges a.s. to a limit as the number of trees in the forest becomes large. The generalization error of a forest of tree classifiers depends on the strength of the individual trees in the forest and the correlation between them. Using a random selection of features to split each node yields error rates that compare favorably to Adaboost, but are more robust with respect to noise. Internal estimates monitor error, strength, and correlation and these are used to show the response to increasing the number of features used in the splitting. Internal estimates are also used to measure variable importance. These ideas are also applicable to regression.
@article{breiman2001random,
  abstract    = {Random forests are a combination of tree predictors such that each tree depends on the values of a random vector sampled independently and with the same distribution for all trees in the forest. The generalization error for forests converges a.s. to a limit as the number of trees in the forest becomes large. The generalization error of a forest of tree classifiers depends on the strength of the individual trees in the forest and the correlation between them. Using a random selection of features to split each node yields error rates that compare favorably to Adaboost, but are more robust with respect to noise. Internal estimates monitor error, strength, and correlation and these are used to show the response to increasing the number of features used in the splitting. Internal estimates are also used to measure variable importance. These ideas are also applicable to regression.},
  added-at    = {2015-04-15T08:57:31.000+0200},
  author      = {Breiman, Leo},
  biburl      = {https://www.bibsonomy.org/bibtex/2b8187107bf870043f2f93669958858f1/kdepublication},
  description = {Random Forests - Springer},
  doi         = {10.1023/A:1010933404324},
  interhash   = {4450d2e56555e7cb8f3817578e1dd4da},
  intrahash   = {b8187107bf870043f2f93669958858f1},
  issn        = {0885-6125},
  journal     = {Machine Learning},
  keywords    = {classification classifier dblp decision ensemble final forest forests imported kde learning machine ml mykopie origin random text-detection the_youtube_social_network thema:exploiting_place_features_in_link_prediction_on_location-based_social_networks trees uw_ss14_web2.0},
  language    = {English},
  number      = {1},
  pages       = {5--32},
  publisher   = {Kluwer Academic Publishers},
  timestamp   = {2015-04-24T14:37:24.000+0200},
  title       = {Random Forests},
  volume      = {45},
  year        = {2001},
}