The Indian buffet process is a stochastic process defining a probability distribution over equivalence classes of sparse binary matrices with a finite number of rows and an unbounded number of columns. This distribution is suitable for use as a prior in probabilistic models that represent objects using a potentially infinite array of features, or that involve bipartite graphs in which the size of at least one class of nodes is unknown. We give a detailed derivation of this distribution, and illustrate its use as a prior in an infinite latent feature model. We then review recent applications of the Indian buffet process in machine learning, discuss its extensions, and summarize its connections to other stochastic processes.
%0 Journal Article
%1 griffiths2011indian
%A Griffiths, Thomas L.
%A Ghahramani, Zoubin
%D 2011
%I JMLR.org
%J J. Mach. Learn. Res.
%K clustering hierarchical_Bayesian indian_buffet_process latent_variables review
%P 1185--1224
%T The Indian Buffet Process: An Introduction and Review
%U http://dl.acm.org/citation.cfm?id=1953048.2021039
%V 12
%X The Indian buffet process is a stochastic process defining a probability distribution over equivalence classes of sparse binary matrices with a finite number of rows and an unbounded number of columns. This distribution is suitable for use as a prior in probabilistic models that represent objects using a potentially infinite array of features, or that involve bipartite graphs in which the size of at least one class of nodes is unknown. We give a detailed derivation of this distribution, and illustrate its use as a prior in an infinite latent feature model. We then review recent applications of the Indian buffet process in machine learning, discuss its extensions, and summarize its connections to other stochastic processes.
@article{griffiths2011indian,
  abstract   = {The Indian buffet process is a stochastic process defining a probability distribution over equivalence classes of sparse binary matrices with a finite number of rows and an unbounded number of columns. This distribution is suitable for use as a prior in probabilistic models that represent objects using a potentially infinite array of features, or that involve bipartite graphs in which the size of at least one class of nodes is unknown. We give a detailed derivation of this distribution, and illustrate its use as a prior in an infinite latent feature model. We then review recent applications of the Indian buffet process in machine learning, discuss its extensions, and summarize its connections to other stochastic processes.},
  acmid      = {2021039},
  added-at   = {2014-03-16T11:30:32.000+0100},
  author     = {Griffiths, Thomas L. and Ghahramani, Zoubin},
  biburl     = {https://www.bibsonomy.org/bibtex/2fbe9fb77e65465ff2b42b99c71ec4f79/peter.ralph},
  interhash  = {8c81d1588b428746739983775b988b1c},
  intrahash  = {fbe9fb77e65465ff2b42b99c71ec4f79},
  issn       = {1532-4435},
  issue_date = {2/1/2011},
  journal    = {Journal of Machine Learning Research},
  keywords   = {clustering hierarchical_Bayesian indian_buffet_process latent_variables review},
  month      = jul,
  numpages   = {40},
  pages      = {1185--1224},
  publisher  = {JMLR.org},
  timestamp  = {2014-03-16T11:30:32.000+0100},
  title      = {The {Indian} Buffet Process: An Introduction and Review},
  url        = {http://dl.acm.org/citation.cfm?id=1953048.2021039},
  volume     = {12},
  year       = {2011},
}