Theoretical results suggest that in order to learn the kind of complicated functions that can represent high-level abstractions (e.g., in vision, language, and other AI-level tasks), one may need deep architectures. Deep architectures are composed of multiple levels of non-linear operations, such as in neural nets with many hidden layers or in complicated propositional formulae re-using many sub-formulae. Searching the parameter space of deep architectures is a difficult task, but learning algorithms such as those for Deep Belief Networks have recently been proposed to tackle this problem with notable success, beating the state-of-the-art in certain areas. This monograph discusses the motivations and principles regarding learning algorithms for deep architectures, in particular those exploiting as building blocks unsupervised learning of single-layer models such as Restricted Boltzmann Machines, used to construct deeper models such as Deep Belief Networks.
%0 Journal Article
%1 Bengio09ftml
%A Bengio, Yoshua
%D 2009
%J Foundations and Trends in Machine Learning
%K 01801 paper numerical ai data pattern recognition analysis learn algorithm
%N 1
%P 1--127
%R 10.1561/2200000006
%T Learning Deep Architectures for AI
%V 2
%X Theoretical results suggest that in order to learn the kind of complicated functions that can represent high-level abstractions (e.g., in vision, language, and other AI-level tasks), one may need deep architectures. Deep architectures are composed of multiple levels of non-linear operations, such as in neural nets with many hidden layers or in complicated propositional formulae re-using many sub-formulae. Searching the parameter space of deep architectures is a difficult task, but learning algorithms such as those for Deep Belief Networks have recently been proposed to tackle this problem with notable success, beating the state-of-the-art in certain areas. This monograph discusses the motivations and principles regarding learning algorithms for deep architectures, in particular those exploiting as building blocks unsupervised learning of single-layer models such as Restricted Boltzmann Machines, used to construct deeper models such as Deep Belief Networks.
@article{Bengio09ftml,
  abstract    = {Theoretical results suggest that in order to learn the kind of complicated functions that can represent high-level abstractions (e.g., in vision, language, and other AI-level tasks), one may need deep architectures. Deep architectures are composed of multiple levels of non-linear operations, such as in neural nets with many hidden layers or in complicated propositional formulae re-using many sub-formulae. Searching the parameter space of deep architectures is a difficult task, but learning algorithms such as those for Deep Belief Networks have recently been proposed to tackle this problem with notable success, beating the state-of-the-art in certain areas. This monograph discusses the motivations and principles regarding learning algorithms for deep architectures, in particular those exploiting as building blocks unsupervised learning of single-layer models such as Restricted Boltzmann Machines, used to construct deeper models such as Deep Belief Networks.},
  added-at    = {2018-02-10T18:29:43.000+0100},
  author      = {Bengio, Yoshua},
  biburl      = {https://www.bibsonomy.org/bibtex/24ac7fbfc4e40b606733ec3d33b6c9e26/flint63},
  description = {Book ISBN 978-1-60198-294-0},
  doi         = {10.1561/2200000006},
  file        = {Journal Issue:2009/Bengio09.pdf:PDF},
  groups      = {public},
  interhash   = {30174ec5e2667a039cdc30c5d359dc47},
  intrahash   = {4ac7fbfc4e40b606733ec3d33b6c9e26},
  isbn        = {978-1-60198-294-0},
  issn        = {1935-8237},
  journal     = {Foundations and Trends in Machine Learning},
  keywords    = {01801 paper numerical ai data pattern recognition analysis learn algorithm},
  number      = {1},
  pages       = {1--127},
  timestamp   = {2018-04-16T12:36:58.000+0200},
  title       = {Learning Deep Architectures for {AI}},
  username    = {flint63},
  volume      = {2},
  year        = {2009}
}