The key limiting factor in graphical model inference and learning is the complexity of the partition function. We thus ask the question: what are the most general conditions under which the partition function is tractable? The answer leads to a new kind of deep architecture, which we call sum product networks (SPNs) and will present in this abstract. The key idea of SPNs is to compactly represent the partition function by introducing multiple layers of hidden variables. An SPN is a rooted directed acyclic graph with variables as leaves, sums and products as internal nodes, and weighted edges.
%0 Conference Paper
%1 poon_sum-product_2011
%A Poon, H.
%A Domingos, P.
%B 2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)
%D 2011
%K Backpropagation, Computational Computer Decision Graphical Junctions, acyclic architecture, directed edges function, graph, graphical graphs, hidden inference, internal learning, leaves, model modeling, models, networks, nodes, partition rooted sum-product trees, variables, weighted
%P 689--690
%R 10.1109/ICCVW.2011.6130310
%T Sum-product networks: A new deep architecture
%X The key limiting factor in graphical model inference and learning is the complexity of the partition function. We thus ask the question: what are the most general conditions under which the partition function is tractable? The answer leads to a new kind of deep architecture, which we call sum product networks (SPNs) and will present in this abstract. The key idea of SPNs is to compactly represent the partition function by introducing multiple layers of hidden variables. An SPN is a rooted directed acyclic graph with variables as leaves, sums and products as internal nodes, and weighted edges.
@inproceedings{poon_sum-product_2011,
  abstract      = {The key limiting factor in graphical model inference and learning is the complexity of the partition function. We thus ask the question: what are the most general conditions under which the partition function is tractable? The answer leads to a new kind of deep architecture, which we call sum product networks (SPNs) and will present in this abstract. The key idea of SPNs is to compactly represent the partition function by introducing multiple layers of hidden variables. An SPN is a rooted directed acyclic graph with variables as leaves, sums and products as internal nodes, and weighted edges.},
  added-at      = {2017-01-09T13:57:26.000+0100},
  author        = {Poon, Hoifung and Domingos, Pedro},
  biburl        = {https://www.bibsonomy.org/bibtex/2f8ec97db8d3a8db5b495312414d5db42/yourwelcome},
  booktitle     = {2011 {IEEE} International Conference on Computer Vision Workshops ({ICCV} Workshops)},
  doi           = {10.1109/ICCVW.2011.6130310},
  interhash     = {2eb460d20c3defe5213932b39213f9fd},
  intrahash     = {f8ec97db8d3a8db5b495312414d5db42},
  keywords      = {Backpropagation, Computational Computer Decision Graphical Junctions, acyclic architecture, directed edges function, graph, graphical graphs, hidden inference, internal learning, leaves, model modeling, models, networks, nodes, partition rooted sum-product trees, variables, weighted},
  internal-note = {NOTE(review): keywords appear garbled by the auto-export (alphabetically merged word soup); verify against the original record before relying on them. Author full names expanded from initials per the IEEE Xplore record for this DOI.},
  month         = nov,
  pages         = {689--690},
  shorttitle    = {Sum-product networks},
  timestamp     = {2017-01-09T14:01:11.000+0100},
  title         = {Sum-product networks: {A} new deep architecture},
  year          = {2011}
}