Human conceptual knowledge supports the ability to generate novel yet highly
structured concepts, and the form of this conceptual knowledge is of great
interest to cognitive scientists. One tradition has emphasized structured
knowledge, viewing concepts as embedded in intuitive theories or organized in
complex symbolic knowledge structures. A second tradition has emphasized
statistical knowledge, viewing conceptual knowledge as emerging from the
rich correlational structure captured by training neural networks and other
statistical models. In this paper, we explore a synthesis of these two
traditions through a novel neuro-symbolic model for generating new concepts.
Using simple visual concepts as a testbed, we bring together neural networks
and symbolic probabilistic programs to learn a generative model of novel
handwritten characters. Two alternative models are explored with more generic
neural network architectures. We compare each of these three models for their
likelihoods on held-out character classes and for the quality of their
productions, finding that our hybrid model learns the most convincing
representation and generalizes further from the training observations.
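The abstract describes composing symbolic probabilistic programs with neural networks into a single generative model of handwritten characters. As a rough illustration of that division of labor only (not the authors' actual architecture), the toy sketch below lets a symbolic program make the discrete choices (how many strokes, where each starts) while a stand-in for a learned network proposes each stroke trajectory; all names, distributions, and parameters here are illustrative assumptions.

    # Hypothetical toy sketch of a neuro-symbolic generative split;
    # not the model from the paper.
    import numpy as np

    rng = np.random.default_rng(0)

    def neural_stroke_proposal(start, n_points=20):
        """Stand-in for a learned network: a smooth random trajectory from `start`."""
        steps = rng.normal(scale=0.05, size=(n_points, 2)).cumsum(axis=0)
        return start + steps

    def sample_character():
        """Symbolic program: sample the stroke count, then delegate each stroke."""
        n_strokes = rng.integers(1, 4)             # symbolic choice of part count
        strokes = []
        for _ in range(n_strokes):
            start = rng.uniform(0.2, 0.8, size=2)  # symbolic choice of start location
            strokes.append(neural_stroke_proposal(start))
        return strokes

    character = sample_character()
    print(f"{len(character)} strokes, first stroke shape {character[0].shape}")

In a trained model of this general kind, the trajectory proposal would be a learned network and the symbolic program would define the probabilistic structure over parts, which is one reading of the hybrid design the abstract alludes to.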
Description
[2003.08978] Generating new concepts with hybrid neuro-symbolic models
%0 Generic
%1 feinman2020generating
%A Feinman, Reuben
%A Lake, Brenden M.
%D 2020
%K generative machinelearning statistics
%T Generating new concepts with hybrid neuro-symbolic models
%U http://arxiv.org/abs/2003.08978
%X Human conceptual knowledge supports the ability to generate novel yet highly
structured concepts, and the form of this conceptual knowledge is of great
interest to cognitive scientists. One tradition has emphasized structured
knowledge, viewing concepts as embedded in intuitive theories or organized in
complex symbolic knowledge structures. A second tradition has emphasized
statistical knowledge, viewing conceptual knowledge as emerging from the
rich correlational structure captured by training neural networks and other
statistical models. In this paper, we explore a synthesis of these two
traditions through a novel neuro-symbolic model for generating new concepts.
Using simple visual concepts as a testbed, we bring together neural networks
and symbolic probabilistic programs to learn a generative model of novel
handwritten characters. Two alternative models are explored with more generic
neural network architectures. We compare each of these three models for their
likelihoods on held-out character classes and for the quality of their
productions, finding that our hybrid model learns the most convincing
representation and generalizes further from the training observations.
@misc{feinman2020generating,
abstract = {Human conceptual knowledge supports the ability to generate novel yet highly
structured concepts, and the form of this conceptual knowledge is of great
interest to cognitive scientists. One tradition has emphasized structured
knowledge, viewing concepts as embedded in intuitive theories or organized in
complex symbolic knowledge structures. A second tradition has emphasized
statistical knowledge, viewing conceptual knowledge as emerging from the
rich correlational structure captured by training neural networks and other
statistical models. In this paper, we explore a synthesis of these two
traditions through a novel neuro-symbolic model for generating new concepts.
Using simple visual concepts as a testbed, we bring together neural networks
and symbolic probabilistic programs to learn a generative model of novel
handwritten characters. Two alternative models are explored with more generic
neural network architectures. We compare each of these three models for their
likelihoods on held-out character classes and for the quality of their
productions, finding that our hybrid model learns the most convincing
representation and generalizes further from the training observations.},
added-at = {2020-03-27T01:15:20.000+0100},
author = {Feinman, Reuben and Lake, Brenden M.},
biburl = {https://www.bibsonomy.org/bibtex/2218b99fe864a341c29f5c996c128a117/cpankow},
description = {[2003.08978] Generating new concepts with hybrid neuro-symbolic models},
interhash = {98ca20aafb43006fd4c67659e3614cc7},
intrahash = {218b99fe864a341c29f5c996c128a117},
keywords = {generative machinelearning statistics},
note = {cite arxiv:2003.08978},
timestamp = {2020-07-16T02:14:32.000+0200},
title = {Generating new concepts with hybrid neuro-symbolic models},
url = {http://arxiv.org/abs/2003.08978},
year = 2020
}