Deep neural networks excel at function approximation, yet they are typically
trained from scratch for each new function. On the other hand, Bayesian
methods, such as Gaussian Processes (GPs), exploit prior knowledge to quickly
infer the shape of a new function at test time. Yet GPs are computationally
expensive, and it can be hard to design appropriate priors. In this paper we
propose a family of neural models, Conditional Neural Processes (CNPs), that
combine the benefits of both. CNPs are inspired by the flexibility of
stochastic processes such as GPs, but are structured as neural networks and
trained via gradient descent. CNPs make accurate predictions after observing
only a handful of training data points, yet scale to complex functions and
large datasets. We demonstrate the performance and versatility of the approach
on a range of canonical machine learning tasks, including regression,
classification and image completion.
%0 Generic
%1 garnelo2018conditional
%A Garnelo, Marta
%A Rosenbaum, Dan
%A Maddison, Chris J.
%A Ramalho, Tiago
%A Saxton, David
%A Shanahan, Murray
%A Teh, Yee Whye
%A Rezende, Danilo J.
%A Eslami, S. M. Ali
%D 2018
%K conditional deeplearning
%T Conditional Neural Processes
%U http://arxiv.org/abs/1807.01613
%X Deep neural networks excel at function approximation, yet they are typically
trained from scratch for each new function. On the other hand, Bayesian
methods, such as Gaussian Processes (GPs), exploit prior knowledge to quickly
infer the shape of a new function at test time. Yet GPs are computationally
expensive, and it can be hard to design appropriate priors. In this paper we
propose a family of neural models, Conditional Neural Processes (CNPs), that
combine the benefits of both. CNPs are inspired by the flexibility of
stochastic processes such as GPs, but are structured as neural networks and
trained via gradient descent. CNPs make accurate predictions after observing
only a handful of training data points, yet scale to complex functions and
large datasets. We demonstrate the performance and versatility of the approach
on a range of canonical machine learning tasks, including regression,
classification and image completion.
@misc{garnelo2018conditional,
  abstract      = {Deep neural networks excel at function approximation, yet they are typically
trained from scratch for each new function. On the other hand, Bayesian
methods, such as Gaussian Processes (GPs), exploit prior knowledge to quickly
infer the shape of a new function at test time. Yet GPs are computationally
expensive, and it can be hard to design appropriate priors. In this paper we
propose a family of neural models, Conditional Neural Processes (CNPs), that
combine the benefits of both. CNPs are inspired by the flexibility of
stochastic processes such as GPs, but are structured as neural networks and
trained via gradient descent. CNPs make accurate predictions after observing
only a handful of training data points, yet scale to complex functions and
large datasets. We demonstrate the performance and versatility of the approach
on a range of canonical machine learning tasks, including regression,
classification and image completion.},
  added-at      = {2021-06-26T11:07:36.000+0200},
  author        = {Garnelo, Marta and Rosenbaum, Dan and Maddison, Chris J. and Ramalho, Tiago and Saxton, David and Shanahan, Murray and Teh, Yee Whye and Rezende, Danilo J. and Eslami, S. M. Ali},
  biburl        = {https://www.bibsonomy.org/bibtex/27dd465868124858dcf37cf652ec6656b/shuncheng.wu},
  description   = {[1807.01613] Conditional Neural Processes},
  eprint        = {1807.01613},
  archiveprefix = {arXiv},
  interhash     = {9070d8542749c53f151cc0537e365b6b},
  intrahash     = {7dd465868124858dcf37cf652ec6656b},
  keywords      = {conditional deeplearning},
  note          = {cite arxiv:1807.01613},
  timestamp     = {2021-06-26T11:07:36.000+0200},
  title         = {Conditional Neural Processes},
  url           = {http://arxiv.org/abs/1807.01613},
  year          = {2018},
}