G. Dikov and J. Bayer. Bayesian Learning of Neural Network Architectures. In Proceedings of Machine Learning Research, volume 89, pages 730--738. PMLR, 16--18 Apr 2019.
Abstract
In this paper we propose a Bayesian method for estimating architectural parameters of neural networks, namely layer size and network depth. We do this by learning Concrete distributions over these parameters. Our results show that regular networks with a learned structure can generalise better on small datasets, while fully stochastic networks can be more robust to parameter initialisation. The proposed method relies on standard neural variational learning and, unlike randomised architecture search, does not require retraining of the model, thus keeping the computational overhead to a minimum.
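The core mechanic the abstract describes is easy to prototype. The sketch below (PyTorch; not the authors' code, and the class name ConcreteWidthLayer, the uniform 0.5 prior, and the Monte Carlo KL estimate are all illustrative assumptions) shows a layer whose effective width is governed by a learned relaxed-Bernoulli (binary Concrete) mask over its units, trained inside a standard variational objective. The paper parameterises layer size and depth more specifically; this only illustrates the Concrete relaxation.

import torch
import torch.nn as nn
from torch.distributions import RelaxedBernoulli

class ConcreteWidthLayer(nn.Module):
    """Hypothetical sketch: a hidden layer whose effective width is set by a
    learned binary-Concrete (relaxed Bernoulli) mask over its units."""
    def __init__(self, in_features, max_units, temperature=0.5):
        super().__init__()
        self.linear = nn.Linear(in_features, max_units)
        # Variational logits, one per unit, learned jointly with the weights.
        self.logits = nn.Parameter(torch.zeros(max_units))
        self.register_buffer("temp", torch.tensor(temperature))

    def forward(self, x):
        q = RelaxedBernoulli(self.temp, logits=self.logits)
        mask = q.rsample()  # differentiable (reparameterised) sample in (0, 1)
        # Prior keeps each unit active with probability 0.5 (an assumption).
        p = RelaxedBernoulli(self.temp, probs=torch.full_like(mask, 0.5))
        # Monte Carlo estimate of KL(q || p); the Concrete KL has no closed form.
        kl = (q.log_prob(mask) - p.log_prob(mask)).sum()
        return torch.relu(self.linear(x)) * mask, kl

# Training minimises the negative ELBO: task loss plus the KL term.
layer = ConcreteWidthLayer(784, 256)
h, kl = layer(torch.randn(32, 784))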
@inproceedings{pmlr-v89-dikov19a,
abstract = {In this paper we propose a Bayesian method for estimating architectural parameters of neural networks, namely layer size and network depth. We do this by learning Concrete distributions over these parameters. Our results show that regular networks with a learned structure can generalise better on small datasets, while fully stochastic networks can be more robust to parameter initialisation. The proposed method relies on standard neural variational learning and, unlike randomised architecture search, does not require retraining of the model, thus keeping the computational overhead to a minimum.},
author = {Dikov, Georgi and Bayer, Justin},
booktitle = {Proceedings of Machine Learning Research},
editor = {Chaudhuri, Kamalika and Sugiyama, Masashi},
keywords = {bayesian optimization},
month = {16--18 Apr},
pages = {730--738},
pdf = {http://proceedings.mlr.press/v89/dikov19a/dikov19a.pdf},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
title = {Bayesian Learning of Neural Network Architectures},
url = {http://proceedings.mlr.press/v89/dikov19a.html},
volume = {89},
year = {2019}
}