We empirically show that Bayesian inference can be inconsistent under
misspecification in simple linear regression problems, both in a model
averaging/selection and in a Bayesian ridge regression setting. We use the
standard linear model, which assumes homoskedasticity, whereas the data are
heteroskedastic, and observe that the posterior puts its mass on ever more
high-dimensional models as the sample size increases. To remedy the problem, we
equip the likelihood in Bayes' theorem with an exponent called the learning
rate, and we propose the Safe Bayesian method to learn the learning rate from
the data. SafeBayes tends to select small learning rates as soon as the standard
posterior is not `cumulatively concentrated', and its results on our data are
quite encouraging.
Description
[1412.3730] Inconsistency of Bayesian Inference for Misspecified Linear Models, and a Proposal for Repairing It
%0 Journal Article
%1 grunwald2014inconsistency
%A Grünwald, Peter
%A van Ommen, Thijs
%D 2014
%K bayesian misspecification readings
%T Inconsistency of Bayesian Inference for Misspecified Linear Models, and
a Proposal for Repairing It
%U http://arxiv.org/abs/1412.3730
%X We empirically show that Bayesian inference can be inconsistent under
misspecification in simple linear regression problems, both in a model
averaging/selection and in a Bayesian ridge regression setting. We use the
standard linear model, which assumes homoskedasticity, whereas the data are
heteroskedastic, and observe that the posterior puts its mass on ever more
high-dimensional models as the sample size increases. To remedy the problem, we
equip the likelihood in Bayes' theorem with an exponent called the learning
rate, and we propose the Safe Bayesian method to learn the learning rate from
the data. SafeBayes tends to select small learning rates as soon as the standard
posterior is not `cumulatively concentrated', and its results on our data are
quite encouraging.
@article{grunwald2014inconsistency,
  abstract      = {We empirically show that Bayesian inference can be inconsistent under
misspecification in simple linear regression problems, both in a model
averaging/selection and in a Bayesian ridge regression setting. We use the
standard linear model, which assumes homoskedasticity, whereas the data are
heteroskedastic, and observe that the posterior puts its mass on ever more
high-dimensional models as the sample size increases. To remedy the problem, we
equip the likelihood in Bayes' theorem with an exponent called the learning
rate, and we propose the Safe Bayesian method to learn the learning rate from
the data. SafeBayes tends to select small learning rates as soon as the standard
posterior is not `cumulatively concentrated', and its results on our data are
quite encouraging.},
  added-at      = {2020-02-07T21:05:09.000+0100},
  archiveprefix = {arXiv},
  author        = {Gr{\"u}nwald, Peter and van Ommen, Thijs},
  biburl        = {https://www.bibsonomy.org/bibtex/27f8e8371fcd451d5ce5dea3912b9a156/kirk86},
  description   = {[1412.3730] Inconsistency of Bayesian Inference for Misspecified Linear Models, and a Proposal for Repairing It},
  eprint        = {1412.3730},
  interhash     = {5a7c5103819e06feeba46d17e581fad7},
  intrahash     = {7f8e8371fcd451d5ce5dea3912b9a156},
  keywords      = {bayesian misspecification readings},
  note          = {arXiv preprint. 70 pages, 20 figures},
  primaryclass  = {math.ST},
  timestamp     = {2020-02-07T21:05:09.000+0100},
  title         = {Inconsistency of {Bayesian} Inference for Misspecified Linear Models,
and a Proposal for Repairing It},
  url           = {http://arxiv.org/abs/1412.3730},
  year          = {2014}
}