To make decisions based on a model fit by Auto-Encoding Variational Bayes
(AEVB), practitioners typically use importance sampling to estimate a
functional of the posterior distribution. The variational distribution found by
AEVB serves as the proposal distribution for importance sampling. However, this
proposal distribution may give unreliable (high variance) importance sampling
estimates, thus leading to poor decisions. We explore how changing the
objective function for learning the variational distribution, while continuing
to learn the generative model based on the ELBO, affects the quality of
downstream decisions. For a particular model, we characterize the error of
importance sampling as a function of posterior variance and show that proposal
distributions learned with evidence upper bounds are better. Motivated by these
theoretical results, we propose a novel variant of the VAE. In addition to
experimenting with MNIST, we present a full-fledged application of the proposed
method to single-cell RNA sequencing. In this challenging instance of multiple
hypothesis testing, the proposed method surpasses the current state of the art.
Description
[2002.07217] Decision-Making with Auto-Encoding Variational Bayes
%0 Journal Article
%1 lopez2020decisionmaking
%A Lopez, Romain
%A Boyeau, Pierre
%A Yosef, Nir
%A Jordan, Michael I.
%A Regier, Jeffrey
%D 2020
%K bayesian variational
%T Decision-Making with Auto-Encoding Variational Bayes
%U http://arxiv.org/abs/2002.07217
%X To make decisions based on a model fit by Auto-Encoding Variational Bayes
(AEVB), practitioners typically use importance sampling to estimate a
functional of the posterior distribution. The variational distribution found by
AEVB serves as the proposal distribution for importance sampling. However, this
proposal distribution may give unreliable (high variance) importance sampling
estimates, thus leading to poor decisions. We explore how changing the
objective function for learning the variational distribution, while continuing
to learn the generative model based on the ELBO, affects the quality of
downstream decisions. For a particular model, we characterize the error of
importance sampling as a function of posterior variance and show that proposal
distributions learned with evidence upper bounds are better. Motivated by these
theoretical results, we propose a novel variant of the VAE. In addition to
experimenting with MNIST, we present a full-fledged application of the proposed
method to single-cell RNA sequencing. In this challenging instance of multiple
hypothesis testing, the proposed method surpasses the current state of the art.
@article{lopez2020decisionmaking,
  abstract      = {To make decisions based on a model fit by Auto-Encoding Variational Bayes
(AEVB), practitioners typically use importance sampling to estimate a
functional of the posterior distribution. The variational distribution found by
AEVB serves as the proposal distribution for importance sampling. However, this
proposal distribution may give unreliable (high variance) importance sampling
estimates, thus leading to poor decisions. We explore how changing the
objective function for learning the variational distribution, while continuing
to learn the generative model based on the ELBO, affects the quality of
downstream decisions. For a particular model, we characterize the error of
importance sampling as a function of posterior variance and show that proposal
distributions learned with evidence upper bounds are better. Motivated by these
theoretical results, we propose a novel variant of the VAE. In addition to
experimenting with MNIST, we present a full-fledged application of the proposed
method to single-cell RNA sequencing. In this challenging instance of multiple
hypothesis testing, the proposed method surpasses the current state of the art.},
  added-at      = {2020-02-20T16:09:57.000+0100},
  author        = {Lopez, Romain and Boyeau, Pierre and Yosef, Nir and Jordan, Michael I. and Regier, Jeffrey},
  biburl        = {https://www.bibsonomy.org/bibtex/2cf9b34ea874794ece3329e741cdbe3a5/kirk86},
  description   = {[2002.07217] Decision-Making with Auto-Encoding Variational Bayes},
  archiveprefix = {arXiv},
  eprint        = {2002.07217},
  interhash     = {14fa05b501932d38b5efb12f6656fbc5},
  intrahash     = {cf9b34ea874794ece3329e741cdbe3a5},
  keywords      = {bayesian variational},
  timestamp     = {2020-02-20T16:09:57.000+0100},
  title         = {Decision-Making with {Auto-Encoding} {Variational} {Bayes}},
  url           = {https://arxiv.org/abs/2002.07217},
  year          = 2020
}