The framework of normalizing flows provides a general strategy for flexible
variational inference of posteriors over latent variables. We propose a new
type of normalizing flow, inverse autoregressive flow (IAF), that, in contrast
to earlier published flows, scales well to high-dimensional latent spaces. The
proposed flow consists of a chain of invertible transformations, where each
transformation is based on an autoregressive neural network. In experiments, we
show that IAF significantly improves upon diagonal Gaussian approximate
posteriors. In addition, we demonstrate that a novel type of variational
autoencoder, coupled with IAF, is competitive with neural autoregressive models
in terms of attained log-likelihood on natural images, while allowing
significantly faster synthesis.
Description
[1606.04934] Improving Variational Inference with Inverse Autoregressive Flow
%0 Journal Article
%1 kingma2016improving
%A Kingma, Diederik P.
%A Salimans, Tim
%A Jozefowicz, Rafal
%A Chen, Xi
%A Sutskever, Ilya
%A Welling, Max
%D 2016
%K flows generative-models
%T Improving Variational Inference with Inverse Autoregressive Flow
%U http://arxiv.org/abs/1606.04934
%X The framework of normalizing flows provides a general strategy for flexible
variational inference of posteriors over latent variables. We propose a new
type of normalizing flow, inverse autoregressive flow (IAF), that, in contrast
to earlier published flows, scales well to high-dimensional latent spaces. The
proposed flow consists of a chain of invertible transformations, where each
transformation is based on an autoregressive neural network. In experiments, we
show that IAF significantly improves upon diagonal Gaussian approximate
posteriors. In addition, we demonstrate that a novel type of variational
autoencoder, coupled with IAF, is competitive with neural autoregressive models
in terms of attained log-likelihood on natural images, while allowing
significantly faster synthesis.
@article{kingma2016improving,
  abstract      = {The framework of normalizing flows provides a general strategy for flexible
variational inference of posteriors over latent variables. We propose a new
type of normalizing flow, inverse autoregressive flow (IAF), that, in contrast
to earlier published flows, scales well to high-dimensional latent spaces. The
proposed flow consists of a chain of invertible transformations, where each
transformation is based on an autoregressive neural network. In experiments, we
show that IAF significantly improves upon diagonal Gaussian approximate
posteriors. In addition, we demonstrate that a novel type of variational
autoencoder, coupled with IAF, is competitive with neural autoregressive models
in terms of attained log-likelihood on natural images, while allowing
significantly faster synthesis.},
  added-at      = {2019-12-09T11:33:20.000+0100},
  archiveprefix = {arXiv},
  author        = {Kingma, Diederik P. and Salimans, Tim and Jozefowicz, Rafal and Chen, Xi and Sutskever, Ilya and Welling, Max},
  biburl        = {https://www.bibsonomy.org/bibtex/2f36e2c09a7c9a499f4b1996422dc5b20/kirk86},
  description   = {[1606.04934] Improving Variational Inference with Inverse Autoregressive Flow},
  eprint        = {1606.04934},
  interhash     = {a3f1f542018c28b73633ea1fb813fbb0},
  intrahash     = {f36e2c09a7c9a499f4b1996422dc5b20},
  keywords      = {flows generative-models},
  note          = {cite arxiv:1606.04934},
  primaryclass  = {cs.LG},
  timestamp     = {2019-12-09T11:33:20.000+0100},
  title         = {Improving Variational Inference with Inverse Autoregressive Flow},
  url           = {https://arxiv.org/abs/1606.04934},
  year          = 2016
}