Glow: Generative Flow with Invertible 1x1 Convolutions
D. Kingma and P. Dhariwal (2018). arXiv:1807.03039. Comment: 15 pages; fixed typo in abstract.
Abstract
Flow-based generative models (Dinh et al., 2014) are conceptually attractive
due to tractability of the exact log-likelihood, tractability of exact
latent-variable inference, and parallelizability of both training and
synthesis. In this paper we propose Glow, a simple type of generative flow
using an invertible 1x1 convolution. Using our method we demonstrate a
significant improvement in log-likelihood on standard benchmarks. Perhaps most
strikingly, we demonstrate that a generative model optimized towards the plain
log-likelihood objective is capable of efficient realistic-looking synthesis
and manipulation of large images. The code for our model is available at
https://github.com/openai/glow
Description
[1807.03039] Glow: Generative Flow with Invertible 1x1 Convolutions
%0 Journal Article
%1 kingma2018generative
%A Kingma, Diederik P.
%A Dhariwal, Prafulla
%D 2018
%K flows generative-models
%T Glow: Generative Flow with Invertible 1x1 Convolutions
%U http://arxiv.org/abs/1807.03039
%X Flow-based generative models (Dinh et al., 2014) are conceptually attractive
due to tractability of the exact log-likelihood, tractability of exact
latent-variable inference, and parallelizability of both training and
synthesis. In this paper we propose Glow, a simple type of generative flow
using an invertible 1x1 convolution. Using our method we demonstrate a
significant improvement in log-likelihood on standard benchmarks. Perhaps most
strikingly, we demonstrate that a generative model optimized towards the plain
log-likelihood objective is capable of efficient realistic-looking synthesis
and manipulation of large images. The code for our model is available at
https://github.com/openai/glow
@article{kingma2018generative,
  abstract      = {Flow-based generative models (Dinh et al., 2014) are conceptually attractive
due to tractability of the exact log-likelihood, tractability of exact
latent-variable inference, and parallelizability of both training and
synthesis. In this paper we propose Glow, a simple type of generative flow
using an invertible 1x1 convolution. Using our method we demonstrate a
significant improvement in log-likelihood on standard benchmarks. Perhaps most
strikingly, we demonstrate that a generative model optimized towards the plain
log-likelihood objective is capable of efficient realistic-looking synthesis
and manipulation of large images. The code for our model is available at
https://github.com/openai/glow},
  added-at      = {2019-12-09T11:32:02.000+0100},
  archiveprefix = {arXiv},
  author        = {Kingma, Diederik P. and Dhariwal, Prafulla},
  biburl        = {https://www.bibsonomy.org/bibtex/267b6df05ed48ddb6dc9fb6fc3247bc84/kirk86},
  description   = {[1807.03039] Glow: Generative Flow with Invertible 1x1 Convolutions},
  eprint        = {1807.03039},
  interhash     = {5c6e8244f0febbc1c7d1906c844d95e8},
  intrahash     = {67b6df05ed48ddb6dc9fb6fc3247bc84},
  keywords      = {flows generative-models},
  note          = {15 pages; fixed typo in abstract},
  primaryclass  = {stat.ML},
  timestamp     = {2019-12-09T11:32:02.000+0100},
  title         = {{Glow}: Generative Flow with Invertible 1x1 Convolutions},
  url           = {http://arxiv.org/abs/1807.03039},
  year          = 2018
}