Compression is at the heart of effective representation learning. However,
lossy compression is typically achieved through simple parametric models like
Gaussian noise to preserve analytic tractability, and the limitations this
imposes on learning are largely unexplored. Further, the Gaussian prior
assumptions in models such as variational autoencoders (VAEs) provide only an
upper bound on the compression rate in general. We introduce a new noise
channel, Echo noise, that admits a simple, exact expression for mutual
information for arbitrary input distributions. The noise is constructed in a
data-driven fashion that does not require restrictive distributional
assumptions. With its complex encoding mechanism and exact rate regularization,
Echo leads to improved bounds on log-likelihood and dominates $\beta$-VAEs
across the achievable range of rate-distortion trade-offs. Further, we show
that Echo noise can outperform state-of-the-art flow methods without the need
to train complex distributional transformations.
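Concretely, the exact rate in question takes a simple form: for an encoder that outputs a location $f(x)$ and a diagonal noise scale $s(x)$ with $|s_i(x)| < 1$, the Echo channel is $z = f(x) + s(x) \odot \epsilon$, where the noise $\epsilon$ is generated by recursively applying the same encoding to independent data samples, so that $\epsilon$ has the same distribution as $z$ and $I(x; z) = -\mathbb{E}\left[\sum_i \log s_i(x)\right]$ holds exactly. Below is a minimal NumPy sketch of this construction; the function names, the batch permutation used to draw the "other" samples, and the truncation depth are illustrative assumptions, not the authors' implementation.

# Minimal sketch of the Echo noise channel (illustrative, not the paper's code).
import numpy as np

def echo_sample(f, s, num_echoes=16):
    """Sample z = f(x) + s(x) * eps, where eps is built by "echoing" the
    encoder over other samples: eps = f(x1) + s(x1) * (f(x2) + ...),
    truncated after num_echoes terms. f, s have shape (batch, dim) and
    |s| < 1 elementwise so the echo converges."""
    batch, dim = f.shape
    eps = np.zeros((batch, dim))
    scale = np.ones((batch, dim))
    for _ in range(num_echoes):
        # Assumption: a random permutation of the batch stands in for
        # iid "other" training samples.
        idx = np.random.permutation(batch)
        eps += scale * f[idx]
        scale *= s[idx]
    return f + s * eps

def echo_rate(s):
    """Exact rate of the channel in nats, I(x; z) = -E[sum_i log s_i(x)],
    estimated by a batch average. No Gaussian assumption is needed."""
    return -np.log(s).sum(axis=1).mean()

# Toy usage with random stand-ins for encoder outputs.
np.random.seed(0)
f = np.random.normal(size=(32, 4))
s = 1.0 / (1.0 + np.exp(-np.random.normal(size=(32, 4))))  # sigmoid keeps s in (0, 1)
z = echo_sample(f, s)
print("exact rate estimate (nats):", echo_rate(s))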
@article{brekelmans2019exact,
author = {Brekelmans, Rob and Moyer, Daniel and Galstyan, Aram and Ver Steeg, Greg},
keywords = {compression mutual-information},
journal = {arXiv preprint arXiv:1904.07199},
title = {Exact Rate-Distortion in Autoencoders via Echo Noise},
url = {http://arxiv.org/abs/1904.07199},
year = 2019
}