The deep image prior was recently introduced as a prior for natural images.
It represents images as the output of a convolutional network with random
inputs. For "inference", gradient descent is performed to adjust network
parameters to make the output match observations. This approach yields good
performance on a range of image reconstruction tasks. We show that the deep
image prior is asymptotically equivalent to a stationary Gaussian process prior
in the limit as the number of channels in each layer of the network goes to
infinity, and derive the corresponding kernel. This informs a Bayesian approach
to inference. We show that by conducting posterior inference using stochastic
gradient Langevin dynamics (SGLD) we avoid the need for early stopping, which
is a drawback of the current approach, and improve results for denoising and
inpainting tasks.
We illustrate these intuitions on a number of 1D and 2D signal reconstruction
tasks.
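To make the procedure concrete, here is a minimal sketch in PyTorch of the two inference schemes the abstract contrasts, on a toy 1D denoising task. Everything here (the tiny architecture, the hyperparameters, and the noise/temperature scaling) is an illustrative assumption rather than the paper's exact setup: running the loop without the injected parameter noise is the original deep-image-prior recipe, which relies on early stopping, while the noisy updates with post-burn-in averaging approximate the SGLD-style posterior inference the paper advocates.

import torch
import torch.nn as nn

torch.manual_seed(0)

# Toy 1D denoising task: observation y = clean signal + Gaussian noise.
n = 128
t = torch.linspace(0, 1, n)
clean = torch.sin(2 * torch.pi * 4 * t)
y = clean + 0.3 * torch.randn(n)

# A small convolutional network f_theta with a fixed random input z;
# the reconstruction is the network output, and only theta is trained.
net = nn.Sequential(
    nn.Conv1d(8, 32, 3, padding=1), nn.ReLU(),
    nn.Conv1d(32, 32, 3, padding=1), nn.ReLU(),
    nn.Conv1d(32, 1, 3, padding=1),
)
z = torch.randn(1, 8, n)  # random input, kept fixed throughout

lr, temp, burn_in, steps = 1e-2, 1e-2, 2000, 4000
opt = torch.optim.SGD(net.parameters(), lr=lr)
posterior_mean = torch.zeros(n)
kept = 0

for step in range(steps):
    opt.zero_grad()
    out = net(z).squeeze()
    loss = ((out - y) ** 2).mean()
    loss.backward()
    opt.step()
    with torch.no_grad():
        # SGLD-style update: inject Gaussian parameter noise each step.
        # (temp is an assumed temperature knob; dropping this block and
        # stopping early recovers the vanilla deep-image-prior scheme.)
        for p in net.parameters():
            p.add_(temp * (2 * lr) ** 0.5 * torch.randn_like(p))
        if step >= burn_in:
            # Running average of post-burn-in outputs ~ posterior mean.
            kept += 1
            posterior_mean += (out.detach() - posterior_mean) / kept

print("MSE to clean signal:", ((posterior_mean - clean) ** 2).mean().item())

Averaging the late iterates plays the role the paper ascribes to posterior inference: run long enough, the noiseless loop eventually fits the observation noise, whereas the SGLD average stays regularized without any stopping heuristic.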
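The infinite-channel claim follows the same pattern as the broader neural-network-as-Gaussian-process literature; the recursion below is a generic sketch of that argument (with assumed weight variance $\sigma_w^2$, bias variance $\sigma_b^2$, filter size $k$, and nonlinearity $\phi$), not the paper's specific derivation, which works out the stationary kernel for the DIP architecture. For a convolutional layer whose $C$ input channels are i.i.d. draws from a Gaussian process with covariance $K^{(\ell)}$, the covariance of any output channel at spatial positions $p$ and $p'$ converges as $C \to \infty$ to

\[
K^{(\ell+1)}(p, p') = \sigma_b^2 + \frac{\sigma_w^2}{k} \sum_{\delta \in \mathrm{filter}} \mathbb{E}_{f \sim \mathcal{GP}(0,\, K^{(\ell)})}\!\left[ \phi\big(f(p+\delta)\big)\, \phi\big(f(p'+\delta)\big) \right],
\]

so the prior over network outputs is obtained by composing this map layer by layer, and stationarity of the resulting kernel is what makes the limiting prior translation-invariant.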
Description
[1904.07457v1] A Bayesian Perspective on the Deep Image Prior
%0 Journal Article
%1 cheng2019bayesian
%A Cheng, Zezhou
%A Gadelha, Matheus
%A Maji, Subhransu
%A Sheldon, Daniel
%D 2019
%K bayesian deep-learning
%T A Bayesian Perspective on the Deep Image Prior
%U http://arxiv.org/abs/1904.07457
@article{cheng2019bayesian,
added-at = {2020-01-20T00:56:13.000+0100},
author = {Cheng, Zezhou and Gadelha, Matheus and Maji, Subhransu and Sheldon, Daniel},
biburl = {https://www.bibsonomy.org/bibtex/26737a7007ecf57d1f5981b90e9c9a17b/kirk86},
description = {[1904.07457v1] A Bayesian Perspective on the Deep Image Prior},
interhash = {c90ea985bae13ad2f4cffb335b4ab3c2},
intrahash = {6737a7007ecf57d1f5981b90e9c9a17b},
keywords = {bayesian deep-learning},
note = {cite arxiv:1904.07457. Comment: CVPR 2019},
timestamp = {2020-01-20T00:56:13.000+0100},
title = {A Bayesian Perspective on the Deep Image Prior},
url = {http://arxiv.org/abs/1904.07457},
year = 2019
}