Training a neural network is synonymous with learning the values of the
weights. In contrast, we demonstrate that randomly weighted neural networks
contain subnetworks which achieve impressive performance without ever training
the weight values. Hidden in a randomly weighted Wide ResNet-50 we show that
there is a subnetwork (with random weights) that is smaller than, but matches
the performance of a ResNet-34 trained on ImageNet. Not only do these
"untrained subnetworks" exist, but we provide an algorithm to effectively find
them. We empirically show that as randomly weighted neural networks with fixed
weights grow wider and deeper, an "untrained subnetwork" approaches a network
with learned weights in accuracy.
Description
[1911.13299] What's Hidden in a Randomly Weighted Neural Network?
%0 Journal Article
%1 ramanujan2019whats
%A Ramanujan, Vivek
%A Wortsman, Mitchell
%A Kembhavi, Aniruddha
%A Farhadi, Ali
%A Rastegari, Mohammad
%D 2019
%K deep-learning generalization readings theory
%T What's Hidden in a Randomly Weighted Neural Network?
%U http://arxiv.org/abs/1911.13299
%X Training a neural network is synonymous with learning the values of the
weights. In contrast, we demonstrate that randomly weighted neural networks
contain subnetworks which achieve impressive performance without ever training
the weight values. Hidden in a randomly weighted Wide ResNet-50 we show that
there is a subnetwork (with random weights) that is smaller than, but matches
the performance of a ResNet-34 trained on ImageNet. Not only do these
"untrained subnetworks" exist, but we provide an algorithm to effectively find
them. We empirically show that as randomly weighted neural networks with fixed
weights grow wider and deeper, an "untrained subnetwork" approaches a network
with learned weights in accuracy.
@article{ramanujan2019whats,
  abstract      = {Training a neural network is synonymous with learning the values of the
weights. In contrast, we demonstrate that randomly weighted neural networks
contain subnetworks which achieve impressive performance without ever training
the weight values. Hidden in a randomly weighted Wide ResNet-50 we show that
there is a subnetwork (with random weights) that is smaller than, but matches
the performance of a ResNet-34 trained on ImageNet. Not only do these
"untrained subnetworks" exist, but we provide an algorithm to effectively find
them. We empirically show that as randomly weighted neural networks with fixed
weights grow wider and deeper, an "untrained subnetwork" approaches a network
with learned weights in accuracy.},
  added-at      = {2019-12-03T21:25:43.000+0100},
  archiveprefix = {arXiv},
  author        = {Ramanujan, Vivek and Wortsman, Mitchell and Kembhavi, Aniruddha and Farhadi, Ali and Rastegari, Mohammad},
  biburl        = {https://www.bibsonomy.org/bibtex/2f0ca99849dc75c9b5d8f73f0540a8757/kirk86},
  description   = {[1911.13299] What's Hidden in a Randomly Weighted Neural Network?},
  eprint        = {1911.13299},
  interhash     = {598d920eee2746ce49f312ad3ee962e4},
  intrahash     = {f0ca99849dc75c9b5d8f73f0540a8757},
  keywords      = {deep-learning generalization readings theory},
  note          = {cite arxiv:1911.13299},
  timestamp     = {2019-12-03T21:25:43.000+0100},
  title         = {What's Hidden in a Randomly Weighted Neural Network?},
  url           = {http://arxiv.org/abs/1911.13299},
  year          = {2019},
}