Dropout has been one of the standard approaches to training deep neural networks,
and it is known to regularize large models and prevent overfitting. The effect of
dropout has commonly been explained as avoiding co-adaptation. In this paper, however,
we propose a new explanation of why dropout works and, based on it, a new technique
for designing better activation functions. First, we show that dropout is an
optimization technique that pushes the input of a nonlinear activation function
toward its saturation area by accelerating the flow of gradient information through
the saturation area during backpropagation. Based on this explanation, we propose
gradient acceleration in activation functions (GAAF), a technique that accelerates
gradients so that they flow even in the saturation area. The input to the activation
function can then climb onto the saturation area, which makes the network more robust
because the model converges on a flat region. Experimental results support our
explanation of dropout and confirm that the proposed GAAF technique improves
performance with the expected properties.
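
The abstract only sketches the mechanism, so here is a minimal, illustrative PyTorch-style sketch of the general idea: add a term to the activation whose value is negligible but whose gradient is nonzero, so gradient signal survives when the input sits in the saturation area. The sawtooth-shaped term g(x) = frac(k*x)/k used below is an assumption for illustration; the acceleration function actually used in the paper may differ.

import torch

def gaaf_sigmoid(x, k=100.0):
    # Illustrative only: sigmoid plus a sawtooth-like term whose value is
    # bounded by 1/k but whose gradient is ~1 almost everywhere, so the
    # backward pass still receives signal in the saturation area.
    g = (k * x - torch.floor(k * x)) / k   # frac(k*x)/k; floor() has zero gradient
    return torch.sigmoid(x) + g

x = torch.tensor([6.0], requires_grad=True)   # deep in sigmoid's saturation area
gaaf_sigmoid(x).sum().backward()
print(x.grad)   # roughly 1.0, versus sigmoid'(6) ~ 0.0025 for the plain sigmoid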
@misc{citeulike:14620279,
archiveprefix = {arXiv},
author = {xxx},
day = 26,
eprint = {1806.09783},
keywords = {arch regularization},
month = jun,
title = {{Gradient Acceleration in Activation Functions}},
url = {http://arxiv.org/abs/1806.09783},
year = 2018
}