We present a logarithmic-scale efficient convolutional neural network
architecture for edge devices, named WaveletNet. Our model is based on the
well-known depthwise convolution, and on two new layers, which we introduce in
this work: a wavelet convolution and a depthwise fast wavelet transform. By
breaking the symmetry in channel dimensions and applying a fast algorithm,
WaveletNet shrinks the complexity of convolutional blocks by an O(log D/D)
factor, where D is the number of channels. Experiments on CIFAR-10 and ImageNet
classification show superior and comparable performances of WaveletNet compared
to state-of-the-art models such as MobileNetV2.
%0 Generic
%1 citeulike:14677686
%A xxx,
%D 2018
%K arch backbone classification mobilenet wavelet
%T WaveletNet: Logarithmic Scale Efficient Convolutional Neural Networks for Edge Devices
%U http://arxiv.org/abs/1811.11644
%X We present a logarithmic-scale efficient convolutional neural network
architecture for edge devices, named WaveletNet. Our model is based on the
well-known depthwise convolution, and on two new layers, which we introduce in
this work: a wavelet convolution and a depthwise fast wavelet transform. By
breaking the symmetry in channel dimensions and applying a fast algorithm,
WaveletNet shrinks the complexity of convolutional blocks by an O(log D/D)
factor, where D is the number of channels. Experiments on CIFAR-10 and ImageNet
classification show superior and comparable performances of WaveletNet compared
to state-of-the-art models such as MobileNetV2.
@misc{citeulike:14677686,
  title         = {{WaveletNet}: Logarithmic Scale Efficient Convolutional Neural Networks for Edge Devices},
  author        = {xxx},
  year          = 2018,
  month         = nov,
  day           = 28,
  archiveprefix = {arXiv},
  eprint        = {1811.11644},
  url           = {http://arxiv.org/abs/1811.11644},
  abstract      = {We present a logarithmic-scale efficient convolutional
                   neural network architecture for edge devices, named
                   WaveletNet. Our model is based on the well-known depthwise
                   convolution, and on two new layers, which we introduce in
                   this work: a wavelet convolution and a depthwise fast
                   wavelet transform. By breaking the symmetry in channel
                   dimensions and applying a fast algorithm, WaveletNet
                   shrinks the complexity of convolutional blocks by an
                   O(log D/D) factor, where D is the number of channels.
                   Experiments on CIFAR-10 and ImageNet classification show
                   superior and comparable performances of WaveletNet
                   compared to state-of-the-art models such as MobileNetV2.},
  keywords      = {arch backbone classification mobilenet wavelet},
  added-at      = {2019-02-27T22:23:29.000+0100},
  timestamp     = {2019-02-27T22:23:29.000+0100},
  posted-at     = {2019-01-08 11:17:04},
  priority      = {4},
  biburl        = {https://www.bibsonomy.org/bibtex/2ee54f424430a30c74e835024495cf4db/nmatsuk},
  interhash     = {dd9a7f11f772c281e9a5ee29d6870834},
  intrahash     = {ee54f424430a30c74e835024495cf4db},
  citeulike-article-id = {14677686},
  citeulike-linkout-0  = {http://arxiv.org/abs/1811.11644},
  citeulike-linkout-1  = {http://arxiv.org/pdf/1811.11644},
  internal-note = {NOTE(review): author field is a placeholder ("xxx") from the original export -- TODO: fill in the real author list from arXiv:1811.11644},
}