Very deep CNNs with small 3x3 kernels have recently been shown to achieve
very strong performance as acoustic models in hybrid NN-HMM speech recognition
systems. In this paper we investigate how to efficiently scale these models to
larger datasets. Specifically, we address the design choice of pooling and
padding along the time dimension, which renders convolutional evaluation of
sequences highly inefficient. We propose a new CNN design without time padding
and without time pooling, which is slightly suboptimal for accuracy but has two
significant advantages: it enables sequence training and deployment by allowing
efficient convolutional evaluation of full utterances, and it allows batch
normalization to be straightforwardly adopted for CNNs on sequence data. Through
batch normalization, we recover the performance lost by removing time pooling,
while keeping the benefit of efficient convolutional evaluation. We demonstrate
the performance of our models both on larger-scale data than before and after
sequence training. Our very deep CNN model, sequence trained on the 2000h
Switchboard dataset, obtains a 9.4% word error rate on the Hub5 test set,
matching with a single model the performance of the 2015 IBM system
combination, the previous best published result.
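
The key design point described above is a CNN whose convolutions neither pad nor pool along time, so the time axis shrinks by a fixed number of frames per layer and a whole utterance can be scored in one convolutional pass. As a rough, hypothetical sketch of that idea (not the authors' code; PyTorch, the (batch, channel, time, frequency) tensor layout, and all names below are our assumptions):

import torch
import torch.nn as nn

class NoTimePadBlock(nn.Module):
    """One conv block in the spirit of the abstract: 3x3 kernels,
    zero-padding and pooling along frequency only, never along time."""
    def __init__(self, in_ch, out_ch, pool_freq=False):
        super().__init__()
        # padding=(0, 1): pad frequency by 1 on each side; leave time
        # unpadded, so each conv trims 2 frames off the time axis.
        self.conv = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=(0, 1))
        # With no padded frames along time, batch-norm statistics are
        # computed over real frames only, which is presumably what makes
        # batch normalization straightforward to adopt on sequence data.
        self.bn = nn.BatchNorm2d(out_ch)
        self.relu = nn.ReLU(inplace=True)
        # Optional pooling along frequency (kernel (1, 2)); no time pooling.
        self.pool = nn.MaxPool2d(kernel_size=(1, 2)) if pool_freq else None

    def forward(self, x):
        x = self.relu(self.bn(self.conv(x)))
        return self.pool(x) if self.pool is not None else x

# A full utterance of arbitrary length is evaluated in a single pass;
# the network simply consumes 2 context frames per conv layer.
utterance = torch.randn(1, 3, 500, 40)   # 500 frames, 40 mel bins (hypothetical)
out = NoTimePadBlock(3, 64, pool_freq=True)(utterance)
print(out.shape)                         # torch.Size([1, 64, 498, 20])

Because the time axis is never padded, there are no position-dependent border effects, and the dense per-frame outputs needed for sequence training fall out of one forward pass instead of many overlapping-window evaluations.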
%0 Generic
%1 sercu2016advances
%A Sercu, Tom
%A Goel, Vaibhava
%D 2016
%R 10.21437/Interspeech.2016-1033
%T Advances in Very Deep Convolutional Neural Networks for LVCSR
%U http://dx.doi.org/10.21437/Interspeech.2016-1033
@misc{sercu2016advances,
archiveprefix = {arXiv},
author = {Sercu, Tom and Goel, Vaibhava},
day = 25,
doi = {10.21437/Interspeech.2016-1033},
eprint = {1604.01792},
month = jun,
title = {{Advances in Very Deep Convolutional Neural Networks for LVCSR}},
url = {http://dx.doi.org/10.21437/Interspeech.2016-1033},
year = 2016
}