Graphics processing unit (GPU) is used for a faster artificial neural network. It is used to implement the matrix multiplication of a neural network to enhance the time performance of a text detection system. Preliminary results produced a 20-fold performance enhancement using an ATI RADEON 9700 PRO board. The parallelism of a GPU is fully utilized by accumulating a lot of input feature vectors and weight vectors, then converting the many inner-product operations into one matrix operation. Further research areas include benchmarking the performance with various hardware and GPU-aware learning algorithms.
%0 Journal Article
%1 Oh20041311
%A Oh, Kyoung-Su
%A Jung, Keechul
%D 2004
%J Pattern Recognition
%K ma-zehe neuralnet
%N 6
%P 1311 - 1314
%R http://dx.doi.org/10.1016/j.patcog.2004.01.013
%T GPU implementation of neural networks
%U http://www.sciencedirect.com/science/article/pii/S0031320304000524
%V 37
%X Graphics processing unit (GPU) is used for a faster artificial neural network. It is used to implement the matrix multiplication of a neural network to enhance the time performance of a text detection system. Preliminary results produced a 20-fold performance enhancement using an ATI RADEON 9700 PRO board. The parallelism of a GPU is fully utilized by accumulating a lot of input feature vectors and weight vectors, then converting the many inner-product operations into one matrix operation. Further research areas include benchmarking the performance with various hardware and GPU-aware learning algorithms.
@article{Oh20041311,
  abstract    = {Graphics processing unit (GPU) is used for a faster artificial neural network. It is used to implement the matrix multiplication of a neural network to enhance the time performance of a text detection system. Preliminary results produced a 20-fold performance enhancement using an ATI RADEON 9700 PRO board. The parallelism of a GPU is fully utilized by accumulating a lot of input feature vectors and weight vectors, then converting the many inner-product operations into one matrix operation. Further research areas include benchmarking the performance with various hardware and GPU-aware learning algorithms.},
  added-at    = {2016-11-14T16:50:29.000+0100},
  author      = {Oh, Kyoung-Su and Jung, Keechul},
  biburl      = {https://www.bibsonomy.org/bibtex/28a406fddcb2de43dbccb51524a825f76/albinzehe},
  description = {GPU implementation of neural networks},
  doi         = {10.1016/j.patcog.2004.01.013},
  interhash   = {44a9b4d1c8f3458e56670e6597c687ee},
  intrahash   = {8a406fddcb2de43dbccb51524a825f76},
  issn        = {0031-3203},
  journal     = {Pattern Recognition},
  keywords    = {ma-zehe neuralnet},
  number      = {6},
  pages       = {1311--1314},
  timestamp   = {2016-11-14T16:50:52.000+0100},
  title       = {{GPU} implementation of neural networks},
  url         = {http://www.sciencedirect.com/science/article/pii/S0031320304000524},
  volume      = {37},
  year        = {2004},
}