We present an end-to-end, multimodal, fully convolutional network for
extracting semantic structures from document images. We consider document
semantic structure extraction as a pixel-wise segmentation task, and propose a
unified model that classifies pixels based not only on their visual appearance,
as in the traditional page segmentation task, but also on the content of
underlying text. Moreover, we propose an efficient synthetic document
generation process that we use to generate pretraining data for our network.
Once the network is trained on a large set of synthetic documents, we fine-tune
the network on unlabeled real documents using a semi-supervised approach. We
systematically study the optimum network architecture and show that both our
multimodal approach and the synthetic data pretraining significantly boost the
performance.
Description
[1706.02337] Learning to Extract Semantic Structure from Documents Using Multimodal Fully Convolutional Neural Network
%0 Generic
%1 yang2017learning
%A Yang, Xiao
%A Yumer, Ersin
%A Asente, Paul
%A Kraley, Mike
%A Kifer, Daniel
%A Giles, C. Lee
%D 2017
%K DAS_2018_1 cnn dissertation dnn page_segmentation
%T Learning to Extract Semantic Structure from Documents Using Multimodal
Fully Convolutional Neural Network
%U http://openaccess.thecvf.com/content_cvpr_2017/papers/Yang_Learning_to_Extract_CVPR_2017_paper.pdf
%X We present an end-to-end, multimodal, fully convolutional network for
extracting semantic structures from document images. We consider document
semantic structure extraction as a pixel-wise segmentation task, and propose a
unified model that classifies pixels based not only on their visual appearance,
as in the traditional page segmentation task, but also on the content of
underlying text. Moreover, we propose an efficient synthetic document
generation process that we use to generate pretraining data for our network.
Once the network is trained on a large set of synthetic documents, we fine-tune
the network on unlabeled real documents using a semi-supervised approach. We
systematically study the optimum network architecture and show that both our
multimodal approach and the synthetic data pretraining significantly boost the
performance.
@comment{Conference paper (CVPR 2017 Spotlight); was exported from BibSonomy as
  @misc with the arXiv id buried in a garbled note. Converted to @inproceedings,
  arXiv id moved to eprint/archiveprefix, acronyms brace-protected.}
@inproceedings{yang2017learning,
  abstract      = {We present an end-to-end, multimodal, fully convolutional network for
extracting semantic structures from document images. We consider document
semantic structure extraction as a pixel-wise segmentation task, and propose a
unified model that classifies pixels based not only on their visual appearance,
as in the traditional page segmentation task, but also on the content of
underlying text. Moreover, we propose an efficient synthetic document
generation process that we use to generate pretraining data for our network.
Once the network is trained on a large set of synthetic documents, we fine-tune
the network on unlabeled real documents using a semi-supervised approach. We
systematically study the optimum network architecture and show that both our
multimodal approach and the synthetic data pretraining significantly boost the
performance.},
  added-at      = {2017-10-16T10:25:28.000+0200},
  archiveprefix = {arXiv},
  author        = {Yang, Xiao and Yumer, Ersin and Asente, Paul and Kraley, Mike and Kifer, Daniel and Giles, C. Lee},
  biburl        = {https://www.bibsonomy.org/bibtex/297956f2d7264155c8679538e6889cd93/chwick},
  booktitle     = {Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  description   = {[1706.02337] Learning to Extract Semantic Structure from Documents Using Multimodal Fully Convolutional Neural Network},
  eprint        = {1706.02337},
  interhash     = {e73afe65c050c46e6a18754e7c40d117},
  intrahash     = {97956f2d7264155c8679538e6889cd93},
  keywords      = {DAS_2018_1 cnn dissertation dnn page_segmentation},
  note          = {{CVPR} 2017 Spotlight},
  timestamp     = {2017-11-09T10:08:15.000+0100},
  title         = {Learning to Extract Semantic Structure from Documents Using Multimodal Fully Convolutional Neural Network},
  url           = {http://openaccess.thecvf.com/content_cvpr_2017/papers/Yang_Learning_to_Extract_CVPR_2017_paper.pdf},
  year          = {2017},
}