Scene labeling consists of labeling each pixel in an image with the category of the object it belongs to. We propose a method that uses a multiscale convolutional network trained from raw pixels to extract dense feature vectors that encode regions of multiple sizes centered on each pixel. The method alleviates the need for engineered features, and produces a powerful representation that captures texture, shape, and contextual information. We report results using multiple postprocessing methods to produce the final labeling. Among those, we propose a technique to automatically retrieve, from a pool of segmentation components, an optimal set of components that best explain the scene; these components are arbitrary, for example, they can be taken from a segmentation tree or from any family of oversegmentations. The system yields record accuracies on the SIFT Flow dataset (33 classes) and the Barcelona dataset (170 classes) and near-record accuracy on Stanford background dataset (eight classes), while being an order of magnitude faster than competing approaches, producing a 320×240 image labeling in less than a second, including feature extraction.
Description
Learning Hierarchical Features for Scene Labeling - IEEE Journals & Magazine
%0 Journal Article
%1 journals/pami/FarabetCNL13
%A Farabet, C.
%A Couprie, C.
%A Najman, L.
%A LeCun, Y.
%D 2013
%J IEEE Transactions on Pattern Analysis and Machine Intelligence
%K das_2018_1 dnn
%N 8
%P 1915-1929
%R 10.1109/TPAMI.2012.231
%T Learning Hierarchical Features for Scene Labeling
%U http://ieeexplore.ieee.org/document/6338939/
%V 35
%X Scene labeling consists of labeling each pixel in an image with the category of the object it belongs to. We propose a method that uses a multiscale convolutional network trained from raw pixels to extract dense feature vectors that encode regions of multiple sizes centered on each pixel. The method alleviates the need for engineered features, and produces a powerful representation that captures texture, shape, and contextual information. We report results using multiple postprocessing methods to produce the final labeling. Among those, we propose a technique to automatically retrieve, from a pool of segmentation components, an optimal set of components that best explain the scene; these components are arbitrary, for example, they can be taken from a segmentation tree or from any family of oversegmentations. The system yields record accuracies on the SIFT Flow dataset (33 classes) and the Barcelona dataset (170 classes) and near-record accuracy on Stanford background dataset (eight classes), while being an order of magnitude faster than competing approaches, producing a 320×240 image labeling in less than a second, including feature extraction.
@article{journals/pami/FarabetCNL13,
  abstract      = {Scene labeling consists of labeling each pixel in an image with the category of the object it belongs to. We propose a method that uses a multiscale convolutional network trained from raw pixels to extract dense feature vectors that encode regions of multiple sizes centered on each pixel. The method alleviates the need for engineered features, and produces a powerful representation that captures texture, shape, and contextual information. We report results using multiple postprocessing methods to produce the final labeling. Among those, we propose a technique to automatically retrieve, from a pool of segmentation components, an optimal set of components that best explain the scene; these components are arbitrary, for example, they can be taken from a segmentation tree or from any family of oversegmentations. The system yields record accuracies on the SIFT Flow dataset (33 classes) and the Barcelona dataset (170 classes) and near-record accuracy on Stanford background dataset (eight classes), while being an order of magnitude faster than competing approaches, producing a 320$\times$240 image labeling in less than a second, including feature extraction.},
  added-at      = {2017-11-09T13:02:22.000+0100},
  author        = {Farabet, Cl{\'e}ment and Couprie, Camille and Najman, Laurent and LeCun, Yann},
  biburl        = {https://www.bibsonomy.org/bibtex/2e0a6ea33d5aa5e54e4eab699941e8987/chwick},
  description   = {Learning Hierarchical Features for Scene Labeling - IEEE Journals \& Magazine},
  doi           = {10.1109/TPAMI.2012.231},
  interhash     = {b093a676d1ce2dbede5fb52d00c7ed83},
  intrahash     = {e0a6ea33d5aa5e54e4eab699941e8987},
  issn          = {0162-8828},
  journal       = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  keywords      = {das_2018_1 dnn},
  month         = aug,
  number        = 8,
  pages         = {1915--1929},
  timestamp     = {2017-11-09T13:02:22.000+0100},
  title         = {Learning Hierarchical Features for Scene Labeling},
  url           = {http://ieeexplore.ieee.org/document/6338939/},
  volume        = 35,
  year          = 2013
}