In this paper we present Mask DINO, a unified object detection and
segmentation framework. Mask DINO extends DINO (DETR with Improved Denoising
Anchor Boxes) by adding a mask prediction branch which supports all image
segmentation tasks (instance, panoptic, and semantic). It makes use of the
query embeddings from DINO to dot-product a high-resolution pixel embedding map
to predict a set of binary masks. Some key components in DINO are extended for
segmentation through a shared architecture and training process. Mask DINO is
simple, efficient, scalable, and benefits from joint large-scale detection and
segmentation datasets. Our experiments show that Mask DINO significantly
outperforms all existing specialized segmentation methods, both on a ResNet-50
backbone and a pre-trained model with SwinL backbone. Notably, Mask DINO
establishes the best results to date on instance segmentation (54.5 AP on
COCO), panoptic segmentation (59.4 PQ on COCO), and semantic segmentation (60.8
mIoU on ADE20K). Code will be available at
https://github.com/IDEACVR/MaskDINO.
Description
Mask DINO: Towards A Unified Transformer-based Framework for Object Detection and Segmentation
%0 Generic
%1 li2022towards
%A Li, Feng
%A Zhang, Hao
%A Xu, Huaizhe
%A Liu, Shilong
%A Zhang, Lei
%A Ni, Lionel M.
%A Shum, Heung-Yeung
%D 2022
%K segmentation
%T Mask DINO: Towards A Unified Transformer-based Framework for Object
Detection and Segmentation
%U http://arxiv.org/abs/2206.02777
%X In this paper we present Mask DINO, a unified object detection and
segmentation framework. Mask DINO extends DINO (DETR with Improved Denoising
Anchor Boxes) by adding a mask prediction branch which supports all image
segmentation tasks (instance, panoptic, and semantic). It makes use of the
query embeddings from DINO to dot-product a high-resolution pixel embedding map
to predict a set of binary masks. Some key components in DINO are extended for
segmentation through a shared architecture and training process. Mask DINO is
simple, efficient, scalable, and benefits from joint large-scale detection and
segmentation datasets. Our experiments show that Mask DINO significantly
outperforms all existing specialized segmentation methods, both on a ResNet-50
backbone and a pre-trained model with SwinL backbone. Notably, Mask DINO
establishes the best results to date on instance segmentation (54.5 AP on
COCO), panoptic segmentation (59.4 PQ on COCO), and semantic segmentation (60.8
mIoU on ADE20K). Code will be available at
https://github.com/IDEACVR/MaskDINO.
@misc{li2022towards,
  abstract      = {In this paper we present Mask DINO, a unified object detection and
segmentation framework. Mask DINO extends DINO (DETR with Improved Denoising
Anchor Boxes) by adding a mask prediction branch which supports all image
segmentation tasks (instance, panoptic, and semantic). It makes use of the
query embeddings from DINO to dot-product a high-resolution pixel embedding map
to predict a set of binary masks. Some key components in DINO are extended for
segmentation through a shared architecture and training process. Mask DINO is
simple, efficient, scalable, and benefits from joint large-scale detection and
segmentation datasets. Our experiments show that Mask DINO significantly
outperforms all existing specialized segmentation methods, both on a ResNet-50
backbone and a pre-trained model with SwinL backbone. Notably, Mask DINO
establishes the best results to date on instance segmentation (54.5 AP on
COCO), panoptic segmentation (59.4 PQ on COCO), and semantic segmentation (60.8
mIoU on ADE20K). Code will be available at
\url{https://github.com/IDEACVR/MaskDINO}.},
  added-at      = {2022-07-17T16:37:43.000+0200},
  archiveprefix = {arXiv},
  author        = {Li, Feng and Zhang, Hao and Xu, Huaizhe and Liu, Shilong and Zhang, Lei and Ni, Lionel M. and Shum, Heung-Yeung},
  biburl        = {https://www.bibsonomy.org/bibtex/2a74b3f6bbf5357de4d599ec8c42acc55/redtedtezza},
  description   = {Mask DINO: Towards A Unified Transformer-based Framework for Object Detection and Segmentation},
  eprint        = {2206.02777},
  interhash     = {482cd835253a7a5212c1eb834dad2249},
  intrahash     = {a74b3f6bbf5357de4d599ec8c42acc55},
  keywords      = {segmentation},
  note          = {cite arxiv:2206.02777},
  timestamp     = {2022-07-17T16:37:43.000+0200},
  title         = {{Mask DINO}: Towards A Unified {Transformer}-based Framework for Object
Detection and Segmentation},
  url           = {http://arxiv.org/abs/2206.02777},
  year          = {2022}
}