W. Zheng, Z. Chen, J. Lu, and J. Zhou. (2019). arXiv:1903.05503. Comment: Accepted as CVPR 2019 Oral. Source code available at https://github.com/wzzheng/HDML.
Abstract
This paper presents a hardness-aware deep metric learning (HDML) framework.
Most previous deep metric learning methods employ the hard negative mining
strategy to alleviate the lack of informative samples for training. However,
this mining strategy only utilizes a subset of training data, which may not be
enough to characterize the global geometry of the embedding space
comprehensively. To address this problem, we perform linear interpolation on
embeddings to adaptively manipulate their hard levels and generate
corresponding label-preserving synthetics for recycled training, so that
information buried in all samples can be fully exploited and the metric is
always challenged with proper difficulty. Our method achieves very competitive
performance on the widely used CUB-200-2011, Cars196, and Stanford Online
Products datasets.
%0 Generic
%1 zheng2019hardnessaware
%A Zheng, Wenzhao
%A Chen, Zhaodong
%A Lu, Jiwen
%A Zhou, Jie
%D 2019
%K augmentation loss metric regularization
%T Hardness-Aware Deep Metric Learning
%U http://arxiv.org/abs/1903.05503
%X This paper presents a hardness-aware deep metric learning (HDML) framework.
Most previous deep metric learning methods employ the hard negative mining
strategy to alleviate the lack of informative samples for training. However,
this mining strategy only utilizes a subset of training data, which may not be
enough to characterize the global geometry of the embedding space
comprehensively. To address this problem, we perform linear interpolation on
embeddings to adaptively manipulate their hard levels and generate
corresponding label-preserving synthetics for recycled training, so that
information buried in all samples can be fully exploited and the metric is
always challenged with proper difficulty. Our method achieves very competitive
performance on the widely used CUB-200-2011, Cars196, and Stanford Online
Products datasets.
@misc{zheng2019hardnessaware,
  abstract      = {This paper presents a hardness-aware deep metric learning (HDML) framework.
Most previous deep metric learning methods employ the hard negative mining
strategy to alleviate the lack of informative samples for training. However,
this mining strategy only utilizes a subset of training data, which may not be
enough to characterize the global geometry of the embedding space
comprehensively. To address this problem, we perform linear interpolation on
embeddings to adaptively manipulate their hard levels and generate
corresponding label-preserving synthetics for recycled training, so that
information buried in all samples can be fully exploited and the metric is
always challenged with proper difficulty. Our method achieves very competitive
performance on the widely used CUB-200-2011, Cars196, and Stanford Online
Products datasets.},
  added-at      = {2019-04-07T21:51:43.000+0200},
  author        = {Zheng, Wenzhao and Chen, Zhaodong and Lu, Jiwen and Zhou, Jie},
  biburl        = {https://www.bibsonomy.org/bibtex/219c28b9f6a80af14e9d60956f9d3b763/nmatsuk},
  description   = {Hardness-Aware Deep Metric Learning},
  archiveprefix = {arXiv},
  eprint        = {1903.05503},
  interhash     = {e5d45372f229b2baee996cdfd0188ae5},
  intrahash     = {19c28b9f6a80af14e9d60956f9d3b763},
  keywords      = {augmentation loss metric regularization},
  note          = {Accepted as CVPR 2019 Oral. Source code available at https://github.com/wzzheng/HDML},
  timestamp     = {2019-04-07T21:51:43.000+0200},
  title         = {Hardness-Aware Deep Metric Learning},
  url           = {http://arxiv.org/abs/1903.05503},
  year          = 2019
}