In this paper, we focus on the supervised learning problem with corrupted
training data. We assume that the training dataset is generated from a mixture
of a target distribution and other unknown distributions. We estimate the
quality of each data by revealing the correlation between the generated
distribution and the target distribution. To this end, we present a novel
framework referred to here as ChoiceNet that can robustly infer the target
distribution in the presence of inconsistent data. We demonstrate that the
proposed framework is applicable to both classification and regression tasks.
ChoiceNet is evaluated in comprehensive experiments, where we show that it
constantly outperforms existing baseline methods in the handling of noisy data.
Particularly, ChoiceNet is successfully applied to autonomous driving tasks
where it learns a safe driving policy from a dataset with mixed qualities. In
the classification task, we apply the proposed method to the MNIST and CIFAR-10
datasets and it shows superior performances in terms of robustness to noisy
labels.
Description
[1805.06431] ChoiceNet: Robust Learning by Revealing Output Correlations
%0 Generic
%1 choi2018choicenet
%A Choi, Sungjoon
%A Hong, Sanghoon
%A Lim, Sungbin
%D 2018
%K deep-learning readings robustness
%T ChoiceNet: Robust Learning by Revealing Output Correlations
%U http://arxiv.org/abs/1805.06431
%X In this paper, we focus on the supervised learning problem with corrupted
training data. We assume that the training dataset is generated from a mixture
of a target distribution and other unknown distributions. We estimate the
quality of each data by revealing the correlation between the generated
distribution and the target distribution. To this end, we present a novel
framework referred to here as ChoiceNet that can robustly infer the target
distribution in the presence of inconsistent data. We demonstrate that the
proposed framework is applicable to both classification and regression tasks.
ChoiceNet is evaluated in comprehensive experiments, where we show that it
constantly outperforms existing baseline methods in the handling of noisy data.
Particularly, ChoiceNet is successfully applied to autonomous driving tasks
where it learns a safe driving policy from a dataset with mixed qualities. In
the classification task, we apply the proposed method to the MNIST and CIFAR-10
datasets and it shows superior performances in terms of robustness to noisy
labels.
@misc{choi2018choicenet,
  abstract      = {In this paper, we focus on the supervised learning problem with corrupted
training data. We assume that the training dataset is generated from a mixture
of a target distribution and other unknown distributions. We estimate the
quality of each data by revealing the correlation between the generated
distribution and the target distribution. To this end, we present a novel
framework referred to here as ChoiceNet that can robustly infer the target
distribution in the presence of inconsistent data. We demonstrate that the
proposed framework is applicable to both classification and regression tasks.
ChoiceNet is evaluated in comprehensive experiments, where we show that it
constantly outperforms existing baseline methods in the handling of noisy data.
Particularly, ChoiceNet is successfully applied to autonomous driving tasks
where it learns a safe driving policy from a dataset with mixed qualities. In
the classification task, we apply the proposed method to the MNIST and CIFAR-10
datasets and it shows superior performances in terms of robustness to noisy
labels.},
  added-at      = {2019-11-14T20:41:35.000+0100},
  archiveprefix = {arXiv},
  author        = {Choi, Sungjoon and Hong, Sanghoon and Lim, Sungbin},
  biburl        = {https://www.bibsonomy.org/bibtex/2b4fa46c3d434705642862a182974af1f/kirk86},
  description   = {[1805.06431] ChoiceNet: Robust Learning by Revealing Output Correlations},
  eprint        = {1805.06431},
  interhash     = {0e6134561e01dd02352c1538f3481056},
  intrahash     = {b4fa46c3d434705642862a182974af1f},
  keywords      = {deep-learning readings robustness},
  timestamp     = {2019-11-14T20:41:35.000+0100},
  title         = {{ChoiceNet}: Robust Learning by Revealing Output Correlations},
  url           = {http://arxiv.org/abs/1805.06431},
  year          = {2018},
}