Likelihood-based generative models are a promising resource to detect
out-of-distribution (OOD) inputs which could compromise the robustness or
reliability of a machine learning system. However, likelihoods derived from
such models have been shown to be problematic for detecting certain types of
inputs that significantly differ from training data. In this paper, we pose
that this problem is due to the excessive influence that input complexity has
in generative models' likelihoods. We report a set of experiments supporting
this hypothesis, and use an estimate of input complexity to derive an efficient
and parameter-free OOD score, which can be seen as a likelihood-ratio, akin to
Bayesian model comparison. We find such score to perform comparably to, or even
better than, existing OOD detection approaches under a wide range of data sets,
models, model sizes, and complexity estimates.
Description
[1909.11480] Input complexity and out-of-distribution detection with likelihood-based generative models
%0 Journal Article
%1 serra2019input
%A Serrà, Joan
%A Álvarez, David
%A Gómez, Vicenç
%A Slizovskaia, Olga
%A Núñez, José F.
%A Luque, Jordi
%D 2019
%K anomaly-detection bayesian generative-models outliers readings uncertainty
%T Input complexity and out-of-distribution detection with likelihood-based
generative models
%U http://arxiv.org/abs/1909.11480
%X Likelihood-based generative models are a promising resource to detect
out-of-distribution (OOD) inputs which could compromise the robustness or
reliability of a machine learning system. However, likelihoods derived from
such models have been shown to be problematic for detecting certain types of
inputs that significantly differ from training data. In this paper, we pose
that this problem is due to the excessive influence that input complexity has
in generative models' likelihoods. We report a set of experiments supporting
this hypothesis, and use an estimate of input complexity to derive an efficient
and parameter-free OOD score, which can be seen as a likelihood-ratio, akin to
Bayesian model comparison. We find such score to perform comparably to, or even
better than, existing OOD detection approaches under a wide range of data sets,
models, model sizes, and complexity estimates.
@article{serra2019input,
  abstract      = {Likelihood-based generative models are a promising resource to detect
out-of-distribution (OOD) inputs which could compromise the robustness or
reliability of a machine learning system. However, likelihoods derived from
such models have been shown to be problematic for detecting certain types of
inputs that significantly differ from training data. In this paper, we pose
that this problem is due to the excessive influence that input complexity has
in generative models' likelihoods. We report a set of experiments supporting
this hypothesis, and use an estimate of input complexity to derive an efficient
and parameter-free OOD score, which can be seen as a likelihood-ratio, akin to
Bayesian model comparison. We find such score to perform comparably to, or even
better than, existing OOD detection approaches under a wide range of data sets,
models, model sizes, and complexity estimates.},
  added-at      = {2019-12-06T17:10:49.000+0100},
  author        = {Serrà, Joan and Álvarez, David and Gómez, Vicenç and Slizovskaia, Olga and Núñez, José F. and Luque, Jordi},
  biburl        = {https://www.bibsonomy.org/bibtex/2e6646461ee56e68e21c63a61bd356f64/kirk86},
  description   = {[1909.11480] Input complexity and out-of-distribution detection with likelihood-based generative models},
  archiveprefix = {arXiv},
  eprint        = {1909.11480},
  interhash     = {94fe6f83cd207394d1e263dcb673d0b2},
  intrahash     = {e6646461ee56e68e21c63a61bd356f64},
  keywords      = {anomaly-detection bayesian generative-models outliers readings uncertainty},
  note          = {Comment: Includes appendix},
  timestamp     = {2019-12-06T17:10:49.000+0100},
  title         = {Input complexity and out-of-distribution detection with likelihood-based generative models},
  url           = {http://arxiv.org/abs/1909.11480},
  year          = {2019}
}