Adversarial examples have attracted significant attention in machine
learning, but the reasons for their existence and pervasiveness remain unclear.
We demonstrate that adversarial examples can be directly attributed to the
presence of non-robust features: features derived from patterns in the data
distribution that are highly predictive, yet brittle and incomprehensible to
humans. After capturing these features within a theoretical framework, we
establish their widespread existence in standard datasets. Finally, we present
a simple setting where we can rigorously tie the phenomena we observe in
practice to a misalignment between the (human-specified) notion of robustness
and the inherent geometry of the data.
Description
[1905.02175] Adversarial Examples Are Not Bugs, They Are Features
@misc{ilyas2019adversarial,
author = {Ilyas, Andrew and Santurkar, Shibani and Tsipras, Dimitris and Engstrom, Logan and Tran, Brandon and Madry, Aleksander},
keywords = {adversarial},
note = {cite arxiv:1905.02175},
title = {Adversarial Examples Are Not Bugs, They Are Features},
url = {http://arxiv.org/abs/1905.02175},
year = 2019
}