We propose to study the generalization error of a learned predictor $\hat h$
in terms of that of a surrogate (potentially randomized) classifier that is
coupled to $\hat h$ and designed to trade empirical risk for control of
generalization error. In the case where $\hat h$ interpolates the data, it is
interesting to consider theoretical surrogate classifiers that are partially
derandomized or rerandomized, e.g., fit to the training data but with modified
label noise. We show that replacing $\hat h$ by its conditional distribution
with respect to an arbitrary $\sigma$-field is a viable method to derandomize.
We give an example, inspired by the work of Nagarajan and Kolter (2019), where
the learned classifier $\hat h$ interpolates the training data with high
probability, has small risk, and, yet, does not belong to a nonrandom class
with a tight uniform bound on two-sided generalization error. At the same time,
we bound the risk of $\hat h$ in terms of a surrogate that is constructed by
conditioning and shown to belong to a nonrandom class with uniformly small
generalization error.
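A schematic reading of the construction described in the abstract, in notation
introduced here rather than taken from the paper: given a sub-$\sigma$-field
$\mathcal{G}$, the derandomized surrogate is the conditional distribution of
$\hat h$ given $\mathcal{G}$,
\[
Q_{\mathcal{G}} = \mathcal{L}\bigl(\hat h \mid \mathcal{G}\bigr),
\qquad
R(Q_{\mathcal{G}}) = \mathbb{E}_{h \sim Q_{\mathcal{G}}}\bigl[R(h)\bigr],
\]
where $R$ denotes risk, extended to randomized classifiers by averaging, and
$\hat R$ the corresponding empirical risk. Choosing $\mathcal{G}$ so that
$Q_{\mathcal{G}}$ lands in a nonrandom class $\mathcal{H}$ with a uniformly
small generalization gap then yields, up to a term controlling the coupling
between $\hat h$ and $Q_{\mathcal{G}}$, a bound of the form
\[
R(\hat h) \;\le\; \hat R(Q_{\mathcal{G}})
\;+\; \sup_{Q \in \mathcal{H}} \bigl( R(Q) - \hat R(Q) \bigr)
\;+\; (\text{coupling term}).
\]
This is only a sketch of the program the abstract outlines, not the paper's
actual theorem.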
@article{negrea2019defense,
author = {Negrea, Jeffrey and Dziugaite, Gintare Karolina and Roy, Daniel M.},
keywords = {bounds generalization learning readings},
note = {arXiv:1912.04265; 12 pages},
title = {In Defense of Uniform Convergence: Generalization via derandomization
with an application to interpolating predictors},
url = {http://arxiv.org/abs/1912.04265},
year = 2019
}