To perform inference after model selection, we propose controlling the
selective type I error; i.e., the error rate of a test given that it was
performed. By doing so, we recover long-run frequency properties among selected
hypotheses analogous to those that apply in the classical (non-adaptive)
context. Our proposal is closely related to data splitting and has a similar
intuitive justification, but is more powerful. Exploiting the classical theory
of Lehmann and Scheffé (1955), we derive most powerful unbiased selective
tests and confidence intervals for inference in exponential family models after
arbitrary selection procedures. For linear regression, we derive new selective
z-tests that generalize recent proposals for inference after model selection
and improve on their power, and new selective t-tests that do not require
knowledge of the error variance.
Description
[1410.2597] Optimal Inference After Model Selection
%0 Journal Article
%1 fithian2014optimal
%A Fithian, William
%A Sun, Dennis
%A Taylor, Jonathan
%D 2014
%K differential-privacy information
%T Optimal Inference After Model Selection
%U http://arxiv.org/abs/1410.2597
%X To perform inference after model selection, we propose controlling the
selective type I error; i.e., the error rate of a test given that it was
performed. By doing so, we recover long-run frequency properties among selected
hypotheses analogous to those that apply in the classical (non-adaptive)
context. Our proposal is closely related to data splitting and has a similar
intuitive justification, but is more powerful. Exploiting the classical theory
of Lehmann and Scheffé (1955), we derive most powerful unbiased selective
tests and confidence intervals for inference in exponential family models after
arbitrary selection procedures. For linear regression, we derive new selective
z-tests that generalize recent proposals for inference after model selection
and improve on their power, and new selective t-tests that do not require
knowledge of the error variance.
@article{fithian2014optimal,
  author        = {Fithian, William and Sun, Dennis and Taylor, Jonathan},
  title         = {Optimal Inference After Model Selection},
  year          = 2014,
  eprint        = {1410.2597},
  archiveprefix = {arXiv},
  doi           = {10.48550/arXiv.1410.2597},
  url           = {http://arxiv.org/abs/1410.2597},
  abstract      = {To perform inference after model selection, we propose controlling the
selective type I error; i.e., the error rate of a test given that it was
performed. By doing so, we recover long-run frequency properties among selected
hypotheses analogous to those that apply in the classical (non-adaptive)
context. Our proposal is closely related to data splitting and has a similar
intuitive justification, but is more powerful. Exploiting the classical theory
of Lehmann and Scheff{\'e} (1955), we derive most powerful unbiased selective
tests and confidence intervals for inference in exponential family models after
arbitrary selection procedures. For linear regression, we derive new selective
z-tests that generalize recent proposals for inference after model selection
and improve on their power, and new selective t-tests that do not require
knowledge of the error variance.},
  keywords      = {differential-privacy information},
  note          = {cite arxiv:1410.2597},
  added-at      = {2019-09-19T13:37:56.000+0200},
  timestamp     = {2019-09-19T13:37:56.000+0200},
  biburl        = {https://www.bibsonomy.org/bibtex/2d7ef43c3efe918244b6c957e3dd8bd91/kirk86},
  description   = {[1410.2597] Optimal Inference After Model Selection},
  interhash     = {9a4e2b2272632a19e5fa981785bef5be},
  intrahash     = {d7ef43c3efe918244b6c957e3dd8bd91}
}