We study the problem of properly learning large margin halfspaces in
the agnostic PAC model. In more detail, we study the complexity of properly
learning $d$-dimensional halfspaces on the unit ball within misclassification
error $\alpha \cdot \mathrm{OPT}_{\gamma} + \epsilon$, where
$\mathrm{OPT}_{\gamma}$ is the optimal $\gamma$-margin error rate and $\alpha
\geq 1$ is the approximation ratio. We give learning algorithms and
computational hardness results for this problem, for all values of the
approximation ratio $\alpha \geq 1$, that are nearly-matching for a range of
parameters. Specifically, for the natural setting that $\alpha$ is any constant
bigger than one, we provide an essentially tight complexity characterization.
On the positive side, we give an $\alpha = 1.01$-approximate proper learner
that uses $O(1/(\epsilon^2\gamma^2))$ samples (which is optimal) and runs in
time $\mathrm{poly}(d/\epsilon) \cdot 2^{\tilde{O}(1/\gamma^2)}$. On the
negative side, we show that any constant factor approximate proper
learner has runtime $\mathrm{poly}(d/\epsilon) \cdot 2^{(1/\gamma)^{2-o(1)}}$,
assuming the Exponential Time Hypothesis.
Description
[1908.11335] Nearly Tight Bounds for Robust Proper Learning of Halfspaces with a Margin
%0 Journal Article
%1 diakonikolas2019nearly
%A Diakonikolas, Ilias
%A Kane, Daniel M.
%A Manurangsi, Pasin
%D 2019
%K bounds robustness stats
%T Nearly Tight Bounds for Robust Proper Learning of Halfspaces with a
Margin
%U http://arxiv.org/abs/1908.11335
%X We study the problem of properly learning large margin halfspaces in
the agnostic PAC model. In more detail, we study the complexity of properly
learning $d$-dimensional halfspaces on the unit ball within misclassification
error $\alpha \cdot \mathrm{OPT}_{\gamma} + \epsilon$, where
$\mathrm{OPT}_{\gamma}$ is the optimal $\gamma$-margin error rate and $\alpha
\geq 1$ is the approximation ratio. We give learning algorithms and
computational hardness results for this problem, for all values of the
approximation ratio $\alpha \geq 1$, that are nearly-matching for a range of
parameters. Specifically, for the natural setting that $\alpha$ is any constant
bigger than one, we provide an essentially tight complexity characterization.
On the positive side, we give an $\alpha = 1.01$-approximate proper learner
that uses $O(1/(\epsilon^2\gamma^2))$ samples (which is optimal) and runs in
time $\mathrm{poly}(d/\epsilon) \cdot 2^{\tilde{O}(1/\gamma^2)}$. On the
negative side, we show that any constant factor approximate proper
learner has runtime $\mathrm{poly}(d/\epsilon) \cdot 2^{(1/\gamma)^{2-o(1)}}$,
assuming the Exponential Time Hypothesis.
@article{diakonikolas2019nearly,
  abstract      = {We study the problem of {\em properly} learning large margin halfspaces in
the agnostic PAC model. In more detail, we study the complexity of properly
learning $d$-dimensional halfspaces on the unit ball within misclassification
error $\alpha \cdot \mathrm{OPT}_{\gamma} + \epsilon$, where
$\mathrm{OPT}_{\gamma}$ is the optimal $\gamma$-margin error rate and $\alpha
\geq 1$ is the approximation ratio. We give learning algorithms and
computational hardness results for this problem, for all values of the
approximation ratio $\alpha \geq 1$, that are nearly-matching for a range of
parameters. Specifically, for the natural setting that $\alpha$ is any constant
bigger than one, we provide an essentially tight complexity characterization.
On the positive side, we give an $\alpha = 1.01$-approximate proper learner
that uses $O(1/(\epsilon^2\gamma^2))$ samples (which is optimal) and runs in
time $\mathrm{poly}(d/\epsilon) \cdot 2^{\tilde{O}(1/\gamma^2)}$. On the
negative side, we show that {\em any} constant factor approximate proper
learner has runtime $\mathrm{poly}(d/\epsilon) \cdot 2^{(1/\gamma)^{2-o(1)}}$,
assuming the Exponential Time Hypothesis.},
  added-at      = {2020-02-26T13:35:44.000+0100},
  archiveprefix = {arXiv},
  author        = {Diakonikolas, Ilias and Kane, Daniel M. and Manurangsi, Pasin},
  biburl        = {https://www.bibsonomy.org/bibtex/27ffd39c1c84be092db2294a3c54a0e7b/kirk86},
  description   = {[1908.11335] Nearly Tight Bounds for Robust Proper Learning of Halfspaces with a Margin},
  eprint        = {1908.11335},
  interhash     = {47753cbafcfb5e41422cfeb39b5e9f12},
  intrahash     = {7ffd39c1c84be092db2294a3c54a0e7b},
  keywords      = {bounds robustness stats},
  note          = {cite arxiv:1908.11335},
  timestamp     = {2020-02-26T13:36:12.000+0100},
  title         = {Nearly Tight Bounds for Robust Proper Learning of Halfspaces with a
Margin},
  url           = {http://arxiv.org/abs/1908.11335},
  year          = {2019},
}