We present a new anytime algorithm that achieves near-optimal regret for any instance of finite stochastic partial monitoring. In particular, the new algorithm achieves the minimax regret, within logarithmic factors, for both "easy" and "hard" problems. For easy problems, it additionally achieves logarithmic individual regret. Most importantly, the algorithm is adaptive in the sense that if the opponent strategy is in an "easy region" of the strategy space then the regret grows as if the problem was easy. As an implication, we show that under some reasonable additional assumptions, the algorithm enjoys an O(T^{1/2}) regret in Dynamic Pricing, proven to be hard by Bartók et al. (2011).
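As background for the abstract's claims, here is a minimal sketch of the regret notion in finite stochastic partial monitoring; the notation (loss matrix $L$, feedback matrix $H$, opponent distribution $p^*$) is the standard setup from the partial-monitoring literature and is assumed here rather than quoted from the paper. The learner repeatedly picks an action $I_t \in \{1,\dots,N\}$, the opponent draws an outcome $J_t \in \{1,\dots,M\}$ i.i.d. from a fixed $p^* \in \Delta_M$, and only the feedback symbol $H_{I_t,J_t}$ (never the incurred loss $L_{I_t,J_t}$) is revealed. The expected regret after $T$ rounds is
\[
  R_T \;=\; \mathbb{E}\!\left[\sum_{t=1}^{T} L_{I_t,J_t}\right] \;-\; T \min_{1 \le i \le N} \ell_i^\top p^*,
\]
where $\ell_i$ is the $i$-th row of $L$. In the classification of Bartók et al. (2011), "easy" (locally observable) games have minimax regret of order $\sqrt{T}$ up to logarithmic factors, while "hard" (globally but not locally observable) games have minimax regret of order $T^{2/3}$; the adaptivity claim above says the algorithm's regret is governed by the region of $\Delta_M$ that actually contains $p^*$, not by the worst case over the whole simplex.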
%0 Conference Paper
%1 BarZolSze1206
%A Bartók, G.
%A Zolghadr, N.
%A Szepesvári, Cs.
%B ICML
%D 2012
%K bounds, information, learning, minimax, monitoring, online, partial, stochastic, theory
%P 1--20
%T An adaptive algorithm for finite stochastic partial monitoring (extended version)
%X We present a new anytime algorithm that achieves near-optimal regret for any instance of finite stochastic partial monitoring. In particular, the new algorithm achieves the minimax regret, within logarithmic factors, for both "easy" and "hard" problems. For easy problems, it additionally achieves logarithmic individual regret. Most importantly, the algorithm is adaptive in the sense that if the opponent strategy is in an "easy region" of the strategy space then the regret grows as if the problem was easy. As an implication, we show that under some reasonable additional assumptions, the algorithm enjoys an O(T^{1/2}) regret in Dynamic Pricing, proven to be hard by Bartók et al. (2011).
@inproceedings{BarZolSze1206,
abstract = {We present a new anytime algorithm that achieves near-optimal regret for any instance of finite stochastic partial monitoring. In particular, the new algorithm achieves the minimax regret, within logarithmic factors, for both "easy" and "hard" problems. For easy problems, it additionally achieves logarithmic individual regret. Most importantly, the algorithm is adaptive in the sense that if the opponent strategy is in an "easy region" of the strategy space then the regret grows as if the problem was easy. As an implication, we show that under some reasonable additional assumptions, the algorithm enjoys an O(T^{1/2}) regret in Dynamic Pricing, proven to be hard by Bartok et al. (2011).},
added-at = {2020-03-17T03:03:01.000+0100},
author = {Bart{\'o}k, G. and Zolghadr, N. and Szepesv{\'a}ri, {Cs}.},
biburl = {https://www.bibsonomy.org/bibtex/25de629aa3897acce68f12944fb74bbff/csaba},
booktitle = {ICML},
date-added = {2012-06-03 14:44:57 -0600},
date-modified = {2012-06-06 21:33:10 -0600},
interhash = {fdab6db36e1a776539eddf572164f539},
intrahash = {5de629aa3897acce68f12944fb74bbff},
keywords = {bounds, information, learning, minimax, monitoring, online, partial, stochastic, theory},
month = {June},
pages = {1--20},
pdf = {papers/adaptive_partmon_full.pdf},
timestamp = {2020-03-17T03:03:01.000+0100},
title = {An adaptive algorithm for finite stochastic partial monitoring (extended version)},
year = 2012
}