In this paper we propose a novel gradient algorithm to learn a policy from an expert's observed behavior assuming that the expert behaves optimally with respect to some unknown reward function of a Markovian Decision Problem. The algorithm's aim is to find a reward function such that the resulting optimal policy matches well the expert's observed behavior. The main difficulty is that the mapping from the parameters to policies is both nonsmooth and highly redundant. Resorting to subdifferentials solves the first difficulty, while the second one is overcome by computing natural gradients. We tested the proposed method in two artificial domains and found it to be more reliable and efficient than some previous methods.
%0 Conference Paper
%1 neu2007
%A Neu, G.
%A Szepesvári, Cs.
%B UAI
%D 2007
%K application, apprenticeship learning, inverse reinforcement learning, natural gradient, theory
%P 295--302
%T Apprenticeship Learning using Inverse Reinforcement Learning and Gradient Methods
%X In this paper we propose a novel gradient algorithm to learn a policy from an expert's observed behavior assuming that the expert behaves optimally with respect to some unknown reward function of a Markovian Decision Problem. The algorithm's aim is to find a reward function such that the resulting optimal policy matches well the expert's observed behavior. The main difficulty is that the mapping from the parameters to policies is both nonsmooth and highly redundant. Resorting to subdifferentials solves the first difficulty, while the second one is overcome by computing natural gradients. We tested the proposed method in two artificial domains and found it to be more reliable and efficient than some previous methods.
@inproceedings{neu2007,
  abstract      = {In this paper we propose a novel gradient algorithm to learn a policy from an expert's observed behavior assuming that the expert behaves optimally with respect to some unknown reward function of a Markovian Decision Problem. The algorithm's aim is to find a reward function such that the resulting optimal policy matches well the expert's observed behavior. The main difficulty is that the mapping from the parameters to policies is both nonsmooth and highly redundant. Resorting to subdifferentials solves the first difficulty, while the second one is overcome by computing natural gradients. We tested the proposed method in two artificial domains and found it to be more reliable and efficient than some previous methods.},
  added-at      = {2020-03-17T03:03:01.000+0100},
  author        = {Neu, Gergely and Szepesv{\'a}ri, Csaba},
  biburl        = {https://www.bibsonomy.org/bibtex/279331c945157bee61401f4e973208916/csaba},
  booktitle     = {Proceedings of the 23rd Conference on Uncertainty in Artificial Intelligence ({UAI})},
  date-added    = {2010-08-28 17:38:14 -0600},
  date-modified = {2010-11-25 00:54:55 -0700},
  interhash     = {1c2d225dae45d7f08af67131d35d4e74},
  intrahash     = {79331c945157bee61401f4e973208916},
  keywords      = {application, apprenticeship learning, inverse reinforcement learning, natural gradient, theory},
  pages         = {295--302},
  pdf           = {papers/uai2007-irl.pdf},
  timestamp     = {2020-03-17T03:03:01.000+0100},
  title         = {Apprenticeship Learning Using Inverse Reinforcement Learning and Gradient Methods},
  year          = {2007},
}