Distillation (Hinton et al., 2015) and privileged information (Vapnik &
Izmailov, 2015) are two techniques that enable machines to learn from other
machines. This paper unifies these two techniques into generalized
distillation, a framework to learn from multiple machines and data
representations. We provide theoretical and causal insight about the inner
workings of generalized distillation, extend it to unsupervised, semisupervised
and multitask learning scenarios, and illustrate its efficacy on a variety of
numerical simulations on both synthetic and real-world data.
%0 Generic
%1 lopezpaz2015unifying
%A Lopez-Paz, David
%A Bottou, Léon
%A Schölkopf, Bernhard
%A Vapnik, Vladimir
%D 2015
%K ml proposal tau
%T Unifying distillation and privileged information
%U http://arxiv.org/abs/1511.03643
%X Distillation (Hinton et al., 2015) and privileged information (Vapnik &
Izmailov, 2015) are two techniques that enable machines to learn from other
machines. This paper unifies these two techniques into generalized
distillation, a framework to learn from multiple machines and data
representations. We provide theoretical and causal insight about the inner
workings of generalized distillation, extend it to unsupervised, semisupervised
and multitask learning scenarios, and illustrate its efficacy on a variety of
numerical simulations on both synthetic and real-world data.
@misc{lopezpaz2015unifying,
  abstract      = {Distillation (Hinton et al., 2015) and privileged information (Vapnik \&
Izmailov, 2015) are two techniques that enable machines to learn from other
machines. This paper unifies these two techniques into generalized
distillation, a framework to learn from multiple machines and data
representations. We provide theoretical and causal insight about the inner
workings of generalized distillation, extend it to unsupervised, semisupervised
and multitask learning scenarios, and illustrate its efficacy on a variety of
numerical simulations on both synthetic and real-world data.},
  added-at      = {2016-11-28T13:52:59.000+0100},
  archiveprefix = {arXiv},
  author        = {Lopez-Paz, David and Bottou, L{\'e}on and Sch{\"o}lkopf, Bernhard and Vapnik, Vladimir},
  biburl        = {https://www.bibsonomy.org/bibtex/27a125115ffdd16970fdc413489896786/machinelearning},
  eprint        = {1511.03643},
  interhash     = {e2b4ea62e07602da1157c5238a24494c},
  intrahash     = {7a125115ffdd16970fdc413489896786},
  keywords      = {ml proposal tau},
  note          = {cite arxiv:1511.03643},
  timestamp     = {2016-11-28T13:52:59.000+0100},
  title         = {Unifying distillation and privileged information},
  url           = {http://arxiv.org/abs/1511.03643},
  year          = {2015}
}