Y. Yang, I. Morillo, and T. Hospedales. (2018). arXiv:1806.06988. Comment: presented at the 2018 ICML Workshop on Human Interpretability in Machine Learning (WHI 2018), Stockholm, Sweden.
Abstract
Deep neural networks have been proven powerful at processing perceptual data,
such as images and audio. However for tabular data, tree-based models are more
popular. A nice property of tree-based models is their natural
interpretability. In this work, we present Deep Neural Decision Trees (DNDT) --
tree models realised by neural networks. A DNDT is intrinsically interpretable,
as it is a tree. Yet as it is also a neural network (NN), it can be easily
implemented in NN toolkits, and trained with gradient descent rather than
greedy splitting. We evaluate DNDT on several tabular datasets, verify its
efficacy, and investigate similarities and differences between DNDT and vanilla
decision trees. Interestingly, DNDT self-prunes at both split and
feature-level.
%0 Generic
%1 yang2018neural
%A Yang, Yongxin
%A Morillo, Irene Garcia
%A Hospedales, Timothy M.
%D 2018
%E ArXiv
%K trees
%T Deep Neural Decision Trees
%U http://arxiv.org/abs/1806.06988
%X Deep neural networks have been proven powerful at processing perceptual data,
such as images and audio. However for tabular data, tree-based models are more
popular. A nice property of tree-based models is their natural
interpretability. In this work, we present Deep Neural Decision Trees (DNDT) --
tree models realised by neural networks. A DNDT is intrinsically interpretable,
as it is a tree. Yet as it is also a neural network (NN), it can be easily
implemented in NN toolkits, and trained with gradient descent rather than
greedy splitting. We evaluate DNDT on several tabular datasets, verify its
efficacy, and investigate similarities and differences between DNDT and vanilla
decision trees. Interestingly, DNDT self-prunes at both split and
feature-level.
% arXiv preprint (BibSonomy export, cleaned up):
% - the arXiv identifier now lives in eprint/archiveprefix instead of being
%   glued into the note field as "cite arxiv:...Comment: ...";
% - editor = {ArXiv} removed — arXiv is a repository, not an editor;
% - note keeps only the genuine free-form remark (workshop presentation).
@misc{yang2018neural,
  abstract      = {Deep neural networks have been proven powerful at processing perceptual data,
such as images and audio. However for tabular data, tree-based models are more
popular. A nice property of tree-based models is their natural
interpretability. In this work, we present Deep Neural Decision Trees (DNDT) --
tree models realised by neural networks. A DNDT is intrinsically interpretable,
as it is a tree. Yet as it is also a neural network (NN), it can be easily
implemented in NN toolkits, and trained with gradient descent rather than
greedy splitting. We evaluate DNDT on several tabular datasets, verify its
efficacy, and investigate similarities and differences between DNDT and vanilla
decision trees. Interestingly, DNDT self-prunes at both split and
feature-level.},
  added-at      = {2018-11-28T10:44:46.000+0100},
  archiveprefix = {arXiv},
  author        = {Yang, Yongxin and Morillo, Irene Garcia and Hospedales, Timothy M.},
  biburl        = {https://www.bibsonomy.org/bibtex/2c9bbc228fb2aad3bb04f5bae1ea6de21/topel},
  eprint        = {1806.06988},
  interhash     = {26961a29bd55ecac0ff279abe07fe8fa},
  intrahash     = {c9bbc228fb2aad3bb04f5bae1ea6de21},
  keywords      = {trees},
  note          = {Presented at the 2018 ICML Workshop on Human Interpretability in Machine Learning (WHI 2018), Stockholm, Sweden},
  timestamp     = {2018-11-28T10:44:46.000+0100},
  title         = {Deep Neural Decision Trees},
  url           = {http://arxiv.org/abs/1806.06988},
  year          = {2018},
}