In this paper we propose the use of continuous residual modules for graph
kernels in Graph Neural Networks. We show how both discrete and continuous
residual layers allow for more robust training, where continuous residual
layers are those whose output is produced by integrating the layer function
with an Ordinary Differential Equation (ODE) solver. We show experimentally
that these residual layers achieve better results than non-residual modules
when multiple layers are used, mitigating the low-pass filtering effect of
GCN-based models. Finally, we apply and analyse the behaviour of these
techniques and indicate how they can be useful in other domains by allowing
more predictable behaviour under dynamic computation times.
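The abstract contrasts two kinds of residual graph layers: discrete residuals, where each layer adds the output of a graph kernel back onto the node features, and continuous residuals, where that same kernel defines the right-hand side of an ODE whose integral produces the output. The sketch below is an illustrative assumption, not the authors' code: GraphKernel, discrete_residual and continuous_residual are hypothetical names, and a plain fixed-step Euler loop stands in for whatever ODE solver the paper actually uses (a package such as torchdiffeq could be substituted).

# Minimal sketch (assumed, not from the paper): one GCN-style kernel used both as a
# discrete residual block, x <- x + f(A_hat, x), and as a continuous residual block,
# dx/dt = f(A_hat, x), integrated with explicit Euler steps.
import torch
import torch.nn as nn


class GraphKernel(nn.Module):
    """A single GCN-style kernel: normalized adjacency times a linear map."""

    def __init__(self, dim):
        super().__init__()
        self.lin = nn.Linear(dim, dim)

    def forward(self, a_hat, x):
        return torch.tanh(a_hat @ self.lin(x))


def discrete_residual(kernel, a_hat, x, n_layers=4):
    # Stacked discrete residual layers sharing one kernel.
    for _ in range(n_layers):
        x = x + kernel(a_hat, x)
    return x


def continuous_residual(kernel, a_hat, x, t1=1.0, steps=20):
    # Continuous residual layer: integrate dx/dt = f(A_hat, x) from t=0 to t=t1
    # with a fixed-step Euler scheme (an adaptive ODE solver would also work).
    h = t1 / steps
    for _ in range(steps):
        x = x + h * kernel(a_hat, x)
    return x


if __name__ == "__main__":
    n, dim = 5, 8
    a = torch.rand(n, n)
    a = ((a + a.t()) > 1.0).float() + torch.eye(n)   # symmetric adjacency with self-loops
    d_inv_sqrt = torch.diag(a.sum(1).pow(-0.5))
    a_hat = d_inv_sqrt @ a @ d_inv_sqrt              # symmetric normalization, as in GCN
    x = torch.randn(n, dim)
    kernel = GraphKernel(dim)
    print(discrete_residual(kernel, a_hat, x).shape)
    print(continuous_residual(kernel, a_hat, x).shape)

Both calls return node features of the same shape; the continuous version simply replaces a fixed stack of residual layers with an integration over a depth variable, which is what allows the behaviour under varying computation time discussed in the abstract.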
Description
[1911.09554] Discrete and Continuous Deep Residual Learning Over Graphs
@article{avelar2019discrete,
added-at = {2020-01-13T20:10:38.000+0100},
author = {Avelar, Pedro H. C. and Tavares, Anderson R. and Gori, Marco and Lamb, Luis C.},
biburl = {https://www.bibsonomy.org/bibtex/29f0fb83def5bcc4dec0f0feca3bc191c/kirk86},
description = {[1911.09554] Discrete and Continuous Deep Residual Learning Over Graphs},
keywords = {graphs optimization},
note = {cite arxiv:1911.09554},
timestamp = {2020-01-13T20:10:38.000+0100},
title = {Discrete and Continuous Deep Residual Learning Over Graphs},
url = {http://arxiv.org/abs/1911.09554},
year = 2019
}