The advance of node pooling operations in Graph Neural Networks (GNNs) has
lagged behind the feverish design of new message-passing techniques, and
pooling remains an important and challenging endeavor for the design of deep
architectures. In this paper, we propose a pooling operation for GNNs that
leverages a differentiable unsupervised loss based on the mincut optimization
objective. For each node, our method learns a soft cluster assignment vector
that depends on the node features, the target inference task (e.g., graph
classification), and, thanks to the mincut objective, also on the graph
connectivity. Graph pooling is obtained by applying the matrix of assignment
vectors to the adjacency matrix and the node features. We validate the
effectiveness of the proposed pooling method on a variety of supervised and
unsupervised tasks.
Description
[1907.00481] Mincut pooling in Graph Neural Networks
%0 Journal Article
%1 bianchi2019mincut
%A Bianchi, Filippo Maria
%A Grattarola, Daniele
%A Alippi, Cesare
%D 2019
%K deep-learning graphs readings
%T Mincut pooling in Graph Neural Networks
%U http://arxiv.org/abs/1907.00481
%X The advance of node pooling operations in Graph Neural Networks (GNNs) has
lagged behind the feverish design of new message-passing techniques, and
pooling remains an important and challenging endeavor for the design of deep
architectures. In this paper, we propose a pooling operation for GNNs that
leverages a differentiable unsupervised loss based on the mincut optimization
objective. For each node, our method learns a soft cluster assignment vector
that depends on the node features, the target inference task (e.g., graph
classification), and, thanks to the mincut objective, also on the graph
connectivity. Graph pooling is obtained by applying the matrix of assignment
vectors to the adjacency matrix and the node features. We validate the
effectiveness of the proposed pooling method on a variety of supervised and
unsupervised tasks.
@misc{bianchi2019mincut,
  abstract      = {The advance of node pooling operations in Graph Neural Networks (GNNs) has
lagged behind the feverish design of new message-passing techniques, and
pooling remains an important and challenging endeavor for the design of deep
architectures. In this paper, we propose a pooling operation for GNNs that
leverages a differentiable unsupervised loss based on the mincut optimization
objective. For each node, our method learns a soft cluster assignment vector
that depends on the node features, the target inference task (e.g., graph
classification), and, thanks to the mincut objective, also on the graph
connectivity. Graph pooling is obtained by applying the matrix of assignment
vectors to the adjacency matrix and the node features. We validate the
effectiveness of the proposed pooling method on a variety of supervised and
unsupervised tasks.},
  added-at      = {2019-12-12T19:33:30.000+0100},
  archiveprefix = {arXiv},
  author        = {Bianchi, Filippo Maria and Grattarola, Daniele and Alippi, Cesare},
  biburl        = {https://www.bibsonomy.org/bibtex/2412bfbff89dcdef479d6e3f8293c5b69/kirk86},
  description   = {[1907.00481] Mincut pooling in Graph Neural Networks},
  eprint        = {1907.00481},
  interhash     = {1b16a7a4dbe5f1c4b342a48158f707a5},
  intrahash     = {412bfbff89dcdef479d6e3f8293c5b69},
  keywords      = {deep-learning graphs readings},
  timestamp     = {2019-12-12T19:33:30.000+0100},
  title         = {{Mincut} Pooling in {Graph Neural Networks}},
  url           = {http://arxiv.org/abs/1907.00481},
  year          = {2019},
}