Graph embedding methods represent nodes in a continuous vector space, preserving different types of relational information from the graph. These methods have many hyperparameters (e.g., the length of a random walk) which have to be manually tuned for every graph. In this paper, we replace previously fixed hyperparameters with trainable ones that we automatically learn via backpropagation. In particular, we propose a novel attention model on the power series of the transition matrix, which guides the random walk to optimize an upstream objective. Unlike previous approaches to attention models, the method we propose applies its attention parameters exclusively to the data itself (e.g., to the random walk); they are not used by the model for inference. We experiment on link prediction tasks, as we aim to produce embeddings that best preserve the graph structure, generalizing to unseen information. We improve state-of-the-art results on a comprehensive suite of real-world graph datasets including social, collaboration, and biological networks, where we observe that our graph attention model can reduce error by 20%-40%. We show that our automatically learned attention parameters can vary significantly per graph, and correspond to the optimal choice of hyperparameter when we manually tune existing methods.
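
For illustration only: the "attention model on the power series of the transition matrix" described in the abstract can be read as a softmax-weighted sum of k-step transition matrices, with the weights trained by backpropagation through the embedding loss. The sketch below is a minimal, hypothetical rendering of that idea (not the authors' released code); names such as attended_context and attention_logits, and the walk length of 5, are assumptions made for the example.

import numpy as np

def transition_matrix(adj):
    """Row-normalize an adjacency matrix into a random-walk transition matrix T."""
    deg = adj.sum(axis=1, keepdims=True)
    return adj / np.clip(deg, 1, None)

def attended_context(adj, attention_logits):
    """Expected context distribution as a softmax-weighted sum of powers of T.

    q = softmax(attention_logits) plays the role of the trainable attention over
    walk steps; here the logits are fixed for illustration, whereas the paper
    learns them jointly with the embeddings via backpropagation.
    """
    T = transition_matrix(adj)
    q = np.exp(attention_logits - attention_logits.max())
    q /= q.sum()
    context = np.zeros_like(T)
    T_power = np.eye(T.shape[0])
    for q_k in q:                  # walk steps 1..C, C = len(attention_logits)
        T_power = T_power @ T      # T^k
        context += q_k * T_power   # q_k weights the k-hop neighbourhood
    return context

# Toy 4-node path graph: longer walks reach farther nodes, and the attention
# weights decide how much each hop contributes to the training signal.
adj = np.array([[0, 1, 0, 0],
                [1, 0, 1, 0],
                [0, 1, 0, 1],
                [0, 0, 1, 0]], dtype=float)
print(attended_context(adj, attention_logits=np.zeros(5)))
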
@inproceedings{abu-el-haija_watch_2018,
author = {Abu-El-Haija, Sami and Perozzi, Bryan and Al-Rfou, Rami and Alemi, Alexander A.},
booktitle = {Advances in Neural Information Processing Systems},
keywords = {Embedding_Algorithm Node_Embeddings},
language = {en},
pages = {9180--9190},
title = {{Watch Your Step: Learning Node Embeddings via Graph Attention}},
year = 2018
}