Three-dimensional geometric data offer an excellent domain for studying
representation learning and generative modeling. In this paper, we look at
geometric data represented as point clouds. We introduce a deep AutoEncoder
(AE) network with state-of-the-art reconstruction quality and generalization
ability. The learned representations outperform existing methods on 3D
recognition tasks and enable shape editing via simple algebraic manipulations,
such as semantic part editing, shape analogies and shape interpolation, as well
as shape completion. We perform a thorough study of different generative models
including GANs operating on the raw point clouds, significantly improved GANs
trained in the fixed latent space of our AEs, and Gaussian Mixture Models
(GMMs). To quantitatively evaluate generative models we introduce measures of
sample fidelity and diversity based on matchings between sets of point clouds.
Interestingly, our evaluation of generalization, fidelity and diversity reveals
that GMMs trained in the latent space of our AEs yield the best results
overall.
Description
[1707.02392] Learning Representations and Generative Models for 3D Point Clouds
%0 Generic
%1 achlioptas2017learning
%A Achlioptas, Panos
%A Diamanti, Olga
%A Mitliagkas, Ioannis
%A Guibas, Leonidas
%D 2017
%K 2017 arxiv deep-learning generative paper point-cloud stanford
%T Learning Representations and Generative Models for 3D Point Clouds
%U http://arxiv.org/abs/1707.02392
%X Three-dimensional geometric data offer an excellent domain for studying
representation learning and generative modeling. In this paper, we look at
geometric data represented as point clouds. We introduce a deep AutoEncoder
(AE) network with state-of-the-art reconstruction quality and generalization
ability. The learned representations outperform existing methods on 3D
recognition tasks and enable shape editing via simple algebraic manipulations,
such as semantic part editing, shape analogies and shape interpolation, as well
as shape completion. We perform a thorough study of different generative models
including GANs operating on the raw point clouds, significantly improved GANs
trained in the fixed latent space of our AEs, and Gaussian Mixture Models
(GMMs). To quantitatively evaluate generative models we introduce measures of
sample fidelity and diversity based on matchings between sets of point clouds.
Interestingly, our evaluation of generalization, fidelity and diversity reveals
that GMMs trained in the latent space of our AEs yield the best results
overall.
Achlioptas et al., "Learning Representations and Generative Models for 3D Point
Clouds", arXiv:1707.02392 (2017). Added standard eprint/archiveprefix fields so
arXiv-aware styles render the identifier; braced {3D} in the title so
sentence-casing styles do not downcase it. Key and all original fields retained.
@misc{achlioptas2017learning,
  abstract      = {Three-dimensional geometric data offer an excellent domain for studying
representation learning and generative modeling. In this paper, we look at
geometric data represented as point clouds. We introduce a deep AutoEncoder
(AE) network with state-of-the-art reconstruction quality and generalization
ability. The learned representations outperform existing methods on 3D
recognition tasks and enable shape editing via simple algebraic manipulations,
such as semantic part editing, shape analogies and shape interpolation, as well
as shape completion. We perform a thorough study of different generative models
including GANs operating on the raw point clouds, significantly improved GANs
trained in the fixed latent space of our AEs, and Gaussian Mixture Models
(GMMs). To quantitatively evaluate generative models we introduce measures of
sample fidelity and diversity based on matchings between sets of point clouds.
Interestingly, our evaluation of generalization, fidelity and diversity reveals
that GMMs trained in the latent space of our AEs yield the best results
overall.},
  added-at      = {2018-07-20T10:08:31.000+0200},
  archiveprefix = {arXiv},
  author        = {Achlioptas, Panos and Diamanti, Olga and Mitliagkas, Ioannis and Guibas, Leonidas},
  biburl        = {https://www.bibsonomy.org/bibtex/21bebf25afcd6bff423dd2a3c84b53102/analyst},
  description   = {[1707.02392] Learning Representations and Generative Models for 3D Point Clouds},
  eprint        = {1707.02392},
  interhash     = {ca25745159fba69b3912a7fedf349c6f},
  intrahash     = {1bebf25afcd6bff423dd2a3c84b53102},
  keywords      = {2017 arxiv deep-learning generative paper point-cloud stanford},
  note          = {cite arxiv:1707.02392},
  timestamp     = {2018-07-20T10:08:31.000+0200},
  title         = {Learning Representations and Generative Models for {3D} Point Clouds},
  url           = {http://arxiv.org/abs/1707.02392},
  year          = {2017}
}