Point clouds have emerged as a popular representation of 3D visual data. With a set of unordered 3D points, one typically needs to transform them into latent representation before further classification and segmentation tasks. However, one cannot easily interpret such encoded latent representation. To address this issue, we propose a unique deep learning framework for disentangling body-type and pose information from 3D point clouds. Extending from autoencoder, we advance adversarial learning a selected feature type, while classification and data recovery can be additionally observed. Our experiments confirm that our model can be successfully applied to perform a wide range of 3D applications like shape synthesis, action translation, shape/action interpolation, and synchronization.
Description
Learning Interpretable Representation for 3D Point Clouds | IEEE Conference Publication | IEEE Xplore
%0 Conference Paper
%1 9412440
%A Su, Feng-Guang
%A Lin, Ci-Siang
%A Wang, Yu-Chiang Frank
%B 2020 25th International Conference on Pattern Recognition (ICPR)
%D 2021
%K 2021 3D journal point-cloud tpami
%P 7470-7477
%R 10.1109/ICPR48806.2021.9412440
%T Learning Interpretable Representation for 3D Point Clouds
%U https://ieeexplore.ieee.org/document/9412440
%X Point clouds have emerged as a popular representation of 3D visual data. With a set of unordered 3D points, one typically needs to transform them into latent representation before further classification and segmentation tasks. However, one cannot easily interpret such encoded latent representation. To address this issue, we propose a unique deep learning framework for disentangling body-type and pose information from 3D point clouds. Extending from autoencoder, we advance adversarial learning a selected feature type, while classification and data recovery can be additionally observed. Our experiments confirm that our model can be successfully applied to perform a wide range of 3D applications like shape synthesis, action translation, shape/action interpolation, and synchronization.
@inproceedings{9412440,
  abstract    = {Point clouds have emerged as a popular representation of 3D visual data. With a set of unordered 3D points, one typically needs to transform them into latent representation before further classification and segmentation tasks. However, one cannot easily interpret such encoded latent representation. To address this issue, we propose a unique deep learning framework for disentangling body-type and pose information from 3D point clouds. Extending from autoencoder, we advance adversarial learning a selected feature type, while classification and data recovery can be additionally observed. Our experiments confirm that our model can be successfully applied to perform a wide range of 3D applications like shape synthesis, action translation, shape/action interpolation, and synchronization.},
  added-at    = {2021-06-02T09:20:09.000+0200},
  author      = {Su, Feng-Guang and Lin, Ci-Siang and Wang, Yu-Chiang Frank},
  biburl      = {https://www.bibsonomy.org/bibtex/264402cff23345254fdea4346f3c2bcce/analyst},
  booktitle   = {2020 25th International Conference on Pattern Recognition ({ICPR})},
  description = {Learning Interpretable Representation for 3D Point Clouds | IEEE Conference Publication | IEEE Xplore},
  doi         = {10.1109/ICPR48806.2021.9412440},
  interhash   = {157d0cd13e25578cadb8324866f7f798},
  intrahash   = {64402cff23345254fdea4346f3c2bcce},
  issn        = {1051-4651},
  keywords    = {2021 3D journal point-cloud tpami},
  month       = jan,
  pages       = {7470--7477},
  timestamp   = {2021-06-02T09:20:09.000+0200},
  title       = {Learning Interpretable Representation for {3D} Point Clouds},
  url         = {https://ieeexplore.ieee.org/document/9412440},
  year        = {2021}
}