Point cloud data from 3D LiDAR sensors are one of the most crucial sensor modalities for versatile safety-critical applications such as self-driving vehicles. Since the annotation of point cloud data is an expensive and time-consuming process, the utilisation of simulated environments and 3D LiDAR sensors for this task has recently started to gain popularity. However, the generated synthetic point cloud data still lack the artefacts that usually exist in point cloud data from real 3D LiDAR sensors. Thus, in this work, we propose a domain adaptation framework for bridging this gap between synthetic and real point cloud data. Our proposed framework is based on the deep cycle-consistent generative adversarial networks (CycleGAN) architecture. We have evaluated the performance of our proposed framework on the task of vehicle detection from bird's eye view (BEV) point cloud images coming from real 3D LiDAR sensors. The framework has shown competitive results with an improvement of more than 7% in average precision score over other baseline approaches when tested on real BEV point cloud images.
Description
Domain Adaptation for Vehicle Detection from Bird's Eye View LiDAR Point Cloud Data - IEEE Conference Publication
%0 Conference Paper
%1 9022327
%A Saleh, K.
%A Abobakr, A.
%A Attia, M.
%A Iskander, J.
%A Nahavandi, D.
%A Hossny, M.
%A Nahavandi, S.
%B 2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)
%D 2019
%K GAN convnets dnn lidar vehicle
%P 3235-3242
%R 10.1109/ICCVW.2019.00404
%T Domain Adaptation for Vehicle Detection from Bird's Eye View LiDAR Point Cloud Data
%U https://ieeexplore.ieee.org/document/9022327
%X Point cloud data from 3D LiDAR sensors are one of the most crucial sensor modalities for versatile safety-critical applications such as self-driving vehicles. Since the annotation of point cloud data is an expensive and time-consuming process, the utilisation of simulated environments and 3D LiDAR sensors for this task has recently started to gain popularity. However, the generated synthetic point cloud data still lack the artefacts that usually exist in point cloud data from real 3D LiDAR sensors. Thus, in this work, we propose a domain adaptation framework for bridging this gap between synthetic and real point cloud data. Our proposed framework is based on the deep cycle-consistent generative adversarial networks (CycleGAN) architecture. We have evaluated the performance of our proposed framework on the task of vehicle detection from bird's eye view (BEV) point cloud images coming from real 3D LiDAR sensors. The framework has shown competitive results with an improvement of more than 7% in average precision score over other baseline approaches when tested on real BEV point cloud images.
@inproceedings{9022327,
  abstract     = {Point cloud data from 3D LiDAR sensors are one of the most crucial sensor modalities for versatile safety-critical applications such as self-driving vehicles. Since the annotation of point cloud data is an expensive and time-consuming process, the utilisation of simulated environments and 3D LiDAR sensors for this task has recently started to gain popularity. However, the generated synthetic point cloud data still lack the artefacts that usually exist in point cloud data from real 3D LiDAR sensors. Thus, in this work, we propose a domain adaptation framework for bridging this gap between synthetic and real point cloud data. Our proposed framework is based on the deep cycle-consistent generative adversarial networks (CycleGAN) architecture. We have evaluated the performance of our proposed framework on the task of vehicle detection from bird's eye view (BEV) point cloud images coming from real 3D LiDAR sensors. The framework has shown competitive results with an improvement of more than 7% in average precision score over other baseline approaches when tested on real BEV point cloud images.},
  added-at     = {2020-10-16T17:20:28.000+0200},
  author       = {Saleh, K. and Abobakr, A. and Attia, M. and Iskander, J. and Nahavandi, D. and Hossny, M. and Nahavandi, S.},
  biburl       = {https://www.bibsonomy.org/bibtex/24e84157ced9809cb709bc3ecc7a0d1d6/sohnki},
  booktitle    = {2019 {IEEE/CVF} International Conference on Computer Vision Workshop ({ICCVW})},
  description  = {Domain Adaptation for Vehicle Detection from Bird's Eye View LiDAR Point Cloud Data - IEEE Conference Publication},
  doi          = {10.1109/ICCVW.2019.00404},
  interhash    = {0d5b621c8823f2e8208c6f5766a5d22b},
  intrahash    = {4e84157ced9809cb709bc3ecc7a0d1d6},
  issn         = {2473-9944},
  keywords     = {GAN convnets dnn lidar vehicle},
  month        = oct,
  pages        = {3235--3242},
  title        = {Domain Adaptation for Vehicle Detection from Bird's Eye View {LiDAR} Point Cloud Data},
  url          = {https://ieeexplore.ieee.org/document/9022327},
  year         = 2019
}