We describe a data-driven method for inferring the camera viewpoints given
multiple images of an arbitrary object. This task is a core component of
classic geometric pipelines such as SfM and SLAM, and also serves as a vital
pre-processing requirement for contemporary neural approaches (e.g. NeRF) to
object reconstruction and view synthesis. In contrast to existing
correspondence-driven methods that do not perform well given sparse views, we
propose a top-down prediction based approach for estimating camera viewpoints.
Our key technical insight is the use of an energy-based formulation for
representing distributions over relative camera rotations, thus allowing us to
explicitly represent multiple camera modes arising from object symmetries or
views. Leveraging these relative predictions, we jointly estimate a consistent
set of camera rotations from multiple images. We show that our approach
outperforms state-of-the-art SfM and SLAM methods given sparse images on both
seen and unseen categories. Further, our probabilistic approach significantly
outperforms directly regressing relative poses, suggesting that modeling
multimodality is important for coherent joint reconstruction. We demonstrate
that our system can be a stepping stone toward in-the-wild reconstruction from
multi-view datasets. The project page with code and videos can be found at
https://jasonyzhang.com/relpose.
Description
RelPose: Predicting Probabilistic Relative Rotation for Single Objects in the Wild
%0 Generic
%1 zhang2022relpose
%A Zhang, Jason Y.
%A Ramanan, Deva
%A Tulsiani, Shubham
%D 2022
%K pose-estimation
%T RelPose: Predicting Probabilistic Relative Rotation for Single Objects
in the Wild
%U http://arxiv.org/abs/2208.05963
%X We describe a data-driven method for inferring the camera viewpoints given
multiple images of an arbitrary object. This task is a core component of
classic geometric pipelines such as SfM and SLAM, and also serves as a vital
pre-processing requirement for contemporary neural approaches (e.g. NeRF) to
object reconstruction and view synthesis. In contrast to existing
correspondence-driven methods that do not perform well given sparse views, we
propose a top-down prediction based approach for estimating camera viewpoints.
Our key technical insight is the use of an energy-based formulation for
representing distributions over relative camera rotations, thus allowing us to
explicitly represent multiple camera modes arising from object symmetries or
views. Leveraging these relative predictions, we jointly estimate a consistent
set of camera rotations from multiple images. We show that our approach
outperforms state-of-the-art SfM and SLAM methods given sparse images on both
seen and unseen categories. Further, our probabilistic approach significantly
outperforms directly regressing relative poses, suggesting that modeling
multimodality is important for coherent joint reconstruction. We demonstrate
that our system can be a stepping stone toward in-the-wild reconstruction from
multi-view datasets. The project page with code and videos can be found at
https://jasonyzhang.com/relpose.
@inproceedings{zhang2022relpose,
  abstract    = {We describe a data-driven method for inferring the camera viewpoints given
multiple images of an arbitrary object. This task is a core component of
classic geometric pipelines such as SfM and SLAM, and also serves as a vital
pre-processing requirement for contemporary neural approaches (e.g. NeRF) to
object reconstruction and view synthesis. In contrast to existing
correspondence-driven methods that do not perform well given sparse views, we
propose a top-down prediction based approach for estimating camera viewpoints.
Our key technical insight is the use of an energy-based formulation for
representing distributions over relative camera rotations, thus allowing us to
explicitly represent multiple camera modes arising from object symmetries or
views. Leveraging these relative predictions, we jointly estimate a consistent
set of camera rotations from multiple images. We show that our approach
outperforms state-of-the-art SfM and SLAM methods given sparse images on both
seen and unseen categories. Further, our probabilistic approach significantly
outperforms directly regressing relative poses, suggesting that modeling
multimodality is important for coherent joint reconstruction. We demonstrate
that our system can be a stepping stone toward in-the-wild reconstruction from
multi-view datasets. The project page with code and videos can be found at
https://jasonyzhang.com/relpose.},
  added-at    = {2022-08-12T19:54:06.000+0200},
  author      = {Zhang, Jason Y. and Ramanan, Deva and Tulsiani, Shubham},
  biburl      = {https://www.bibsonomy.org/bibtex/2b92a4eba2223e3800ac87df2a3759085/redtedtezza},
  booktitle   = {European Conference on Computer Vision ({ECCV})},
  description = {RelPose: Predicting Probabilistic Relative Rotation for Single Objects in the Wild},
  eprint      = {2208.05963},
  eprinttype  = {arXiv},
  interhash   = {895da1c3a7ddacbd27e3c1d7eae98c87},
  intrahash   = {b92a4eba2223e3800ac87df2a3759085},
  keywords    = {pose-estimation},
  timestamp   = {2022-08-12T19:54:06.000+0200},
  title       = {{RelPose}: Predicting Probabilistic Relative Rotation for Single Objects
in the Wild},
  url         = {http://arxiv.org/abs/2208.05963},
  year        = {2022},
}