Immersive novel view generation is an important
technology in the field of graphics and has recently
also received attention for operator-based
human-robot interaction. However, the involved
training is time-consuming, and thus the current
test scope is majorly on object capturing. This
limits the usage of related models in the robotics
community for 3D reconstruction since robots (1)
usually only capture a very small range of view
directions to surfaces that cause arbitrary
predictions on unseen, novel directions, (2) require
real-time algorithms, and (3) work with growing
scenes, e.g., in robotic exploration. The letter
proposes a novel Neural Surface Light Fields model
that copes with the small range of view directions
while producing a good result in unseen
directions. Exploiting recent encoding techniques,
the training of our model is highly efficient. In
addition, we design Multiple Asynchronous Neural
Agents (MANA), a universal framework to learn each
small region in parallel for large-scale growing
scenes. Our model learns online the Neural Surface
Light Fields (NSLF) aside from real-time 3D
reconstruction with a sequential data stream as the
shared input. In addition to online training, our
model also provides real-time rendering after
completing the data stream for visualization. We
implement experiments using well-known RGBD indoor
datasets, showing the high flexibility to embed our
model into real-time 3D reconstruction and
demonstrating high-fidelity view synthesis for these
scenes.
%0 Journal Article
%1 RAL2023
%A Yuan, Y.
%A Nüchter, A.
%D 2023
%J IEEE Robotics and Automation Letters (RAL)
%K author imported myown
%N 6
%P 3843--3850
%R 10.1109/LRA.2023.3273516
%T Online Learning of Neural Surface Light Fields Alongside Real-Time Incremental 3D Reconstruction
%U https://robotik.informatik.uni-wuerzburg.de/telematics/download/ral2023.pdf
%V 8
%X Immersive novel view generation is an important
technology in the field of graphics and has recently
also received attention for operator-based
human-robot interaction. However, the involved
training is time-consuming, and thus the current
test scope is majorly on object capturing. This
limits the usage of related models in the robotics
community for 3D reconstruction since robots (1)
usually only capture a very small range of view
directions to surfaces that cause arbitrary
predictions on unseen, novel directions, (2) require
real-time algorithms, and (3) work with growing
scenes, e.g., in robotic exploration. The letter
proposes a novel Neural Surface Light Fields model
that copes with the small range of view directions
while producing a good result in unseen
directions. Exploiting recent encoding techniques,
the training of our model is highly efficient. In
addition, we design Multiple Asynchronous Neural
Agents (MANA), a universal framework to learn each
small region in parallel for large-scale growing
scenes. Our model learns online the Neural Surface
Light Fields (NSLF) aside from real-time 3D
reconstruction with a sequential data stream as the
shared input. In addition to online training, our
model also provides real-time rendering after
completing the data stream for visualization. We
implement experiments using well-known RGBD indoor
datasets, showing the high flexibility to embed our
model into real-time 3D reconstruction and
demonstrating high-fidelity view synthesis for these
scenes.
@article{RAL2023,
  abstract  = {Immersive novel view generation is an important
technology in the field of graphics and has recently
also received attention for operator-based
human-robot interaction. However, the involved
training is time-consuming, and thus the current
test scope is majorly on object capturing. This
limits the usage of related models in the robotics
community for 3D reconstruction since robots (1)
usually only capture a very small range of view
directions to surfaces that cause arbitrary
predictions on unseen, novel directions, (2) require
real-time algorithms, and (3) work with growing
scenes, e.g., in robotic exploration. The letter
proposes a novel Neural Surface Light Fields model
that copes with the small range of view directions
while producing a good result in unseen
directions. Exploiting recent encoding techniques,
the training of our model is highly efficient. In
addition, we design Multiple Asynchronous Neural
Agents (MANA), a universal framework to learn each
small region in parallel for large-scale growing
scenes. Our model learns online the Neural Surface
Light Fields (NSLF) aside from real-time 3D
reconstruction with a sequential data stream as the
shared input. In addition to online training, our
model also provides real-time rendering after
completing the data stream for visualization. We
implement experiments using well-known RGBD indoor
datasets, showing the high flexibility to embed our
model into real-time 3D reconstruction and
demonstrating high-fidelity view synthesis for these
scenes.},
  added-at  = {2023-05-17T09:19:58.000+0200},
  author    = {Yuan, Y. and N{\"u}chter, A.},
  biburl    = {https://www.bibsonomy.org/bibtex/2b304a630be50735cbd507e4eb7abf894/nuechter76},
  doi       = {10.1109/LRA.2023.3273516},
  interhash = {37b08ddb05ff787d6b28ba819a7d19c5},
  intrahash = {b304a630be50735cbd507e4eb7abf894},
  journal   = {IEEE Robotics and Automation Letters (RAL)},
  keywords  = {author imported myown},
  note      = {Project page: https://jarrome.github.io/NSLF-OL},
  number    = {6},
  pages     = {3843--3850},
  timestamp = {2024-07-30T17:27:42.000+0200},
  title     = {Online Learning of {Neural Surface Light Fields} Alongside Real-Time Incremental {3D} Reconstruction},
  url       = {https://robotik.informatik.uni-wuerzburg.de/telematics/download/ral2023.pdf},
  volume    = {8},
  year      = {2023},
}