Virtual reality (VR) and augmented reality (AR) technology is increasingly
combined with eye tracking. This combination broadens both fields and opens up
new areas of application, in which visual perception and related cognitive
processes can be studied in interactive but still well-controlled settings.
However, performing a semantic gaze analysis of eye-tracking data from
interactive three-dimensional scenes is a resource-intensive task, which has
so far been an obstacle to economical use. In this paper, we present a novel
approach that minimizes the time and information necessary to annotate volumes
of interest (VOIs) by using techniques from object recognition. To do so, we
train convolutional neural networks (CNNs) on synthetic data sets derived from
virtual models using image augmentation techniques. We evaluate our method in
real and virtual environments, showing that it can compete with
state-of-the-art approaches while relying neither on additional markers nor on
preexisting databases, and that it offers cross-platform use.
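The abstract only names the ingredients of the training step. As a concrete
illustration, a minimal sketch of the general idea might look like the
following: augment synthetic renderings of a virtual model and train a small
CNN to map a gaze-centered image patch to a VOI label. All library choices,
layer sizes, augmentation parameters, and names such as VOIClassifier are
illustrative assumptions, not the authors' implementation.

# Hypothetical sketch (not from the paper): augment a synthetic rendering and
# classify a gaze-centered patch with a small CNN. Requires torch, torchvision,
# and Pillow; every name and parameter here is an illustrative assumption.
import torch
import torch.nn as nn
from PIL import Image
from torchvision import transforms

# Image augmentation applied to renderings of the virtual models, so that the
# network generalizes from synthetic views to real or virtual gaze footage.
augment = transforms.Compose([
    transforms.RandomResizedCrop(64, scale=(0.6, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
    transforms.RandomRotation(15),
    transforms.ToTensor(),
])

class VOIClassifier(nn.Module):
    """Small CNN mapping a gaze-centered image patch to a VOI label."""
    def __init__(self, num_vois):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.classify = nn.Linear(128, num_vois)

    def forward(self, x):
        return self.classify(self.features(x).flatten(1))

# Stand-in for one rendered view of a virtual model; in practice these would
# be renderings exported from the VR/AR scene.
render = Image.new("RGB", (128, 128), color=(200, 180, 40))
patch = augment(render).unsqueeze(0)   # shape (1, 3, 64, 64)

model = VOIClassifier(num_vois=5)      # five hypothetical VOIs
print(model(patch).shape)              # torch.Size([1, 5])

At annotation time, the same network would then be applied to patches cut out
around each recorded gaze point, turning fixation-by-fixation VOI labeling
into a batch classification job.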
@misc{stubbemann2021neural,
author = {Stubbemann, Lena and Dürrschnabel, Dominik and Refflinghaus, Robert},
doi = {10.1145/3448017.3457380},
keywords = {gaze_analysis neural_networks xr_setting},
note = {arXiv:2103.10451. 16 pages, 6 figures, 1 table. Accepted to ETRA 2021, ACM Symposium on Eye Tracking Research and Applications},
title = {Neural Networks for Semantic Gaze Analysis in XR Settings},
url = {http://arxiv.org/abs/2103.10451},
year = 2021
}