In previous work, researchers have repeatedly demonstrated that robots' use of deictic gestures enables effective and natural human-robot interaction. However, new technologies such as augmented reality head-mounted displays enable environments in which mixed reality becomes possible, and in such environments, physical gestures become just one of many types of mixed reality deictic gestures. In this paper, we present the first experimental exploration of the effectiveness of mixed reality deictic gestures beyond physical gestures. Specifically, we investigate human perception of videos simulating the display of allocentric gestures, in which robots circle their targets in users' fields of view. Our results suggest that this is an effective communication strategy, in terms of both objective accuracy and subjective perception, especially when paired with complex natural language references.
@inproceedings{williams2019mixed,
author = {Williams, Tom and Bussing, Matthew and Cabrol, Sebastian and Boyle, Elizabeth and Tran, Nhan},
booktitle = {2019 14th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
doi = {10.1109/HRI.2019.8673275},
keywords = {communication deictic gesture human mixed multimodal reality robot},
pages = {191--201},
title = {Mixed Reality Deictic Gesture for Multi-Modal Robot Communication},
url = {https://ieeexplore.ieee.org/document/8673275},
year = 2019
}
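For convenience, below is a minimal sketch of how this entry could be cited from a LaTeX document. It assumes the BibTeX record above has been saved to a file named references.bib and that the IEEEtran bibliography style is available; both the filename and the style choice are assumptions, not part of the original record.

\documentclass{article}
\begin{document}
% Cite the entry by the key used in the BibTeX record above.
Mixed reality deictic gestures have been shown to support multi-modal
robot communication \cite{williams2019mixed}.

% Assumes the record is stored in references.bib and IEEEtran.bst is installed.
\bibliographystyle{IEEEtran}
\bibliography{references}
\end{document}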