@book{OviattSchullerEtAl2018,
abstract = {The Handbook provides the first authoritative resource on what has become the dominant paradigm for new computer interfaces: user input involving new media (speech, multi-touch, hand and body gestures, facial expressions, writing) embedded in multimodal-multisensor interfaces that often include biosignals. This second volume begins with multimodal signal processing, architectures, and machine learning, including recent deep learning approaches for processing multisensory and multimodal user data and interaction, as well as context sensitivity. A further highlight is the processing of information about users' states and traits, an exciting emerging capability in next-generation user interfaces. These chapters discuss real-time multimodal analysis of emotion and social signals from various modalities, and the perception of affective expression by users. Further chapters discuss multimodal processing of cognitive state, using behavioral and physiological signals to detect cognitive load, domain expertise, deception, and depression. The collection provides walk-through examples of system design and processing, information on tools and practical resources for developing and evaluating new systems, and terminology and tutorial support for mastering this rapidly expanding field. In the final section, experts exchange views on the timely and controversial challenge topic of multimodal deep learning, focusing on how multimodal-multisensor interfaces are most likely to advance human performance during the next decade.},
address = {San Rafael, CA},
biburl = {https://www.bibsonomy.org/bibtex/21500482604c1d0221084f34f50d9959c/flint63},
doi = {10.1145/3107990},
editor = {Oviatt, Sharon and Schuller, Björn and Cohen, Philip and Sonntag, Daniel and Potamianos, Gerasimos and Krüger, Antonio},
url = {https://www.morganclaypoolpublishers.com/catalog_Orig/product_info.php?products_id=1307},
isbn = {978-1-97000-171-6},
issn = {2374-6769},
keywords = {acm ai architecture book emotion interface learn multimodal user},
number = 21,
pagetotal = {555},
publisher = {Morgan \& Claypool},
referencetype = {collection},
series = {ACM Books},
subtitle = {Signal Processing, Architectures, and Detection of Emotion and Cognition},
title = {Handbook of Multimodal-Multisensor Interfaces},
volume = 2,
volumes = {3},
year = 2018
}