Multimodal applications are typically developed together with their user interfaces, leading to a tight coupling. Additionally, human-computer interaction is often less considered. This potentially results in a worse user interface when additional modalities have to be integrated and/or the application shall be developed for a different device. A promising way of creating multimodal user interfaces with less effort for applications running on several devices is semi-automatic generation. This work shows the generation of multimodal interfaces where a discourse model is transformed to different automatically rendered modalities. It supports loose coupling of the design of human-computer interaction and the integration of specific modalities. The presented communication platform utilizes this transformation process. It allows for high-level integration of input like speech, hand gesture and a WIMP-UI. The generation of output is possible with the modalities speech and GUI. Integration of other input and output modalities is supported as well. Furthermore, the platform is applicable for several applications as well as different devices, e.g., PDAs and PCs.
%0 Conference Paper
%1 ertl2009semi
%A Ertl, Dominik
%B Proceedings of the 1st ACM SIGCHI symposium on Engineering interactive computing systems
%C New York, NY, USA
%D 2009
%I ACM
%K discourse model transformation multimodal user interface generation
%P 321--324
%R 10.1145/1570433.1570494
%T Semi-automatic multimodal user interface generation
%U http://doi.acm.org/10.1145/1570433.1570494
%X Multimodal applications are typically developed together with their user interfaces, leading to a tight coupling. Additionally, human-computer interaction is often less considered. This potentially results in a worse user interface when additional modalities have to be integrated and/or the application shall be developed for a different device. A promising way of creating multimodal user interfaces with less effort for applications running on several devices is semi-automatic generation. This work shows the generation of multimodal interfaces where a discourse model is transformed to different automatically rendered modalities. It supports loose coupling of the design of human-computer interaction and the integration of specific modalities. The presented communication platform utilizes this transformation process. It allows for high-level integration of input like speech, hand gesture and a WIMP-UI. The generation of output is possible with the modalities speech and GUI. Integration of other input and output modalities is supported as well. Furthermore, the platform is applicable for several applications as well as different devices, e.g., PDAs and PCs.
@inproceedings{ertl2009semi,
  abstract   = {Multimodal applications are typically developed together with their user interfaces, leading to a tight coupling. Additionally, human-computer interaction is often less considered. This potentially results in a worse user interface when additional modalities have to be integrated and/or the application shall be developed for a different device. A promising way of creating multimodal user interfaces with less effort for applications running on several devices is semi-automatic generation. This work shows the generation of multimodal interfaces where a discourse model is transformed to different automatically rendered modalities. It supports loose coupling of the design of human-computer interaction and the integration of specific modalities. The presented communication platform utilizes this transformation process. It allows for high-level integration of input like speech, hand gesture and a {WIMP}-{UI}. The generation of output is possible with the modalities speech and {GUI}. Integration of other input and output modalities is supported as well. Furthermore, the platform is applicable for several applications as well as different devices, e.g., {PDAs} and {PCs}.},
  added-at   = {2011-12-27T21:13:51.000+0100},
  address    = {New York, NY, USA},
  author     = {Ertl, Dominik},
  biburl     = {https://www.bibsonomy.org/bibtex/25be9e24559b491dffbf3088eb85b5dc8/porta},
  booktitle  = {Proceedings of the 1st {ACM} {SIGCHI} symposium on Engineering interactive computing systems},
  doi        = {10.1145/1570433.1570494},
  file       = {ertl2009semi.pdf:ertl2009semi.pdf:PDF},
  groups     = {public},
  interhash  = {0feac19a68dead34c3d425401c145be9},
  intrahash  = {5be9e24559b491dffbf3088eb85b5dc8},
  keywords   = {discourse model transformation multimodal user interface generation},
  location   = {Pittsburgh, PA, USA},
  pages      = {321--324},
  publisher  = {ACM},
  series     = {EICS '09},
  timestamp  = {2013-03-01T23:26:37.000+0100},
  title      = {Semi-automatic multimodal user interface generation},
  url        = {http://doi.acm.org/10.1145/1570433.1570494},
  username   = {porta},
  year       = {2009},
}