User interfaces are very important for the success of many computer-based applications these days. However, their development takes time, requires experts for user-interface design as well as experienced programmers and is very expensive. This problem becomes even more severe through the ubiquitous use of a variety of devices such as PCs, mobile phones, PDAs etc., since each of these devices has its own specifics that require a special user interface. Therefore, we developed a tool-supported approach to automatically synthesize multi-device user interfaces from high-level specifications in the form of models. In contrast to previous approaches focusing on abstracting the user interface per se, we make use of communicative acts derived from speech act theory for the specification of desired user intentions in interactions. In this way, we approach a solution to the given problem, since user interfaces can be efficiently provided without experience in implementing them.
%0 Conference Paper
%1 falb2005using
%A Falb, Jürgen
%A Popp, Roman
%A Röck, Thomas
%A Jelinek, Helmut
%A Arnautovic, Edin
%A Kaindl, Hermann
%B Proceedings of the 20th IEEE/ACM international Conference on Automated software engineering
%C New York, NY, USA
%D 2005
%I ACM
%K act dialogue generation interface speech synthesis user
%P 429--430
%R 10.1145/1101908.1101988
%T Using communicative acts in high-level specifications of user interfaces for their automated synthesis
%U http://doi.acm.org/10.1145/1101908.1101988
%X User interfaces are very important for the success of many computer-based applications these days. However, their development takes time, requires experts for user-interface design as well as experienced programmers and is very expensive. This problem becomes even more severe through the ubiquitous use of a variety of devices such as PCs, mobile phones, PDAs etc., since each of these devices has its own specifics that require a special user interface. Therefore, we developed a tool-supported approach to automatically synthesize multi-device user interfaces from high-level specifications in the form of models. In contrast to previous approaches focusing on abstracting the user interface per se, we make use of <i>communicative acts</i> derived from speech act theory for the specification of desired user intentions in interactions. In this way, we approach a solution to the given problem, since user interfaces can be efficiently provided without experience in implementing them.
%@ 1-58113-993-4
@inproceedings{falb2005using,
  abstract = {User interfaces are very important for the success of many computer-based applications these days. However, their development takes time, requires experts for user-interface design as well as experienced programmers and is very expensive. This problem becomes even more severe through the ubiquitous use of a variety of devices such as PCs, mobile phones, PDAs etc., since each of these devices has its own specifics that require a special user interface. Therefore, we developed a tool-supported approach to automatically synthesize multi-device user interfaces from high-level specifications in the form of models. In contrast to previous approaches focusing on abstracting the user interface per se, we make use of \emph{communicative acts} derived from speech act theory for the specification of desired user intentions in interactions. In this way, we approach a solution to the given problem, since user interfaces can be efficiently provided without experience in implementing them.},
  acmid = {1101988},
  added-at = {2013-01-06T16:21:22.000+0100},
  address = {New York, NY, USA},
  author = {Falb, J{\"u}rgen and Popp, Roman and R{\"o}ck, Thomas and Jelinek, Helmut and Arnautovic, Edin and Kaindl, Hermann},
  biburl = {https://www.bibsonomy.org/bibtex/25619dc13a88c7b847bd95659f47fad7f/porta},
  booktitle = {Proceedings of the 20th {IEEE/ACM} International Conference on Automated Software Engineering},
  doi = {10.1145/1101908.1101988},
  file = {falb2005using.pdf:falb2005using.pdf:PDF},
  groups = {public},
  interhash = {94b4e6a6c14655c3bab452eba67fd84b},
  intrahash = {5619dc13a88c7b847bd95659f47fad7f},
  isbn = {1-58113-993-4},
  keywords = {act dialogue generation interface speech synthesis user},
  location = {Long Beach, CA, USA},
  numpages = {2},
  pages = {429--430},
  publisher = {ACM},
  series = {ASE '05},
  timestamp = {2013-03-01T23:26:36.000+0100},
  title = {Using communicative acts in high-level specifications of user interfaces for their automated synthesis},
  url = {https://doi.org/10.1145/1101908.1101988},
  username = {porta},
  year = {2005},
}