@inproceedings{pub5440,
abstract = {This article offers a brief overview of multimodal (speech, touch, gaze, etc.) input theory as it pertains to
common in-vehicle tasks and devices. After a brief introduction, we walk through a sample multimodal interaction, detailing the
steps involved and how information necessary to the interaction can be obtained by combining input modes in various
ways. We also discuss how contemporary in-vehicle systems take advantage of multimodality (or fail to do so), and
how the capabilities of such systems might be broadened in the future via clever multimodal input mechanisms.
},
year = {2011},
title = {Situation-Aware, User-Centric Multimodality for Automotive},
booktitle = {Automotive meets Electronics - Beiträge der 2. GMM-Fachtagung (AmE-2011), May 4-5, Dortmund, Germany},
pages = {141--144},
publisher = {VDE Verlag},
author = {Christian Müller},
organization = {VDE}
}