<?xml version='1.0' encoding='UTF-8'?>
<?xml-stylesheet type='text/xsl' href='/oai2.xsl' ?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd">
  <responseDate>2014-12-18T14:43:48Z</responseDate>
  <request verb="GetRecord" identifier="oai:eprints.lincoln.ac.uk:6922" metadataPrefix="oai_dc">http://eprints.lincoln.ac.uk/cgi/oai2</request>
  <GetRecord>
    <record>
    <header>
      <identifier>oai:eprints.lincoln.ac.uk:6922</identifier>
      <datestamp>2013-03-13T09:19:33Z</datestamp>
      <setSpec>7374617475733D707562</setSpec>
      <setSpec>7375626A656374733D6A6163735F48:6A6163735F48363730</setSpec>
      <setSpec>74797065733D636F6E666572656E63655F6974656D</setSpec></header>
    <metadata>
      <oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
        <dc:relation>http://eprints.lincoln.ac.uk/6922/</dc:relation>
        <dc:title>Mediated attention with multimodal augmented reality</dc:title>
        <dc:creator>Dierker, Angelika</dc:creator>
        <dc:creator>Mertes, Christian</dc:creator>
        <dc:creator>Hermann, Thomas</dc:creator>
        <dc:creator>Hanheide, Marc</dc:creator>
        <dc:creator>Sagerer, Gerhard</dc:creator>
        <dc:subject>H670 Robotics and Cybernetics</dc:subject>
        <dc:description>We present an Augmented Reality (AR) system to support&#13;
collaborative tasks in a shared real-world interaction space&#13;
by facilitating joint attention. The users are assisted by information about their interaction partner's field of view both visually and acoustically. In our study, the audiovisual improvements are compared with an AR system without these support mechanisms in terms of the participants' reaction times and error rates. The participants performed a simple object-choice task we call the gaze game to ensure controlled experimental conditions. Additionally, we asked the subjects to fill in a questionnaire to gain subjective feedback from them. We were able to show an improvement for both dependent variables as well as positive feedback for the visual augmentation in the questionnaire.</dc:description>
        <dc:contributor>Gottfried, B.</dc:contributor>
        <dc:contributor>Aghajan, H.</dc:contributor>
        <dc:date>2009-11-02</dc:date>
        <dc:type>Conference or Workshop Item</dc:type>
        <dc:type>PeerReviewed</dc:type>
        <dc:format>application/pdf</dc:format>
        <dc:language>en</dc:language>
        <dc:rights></dc:rights>
        <dc:identifier>http://eprints.lincoln.ac.uk/6922/1/Dierker2009-Mediated_attention_with_multimodal_augmented_reality%5B1%5D.pdf</dc:identifier>
        <dc:identifier>  Dierker, Angelika and Mertes, Christian and Hermann, Thomas and Hanheide, Marc and Sagerer, Gerhard  (2009) Mediated attention with multimodal augmented reality.  In: International Conference on Multimodal interfaces - ICMI-MLMI '09, 2-5 November 2009, Cambridge, Mass..     </dc:identifier></oai_dc:dc></metadata></record>
  </GetRecord>
</OAI-PMH>