We present a novel integration of a computational framework for modeling attention-driven perception and cognition (ARCADIA) with a cognitive robotic architecture (DIARC), demonstrating how this integration can be used to drive the gaze behavior of a robotic platform. Although some previous approaches to controlling gaze behavior in robots during human-robot interactions have relied on either models of human visual attention or models of human cognition, ARCADIA provides a novel framework with an attentional mechanism that bridges both lower-level visual and higher-level cognitive processes. We demonstrate how this approach can produce more natural and human-like robot gaze behavior. In particular, we focus on how our approach can control gaze during an interactive object learning task. We present results from a pilot crowdsourced evaluation that investigates whether the gaze behavior produced during this task increases confidence that the robot has correctly learned each object.
@inproceedings{briggsetal22hri,
  title     = {A Novel Architectural Method for Producing Dynamic Gaze Behavior in Human-Robot Interactions},
  author    = {Briggs, Gordon and Chita-Tegmark, Meia and Krause, Evan and
               Bridewell, Will and Bello, Paul and Scheutz, Matthias},
  booktitle = {Proceedings of the 2022 {ACM/IEEE} International Conference
               on Human-Robot Interaction},
  year      = {2022},
  pages     = {383--392},
  doi       = {10.1109/HRI53351.2022.9889499},
  url       = {https://hrilab.tufts.edu/publications/briggsetal22hri.pdf},
}