We describe moral quasi-dilemmas (MQDs): situations similar to moral dilemmas, but in which an agent is unsure whether exploring the plan space or the world may reveal a course of action that satisfies all moral requirements. We argue that artificial moral agents (AMAs) should be built to handle MQDs, and that MQDs may be useful for evaluating AMA architectures.
@inproceedings{kasenbergetal2018icres,
  author    = {Kasenberg, Daniel and Sarathy, Vasanth and Arnold, Thomas and Scheutz, Matthias and Williams, Tom},
  title     = {Quasi-Dilemmas for Artificial Moral Agents},
  booktitle = {International Conference on Robot Ethics and Standards},
  year      = {2018},
  url       = {https://hrilab.tufts.edu/publications/kasenbergetal2018icres.pdf},
}