Robots and other artificial agents are increasingly being considered in domains involving complex decision-making and interaction with humans. These agents must adhere to human moral social norms: agents that fail to do so will be at best unpopular, and at worst dangerous. Artificial agents should have the ability to learn (both from natural language instruction and from observing other agents' behavior) and obey multiple, potentially conflicting norms.
@inproceedings{kasenberg2018hrip,
  author    = {Kasenberg, Daniel},
  title     = {Inferring and Obeying Norms in Temporal Logic},
  booktitle = {Proceedings of the Human-Robot Interaction ({HRI}) Pioneers Workshop},
  year      = {2018},
  doi       = {10.1145/3173386.3176914},
  url       = {https://hrilab.tufts.edu/publications/kasenberg2018hrip.pdf},
}