@article{acfe68e92fc347b4a4a7c1a1972e8661,
title = "Responsibility beyond design: Physicians{\textquoteright} requirements for ethical medical AI",
abstract = "Medical AI is increasingly being developed and tested to improve medical diagnosis, prediction and treatment of a wide array of medical conditions. Despite worries about the explainability and accuracy of such medical AI systems, it is reasonable to assume that they will be increasingly implemented in medical practice. Current ethical debates focus mainly on design requirements and suggest embedding certain values such as transparency, fairness, and explainability in the design of medical AI systems. Aside from concerns about their design, medical AI systems also raise questions with regard to physicians{\textquoteright} responsibilities once these technologies are implemented and used. How do physicians{\textquoteright} responsibilities change with the implementation of medical AI? Which set of competencies do physicians have to learn to responsibly interact with medical AI? In the present article, we introduce the notion of forward-looking responsibility and enumerate through this conceptual lens a number of competencies and duties that physicians ought to employ to responsibly utilize medical AI in practice. These include, among others, understanding the range of reasonable outputs, being aware of one{\textquoteright}s own experience and skill decline, and monitoring potential accuracy decline of the AI systems.",
keywords = "competencies, entrustable professional activities, forward-looking responsibility, medical AI, medical ethics, radiology, responsibility",
author = "Sand, Martin and Dur{\'a}n, {Juan Manuel} and Jongsma, {Karin Rolanda}",
note = "Funding Information: This work was supported by the Netherlands Institute for Advanced Study in the Humanities and Social Sciences (NIAS-KNAW), which enabled us to closely collaborate as a NIAS-Lorentz Theme Group on “Accountability in Medical Autonomous Expert Systems: Ethical and Epistemological Challenges for Explainable AI.” We are grateful to all Fellows at NIAS and our Theme Group members Sander Beckers and Giuseppe Primiero for their constructive feedback and support. Karin Rolanda Jongsma's contribution to this paper was partially funded by the Dutch Science Organization (NWO), RAIDIO project with grant number 406.Di.19.089. Publisher Copyright: {\textcopyright} 2021 John Wiley & Sons Ltd",
year = "2022",
month = feb,
doi = "10.1111/bioe.12887",
language = "English",
volume = "36",
pages = "162--169",
journal = "Bioethics",
issn = "0269-9702",
publisher = "Wiley-Blackwell",
number = "2",
}