@incollection{holzinger_explainable_2018,
	address = {Cham},
	title = {Explainable {AI}: {The} {New} 42?},
	shorttitle = {Explainable {AI}},
	booktitle = {Machine {Learning} and {Knowledge} {Extraction}},
	volume = {11015},
	isbn = {978-3-319-99739-1 978-3-319-99740-7},
	url = {http://link.springer.com/10.1007/978-3-319-99740-7_21},
	doi = {10.1007/978-3-319-99740-7_21},
	abstract = {Explainable AI is not a new field. Since at least the early exploitation of C.S. Pierce’s abductive reasoning in expert systems of the 1980s, there were reasoning architectures to support an explanation function for complex AI systems, including applications in medical diagnosis, complex multi-component design, and reasoning about the real world. So explainability is at least as old as early AI, and a natural consequence of the design of AI systems. While early expert systems consisted of handcrafted knowledge bases that enabled reasoning over narrowly well-defined domains (e.g., INTERNIST, MYCIN), such systems had no learning capabilities and had only primitive uncertainty handling. But the evolution of formal reasoning architectures to incorporate principled probabilistic reasoning helped address the capture and use of uncertain knowledge.},
	language = {en},
	urldate = {2019-01-23},
	publisher = {Springer International Publishing},
	author = {Goebel, Randy and Chander, Ajay and Holzinger, Katharina and Lecue, Freddy and Akata, Zeynep and Stumpf, Simone and Kieseberg, Peter and Holzinger, Andreas},
	editor = {Holzinger, Andreas and Kieseberg, Peter and Tjoa, A Min and Weippl, Edgar},
	year = {2018},
	pages = {295--303},
}