dc.contributor.author | Schlegel, Udo | en_US |
dc.contributor.author | Cakmak, Eren | en_US |
dc.contributor.author | Keim, Daniel A. | en_US |
dc.contributor.editor | Archambault, Daniel and Nabney, Ian and Peltonen, Jaakko | en_US |
dc.date.accessioned | 2020-05-24T13:27:42Z | |
dc.date.available | 2020-05-24T13:27:42Z | |
dc.date.issued | 2020 | |
dc.identifier.isbn | 978-3-03868-113-7 | |
dc.identifier.uri | https://doi.org/10.2312/mlvis.20201100 | |
dc.identifier.uri | https://diglib.eg.org:443/handle/10.2312/mlvis20201100 | |
dc.description.abstract | Explainable artificial intelligence (XAI) methods aim to reveal the non-transparent decision-making mechanisms of black-box models. The evaluation of insight generated by such XAI methods remains challenging as the applied techniques depend on many factors (e.g., parameters and human interpretation). We propose ModelSpeX, a visual analytics workflow to interactively extract human-centered rule-sets to generate model specifications from black-box models (e.g., neural networks). The workflow enables to reason about the underlying problem, to extract decision rule sets, and to evaluate the suitability of the model for a particular task. An exemplary usage scenario walks an analyst through the steps of the workflow to show the applicability. | en_US |
dc.publisher | The Eurographics Association | en_US |
dc.subject | Computing methodologies | |
dc.subject | Artificial intelligence | |
dc.subject | Human-centered computing | |
dc.subject | HCI theory, concepts and models | |
dc.title | ModelSpeX: Model Specification Using Explainable Artificial Intelligence Methods | en_US |
dc.description.seriesinformation | Machine Learning Methods in Visualisation for Big Data | |
dc.description.sectionheaders | Papers | |
dc.identifier.doi | 10.2312/mlvis.20201100 | |
dc.identifier.pages | 7-11 | |