@article{Noyontologya,
title = {Ontology {Development} 101: {A} {Guide} to {Creating} {Your} {First} {Ontology}},
language = {en},
author = {Noy, Natalya F. and McGuinness, Deborah L.},
year = {2001},
pages = {25},
}
@article{Jo2020lessonsa,
title = {Lessons from {Archives}: {Strategies} for {Collecting} {Sociocultural} {Data} in {Machine} {Learning}},
shorttitle = {Lessons from {Archives}},
url = {http://arxiv.org/abs/1912.10389},
doi = {10.1145/3351095.3372829},
abstract = {A growing body of work shows that many problems in fairness, accountability, transparency, and ethics in machine learning systems are rooted in decisions surrounding the data collection and annotation process. In spite of its fundamental nature however, data collection remains an overlooked part of the machine learning (ML) pipeline. In this paper, we argue that a new specialization should be formed within ML that is focused on methodologies for data collection and annotation: efforts that require institutional frameworks and procedures. Specifically for sociocultural data, parallels can be drawn from archives and libraries. Archives are the longest standing communal effort to gather human information and archive scholars have already developed the language and procedures to address and discuss many challenges pertaining to data collection such as consent, power, inclusivity, transparency, and ethics \& privacy. We discuss these five key approaches in document collection practices in archives that can inform data collection in sociocultural ML. By showing data collection practices from another field, we encourage ML research to be more cognizant and systematic in data collection and draw from interdisciplinary expertise.},
urldate = {2021-01-12},
journal = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency},
author = {Jo, Eun Seo and Gebru, Timnit},
month = jan,
year = {2020},
note = {arXiv: 1912.10389},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computers and Society, Computer Science - Machine Learning, I.2.0},
pages = {306--316},
}
@article{raji_saving_2020,
title = {Saving {Face}: {Investigating} the {Ethical} {Concerns} of {Facial} {Recognition} {Auditing}},
shorttitle = {Saving {Face}},
url = {http://arxiv.org/abs/2001.00964},
abstract = {Although essential to revealing biased performance, well intentioned attempts at algorithmic auditing can have effects that may harm the very populations these measures are meant to protect. This concern is even more salient while auditing biometric systems such as facial recognition, where the data is sensitive and the technology is often used in ethically questionable manners. We demonstrate a set of five ethical concerns in the particular case of auditing commercial facial processing technology, highlighting additional design considerations and ethical tensions the auditor needs to be aware of so as not exacerbate or complement the harms propagated by the audited system. We go further to provide tangible illustrations of these concerns, and conclude by reflecting on what these concerns mean for the role of the algorithmic audit and the fundamental product limitations they reveal.},
urldate = {2021-01-12},
journal = {arXiv:2001.00964 [cs]},
author = {Raji, Inioluwa Deborah and Gebru, Timnit and Mitchell, Margaret and Buolamwini, Joy and Lee, Joonseok and Denton, Emily},
month = jan,
year = {2020},
note = {arXiv: 2001.00964},
keywords = {Computer Science - Computers and Society},
}
@article{raji_closing_2020,
title = {Closing the {AI} {Accountability} {Gap}: {Defining} an {End}-to-{End} {Framework} for {Internal} {Algorithmic} {Auditing}},
shorttitle = {Closing the {AI} {Accountability} {Gap}},
url = {http://arxiv.org/abs/2001.00973},
abstract = {Rising concern for the societal implications of artificial intelligence systems has inspired a wave of academic and journalistic literature in which deployed systems are audited for harm by investigators from outside the organizations deploying the algorithms. However, it remains challenging for practitioners to identify the harmful repercussions of their own systems prior to deployment, and, once deployed, emergent issues can become difficult or impossible to trace back to their source. In this paper, we introduce a framework for algorithmic auditing that supports artificial intelligence system development end-to-end, to be applied throughout the internal organization development lifecycle. Each stage of the audit yields a set of documents that together form an overall audit report, drawing on an organization's values or principles to assess the fit of decisions made throughout the process. The proposed auditing framework is intended to contribute to closing the accountability gap in the development and deployment of large-scale artificial intelligence systems by embedding a robust process to ensure audit integrity.},
urldate = {2021-01-12},
journal = {arXiv:2001.00973 [cs]},
author = {Raji, Inioluwa Deborah and Smart, Andrew and White, Rebecca N. and Mitchell, Margaret and Gebru, Timnit and Hutchinson, Ben and Smith-Loud, Jamila and Theron, Daniel and Barnes, Parker},
month = jan,
year = {2020},
note = {arXiv: 2001.00973},
keywords = {Computer Science - Computers and Society},
}
@misc{noauthor_about_nodate,
title = {About {ML}},
url = {https://www.partnershiponai.org/about-ml/},
language = {en-US},
urldate = {2021-01-12},
journal = {The Partnership on AI},
}
@inproceedings{borkan_nuanced_2019,
address = {New York},
title = {Nuanced {Metrics} for {Measuring} {Unintended} {Bias} with {Real} {Data} for {Text} {Classification}},
isbn = {978-1-4503-6675-5},
abstract = {Unintended bias in Machine Learning can manifest as systemic differences in performance for different demographic groups, potentially compounding existing challenges to fairness in society at large. In this paper, we introduce a suite of threshold-agnostic metrics that provide a nuanced view of this unintended bias, by considering the various ways that a classifier's score distribution can vary across designated groups. We also introduce a large new test set of online comments with crowd-sourced annotations for identity references. We use this to show how our metrics can be used to find new and potentially subtle unintended bias in existing public models.},
language = {English},
publisher = {Association for Computing Machinery},
author = {Borkan, Daniel and Dixon, Lucas and Sorensen, Jeffrey and Thain, Nithum and Vasserman, Lucy},
year = {2019},
doi = {10.1145/3308560.3317593},
booktitle = {Companion of the World Wide Web Conference (WWW 2019)},
pages = {491--500},
}
@inproceedings{sun_mitigating_2019,
address = {Stroudsburg},
title = {Mitigating {Gender} {Bias} in {Natural} {Language} {Processing}: {Literature} {Review}},
isbn = {978-1-950737-48-2},
shorttitle = {Mitigating {Gender} {Bias} in {Natural} {Language} {Processing}},
abstract = {As Natural Language Processing (NLP) and Machine Learning (ML) tools rise in popularity, it becomes increasingly vital to recognize the role they play in shaping societal biases and stereotypes. Although NLP models have shown success in modeling various applications, they propagate and may even amplify gender bias found in text corpora. While the study of bias in artificial intelligence is not new, methods to mitigate gender bias in NLP are relatively nascent. In this paper, we review contemporary studies on recognizing and mitigating gender bias in NLP. We discuss gender bias based on four forms of representation bias and analyze methods recognizing gender bias. Furthermore, we discuss the advantages and drawbacks of existing gender debiasing methods. Finally, we discuss future studies for recognizing and mitigating gender bias in NLP.},
language = {English},
publisher = {Association for Computational Linguistics},
author = {Sun, Tony and Gaut, Andrew and Tang, Shirlyn and Huang, Yuxin and ElSherief, Mai and Zhao, Jieyu and Mirza, Diba and Belding, Elizabeth and Chang, Kai-Wei and Wang, William Yang},
editor = {Korhonen, A. and Traum, D. and Marquez, L.},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (ACL 2019)},
pages = {1630--1640},
keywords = {science},
}
@article{mehrabi_survey_2019,
title = {A {Survey} on {Bias} and {Fairness} in {Machine} {Learning}},
url = {http://arxiv.org/abs/1908.09635},
abstract = {With the widespread use of AI systems and applications in our everyday lives, it is important to take fairness issues into consideration while designing and engineering these types of systems. Such systems can be used in many sensitive environments to make important and life-changing decisions; thus, it is crucial to ensure that the decisions do not reflect discriminatory behavior toward certain groups or populations. We have recently seen work in machine learning, natural language processing, and deep learning that addresses such challenges in different subdomains. With the commercialization of these systems, researchers are becoming aware of the biases that these applications can contain and have attempted to address them. In this survey we investigated different real-world applications that have shown biases in various ways, and we listed different sources of biases that can affect AI applications. We then created a taxonomy for fairness definitions that machine learning researchers have defined in order to avoid the existing bias in AI systems. In addition to that, we examined different domains and subdomains in AI showing what researchers have observed with regard to unfair outcomes in the state-of-the-art methods and how they have tried to address them. There are still many future directions and solutions that can be taken to mitigate the problem of bias in AI systems. We are hoping that this survey will motivate researchers to tackle these issues in the near future by observing existing work in their respective fields.},
urldate = {2021-01-12},
journal = {arXiv:1908.09635 [cs]},
author = {Mehrabi, Ninareh and Morstatter, Fred and Saxena, Nripsuta and Lerman, Kristina and Galstyan, Aram},
month = sep,
year = {2019},
note = {arXiv: 1908.09635},
keywords = {Computer Science - Machine Learning},
}
@article{arnold_factsheets_2019,
title = {{FactSheets}: {Increasing} trust in {AI} services through supplier's declarations of conformity},
volume = {63},
issn = {0018-8646},
shorttitle = {{FactSheets}},
doi = {10.1147/JRD.2019.2942288},
abstract = {Accuracy is an important concern for suppliers of artificial intelligence (AI) services, but considerations beyond accuracy, such as safety (which includes fairness and explainability), security, and provenance, are also critical elements to engender consumers' trust in a service. Many industries use transparent, standardized, but often not legally required documents called supplier's declarations of conformity (SDoCs) to describe the lineage of a product along with the safety and performance testing it has undergone. SDoCs may be considered multidimensional fact sheets that capture and quantify various aspects of the product and its development to make it worthy of consumers' trust. In this article, inspired by this practice, we propose FactSheets to help increase trust in AI services. We envision such documents to contain purpose, performance, safety, security, and provenance information to be completed by AI service providers for examination by consumers. We suggest a comprehensive set of declaration items tailored to AI in the Appendix of this article.},
language = {English},
number = {4-5},
journal = {IBM Journal of Research and Development},
author = {Arnold, M. and Bellamy, R. K. E. and Hind, M. and Houde, S. and Mehta, S. and Mojsilovic, A. and Nair, R. and Ramamurthy, K. Natesan and Olteanu, A. and Piorkowski, D. and Reimer, D. and Richards, J. and Tsay, J. and Varshney, K. R.},
month = sep,
year = {2019},
keywords = {safety, uncertainty},
pages = {6},
}
@article{thornton_modeling_2017,
title = {Modeling the {Domain} of {Digital} {Preservation} in {Wikidata}},
abstract = {Members of the digital preservation community collate and capture metadata to describe file formats, software, operating systems and hardware, and use it to inform and drive digital preservation processes. In this work we describe how the infrastructure of Wikidata meets the requirements for a technical registry of metadata related to computer software and computing environments. Collaboratively creating this metadata, and making it available as linked open data, will reduce the amount of redundant work digital preservation professionals do in order to describe resources. Having machine-readable, linked open data that describes the digital preservation domain will also allow us to reuse this data in our software applications and information systems, reducing the overhead when building new tools. Furthermore the Wikidata social and technical infrastructure will enable the long term continued access to the data digital preservation practitioners collate and capture.},
language = {en},
author = {Thornton, Katherine and Cochrane, Euan and Ledoux, Thomas and Caron, Bertrand and Wilson, Carl},
year = {2017},
pages = {10},
}
@article{barredo_arrieta_explainable_2020,
title = {Explainable {Artificial} {Intelligence} ({XAI}): {Concepts}, taxonomies, opportunities and challenges toward responsible {AI}},
volume = {58},
issn = {1566-2535},
shorttitle = {Explainable {Artificial} {Intelligence} ({XAI})},
doi = {10.1016/j.inffus.2019.12.012},
abstract = {In the last few years, Artificial Intelligence (AI) has achieved a notable momentum that, if harnessed appropriately, may deliver the best of expectations over many application sectors across the field. For this to occur shortly in Machine Learning, the entire community stands in front of the barrier of explainability, an inherent problem of the latest techniques brought by sub-symbolism (e.g. ensembles or Deep Neural Networks) that were not present in the last hype of AI (namely, expert systems and rule based models). Paradigms underlying this problem fall within the so-called eXplainable AI (XAI) field, which is widely acknowledged as a crucial feature for the practical deployment of AI models. The overview presented in this article examines the existing literature and contributions already done in the field of XAI, including a prospect toward what is yet to be reached. For this purpose we summarize previous efforts made to define explainability in Machine Learning, establishing a novel definition of explainable Machine Learning that covers such prior conceptual propositions with a major focus on the audience for which the explainability is sought. Departing from this definition, we propose and discuss about a taxonomy of recent contributions related to the explainability of different Machine Learning models, including those aimed at explaining Deep Learning methods for which a second dedicated taxonomy is built and examined in detail. This critical literature analysis serves as the motivating background for a series of challenges faced by XAI, such as the interesting crossroads of data fusion and explainability. Our prospects lead toward the concept of Responsible Artificial Intelligence, namely, a methodology for the large-scale implementation of AI methods in real organizations with fairness, model explainability and accountability at its core. Our ultimate goal is to provide newcomers to the field of XAI with a thorough taxonomy that can serve as reference material in order to stimulate future research advances, but also to encourage experts and professionals from other disciplines to embrace the benefits of AI in their activity sectors, without any prior bias for its lack of interpretability.},
language = {English},
journal = {Information Fusion},
author = {Barredo Arrieta, Alejandro and Diaz-Rodriguez, Natalia and Del Ser, Javier and Bennetot, Adrien and Tabik, Siham and Barbado, Alberto and Garcia, Salvador and Gil-Lopez, Sergio and Molina, Daniel and Benjamins, Richard and Chatila, Raja and Herrera, Francisco},
month = jun,
year = {2020},
keywords = {Accountability, Comprehensibility, Data Fusion, Deep Learning, Explainable Artificial Intelligence, Fairness, Interpretability, Machine Learning, Privacy, Responsible Artificial Intelligence, Transparency, big data, black-box, data fusion, decision trees, feature-selection, generalized additive-models, logistic-regression, rule extraction, support vector machines, trained neural-networks},
pages = {82--115},
}
@article{christin_ethnographer_2020,
title = {The ethnographer and the algorithm: beyond the black box},
volume = {49},
issn = {0304-2421},
shorttitle = {The ethnographer and the algorithm},
doi = {10.1007/s11186-020-09411-3},
abstract = {A common theme in social science studies of algorithms is that they are profoundly opaque and function as "black boxes." Scholars have developed several methodological approaches in order to address algorithmic opacity. Here I argue that we can explicitly enroll algorithms in ethnographic research, which can shed light on unexpected aspects of algorithmic systems-including their opacity. I delineate three meso-level strategies for algorithmic ethnography. The first, algorithmic refraction, examines the reconfigurations that take place when computational software, people, and institutions interact. The second strategy, algorithmic comparison, relies on a similarity-and-difference approach to identify the instruments' unique features. The third strategy, algorithmic triangulation, enrolls algorithms to help gather rich qualitative data. I conclude by discussing the implications of this toolkit for the study of algorithms and future of ethnographic fieldwork.},
language = {English},
number = {5-6},
journal = {Theory and Society},
author = {Christin, Angèle},
month = oct,
year = {2020},
keywords = {Algorithms, Enrollment, Ethnography, Opacity, audiences, journalism, lens, politics, technology},
pages = {897--918},
}
@techreport{dutia_heritage_2021,
type = {preprint},
title = {Heritage {Connector}: {A} {Machine} {Learning} {Framework} for {Building} {Linked} {Open} {Data} from {Museum} {Collections}},
shorttitle = {Heritage {Connector}},
url = {https://www.authorea.com/users/387788/articles/502720-heritage-connector-a-machine-learning-framework-for-building-linked-open-data-from-museum-collections?commit=2ab9d0dcad497f1659d8095a939acd19698b33ed},
abstract = {As with almost all data, museum collection catalogues are largely unstructured, variable in consistency and overwhelmingly composed of thin records. The form of these catalogues means that the potential for new forms of research, access and scholarly enquiry that range across multiple collections and related datasets remains dormant. In the project Heritage Connector: Transforming text into data to extract meaning and make connections, we are applying a battery of digital techniques to connect similar, identical and related items within and across collections and other publications. In this paper we describe a framework to create a Linked Open Data knowledge graph (KG) from digital museum catalogues, connect entities within this graph to Wikidata, and create new connections in this graph from text. We focus on the use of machine learning to create these links at scale with a small amount of labelled data, on a mid-range laptop or a small cloud virtual machine. We publish open-source software providing tools to perform the tasks of KG creation, entity matching and named entity recognition under these constraints.},
language = {en},
urldate = {2021-02-09},
institution = {Preprints},
author = {Dutia, Kalyan and Stack, John},
month = jan,
year = {2021},
doi = {10.22541/au.160994838.81187546/v1},
}
@article{mitchell_model_2019,
title = {Model {Cards} for {Model} {Reporting}},
url = {http://arxiv.org/abs/1810.03993},
doi = {10.1145/3287560.3287596},
abstract = {Trained machine learning models are increasingly used to perform high-impact tasks in areas such as law enforcement, medicine, education, and employment. In order to clarify the intended use cases of machine learning models and minimize their usage in contexts for which they are not well suited, we recommend that released models be accompanied by documentation detailing their performance characteristics. In this paper, we propose a framework that we call model cards, to encourage such transparent model reporting. Model cards are short documents accompanying trained machine learning models that provide benchmarked evaluation in a variety of conditions, such as across different cultural, demographic, or phenotypic groups (e.g., race, geographic location, sex, Fitzpatrick skin type) and intersectional groups (e.g., age and race, or sex and Fitzpatrick skin type) that are relevant to the intended application domains. Model cards also disclose the context in which models are intended to be used, details of the performance evaluation procedures, and other relevant information. While we focus primarily on human-centered machine learning models in the application fields of computer vision and natural language processing, this framework can be used to document any trained machine learning model. To solidify the concept, we provide cards for two supervised models: One trained to detect smiling faces in images, and one trained to detect toxic comments in text. We propose model cards as a step towards the responsible democratization of machine learning and related AI technology, increasing transparency into how well AI technology works. We hope this work encourages those releasing trained machine learning models to accompany model releases with similar detailed evaluation numbers and other relevant documentation.},
urldate = {2021-01-12},
journal = {Proceedings of the Conference on Fairness, Accountability, and Transparency},
author = {Mitchell, Margaret and Wu, Simone and Zaldivar, Andrew and Barnes, Parker and Vasserman, Lucy and Hutchinson, Ben and Spitzer, Elena and Raji, Inioluwa Deborah and Gebru, Timnit},
month = jan,
year = {2019},
note = {arXiv: 1810.03993},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Machine Learning},
pages = {220--229},
}
@misc{noauthor_we_nodate,
title = {We read the paper that forced {Timnit} {Gebru} out of {Google}. {Here}’s what it says.},
url = {https://www.technologyreview.com/2020/12/04/1013294/google-ai-ethics-research-paper-forced-out-timnit-gebru/},
abstract = {The company's star ethics researcher highlighted the risks of large language models, which are key to Google's business.},
language = {en},
urldate = {2021-01-12},
journal = {MIT Technology Review},
}
@misc{noauthor_two-year_nodate,
title = {The two-year fight to stop {Amazon} from selling face recognition to the police},
url = {https://www.technologyreview.com/2020/06/12/1003482/amazon-stopped-selling-police-face-recognition-fight/},
abstract = {In the summer of 2018, nearly 70 civil rights and research organizations wrote a letter to Jeff Bezos demanding that Amazon stop providing face recognition technology to governments. As part of an increased focus on the role that tech companies were playing in enabling the US government’s tracking and deportation of immigrants, it called on…},
language = {en},
urldate = {2021-01-12},
journal = {MIT Technology Review},
}
@article{gebru_datasheets_2020,
title = {Datasheets for {Datasets}},
url = {http://arxiv.org/abs/1803.09010},
abstract = {The machine learning community currently has no standardized process for documenting datasets, which can lead to severe consequences in high-stakes domains. To address this gap, we propose datasheets for datasets. In the electronics industry, every component, no matter how simple or complex, is accompanied with a datasheet that describes its operating characteristics, test results, recommended uses, and other information. By analogy, we propose that every dataset be accompanied with a datasheet that documents its motivation, composition, collection process, recommended uses, and so on. Datasheets for datasets will facilitate better communication between dataset creators and dataset consumers, and encourage the machine learning community to prioritize transparency and accountability.},
urldate = {2021-01-12},
journal = {arXiv:1803.09010 [cs]},
author = {Gebru, Timnit and Morgenstern, Jamie and Vecchione, Briana and Vaughan, Jennifer Wortman and Wallach, Hanna and Daumé III, Hal and Crawford, Kate},
month = mar,
year = {2020},
note = {arXiv: 1803.09010},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Databases, Computer Science - Machine Learning},
}
@article{morley_what_2020,
title = {From {What} to {How}: {An} {Initial} {Review} of {Publicly} {Available} {AI} {Ethics} {Tools}, {Methods} and {Research} to {Translate} {Principles} into {Practices}},
volume = {26},
issn = {1353-3452},
shorttitle = {From {What} to {How}},
doi = {10.1007/s11948-019-00165-5},
abstract = {The debate about the ethical implications of Artificial Intelligence dates from the 1960s (Samuel in Science, 132(3429):741-742, 1960. 10.1126/science.132.3429.741; Wiener in Cybernetics: or control and communication in the animal and the machine, MIT Press, New York, 1961). However, in recent years symbolic AI has been complemented and sometimes replaced by (Deep) Neural Networks and Machine Learning (ML) techniques. This has vastly increased its potential utility and impact on society, with the consequence that the ethical debate has gone mainstream. Such a debate has primarily focused on principles-the 'what' of AI ethics (beneficence, non-maleficence, autonomy, justice and explicability)-rather than on practices, the 'how.' Awareness of the potential issues is increasing at a fast rate, but the AI community's ability to take action to mitigate the associated risks is still at its infancy. Our intention in presenting this research is to contribute to closing the gap between principles and practices by constructing a typology that may help practically-minded developers apply ethics at each stage of the Machine Learning development pipeline, and to signal to researchers where further work is needed. The focus is exclusively on Machine Learning, but it is hoped that the results of this research may be easily applicable to other branches of AI. The article outlines the research method for creating this typology, the initial findings, and provides a summary of future research needs.},
language = {English},
number = {4},
journal = {Science and Engineering Ethics},
author = {Morley, Jessica and Floridi, Luciano and Kinsey, Libby and Elhalal, Anat},
month = aug,
year = {2020},
keywords = {Applied ethics, Artificial intelligence, Data governance, Digital ethics, Ethics of AI, Governance, Machine learning, decision-making, design, explanation, future, privacy, responsibility},
pages = {2141--2168},
}
@misc{noauthor_google_nodate,
title = {Google {Cloud} {Model} {Cards}},
url = {https://modelcards.withgoogle.com/about},
urldate = {2021-01-12},
}
@misc{noauthor_ai_nodate,
title = {{AI} {FactSheets} 360},
url = {https://aifs360.mybluemix.net/},
urldate = {2021-01-12},
}
@article{sendak_presenting_2020,
title = {Presenting machine learning model information to clinical end users with model facts labels},
volume = {3},
issn = {2398-6352},
doi = {10.1038/s41746-020-0253-3},
abstract = {There is tremendous enthusiasm surrounding the potential for machine learning to improve medical prognosis and diagnosis. However, there are risks to translating a machine learning model into clinical care and clinical end users are often unaware of the potential harm to patients. This perspective presents the "Model Facts" label, a systematic effort to ensure that front-line clinicians actually know how, when, how not, and when not to incorporate model output into clinical decisions. The "Model Facts" label was designed for clinicians who make decisions supported by a machine learning model and its purpose is to collate relevant, actionable information in 1-page. Practitioners and regulators must work together to standardize presentation of machine learning model information to clinical end users in order to prevent harm to patients. Efforts to integrate a model into clinical practice should be accompanied by an effort to clearly communicate information about a machine learning model with a "Model Facts" label.},
language = {English},
number = {1},
journal = {npj Digital Medicine},
author = {Sendak, Mark P. and Gao, Michael and Brajer, Nathan and Balu, Suresh},
month = mar,
year = {2020},
keywords = {benefits, box, diagnosis, harms, performance},
pages = {41},
}
@article{stoyanovich_imperative_2020,
title = {The imperative of interpretable machines},
volume = {2},
doi = {10.1038/s42256-020-0171-8},
abstract = {As artificial intelligence becomes prevalent in society, a framework is needed to connect interpretability and trust in algorithm-assisted decisions, for a range of stakeholders.},
language = {English},
number = {4},
journal = {Nature Machine Intelligence},
author = {Stoyanovich, Julia and Van Bavel, Jay J. and West, Tessa V.},
month = apr,
year = {2020},
pages = {197--199},
}