From 3bdf9a8cc6c7dd35adb2c42ae9011ab2fb054b7b Mon Sep 17 00:00:00 2001 From: Keelin Murphy Date: Tue, 7 Nov 2023 20:32:54 +0000 Subject: [PATCH] running version of Update_bib_file with outputs added --- scripts/Update_bib_file.ipynb | 887 +- scripts/script_data/blacklist_tmp_updated.csv | 146 + scripts/script_data/diag_ss_tmp_new.bib | 31993 ++++++++++++++++ 3 files changed, 32948 insertions(+), 78 deletions(-) create mode 100644 scripts/script_data/blacklist_tmp_updated.csv create mode 100644 scripts/script_data/diag_ss_tmp_new.bib diff --git a/scripts/Update_bib_file.ipynb b/scripts/Update_bib_file.ipynb index 42b0831..5d065ad 100644 --- a/scripts/Update_bib_file.ipynb +++ b/scripts/Update_bib_file.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 119, "metadata": { "pycharm": { "name": "#%%\n" @@ -23,13 +23,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 120, "metadata": { "pycharm": { "name": "#%%\n" } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "' KM: Remove this fn from here. Dre to update GenerateCSVFile with method like this which handles no-doi items\\ndef remove_blacklist_items(df_new_items):\\n blacklisted_items = pd.read_csv(\"./script_data/blacklist.csv\")\\n initial_length = len(df_new_items)\\n df_new_items = df_new_items[~df_new_items[\\'ss_doi\\'].isin(blacklisted_items[\\'doi\\'].unique().tolist())] # remove blacklisted dois\\n df_new_items = df_new_items[~df_new_items[\\'ss_id\\'].isin(blacklisted_items[\\'ss_id\\'].unique().tolist())] # remove blacklisted dois\\n\\n print(f\"{initial_length-len(df_new_items)} items removed from newly found items.\")\\n return df_new_items\\n'" + ] + }, + "execution_count": 120, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "''' KM: Remove this fn from here. 
Dre to update GenerateCSVFile with method like this which handles no-doi items\n", "def remove_blacklist_items(df_new_items):\n", @@ -45,7 +56,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 121, "metadata": { "pycharm": { "name": "#%%\n" @@ -71,7 +82,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 122, "metadata": { "pycharm": { "name": "#%%\n" @@ -79,18 +90,18 @@ }, "outputs": [], "source": [ - "def update_blacklist_csv(blacklist_csv, blacklist_entries): #blacklist_csv is a df\n", + "def update_blacklist_csv(blacklist_df, blacklist_entries, blacklist_out_file): #blacklist_csv is a df\n", " # Add all items to blacklist.csv\n", - " blacklist_csv = pd.concat([blacklist_csv, pd.DataFrame(blacklist_entries)], ignore_index=True)\n", + " blacklist_df = pd.concat([blacklist_df, pd.DataFrame(blacklist_entries)], ignore_index=True)\n", "\n", " # Save blacklist.csv\n", - " blacklist_csv.to_csv('./script_data/blacklist.csv', index=False)\n", + " blacklist_df.to_csv(blacklist_out_file, index=False)\n", " return f\"{len(blacklist_entries)} items added to blacklist\"" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 123, "metadata": { "pycharm": { "name": "#%%\n" @@ -111,7 +122,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 124, "metadata": { "pycharm": { "name": "#%%\n" @@ -128,8 +139,14 @@ " # else:\n", " # citations = get_citations(item['ss_id'])\n", "\n", + " # if no ss_doi exists\n", + " if len(str(item['ss_doi']))==0 or str(item['ss_doi'])=='nan':\n", + " print('no ss_doi available, I cannot add new bib entry', item['ss_id'])\n", + " return None\n", + " \n", " # make sure doi is not already in diag.bib\n", " if item['ss_doi'] in diag_bib_file:\n", + " print('doi already exists in bib file, I will not add new bib entry', item['ss_doi'], item['ss_id'])\n", " return None\n", "\n", " # Get BibLatex information based on DOI if not in the file\n", @@ -145,7 +162,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 126, "metadata": { "pycharm": { "name": "#%%\n" @@ -159,8 +176,7 @@ " for ind, entry in enumerate(diag_bib_raw):\n", " if entry.type == 'string':\n", " continue\n", - " \n", - " print(entry.key)\n", + "\n", " # if we found the relevant key\n", " if bibkey == entry.key:\n", " # if there is already something in all_ss_ids\n", @@ -174,19 +190,17 @@ " # if there is no ss_id here yet just add this single one\n", " else: \n", " entry.fields['all_ss_ids'] = '{' + str(ss_id) + '}'\n", - " # put the updated entry back into the list\n", - " diag_bib_raw[ind] = entry\n", - " print(str(ss_id), 'added to diag.bib')\n", - " return diag_bib_raw\n", + " print(str(ss_id), 'added to diag_bib_raw')\n", + " return [diag_bib_raw, 'Success']\n", " \n", " # if we haven't returned by now then we failed to update \n", " print('failed to add ss_id to diag.bib', str(ss_id), str(bibkey))\n", - " return diag_bib_raw" + " return [diag_bib_raw, 'Fail']" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 127, "metadata": { "pycharm": { "name": "#%%\n" @@ -194,10 +208,10 @@ }, "outputs": [], "source": [ - "def update_citation_count(path_diag_bib):\n", - " \n", - " diag_bib_raw = read_bibfile(None, path_diag_bib)\n", - " for entry in diag_bib_raw:\n", + "def update_citation_count(diag_bib_raw):\n", + " num_entries = len(diag_bib_raw)\n", + " for ind, entry in enumerate(diag_bib_raw):\n", + " # print('checking citations', ind, 'of', num_entries)\n", " 
flag=0\n", " if entry.type == 'string':\n", " continue\n", @@ -212,11 +226,19 @@ " n_cits = 0\n", " for key in dict_cits.keys():\n", " n_cits += dict_cits[key]\n", + " # TODO: is it correct logic to use this field name or should we make a new one?\n", " if 'gscites' in entry.fields:\n", - " if n_cits > int(entry.fields['gscites'].strip('{}')):\n", + " # only update if we are increasing the number of citations!!!\n", + " previous_cits = int(entry.fields['gscites'].strip('{}'))\n", + " if n_cits > previous_cits:\n", + " print('updating', entry.key, 'from', previous_cits, 'to', n_cits)\n", " entry.fields['gscites'] = '{' + str(n_cits) + '}'\n", + " elif (previous_cits > (1.5 * n_cits)) and (previous_cits - n_cits > 10):\n", + " print('warning: num citations calculated for this bibkey is much lower than previously suggested....', entry.key, previous_cits, n_cits)\n", " else:\n", + " print('adding gscites', entry.key, n_cits)\n", " entry.fields['gscites'] = '{' + str(n_cits) + '}'\n", + " print('done updating citations')\n", " return diag_bib_raw" ] }, @@ -228,12 +250,12 @@ } }, "source": [ - "# Load csv files" + "# Load manually checked csv file" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 128, "metadata": { "pycharm": { "name": "#%%\n" @@ -242,27 +264,469 @@ "outputs": [], "source": [ "# load manually_checked\n", - "manually_checked = pd.read_csv(\"./script_data/manual_check.csv\")\n", + "manually_checked = pd.read_excel(\"./script_data/manual_check_20231018.xlsx\")\n", "# manually_checked = remove_blacklist_items(manually_checked) # This should be done before actually manually checking\n", "\n", + "# load bib file just for reading at this point\n", + "#TODO: in the end when this script is routine this should just read the live diag.bib\n", + "diag_bib_path = os.path.join(parent_directory, 'scripts/script_data/diag_ss.bib')\n", + "with open(diag_bib_path, 'r', encoding=\"utf8\") as readonly_bib_file:\n", + " diag_bib_readonly = readonly_bib_file.read()\n", + " \n", "# POTENTIAL TO-DO CREATE ACTION MAPPINGS\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 129, "metadata": { "pycharm": { "name": "#%%\n" } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Working on 0/280\n", + "Working on 1/280\n", + "Working on 2/280\n", + "Working on 3/280\n", + "Working on 4/280\n", + "Unable to generate bibtext for 10.1038/s41585-020-0324-x\n", + "'family'\n", + "Unable to gather information for 10.1038/s41585-020-0324-x\n", + "Working on 5/280\n", + "03ef312b3d3e616fd7f0a2f2260c82ad62ed7ef1 has not been checked yet, make sure only 1 action is mentioned\n", + "Working on 6/280\n", + "Working on 7/280\n", + "Working on 8/280\n", + "Working on 9/280\n", + "Unable to generate bibtext for 10.23698/AIDA/BRLN\n", + "'published'\n", + "Unable to gather information for 10.23698/AIDA/BRLN\n", + "Working on 10/280\n", + "Working on 11/280\n", + "Unable to generate bibtext for 10.1093/cid/ciac623\n", + "'family'\n", + "Unable to gather information for 10.1093/cid/ciac623\n", + "Working on 12/280\n", + "20\n", + "Working on 13/280\n", + "Working on 14/280\n", + "308\n", + "Working on 15/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2112.05151\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2112.05151\n", + "Working on 16/280\n", + "Unable to generate bibtext for 10.1093/rheumatology/keab835\n", + "'family'\n", + "Unable to gather information for 
10.1093/rheumatology/keab835\n", + "Working on 17/280\n", + "Unable to generate bibtext for 10.1001/jamaophthalmol.2021.1407\n", + "'family'\n", + "Unable to gather information for 10.1001/jamaophthalmol.2021.1407\n", + "Working on 18/280\n", + "Working on 19/280\n", + "Working on 20/280\n", + "Working on 21/280\n", + "Working on 22/280\n", + "Working on 23/280\n", + "Working on 24/280\n", + "Working on 25/280\n", + "Working on 26/280\n", + "Working on 27/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2212.08568\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2212.08568\n", + "Working on 28/280\n", + "Working on 29/280\n", + "Working on 30/280\n", + "Working on 31/280\n", + "doi already exists in bib file, I will not add new bib entry 10.1109/TMI.2016.2553401 1d2109f8ec43c23db647c4778a5bb5846074e575\n", + "Unable to gather information for 10.1109/TMI.2016.2553401\n", + "Working on 32/280\n", + "Working on 33/280\n", + "Working on 34/280\n", + "Working on 35/280\n", + "Working on 36/280\n", + "Working on 37/280\n", + "Unable to generate bibtext for 10.48550/arXiv.1803.05471\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.1803.05471\n", + "Working on 38/280\n", + "Working on 39/280\n", + "Working on 40/280\n", + "Unable to generate bibtext for 10.48550/arXiv.1908.06037\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.1908.06037\n", + "Working on 41/280\n", + "Working on 42/280\n", + "Working on 43/280\n", + "Working on 44/280\n", + "Working on 45/280\n", + "Unable to generate bibtext for https://doi.org/10.48550/arXiv.2109.07892\n", + "'published'\n", + "Unable to gather information for https://doi.org/10.48550/arXiv.2109.07892\n", + "Working on 46/280\n", + "Working on 47/280\n", + "Working on 48/280\n", + "Working on 49/280\n", + "Working on 50/280\n", + "Working on 51/280\n", + "Working on 52/280\n", + "Working on 53/280\n", + "Working on 54/280\n", + "Working on 55/280\n", + "Working on 56/280\n", + "Working on 57/280\n", + "Working on 58/280\n", + "Working on 59/280\n", + "Working on 60/280\n", + "Working on 61/280\n", + "16\n", + "Working on 62/280\n", + "Working on 63/280\n", + "Working on 64/280\n", + "8\n", + "Working on 65/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2206.01653\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2206.01653\n", + "Working on 66/280\n", + "Working on 67/280\n", + "Working on 68/280\n", + "Working on 69/280\n", + "15\n", + "Working on 70/280\n", + "Unable to generate bibtext for 10.1186/s12874-021-01243-8\n", + "'family'\n", + "Unable to gather information for 10.1186/s12874-021-01243-8\n", + "Working on 71/280\n", + "Working on 72/280\n", + "Working on 73/280\n", + "Working on 74/280\n", + "Working on 75/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2302.03116\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2302.03116\n", + "Working on 76/280\n", + "Unable to generate bibtext for 10.1201/B18191-7\n", + "'author'\n", + "Unable to gather information for 10.1201/B18191-7\n", + "Working on 77/280\n", + "Working on 78/280\n", + "Working on 79/280\n", + "Working on 80/280\n", + "Working on 81/280\n", + "Working on 82/280\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some characters could not be decoded, and were replaced with REPLACEMENT CHARACTER.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Working on 83/280\n", + "8\n", + "Working on 
84/280\n", + "Working on 85/280\n", + "Working on 86/280\n", + "Working on 87/280\n", + "Working on 88/280\n", + "Working on 89/280\n", + "Working on 90/280\n", + "Working on 91/280\n", + "Working on 92/280\n", + "Working on 93/280\n", + "Working on 94/280\n", + "Working on 95/280\n", + "Working on 96/280\n", + "Working on 97/280\n", + "Working on 98/280\n", + "Working on 99/280\n", + "Working on 100/280\n", + "Working on 101/280\n", + "Working on 102/280\n", + "Working on 103/280\n", + "Working on 104/280\n", + "Working on 105/280\n", + "16\n", + "Working on 106/280\n", + "Working on 107/280\n", + "Working on 108/280\n", + "no ss_doi available, I cannot add new bib entry 5a09637587e694a03f68a4ee1046d31aa97fd0c0\n", + "Unable to gather information for nan\n", + "Working on 109/280\n", + "Working on 110/280\n", + "no ss_doi available, I cannot add new bib entry 5cf666b6326b85a31b4e2759031392f0a49351b2\n", + "Unable to gather information for nan\n", + "Working on 111/280\n", + "Working on 112/280\n", + "Working on 113/280\n", + "Working on 114/280\n", + "Working on 115/280\n", + "Working on 116/280\n", + "17\n", + "Working on 117/280\n", + "Working on 118/280\n", + "Working on 119/280\n", + "Unable to generate bibtext for 10.1002/mrm.29371\n", + "'family'\n", + "Unable to gather information for 10.1002/mrm.29371\n", + "Working on 120/280\n", + "Unable to generate bibtext for 10.3390/jpm11070663\n", + "'family'\n", + "Unable to gather information for 10.3390/jpm11070663\n", + "Working on 121/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2305.05984\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2305.05984\n", + "Working on 122/280\n", + "Working on 123/280\n", + "Working on 124/280\n", + "Working on 125/280\n", + "Working on 126/280\n", + "Working on 127/280\n", + "Working on 128/280\n", + "Working on 129/280\n", + "Working on 130/280\n", + "Working on 131/280\n", + "Working on 132/280\n", + "Working on 133/280\n", + "8\n", + "Working on 134/280\n", + "no ss_doi available, I cannot add new bib entry 7593ee0a8242026714b37ba2c2805d1249c82e13\n", + "Unable to gather information for nan\n", + "Working on 135/280\n", + "Working on 136/280\n", + "Working on 137/280\n", + "Working on 138/280\n", + "Working on 139/280\n", + "Working on 140/280\n", + "Working on 141/280\n", + "Working on 142/280\n", + "Working on 143/280\n", + "Working on 144/280\n", + "Working on 145/280\n", + "Working on 146/280\n", + "Working on 147/280\n", + "Working on 148/280\n", + "Working on 149/280\n", + "Working on 150/280\n", + "Working on 151/280\n", + "no ss_doi available, I cannot add new bib entry 8706660fbf3110338bc794e354bc3d1c0075d230\n", + "Unable to gather information for nan\n", + "Working on 152/280\n", + "Working on 153/280\n", + "no ss_doi available, I cannot add new bib entry 877ce11291735ee26d2a618adde8db726808b107\n", + "Unable to gather information for nan\n", + "Working on 154/280\n", + "Working on 155/280\n", + "9\n", + "Working on 156/280\n", + "Working on 157/280\n", + "Working on 158/280\n", + "Working on 159/280\n", + "Working on 160/280\n", + "Working on 161/280\n", + "1\n", + "Working on 162/280\n", + "Working on 163/280\n", + "Working on 164/280\n", + "Working on 165/280\n", + "Working on 166/280\n", + "Working on 167/280\n", + "Working on 168/280\n", + "Working on 169/280\n", + "Working on 170/280\n", + "Working on 171/280\n", + "Working on 172/280\n", + "no ss_doi available, I cannot add new bib entry 926fe98caca0db9cef3d065f90b19ace24dedc76\n", + "Unable to 
gather information for nan\n", + "Working on 173/280\n", + "Working on 174/280\n", + "Working on 175/280\n", + "no ss_doi available, I cannot add new bib entry 952bdfe8a2732d537363028114718edad19bc451\n", + "Unable to gather information for nan\n", + "Working on 176/280\n", + "Working on 177/280\n", + "Working on 178/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2212.13439\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2212.13439\n", + "Working on 179/280\n", + "no ss_doi available, I cannot add new bib entry 99965d464a74d2083ce198156e6b0ca4d043b128\n", + "Unable to gather information for nan\n", + "Working on 180/280\n", + "Working on 181/280\n", + "Working on 182/280\n", + "Working on 183/280\n", + "Working on 184/280\n", + "Working on 185/280\n", + "Working on 186/280\n", + "Working on 187/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2306.10484\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2306.10484\n", + "Working on 188/280\n", + "Working on 189/280\n", + "Unable to generate bibtext for 10.1038/s41585-023-00748-9\n", + "'family'\n", + "Unable to gather information for 10.1038/s41585-023-00748-9\n", + "Working on 190/280\n", + "Working on 191/280\n", + "Working on 192/280\n", + "Working on 193/280\n", + "Working on 194/280\n", + "Working on 195/280\n", + "Working on 196/280\n", + "Working on 197/280\n", + "Working on 198/280\n", + "Working on 199/280\n", + "Working on 200/280\n", + "Working on 201/280\n", + "Working on 202/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2301.06304\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2301.06304\n", + "Working on 203/280\n", + "Working on 204/280\n", + "Working on 205/280\n", + "Working on 206/280\n", + "Working on 207/280\n", + "Working on 208/280\n", + "no ss_doi available, I cannot add new bib entry b8c484519a8970bf4cb07c3c609c41a05365f258\n", + "Unable to gather information for nan\n", + "Working on 209/280\n", + "Working on 210/280\n", + "Working on 211/280\n", + "Working on 212/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2309.03383\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2309.03383\n", + "Working on 213/280\n", + "Working on 214/280\n", + "Unable to generate bibtext for 10.1101/2022.05.17.492245\n", + "local variable 'kind' referenced before assignment\n", + "Unable to gather information for 10.1101/2022.05.17.492245\n", + "Working on 215/280\n", + "Working on 216/280\n", + "Working on 217/280\n", + "Working on 218/280\n", + "Working on 219/280\n", + "Working on 220/280\n", + "Unable to generate bibtext for 10.1186/s13063-020-04595-6\n", + "'family'\n", + "Unable to gather information for 10.1186/s13063-020-04595-6\n", + "Working on 221/280\n", + "Working on 222/280\n", + "Working on 223/280\n", + "Working on 224/280\n", + "Working on 225/280\n", + "Working on 226/280\n", + "Working on 227/280\n", + "Working on 228/280\n", + "Working on 229/280\n", + "no ss_doi available, I cannot add new bib entry cf46e880665be3dd1b2a81cc1be53f1ea9d64de1\n", + "Unable to gather information for nan\n", + "Working on 230/280\n", + "Working on 231/280\n", + "Working on 232/280\n", + "Working on 233/280\n", + "Working on 234/280\n", + "Working on 235/280\n", + "Working on 236/280\n", + "Working on 237/280\n", + "Working on 238/280\n", + "Working on 239/280\n", + "Working on 240/280\n", + "Working on 241/280\n", + "Working on 242/280\n", + "no ss_doi available, I cannot add new bib entry 
ddd8894c4281d91727ab1827501cde67f5cc322d\n", + "Unable to gather information for nan\n", + "Working on 243/280\n", + "Working on 244/280\n", + "Working on 245/280\n", + "Unable to generate bibtext for 10.1111/his.14902\n", + "'family'\n", + "Unable to gather information for 10.1111/his.14902\n", + "Working on 246/280\n", + "Working on 247/280\n", + "Working on 248/280\n", + "Working on 249/280\n", + "Working on 250/280\n", + "no ss_doi available, I cannot add new bib entry e4729ac7bfdb707e3207b0a91b57a2f907f5351b\n", + "Unable to gather information for nan\n", + "Working on 251/280\n", + "Working on 252/280\n", + "Unable to generate bibtext for 10.1148/radiol.2021203633\n", + "'family'\n", + "Unable to gather information for 10.1148/radiol.2021203633\n", + "Working on 253/280\n", + "Working on 254/280\n", + "doi already exists in bib file, I will not add new bib entry 10.1007/978-3-030-00949-6 e717ffb38990d5da64a82f8b8715bfb56daf9762\n", + "Unable to gather information for 10.1007/978-3-030-00949-6\n", + "Working on 255/280\n", + "Working on 256/280\n", + "Working on 257/280\n", + "Unable to generate bibtext for 10.1101/2022.09.02.22279476\n", + "local variable 'kind' referenced before assignment\n", + "Unable to gather information for 10.1101/2022.09.02.22279476\n", + "Working on 258/280\n", + "Unable to generate bibtext for 10.1055/B000000232\n", + "'author'\n", + "Unable to gather information for 10.1055/B000000232\n", + "Working on 259/280\n", + "Working on 260/280\n", + "Working on 261/280\n", + "Working on 262/280\n", + "Working on 263/280\n", + "Unable to generate bibtext for 10.48550/arXiv.2012.04974\n", + "'published'\n", + "Unable to gather information for 10.48550/arXiv.2012.04974\n", + "Working on 264/280\n", + "3\n", + "Working on 265/280\n", + "Working on 266/280\n", + "Working on 267/280\n", + "Working on 268/280\n", + "Working on 269/280\n", + "Working on 270/280\n", + "Working on 271/280\n", + "Working on 272/280\n", + "Unable to generate bibtext for 10.1101/158014\n", + "local variable 'kind' referenced before assignment\n", + "Unable to gather information for 10.1101/158014\n", + "Working on 273/280\n", + "Working on 274/280\n", + "Working on 275/280\n", + "Working on 276/280\n", + "Working on 277/280\n", + "24\n", + "Working on 278/280\n", + "Working on 279/280\n" + ] + } + ], "source": [ "# Iterate through all items in the manually checked csv\n", "blacklist_items = []\n", "items_to_add = ''\n", "items_to_update = []\n", "\n", + "failed_new_items = []\n", + "failed_updated_items = []\n", + "failed_to_find_actions = []\n", + "\n", "#TODO: Make sure new items or updated items in the bib-file include pmid and doi if they did not previously\n", "\n", "for index, bib_item in manually_checked.iterrows():\n", @@ -270,18 +734,19 @@ " # Make sure item is manually checked\n", " if \",\" in bib_item['action']:\n", " print(f\"{bib_item['ss_id']} has not been checked yet, make sure only 1 action is mentioned\")\n", + " failed_to_find_actions.append(bib_item)\n", " continue\n", " #TODO: we will later work from a dropdown-list rather than a comma separated set of actions so this probably will need updating\n", "\n", " # Add new item to diag.bib\n", " elif \"add new item\" in bib_item['action']:\n", - " bib_item_text = get_bib_info(diag_bib, bib_item)\n", + " bib_item_text = get_bib_info(diag_bib_readonly, bib_item)\n", "\n", " if bib_item_text is not None:\n", " items_to_add += bib_item_text\n", " else:\n", " print(f\"Unable to gather information for {bib_item['ss_doi']}\")\n", - " # 
TO-DO APPEND items we were unable to add\n", + " failed_new_items.append(bib_item)\n", "\n", " # Add ss_id to already existing doi in diag.bib\n", " elif \"add ss_id\" in bib_item['action']:\n", @@ -296,74 +761,346 @@ " # Get None items\n", " elif 'None' in bib_item['action']:\n", " continue\n", - "\n", - " # TODO: NOTIFY IF NOTHING IS DONE WITH AN ITEM (NO-MATCH)\n", - "\n", - "# First we open the bib file, add the completely new bib entries and save it:\n", - "#Load diag.bib as a string\n", - "cwd = os.getcwd()\n", - "parent_directory = os.path.abspath(os.path.join(cwd, \"..\"))\n", - "#TODO: in the end when this script is routine this should just read the live diag.bib\n", - "diag_bib_path = os.path.join(parent_directory, 'scripts/script_data/diag_ss.bib')\n", - "with open(diag_bib_path, encoding=\"utf8\") as bibtex_file:\n", - " diag_bib = bibtex_file.read()\n", + " \n", + " else:\n", + " failed_to_find_actions.append(bib_item)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Add new bib entries to the diag.bib file " + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "metadata": {}, + "outputs": [], + "source": [ + "# First we use the bib file string, add the completely new bib entries and save it\n", "# append the new items to the string\n", - "diag_bib += items_to_add \n", - "# save the file to disk (temporarily commented)\n", - "# with open(diag_bib_path, encoding=\"utf8\") as bibtex_file:\n", - "# bibtex_file.write(diag_bib)\n", - "\n", + "diag_bib_readonly += items_to_add \n", + "# save the file to disk \n", + "# TODO : write to correct location\n", + "diag_bib_path_tmp_new = os.path.join(parent_directory, 'scripts/script_data/diag_ss_tmp_new.bib')\n", + "with open(diag_bib_path_tmp_new, 'w', encoding=\"utf8\") as bibtex_file:\n", + " bibtex_file.write(diag_bib)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Update existing bib entries with new ss_ids" + ] + }, + { + "cell_type": "code", + "execution_count": 137, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "14198a817d2c0a800bfb0a0a36baf5097fe22054 added to diag_bib_raw\n", + "14fcdfdd2b15f6fec9b9e7b1b4189e43281273d8 added to diag_bib_raw\n", + "1d3c69edf9e573412de0c758b3db1b8f9996f2c1 added to diag_bib_raw\n", + "1dddfd64c4a40269d63014b21ed3ed436f38b98b added to diag_bib_raw\n", + "2125835bf1c4fd0646b5dd50855d647044c07658 added to diag_bib_raw\n", + "24b8cee45431f633d2fa6e3c05670f62b1e41e7e added to diag_bib_raw\n", + "2723ce1686eea776df179e362cd9a8b8e2bb7ff1 added to diag_bib_raw\n", + "2a7dafe1287670068300ff77401923f7e151b9f4 added to diag_bib_raw\n", + "2e571724830cb8ca6e8dbe9cc1f92fdcfc517ec4 added to diag_bib_raw\n", + "2fe9af8a6b41fc9db41e76621f037aac453ab433 added to diag_bib_raw\n", + "34559bb0d95c5166625945eef9b53b21a30838fa added to diag_bib_raw\n", + "349ca29f588b9c785085da7147a4b58df032a8bf added to diag_bib_raw\n", + "374f4a7676183c95f901e655f2caf170cdd9ec9d added to diag_bib_raw\n", + "37e383517c34818ad049af0aa763ad5906e9f51a added to diag_bib_raw\n", + "42315be6452636824d9004d5d8aa2fe8924494a2 added to diag_bib_raw\n", + "44454c9090606d0332272ff38df6c87eac15f5f7 added to diag_bib_raw\n", + "4f016eb85905d24ce82adeee28bdf66b46870c9e added to diag_bib_raw\n", + "589037d110f74defb9cac3b42a370f7b044a1c4b added to diag_bib_raw\n", + "6ab3424f10b236c992d823568dfca2075e2ad46e added to diag_bib_raw\n", + "6d1ea27b41023f9add67e2c8c4dcbc7866ae640b added to diag_bib_raw\n", + 
"740d5d34fcd714870ddf0073fd8956db023319f0 added to diag_bib_raw\n", + "80af090645088134f058db53a708b7092dd28786 added to diag_bib_raw\n", + "862ef80662ea6cd8e646df642abc3fd343263191 added to diag_bib_raw\n", + "8a039fe22daf6f65a32ba02035f45a8c67b48339 added to diag_bib_raw\n", + "8b92dcfb8d8b92314d63de92852a28880a81f4ea added to diag_bib_raw\n", + "8c9301f67b46bbac884c588232ebca05dd3ba8ae added to diag_bib_raw\n", + "8ce6b544554a79e077e5fc52f55ba8234ce606d4 added to diag_bib_raw\n", + "974e8ae2bfa594ba157da2794a3254d12eb7bf26 added to diag_bib_raw\n", + "9a749224752f101c29f177f941bb5c967855db27 added to diag_bib_raw\n", + "a11db962a303ac6cb6ff4216659189c2ed378c21 added to diag_bib_raw\n", + "a1b328c04b54decb972bf83458f9df4ab5608af3 added to diag_bib_raw\n", + "a46c8e13b227cfc9d208915fdc79a6aff9fc58ea added to diag_bib_raw\n", + "ab07173f4e352d07f48eddd67136e8e33573aecf added to diag_bib_raw\n", + "c21f83954bab86611134478ab918399813c10313 added to diag_bib_raw\n", + "c37197020c3e86415f814d66e49de6f11d7cdbf1 added to diag_bib_raw\n", + "cb774bdd19514d661ab76d0d30d158e4e74a859f added to diag_bib_raw\n", + "cc7605f2b7e61723f12839fabc1066da0cc8744b added to diag_bib_raw\n", + "ce40309e3a7d5319ea2337c2d44f75bca3761e78 added to diag_bib_raw\n", + "d6e975989345b69f539dbb8f22cb3437f5cc5039 added to diag_bib_raw\n", + "e4db301a185bccd105017fb66e3f9e2adf876495 added to diag_bib_raw\n", + "e844b6b027c94468de8a607497f95a3771b7d48b added to diag_bib_raw\n", + "ed8cbf0b3373cf1dcadd2718dfd3daf6fbe068f3 added to diag_bib_raw\n", + "ee17bc918583166ad08ff307be5f77cb130486ea added to diag_bib_raw\n", + "f131ef217543d179269018950bf3b6ba2b30f3b1 added to diag_bib_raw\n", + "f643f4a927cf65f1ec66231ae76d3bc1736a67d3 added to diag_bib_raw\n", + "fc12f80e0fe56243c26f628d311577507f34b39c added to diag_bib_raw\n" + ] + } + ], + "source": [ "# Second we re-open the bib file using the read_bibfile method and update existing items with new ss_ids\n", - "diag_bib_raw = read_bibfile(None, diag_bib_path)\n", + "# TODO read from correct location here\n", + "diag_bib_raw = read_bibfile(None, diag_bib_path_tmp_new)\n", "for item_to_update in items_to_update:\n", - " diag_bib_raw=add_ss_id_to_existing_bibkey(diag_bib_raw, item_to_update[\"ss_id\"], item_to_update[\"bibkey\"])\n", - "# and save the bibfile with the newly added ss_ids (temporarily commented)\n", - "# save_to_file(diag_bib_raw, diag_bib_path)\n", + " [diag_bib_raw, result] = add_ss_id_to_existing_bibkey(diag_bib_raw, item_to_update[\"ss_id\"], item_to_update[\"bibkey\"])\n", + " if(result=='Fail'):\n", + " failed_updated_items.append(item_to_update)\n", "\n", - "# Third we update the blacklist (temporarily commented)\n", - "blacklist = pd.read_csv('./script_data/blacklist.csv')\n", - "# update_blacklist_csv(blacklist, blacklist_items)\n", "\n", - "# TODO: Here we should provide a report of rows where we did not know what to do or we failed to do the action\n", - "print(\"DONE with processing manually checked items\")" + "#Note we are not writing the file yet as we will use the same diag_bib_raw and update the citations on it first\n" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": { "pycharm": { - "name": "#%%\n" + "name": "#%% md\n" } }, - "outputs": [], "source": [ - "count = items_to_add.count('{yes}')\n", - "print(f\"Newly added items: {count}\")" + "# Update citation counts" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 141, "metadata": { "pycharm": { "name": "#%%\n" } }, + 
"outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Abra08a 287 21\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Boo09 38 8\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Dana99 1745 1000\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Ginn01a 51 27\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Hu19 12 0\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Kars90 26 7\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Kars91 28 15\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Kars96b 91 39\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Litj17 3681 1000\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Murp10a 45 28\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Niem09c 74 41\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... Staa04a 2720 1000\n", + "adding gscites Tell21 154\n", + "adding gscites Teuw18 0\n", + "adding gscites Thag23 2\n", + "adding gscites Thee20 0\n", + "adding gscites Thij23 0\n", + "updating Timp02 from 10 to 11\n", + "updating Timp10 from 24 to 35\n", + "updating Trom12 from 3 to 4\n", + "adding gscites Turn21 4\n", + "adding gscites Valk19a 16\n", + "updating Vare05 from 46 to 52\n", + "updating Veli08d from 4 to 6\n", + "warning: num citations calculated for this bibkey is much lower than previously suggested.... 
Veli09 62 1\n", + "updating Veli09a from 2 to 3\n", + "updating Veli12 from 30 to 34\n", + "updating Veli13 from 36 to 45\n", + "updating Velz20 from 10 to 121\n", + "updating Ven11a from 8 to 12\n", + "updating Ven13b from 40 to 45\n", + "updating Ven16a from 15 to 21\n", + "updating Ven16f from 6 to 7\n", + "updating Vend17c from 18 to 36\n", + "adding gscites Vend18 54\n", + "adding gscites Venh15a 0\n", + "updating Venh15b from 45 to 72\n", + "adding gscites Venh15c 13\n", + "adding gscites Venh16a 11\n", + "updating Venh17a from 37 to 89\n", + "updating Venh17b from 59 to 112\n", + "adding gscites Venh18 112\n", + "adding gscites Venk21 51\n", + "adding gscites Venk23 1\n", + "adding gscites Vent20 2\n", + "adding gscites Vent21 16\n", + "adding gscites Vent23 6\n", + "updating Veta18 from 56 to 205\n", + "adding gscites Vina22 3\n", + "adding gscites Vink88 19\n", + "adding gscites Vlie22 12\n", + "updating Voge05 from 5 to 6\n", + "updating Voge07 from 43 to 45\n", + "updating Vos08 from 103 to 104\n", + "updating Vos09 from 14 to 15\n", + "updating Vos13 from 150 to 162\n", + "adding gscites Vos19 72\n", + "adding gscites Vos21 5\n", + "updating Vree17 from 21 to 39\n", + "updating Vree18 from 11 to 17\n", + "updating Vree18b from 14 to 24\n", + "updating Vree18c from 14 to 34\n", + "updating Vree18d from 25 to 44\n", + "adding gscites Vug18 0\n", + "updating Vuka11 from 4 to 6\n", + "updating Vuka12 from 24 to 28\n", + "updating Waal15 from 13 to 17\n", + "updating Wand17 from 57 to 102\n", + "updating Wand17a from 28 to 54\n", + "adding gscites Wild21 1\n", + "adding gscites Wild23a 1\n", + "adding gscites Wild23b 1\n", + "adding gscites Wild23c 0\n", + "updating Wink15a from 51 to 65\n", + "adding gscites Wink21 18\n", + "updating Witt11 from 21 to 24\n", + "updating Witt12 from 31 to 41\n", + "adding gscites Witt12a 17\n", + "updating Witt12b from 11 to 14\n", + "updating Xie20 from 5 to 91\n", + "adding gscites Xie21 0\n", + "adding gscites Xie23 2\n", + "adding gscites Xie23b 0\n", + "adding gscites Yu20 24\n", + "updating Zaid18 from 15 to 45\n", + "updating Zels15 from 19 to 34\n", + "updating Zels17b from 32 to 51\n", + "updating Zels18 from 23 to 51\n", + "updating Zels18a from 22 to 44\n", + "adding gscites Zels19a 14\n", + "adding gscites Zhou20 334\n", + "done updating citations\n" + ] + } + ], + "source": [ + "diag_bib_raw_new_cits = update_citation_count(diag_bib_raw)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 143, + "metadata": {}, "outputs": [], "source": [ - "print(f\"Blacklisted items: {len(blacklist_items)}\")" + "# TODO: update to the correct output path\n", + "save_to_file(diag_bib_raw_new_cits, None, diag_bib_path_tmp_new)" ] }, { "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" + "metadata": {}, + "source": [ + "# Update the blacklist" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'39 items added to blacklist'" + ] + }, + "execution_count": 142, + "metadata": {}, + "output_type": "execute_result" } - }, + ], "source": [ - "# Update citation counts" + "# Last we update the blacklist (temporarily commented) (what failures can happen here?)\n", + "blacklist_df = pd.read_csv('./script_data/blacklist.csv')\n", + "# TODO: fix to correct output location\n", + "blacklist_out_file = './script_data/blacklist_tmp_updated.csv'\n", + "# file writing\n", + "update_blacklist_csv(blacklist_df, blacklist_items, blacklist_out_file)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 135, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DONE with processing manually checked items\n", + "Failures are as follows:\n", + "Failed to add new bib entry 03ad8d7078805db6fbd4993b881045b462b4e028\n", + "Failed to add new bib entry 06ee6ed85131848ef70da625806ba480915aa2e0\n", + "Failed to add new bib entry 0b78520bea8310ff375cf953bbde10082db0eede\n", + "Failed to add new bib entry 0df7a4f26d57eb58fe628316aa5e84e5ca474ee8\n", + "Failed to add new bib entry 0ebe8ab65571514718283cd2d8ac7277db3513c5\n", + "Failed to add new bib entry 0f27fc10d593859a440c6ccf901d5093f67939bd\n", + "Failed to add new bib entry 17b918178a85cdb670be7521e6cef3b4dbffb16b\n", + "Failed to add new bib entry 1d2109f8ec43c23db647c4778a5bb5846074e575\n", + "Failed to add new bib entry 202f393ad41b85acbc59a28e5080d19c9de56988\n", + "Failed to add new bib entry 233a8c1b929ccbb0f4a31720919e2b9f413a239c\n", + "Failed to add new bib entry 269e8609dff88d78e8e3c41f81a97199c9add3dd\n", + "Failed to add new bib entry 2f2182f8e55be5a85c1316cd1b181cd5c85c106c\n", + "Failed to add new bib entry 32af51ced47419cff26fde66cce602fbab2f238a\n", + "Failed to add new bib entry 362c510dec0d566d22d5be3af0519fc7eec8bb86\n", + "Failed to add new bib entry 36fb5c86a92b941f0754cb864e2c4c70b21b7b7d\n", + "Failed to add new bib entry 5a09637587e694a03f68a4ee1046d31aa97fd0c0\n", + "Failed to add new bib entry 5cf666b6326b85a31b4e2759031392f0a49351b2\n", + "Failed to add new bib entry 663336d2c2efa0a4bfe8a4988eb8d1b87e9a7403\n", + "Failed to add new bib entry 6769e24a1e5a4b5841fd8bcb0b8daa3051b52214\n", + "Failed to add new bib entry 67f07af40a5c7e2b008509e4e8f61030ce9f85ab\n", + "Failed to add new bib entry 7593ee0a8242026714b37ba2c2805d1249c82e13\n", + "Failed to add new bib entry 8706660fbf3110338bc794e354bc3d1c0075d230\n", + "Failed to add new bib entry 877ce11291735ee26d2a618adde8db726808b107\n", + "Failed to add new bib entry 926fe98caca0db9cef3d065f90b19ace24dedc76\n", + "Failed to add new bib entry 952bdfe8a2732d537363028114718edad19bc451\n", + "Failed to add new bib entry 98cec4020ccc1ef0399b4f866544a30fb550d34c\n", + "Failed to add new bib entry 99965d464a74d2083ce198156e6b0ca4d043b128\n", + "Failed to add new bib entry 9ddb2f47695191553a3623ac33eddeb9c7e416cd\n", + "Failed to add new bib entry a0f7cee93c06634d1945b614463977548c2b94bd\n", + "Failed to add new bib entry af346d53f267840fc87db7ff4f1ff1b97cfe713a\n", + "Failed to add new bib entry b8c484519a8970bf4cb07c3c609c41a05365f258\n", + "Failed to add new bib entry be95c8bbd8a4297d620b1c2644cf2a898603e355\n", + "Failed to add new bib entry c0f6940d1af8063139d99b12f7e451169278ec33\n", + "Failed to add new bib entry c705e376f3d2aaf91deb1ca806e838d94a38dda8\n", + "Failed to add new bib entry cf46e880665be3dd1b2a81cc1be53f1ea9d64de1\n", + "Failed to add new bib entry ddd8894c4281d91727ab1827501cde67f5cc322d\n", + "Failed to add new bib entry e1d86927b130950ef8c67caa76364bb336b083d7\n", + "Failed to add new bib entry e4729ac7bfdb707e3207b0a91b57a2f907f5351b\n", + "Failed to add new bib entry e4dbe4f2c08eeae4cab346e24327eefa65e44191\n", + "Failed to add new bib entry e717ffb38990d5da64a82f8b8715bfb56daf9762\n", + "Failed to add new bib entry ea9cff43d07c6e1e63dc9d88ff13c8ab7e5380af\n", + "Failed to add new bib entry eb6e86cf9697391771d1e2fbad3f49c1448e3411\n", + "Failed to add new bib entry efaa4e593dbf76786a33dfd2dff79b77396397cc\n", + "Failed to add new bib entry 
f9166fa0c5c102618d890bdeab63aca74b017c45\n", + "Failed to find valid action for item 03ef312b3d3e616fd7f0a2f2260c82ad62ed7ef1 [add ss_id, blacklist ss_id, add new item]\n" + ] + } + ], + "source": [ + "# TODO: Here we provide a report of rows where we did not know what to do or we failed to do the action\n", + "print(\"DONE with processing manually checked items\")\n", + "print('Failures are as follows:')\n", + "for item in failed_new_items:\n", + " print('Failed to add new bib entry ', item['ss_id'])\n", + "for item in failed_updated_items:\n", + " print('Failed to update exiting bib entry with new ss_id', item['bibkey'], item['ss_id'])\n", + "for item in failed_to_find_actions:\n", + " print('Failed to find valid action for item', item['ss_id'], item['action'])" ] }, { @@ -376,10 +1113,8 @@ }, "outputs": [], "source": [ - "path_diag_bib = os.path.join('script_data/', 'diag_ss.bib')\n", - "update_citation_count(path_diag_bib)\n", - "path_output_diag_bib = os.path.join('script_data/', 'diag_ss_new.bib')\n", - "save_to_file(diag_bib_raw, None, path_output_diag_bib)" + "count = items_to_add.count('{yes}')\n", + "print(f\"Newly added items: {count}\")" ] }, { @@ -392,11 +1127,7 @@ }, "outputs": [], "source": [ - "a = \"string\"\n", - "b = None\n", - "if b is not None:\n", - " c = a + b\n", - " print(c)" + "print(f\"Blacklisted items: {len(blacklist_items)}\")" ] }, { diff --git a/scripts/script_data/blacklist_tmp_updated.csv b/scripts/script_data/blacklist_tmp_updated.csv new file mode 100644 index 0000000..50313a3 --- /dev/null +++ b/scripts/script_data/blacklist_tmp_updated.csv @@ -0,0 +1,146 @@ +staff_id,staff_name,ss_year,ss_id,title,doi,Should be in diag.bib,Reason +8038506.0,Bram van Ginneken,2023,e738d94746b0f33b1c48737022ab186618d4d4cd,SimpleRad: Patient-Friendly Dutch Radiology Reports,10.1007/978-3-031-28241-6_18,no,Lecture notes +8038506.0,Bram van Ginneken,2022,2d49fb82fd715a213e2149f9f08dd2fb4749b235,Challenges in digital medicine applications in under-resourced settings,10.1038/s41467-022-30728-3,no,Interviewing article with experts +8038506.0,Bram van Ginneken,2022,2f2182f8e55be5a85c1316cd1b181cd5c85c106c,Metrics reloaded: Pitfalls and recommendations for image analysis validation,10.48550/arXiv.2206.01653,no,211 page document about Image analysis +8038506.0,Bram van Ginneken,2022,bc6b483f9b1fa630fa4b43158b13716f1ca7497b,Domain adaptation strategies for cancer-independent detection of lymph node metastases,10.48550/arXiv.2207.06193,no,"Arxiv paper, no published tag" +8038506.0,Bram van Ginneken,2019,14701232a78e72a3a780d01e59c943037e21902f,Genetic landscape of chronic obstructive pulmonary disease identifies heterogeneous cell type and phenotype associations,10.1038/s41588-018-0342-2,no,COPD Genetics Consortium +8038506.0,Bram van Ginneken,2018,6b604b6896cfd670bd9afe4172a2e5bf591b0cc2,MA20.09 Improved Lung Cancer and Mortality Prediction Accuracy Using Survival Models Based on Semi-Automatic CT Image Measurements,10.1016/J.JTHO.2018.08.488,no,Oral abstract +8038506.0,Bram van Ginneken,2018,76e5406b159352dc13b051f3e1b7259abb499317,Real-Life Artificial Intelligence Applications,10.5334/JBSR.1656,no,Short abstract +8038506.0,Bram van Ginneken,2018,d1e0a041fc417d82c584191ecf5b3b536127d9e0,"Erratum: “Computed tomography quantification of tracheal abnormalities in COPD and their influence on airflow limitation.” [Med. 
Phys., 44: 3594-3603 (2017)]",10.1002/MP.13065,no,Eratum +8038506.0,Bram van Ginneken,2018,f886ee663da1c3987b5828d62bd8176f6f3dc79e,ES01.03 Deep Machine Learning for Screening LDCT,10.1016/j.jtho.2018.08.020,no,Education Session +8038506.0,Bram van Ginneken,2017,63c9344c2046a0403941eb8a91d307b9e1190928,MA 14.11 Malignancy Risk Prediction of Pulmonary Nodule in Lung Cancer Screening – Diameter Or Volumetric Measurement,10.1016/J.JTHO.2017.09.580,no,Oral abstract +8038506.0,Bram van Ginneken,2015,2fa5f435f41242e8ef053e94ae7af400014ca7d3,"Erratum: Towards a close computed tomography monitoring approach for screen detected subsolid pulmonary nodules? (vol 45, pg 765, 2015)",10.1183/09031936.50005914,no,Eratum +8038506.0,Bram van Ginneken,2015,368d4191051016d050fd0c7bd1e166ada03ce00a,Detection of Lung Nodules in Chest Radiography,10.1201/B18191-11,no,Chapter in book +8038506.0,Bram van Ginneken,2015,f6f8f2eea0b4992cc64dc0d850061eee8837ecb7,Reproducibility of airway wall thickness measurements on CT in a lung cancer screening setting,10.1183/13993003.CONGRESS-2015.PA762,no,Conference abstract +8038506.0,Bram van Ginneken,2015,f7fc14ab71c783222a73599cc2dfd4e4e19e4b78,Bone density is associated with emphysema and air trapping on CT in smokers,10.1183/13993003.CONGRESS-2015.PA3754,no,Conference abstract +123637526.0,Bram van Ginneken,2023,d4fcad866167d5eeb2e0b1f18027c65055b97ad2,Prevalence of abnormal spirometry in individuals with a smoking history and no known obstructive lung disease,10.1016/j.rmed.2023.107126,no,COPD Investigators +123637526.0,Bram van Ginneken,2022,8886b367e66dbb154444b2d28b65718e3835fa02,Lung tissue shows divergent gene expression between chronic obstructive pulmonary disease and idiopathic pulmonary fibrosis,10.1186/s12931-022-02013-w,no,COPD Investigators +123637526.0,Bram van Ginneken,2021,45e4783358f99dc26ef9796a75aa9f22f8e003e5,Computer-aided diagnosis of masses in breast CT imaging: combined power of handcrafted and deep learning radiomics,10.1016/s1120-1797(22)00024-2,no,Conference live session +123637526.0,Bram van Ginneken,2021,cd44e3862af36a57aa951db042cacd7d06c43509,Mucus plugging on computed tomography and chronic bronchitis in chronic obstructive pulmonary disease,10.1186/s12931-021-01712-0,no,COPD Investigators +123637526.0,Bram van Ginneken,2020,5fe95d30f91811c50743420bd6623bde09b99e1a,Vitamin D deficiency is associated with respiratory symptoms and airway wall thickening in smokers with and without COPD: a prospective cohort study,10.1186/s12890-020-1148-4,no,COPD Investigators +123637526.0,Bram van Ginneken,2018,682e8febc5d053766fefe1ee2de5ca2ff39762f2,Author Correction: White Matter and Gray Matter Segmentation in 4D Computed Tomography,10.1038/s41598-018-25729-6,no,Author Correction +143613202.0,Francesco Ciompi,2023,6ab3424f10b236c992d823568dfca2075e2ad46e,Abstract P2-11-34: Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy,10.1158/1538-7445.sabcs22-p2-11-34,no,Abstract +143613202.0,Francesco Ciompi,2023,f63d82385cfe245d5b4241baabf417ab4476912b,Multi-resolution deep learning characterizes tertiary lymphoid structures in solid tumors,10.1101/2023.04.03.535381,no,doi has 'type' tag: 'posted-content'. 
Can be result of preprint article +143613202.0,Francesco Ciompi,2022,2d9d26e7402817aed359cb3fb029d9082183f007,14P Deep learning-based quantification of immune infiltrate for predicting response to pembrolizumab from pre-treatment biopsies of metastatic non-small cell lung cancer: A study on the PEMBRO-RT phase II trial,10.1016/j.iotech.2022.100119,no,Abstract +143613202.0,Francesco Ciompi,2022,631c4938fa46537e91e1420fb9fb1a5428114c61,Predicting pathological complete response to neoadjuvant chemotherapy in breast cancer from routine diagnostic histopathology biopsies,10.1101/2022.11.11.22282205,no,preprint +143613202.0,Francesco Ciompi,2020,889cc0565d6e2e844f4db0132c71a612672d2497,Pitfalls in assessing stromal tumor infiltrating lymphocytes (sTILs) in breast cancer,10.1038/s41523-020-0156-0,no,International Immuno-Oncology Biomarker Working Group +143613202.0,Francesco Ciompi,2020,bd6954422d3582093d26b76ac743991219c91351,Deep learning enables fully automated mitotic density assessment in breast cancer histopathology,10.1016/s0959-8049(20)30764-4,no,Poster Presentation +143613202.0,Francesco Ciompi,2020,c52daf1cb971120c4083116cfc213acbaac6faaf,Application of a risk-management framework for integration of stromal tumor-infiltrating lymphocytes in clinical trials,10.1038/s41523-020-0155-1,no,International Immuno-Oncology Biomarker Working Group +8038506.0,Bram van Ginneken,2023,e738d94746b0f33b1c48737022ab186618d4d4cd,SimpleRad: Patient-Friendly Dutch Radiology Reports,10.1007/978-3-031-28241-6_18,no,Lecture notes +8038506.0,Bram van Ginneken,2022,2d49fb82fd715a213e2149f9f08dd2fb4749b235,Challenges in digital medicine applications in under-resourced settings,10.1038/s41467-022-30728-3,no,Interviewing article with experts +8038506.0,Bram van Ginneken,2022,2f2182f8e55be5a85c1316cd1b181cd5c85c106c,Metrics reloaded: Pitfalls and recommendations for image analysis validation,10.48550/arXiv.2206.01653,no,211 page document about Image analysis +8038506.0,Bram van Ginneken,2022,bc6b483f9b1fa630fa4b43158b13716f1ca7497b,Domain adaptation strategies for cancer-independent detection of lymph node metastases,10.48550/arXiv.2207.06193,no,"Arxiv paper, no published tag" +8038506.0,Bram van Ginneken,2019,14701232a78e72a3a780d01e59c943037e21902f,Genetic landscape of chronic obstructive pulmonary disease identifies heterogeneous cell type and phenotype associations,10.1038/s41588-018-0342-2,no,COPD Genetics Consortium +8038506.0,Bram van Ginneken,2018,6b604b6896cfd670bd9afe4172a2e5bf591b0cc2,MA20.09 Improved Lung Cancer and Mortality Prediction Accuracy Using Survival Models Based on Semi-Automatic CT Image Measurements,10.1016/J.JTHO.2018.08.488,no,Oral abstract +8038506.0,Bram van Ginneken,2018,76e5406b159352dc13b051f3e1b7259abb499317,Real-Life Artificial Intelligence Applications,10.5334/JBSR.1656,no,Short abstract +8038506.0,Bram van Ginneken,2018,d1e0a041fc417d82c584191ecf5b3b536127d9e0,"Erratum: “Computed tomography quantification of tracheal abnormalities in COPD and their influence on airflow limitation.” [Med. 
Phys., 44: 3594-3603 (2017)]",10.1002/MP.13065,no,Eratum +8038506.0,Bram van Ginneken,2018,f886ee663da1c3987b5828d62bd8176f6f3dc79e,ES01.03 Deep Machine Learning for Screening LDCT,10.1016/j.jtho.2018.08.020,no,Education Session +8038506.0,Bram van Ginneken,2017,63c9344c2046a0403941eb8a91d307b9e1190928,MA 14.11 Malignancy Risk Prediction of Pulmonary Nodule in Lung Cancer Screening – Diameter Or Volumetric Measurement,10.1016/J.JTHO.2017.09.580,no,Oral abstract +8038506.0,Bram van Ginneken,2015,2fa5f435f41242e8ef053e94ae7af400014ca7d3,"Erratum: Towards a close computed tomography monitoring approach for screen detected subsolid pulmonary nodules? (vol 45, pg 765, 2015)",10.1183/09031936.50005914,no,Eratum +8038506.0,Bram van Ginneken,2015,368d4191051016d050fd0c7bd1e166ada03ce00a,Detection of Lung Nodules in Chest Radiography,10.1201/B18191-11,no,Chapter in book +8038506.0,Bram van Ginneken,2015,f6f8f2eea0b4992cc64dc0d850061eee8837ecb7,Reproducibility of airway wall thickness measurements on CT in a lung cancer screening setting,10.1183/13993003.CONGRESS-2015.PA762,no,Conference abstract +8038506.0,Bram van Ginneken,2015,f7fc14ab71c783222a73599cc2dfd4e4e19e4b78,Bone density is associated with emphysema and air trapping on CT in smokers,10.1183/13993003.CONGRESS-2015.PA3754,no,Conference abstract +123637526.0,Bram van Ginneken,2023,d4fcad866167d5eeb2e0b1f18027c65055b97ad2,Prevalence of abnormal spirometry in individuals with a smoking history and no known obstructive lung disease,10.1016/j.rmed.2023.107126,no,COPD Investigators +123637526.0,Bram van Ginneken,2022,8886b367e66dbb154444b2d28b65718e3835fa02,Lung tissue shows divergent gene expression between chronic obstructive pulmonary disease and idiopathic pulmonary fibrosis,10.1186/s12931-022-02013-w,no,COPD Investigators +123637526.0,Bram van Ginneken,2021,45e4783358f99dc26ef9796a75aa9f22f8e003e5,Computer-aided diagnosis of masses in breast CT imaging: combined power of handcrafted and deep learning radiomics,10.1016/s1120-1797(22)00024-2,no,Conference live session +123637526.0,Bram van Ginneken,2021,cd44e3862af36a57aa951db042cacd7d06c43509,Mucus plugging on computed tomography and chronic bronchitis in chronic obstructive pulmonary disease,10.1186/s12931-021-01712-0,no,COPD Investigators +123637526.0,Bram van Ginneken,2020,5fe95d30f91811c50743420bd6623bde09b99e1a,Vitamin D deficiency is associated with respiratory symptoms and airway wall thickening in smokers with and without COPD: a prospective cohort study,10.1186/s12890-020-1148-4,no,COPD Investigators +123637526.0,Bram van Ginneken,2018,682e8febc5d053766fefe1ee2de5ca2ff39762f2,Author Correction: White Matter and Gray Matter Segmentation in 4D Computed Tomography,10.1038/s41598-018-25729-6,no,Author Correction +143613202.0,Francesco Ciompi,2023,6ab3424f10b236c992d823568dfca2075e2ad46e,Abstract P2-11-34: Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy,10.1158/1538-7445.sabcs22-p2-11-34,no,Abstract +143613202.0,Francesco Ciompi,2023,f63d82385cfe245d5b4241baabf417ab4476912b,Multi-resolution deep learning characterizes tertiary lymphoid structures in solid tumors,10.1101/2023.04.03.535381,no,doi has 'type' tag: 'posted-content'. 
Can be result of preprint article +143613202.0,Francesco Ciompi,2022,2d9d26e7402817aed359cb3fb029d9082183f007,14P Deep learning-based quantification of immune infiltrate for predicting response to pembrolizumab from pre-treatment biopsies of metastatic non-small cell lung cancer: A study on the PEMBRO-RT phase II trial,10.1016/j.iotech.2022.100119,no,Abstract +143613202.0,Francesco Ciompi,2022,631c4938fa46537e91e1420fb9fb1a5428114c61,Predicting pathological complete response to neoadjuvant chemotherapy in breast cancer from routine diagnostic histopathology biopsies,10.1101/2022.11.11.22282205,no,preprint +143613202.0,Francesco Ciompi,2020,889cc0565d6e2e844f4db0132c71a612672d2497,Pitfalls in assessing stromal tumor infiltrating lymphocytes (sTILs) in breast cancer,10.1038/s41523-020-0156-0,no,International Immuno-Oncology Biomarker Working Group +143613202.0,Francesco Ciompi,2020,bd6954422d3582093d26b76ac743991219c91351,Deep learning enables fully automated mitotic density assessment in breast cancer histopathology,10.1016/s0959-8049(20)30764-4,no,Poster Presentation +143613202.0,Francesco Ciompi,2020,c52daf1cb971120c4083116cfc213acbaac6faaf,Application of a risk-management framework for integration of stromal tumor-infiltrating lymphocytes in clinical trials,10.1038/s41523-020-0155-1,no,International Immuno-Oncology Biomarker Working Group +4960344.0,James Meakin,2022,8526bb601d889c7c3924b2a290656a37785d184b,Erratum to: Velocity‐selective arterial spin labeling perfusion MRI: A review of the state of the art and recommendations for clinical implementation (Magn Reson Med. 2022; 88:1528–1547),10.1002/mrm.29504,no,erratum +2116215861.0,Ajay Patel,2017,bd9e7ce901d9c8f745a67bad65308c06434be4ca,"Implementing Pneumococcal Vaccination in Hospitalized Adults with COPD, Asthma, Current Smokers, and/or Over Age 65 Years. 
A Performance Improvement Project",10.1093/OFID/OFX163.1199,no,poster abstract +123637526.0,Bram van Ginneken,2018,494b9b7c529517bb40c67653556cf17164948942,Blood eosinophil count thresholds and exacerbations in patients with chronic obstructive pulmonary disease,10.1016/j.jaci.2018.04.010,no,COPDGene and ECLIPSE Investigators +123637526.0,Bram van Ginneken,2020,a883405868815a36ab2f32d7c678f5447d6c1e01,Disease Progression Modelling in Chronic Obstructive Pulmonary Disease (COPD).,10.1164/rccm.201908-1600OC,no,COPDGene and ECLIPSE Investigators +2109170880.0,"Ajay Patel, Rashindra Manniesing",2016,e7abcec2ecff1af9b9fe8c480b18b17f0ca5d977,Automatic interpretation of 4D computed tomography images in acute stroke,10.1117/2.1201609.006617,no,SPIE newsroom article +2074975080.0,Matthieu Rutten,2022,1a69398886fc36dfd3d46819aa5000cbbb549cf2,"Abstract PD11-05: Intelligent shear-wave elastography to reduce unnecessary biopsies in breast cancer diagnosis (INSPiRED 002): An international, multicenter analysis",10.1158/1538-7445.sabcs21-pd11-05,no,Abstract +145441238.0,Jeroen van der Laak,2023,ef5041166bb1482a6d2f33d308c82f6158f90d6d,The Banff 2022 Kidney Meeting Work Plan: Data-Driven Refinement of the Banff Classification for Renal Allografts,10.2139/ssrn.4444233,no,meeting notes +145441238.0,Jeroen van der Laak,2023,3c75e430d92041b23d77e4c8675fb06ab2f65b04,"What happened in Digital and Computational Pathology at the European Congress of Pathology, Basel, 2022?",10.47184/tp.2023.01.04,no,WG report/ blogpost on ECP 2022 +32649341.0,Jonas Teuwen,2018,71e3ac25735f2b8559f9d444cf31dff7a34569a2,[I253] Basics of deep learning,10.1016/J.EJMP.2018.06.325,no,abstract +2913408.0,Nikolas Lessman,2020,a45dad8a11aa9987154df9dd996c91c86677ac06,The risk of cardiovascular disease in irradiated breast cancer patients: The role of cardiac calcifications and adjuvant treatment,10.1016/s0959-8049(20)30543-8,no,Oral abstract +1745574.0,Nico Karssemeijer,2019,4c6cc85b59d663bc71c6edb3dbfa355cd1711a2d,Abstract P6-13-01: MRI breast cancer screening compared to mammography in women with a familial risk: A multicenter randomized controlled trial,10.1158/1538-7445.SABCS18-P6-13-01,no,abstract +145388932.0,Jeroen van der Laak,2020,65013e6ab482551a6671ff916eff71a02586f451,Abstract 2113: Discrimination of benign breast disease from normal lobules using an automated computational pathology algorithm,10.1158/1538-7445.am2020-2113,no,abstract +145388932.0,Jeroen van der Laak,2019,720dcc4174c00f80e18070b98ced8ec380bc3097,Abstract 651: Analysis of the immune microenvironment to advance breast cancer risk prediction and prevention,10.1158/1538-7445.SABCS18-651,no,abstract +1745574.0,Nico Karssemeijer,2019,e92f136f51e91eb012751fe91122788f9d3de75e,Abstract MS1-2: Deep learning systems for reading mammograms and breast tomosynthesis,10.1158/1538-7445.sabcs18-ms1-2,no,abstract +1745574.0,Nico Karssemeijer,2020,7e1445ce8a21ee15df11d4abe91a40662ccce538,Breast Lesion Excision System as a treatment method for small invasive breast cancers,10.1016/s0959-8049(20)30589-x,no,poster +32649341.0,Jonas Teuwen,2020,ef2b9224e6cb13dfcc23d04ec68ce39c89e8cab0,PH-0127: Quantifying intra-fractional gastric wall motion for MR-guided radiotherapy,10.1016/S0167-8140(21)00154-7,no,poster +32649341.0,Jonas Teuwen,2020,74be9a2d4d628b5e8477190031857fd46ff1d469,PO-0992: Pericardial effusion after radiotherapy for Non-Small Cell Lung Cancer,10.1016/S0167-8140(21)01009-4,no,presentation at ESTRO +32649341.0,Jonas 
Teuwen,2020,b037dc1dfed5de9a4834d58ea24495125f13201b,PO-1747: Segmentation of the heart using a Residual Unet model,10.1016/S0167-8140(21)01765-5,no,presentation at ESTRO +2913408.0,Nikolas Lessman,2020,5bfffb5745676f0369372f246c27939760f05f4c,OC-0109: Cardiovascular risk assessment based on cardiac calcifications on breast RT planning CT scans,10.1016/S0167-8140(21)00135-3,no,"presentation, paper has different doi" +2074975080.0,Matthieu Rutten,2020,31f7b71202b51458d19599160be61c9052476c29,Erratum: Present Status of Musculoskeletal Radiology in Europe: International Survey by the European Society of Musculoskeletal Radiology (ESSR),10.1055/s-0040-1722564,no,erratum +1745574.0,Nico Karssemeijer,2018,ad02412957fc8cdb63a207b351f6a07c5f21cce3,[OA245] Channelized hotelling observer performance in acquired mammographic images of an anthropomorphic breast phantom,10.1016/J.EJMP.2018.06.317,no,abstract from 89007a5745f46d11c824d8c74b9f6492b6da492f +123637526.0,Bram van Ginneken,2019,0b7485b0593a744432ddc552c24bc74cbbba4c3c,"Combined Forced Expiratory Volume in 1 Second and Forced Vital Capacity Bronchodilator Response, Exacerbations, and Mortality in Chronic Obstructive Pulmonary Disease",10.1513/ANNALSATS.201809-601OC,no,"COPDGene investigator, not an actual author" +8038506.0,"Bram van Ginneken, Colin Jacobs",2015,2fa5f435f41242e8ef053e94ae7af400014ca7d3,"Erratum: Towards a close computed tomography monitoring approach for screen detected subsolid pulmonary nodules? (vol 45, pg 765, 2015)",10.1183/09031936.50005914,no,erratum +2895994.0,"Colin Jacobs, Geert Litjens, Nico Karssemeijer",2016,4e6b705773934f42387213d2aeeac950a636119f,JMI 2015 List of Reviewers.,10.1117/1.JMI.3.1.010101,no,list of reviewers +34754023.0,"Henkjan Huisman, Nico Karssemeijer",2019,4d7f0ccc132e6d29c8227bffa572495a9e4646bd,JMI 2018 List of Reviewers.,10.1117/1.JMI.6.1.010101,no,list of reviewers +34754023.0,"Henkjan Huisman, Nico Karssemeijer",2020,50c847f76d620edd5202c5a6a102198f991a3044,2019 List of Reviewers,10.1117/1.JMI.7.1.010102,no,list of reviewers +8038506.0,"Bram van Ginneken, Alessa Hering, Keelin Murphy",2020,888c62de7448e14302c3c2ed7dc062829ea279d7,Learn2Reg Challenge: CT Lung Registration - Training Data,10.5281/ZENODO.3835682,no,training data +143613202.0,"Francesco Ciompi, Jeroen van der Laak, Geert Litjens",2017,ab781b4dc45e73ddfe6383a108a813a6739f1753,"The Digital Pathology Association's Annual Conference October 1-3 | Manchester Grand Hyatt | San Diego, CA",10.1016/s2153-3539(22)00450-3,no,Oral abstract +8038506.0,"Bram van Ginneken, Alessa Hering, Keelin Murphy",2020,efec0825343cfb78df86f34488019a51aaa4949a,Learn2Reg Challenge: CT Lung Registration - Test Data,10.5281/ZENODO.4048761,no,test data +123637526.0,"Bram van Ginneken, Bram Platel, Clarisa Sanchez, Rashindra Manniesing, Bram van Ginneken, Bram Platel, Clarisa Sanchez, Rashindra Manniesing",2018,682e8febc5d053766fefe1ee2de5ca2ff39762f2,Author Correction: White Matter and Gray Matter Segmentation in 4D Computed Tomography,10.1038/s41598-018-25729-6,no,small author correction +123637526.0,"Bram van Ginneken, Francesco Ciompi, Cornelia Schaefer-Prokop",2017,86b9d39fb026746752f95f87f87fd26b8512c913,Corrigendum: Towards automatic pulmonary nodule management in lung cancer screening with deep learning,10.1038/srep46878,no,small author correction +143613202.0,"Francesco Ciompi, Jeroen van der Laak",2020,aff9f3cd8024fb64c7ea36d396a36ebf33d01f5b,Correction to: Assessment of individual tumor buds using keratin immunohistochemistry: moderate interobserver 
agreement suggests a role for machine learning,10.1038/s41379-019-0450-2,no,small author correction +145959882.0,Geert Litjens,2022,b5b05d05d826fcfdcc92a6a5710c84f346610f1f,Crowdsourcing of artificial intelligence algorithms for diagnosis and Gleason grading of prostate cancer in biopsies,10.1016/s0302-2838(22)00693-5,no,abstract for congress +4960344.0,James Meakin,2017,40793c67a65ea507a10996edd506b95f53564647,ARTERIAL SPIN LABELLING MRI OF CEREBRAL TUMOURS IN,10.1093/NEUONC/NOX036.484,no,incorrect SS entry +1745574.0,Nico Karssemeijer,2015,b524b929a3e43348cbfd493b1696cf5a283c5e98,Detection and Diagnosis of Breast Masses in Mammography,10.1201/B18191-4,no,Chapter in book +32649341.0,Jonas Teuwen,2020,4f36ca4e813f7913d2b94edced1b35b05670c8cb,Convolutional neural networks,10.1016/b978-0-12-816176-0.00025-9,no,Chapter in book +34754023.0,Henkjan Huisman,2020,f08e36943c749664187e39f1b1ba2acde05cd1b1,Prostate cancer,10.1201/9780203732656-17,no,Chapter in book +145959882.0,Geert Litjens,2021,9a366d2c99f3c6f21989690e9cc84581e598624c,Automatic tumour segmentation in H&E-stained whole-slide images of the pancreas.,10.1117/12.2611542,no,"SPIE poster + presentation + paper, paper has also own doi" +2074975080.0,Matthieu Rutten,2020,31f7b71202b51458d19599160be61c9052476c29,Erratum: Present Status of Musculoskeletal Radiology in Europe: International Survey by the European Society of Musculoskeletal Radiology (ESSR),10.1055/s-0040-1722564,no,erratum +1419819133.0,Cornelia Schaefer-Prokop,2019,b9c6173c57aa5ee76631feab341e5b3111a2f816,Pulmonary Manifestations of Systemic Diseases,10.1007/978-3-030-11149-6_11,no,Chapter in book +8038506.0,"Bram van Ginneken, Alessa Hering, Keelin Murphy",2020,888c62de7448e14302c3c2ed7dc062829ea279d7,Learn2Reg Challenge: CT Lung Registration - Training Data,10.5281/ZENODO.3835682,no,training data +8038506.0,"Bram van Ginneken, Alessa Hering, Keelin Murphy",2020,efec0825343cfb78df86f34488019a51aaa4949a,Learn2Reg Challenge: CT Lung Registration - Test Data,10.5281/ZENODO.4048761,no,test data +143613202.0,Francesco Ciompi,2021,aecc480d2861d1e848ccdc4c5383be9367525f39,MItosis DOmain Generalization Challenge (MICCAI-MIDOG 2021) Training Data,10.5281/ZENODO.4643381,no,training data +143613202.0,Francesco Ciompi,2019,7f98999a9e47db537fe37454bd7b8fd9059052e7,14th European Congress on Digital Pathology,10.4103/2153-3539.270744,no,presentation abstract (and no doi for abstract) +123637526.0,Bram van Ginneken,2017,f09488ac1b4b5e046aebaf87f59cee1e3312e9e9,JMI 2016 List of Reviewers.,10.1117/1.JMI.4.1.010101,no,list of reviewers +2913408.0,Nikolas Lessman,2020,50c847f76d620edd5202c5a6a102198f991a3044,2019 List of Reviewers,10.1117/1.JMI.7.1.010102,no,list of reviewers +34754023.0,Henkjan Huisman,2020,d10ef40b8b13d9c07d0fc86a534609fbbb2e0423,Author Correction: Introducing PIONEER: a project to harness big data in prostate cancer research,10.1038/s41585-020-0355-3,no,small author correction + PIONEER consortium +1745574.0,Nico Karssemeijer,2019,4d7f0ccc132e6d29c8227bffa572495a9e4646bd,JMI 2018 List of Reviewers.,10.1117/1.JMI.6.1.010101,no,list of reviewers +,,2015,6368b6d0ede9eb21b624b90fc66bcaa2bfb8ef44,Fall 2015 Meeting of the NVPHBV,,no,meeting abstracts +5752941.0,Jos Thannhauser,2022,0920bab15c77b8ec169bb7961287c38dcf41cedb,Abstract 134: The Impact Of Alcohol Use On The Quality Of Cardiopulmonary Resuscitation: A Prespecified Analysis Of A Randomised Trial,10.1161/circ.146.suppl_1.134,no,abstract +,,2021,4202f56b0e22eedd6921d9b26519bc72f89cf4fd,Correction to: Incorporating 
radiomics into clinical trials: expert consensus endorsed by the European Society of Radiology on considerations for data-driven compared to biologically driven quantitative biomarkers,10.1007/s00330-021-07721-3,no,small author correction +2913408.0,Nikolas Lessman,2019,6f8fe54a656d0f1e448a6142a41c41316605c98f,P3411Prevalence and risk factors of multifocal cardiovascular calcification in patients at high cardiovascular risk,10.1093/eurheartj/ehz745.0285,no,poster session abstract +"[8038506, 123637526]","[8038506, 123637526]",2017,01df4624e9578fd597b0f0ee6d81ac508363e1eb,Fully automated detection of hyperreflective foci in optical coherence tomography,,no,ARVO Annual Meeting Abstract +"[144085811, 32187701]","[144085811, 32187701]",2015,06d272dcc4260690637651022e281399880d1c4d,The role of age-related macular degeneration associated variants in drusen progression,,no,ARVO Annual Meeting Abstract +[4960344],[4960344],2017,086a39a4b16055705aa074381c0dc9a0c685fc1a,OP05. ARTERIAL SPIN LABELLING MRI OF CEREBRAL TUMOURS IN RATS,10.1093/neuonc/now292.004,no,Oral presentation +"[144085811, 32187701]","[144085811, 32187701]",2018,14ef2e5ac0d3a7203a104ff84dfaee4ba26f1043,A Novel Method for Micro aneurysm Detection and Diabetic Retinopathy Diagnosis,,no,No Diag member in author list +"[8038506, 123637526]","[8038506, 123637526]",2015,1b3956b3bb8477f0958b775906da32812d0de574,Automatic detection of eye diseases using automated color fundus image analysis,,no,ARVO Annual Meeting Abstract +[1745574],[1745574],2018,1e62b6e9c77e0bab83e4a9ae5841a356bb1342de,Evaluation of a Strati fi ed National Breast Screening Program in the United Kingdom : An Early Model-Based Cost-Effectiveness Analysis,,no,"Title has 2 entries on SS, this one is an incomplete one" +"[1419819133, 1445069528, 1400632685]","[1419819133, 1445069528, 1400632685]",2015,24fa023edaa351d4feb7ce72f2ac5492cd40a7be,Interstitial lung diseases,10.1183/2312508X.10003015,no,It is a chapter in a book. I'm not sure the automatic entry would appear correctly +[143613202],[143613202],2018,2c468dfe5f797f5928800e02f20ceddb55bf37f1,"University of Dundee Towards Standardization of Retinal Vascular Measurements Mookiah,",,no,"conflicting info, might be a conference abstract but not sure" +"[32649341, 119024451]","[32649341, 119024451]",2018,2d323d4396bc559257227ddffceaa0700bb8fc46,Soft tissue lesion detection in mammography using deep neural networks for object detection,,no,paper rejected from midl but still online +[145959882],[145959882],2015,33efdb691cbcc9a4650f3f2a2fbf6c4121d2e132,Detection and Diagnosis of Prostate Cancer in MR,10.1201/B18191-23,no,"book chapter, not sure it will be auto added correctly" +[143613202],[143613202],2020,496a9e301e2a989e24aacb9dda154fcf86a5cf41,Algorithms for semantic segmentation and detection in histopathology images,,no,Report on progress for an EU grant (Not a peer reviewed publication) +"[8038506, 123637526]","[8038506, 123637526]",2020,591821f281a2906c8916b23bf6fcd991bda8e368,Biomedical Image Analysis Challenges (BIAS) Reporting Guideline,10.5281/ZENODO.4008954,no,This doi refers to zenodo dataset. The bib file already contains the arxiv and journal versions of this paper with the same name +"[145441238, 145388932]","[145441238, 145388932]",2017,5d19f8712f8460ddd5f5129b3805f875d4b6992c,Drosophila NMJ Morphometrics,10.6084/M9.FIGSHARE.2077399.V1,no,not sure what this is +[1745574],[1745574],2019,5e60744b9ba4f9f9380962b81eb236662dcfba3c,METHODS 2 . 
1 Analysis of color variation in hematoxylin and eosin stained histology slides,,no,not sure what this is +[34754023],[34754023],2022,5f7134900435671d8292b0a65fe9983fc1bbd12a,Detecting Out-of-Distribution via an Unsupervised Uncertainty Estimation for Prostate Cancer Diagnosis ProstateAI Clinical Collaborators,,no,the pdf is different from the title on ss +[1745574],[1745574],2015,6368b6d0ede9eb21b624b90fc66bcaa2bfb8ef44,Fall 2015 Meeting of the NVPHBV,,no,meeting? +[145959882],[145959882],2020,6da9a553f8fa6eeceaa25aa53b748be6c70f6e03,The PANDA challenge: Prostate cANcer graDe Assessment using the Gleason grading system,10.5281/ZENODO.3715938,no,dataset +"[8038506, 123637526]","[8038506, 123637526]",2018,7202e88d58ce87817ae6876b8b847ad92929b502,Automatic Detection of Microaneurysms and Classification of Diabetic Retinopathy Images using SVM Technique,,no,"not sure, cannot find it online" +"[8038506, 123637526]","[8038506, 123637526]",2019,8678f63d7a648eb4d0f64e50c8ff5a1c53ac128b,Author Correction: Why rankings of biomedical image analysis competitions should be interpreted with care,10.1038/s41467-019-08563-w,no,author correction +[143613202],[143613202],2020,8baa4e68c61566c99ee2dfaea278a63ee48b6c77,Automatic Lung Cancer Detection and Classification in Whole-slide Histopathology,10.5281/ZENODO.3715001,no,dataset +"[8038506, 123637526]","[8038506, 123637526]",2017,8ea2deb23587178612dce8733d2cc90817c7ddd9,Automatic Lymph Node Cluster Segmentation Using Holistically-Nested Deep Convolutional Neural Networks and Structured Optimization in CT Images,,no,RSNA presentation +[145959882],[145959882],2021,96c231147b444cb802c4ebb4f1be11309040d1f9,Deep learning in digital pathology and microscopy for research and clinical practice,10.22443/RMS.EMC2020.286,no,doi not found +[34754023],[34754023],2020,99cf88b6359e0a5354c809fdb992bf82fdffc929,Learn2Reg - The Challenge,10.5281/ZENODO.3715652,no,dataset +"[1419819133, 1445069528, 1400632685]","[1419819133, 1445069528, 1400632685]",2021,af0490f8f0cf935673991b05c1ab1d80f5e2b92d,The tale of TILs in breast cancer: A report from The International Immuno-Oncology Biomarker Working Group,10.1038/s41523-021-00346-1,no,The International Immuno-Oncology Biomarker Working Group +"[145441238, 145388932]","[145441238, 145388932]",2019,afbaebf021cd8680a0342d22052644bf11e206bf,HookNet: A Multi Resolution Approach for Breast Tissue Segmentation in Histopathology Images,,no,can't find article from 2019 +"[145441238, 145388932]","[145441238, 145388932]",2019,b94247577fd67ad218495f173203dc033683b7be,Digital and Computational Pathology: Bring the Future into Focus,10.4103/2153-3539.255259,no,all pathology visions abstracts +[143613202],[143613202],2018,bdadaa3b69040f97adc83ad138e8db27c7f650cb,Computational Pathology and Ophthalmic Medical Image Analysis,10.1007/978-3-030-00949-6,no,workshop book +"[145441238, 145388932]","[145441238, 145388932]",2021,bdda7587a109daa00948967e028dfe138f815f83,"1009 Deep learning for improved detection of premalignant lesions in the fallopian tube, a proof of concept",10.1136/ijgc-2021-esgo.533,no,abstract but with individual doi +[4960344],[4960344],2020,c70d29315538d660696c55932c31c763123d2868,Envision a safer and smarter world with Dell EMC PowerScale,,no,blogpost +"[8038506, 123637526]","[8038506, 123637526]",2017,ccd03e70611898c67f5fcbadf0f4a3be4abefdef,An Approach for the Detection of Proliferative Diabetic Retinopathy,,no,ss entry incomplete +"[8038506, 123637526]","[8038506, 
123637526]",2020,ce9e45bff8b3470ffab081bbbf4d3e0573eceea9,Performance Evaluation of Distortion Measures for Retinal Images,,no,ss entry incomplete/ year of publication 2011 +"[145441238, 145388932]","[145441238, 145388932]",2017,d4ecdfcb43e4400e15c5cf18ae706ead08a929d6,Validation of diagnosing melanocytic lesions on whole slide images : does z-stack scanning improve diagnostic accuracy?,,no,Oral Free Paper sessions +"[145441238, 145388932]","[145441238, 145388932]",2021,dc17a2f2fef8904b38921150dce466ac8f27b7b2,Computational Pathology: What is the way forward?,10.1117/12.2586357,no,presentation SPIE +"[8038506, 123637526]","[8038506, 123637526]",2019,de09e056b22d7e29e93f5656154d10c88181f7a5,Deep Learning in Image Diagnostics: Recent Results and Outlook,,no,not sure? keynote lecture +"[8038506, 123637526]","[8038506, 123637526]",2018,e2e2e9f858ffc3bde5ee7a6929c9b0e7d52c1a43,Deep learning based automatic liver tumor segmentation in CT with shape-based post-processing,,no,rejected MIDL submission +"[1419819133, 1445069528, 1400632685]","[1419819133, 1445069528, 1400632685]",2019,e5ee780f7926cd2403349bf40725814e4eb7e889,Aquilion ONE Clinical Evidence: Subtraction Contrast Enhancement Boost Technique at Aortic Computed Tomography Angiography: Added Value for the Evaluation of Type II Endoleaks After Endovascular Aortic Aneurysm Repair,,no,not sure what this is? +"[32649341, 119024451]","[32649341, 119024451]",2019,f2ad6e4c7023da74ed85a65de6572dc10ee91f19,Latest AI applications in breast imaging Executive Summary,,no,"no doi, does not seem to be a proper peer reviewed article" +[153744566],[153744566],2021,f574d9f2b018d707ff0c2dbcd250fcd4ee2856ae,Learn2Reg - The Challenge (2021),10.5281/ZENODO.4573968,no,refers to a dataset +[1745574],[1745574],2020,f7a9b39199077be3c0abf852d9665d65b1029027,Bringing tomosynthesis to screening,,no,"no doi, cannot find that this is a proper publication" diff --git a/scripts/script_data/diag_ss_tmp_new.bib b/scripts/script_data/diag_ss_tmp_new.bib new file mode 100644 index 0000000..e89dde2 --- /dev/null +++ b/scripts/script_data/diag_ss_tmp_new.bib @@ -0,0 +1,31993 @@ +@string{AA = _Age_and_Ageing_} +@string{AAC = _Antimicrobial_Agents_and_Chemotherapy_} +@string{AACC = _AACN_Advanced_Critical_Care_} +@string{AAPM = _American_Association_of_Physicists_in_Medicine_} +@string{AATC = _Annals_of_the_American_Thoracic_Society_} +@string{ABDI = _Abdominal_Imaging_} +@string{ACAEMEMED = _Academic_Emergency_Medicine_} +@string{ACHA = _Applied_and_Computational_Harmonic_Analysis_} +@string{ACMCS = _ACM_Computing_Surveys_} +@string{ACMTG = _ACM_Transactions_on_Graphics_} +@string{ACMTIS = _ACM_Transactions_on_Information_Systems_} +@string{ACMTMS = _ACM_Transactions_on_Mathematical_Software_} +@string{ACR = _Anticancer_Research_} +@string{ACSCHENEU = _ACS_Chemical_Neuroscience_} +@string{ACTAB = _Acta_Anaesthesiologica_Belgica_} +@string{ACTANASCA = _Acta_Anaesthesiologica_Scandinavica_} +@string{ACTBIOMAT = _Acta_Biomaterialia_} +@string{ACTNC = _Acta_Neurochirurgica_} +@string{ACTNEUBEL = _Acta_Neurologica_Belgica_} +@string{ACTNEUSCA = _Acta_Neurologica_Scandinavica_} +@string{ACTNP = _Acta_Neuropsychiatrica_} +@string{ACTONC = _Acta_Oncologica_} +@string{ACTOPH = _Acta_Ophthalmologica_} +@string{ACTOPHSCA = _Acta_Ophthalmologica_Scandinavica_} +@string{ACTOTT = _Acta_Orthopaedica_et_Traumatologica_Turcica_} +@string{ACTP = _Acta_Paediatrica_} +@string{ACTR = _Acta_Radiologica_} +@string{ADVCI = _Advances_in_Computational_Intelligence_} +@string{ADVDDR = 
_Advanced_Drug_Delivery_Reviews_} +@string{ADVEMB = _Advances_in_Experimental_Medicine_and_Biology_} +@string{ADVFM = _Advanced_Functional_Materials_} +@string{AE = _Anatomy_and_Embryology_} +@string{AFRHS = _African_Health_Sciences_} +@string{AHJ = _American_Heart_Journal_} +@string{AI = _Artificial_Intelligence_} +@string{AIM = _Artificial_Intelligence_in_Medicine_} +@string{AJC = _American_Journal_of_Cardiology_} +@string{AJCLIONC = _American_Journal_of_Clinical_Oncology_} +@string{AJCN = _American_Journal_of_Clinical_Nutrition_} +@string{AJCP = _American_Journal_of_Clinical_Pathology_} +@string{AJE = _American_Journal_of_Epidemiology_} +@string{AJEM = _American_Journal_of_Emergency_Medicine_} +@string{AJGE = _American_Journal_of_Gastroenterology_} +@string{AJH = _American_Journal_of_Hematology_} +@string{AJHG = _American_Journal_of_Human_Genetics_} +@string{AJHP = _American_Journal_of_Health-System_Pharmacy_} +@string{AJHS = _American_Journal_of_Hand_Surgery_} +@string{AJIM = _American_Journal_of_Industrial_Medicine_} +@string{AJM = _American_Journal_of_Medicine_} +@string{AJMG = _American_Journal_of_Medical_Genetics_} +@string{AJMGA = _American_Journal_of_Medical_Genetics_Part_A_} +@string{AJMGB = _American_Journal_of_Medical_Genetics_Part_B_} +@string{AJMMS = _African_Journal_of_Medicine_and_Medical_Sciences_} +@string{AJNEPH = _American_Journal_of_Nephrology_} +@string{AJNR = _American_Journal_of_Neuroradiology_} +@string{AJO = _American_Journal_of_Ophthalmology_} +@string{AJOG = _American_Journal_of_Obstetrics_and_Gynecology_} +@string{AJP = _American_Journal_of_Physiology_} +@string{AJPA = _American_Journal_of_Physical_Anthropology_} +@string{AJPAT = _American_Journal_of_Pathology_} +@string{AJPCP = _American_Journal_of_Physiology_Cell_Physiology_} +@string{AJPEM = _American_Journal_of_Physiology_Endocrinology_and_Metabolism_} +@string{AJPHCP = _American_Journal_of_Physiology_Heart_and_Circulatory_Physiology_} +@string{AJPM = _American_Journal_of_Preventive_Medicine_} +@string{AJPSY = _American_Journal_of_Psychiatry_} +@string{AJR = _American_Journal_of_Roentgenology_} +@string{AJRCCM = _American_Journal_of_Respiratory_and_Critical_Care_Medicine_} +@string{AJS = _American_Journal_of_Surgery_} +@string{AJSP = _American_Journal_of_Surgical_Pathology_} +@string{AKTRAD = _Aktuelle_Radiologie_} +@string{AM = _Academic_Medicine_} +@string{AMSTAT = _American_Statistician_} +@string{ANALCHEM = _Analytical_Chemistry_} +@string{ANIMRS = _Animal_Reproduction_Science_} +@string{ANNREVPAT = _Annual_Review_of_Pathology_} +@string{AOAMS = _Annals_of_the_Academy_of_Medicine_Singapore_} +@string{AOBE = _Annals_of_Biomedical_Engineering_} +@string{AOCR = _Annals_of_Clinical_Research_} +@string{AODP = _Annals_of_Diagnostic_Pathology_} +@string{AOE = _Annals_of_Epidemiology_} +@string{AOH = _Annals_of_Hematology_} +@string{AOIM = _Annals_of_Internal_Medicine_} +@string{AON = _Annals_of_Neurology_} +@string{AONYAS = _Annals_of_the_New_York_Academy_of_Sciences_} +@string{AOO = _Annals_of_Oncology_} +@string{AOORL = _Annals_of_Otology_Rhinology_and_Laryngology_} +@string{AORD = _Annals_of_the_Rheumatic_Diseases_} +@string{AOS = _Annals_of_Statistics_} +@string{AOSO = _Annals_of_Surgical_Oncology_} +@string{AOSURG = _Annals_of_Surgery_} +@string{AOTS = _Annals_of_Thoracic_Surgery_} +@string{AOVS = _Annals_of_Vascular_Surgery_} +@string{APMR = _Applied_Magnetic_Resonance_} +@string{APOPT = _Applied_Optics_} +@string{APRAD = _Applied_Radiology_} +@string{APSC = _Applied_Sciences_} +@string{APT = 
_Alimentary_Pharmacology_and_Therapeutics_} +@string{AQCH = _Analytical_and_Quantitative_Cytology_and_Histology_} +@string{AR = _Academic_Radiology_} +@string{ARBE = _Annual_Review_of_Biomedical_Engineering_} +@string{ARCDC = _Archives_of_Disease_in_Childhood_} +@string{ARCGO = _Archives_of_Gynecology_and_Obstetrics_} +@string{ARCIM = _Archives_of_Internal_Medicine_} +@string{ARCN = _Archives_of_Neurology_} +@string{ARCO = _Archives_of_Ophthalmology_} +@string{ARCOHNS = _Archives_of_Otolaryngology_Head_and_Neck_Surgery_} +@string{ARCPLM = _Archives_of_Pathology_and_Laboratory_Medicine_} +@string{ARCS = _Archives_of_Surgery_} +@string{ARLO = _Acoustics_Research_Letters_Online_} +@string{ARP = _Anesthesiology_Research_and_Practice_} +@string{ARRD = _American_Review_of_Respiratory_Disease_} +@string{ARTR = _Arthritis_and_Rheumatism_} +@string{ARVO = _Association_for_Research_in_Vision_and_Ophthalmology_} +@string{ASTJ = _Astrophysical_Journal_} +@string{ASTP = _Astroparticle_Physics_} +@string{ATM = _Annals_of_Translational_Medicine_} +@string{ATS = _American_Thoracic_Society_International_Conference_} +@string{ATVB = _Arteriosclerosis_Thrombosis_and_Vascular_Biology_} +@string{AUSNZJO = _Australian_and_New_Zealand_Journal_of_Ophthalmology_} +@string{AUSPESM = _Australasian_Physical_and_Engineering_Sciences_in_Medicine_} +@string{AUSRAD = _Australasian_Radiology_} +@string{Acustica = _Acustica_} +@string{Algorithmica = _Algorithmica_} +@string{Angiogenesis = _Angiogenesis_} +@string{Apidologie = _Apidologie_} +@string{Atherosclerosis = _Atherosclerosis_} +@string{Automedica = _Automedica_} +@string{BAMS = _Bulletin_of_the_Autralian_Mathematical_Society_} +@string{BCRT = _Breast_Cancer_Research_and_Treatment_} +@string{BDM = _BioData_Mining_} +@string{BEJZ = _Belgian_Journal_of_Zoology_} +@string{BIOBA = _Biochimica_et_Biophysica_Acta_} +@string{BIOCJ = _Biochemical_Journal_} +@string{BIOCST = _Biochemical_Society_Transactions_} +@string{BIOD = _BioDrugs_} +@string{BIOLC = _Biological_Cybernetics_} +@string{BIOLN = _Biology_of_the_Neonate_} +@string{BIOLP = _Biological_Psychiatry_} +@string{BIOMDL = _Biomedical_Digital_Libraries_} +@string{BIOMEO = _Biomedical_Engineering_Online_} +@string{BIOMS = _Biomedical_Simulation_} +@string{BIOMSI = _Biomedical_Sciences_Instrumentation_} +@string{BIOMT = _Biomedizinische_Technik_} +@string{BIOPC = _Biophysical_Chemistry_} +@string{BIOPLJ = _Biophysical_Journal_} +@string{BIOPSJ = _Biophysics_Journal_} +@string{BJA = _British_Journal_of_Anaesthesia_} +@string{BJC = _British_Journal_of_Cancer_} +@string{BJCP = _British_Journal_of_Clinical_Pharmacology_} +@string{BJDC = _British_Journal_of_Diseases_of_the_Chest_} +@string{BJHS = _British_Journal_of_Hand_Surgery_} +@string{BJIM = _British_Journal_of_Industrial_Medicine_} +@string{BJO = _British_Journal_of_Ophthalmology_} +@string{BJOG = _British_Journal_of_Obstetrics_and_Gynaecology_} +@string{BJP = _British_Journal_of_Psychiatry_} +@string{BJR = _British_Journal_of_Radiology_} +@string{BJS = _British_Journal_of_Surgery_} +@string{BJUI = _British_Journal_of_Urology_International_} +@string{BMCCAN = _BMC_Cancer_} +@string{BMCID = _BMC_Infectious_Diseases_} +@string{BMCIMM = _BMC_Immunology_} +@string{BMCMI = _BMC_Medical_Imaging_} +@string{BMCMIDM = _BMC_Medical_Informatics_and_Decision_Making_} +@string{BMCMRM = _BMC_Medical_Research_Methodology_} +@string{BMCNEU = _BMC_Neurology_} +@string{BMCPH = _BMC_Public_Health_} +@string{BMCPM = _BMC_Pulmonary_Medicine_} +@string{BMCRN = _BMC_Research_Notes_} 
+@string{BMCSUR = _BMC_Surgery_} +@string{BMCWH = _BMC_Womens_Health_} +@string{BMJ = _British_Medical_Journal_} +@string{BMJCR = _BMJ_Case_Reports_} +@string{BMJO = _BMJ_Open_} +@string{BMT = _Bone_Marrow_Transplantation_} +@string{BMVC = _British_Machine_Vision_Conference_} +@string{BOE = _Biomedical_Optics_Express_} +@string{BRAI = _Brain_Injury_} +@string{BRAP = _Brain_Pathology_} +@string{BRAR = _Brain_Research_} +@string{BRASF = _Brain_Structure_and_Function_} +@string{BRAT = _Brain_Topography_} +@string{BREC = _Breast_Cancer_} +@string{BRECR = _Breast_Cancer_Research_} +@string{BRECRT = _Breast_Cancer_Research_and_Treatment_} +@string{BREJ = _Breast_Journal_} +@string{BSTJ = _Bell_System_Technical_Journal_} +@string{BULMB = _Bulletin_of_Mathematical_Biology_} +@string{BULWHO = _Bulletin_of_the_World_Health_Organization_} +@string{Bildgebung = _Bildgebung_} +@string{BioIm = _IEEE_International_Symposium_on_Biomedical_Imaging_} +@string{Biochemistry = _Biochemistry_} +@string{Biomaterials = _Biomaterials_} +@string{Biometrics = _Biometrics_} +@string{Biometrika = _Biometrika_} +@string{Biotechniques = _Biotechniques_} +@string{Blut = _Blut_} +@string{Bone = _Bone_} +@string{Brain = _Brain_} +@string{Breast = _Breast_} +@string{CACJC = _CA_a_Cancer_Journal_for_Clinicians_} +@string{CADES = _Computer-Aided_Design_} +@string{CAJM = _Central_African_Journal_of_Medicine_} +@string{CANARJ = _Canadian_Association_of_Radiologists_Journal_} +@string{CANB = _Cancer_Biomarkers_} +@string{CANBR = _Cancer_Biotherapy_and_Radiopharmaceuticals_} +@string{CANCERS = _Cancers_} +@string{CANE = _Cancer_Epidemiology_} +@string{CANEBP = _Cancer_Epidemiology_Biomarkers_and_Prevention_} +@string{CANI = _Cancer_Imaging_} +@string{CANL = _Cancer_Letters_} +@string{CANR = _Cancer_Research_} +@string{CANTR = _Cancer_Treatment_and_Research_} +@string{CARIR = _Cardiology_in_Review_} +@string{CARRES = _Carbohydrate_Research_} +@string{CARS = _Computer_Assisted_Radiology_and_Surgery_} +@string{CARY = _Cardiology_in_the_Young_} +@string{CAS = _Computer_Aided_Surgery_} +@string{CATHCVI = _Catheterization_and_Cardiovascular_Interventions_} +@string{CBM = _Computers_in_Biology_and_Medicine_} +@string{CBR = _Computers_and_Biomedical_Research_} +@string{CC = _Computers_in_Cardiology_} +@string{CCT = _Contemporary_Clinical_Trials_} +@string{CD = _Clinical_Diabetes_} +@string{CDSR = _Cochrane_Database_of_Systematic_Reviews_} +@string{CELBB = _Cell_Biochemistry_and_Biophysics_} +@string{CELLO = _Cellular_Oncology_} +@string{CELMLS = _Cellular_and_Molecular_Life_Sciences_} +@string{CERVD = _Cerebrovascular_Diseases_} +@string{CG = _Computers_and_Graphics_} +@string{CGA = _IEEE_Computer_Graphics_and_Applications_} +@string{CGC = _Clinical_Genitourinary_Cancer_} +@string{CGIP = _Computer_Graphics_and_Image_Processing_} +@string{CHIMJ = _Chinese_Medical_Journal_} +@string{CHIMSJ = _Chinese_Medical_Sciences_Journal_} +@string{CIRCCVI = _Circulation_Cardiovascular_Imaging_} +@string{CIRCJ = _Circulation_Journal_} +@string{CIRCRES = _Circulation_Research_} +@string{CLEPCFJ = _Cleft_Palate-Craniofacial_Journal_} +@string{CLINATH = _Clinical_and_Applied_Thrombosis/Hemostasis_} +@string{CLINCC = _Clinical_Colorectal_Cancer_} +@string{CLINCHEM = _Clinical_Chemistry_} +@string{CLINCM = _Clinics_in_Chest_Medicine_} +@string{CLINCR = _Clinical_Cancer_Research_} +@string{CLINEND = _Clinical_Endocrinology_} +@string{CLINEOPH = _Clinical_and_Experimental_Ophthalmology_} +@string{CLINEOPT = _Clinical_and_Experimental_Optometry_} 
+@string{CLINEPP = _Clinical_and_Experimental_Pharmacology_and_Physiology_} +@string{CLINGH = _Clinical_Gastroenterology_and_Hepatology_} +@string{CLINI = _Clinical_Imaging_} +@string{CLINID = _Clinical_Infectious_Diseases_} +@string{CLINNEPH = _Clinical_Nephrology_} +@string{CLINNM = _Clinical_Nuclear_Medicine_} +@string{CLINNN = _Clinical_Neurology_and_Neurosurgery_} +@string{CLINNP = _Clinical_Neurophysiology_} +@string{CLINORR = _Clinical_Orthopaedics_and_Related_Research_} +@string{CLINR = _Clinical_Radiology_} +@string{CLINTHE = _Clinical_Therapeutics_} +@string{CLIONC = _Clinical_Oncology_} +@string{CMBBE = _Computational_Methods_in_Biomechanics_and_Biomedical_Engineering_} +@string{CMIG = _Computerized_Medical_Imaging_and_Graphics_} +@string{CMPB = _Computer_Methods_and_Programs_in_Biomedicine_} +@string{CMR = _Clinical_Microbiology_Reviews_} +@string{COMJ = _Computer_Journal_} +@string{COMMACM = _Communications_of_the_Association_for_Computing_Machinery_} +@string{COMMMED = _Communications_Medicine_} +@string{COMPAY = _Computational_Pathology_and_Ophthalmic_Medical_Image_Analysis_} +@string{CONCLITRI = _Controlled_Clinical_Trials_} +@string{CONMMI = _Contrast_Media_and_Molecular_Imaging_} +@string{COPD = _COPD_} +@string{COPDM = _Chronic_obstructive_pulmonary_diseases_Miami_} +@string{COPM = _Current_Opinion_in_Pulmonary_Medicine_} +@string{CPAM = _Communications_on_Pure_and_Applied_Mathematics_} +@string{CPCV = _Conference_Proceedings_of_the_IEEE_Computer_Vision_} +@string{CPEE = _IEEE_International_Conference_on_Computational_Problems_of_Electrical_Engineering_} +@string{CPEMBS = _Conference_Proceedings_of_the_IEEE_Engineering_in_Medicine_and_Biology_Society_} +@string{CRCM = _Critical_Care_Medicine_} +@string{CRD = _Chronic_Respiratory_Disease_} +@string{CRIC = _Critical_Care_} +@string{CRIREVDI = _Critical_Reviews_in_Diagnostic_Imaging_} +@string{CRIREVOH = _Critical_Reviews_in_Oncology/Hematology_} +@string{CRM = _Case_Reports_in_Medicine_} +@string{CSCNA = _Chest_Surgery_Clinics_of_North_America_} +@string{CSE = _Computing_in_Science_and_Engineering_} +@string{CTI = _Calcified_Tissue_International_} +@string{CURDR = _Current_Diabetes_Reports_} +@string{CURDRV = _Current_Diabetes_Review_} +@string{CURER = _Current_Eye_Research_} +@string{CURMCAA = _Current_Medicinal_Chemistry_Anti-cancer_Agents_} +@string{CURMEDRESOPI = _Current_Medical_Research_and_Opinion_} +@string{CURNNR = _Current_Neurology_and_Neuroscience_Reports_} +@string{CUROPCC = _Current_Opinion_in_Critical_Care_} +@string{CUROPO = _Current_Opinion_in_Ophthalmology_} +@string{CUROPPM = _Current_Opinion_in_Pulmonary_Medicine_} +@string{CUROPU = _Current_Opinion_in_Urology_} +@string{CURPDR = _Current_Problems_in_Diagnostic_Radiology_} +@string{CURUROREP = _Current_Urology_Reports_} +@string{CVGIP = _Computer_Vision_Graphics_and_Image_Processing_} +@string{CVIR = _Cardiovascular_and_Interventional_Radiology_} +@string{CVIU = _Computer_Vision_and_Image_Understanding_} +@string{CVPR = _Computer_Vision_and_Pattern_Recognition_} +@string{Cancer = _Cancer_} +@string{Cell = _Cell_} +@string{Chemphyschem = _Chemphyschem_} +@string{Chest = _Chest_} +@string{Chirurg = _Chirurg_} +@string{Circulation = _Circulation_} +@string{Climacteric = _Climacteric_} +@string{Computing = _Computing_} +@string{Cytometry = _Cytometry_} +@string{CytometryB = _Cytometry_Part_B-Clinical_Cytometry_} +@string{DAM = _Discrete_Applied_Mathematics_} +@string{DBME = _Dutch_Bio-Medical_Engineering_Conference_} +@string{DC = _Diabetes_Care_} 
+@string{DDT = _Drug_Discovery_Today_} +@string{DENTRAD = _Dentomaxillofacial_Radiology_} +@string{DI = _Diagnostic_Imaging_} +@string{DIE = _Diagnostic_Imaging_Europe_} +@string{DIGD = _Digestive_Diseases_} +@string{DIGDS = _Digestive_Diseases_and_Sciences_} +@string{DIGMAM = _Digital_Mammography_} +@string{DIR = _Diagnostic_and_Interventional_Radiology_} +@string{DM = _Diabetic_Medicine_} +@string{DMBE = _Dutch_Bio-Medical_Engineering_Conference_} +@string{DMCN = _Developmental_Medicine_and_Child_Neurology_} +@string{DMKD = _Data_Mining_and_Knowledge_Discovery_} +@string{DPR = _Delft_Progress_Report_} +@string{DRCP = _Diabetes_Research_and_Clinical_Practice_} +@string{DTT = _Diabetes_Technology_and_Therapeutics_} +@string{Diabetes = _Diabetes_} +@string{Diabetologia = _Diabetologia_} +@string{EAU = _Annual_European_Association_of_Urology_Congress_} +@string{EBCC = _European_Breast_Cancer_Conference_} +@string{EBJ = _European_Biophysics_Journal_} +@string{ECCV = _European_Conference_on_Computer_Vision_} +@string{ECP = _European_Congress_of_Pathology_} +@string{ECR = _European_Congress_of_Radiology_} +@string{EER = _Experimental_Eye_Research_} +@string{EHD = _Early_Human_Development_} +@string{EHJ = _European_Heart_Journal_} +@string{EHJCVI = _European_Heart_Journal_Cardiovascular_Imaging_} +@string{EJAP = _European_Journal_of_Applied_Physiology_} +@string{EJBIOCHEM = _European_Journal_of_Biochemistry_} +@string{EJC = _European_Journal_of_Cancer_} +@string{EJCI = _European_Journal_of_Clinical_Investigation_} +@string{EJCP = _European_Journal_of_Cancer_Prevention_} +@string{EJCTS = _European_Journal_of_Cardio-Thoracic_Surgery_} +@string{EJE = _European_Journal_of_Epidemiology_} +@string{EJEC = _European_Journal_of_Echocardiography_} +@string{EJEND = _European_Journal_of_Endocrinology_} +@string{EJHF = _European_Journal_of_Heart_Failure_} +@string{EJHG = _European_Journal_of_Human_Genetics_} +@string{EJIM = _European_Journal_of_Internal_Medicine_} +@string{EJIVP = _EURASIP_Journal_on_Image_and_Video_Processing_} +@string{EJMR = _European_Journal_of_Medical_Research_} +@string{EJN = _European_Journal_of_Neurology_} +@string{EJNEUSCI = _European_Journal_of_Neuroscience_} +@string{EJNM = _European_Journal_of_Nuclear_Medicine_} +@string{EJNMMI = _European_Journal_of_Nuclear_Medicine_and_Molecular_Imaging_} +@string{EJP = _European_Journal_of_Pharmacology_} +@string{EJPED = _European_Journal_of_Pediatrics_} +@string{EJPS = _European_Journal_of_Pediatric_Surgery_} +@string{EJR = _European_Journal_of_Radiology_} +@string{EJSUR = _European_Journal_of_Surgery_} +@string{EJSURONC = _European_Journal_of_Surgical_Oncology_} +@string{EJTES = _European_Journal_of_Trauma_and_Emergency_Surgery_} +@string{EJUS = _European_Journal_of_Ultrasound_} +@string{EJVES = _European_Journal_of_Vascular_and_Endovascular_Surgery_} +@string{ELE = _Eye_London_England_} +@string{EMBM = _IEEE_Engineering_in_Medicine_and_Biology_Magazine_} +@string{EMBS = _Annual_International_Conference_of_the_IEEE_Engineering_in_Medicine_and_Biology_Society_} +@string{EMEMCNA = _Emergency_Medicine_Clinics_of_North_America_} +@string{EMEMJ = _Emergency_Medicine_Journal_} +@string{EMERAD = _Emergency_Radiology_} +@string{ENPP = _European_Neuropsychopharmacology_} +@string{EOPT = _Expert_Opinion_on_Pharmacotherapy_} +@string{EPJC = _European_Physical_Journal_C_-_Particles_and_Fields_} +@string{EPL = _Europhysics_Letters_} +@string{ER = _European_Radiology_} +@string{ERACT = _Expert_Review_of_Anticancer_Therapy_} +@string{ERAIT = 
_Expert_Review_of_Anti-Infective_Therapy_} +@string{ERJ = _European_Respiratory_Journal_} +@string{ERJS = _European_Respiratory_Journal_Supplement_} +@string{ERMD = _Expert_Review_of_Medical_Devices_} +@string{ERNT = _Expert_Review_of_Neurotherapeutics_} +@string{ERS = _European_Respiratory_Society_International_Congress_} +@string{ESA = _Expert_Systems_with_Applications_} +@string{ESJ = _European_Stroke_Journal_} +@string{ESMRMB = _European_Society_for_Magnetic_Resonance_in_Medicine_and_Biology_} +@string{ESNR = _European_Society_of_Neuroradiology_} +@string{ESTI = _Annual_Meeting_of_the_European_Society_of_Thoracic_Imaging_} +@string{ETHIOPMJ = _Ethiopian_Medical_Journal_} +@string{EU = _European_Urology_} +@string{EUF = _European_Urology_Focus_} +@string{EURETINA = _European_Society_of_Retina_Specialists_} +@string{EXPBR = _Experimental_Brain_Research_} +@string{EXPCR = _Experimental_Cell_Research_} +@string{EXPGER = _Experimental_Gerontology_} +@string{Ecology = _Ecology_} +@string{Endocrine = _Endocrine_} +@string{Epilepsia = _Epilepsia_} +@string{Ergonomics = _Ergonomics_} +@string{Eye = _Eye_} +@string{FEBSL = _FEBS_Letters_} +@string{FICT = _Frontiers_in_ICT_} +@string{FTCGV = _Foundations_and_Trends_in_Computer_Graphics_and_Vision_} +@string{GACEO = _Graefe_s_Archive_for_Clinical_and_Experimental_Ophthalmology_} +@string{GO = _Gynecologic_Oncology_} +@string{GROHIR = _Growth_Hormone_and_IGF_Research_} +@string{Gamma = _Gamma_} +@string{Gastroenterology = _Gastroenterology_} +@string{GigaScience = _GigaScience_} +@string{Gut = _Gut_} +@string{HEADN = _Head_and_Neck_} +@string{HEARINGR = _Hearing_Research_} +@string{HORMONER = _Hormone_Research_} +@string{HTA = _Health_Technology_Assessment_} +@string{HUMBM = _Human_Brain_Mapping_} +@string{HUMMG = _Human_Molecular_Genetics_} +@string{HUMP = _Human_Pathology_} +@string{Hepatology = _Hepatology_} +@string{Herz = _Herz_} +@string{Histopathology = _Histopathology_} +@string{Hypertension = _Hypertension_} +@string{Hystrix = _Hystrix_} +@string{IAOEH = _International_Archives_of_Occupational_and_Environmental_Health_} +@string{ICCV = _International_Conference_on_Computer_Vision_} +@string{ICIP = _International_Conference_on_Image_Processing_} +@string{ICM = _Intensive_Care_Medicine_} +@string{ICML = _International_Conference_on_Machine_Learning_} +@string{ICPR = _International_Conference_on_Pattern_Recognition_} +@string{ICS = _International_Congress_Series_} +@string{ICVTS = _Interactive_Cardiovascular_Thoracic_Surgery_} +@string{IDAQPRT = _Infinite_Dimensional_Analysis_Quantum_Probability_and_Related_Topics_} +@string{IDMRI = _Imaging_Decisions_MRI_} +@string{IEM = _Internal_and_Emergency_Medicine_} +@string{IF = _Interface_Focus_} +@string{IIM = _Imaging_in_Medicine_} +@string{IJBI = _International_Journal_of_Biomedical_Imaging_} +@string{IJBIOMAR = _International_Journal_of_Biological_Markers_} +@string{IJBIOMET = _International_Journal_of_Biometeorology_} +@string{IJBRECAN = _International_Journal_of_Breast_Cancer_} +@string{IJC = _International_Journal_of_Cancer_} +@string{IJCARD = _International_Journal_of_Cardiology_} +@string{IJCARDHV = _International_Journal_of_Cardiology_Heart_and_Vasculature_} +@string{IJCARS = _International_Journal_of_Computer_Assisted_Radiology_and_Surgery_} +@string{IJCI = _International_Journal_of_Cardiac_Imaging_} +@string{IJCOPD = _International_Journal_of_Chronic_Obstructive_Pulmonary_Disease_} +@string{IJCV = _International_Journal_of_Computer_Vision_} +@string{IJCVI = 
_International_Journal_of_Cardiovascular_Imaging_} +@string{IJECE = _International_Journal_of_Electronics_and_Communication_Engineering_} +@string{IJEPI = _International_Journal_of_Epidemiology_} +@string{IJGO = _International_Journal_of_Gynaecology_and_Obstetrics_} +@string{IJGYNCAN = _International_Journal_of_Gynecological_Cancer_} +@string{IJHYPERT = _International_Journal_of_Hyperthermia_} +@string{IJINFDIS = _International_Journal_of_Infectious_Diseases_} +@string{IJINTCAR = _International_Journal_of_Integrated_Care_} +@string{IJIST = _International_Journal_of_Imaging_Systems_and_Technology_} +@string{IJMI = _International_Journal_of_Medical_Informatics_} +@string{IJMMMAS = _International_Journal_of_Mathematical_Models_and_Methods_in_Applied_Sciences_} +@string{IJMOLSCI = _International_Journal_of_Molecular_Sciences_} +@string{IJNM = _International_Journal_of_Nanomedicine_} +@string{IJO = _International_Journal_of_Obesity_} +@string{IJOMS = _International_Journal_of_Oral_and_Maxillofacial_Surgery_} +@string{IJONC = _International_Journal_of_Oncology_} +@string{IJPHA = _International_Journal_of_Pharmaceutics_} +@string{IJPRAI = _International_Journal_of_Pattern_Recognition_and_Artificial_Intelligence_} +@string{IJRARCS = _International_Journal_of_Research_and_Reviews_in_Computer_Science_} +@string{IJROBP = _International_Journal_of_Radiation_Oncology_Biology_Physics_} +@string{IJS = _International_Journal_of_Stroke_} +@string{IJSP = _International_Journal_of_Surgical_Pathology_} +@string{IJTECASSHC = _International_Journal_of_Technology_Assessment_in_Health_Care_} +@string{IJTLD = _International_Journal_of_Tuberculosis_and_Lung_Disease_} +@string{IJU = _International_Journal_of_Urology_} +@string{IJWOMHEA = _International_Journal_of_Women_s_Health_} +@string{INDAMAT = _Indagationes_Mathematicae_} +@string{INDJO = _Indian_Journal_of_Ophthalmology_} +@string{INDPED = _Indian_Pediatrics_} +@string{INSI = _Insights_into_Imaging_} +@string{INTMEDJ = _Internal_Medicine_Journal_} +@string{INTNR = _Interventional_Neuroradiology_} +@string{INVPRO = _Inverse_Problems_} +@string{IOVS = _Investigative_Ophthalmology_and_Visual_Science_} +@string{IPL = _Information_Processing_Letters_} +@string{IPMI = _Information_Processing_in_Medical_Imaging_} +@string{IR = _Investigative_Radiology_} +@string{IRJMEDSCI = _Irish_Journal_of_Medical_Science_} +@string{IS = _IEEE_Intelligent_Systems_} +@string{ISBI = _IEEE_International_Symposium_on_Biomedical_Imaging_} +@string{ISMRM = _Annual_Meeting_of_the_International_Society_for_Magnetic_Resonance_in_Medicine_} +@string{ISSM = _International_Symposium_on_Mathematical_Morphology_} +@string{IUGJ = _International_Urogynecology_Journal_} +@string{IUJPFD = _International_Urogynecology_Journal_and_Pelvic_Floor_Dysfunction_} +@string{IUN = _International_Urology_and_Nephrology_} +@string{IUS = _IEEE_International_Ultrasonics_Symposium_} +@string{IVC = _Image_and_Vision_Computing_} +@string{Infection = _Infection_} +@string{JA = _Journal_of_Anatomy_} +@string{JAAD = _Journal_of_the_American_Academy_of_Dermatology_} +@string{JAAPOS = _Journal_of_the_American_Association_for_Pediatric_Ophthalmology_and_Strabismus_} +@string{JACC = _Journal_of_the_American_College_of_Cardiology_} +@string{JACCCI = _JACC_Cardiovascular_Imaging_} +@string{JACI = _Journal_of_Allergy_and_Clinical_Immunology_} +@string{JACM = _Journal_of_the_ACM_} +@string{JACMP = _Journal_of_Applied_Clinical_Medical_Physics_} +@string{JACOUSTSOCAM = _Journal_of_the_Acoustical_Society_of_America_} +@string{JACR 
= _Journal_of_the_American_College_of_Radiology_} +@string{JACS = _Journal_of_the_American_Chemical_Society_} +@string{JAIDS = _Journal_of_Acquired_Immune_Deficiency_Syndromes_} +@string{JAIR = _Journal_of_Artificial_Intelligence_Research_} +@string{JAMA = _Journal_of_the_American_Medical_Association_} +@string{JAMAO = _JAMA_Opthalmology_} +@string{JAMBOAFAM = _Journal_of_the_American_Board_of_Family_Practice_} +@string{JAMC = _Journal_of_Antimicrobial_Chemotherapy_} +@string{JAMCOLSUR = _Journal_of_the_American_College_of_Surgeons_} +@string{JAMIA = _Journal_of_the_American_Medical_Informatics_Association_} +@string{JAP = _Journal_of_Applied_Physiology_} +@string{JAPE = _Journal_of_Applied_Photographic_Engineering_} +@string{JAPSCI = _Journal_of_Applied_Polymer_Science_} +@string{JASA = _Journal_of_the_American_Statistical_Association_} +@string{JASEC = _Journal_of_the_American_Society_of_Echocardiography_} +@string{JASN = _Journal_of_the_American_Society_of_Nephrology_} +@string{JAT = _Journal_of_Atherosclerosis_and_Thrombosis_} +@string{JB = _Journal_of_Bronchology_} +@string{JBC = _Journal_of_Biological_Chemistry_} +@string{JBELSORAD = _Journal_of_the_Belgian_Society_of_Radiology_} +@string{JBHI = _IEEE_Journal_of_Biomedical_and_Health_Informatics_} +@string{JBIOMECH = _Journal_of_Biomechanics_} +@string{JBIOMEDENG = _Journal_of_Biomedical_Engineering_} +@string{JBIOMEDINF = _Journal_of_Biomedical_Informatics_} +@string{JBIOMEDOPT = _Journal_of_Biomedical_Optics_} +@string{JBIOMEDSCI = _Journal_of_Biomedical_Science_and_Engineering_} +@string{JBIOMOLNMR = _Journal_of_Biomolecular_NMR_} +@string{JBMRA = _Journal_of_Biomedical_Materials_Research_Part_A_} +@string{JBRASP = _Jornal_Brasileiro_de_Pneumologia_} +@string{JCAT = _Journal_of_Computer_Assisted_Tomography_} +@string{JCBFM = _Journal_of_Cerebral_Blood_Flow_and_Metabolism_} +@string{JCD = _Journal_of_Chronic_Disease_} +@string{JCELBCS = _Journal_of_Cellular_Biochemistry_Supplement_} +@string{JCELBIO = _Journal_of_Cell_Biology_} +@string{JCELPHY = _Journal_of_Cellular_Physiology_} +@string{JCELSCI = _Journal_of_Cell_Science_} +@string{JCGS = _Journal_of_Computational_and_Graphical_Statistics_} +@string{JCHEMPHYS = _Journal_of_Chemical_Physics_} +@string{JCHIMEDASS = _Journal_of_the_Chinese_Medical_Association_} +@string{JCHRMEDASSIND = _Journal_of_the_Christian_Medical_Association_of_India_} +@string{JCI = _the_Journal_for_Clinical_Investigation_} +@string{JCICS = _Journal_of_Chemical_Information_and_Computer_Sciences_} +@string{JCLIMIC = _Journal_of_Clinical_Microbiology_} +@string{JCLINEM = _Journal_of_Clinical_Endocrinology_and_Metabolism_} +@string{JCLINEPI = _Journal_of_Clinical_Epidemiology_} +@string{JCLINI = _Journal_of_Clinical_Investigation_} +@string{JCMC = _Journal_of_Clinical_Monitoring_and_Computing_} +@string{JCMFS = _Journal_of_Cranio-Maxillo-Facial_Surgery_} +@string{JCN = _Journal_of_Clinical_Neuroscience_} +@string{JCO = _Journal_of_Clinical_Oncology_} +@string{JCOGNEUSCI = _Journal_of_Cognitive_Neuroscience_} +@string{JCOLPHYSURPAK = _Journal_of_the_College_of_Physicians_and_Surgeons_Pakistan_} +@string{JCOMPSY = _Journal_of_Comparative_Psychology_} +@string{JCOMPUTPHYS = _Journal_of_Computational_Physics_} +@string{JCOMSYSSCI = _Journal_of_Computer_and_System_Sciences_} +@string{JCONBRA = _Journal_of_Contemporary_Brachytherapy_} +@string{JCONREL = _Journal_of_Controlled_Release_} +@string{JCP = _Journal_of_Clinical_Pathology_} +@string{JCSS = _Journal_of_Computer_and_System_Sciences_} +@string{JCVCT = 
_Journal_of_Cardiovascular_Computed_Tomography_} +@string{JCVMR = _Journal_of_Cardiovascular_Magnetic_Resonance_} +@string{JCVPHA = _Journal_of_Cardiovascular_Pharmacology_} +@string{JCVR = _Journal_of_Cardiovascular_Risk_} +@string{JCVS = _Journal_of_Cardiovascular_Surgery_} +@string{JDAISCI = _Journal_of_Dairy_Science_} +@string{JDEN = _Journal_of_Dentistry_} +@string{JDI = _Journal_of_Digital_Imaging_} +@string{JDIACOM = _Journal_of_Diabetes_and_its_Complications_} +@string{JECH = _Journal_of_Epidemiology_and_Community_Health_} +@string{JECP = _Journal_of_Evaluation_in_Clinical_Practice_} +@string{JEDURES = _Journal_of_Educational_Research_} +@string{JEI = _Journal_of_Electronic_Imaging_} +@string{JEMATH = _Journal_of_Engineering_Mathematics_} +@string{JEMED = _Journal_of_Emergency_Medicine_} +@string{JET = _Journal_of_Endovascular_Therapy_} +@string{JEU = _Journal_of_Endourology_} +@string{JEXPMED = _Journal_of_Experimental_Medicine_} +@string{JFS = _Journal_of_Forensic_Sciences_} +@string{JGAA = _Journal_of_Graph_Algorithms_and_Applications_} +@string{JGT = _Journal_of_Graphics_Tools_} +@string{JHEP = _Journal_of_Hepatology_} +@string{JHOSPI = _Journal_of_Hospital_Infection_} +@string{JHS = _Journal_of_Hand_Surgery_} +@string{JHVD = _Journal_of_Heart_Valve_Disease_} +@string{JHYPT = _Journal_of_Hypertension_} +@string{JI = _Journal_of_Immunology_} +@string{JIB = _Journal_of_Integrative_Bioinformatics_} +@string{JIC = _Journal_of_Interventional_Cardiology_} +@string{JIM = _Journal_of_Internal_Medicine_} +@string{JIMDR = _JIMD_Reports_} +@string{JINF = _Journal_of_Infectious_Diseases_} +@string{JINHMETDIS = _Journal_of_Inherited_Metabolic_Disease_} +@string{JINSURMED = _Journal_of_Insurance_Medicine_} +@string{JINTMEDRES = _Journal_of_International_Medical_Research_} +@string{JINVC = _Journal_of_Invasive_Cardiology_} +@string{JIS = _Journal_of_Intelligent_Systems_} +@string{JIST = _Journal_of_Imaging_Science_and_Technology_} +@string{JJCO = _Japanese_Journal_of_Clinical_Oncology_} +@string{JJID = _Japanese_Journal_of_Infectious_Diseases_} +@string{JJR = _Japanese_Journal_of_Radiology_} +@string{JKORMEDSCI = _Journal_of_Korean_Medical_Science_} +@string{JM = _Journal_of_Microscopy_} +@string{JMA = _Journal_of_Mathematics_and_the_Arts_} +@string{JMATSCIMATMED = _Journal_of_Materials_Science_Materials_in_Medicine_} +@string{JMBE = _Journal_of_Medical_and_Biological_Engineering_} +@string{JMCR = _Journal_of_Medical_Case_Reports_} +@string{JMEDCHE = _Journal_of_Medicinal_Chemistry_} +@string{JMEDULT = _Journal_of_Medical_Ultrasonics_} +@string{JMI = _Journal_of_Medical_Imaging_} +@string{JMICROENC = _Journal_of_Microencapsulation_} +@string{JMIRO = _Journal_of_Medical_Imaging_and_Radiation_Oncology_} +@string{JMIV = _Journal_of_Mathematical_Imaging_and_Vision_} +@string{JMLR = _Journal_of_Machine_Learning_Research_} +@string{JMOLBIOL = _Journal_of_Molecular_Biology_} +@string{JMOLCELCAR = _Journal_of_Molecular_and_Cellular_Cardiology_} +@string{JMPSY = _Journal_of_Mathematical_Psychology_} +@string{JMR = _Journal_of_Magnetic_Resonance_} +@string{JMRI = _Journal_of_Magnetic_Resonance_Imaging_} +@string{JMSCR = _Journal_of_Medical_Screening_} +@string{JMSS = _Journal_of_Medical_Signals_and_Sensors_} +@string{JMSYS = _Journal_of_Medical_Systems_} +@string{JN = _Journal_of_Neurology_} +@string{JNANO = _Journal_of_Nanoscience_and_Nanotechnology_} +@string{JNC = _Journal_of_Nuclear_Cardiology_} +@string{JNCCN = _Journal_of_the_National_Comprehensive_Cancer_Network_} +@string{JNCI = 
_Journal_of_the_National_Cancer_Institute_} +@string{JNEPH = _Journal_of_Nephrology_} +@string{JNEUCHE = _Journal_of_Neurochemistry_} +@string{JNEUDEGDIS = _Journal_of_Neurodegenerative_Diseases_} +@string{JNEUIMA = _Journal_of_Neuroimaging_} +@string{JNEUIMM = _Journal_of_Neuroimmunology_} +@string{JNEUINTSUR = _Journal_of_Neurointerventional_Surgery_} +@string{JNEUONC = _Journal_of_Neurooncology_} +@string{JNEURAD = _Journal_of_Neuroradiology_} +@string{JNEUSCI = _Journal_of_the_Neurological_Sciences_} +@string{JNM = _Journal_of_Nuclear_Medicine_} +@string{JNNP = _Journal_of_Neurology_Neurosurgery_and_Psychiatry_} +@string{JNSCI = _Journal_of_Neuroscience_} +@string{JNSCIM = _Journal_of_Neuroscience_Methods_} +@string{JNSUR = _Journal_of_Neurosurgery_} +@string{JNTRA = _Journal_of_Neurotrauma_} +@string{JNUCMEDTEC = _Journal_of_Nuclear_Medicine_Technology_} +@string{JOB = _Journal_of_Optics_B_} +@string{JOE = _IEEE_Journal_of_Oceanic_Engineering_} +@string{JOEM = _Journal_of_Occupational_and_Environmental_Medicine_} +@string{JOH = _Journal_of_Occupational_Health_} +@string{JOMT = _Journal_of_Occupational_Medicine_and_Toxicology_} +@string{JOPH = _Journal_of_Ophthalmology_} +@string{JOSAA = _Journal_of_the_Optical_Society_of_America_A_} +@string{JOSAB = _Journal_of_the_Optical_Society_of_America_B_} +@string{JOV = _Journal_of_Vision_} +@string{JOVE = _JoVE_} +@string{JPAT = _Journal_of_Pathology_} +@string{JPATINF = _Journal_of_Pathology_Informatics_} +@string{JPED = _Journal_of_Pediatrics_} +@string{JPEDGAS = _Journal_of_Pediatric_Gastroenterology_and_Nutrition_} +@string{JPEDHEM = _Journal_of_Pediatric_Hematology_Oncology_} +@string{JPEDSUR = _Journal_of_Pediatric_Surgery_} +@string{JPEDURO = _Journal_of_Pediatric_Urology_} +@string{JPGM = _Journal_of_Postgraduate_Medicine_} +@string{JPHYSIOL = _Journal_of_Physiology_} +@string{JPS = _Journal_of_Physiological_Sciences_} +@string{JPSYRES = _Journal_of_Psychiatric_Research_} +@string{JR = _Journal_de_Radiologie_} +@string{JRH = _Journal_of_Rheumatology_} +@string{JRSS = _Journal_of_the_Royal_Statistical_Society_} +@string{JRSSB = _Journal_of_the_Royal_Statistical_Society_B_} +@string{JSB = _Journal_of_Structural_Biology_} +@string{JSCVD = _Journal_of_Stroke_and_Cerebrovascular_Diseases_} +@string{JSD = _Journal_of_Spinal_Disorders_} +@string{JSDT = _Journal_of_Spinal_Disorders_and_Techniques_} +@string{JSO = _Journal_of_Surgical_Oncology_} +@string{JSTROKE = _Journal_of_Stroke_} +@string{JSTSP = _IEEE_Journal_of_Selected_Topics_in_Signal_Processing_} +@string{JSURRES = _Journal_of_Surgical_Research_} +@string{JT = _Journal_of_Trauma_} +@string{JTCS = _Journal_of_Thoracic_and_Cardiovascular_Surgery_} +@string{JTD = _Journal_of_Thoracic_Disease_} +@string{JTHRHAE = _Journal_of_Thrombosis_and_Haemostasis_} +@string{JTI = _Journal_of_Thoracic_Imaging_} +@string{JTISENGREGMED = _Journal_of_Tissue_Engineering_and_Regenerative_Medicine_} +@string{JTO = _Journal_of_Thoracic_Oncology_} +@string{JTRAMED = _Journal_of_Translational_Medicine_} +@string{JU = _Journal_of_Urology_} +@string{JUM = _Journal_of_Ultrasound_in_Medicine_} +@string{JVA = _Journal_of_Vascular_Access_} +@string{JVCIR = _Journal_of_Visual_Communication_and_Image_Representation_} +@string{JVIR = _Journal_of_Vascular_and_Interventional_Radiology_} +@string{JVIROL = _Journal_of_Virology_} +@string{JVOICE = _Journal_of_Voice_} +@string{JVS = _Journal_of_Vascular_Surgery_} +@string{JWH = _Journal_of_Women_s_Health_} +@string{KI = _Kidney_International_} +@string{KITS = 
_Kitware_Source_} +@string{KJR = _Korean_Journal_of_Radiology_} +@string{KLINF = _Klinische_Fysica_} +@string{Knee = _Knee_} +@string{LABINV = _Laboratory_Investigation_} +@string{LANCETGH = _Lancet_Global_Health_} +@string{LANCETID = _Lancet_Infectious_Diseases_} +@string{LANCETN = _Lancet_Neurology_} +@string{LANCETO = _Lancet_Oncology_} +@string{LANCETRM = _Lancet_Respiratory_Medicine_} +@string{LDA = _Lifetime_Data_Analysis_} +@string{LHD = _Lipids_in_Health_and_Disease_} +@string{LNAI = _Lecture_Notes_in_Artificial_Intelligence_} +@string{LNCS = _Lecture_Notes_in_Computer_Science_} +@string{LUNGC = _Lung_Cancer_} +@string{LYMPHRESBIO = _Lymphatic_Research_and_Biology_} +@string{Lancet = _Lancet_} +@string{Laryngoscope = _Laryngoscope_} +@string{Lung = _Lung_} +@string{MACD = _Monaldi_Archives_for_Chest_Disease_} +@string{MAGMA = _MAGMA_} +@string{MBEC = _Medical_and_Biological_Engineering_and_Computing_} +@string{MCP = _Mayo_Clinic_Proceedings_} +@string{MD = _Medical_Dosimetry_} +@string{MDM = _Medical_Decision_Making_} +@string{MEDCNA = _Medical_Clinics_of_North_America_} +@string{MEDSCIMON = _Medical_Science_Monitor_} +@string{MEDUS = _Medical_Ultrasonography_} +@string{MEP = _Medical_Engineering_and_Physics_} +@string{MGR = _Magnesium_Research_} +@string{MI = _Medical_Imaging_} +@string{MIA = _Medical_Image_Analysis_} +@string{MICAD = _Medical_Imaging_Computer-Aided_Diagnosis_} +@string{MICCAI = _Medical_Image_Computing_and_Computer-Assisted_Intervention_} +@string{MIDL = _Medical_Imaging_with_Deep_Learning_} +@string{MIM = _Methods_of_Information_in_Medicine_} +@string{MINF = _Medical_Informatics_} +@string{MIT = _Medical_Imaging_Technology_} +@string{MJA = _Medical_Journal_of_Australia_} +@string{ML = _Machine_Learning_} +@string{MM = _Medica_Mundi_} +@string{MMB = _Methods_in_Molecular_Biology_} +@string{MMM = _Methods_in_Molecular_Medicine_} +@string{MMS = _Multiscale_Modeling_and_Simulation_} +@string{MN = _Muscle_and_Nerve_} +@string{MODP = _Modern_Pathology_} +@string{MOLC = _Molecular_Cancer_} +@string{MOLCT = _Molecular_Cancer_Therapeutics_} +@string{MOLI = _Molecular_Imaging_} +@string{MOLIB = _Molecular_Imaging_and_Biology_} +@string{MOLPHA = _Molecular_Pharmaceutics_} +@string{MOLVIS = _Molecular_Vision_} +@string{MP = _Medical_Physics_} +@string{MRI = _Magnetic_Resonance_Imaging_} +@string{MRICNA = _Magnetic_Resonance_Imaging_Clinics_of_North_America_} +@string{MRM = _Magnetic_Resonance_in_Medicine_} +@string{MRMPBM = _Magnetic_Resonance_Materials_in_Physics_Biology_and_Medicine_} +@string{MRMS = _Magnetic_Resonance_in_Medical_Sciences_} +@string{MRP = _Medical_Radiography_and_Photography_} +@string{MVA = _Machine_Vision_and_Applications_} +@string{Menopause = _Menopause_} +@string{Mind = _Mind_} +@string{Mitochondrion = _Mitochondrion_} +@string{NATCAN = _Nature_Cancer_} +@string{NATCOM = _Nature_Communications_} +@string{NATMED = _Nature_Medicine_} +@string{NATREVCAN = _Nature_Reviews_Cancer_} +@string{NATREVCLIONC = _Nature_Reviews_Clinical_Oncology_} +@string{NATREVDRUDIS = _Nature_Reviews_Drug_Discovery_} +@string{NATREVMICBIO = _Nature_Reviews_Microbiology_} +@string{NATREVNEP = _Nature_Reviews_Nephrology_} +@string{NATREVNEU = _Nature_Reviews_Neurology_} +@string{NATREVU = _Nature_Reviews_Urology_} +@string{NATSCIREP = _Nature_Scientific_Reports_} +@string{NATUREBT = _Nature_Biotechnology_} +@string{NBE = _Nature_Biomedical_Engineering_} +@string{NDT = _Nephrology_Dialysis_Transplantation_} +@string{NEJM = _New_England_Journal_of_Medicine_} +@string{NETHHJ = 
_Netherlands_Heart_Journal_} +@string{NETHJM = _Netherlands_Journal_of_Medicine_} +@string{NEUBA = _Neurobiology_of_Aging_} +@string{NEUBD = _Neurobiology_of_Disease_} +@string{NEUC = _Neural_Computation_} +@string{NEUCC = _Neurocritical_Care_} +@string{NEUCI = _Neurochemistry_International_} +@string{NEUCS = _Neural_Computing_Surveys_} +@string{NEUI = _NeuroImage_} +@string{NEUICL = _NeuroImage_Clinical_} +@string{NEUN = _Neural_Networks_} +@string{NEUONC = _Neuro-oncology_} +@string{NEURES = _Neurological_Research_} +@string{NEURNR = _Neurorehabilitation_and_Neural_Repair_} +@string{NEURUUD = _Neurourology_and_Urodynamics_} +@string{NEUSF = _Neurosurgical_Focus_} +@string{NEUSRC = _Neuroscience_Research_Communications_} +@string{NIMPR-A = _Nuclear_Instruments_and_Methods_in_Physics_Research_Section_A_Accelerators_Spectrometers_Detectors_and_Associated_Equipment_} +@string{NJMS = _Nagoya_Journal_of_Medical_Science_} +@string{NMD = _Neuromuscular_Disorders_} +@string{NMRB = _NMR_in_Biomedicine_} +@string{NPJBC = _Nature_Partner_Journals_Breast_Cancer_} +@string{NRU = _Nature_Reviews_Urology_} +@string{NTVG = _Nederlands_Tijdschrift_voor_Geneeskunde_} +@string{NUCMB = _Nuclear_Medicine_and_Biology_} +@string{NUCMC = _Nuclear_Medicine_Communications_} +@string{Nanobiotechnology = _Nanobiotechnology_} +@string{Nanotechnology = _Nanotechnology_} +@string{Nature = _Nature_} +@string{Neoplasia = _Neoplasia_} +@string{Nervenarzt = _Nervenarzt_} +@string{Neurocomputing = _Neurocomputing_} +@string{Neurology = _Neurology_} +@string{Neuron = _Neuron_} +@string{Neuropediatrics = _Neuropediatrics_} +@string{Neuroradiology = _Neuroradiology_} +@string{Neuroreport = _Neuroreport_} +@string{Neurosurgery = _Neurosurgery_} +@string{Nuklearmedizin = _Nuklearmedizin_} +@string{OE = _Optics_Express_} +@string{OEM = _Occupational_and_Environmental_Medicine_} +@string{OMS = _Optimization_Methods_and_Software_} +@string{OO = _Oral_Oncology_} +@string{OPTC = _Optics_Communications_} +@string{OPTE = _Optical_Engineering_} +@string{OTON = _Otology_and_Neurotology_} +@string{OVS = _Optometry_and_Vision_Science_} +@string{Oncoimmunology = _Oncoimmunology_} +@string{Oncologist = _Oncologist_} +@string{Oncology = _Oncology_} +@string{Oncotarget = _Oncotarget_} +@string{Ophthalmologica = _Ophthalmologica_} +@string{Ophthalmology = _Ophthalmology_} +@string{PAA = _Pattern_Analysis_and_Applications_} +@string{PARC = _Parallel_Computing_} +@string{PATHOB = _Pathobiology_} +@string{PATS = _Proceedings_of_the_American_Thoracic_Society_} +@string{PBMB = _Progress_in_Biophysics_and_Molecular_Biology_} +@string{PCPD = _Prostate_Cancer_and_Prostatic_Diseases_} +@string{PEDAI = _Pediatric_Allergy_and_Immunology_} +@string{PEDBC = _Pediatric_Blood_and_Cancer_} +@string{PEDCAR = _Pediatric_Cardiology_} +@string{PEDCCM = _Pediatric_Critical_Care_Medicine_} +@string{PEDCNA = _Pediatric_Clinics_of_North_America_} +@string{PEDEC = _Pediatric_Emergency_Care_} +@string{PEDNEP = _Pediatric_Nephrology_} +@string{PEDNEU = _Pediatric_Neurology_} +@string{PEDPUL = _Pediatric_Pulmonology_} +@string{PEDRAD = _Pediatric_Radiology_} +@string{PEDRES = _Pediatric_Research_} +@string{PHA = _Public_Health_Action_} +@string{PHYSMED = _Physica_Medica_} +@string{PHYSREVA = _Physical_Review_A_} +@string{PHYSREVD = _Physical_Review_D_} +@string{PICIP = _Proceedings_of_the_International_Conference_on_Image_Processing_} +@string{PICML = _Proceedings_of_the_International_Conference_on_Machine_Learning_} +@string{PICPR = 
_Proceedings_of_the_International_Conference_on_Pattern_Recognition_} +@string{PIEEE = _Proceedings_of_the_IEEE_} +@string{PIMEH = _Proceedings_of_the_Institution_of_Mechanical_Engineers_Part_H_Journal_of_Engineering_in_Medicine_} +@string{PLOSBIOL = _PLoS_Biology_} +@string{PLOSMED = _PLoS_Medicine_} +@string{PLOSONE = _PLoS_One_} +@string{PMB = _Physics_in_Medicine_and_Biology_} +@string{PNAS = _Proceedings_of_the_National_Academy_of_Sciences_of_the_United_States_of_America_} +@string{PR = _Pattern_Recognition_} +@string{PRER = _Progress_in_Retinal_and_Eye_Research_} +@string{PRIA = _Pattern_Recognition_and_Image_Analysis_} +@string{PRJ = _PeerJ_} +@string{PRL = _Pattern_Recognition_Letters_} +@string{PROCBS = _Proceedings_Biological_Sciences_} +@string{PSPOIE = _Proceedings_of_the_Society_of_Photo-Optical_Instrumentation_Engineers_} +@string{PSR = _Photosynthesis_Research_} +@string{PSYB = _Psychological_Bulletin_} +@string{PSYCHIRES = _Psychiatry_Research_} +@string{PSYM = _Psychological_Methods_} +@string{PSYR = _Psychological_Research_} +@string{Pediatrics = _Pediatrics_} +@string{Peptides = _Peptides_} +@string{Perception = _Perception_} +@string{Pneumologie = _Pneumologie_} +@string{Prostate = _Prostate_} +@string{Proteins = _Proteins_} +@string{QIMS = _Quantitative_Imaging_in_Medicine_and_Surgery_} +@string{QJM = _QJM_Monthly_Journal_of_the_Association_of_Physicians_} +@string{QJNUCMED = _Quarterly_Journal_of_Nuclear_Medicine_} +@string{QJNUCMEDMOLIMA = _Quarterly_Journal_of_Nuclear_Medicine_and_Molecular_Imaging_} +@string{RADBRA = _Radiologia_Brasileira_} +@string{RADD = _RadiologenDagen_} +@string{RADIATRES = _Radiation_Research_} +@string{RADIOLMED = _La_Radiologia_Medica_} +@string{RADIOLONC = _Radiology_and_Oncology_} +@string{RADM = _Radiation_Medicine_} +@string{RADONC = _Radiation_Oncology_} +@string{RAI = _Radiology_Artificial_Intelligence_} +@string{RAPM = _Regional_Anesthesia_and_Pain_Medicine_} +@string{RBME = _IEEE_Reviews_in_Biomedical_Engineering_} +@string{RCNA = _Radiologic_Clinics_of_North_America_} +@string{RECRCR = _Recent_Results_in_Cancer_Research_} +@string{REGPEP = _Regulatory_Peptides_} +@string{REGTOXPHA = _Regulatory_Toxicology_and_Pharmacology_} +@string{REPSCI = _Reproductive_Sciences_} +@string{REPTOX = _Reproductive_Toxicology_} +@string{RESPC = _Respiratory_Care_} +@string{RESPM = _Respiratory_Medicine_} +@string{RESPR = _Respiratory_Research_} +@string{REVU = _Reviews_in_Urology_} +@string{RHEINT = _Rheumatology_International_} +@string{RHEUMADV = _Rheumatology_Advances_in_Practice_} +@string{RHOISLMED = _Rhode_Island_Medicine_} +@string{RIMTSP = _Revista_do_Instituto_de_Medicina_Tropical_de_Sao_Paulo_} +@string{RM = _Respiratory_Medicine_} +@string{RPD = _Radiation_Protection_Dosimetry_} +@string{RPT = _Radiological_Physics_and_Technology_} +@string{RSNA = _Annual_Meeting_of_the_Radiological_Society_of_North_America_} +@string{RTO = _Radiotherapy_and_Oncology_} +@string{Radiographics = _Radiographics_} +@string{Radiologe = _Radiologe_} +@string{Radiology = _Radiology_} +@string{Respiration = _Respiration_} +@string{Respirology = _Respirology_} +@string{Retina = _Retina_} +@string{Rheumatology = _Rheumatology_} +@string{Robotica = _Robotica_} +@string{Rofo = _RoFo_} +@string{SCACARJ = _Scandinavian_Cardiovascular_Journal_} +@string{SCAJGAS = _Scandinavian_Journal_of_Gastroenterology_} +@string{SCAJINFDIS = _Scandinavian_Journal_of_Infectious_Diseases_} +@string{SCC = _Supportive_Care_in_Cancer_} +@string{SCHBUL = _Schizophrenia_Bulletin_} 
+@string{SCIENCETM = _Science_Translational_Medicine_} +@string{SCIREP = _Scientific_Reports_} +@string{SEMCARTVA = _Seminars_in_Cardiothoracic_and_Vascular_Anesthesia_} +@string{SEMIC = _Seminars_in_Interventional_Cardiology_} +@string{SEMNEU = _Seminars_in_Neurology_} +@string{SEMNM = _Seminars_in_Nuclear_Medicine_} +@string{SEMR = _Seminars_in_Roentgenology_} +@string{SEMRESCCM = _Seminars_in_Respiratory_and_Critical_Care_Medicine_} +@string{SEMSURONC = _Seminars_in_Surgical_Oncology_} +@string{SEMTHOCVS = _Seminars_in_Thoracic_and_Cardiovascular_Surgery_} +@string{SEMUCM = _Seminars_in_Ultrasound_CT_and_MR_} +@string{SEMVMSSA = _Seminars_in_Veterinary_Medicine_and_Surgery_Small_Animal_} +@string{SEMVS = _Seminars_in_Vascular_Surgery_} +@string{SHTI = _Studies_in_Health_Technology_and_Informatics_} +@string{SIAMJCOM = _SIAM_Journal_on_Computing_} +@string{SIAMJMAA = _SIAM_Journal_on_Matrix_Analysis_and_Applications_} +@string{SIAMJNUMA = _SIAM_Journal_on_Numerical_Analysis_} +@string{SIAMJSCIC = _SIAM_Journal_on_Scientific_Computing_} +@string{SIGPRO = _Signal_Processing_} +@string{SKER = _Skeletal_Radiology_} +@string{SKIRESTEC = _Skin_Research_and_Technology_} +@string{SM = _Statistics_in_Medicine_} +@string{SMMR = _Statistical_Methods_in_Medical_Research_} +@string{SOCSCIMED = _Social_Science_and_Medicine_} +@string{SOUAFRMEDJ = _South_African_Medical_Journal_} +@string{SP = _Signal_Processing_} +@string{SPIE = _Proceedings_of_the_SPIE_} +@string{SPM = _IEEE_Signal_Processing_Magazine_} +@string{SRA = _Surgical_and_Radiologic_Anatomy_} +@string{STACOM = _Statistics_and_Computing_} +@string{STASUR = _Statistics_Surveys_} +@string{STRONK = _Strahlentherapie_und_Onkologie_} +@string{SUBCELBIO = _Subcellular_Biochemistry_} +@string{SUROPH = _Survey_of_Ophthalmology_} +@string{SYMCULSCI = _Symmetry_Culture_and_Science_} +@string{Sarcoma = _Sarcoma_} +@string{Science = _Science_} +@string{Scientometrics = _Scientometrics_} +@string{Sensors = _Sensors_} +@string{Small = _Small_} +@string{Spine = _Spine_} +@string{Stroke = _Stroke_} +@string{Surgery = _Surgery_} +@string{Symmetry = _Symmetry_} +@string{TAI = _IEEE_Transactions_on_Artificial_Intelligence_} +@string{TARONC = _Targeted_Oncology_} +@string{TASSP = _IEEE_Transactions_on_Acoustics_Speech_and_Signal_Processing_} +@string{TBCAS = _IEEE_Transactions_on_Biomedical_Circuits_and_Systems_} +@string{TBME = _IEEE_Transactions_on_Biomedical_Engineering_} +@string{TC = _IEEE_Transactions_on_Computers_} +@string{TCOMM = _IEEE_Transactions_on_Communications_} +@string{TCR = _WSEAS_Transactions_on_Computer_Research_} +@string{TCS = _Theory_of_Computing_Systems_} +@string{TECHCRT = _Technology_in_Cancer_Research_and_Treatment_} +@string{TECHHC = _Technology_and_Health_Care_} +@string{TEH = _Telemedicine_and_e-health_} +@string{TEXHIJ = _Texas_Heart_Institute_Journal_} +@string{TFS = _IEEE_Transactions_on_Fuzzy_Systems_} +@string{THEDRUMON = _Therapeutic_Drug_Monitoring_} +@string{THOCVS = _Thoracic_and_Cardiovascular_Surgeon_} +@string{THOSC = _Thoracic_Surgery_Clinics_} +@string{THRHAE = _Thrombosis_and_Haemostasis_} +@string{THRRES = _Thrombosis_Research_} +@string{TIJPM = _The_Internet_Journal_of_Pulmonary_Medicine_} +@string{TIP = _IEEE_Transactions_on_Image_Processing_} +@string{TISENG = _Tissue_Engineering_} +@string{TISENGB = _Tissue_Engineering_Part_B_Reviews_} +@string{TISENGC = _Tissue_Engineering_Part_C_Methods_} +@string{TIT = _IEEE_Transactions_on_Information_Theory_} +@string{TITB = 
_IEEE_Transactions_on_Information_Technology_in_Biomedicine_} +@string{TJEM = _Tohoku_Journal_of_Experimental_Medicine_} +@string{TKDE = _IEEE_Transactions_on_Knowledge_and_Data_Engineering_} +@string{TLCR = _Translational_Lung_Cancer_Research_} +@string{TMI = _IEEE_Transactions_on_Medical_Imaging_} +@string{TMIH = _Tropical_Medicine_and_International_Health_} +@string{TMM = _IEEE_Transactions_on_Multimedia_} +@string{TMRI = _Topics_in_Magnetic_Resonance_Imaging_} +@string{TNN = _IEEE_Transactions_on_Neural_Networks_} +@string{TNNLS = _IEEE_Transactions_on_Neural_Networks_and_Learning_Systems_} +@string{TNS = _IEEE_Transactions_on_Nuclear_Science_} +@string{TOXAP = _Toxicology_and_Applied_Pharmacology_} +@string{TP = _Toxicologic_Pathology_} +@string{TPAMI = _IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_} +@string{TRAMON = _Trauma_Monthly_} +@string{TRAP = _Transplantation_Proceedings_} +@string{TREBT = _Trends_in_Biotechnology_} +@string{TRECVM = _Trends_in_Cardiovascular_Medicine_} +@string{TREEM = _Trends_in_Endocrinology_and_Metabolism_} +@string{TREMM = _Trends_in_Molecular_Medicine_} +@string{TROD = _Tropical_Doctor_} +@string{TRSTMH = _Transactions_of_the_Royal_Society_of_Tropical_Medicine_and_Hygiene_} +@string{TSAP = _IEEE_Transactions_on_Speech_and_Audio_Processing_} +@string{TSMC = _IEEE_Transactions_on_Systems_Man_and_Cybernetics_} +@string{TSP = _IEEE_Transactions_on_Signal_Processing_} +@string{TUBLUNDIS = _Tubercle_and_Lung_Disease_} +@string{TUFF = _IEEE_Transactions_on_Ultrasonics_Ferroelectrics_and_Frequency_Control_} +@string{TUMBIO = _Tumor_Biology_} +@string{TUR = _Tijdschrift_voor_Urologie_} +@string{TVCG = _IEEE_Transactions_on_Visualization_and_Computer_Graphics_} +@string{Technometrics = _Technometrics_} +@string{Thorax = _Thorax_} +@string{Transfusion = _Transfusion_} +@string{Trials = _Trials_} +@string{Tuberculosis = _Tuberculosis_} +@string{Tumori = _Tumori_} +@string{UI = _Ultrasonic_Imaging_} +@string{UJMS = _Upsala_Journal_of_Medical_Sciences_} +@string{UMB = _Ultrasound_in_Medicine_and_Biology_} +@string{UOG = _Ultrasound_in_Obstetrics_and_Gynecology_} +@string{UPDCANTHE = _Update_on_Cancer_Therapeutics_} +@string{UROCLINA = _Urologic_Clinics_of_North_America_} +@string{UROONC = _Urologic_Oncology_} +@string{URORES = _Urological_Research_} +@string{USM = _Ultraschall_in_der_Medizin_} +@string{Ultramicroscopy = _Ultramicroscopy_} +@string{Ultrasonics = _Ultrasonics_} +@string{Unfallchirurg = _Unfallchirurg_} +@string{Urologe = _Urologe_} +@string{Urology = _Urology_} +@string{VETJ = _Veterinary_Journal_} +@string{VIRA = _Virchows_Archiv_} +@string{VISCOM = _Visual_Computer_} +@string{VISR = _Vision_Research_} +@string{VOPONK = _Voprosy_Onkologii_} +@string{VRU = _Veterinary_Radiology_and_Ultrasound_} +@string{Vascular = _Vascular_} +@string{WIENKW = _Wiener_Klinische_Wochenschrift_} +@string{WIENKWS = _Wiener_Klinische_Wochenschrift_Supplementum_} +@string{WIENMW = _Wiener_Medizinische_Wochenschrift_} +@string{WIENMWS = _Wiener_Medizinische_Wochenschrift_Supplementum_} +@string{WIRSBM = _Wiley_Interdisciplinary_Reviews_Systems_Biology_and_Medicine_} +@string{WJDIA = _World_Journal_of_Diabetes_} +@string{WJGE = _World_Journal_of_Gastroenterology_} +@string{WJM = _Western_Journal_of_Medicine_} +@string{WJNUCMED = _World_Journal_of_Nuclear_Medicine_} +@string{WJR = _World_Journal_of_Radiology_} +@string{WJSURG = _World_Journal_of_Surgery_} +@string{WJU = _World_Journal_of_Urology_} +@string{WONEU = _World_Neurosurgery_} +@string{WOUREPREG = 
_Wound_Repair_and_Regeneration_} +@string{ZEIAVW = _Zeitschrift_fuer_Astronomie_und_verwandte_Wissenschaften_} +@string{ZEIMP = _Zeitschrift_fuer_Medizinische_Physik_} +@string{ZENNEUCHI = _Zentralblatt_fuer_Neurochirurgie_} +@string{ZKARD = _Zeitschrift_fuer_Kardiologie_} + +@article{Abas05, + author = {D. Ab\'{a}solo and R. Hornero and P. Espino and J. Poza and C. I. S\'{a}nchez and R. de la Rosa}, + title = {Analysis of regularity in the {EEG} background activity of {A}lzheimer's disease patients with {A}pproximate {E}ntropy}, + journal = CLINNP, + year = {2005}, + volume = {116}, + pages = {1826--1834}, + doi = {10.1016/j.clinph.2005.04.001}, + abstract = {{OBJECTIVE}: {T}he aim of this study was to analyse the regularity of the {EEG} background activity of {A}lzheimer's disease ({AD}) patients to test the hypothesis that the irregularity of the {AD} patients' {EEG} is lower than that of age-matched controls. {METHODS}: {W}e recorded the {EEG} from 19 scalp electrodes in 10 {AD} patients and 8 age-matched controls and estimated the {A}pproximate {E}ntropy ({A}p{E}n). {A}p{E}n is a non-linear statistic that can be used to quantify the irregularity of a time series. {L}arger values correspond to more complexity or irregularity. {A} spectral analysis was also performed. {RESULTS}: {A}p{E}n was significantly lower in the {AD} patients at electrodes {P}3 and {P}4 ({P} < 0.01), indicating a decrease of irregularity. {W}e obtained 70% sensitivity and 100% specificity at {P}3, and 80\% sensitivity and 75% specificity at {P}4. {R}esults seemed to be complementary to spectral analysis. {CONCLUSIONS}: {T}he decreased irregularity found in the {EEG} of {AD} patients in the parietal region leads us to think that {EEG} analysis with {A}p{E}n could be a useful tool to increase our insight into brain dysfunction in {AD}. {H}owever, caution should be applied due to the small sample size. {SIGNIFICANCE}: {T}his article represents a first step in demonstrating the feasibility of {A}p{E}n for recognition of {EEG} changes in {AD}.}, + file = {Abas05.pdf:pdf\\Abas05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {8}, + pmid = {15979403}, + month = {8}, + gsid = {6548562133891070710}, + gscites = {233}, +} + +@inproceedings{Abas05a, + author = {D. Ab\'{a}solo and C. G\'{o}mez and J. Poza and M. Garc\'{i}a and C. I. S\'{a}nchez and M. 
L\'{o}pez}, + title = {{EEG} background activity analysis in {A}lzheimer's disease patients with sample entropy}, + booktitle = {International Conference on Computational Bioengineering}, + year = {2005}, + pages = {1067--1076}, + optnote = {DIAG, RADIOLOGY}, + gsid = {15571196371711494667}, + gscites = {3}, +} + +@article{Abel19, + author = {Abels, Esther and Pantanowitz, Liron and Aeffner, Famke and Zarella, Mark D and van der Laak, Jeroen and Bui, Marilyn M and Vemuri, Venkata Np and Parwani, Anil V and Gibbs, Jeff and Agosto-Arroyo, Emmanuel and Beck, Andrew H and Kozlowski, Cleopatra}, + title = {Computational pathology definitions, best practices, and recommendations for regulatory guidance: a white paper from the Digital Pathology Association}, + journal = JPAT, + year = {2019}, + volume = {249}, + issue = {3}, + month = {7}, + pages = {286-294}, + doi = {10.1002/path.5331}, + abstract = {In this white paper, experts from the Digital Pathology Association (DPA) define terminology and concepts in the emerging field of computational pathology, with a focus on its application to histology images analyzed together with their associated patient data to extract information. This review offers a historical perspective and describes the potential clinical benefits from research and applications in this field, as well as significant obstacles to adoption. Best practices for implementing computational pathology workflows are presented. These include infrastructure considerations, acquisition of training data, quality assessments, as well as regulatory, ethical, and cyber-security concerns. Recommendations are provided for regulators, vendors, and computational pathology practitioners in order to facilitate progress in the field. (c) 2019 The Authors. The Journal of Pathology published by John Wiley & Sons Ltd on behalf of Pathological Society of Great Britain and Ireland.}, + file = {Abel19.pdf:pdf\\Abel19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31355445}, + gsid = {17130199197824110899}, + gscites = {24}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/215198}, +} + +@article{Abra08a, + author = {M. D. Abr\`amoff and M. Niemeijer and M. S. A. Suttorp-Schulten and M. A. Viergever and S. R. Russell and B. van Ginneken}, + title = {Evaluation of a system for automatic detection of diabetic retinopathy from color fundus photographs in a large population of patients with diabetes}, + journal = DC, + year = {2008}, + volume = {31}, + number = {2}, + pages = {193--198}, + doi = {10.2337/dc07-1312}, + abstract = {{OBJECTIVE} {T}o evaluate the performance of a system for automated detection of diabetic retinopathy in digital retinal photographs, built from published algorithms, in a large, representative, screening population. {RESEARCH} {DESIGN} {AND} {METHODS} {W}e conducted a retrospective analysis of 10,000 consecutive patient visits, specifically exams (four retinal photographs, two left and two right) from 5,692 unique patients from the {E}ye{C}heck diabetic retinopathy screening project imaged with three types of cameras at 10 centers. {I}nclusion criteria included no previous diagnosis of diabetic retinopathy, no previous visit to ophthalmologist for dilated eye exam, and both eyes photographed. {O}ne of three retinal specialists evaluated each exam as unacceptable quality, no referable retinopathy, or referable retinopathy. {W}e then selected exams with sufficient image quality and determined presence or absence of referable retinopathy. 
{O}utcome measures included area under the receiver operating characteristic curve (number needed to miss one case [{NNM}]) and type of false negative. {RESULTS} {T}otal area under the receiver operating characteristic curve was 0.84, and {NNM} was 80 at a sensitivity of 0.84 and a specificity of 0.64. {A}t this point, 7,689 of 10,000 exams had sufficient image quality, 4,648 of 7,689 (60%) were true negatives, 59 of 7,689 (0.8%) were false negatives, 319 of 7,689 (4%) were true positives, and 2,581 of 7,689 (33%) were false positives. {T}wenty-seven percent of false negatives contained large hemorrhages and/or neovascularizations. {CONCLUSIONS} {A}utomated detection of diabetic retinopathy using published algorithms cannot yet be recommended for clinical practice. {H}owever, performance is such that evaluation on validated, publicly available datasets should be pursued. {I}f algorithms can be improved, such a system may in the future lead to improved prevention of blindness and vision loss in patients with diabetes.}, + file = {Abra08a.pdf:pdf\\Abra08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {18024852}, + month = {11}, + gsid = {14186889695192659355}, + gscites = {287}, + ss_id = {0445a1a0aa30c9ca41a8ab31463d617b849a0814}, + all_ss_ids = {['0445a1a0aa30c9ca41a8ab31463d617b849a0814']}, +} + +@article{Adam22a, + author = {Adams, Lisa C. and Makowski, Marcus R. and Engel, Gunther and Rattunde, Maximilian and Busch, Felix and Asbach, Patrick and Niehues, Stefan M. and Vinayahalingam, Shankeeth and van Ginneken, Bram and Litjens, Geert and Bressem, Keno K.}, + title = {Dataset of prostate MRI annotated for anatomical zones and cancer.}, + doi = {10.1016/j.dib.2022.108739}, + pages = {108739}, + volume = {45}, + abstract = {In the present work, we present a publicly available, expert-segmented representative dataset of 158 3.0 Tesla biparametric MRIs [1]. There is an increasing number of studies investigating prostate and prostate carcinoma segmentation using deep learning (DL) with 3D architectures [2], [3], [4], [5], [6], [7]. The development of robust and data-driven DL models for prostate segmentation and assessment is currently limited by the availability of openly available expert-annotated datasets [8], [9], [10]. The dataset contains 3.0 Tesla MRI images of the prostate of patients with suspected prostate cancer. Patients over 50 years of age who had a 3.0 Tesla MRI scan of the prostate that met PI-RADS version 2.1 technical standards were included. All patients received a subsequent biopsy or surgery so that the MRI diagnosis could be verified/matched with the histopathologic diagnosis. For patients who had undergone multiple MRIs, the last MRI, which was less than six months before biopsy/surgery, was included. All patients were examined at a German university hospital (Charite Universitatsmedizin Berlin) between 02/2016 and 01/2020. All MRI were acquired with two 3.0 Tesla MRI scanners (Siemens VIDA and Skyra, Siemens Healthineers, Erlangen, Germany). Axial T2W sequences and axial diffusion-weighted sequences (DWI) with apparent diffusion coefficient maps (ADC) were included in the data set. T2W sequences and ADC maps were annotated by two board-certified radiologists with 6 and 8 years of experience, respectively. For T2W sequences, the central gland (central zone and transitional zone) and peripheral zone were segmented. If areas of suspected prostate cancer (PIRADS score of >= 4) were identified on examination, they were segmented in both the T2W sequences and ADC maps. 
Because restricted diffusion is best seen in DWI images with high b-values, only these images were selected and all images with low b-values were discarded. Data were then anonymized and converted to NIfTI (Neuroimaging Informatics Technology Initiative) format.}, + file = {Adam22a.pdf:pdf\\Adam22a.pdf:PDF}, + journal = {Data in brief}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36426089}, + year = {2022}, + ss_id = {516b2dc6c3761458c8fa6f5759295673e86a42db}, + all_ss_ids = {['516b2dc6c3761458c8fa6f5759295673e86a42db']}, + gscites = {1}, +} + +@article{Adam22b, + author = {Adams, Lisa C. and Makowski, Marcus R. and Engel, Gunther and Rattunde, Maximilian and Busch, Felix and Asbach, Patrick and Niehues, Stefan M. and Vinayahalingam, Shankeeth and van Ginneken, Bram and Litjens, Geert and Bressem, Keno K.}, + title = {Prostate158 - An expert-annotated 3T MRI dataset and algorithm for prostate cancer detection.}, + doi = {10.1016/j.compbiomed.2022.105817}, + pages = {105817}, + volume = {148}, + abstract = {The development of deep learning (DL) models for prostate segmentation on magnetic resonance imaging (MRI) depends on expert-annotated data and reliable baselines, which are often not publicly available. This limits both reproducibility and comparability. Prostate158 consists of 158 expert annotated biparametric 3T prostate MRIs comprising T2w sequences and diffusion-weighted sequences with apparent diffusion coefficient maps. Two U-ResNets trained for segmentation of anatomy (central gland, peripheral zone) and suspicious lesions for prostate cancer (PCa) with a PI-RADS score of >=4 served as baseline algorithms. Segmentation performance was evaluated using the Dice similarity coefficient (DSC), the Hausdorff distance (HD), and the average surface distance (ASD). The Wilcoxon test with Bonferroni correction was used to evaluate differences in performance. The generalizability of the baseline model was assessed using the open datasets Medical Segmentation Decathlon and PROSTATEx. Compared to Reader 1, the models achieved a DSC/HD/ASD of 0.88/18.3/2.2 for the central gland, 0.75/22.8/1.9 for the peripheral zone, and 0.45/36.7/17.4 for PCa. Compared with Reader 2, the DSC/HD/ASD were 0.88/17.5/2.6 for the central gland, 0.73/33.2/1.9 for the peripheral zone, and 0.4/39.5/19.1 for PCa. Interrater agreement measured in DSC/HD/ASD was 0.87/11.1/1.0 for the central gland, 0.75/15.8/0.74 for the peripheral zone, and 0.6/18.8/5.5 for PCa. Segmentation performances on the Medical Segmentation Decathlon and PROSTATEx were 0.82/22.5/3.4; 0.86/18.6/2.5 for the central gland, and 0.64/29.2/4.7; 0.71/26.3/2.2 for the peripheral zone. We provide an openly accessible, expert-annotated 3T dataset of prostate MRI and a reproducible benchmark to foster the development of prostate segmentation algorithms.}, + file = {Adam22b.pdf:pdf\\Adam22b.pdf:PDF}, + journal = {Computers in biology and medicine}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35841780}, + year = {2022}, + ss_id = {c661857719657aac571718e3d48cb66b8fc1e941}, + all_ss_ids = {['c661857719657aac571718e3d48cb66b8fc1e941']}, + gscites = {11}, +} + +@article{Adri09, + author = {Miraude E A P M Adriaensen and Cornelia M Schaefer-Prokop and Debbie A C Duyndam and Bernard A Zonnenberg and M. 
Prokop}, + title = {Fatty foci in the myocardium in patients with tuberous sclerosis complex: common finding at {CT}}, + doi = {10.1148/radiol.2533082118}, + number = {2}, + pages = {359--363}, + volume = {253}, + abstract = {To examine the morphologic characteristics of focal fatty foci in the myocardium of patients with tuberous sclerosis complex (TSC) at computed tomography (CT).Institutional review board approval was obtained, and patient informed consent was waived. Fifty-five patients with TSC (mean age, 37 years; range, 16-67 years; 22 male patients) who had CT results available that included at least the basal portions of the heart were included. Fifty-five age- and sex-matched control subjects without TSC were selected from a CT database. Images were reviewed for the presence of areas of fat attenuation in the depicted portions of the myocardium. Descriptive statistics and the McNemar test for case-control comparisons were used.CT results demonstrated foci of fat attenuation within the myocardium in 35 (64\%) of 55 patients with TSC. Foci were well circumscribed and focal and located in the interventricular septum, left ventricle wall, right ventricle wall, and papillary muscles. Size varied between 3 x 1 mm and 62 x 31 mm. Multiple lesions were seen in 19 patients. In the control group, only one (2\%) lesion with fat attenuation was found (P < .001). Its linear shape and subendocardial location in the left ventricular wall differed from the morphology of fatty foci seen in patients with TSC.Despite incomplete depiction of the heart with CT, the majority of patients with TSC demonstrated well-circumscribed foci of fat attenuation in the myocardium that were not present in age- and sex-matched control subjects. This suggests that such fatty foci may be another characteristic of TSC.}, + file = {Adri09.pdf:pdf\\Adri09.pdf:PDF}, + journal = Radiology, + optnote = {DIAG}, + pmid = {19709996}, + year = {2009}, + month = {11}, + gsid = {8664419584929603899}, + gscites = {33}, +} + +@article{Adri09a, + author = {M. E A P M Adriaensen and C. M. Schaefer-Prokop and T. Stijnen and D. A C Duyndam and B. A. Zonnenberg and M. Prokop}, + title = {Prevalence of subependymal giant cell tumors in patients with tuberous sclerosis and a review of the literature}, + journal = EJN, + year = {2009}, + volume = {16}, + pages = {691--696}, + doi = {10.1111/j.1468-1331.2009.02567.x}, + abstract = {To investigate the prevalence of subependymal giant cell ependymomas (SEGA) in patients with tuberous sclerosis complex (TSC).We performed a retrospective cross-sectional study in a cohort of 285 patients with known TSC. Institutional review board approval was obtained. We included all 214 TSC-patients who had received a contrast-enhanced computed tomography (CT) scan of the brain. The most recent scan was evaluated for SEGA and presence of hydrocephalus. Additionally, a literature search was performed, and pooled estimates of SEGA prevalence in TSC were calculated. We used descriptive statistics, two sample t-test, chi-squared-test, and meta-analysis as appropriate.Computed tomography showed radiological evidence of SEGA in 43 of the 214 TSC-patients (20\%); 23 of 105 men (22\%) and 20 of 109 women (18\%; P = .52). Average maximum tumor size was 11.4 mm (range, 4-29 mm). Patients with SEGA (mean, 31 years; range, 16-58 years) were on average younger than patients without SEGA (mean, 37 years; range, 10-72 years; P = 0.007). No association between tumor size and patient age was detected. 
Nine patients had bilateral SEGA. Hydrocephalus was present in six of the 43 patients (14\%). Meta-analysis of reported prevalence and our current study showed that studies using radiological evidence to diagnose SEGA gave a higher pooled estimate of the prevalence of SEGA in TSC (0.16; 95\% CI: 0.12, 0.21) than studies using mainly histopathological evidence of SEGA (0.09; 95\% CI: 0.07, 0.12).In our cohort, CT demonstrated evidence of SEGA in 20\% of TSC-patients. Prevalence of SEGA in TSC is higher in studies using radiological evidence to diagnose SEGA than in studies using histopathological evidence.}, + file = {Adri09a.pdf:pdf\\Adri09a.pdf:PDF}, + optnote = {DIAG}, + number = {6}, + pmid = {19236458}, + month = {6}, + gsid = {14651688851483947146}, + gscites = {155}, +} + +@article{Adri10, + author = {Miraude E A P M Adriaensen and Maarten J M Cramer and Madelon E E Brouha and Cornelia M Schaefer-Prokop and Mathias Prokop and Pieter A F M Doevendans and Bernard A Zonnenberg and Harm H H Feringa}, + title = {Echocardiographic screening results in patients with tuberous sclerosis complex}, + journal = TEXHIJ, + year = {2010}, + volume = {37}, + pages = {280--283}, + abstract = {We sought to examine the frequency of abnormal echocardiographic findings in patients with tuberous sclerosis complex. In a retrospective cohort study, we included all patients with known tuberous sclerosis complex who had been sent to our cardiology department for echocardiographic screening from 1995 through August 2003 (n=56). Two research scientists independently reviewed the reports of the echocardiographic screening examinations for abnormal findings. We used descriptive statistics, the Mann-Whitney U test, and the chi(2) test. The mean age of patients included in the study was 35 years (range, 12-73 yr); 23 patients were male. Abnormal findings were seen in 22 patients (39\%). The most common abnormal findings were focal areas of increased intramyocardial echogenicity, which were seen in 16 patients (29\%). The clinical consequence of this finding is still unknown. We conclude that echocardiographic abnormalities are common in patients with tuberous sclerosis complex.}, + file = {Adri10.pdf:pdf\\Adri10.pdf:PDF}, + optnote = {DIAG}, + number = {3}, + pmid = {20548802}, + gsid = {7949021548853267706}, + gscites = {7}, + ss_id = {3da4ab4d50211eb7a0148c4ed74729b2d4e30ba9}, + all_ss_ids = {['3da4ab4d50211eb7a0148c4ed74729b2d4e30ba9']}, +} + +@article{Adri11, + author = {M. E A P M Adriaensen and C. M. Schaefer-Prokop and D. A C Duyndam and B. A. Zonnenberg and M. Prokop}, + title = {Radiological evidence of lymphangioleiomyomatosis in female and male patients with tuberous sclerosis complex}, + journal = CLINR, + year = {2011}, + volume = {66}, + pages = {625--628}, + doi = {10.1016/j.crad.2011.02.009}, + abstract = {To determine the gender-specific prevalence of pulmonary cysts typical for lymphangioleiomyomatosis (LAM) in adult patients with known tuberous sclerosis complex (TSC).A retrospective, cross-sectional study in a cohort of 206 adult TSC patients was performed. Institutional review board approval was obtained, and patient informed consent was waived. Patients had routinely undergone abdominal CT scanning between 1996 and 2006. All 186 patients (mean age 38 years; range 19-72 years; 91 (49\%) male patients) in whom at least the lung bases were depicted on computed tomography (CT) were included. Images were reviewed for the presence of pulmonary thin-walled cysts. 
Descriptive statistics, two sample t-test to compare means, and chi(2)-test to compare proportions were applied. CT demonstrated pulmonary thin-walled cysts in the lung bases in 52 (28\%) of 186 patients. Size varied from 2mm in diameter to more than 2cm. Pulmonary cysts were detected in 40 (42\%) of 95 female patients and in 12 (13\%) of 91 male patients (p<0.001). In general, cysts were larger and more numerous in women than in men. Only minimal cystic changes were found in four women and two men, moderate cystic changes were seen in three women and seven men, but considerable cystic changes were seen almost exclusively in women (33 women versus three men). CT demonstrated thin-walled pulmonary cysts in the lung bases in 28\% of 186 included patients with tuberous sclerosis complex. Female patients were more affected than male patients.}, + file = {Adri11.pdf:pdf\\Adri11.pdf:PDF}, + optnote = {DIAG}, + number = {7}, + pmid = {21459371}, + month = {7}, + gsid = {15610164965312020379}, + gscites = {81}, + ss_id = {691c90e68eab1dcae5861fba0eab73ba626e6777}, + all_ss_ids = {['691c90e68eab1dcae5861fba0eab73ba626e6777']}, +} + +@phdthesis{Adri11a, + author = {M. E. A. P. M. Adriaensen}, + title = {Imaging in tuberous sclerosis complex}, + year = {2011}, + url = {http://igitur-archive.library.uu.nl/dissertations/2011-0314-200307/UUindex.html}, + abstract = {Since 1995, the University Medical Center Utrecht is a nationwide referral center for patients with tuberous sclerosis complex (TSC). Aim of this thesis was to make a start with the systematic evaluation of imaging in patients with TSC followed at our institution focusing on the heart, the lungs and the brain. In a case-control study, we examined the morphologic characteristics at CT of focal fatty foci noted in daily practice in the myocardium of patients known to have TSC. The fatty foci appeared to have unique CT characteristics consisting of a combination of focality, well-circumscribed form, location into the mid myocardium, pure fat density, absence of enhancement, and absence of invasive behavior. Because these characteristic well-circumscribed foci of fat attenuation were found in the myocardium at CT in the majority of patients with TSC (35 of 55 patients) and not in the control group, such fatty foci may help identify patients suspected of having the disease. Histopathology in patients with TSC showed multiple areas of mature fat cells in the myocardium without associated inflammation, without associated fibrosis, without entrapped myocardial cells, and without a capsule which seem to be unique for tuberous sclerosis complex. Adding these fatty foci in the myocardium seen on CT, MRI, echocardiography and histopathology to the list of major features of the current clinical diagnostic criteria for TSC may be considered in the future. In a cross-sectional study we reviewed the most recent CT scan of the lung bases for radiologic evidence of lymphangioleiomyomatosis (LAM) in a cohort of 186 adult patients with known TSC. Our study demonstrated pulmonary thin-walled cysts with intervening normal lung parenchyma in the lung bases in 28% of adult TSC-patients (52 of 186 patients). Gender specific prevalence was 0.42 in female and 0.13 in male TSC-patients. In another cross-sectional study we reviewed the most recent contrast-enhanced CT scan of the brain for radiologic signs of subependymal giant cell tumors (SGCT). Our study demonstrated radiologic evidence of SGCT in 20% of TSC-patients (43 of 214 patients). 
Gender specific prevalence was 0.18 in female and 0.22 in male TSC-patients. In addition, a meta-analysis of the reported prevalence in the English language literature, and our own study was performed. The resulting pooled estimate of the prevalence of SGCT in TSC-patients was 0.11 (95% CI:0.09, 0.14). The prevalence of SGCT in TSC was higher in studies using radiologic evidence to diagnose SGCT (0.16; 95% CI: 0.12, 0.21) than in studies using mainly histopathologic evidence (0.09; 95% CI:0.07, 0.12).}, + copromotor = {C. M. Schaefer-Prokop and B. A. Zonnenberg}, + file = {Adri11a.pdf:pdf\\Adri11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {W. M. Prokop}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Adri11b, + author = {Miraude E A P M Adriaensen and Matthijs F M van Oosterhout and Harm H H Feringa and Cornelia M Schaefer-Prokop and Bernard A Zonnenberg and Mathias Prokop}, + title = {Mature fat cells in the myocardium of patients with tuberous sclerosis complex}, + journal = JCP, + year = {2011}, + volume = {64}, + pages = {244--245}, + doi = {10.1136/jcp.2010.087676}, + abstract = {Routine abdominal CT scans in patients with tuberous sclerosis complex (TSC) showed characteristic fatty foci in the depicted caudal portions of the myocardium. The purpose of this study was to investigate if areas of abnormal myocardium in patients with TSC could also be found in post-mortem specimens.A retrospective search of our histopathology database was performed to identify specimens of the heart of patients with TSC. Institutional review board approval was obtained, and patient informed consent was waived. Four specimens were included (mean age, 44 years; range 32-68 years; 2 females). Two specimens (50\%) of the heart showed areas of mature fat cells in the myocardium, without associated inflammation, without associated fibrosis, without entrapped myocardial cells and without a capsule. 
Post-mortem specimens of the heart of patients with TSC showed areas of mature fat cells in the myocardium which seem to be unique for TSC.}, + file = {Adri11b.pdf:pdf\\Adri11b.pdf:PDF}, + optnote = {DIAG}, + number = {3}, + pmid = {21217088}, + month = {1}, + gsid = {4087773643446117872}, + gscites = {7}, + ss_id = {c738da78dc1fb336a8dff5a8047059d52a6de939}, + all_ss_ids = {['c738da78dc1fb336a8dff5a8047059d52a6de939']}, +} + +@article{Adri11c, + author = {Miraude E A P M Adriaensen and Harm H H Feringa and Cornelia M Schaefer-Prokop and Sandra A P Cornelissen and Bernard A Zonnenberg and Mathias Prokop}, + title = {Focal fatty areas in the myocardium of patients with tuberous sclerosis complex: a unique finding}, + journal = JTI, + year = {2011}, + volume = {26}, + pages = {W12--W13}, + doi = {10.1097/RTI.0b013e3181eebc65}, + abstract = {With this collection of computed tomography and magnetic resonance images, we illustrate a recently described novel finding in the myocardium of patients with tuberous sclerosis complex.}, + file = {Adri11c.pdf:pdf\\Adri11c.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {20921911}, + month = {2}, + gsid = {11770667656434269892}, + gscites = {6}, + ss_id = {9fabf1bc1c126702a051ec56474af2d1518ad0ba}, + all_ss_ids = {['9fabf1bc1c126702a051ec56474af2d1518ad0ba']}, +} + +@inproceedings{Albe11, + author = {Alberti, Marina and Gatta, Carlo and Balocco, Simone and Ciompi, Francesco and Pujol, Oriol and Silva, Joana and Carrillo, Xavier and Radeva, Petia}, + title = {Automatic branching detection in IVUS sequences}, + booktitle = PRIA, + year = {2011}, + publisher = {Springer}, + pages = {126--133}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-21257-4_16}, + abstract = {Atherosclerosis is a vascular pathology affecting the arterial walls, generally located in specific vessel sites, such as bifurcations. In this paper, for the first time, a fully automatic approach for the detection of bifurcations in IVUS pullback sequences is presented. The method identifies the frames and the angular sectors in which a bifurcation is visible. This goal is achieved by applying a classifier to a set of textural features extracted from each image of an IVUS pullback. A comparison between two state-of-the-art classifiers is performed, AdaBoost and Random Forest. A cross-validation scheme is applied in order to evaluate the performances of the approaches. The obtained results are encouraging, showing a sensitivity of 75% and an accuracy of 94% by using the AdaBoost algorithm.}, + file = {Albe11.pdf:pdf\\Albe11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {5931371087353506280}, + gscites = {4}, +} + +@article{Albe12, + author = {Alberti, Marina and Balocco, Simone and Gatta, Carlo and Ciompi, Francesco and Pujol, Oriol and Silva, Joana and Carrillo, Xavier and Radeva, Petia}, + title = {Automatic bifurcation detection in coronary IVUS sequences}, + journal = TBME, + year = {2012}, + volume = {59}, + pages = {1022--1031}, + doi = {10.1109/TBME.2011.2181372}, + abstract = {In this paper, we present a fully automatic method which identifies every bifurcation in an intravascular ultrasound (IVUS) sequence, the corresponding frames, the angular orientation with respect to the IVUS acquisition, and the extension. This goal is reached using a two-level classification scheme: first, a classifier is applied to a set of textural features extracted from each image of a sequence. 
A comparison among three state-of-the-art discriminative classifiers (AdaBoost, random forest, and support vector machine) is performed to identify the most suitable method for the branching detection task. Second, the results are improved by exploiting contextual information using a multiscale stacked sequential learning scheme. The results are then successively refined using a-priori information about branching dimensions and geometry. The proposed approach provides a robust tool for the quick review of pullback sequences, facilitating the evaluation of the lesion at bifurcation sites. The proposed method reaches an F-Measure score of 86.35\%, while the F-Measure scores for inter- and intraobserver variability are 71.63\% and 76.18\%, respectively. The obtained results are positive. Especially, considering the branching detection task is very challenging, due to high variability in bifurcation dimensions and appearance.}, + file = {Albe12.pdf:pdf\\Albe12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {22231146}, + month = {4}, + gsid = {4944327237273850643}, + gscites = {42}, +} + +@mastersthesis{Alee18, + author = {Tajwar Abrar Ableef}, + title = {Malignancy estimation of Pulmonary Nodules using Multi-View Multi-Time Point Convolutional Neural Networks}, + year = {2018}, + abstract = {Lung Cancer is one of the leading causes of cancer-related deaths for both men and women in the United States. The aim of lung cancer screening is to detect lung cancer at an early stage. Majority of the time, after the lung nodule detection phase, only a small portion out of all the nodules that get detected turns out to be cancerous. Compared to traditional techniques that use handcrafted features and furthermore relies on tedious \& time-consuming prior lung nodule segmentation, the proposed method uses deep learning techniques in an end-to-end arrangement that performs both the feature extraction and classification directly from raw nodule patches. In this study, we focus on improving the pulmonary nodule malignancy estimation part by introducing a novel multi-view multi-timepoint convolutional neural network (MVMT-CNN) architecture that uses low dose CT images as its input. The dataset used in this study was taken from the National Lung Cancer Screening Trial (NLST)- which is the largest lung cancer screening trial known to date. We investigate the influence of whether adding temporal information of the same patient can help to improve the diagnosis. The proposed convolutional neural network architecture requires nine 2D patches- each of which represents a certain plane from the extracted 3D nodule patches. The nine planes are analyzed separately in parallel CNN streams and the output features coming from the nine different pathways are fused into one layer before passing it to the classification stage. Additionally, batch normalization and drop out layers are also incorporated in order to decrease the training time and reduce the chances of over-fitting. The average Area Under the ROC curve obtained after 5 fold cross validation along with bootstrapping were used to compare \& select the final best performing architecture. The robustness of the final selected model was examined and verified by swapping the time points to see if the network did actually learn to identify the growth of the nodule between timepoints. 
The proposed method confirms that using the proposed multi-view multi-timepoint CNN architecture improves the prediction ability of pulmonary nodules significantly.}, + file = {Alee18.pdf:pdf\\Alee18.pdf:PDF}, + optnote = {DIAG}, + school = {University of Girona (MAIA master program)}, + journal = {Master thesis}, +} + +@article{Alex21, + author = {Alexander, Barbara D and Lamoth, Fr\'{e}d\'{e}ric and Heussel, Claus Peter and Prokop, Cornelia Schaefer and Desai, Sujal R and Morrissey, C Orla and Baddley, John W}, + title = {Guidance on Imaging for Invasive Pulmonary Aspergillosis and Mucormycosis: From the Imaging Working Group for the Revision and Update of the Consensus Definitions of Fungal Disease from the EORTC/MSGERC}, + doi = {10.1093/cid/ciaa1855}, + year = {2021}, + abstract = {Abstract + + Background + Clinical imaging in suspected invasive fungal disease (IFD) has a significant role in early detection of disease and helps direct further testing and treatment. Revised definitions of IFD from the EORTC/MSGERC were recently published and provide clarity on the role of imaging for the definition of IFD. Here, we provide evidence to support these revised diagnostic guidelines. + + + Methods + We reviewed data on imaging modalities and techniques used to characterize IFDs. + + + Results + Volumetric high-resolution computed tomography (CT) is the method of choice for lung imaging. Although no CT radiologic pattern is pathognomonic of IFD, the halo sign, in the appropriate clinical setting, is highly suggestive of invasive pulmonary aspergillosis (IPA) and associated with specific stages of the disease. The ACS is not specific for IFD and occurs in the later stages of infection. By contrast, the reversed halo sign and the hypodense sign are typical of pulmonary mucormycosis but occur less frequently. In noncancer populations, both invasive pulmonary aspergillosis and mucormycosis are associated with "atypical" nonnodular presentations, including consolidation and ground-glass opacities. + + + Conclusions + A uniform definition of IFD could improve the quality of clinical studies and aid in differentiating IFD from other pathology in clinical practice. Radiologic assessment of the lung is an important component of the diagnostic work-up and management of IFD. Periodic review of imaging studies that characterize findings in patients with IFD will inform future diagnostic guidelines. + }, + url = {http://dx.doi.org/10.1093/cid/ciaa1855}, + file = {Alex21.pdf:pdf\Alex21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Clinical Infectious Diseases}, + citation-count = {31}, + automatic = {yes}, + pages = {S79-S88}, + volume = {72}, +} + +@inproceedings{Altu20, + author = {Altun, Hidir Cem and Chlebus, Grzegorz and Jacobs, Colin and Meine, Hans and van Ginneken, Bram and Hahn, Horst K.}, + booktitle = MI, + title = {Feasibility of End-To-End Trainable Two-Stage U-Net for Detection of Axillary Lymph Nodes in Contrast-Enhanced CT Based Scans on Sparse Annotations}, + doi = {10.1117/12.2551331}, + pages = {113141C}, + series = SPIE, + abstract = {Manual reading of enlarged lymph nodes is time-consuming, error-prone and suffers from inter-observer variability. We propose a mostly generic computer-aided detection system, which can be trained in an end-to-end fashion from sparse annotations, to automatically detect axillary lymph nodes. 
Our pipeline is a two-stage approach, where both stages are performed using the same U-net architecture: volume of interest localization (axillary region) and then axillary lymph node detection within the VOI. Our dataset comprised 492 lymph nodes (median diameter 7 mm) from 76 patients, and our system achieved an 83% accuracy at 6.7 FP per scan on the test data.}, + file = {Altu20.pdf:pdf\\Altu20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, + month = {3}, + ss_id = {90a870cbb9897124193ba18c5358fe45b6260621}, + all_ss_ids = {['90a870cbb9897124193ba18c5358fe45b6260621']}, + gscites = {3}, +} + +@article{Alve23, + author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Venkadesh, Kiran V. and Jacobs, Colin and Saghir, Zaigham and de Rooij, Maarten and Hermans, John and Huisman, Henkjan}, + title = {Prediction Variability to Identify Reduced AI Performance in Cancer Diagnosis at MRI and CT}, + doi = {10.1148/radiol.230275}, + year = {2023}, + abstract = {Background: A priori identification of patients at risk of artificial intelligence (AI) failure in diagnosing cancer would contribute to the safer clinical integration of diagnostic algorithms. + Purpose: To evaluate AI prediction variability as an uncertainty quantification (UQ) metric for identifying cases at risk of AI failure in diagnosing cancer at MRI and CT across different cancer types, data sets, and algorithms. + Materials and Methods: Multicenter data sets and publicly available AI algorithms from three previous studies that evaluated detection of pancreatic cancer on contrast-enhanced CT images, detection of prostate cancer on MRI scans, and prediction of pulmonary nodule malignancy on low-dose CT images were analyzed retrospectively. Each task's algorithm was extended to generate an uncertainty score based on ensemble prediction variability. AI accuracy percentage and partial area under the receiver operating characteristic curve (pAUC) were compared between certain and uncertain patient groups in a range of percentile thresholds (10%-90%) for the uncertainty score using permutation tests for statistical significance. The pulmonary nodule malignancy prediction algorithm was compared with 11 clinical readers for the certain group (CG) and uncertain group (UG). + Results: In total, 18 022 images were used for training and 838 images were used for testing. AI diagnostic accuracy was higher for the cases in the CG across all tasks (P < .001). At an 80% threshold of certain predictions, accuracy in the CG was 21%-29% higher than in the UG and 4%-6% higher than in the overall test data sets. The lesion-level pAUC in the CG was 0.25-0.39 higher than in the UG and 0.05-0.08 higher than in the overall test data sets (P < .001). For pulmonary nodule malignancy prediction, accuracy of AI was on par with clinicians for cases in the CG (AI results vs clinician results, 80% [95% CI: 76, 85] vs 78% [95% CI: 70, 87]; P = .07) but worse for cases in the UG (AI results vs clinician results, 50% [95% CI: 37, 64] vs 68% [95% CI: 60, 76]; P < .001). 
+ Conclusion: An AI-prediction UQ metric consistently identified reduced performance of AI in cancer diagnosis.}, + url = {http://dx.doi.org/10.1148/radiol.230275}, + file = {Alve23.pdf:pdf\Alve23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {1}, + automatic = {yes}, + volume = {308}, + all_ss_ids = {740d5d34fcd714870ddf0073fd8956db023319f0}, + gscites = {1}, +} + +@conference{Alves21a, + author = {Alves, Nat\'{a}lia and Hermans, John and Huisman, Henkjan}, + booktitle = RSNA, + title = {CT-based Deep Learning Towards Early Detection Of Pancreatic Ductal Adenocarcinoma}, + abstract = {Purpose: To investigate the performance of a 3D nnUnet based algorithm for pancreatic ductal adenocarcinoma (PDAC) detection and assess the potential of the model for early diagnosis by conducting a subgroup analysis on small (size <2cm) tumors. Methods and Materials: Portal-venous phase contrast-enhanced computed tomography (CE-CT) scans from a cohort of 119 patients with pathology-proven PDAC and 122 consecutive patients with normal pancreas were included in this retrospective study. For the PDAC cohort, expert segmentations of the pancreas and tumor volumes were available, along with the tumor sizes measured on the CT scan. For the non-PDAC cohort, the pancreas segmentations were obtained using a pre-trained deep learning segmentation model. The pancreas segmentation determined a region of interest from the full CE-CT as input to the 3D nnUnet. + The network was trained for 1000 epochs with 5-fold cross-validation to differentiate between tumor and normal voxels. The predicted heatmaps were thresholded at 0.1. An image was considered a positive case of PDAC if the predicted tumor volume was greater than 100 mm3. Results: The median tumor size on the PDAC cohort was 2.8 cm (range 1.2 cm - 9.3 cm). The detection task achieved an average sensitivity of 0.93 +- 0.04 (111/119), specificity of 0.98 +- 0.02 (119/122) and area under the receiver operating characteristic curve of 0.96 +- 0.04. The median DICE score between the expert and the network tumor segmentations was 0.68 +- 0.18. In 2 of the 3 false positive cases the network wrongly detected a hypodense region of the normal pancreas, which could originate from fat accumulation or natural perfusion differences. The mean sensitivity in the sub-group of tumors with size smaller than 2 cm was 0.92 +- 0.1 (21/23), and the median DICE score in this sub-group was 0.56 +- 0.20. Conclusions: These preliminary results indicate that a 3D nnUnet based algorithm can accurately detect small tumors, suggesting that it could be useful at assisting in early PDAC diagnosis. Clinical Relevance/Application: Early diagnosis improves pancreatic cancer prognosis but requires significant expertise. An automatic tool for the detection of early-stage tumors would reduce expertise requirements.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@article{Alves22a, + author = {Alves, Nat\'{a}lia and Schuurmans, Megan and Litjens, Geke and Bosma, Joeran S. and Hermans, John and Huisman, Henkjan}, + title = {Fully Automatic Deep Learning Framework for Pancreatic Ductal Adenocarcinoma Detection on Computed Tomography}, + doi = {https://doi.org/10.3390/cancers14020376}, + pages = {376}, + url = {https://www.mdpi.com/2072-6694/14/2/376/htm}, + abstract = {Early detection improves prognosis in pancreatic ductal adenocarcinoma (PDAC) but is challenging as lesions are often small and poorly defined on contrast-enhanced computed tomography scans (CE-CT). 
Deep learning can facilitate PDAC diagnosis, however current models still fail to identify small (<2cm) lesions. In this study, state-of-the-art deep learning models were used to develop an automatic framework for PDAC detection, focusing on small lesions. Additionally, the impact of integrating surrounding anatomy was investigated. CE-CT scans from a cohort of 119 pathology-proven PDAC patients and a cohort of 123 patients without PDAC were used to train a nnUnet for automatic lesion detection and segmentation (nnUnet_T). Two additional nnUnets were trained to investigate the impact of anatomy integration: (1) segmenting the pancreas and tumor (nnUnet_TP), (2) segmenting the pancreas, tumor, and multiple surrounding anatomical structures (nnUnet_MS). An external, publicly available test set was used to compare the performance of the three networks. The nnUnet_MS achieved the best performance, with an area under the receiver operating characteristic curve of 0.91 for the whole test set and 0.88 for tumors <2cm, showing that state-of-the-art deep learning can detect small PDAC and benefits from anatomy information.}, + journal = {Cancers}, + optnote = {DIAG, RADIOLOGY}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/247178}, + year = {2022}, + ss_id = {3a84e2a533df9a1351f7442735c961bc80b5892e}, + all_ss_ids = {['3a84e2a533df9a1351f7442735c961bc80b5892e']}, + gscites = {24}, +} + +@conference{Alves22b, + author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Huisman, Henkjan}, + booktitle = RSNA, + title = {Towards Safe Clinical Use of Artificial Intelligence for Cancer Detection Through Uncertainty Quantification}, + abstract = {Purpose: Investigate whether quantifying deep learning (DL) models' uncertainty can help identify low performance cases that require expert attention. Materials and Methods: This retrospective study included two use cases: pancreatic cancer detection on contrast-enhanced computed tomography and clinically significant prostate cancer (csPCa) detection on biparametric magnetic resonance imaging. The pancreatic cohort consisted of 242 (119 cancer) in-house cases for training and 361 cases (80 healthy, 281 cancer) from two external, public datasets for testing. The csPCa cohort consisted of 7756 (3022 csPCa) in-house examinations for training and 300 cases (88 csPCa) from an external center for testing. All tumor cases in the independent test sets were histopathology confirmed. The uncertainty of the proposed automatic cancer detection algorithms was computed using model ensembling. Fifteen DL models were trained with the nnUNet framework and integrated into previously established pipelines for each use case. The models were applied independently to the test sets and uncertainty was quantified in a case level as the standard deviation (sd) of the ensemble. Cases with sd lower than 10% were classified as having low prediction uncertainty, while the remaining were classified as having high prediction uncertainty. The mean and 95% confidence intervals (CI) of the area under the receiver operating characteristic curves (AUC) for the high and low uncertainty groups were calculated. The permutation test was used to assess statistical significance. Results: The DL frameworks' performances for the uncertain groups were significantly lower than for the certain groups for both use cases. 
For pancreatic cancer detection, the mean AUC dropped from 98.0% (95% CI: 96.2%-99.8%) for the low uncertainty group to 78.0% (95% CI: 68.2%-87.8%) for the high uncertainty group (p<10-4). For csPCa, the mean AUC dropped from 92.4% (95% CI: 90.4%-94.4%) for the low uncertainty group to 65.7% (95% CI: 54.7%-76.7%) for the high uncertainty group (p<10-4). The low uncertainty groups included 41% of the pancreatic and 78% of the csPCa test sets. Conclusions: The proposed ensembling method can be used to identify cases where AI models' predictions are uncertain and show low performance. Clinical Relevance Statement: To be safely integrated in the clinic, AI can predict uncertainty and identify uncertain cases with lower performance that should be handled with extra care.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@inproceedings{Alves22c, + author = {Alves, Nat{\'a}lia and de Wilde, Bram}, + booktitle = {Fast and Low-Resource Semi-supervised Abdominal Organ Segmentation}, + title = {Uncertainty-Guided Self-learning Framework for Semi-supervised Multi-organ Segmentation}, + doi = {https://doi.org/10.1007/978-3-031-23911-3_11}, + pages = {116--127}, + publisher = {Springer Nature Switzerland}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@article{Amga20, + author = {Mohamed Amgad and Elisabeth Specht Stovgaard and Eva Balslev and Jeppe Thagaard and Weijie Chen and Sarah Dudgeon and Ashish Sharma and Jennifer K. Kerner and Carsten Denkert and Yinyin Yuan and Khalid AbdulJabbar and Stephan Wienert and Peter Savas and Leonie Voorwerk and Andrew H. Beck and Anant Madabhushi and Johan Hartman and Manu M. Sebastian and Hugo M. Horlings and Jan Hude{\v{c}}ek and Francesco Ciompi and David A. Moore and Rajendra Singh and Elvire Roblin and Marcelo Luiz Balancin and Marie-Christine Mathieu and Jochen K. Lennerz and Pawan Kirtani and I-Chun Chen and Jeremy P. Braybrooke and Giancarlo Pruneri and Sandra Demaria and Sylvia Adams and Stuart J. Schnitt and Sunil R. Lakhani and Federico Rojo and Laura Comerma and Sunil S. Badve and Mehrnoush Khojasteh and W. Fraser Symmans and Christos Sotiriou and Paula Gonzalez-Ericsson and Katherine L. Pogue-Geile and Rim S. Kim and David L. Rimm and Giuseppe Viale and Stephen M. Hewitt and John M. S. Bartlett and Fr{\'{e}}d{\'{e}}rique Penault-Llorca and Shom Goel and Huang-Chun Lien and Sibylle Loibl and Zuzana Kos and Sherene Loi and Matthew G. Hanna and Stefan Michiels and Marleen Kok and Torsten O. Nielsen and Alexander J. Lazar and Zsuzsanna Bago-Horvath and Loes F. S. Kooreman and Jeroen A. W. M. van der Laak and Joel Saltz and Brandon D. Gallas and Uday Kurkure and Michael Barnes and Roberto Salgado and Lee A. D. Cooper}, + title = {Report on computational assessment of Tumor Infiltrating Lymphocytes from the International Immuno-Oncology Biomarker Working Group}, + journal = {npj Breast Cancer}, + year = {2020}, + volume = {6}, + number = {1}, + month = {5}, + doi = {10.1038/s41523-020-0154-2}, + pmid = {32411818}, + file = {:pdf/Amga20.pdf:PDF}, + optnote = {DIAG}, + gsid = {2197993663136428528}, + gscites = {85}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/220833}, + all_ss_ids = {['69999230b02054b82254684a73bb8a4c83878d28', 'b4c4c3dc91d42114023b0575c3e2273b87446ff7']}, +} + +@conference{Amin11, + author = {S. Amin and J.G. Goldin and M.R. Zeidler and E. Kleerup and P. Lu and M. Galperin-Aizenberg and E. M. van Rikxoort and D. Gjertson and D. 
Ross}, + title = {Air trapping on {HRCT} assessed by quantitative image analysis as an early predictor of bronchiolitis obliterans syndrome in lung transplant recipients}, + booktitle = ATS, + year = {2011}, + abstract = {{RATIONALE:} {C}urrent diagnosis of bronchiolitis obliterans syndrome ({BOS}) in lung transplant recipients by spirometry identifies the disease process after the point at which treatment appears to be effective. Earlier identification of BOS may allow improved outcomes. We aim to identify early predictors of {BOS} by quantitatively assessing air trapping in computed tomography scans ({HRCT}) of transplanted lungs. Previous studies of air trapping by visual analysis of {HRCT} were poor at early identification of {BOS}. {METHODS:} {T}he clinical {HRCT} images of eight lung transplant recipients who met spirometric criteria for {BOS} by {FEV1}, and who had the absence of infection and rejection confirmed by bronchoalveolar lavage and transbronchial biopsy within one month of spirometry, were reviewed. For each patient, the {HRCT} within one month of the spirometric diagnosis of {BOS} (visit 2) was evaluated. A previous {HRCT} (visit 1) at least two months prior to visit 2 and six months after transplantation was compared. Semi-automatic lung and lobar segmentation followed by quantitative image analysis were performed on the images of the transplanted lung at residual volume ({RV}) and total lung capacity ({TLC}). For each scan, previously published quantitative measures of air trapping were assessed: lobar volume at {RV} ({RVCT}), whole lung {RV/TLC} ratio ({RV/TLCCT}), density masks between -950 and -860 {HU} at {RV} ({DM}-950-860), median {HU} at {RV}, and 10th percentile {HU} at {RV} were computed. The changes in these measurements between visits 1 and 2 were compared using a t-test. {RESULTS:} {V}isit 1 scans were an average of 144 (range 42-243) days prior to visit 2 scans. The mean {RVCT} increased 52.53 cc (p=.48) and the mean change in {RV/TLCCT} increased 3.35% (p=0.47) at visit 2 when compared to visit 1. The {DM}-950-860 of the expiratory images increased by 0.48 % (p=0.78) from visit 1 to visit 2. Between the two visits, the average median {HU} at {RV} and 10th percentile of {HU} at {RV} decreased by 0.24 % (p=0.95) and 0.32 % (p=0.85), respectively. {CONCLUSION:} {I}n our small sample of lung transplant patients, we found that quantitative measures of air trapping seen at the time of spirometric {BOS} diagnosis were also seen approximately five months earlier. These measures may serve as an earlier marker of chronic lung transplant rejection; however, additional analysis of {CT} images over an extended period of time is necessary.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, +} + +@article{Anto21, + author = {Michela Antonelli and Annika Reinke and Spyridon Bakas and Keyvan Farahani and Annette Kopp-Schneider and Bennett A. Landman and Geert Litjens and Bjoern Menze and Olaf Ronneberger and Ronald M. Summers and Bram van Ginneken and Michel Bilello and Patrick Bilic and Patrick F. Christ and Richard K. G. Do and Marc J. Gollub and Stephan H. Heckers and Henkjan Huisman and William R. Jarnagin and Maureen K. McHugo and Sandy Napel and Jennifer S. Goli Pernicka and Kawal Rhode and Catalina Tobon-Gomez and Eugene Vorontsov and Henkjan Huisman and James A. 
Meakin and Sebastien Ourselin and Manuel Wiesenfarth and Pablo Arbelaez and Byeonguk Bae and Sihong Chen and Laura Daza and Jianjiang Feng and Baochun He and Fabian Isensee and Yuanfeng Ji and Fucang Jia and Namkug Kim and Ildoo Kim and Dorit Merhof and Akshay Pai and Beomhee Park and Mathias Perslev and Ramin Rezaiifar and Oliver Rippel and Ignacio Sarasua and Wei Shen and Jaemin Son and Christian Wachinger and Liansheng Wang and Yan Wang and Yingda Xia and Daguang Xu and Zhanwei Xu and Yefeng Zheng and Amber L. Simpson and Lena Maier-Hein and M. Jorge Cardoso}, + journal = {arXiv preprint arXiv:2106.05735}, + url = {https://arxiv.org/abs/2106.05735}, + title = {The Medical Segmentation Decathlon}, + abstract = {International challenges have become the de facto standard for comparative assessment of image analysis algorithms given a specific task. Segmentation is so far the most widely investigated medical image processing task, but the various segmentation challenges have typically been organized in isolation, such that algorithm development was driven by the need to tackle a single specific clinical problem. We hypothesized that a method capable of performing well on multiple tasks will generalize well to a previously unseen task and potentially outperform a custom-designed solution. To investigate the hypothesis, we organized the Medical Segmentation Decathlon (MSD) - a biomedical image analysis challenge, in which algorithms compete in a multitude of both tasks and modalities. The underlying data set was designed to explore the axis of difficulties typically encountered when dealing with medical images, such as small data sets, unbalanced labels, multi-site data and small objects. The MSD challenge confirmed that algorithms with a consistent good performance on a set of tasks preserved their good average performance on a different set of previously unseen tasks. Moreover, by monitoring the MSD winner for two years, we found that this algorithm continued generalizing well to a wide range of other clinical problems, further confirming our hypothesis. Three main conclusions can be drawn from this study: (1) state-of-the-art image segmentation algorithms are mature, accurate, and generalize well when retrained on unseen tasks; (2) consistent algorithmic performance across multiple tasks is a strong surrogate of algorithmic generalizability; (3) the training of accurate AI segmentation models is now commoditized to non AI experts.}, + file = {:21b - The Medical Segmentation Decathlon.pdf:PDF}, + month = jun, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2021}, + all_ss_ids = {['979a9f247700d00ff2c3f0612d5eb001379f93c8', 'c397c6f1480ac8e3ed875adad96e9b3e00c37f26']}, + gscites = {381}, +} + +@article{Anto22, + author = {Antonelli, Michela and Reinke, Annika and Bakas, Spyridon and Farahani, Keyvan and Kopp-Schneider, Annette and Landman, Bennett A. and Litjens, Geert and Menze, Bjoern and Ronneberger, Olaf and Summers, Ronald M. and van Ginneken, Bram and Bilello, Michel and Bilic, Patrick and Christ, Patrick F. and Do, Richard K. G. and Gollub, Marc J. and Heckers, Stephan H. and Huisman, Henkjan and Jarnagin, William R. and McHugo, Maureen K. and Napel, Sandy and Pernicka, Jennifer S. Golia and Rhode, Kawal and Tobon-Gomez, Catalina and Vorontsov, Eugene and Meakin, James A. 
and Ourselin, Sebastien and Wiesenfarth, Manuel and Arbelaez, Pablo and Bae, Byeonguk and Chen, Sihong and Daza, Laura and Feng, Jianjiang and He, Baochun and Isensee, Fabian and Ji, Yuanfeng and Jia, Fucang and Kim, Ildoo and Maier-Hein, Klaus and Merhof, Dorit and Pai, Akshay and Park, Beomhee and Perslev, Mathias and Rezaiifar, Ramin and Rippel, Oliver and Sarasua, Ignacio and Shen, Wei and Son, Jaemin and Wachinger, Christian and Wang, Liansheng and Wang, Yan and Xia, Yingda and Xu, Daguang and Xu, Zhanwei and Zheng, Yefeng and Simpson, Amber L. and Maier-Hein, Lena and Cardoso, M. Jorge}, + title = {The {Medical} {Segmentation} {Decathlon}}, + doi = {10.1038/s41467-022-30695-9}, + number = {1}, + pages = {4128}, + volume = {13}, + abstract = {International challenges have become the de facto standard for comparative assessment of image analysis algorithms. Although segmentation is the most widely investigated medical image processing task, the various challenges have been organized to focus only on specific clinical tasks. We organized the Medical Segmentation Decathlon (MSD)-a biomedical image analysis challenge, in which algorithms compete in a multitude of both tasks and modalities to investigate the hypothesis that a method capable of performing well on multiple tasks will generalize well to a previously unseen task and potentially outperform a custom-designed solution. MSD results confirmed this hypothesis, moreover, MSD winner continued generalizing well to a wide range of other clinical problems for the next two years. Three main conclusions can be drawn from this study: (1) state-of-the-art image segmentation algorithms generalize well when retrained on unseen tasks; (2) consistent algorithmic performance across multiple tasks is a strong surrogate of algorithmic generalizability; (3) the training of accurate AI segmentation models is now commoditized to scientists that are not versed in AI model training.}, + file = {:pdf/Anto22.pdf:PDF}, + journal = NATCOM, + pmid = {35840566}, + year = {2022}, + all_ss_ids = {['979a9f247700d00ff2c3f0612d5eb001379f93c8', 'c397c6f1480ac8e3ed875adad96e9b3e00c37f26']}, + gscites = {381}, +} + +@conference{Anto23, + author = {Antonissen, N and Venkadesh, K and Gietema, H and Vliegenthart, R and Saghir, Z and Scholten, E. Th and Prokop, M and Schaefer-Prokop, C and Jacobs, C }, + booktitle = ECR, + title = {Retrospective validation of nodule management based on deep learning-based malignancy thresholds in lung cancer screening}, + abstract = {Purpose: We previously developed and validated a deep learning (DL) algorithm for malignancy risk estimation of screen-detected nodules. The nodule risk cut-off for a positive screen, triggering more intensive follow-up (either short-term follow-up, PET-CT or biopsy), varies in existing nodule management protocols; 1-2% for Lung-RADS (cat 3), 6% for PanCan2b (CAT3). In this study, we investigated two DL-based malignancy thresholds to define a positive screen, compared to existing nodule management protocols. + Methods and materials: All baseline CT-scans from the Danish Lung Cancer Screening Trial were linked to lung cancer diagnosis within 2 years, resulting in 2,019 non-cancer and 18 cancer cases. The DL-based malignancy risk was computed for all screen-detected nodules using two malignancy risk cut-off points (6% and 10%), as threshold for a positive screen. For both Lung-RADS and PanCan2b, we used the published nodule-risk cut-offs for a positive screen. 
Sensitivity and False Positive Rate (FPR) were calculated for all baseline scans (n=2,037) using the risk dominant nodule per scan. + Results: At a threshold of 6%, DL achieved the highest sensitivity with 88.9% compared to 83.3% of Lung-RADS and 77.8% with PanCan2b. DL and PanCan2b yielded comparable FPR of 3.6% and 4.1%, respectively, while Lung-RADS had a higher FPR of 8.7%. Increasing the DL threshold to >=10% resulted in a sensitivity of 88.9%, and a FPR of 2.5%. + Conclusion: DL-based nodule risk cut-offs achieved the highest sensitivity and lowest FPR for defining a positive screen, triggering more intense diagnostic work-up. Increasing the risk cut-off from 6% to 10% further decreased the FPR without alteration of sensitivity. + Limitations: This study is a retrospective analysis on data from one screening trial and one screening round. More external validation is needed, including validation for incidence screenings.}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + +@article{Apru19, + author = {Aprupe, Lilija and Litjens, Geert and Brinker, Titus J and van der Laak, Jeroen and Grabe, Niels}, + title = {Robust and accurate quantification of biomarkers of immune cells in lung cancer micro-environment using deep convolutional neural networks}, + journal = PRJ, + year = {2019}, + volume = {7}, + pages = {e6335}, + doi = {10.7717/peerj.6335}, + abstract = {Recent years have seen a growing awareness of the role the immune system plays in successful cancer treatment, especially in novel therapies like immunotherapy. The characterization of the immunological composition of tumors and their micro-environment is thus becoming a necessity. In this paper we introduce a deep learning-based immune cell detection and quantification method, which is based on supervised learning, i.e., the input data for training comprises labeled images. Our approach objectively deals with staining variation and staining artifacts in immunohistochemically stained lung cancer tissue and is as precise as humans. This is evidenced by the low cell count difference to humans of 0.033 cells on average. This method, which is based on convolutional neural networks, has the potential to provide a new quantitative basis for research on immunotherapy.}, + file = {Apru19.pdf:pdf\\Apru19.pdf:PDF}, + optnote = {DIAG}, + pmid = {30993030}, + month = {4}, + gsid = {10979800637142162997}, + gscites = {18}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/202849}, + ss_id = {0c44949334771d4e331d792bc0db77c7fe708ca2}, + all_ss_ids = {['0c44949334771d4e331d792bc0db77c7fe708ca2']}, +} + +@mastersthesis{Arch22, + author = {Anwai Archit and Bram van Ginneken}, + title = {Automated Abdominal Aortic Aneurysm Detection on CT Scans}, + abstract = {Computed tomography (CT) scans enable the detection of local enlargements in the abdominal aorta (AA), resulting to straight-forward quantitative and qualitative understandings, typically instated as abdominal aortic aneurysm (AAA). Although, the segmentation of aorta is disposed to stall in presence of expanded lumen or intraluminal thrombus as a result of insufficient spiteful examples, raising the susceptibility for uneventful outcomes of an aortic rupture. + The motion of this research proposes to develop and validate a fully automated deep learning algorithm to segment and measure AAAs on abdominal CT scans. 
The computer-aided detection (CAD) model is steered by a self-configuring convolutional neural network (CNN), which plumps for essential decisions in a standardised environment to design the 3D segmentation pipeline, regardless of the dataset diversity in the domain. It uses an additional 3D instance-based vertebral segmentation software bundle for independent vertebrae labelling. It coheres with a post-processing routine to perceive the growth patterns by investigation across the aortic centerline around strong anatomical landmarks. It benefits from supplementary measurement techniques of the maximal diameter and cross-section area for gaining extensive insights of the main characteristics of AAA. The system evaluates the relationship between the AA and vertebra level surface features. Conclusively, it generates a portable document, devised to group the anticipated aneurysmal information. + The 3D CAD system agrees with expert's suggestions about the existence of the aneurysm in 398 institutional images, exhibiting a high capacity to generalize across genders and portions of a full body CT scan using solely radiologist-supported quantitative speculations from the radiology reports. The end-to-end routine achieves an 95.7% dice score coefficient (DSC) on the validation subset for patient-specific cases, indicating a modest agreement with radiologists within an average difference of 0.3 cm in the relative measurement of maximal AAA diameter, thus justifying the possibility of generalizing to the detection of aneurysms using report-based textual information only.}, + file = {Arch22.pdf:pdf\\Arch22.pdf:PDF}, + journal = {Master thesis}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2022}, +} + +@conference{Ardu20, + author = {Ardu, Alessandro and Liefers, Bart and de Vente, Coen and Gonz\'{a}lez-Gonzalo, Cristina and Klaver, Caroline and S\'{a}nchez, Clara I.}, + booktitle = EURETINA, + title = {Artificial Intelligence for the Classification and Quantification of Reticular Pseudodrusen in Multimodal Retinal Images}, + url = {https://www.euretina.org/congress/amsterdam-2020/virtual-2020-freepapers/}, + abstract = {Purpose: + Reticular pseudodrusen (RPD) are retinal lesions highly correlated with the risk of developing end-stage age-related macular degeneration (AMD) and, therefore, relevant biomarkers for understanding the progression of AMD. Due to the subtle features characterizing RPD, multiple imaging modalities are often necessary to confirm the presence and extension of RPD, considerably increasing the workload of the expert graders. We propose a deep neural network (DNN) architecture that classifies and quantifies RPD using multimodal retinal images. + Setting: + A cross-sectional study that compares the performance of three expert graders with a DNN trained for identifying and quantifying RPD. Conducted on retinal images drawn from the Rotterdam Study, a population-based cohort, in three modalities: color fundus photographs (CFP), fundus autofluorescence images (FAF) and near-infrared reflectance images (NIR). + Methods: + Multimodal images of 278 eyes of 230 patients were retrieved from the Rotterdam Study database. Of those, 72 eyes showed presence of RPD, 108 had soft distinct/indistinct drusen, and 98 had no signs of drusen as confirmed by the Rotterdam Study graders. 
Delineations of the areas affected with RPD were made in consensus by two human experts using CFP and NIR images simultaneously and were used as reference standard (RS) for RPD area quantification. The data was randomly divided, patient-wise, in training (243) and test (35) sets for model development and evaluation. A DNN was developed for RPD classification and quantification. The proposed DNN is based on an encoder-decoder architecture. The model jointly inputs a set of co-registered retinal image modalities (CFP, NIR, FAF) and outputs a heatmap image containing, per pixel, the likelihood of RPD presence. The 99th percentile of the values contained in this heatmap measures the likelihood of RPD presence. Three independent graders manually delineated RPD in all eyes of the test set based on the CFP and NIR and their performance was compared with the DNN in the tasks of RPD classification and quantification. + Results: + The proposed DNN obtained an area under the receiver operating characteristic curve (AUROC) with 95% confidence interval (CI) of 0.939[0.818-1.0], a sensitivity (SE) of 0.928 and specificity (SP) of 0.809 for the detection of RPD in multimodal imaging. For RPD quantification, the DNN achieved a mean Dice coefficient (DSC) of 0.632+-0.261 and an intra-class correlation (ICC) of 0.676[0.294-0.999]. Comparably, for RPD classification, grader 1 obtained SE/SP pairs of 1.0/0.785, grader 2 of 1.0/0.5 and grader 3 of 1.0/0.785. For RPD quantification, the graders obtained mean DSC of 0.619+-0.196, 0.573+-0.170 and 0.697+-0.157, respectively, and an ICC of 0.721[0.340-0.999], 0.597[0.288-0.999], 0.751[0.294-0.999], respectively. Of the DNN's three false negatives, none of them was correctly classified by the three graders. The model correctly classified RPD in three of the six eyes where graders disagreed and in the only eye where none of the graders found RPD. Overall, 65.1% of the area indicated as RPD by the reference was delineated by at least one grader and only 26.5% of the total was graded as RPD by all experts. The DNN only missed 23.2% of the areas that all three graders identified correctly. + Conclusions: + The proposed DNN showed promising capacities in the tasks of classifying and quantifying RPD lesions on multimodal retinal images. The results show that the model is able to correctly classify and quantify RPD on eyes where lesions are difficult to spot. The probabilistic output of the model allows for the classification of RPD at different levels of confidence and indicates what retinal areas are most likely affected. This is in line with the manual assessment done by the graders. To this point, the model is developed to classify and quantify RPD only on CFP, FAF and NIR. However, introducing other imaging modalities, such as OCT, might help diminish ambiguities in the classification and quantification of this abnormality. Therefore, a future direction for improving the proposed method is to include OCT scans as an additional input to the model. Automatic classification and quantification of RPD using deep learning on multimodal images will enable the automatic and accurate analysis of increasingly large amounts of data for clinical studies and will facilitate AMD screening in the elderly by decreasing the workload of the expert graders. + Financial Disclosure: + None}, + month = {9}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, +} + +@article{Ares18, + author = {Guilherme Aresta and Colin Jacobs and Teresa Ara\'{u}jo and A. 
Cunha and Isabel Ramos and Bram van Ginneken and A. Campilho}, + title = {{iW-Net}: an automatic and minimalistic interactive lung nodule segmentation deep network}, + journal = {arXiv:1811.12789}, + year = {2018}, + abstract = {We propose iW-Net, a deep learning model that allows for both automatic and interactive segmentation of lung nodules in computed tomography images. iW-Net is composed of two blocks: the first one provides an automatic segmentation and the second one allows to correct it by analyzing 2 points introduced by the user in the nodule's boundary. For this purpose, a physics inspired weight map that takes the user input into account is proposed, which is used both as a feature map and in the system's loss function. Our approach is extensively evaluated on the public LIDC-IDRI dataset, where we achieve a state-of-the-art performance of 0.55 intersection over union vs the 0.59 inter-observer agreement. Also, we show that iW-Net allows to correct the segmentation of small nodules, essential for proper patient referral decision, as well as improve the segmentation of the challenging non-solid nodules and thus may be an important tool for increasing the early diagnosis of lung cancer.}, + optnote = {DIAG}, + month = {11}, + all_ss_ids = {['b3dc561dd990cebc626e10318b8582a198aa3571']}, + gscites = {63}, +} + +@inproceedings{Ares18a, + author = {Guilherme Aresta and Teresa Ara\'{u}jo and Colin Jacobs and Bram van Ginneken and Ant\'{o}nio Cunha and Isabel Ramos and Aur\'{e}lio Campilho}, + title = {Towards an automatic lung cancer screening system in low dose computed tomography}, + booktitle = {MICCAI Workshop: Thoracic Image Analysis}, + year = {2018}, + volume = {11040}, + series = LNCS, + doi = {10.1007/978-3-030-00946-5}, + abstract = {We propose a deep learning-based pipeline that, given a low-dose computed tomography of a patient chest, recommends if a patient should be submitted to further medical analysis. The algorithm is composed of a nodule detection block that uses the object detection framework YOLOv2, followed by a U-Net based segmentation. The found structures of interest are then characterized in terms of diameter and texture to produce a final referral recommendation according to the National Lung Screening Trial (NLST) criteria. Our method is trained using the public LUNA16 and LIDC-IDRI datasets and tested on an independent dataset composed of 500 scans from the Kaggle DSB 2017 challenge. The proposed system achieves a patient-wise recall of 89% while providing an explanation to the referral decision and thus may serve as a second opinion tool to speed-up and improve lung cancer screening.}, + file = {:pdf\\Ares18a.pdf:PDF}, + optnote = {DIAG}, + gsid = {5919517379453999457}, + gscites = {15}, + ss_id = {ab99f176fa295a776d6a1fa98d5a5c0156fd4302}, + all_ss_ids = {['ab99f176fa295a776d6a1fa98d5a5c0156fd4302']}, +} + +@article{Ares19, + author = {Aresta, Guilherme and Jacobs, Colin and Araujo, Teresa and Cunha, Antonio and Ramos, Isabel and van Ginneken, Bram and Campilho, Aurelio}, + title = {iW-Net: an automatic and minimalistic interactive lung nodule segmentation deep network}, + journal = NATSCIREP, + year = {2019}, + volume = {9}, + number = {1}, + pages = {11591}, + doi = {10.1038/s41598-019-48004-8}, + url = {https://doi.org/10.1038/s41598-019-48004-8}, + abstract = {We propose iW-Net, a deep learning model that allows for both automatic and interactive segmentation of lung nodules in computed tomography images. 
iW-Net is composed of two blocks: the first one provides an automatic segmentation and the second one allows to correct it by analyzing 2 points introduced by the user in the nodule's boundary. For this purpose, a physics inspired weight map that takes the user input into account is proposed, which is used both as a feature map and in the system's loss function. Our approach is extensively evaluated on the public LIDC-IDRI dataset, where we achieve a state-of-the-art performance of 0.55 intersection over union vs the 0.59 inter-observer agreement. Also, we show that iW-Net allows to correct the segmentation of small nodules, essential for proper patient referral decision, as well as improve the segmentation of the challenging non-solid nodules and thus may be an important tool for increasing the early diagnosis of lung cancer.}, + file = {Ares19.pdf:pdf\\Ares19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31406194}, + month = {8}, + gsid = {17366457829799450871}, + gscites = {63}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/207075}, + ss_id = {b3dc561dd990cebc626e10318b8582a198aa3571}, + all_ss_ids = {['b3dc561dd990cebc626e10318b8582a198aa3571']}, +} + +@article{Argu19, + author = {Argus, Max and Schaefer-Prokop, Cornelia and Lynch, David A. and van Ginneken, Bram}, + title = {Function Follows Form: Regression from Complete Thoracic Computed Tomography Scans}, + journal = {arXiv:1909.12047}, + year = {2019}, + url = {https://arxiv.org/abs/1909.12047}, + abstract = {Chronic Obstructive Pulmonary Disease (COPD) is a leading cause of morbidity and mortality. While COPD diagnosis is based on lung function tests, early stages and progression of different aspects of the disease can be visible and quantitatively assessed on computed tomography (CT) scans. Many studies have been published that quantify imaging biomarkers related to COPD. In this paper we present a convolutional neural network that directly computes visual emphysema scores and predicts the outcome of lung function tests for 195 CT scans from the COPDGene study. Contrary to previous work, the proposed method does not encode any specific prior knowledge about what to quantify, but it is trained end-to-end with a set of 1424 CT scans for which the output parameters were available. The network provided state-of-the-art results for these tasks: Visual emphysema scores are comparable to those assessed by trained human observers; COPD diagnosis from estimated lung function reaches an area under the ROC curve of 0.94, outperforming prior art. The method is easily generalizable to other situations where information from whole scans needs to be summarized in single quantities.}, + file = {Argu19.pdf:pdf\\Argu19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {9}, + all_ss_ids = {['a9a9dfdccb4e656e7ff2b88fc88666415ab7e189', '6425e3f4c37f8deb9e8dc933e34d49aa843635b9', 'cc7605f2b7e61723f12839fabc1066da0cc8744b']}, + gscites = {0}, +} + +@article{Arma08, + author = {S. G. Armato and B. van Ginneken}, + title = {Anniversary paper: image processing and manipulation through the pages of {M}edical {P}hysics}, + journal = MP, + year = {2008}, + volume = {35}, + pages = {4488--4500}, + doi = {10.1118/1.2977537}, + abstract = {{T}he language of radiology has gradually evolved from "the film" (the foundation of radiology since {W}ilhelm {R}oentgen's 1895 discovery of x-rays) to "the image," an electronic manifestation of a radiologic examination that exists within the bits and bytes of a computer. 
{R}ather than simply storing and displaying radiologic images in a static manner, the computational power of the computer may be used to enhance a radiologist's ability to visually extract information from the image through image processing and image manipulation algorithms. {I}mage processing tools provide a broad spectrum of opportunities for image enhancement. {G}ray-level manipulations such as histogram equalization, spatial alterations such as geometric distortion correction, preprocessing operations such as edge enhancement, and enhanced radiography techniques such as temporal subtraction provide powerful methods to improve the diagnostic quality of an image or to enhance structures of interest within an image. {F}urthermore, these image processing algorithms provide the building blocks of more advanced computer vision methods. {T}he prominent role of medical physicists and the {AAPM} in the advancement of medical image processing methods, and in the establishment of the "image" as the fundamental entity in radiology and radiation oncology, has been captured in 35 volumes of {M}edical {P}hysics.}, + file = {Arma08.pdf:pdf\\Arma08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {18975696}, + month = {9}, + gsid = {13150535677729050397}, + gscites = {10}, +} + +@article{Arma18, + author = {Samuel G. Armato and Henkjan Huisman and Karen Drukker and Lubomir Hadjiiski and Justin S. Kirby and Nicholas Petrick and George Redmond and Maryellen L. Giger and Kenny Cha and Artem Mamonov and Jayashree Kalpathy-Cramer and Keyvan Farahani}, + title = {The PROSTATEx Challenges for Computerized Classification of Prostate Lesions from Multi-Parametric Magnetic Resonance Images}, + journal = JMI, + year = {2018}, + volume = {5}, + number = {4}, + pages = {044501}, + doi = {10.1117/1.JMI.5.4.044501}, + abstract = {Grand challenges stimulate advances within the medical imaging research community: within a competitive yet friendly environment, they allow for a direct comparison of algorithms through a well-defined, centralized infrastructure. The tasks of the two-part PROSTATEx Challenges (the PROSTATEx Challenge and the PROSTATEx-2 Challenge) were (1) the computerized classification of clinically significant prostate lesions and (2) the computerized determination of Gleason Grade Group in prostate cancer, both based on multi-parametric magnetic resonance images. The Challenges incorporated well-vetted cases for training and testing, a centralized performance assessment process to evaluate results, and an established infrastructure for case dissemination, communication, and result submission. In the PROSTATEx Challenge, 32 groups applied their computerized methods (71 methods total) to 208 prostate lesions in the test set. Area under the receiver operating characteristic curve (AUC) for these methods in the task of differentiating between lesions that were and were not clinically significant ranged from 0.45-0.87; statistically significant differences in performance among the top-performing methods, however, were not observed. In the PROSTATEx-2 Challenge, 21 groups applied their computerized methods (43 methods total) to 70 prostate lesions in the test set. When compared with the reference standard, the quadratic-weighted kappa values for these methods in the task of assigning a 5-point Gleason Grade Group to each lesion ranged from 0.24-0.27; superiority to random guessing could be established for only two methods. 
When approached with a sense of commitment and scientific rigor, challenges foster interest in the designated task and encourage innovation in the field.}, + file = {Arma18.pdf:pdf\\Arma18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30840739}, + month = {11}, + ss_id = {86cfa3c1324e704263cd7e39d9bed566c47f556e}, + all_ss_ids = {['86cfa3c1324e704263cd7e39d9bed566c47f556e']}, + gscites = {103}, +} + +@conference{Arnt15, + author = {R. Arntz and S. van den Broek and L. Rutten-Jacobs and N. Maaijwee and I. van Uden and M. Ghafoorian and B. Platel and E. van Dijk and F.E. de Leeuw}, + title = {Small vessel disease after stroke at young age: the FUTURE-study}, + booktitle = {European Stroke Organization}, + year = {2015}, + optnote = {DIAG}, +} + +@article{Arnt16, + author = {Arntz, Renate M and van den Broek, Steffen and van Uden, Inge WM and Ghafoorian, Mohsen and Platel, Bram and Rutten-Jacobs, Loes CA and Maaijwee, Noortje AM and Schaapsmeerders, Pauline and Schoonderwaldt, Hennie C and van Dijk, Ewoud J and de Leeuw, FE}, + title = {Accelerated development of cerebral small vessel disease in young stroke patients}, + journal = {Neurology}, + year = {2016}, + doi = {10.1212/WNL.0000000000003123}, + abstract = {Objective: To study the long-term prevalence of small vessel disease after young stroke and to compare this to healthy controls. + Methods: This prospective cohort study comprises 337 patients with an ischemic stroke or TIA, aged 18-50 years without a history of TIA or stroke. In addition 90 age- and sex-matched controls were included. At follow-up lacunes, microbleeds and white matter hyperintensity (WMH) volume were assessed using MRI. To investigate the relation between risk factors and small vessel disease, logistic and linear regression were used. + Results: After mean follow-up of 9.9 (SD 8.1) years, 337 patients were included (227 with an ischemic stroke and 110 with a TIA). Mean age for patients was 49.8 (SD 10.3) years and 45.4% were men, for controls mean age was 49.4 (SD 11.9) and 45.6% were men. Compared with controls, patients more often had at least one lacune (24.0% versus 4.5%, p<0.0001). In addition, they had a higher WMH-volume (median 1.5 ml (IQR 0.5-3.7) versus 0.4 ml (IQR 0.0-1.0), p<0.001). Compared with controls, patients had the same volume of WMH on average 10-20 years earlier. In the patient group, age at stroke (beta=0.03, 95%CI 0.02-0.04), hypertension (beta=0.22, 95%CI 0.04-0.39) and smoking (beta=0.18, 95%CI 0.01-0.34) at baseline were associated with WMH-volume. + Conclusions: Patients with a young stroke have a higher burden of small vessel disease than controls adjusted for confounders. Cerebral aging seems accelerated by 10-20 years in these patients, which may suggest an increased vulnerability to vascular risk factors.}, + file = {Arnt16.pdf:pdf\\Arnt16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27521431}, + month = {8}, + gsid = {17987708002828940533}, + gscites = {28}, + ss_id = {c73a1da11addbc1436980c4a327124bfacc491d3}, + all_ss_ids = {['c73a1da11addbc1436980c4a327124bfacc491d3']}, +} + +@article{Arta09, + author = {X. Artaechevarria and D. P\'{e}rez-Martin and M. Ceresa and G. de Biurrun and D. Blanco and L. M. Montuenga and B. van Ginneken and C. Ortiz-de-Sol\'{o}rzano and A. 
Mu{\~n}oz-Barrutia}, + title = {Airway segmentation and analysis for the study of mouse models of lung disease using micro-{CT}}, + journal = PMB, + year = {2009}, + volume = {54}, + pages = {7009--7024}, + doi = {10.1088/0031-9155/54/22/017}, + abstract = {{A}nimal models of lung disease are gaining importance in understanding the underlying mechanisms of diseases such as emphysema and lung cancer. {M}icro-{CT} allows in vivo imaging of these models, thus permitting the study of the progression of the disease or the effect of therapeutic drugs in longitudinal studies. {A}utomated analysis of micro-{CT} images can be helpful to understand the physiology of diseased lungs, especially when combined with measurements of respiratory system input impedance. {I}n this work, we present a fast and robust murine airway segmentation and reconstruction algorithm. {T}he algorithm is based on a propagating fast marching wavefront that, as it grows, divides the tree into segments. {W}e devised a number of specific rules to guarantee that the front propagates only inside the airways and to avoid leaking into the parenchyma. {T}he algorithm was tested on normal mice, a mouse model of chronic inflammation and a mouse model of emphysema. {A} comparison with manual segmentations of two independent observers shows that the specificity and sensitivity values of our method are comparable to the inter-observer variability, and radius measurements of the mainstem bronchi reveal significant differences between healthy and diseased mice. {C}ombining measurements of the automatically segmented airways with the parameters of the constant phase model provides extra information on how disease affects lung function.}, + file = {Arta09.pdf:pdf\\Arta09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {22}, + pmid = {19887716}, + month = {11}, + gsid = {7831132571538357123}, + gscites = {38}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/79921}, + ss_id = {6f8ce3c9888cefd812fc1de939e5f7a73786152e}, + all_ss_ids = {['6f8ce3c9888cefd812fc1de939e5f7a73786152e']}, +} + +@inproceedings{Arta09a, + author = {X. Artaechevarria and A. Mu{\~n}oz-Barrutia and B. van Ginneken and C. Ortiz-de-Sol\'{o}rzano}, + title = {Fast murine airway segmentation and reconstruction in {micro-CT} images}, + booktitle = MI, + year = {2009}, + volume = {7262}, + series = SPIE, + pages = {72620B1--72620B8}, + doi = {10.1117/12.811554}, + abstract = {{M}ouse models are becoming instrumental for the study of lung disease. {D}ue to its resolution and low cost, high resolution {C}omputed {T}omography (micro-{CT}) is a very adequate technology to visualize the mouse lungs in-vivo. {A}utomatic segmentation and measurement of airways in micro-{CT} images of the lungs can be useful as a preliminary step prior other image analysis quantification tasks, as well as for the study of pathologies that alter the airways structure. {I}n this paper, we present an efficient segmentation and reconstruction algorithm which simultaneously segments and reconstructs the bronchial tree, while providing the length and mean radius of each airway segment. {A} locally adaptive intensity threshold is used to account for the low signal to noise ratio and strong artifacts present in micro-{CT} images. 
{W}e validate our method by comparing it with manual segmentations of 10 different scans, obtaining an average true positive volume fraction of 85.52% with a false positive volume fraction of 5.04%.}, + file = {Arta09a.pdf:pdf\\Arta09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {2}, + gsid = {4299735518106080724}, + gscites = {3}, + ss_id = {d2056f4696e6badb76a795147c1f7fbb07ecf076}, + all_ss_ids = {['d2056f4696e6badb76a795147c1f7fbb07ecf076']}, +} + +@inproceedings{Arzh06, + author = {Arzhaeva, Y. and van Ginneken, B. and Tax, D. M. J.}, + title = {Image classification from generalized image distance features: application to detection of interstitial disease in chest radiographs}, + booktitle = ICPR, + year = {2006}, + doi = {10.1109/ICPR.2006.682}, + abstract = {{O}ne of the most important tasks in medical image analysis is to detect the absence or presence of disease in an image, without having precise delineations of pathology available for training. {A} novel method is proposed to solve such a classification task, based on a generalized representation of an image derived from local per-pixel features. {F}rom this representation, differences between images can be computed, and these can be used to classify the image requiring knowledge of only global image labels for training. {I}t is shown how to construct multiple representations of one image to get multiple classification opinions and combine them to smooth over errors of individual classifiers. {T}he performance of the method is evaluated on the detection of interstitial lung disease on standard chest radiographs. {T}he best result is obtained for the combining classification scheme yielding an area under the {ROC} curve of 0.955.}, + file = {Arzh06.pdf:pdf\\Arzh06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {12424497119551485003}, + gscites = {4}, + ss_id = {e03a6b506700d339efbf75c7547aa6507ec792ae}, + all_ss_ids = {['e03a6b506700d339efbf75c7547aa6507ec792ae']}, +} + +@inproceedings{Arzh06a, + author = {Arzhaeva, Y. and Tax, D. M. J. and van Ginneken, B.}, + title = {Improving computer-aided diagnosis of interstitial disease in chest radiographs by combining one-class and two-class classifiers}, + booktitle = MI, + year = {2006}, + volume = {6144}, + series = SPIE, + pages = {614458-1--614458-8}, + doi = {10.1117/12.652208}, + abstract = {{I}n this paper we compare and combine two distinct pattern classification approaches to the automated detection of regions with interstitial abnormalities in frontal chest radiographs. {S}tandard two-class classifiers and recently developed one-class classifiers are considered. {T}he one-class problem is to find the best model of the normal class and reject all objects that don't fit the model of normality. {T}his one-class methodology was developed to deal with poorly balanced classes, and it uses only objects from a well-sampled class for training. {T}his may be an advantageous approach in medical applications, where normal examples are easier to obtain than abnormal cases. {W}e used receiver operating characteristic ({ROC}) analysis to evaluate classification performance by the different methods as a function of the number of abnormal cases available for training. {V}arious two-class classifiers performed excellently in case that enough abnormal examples were available (area under {ROC} curve {A}z = 0.985 for a linear discriminant classifier). 
{T}he one-class approach gave worse result when used stand-alone ({A}z = 0.88 for {G}aussian data description) but the combination of both approaches, using a mean combining classifier resulted in better performance when only few abnormal samples were available (average {A}z = 0.94 for the combination and {A}z = 0.91 for the stand-alone linear discriminant in the same set-up). {T}his indicates that computer-aided diagnosis schemes may benefit from using a combination of two-class and one-class approaches when only few abnormal samples are available.}, + file = {Arzh06a.pdf:pdf\\Arzh06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {9920783540454814909}, + gscites = {12}, + ss_id = {a96e8a989d891bf4d8b015397a1c6ebf1cf39012}, + all_ss_ids = {['a96e8a989d891bf4d8b015397a1c6ebf1cf39012']}, +} + +@article{Arzh07, + author = {Y. Arzhaeva and M. Prokop and D. M. J. Tax and P. A. de Jong and C. M. Schaefer-Prokop and B. van Ginneken}, + title = {Computer-aided detection of interstitial abnormalities in chest radiographs using a reference standard based on computed tomography}, + journal = MP, + year = {2007}, + volume = {34}, + pages = {4798--4809}, + doi = {10.1118/1.2795672}, + abstract = {{A} computer-aided detection ({CAD}) system is presented for the localization of interstitial lesions in chest radiographs. {T}he system analyzes the complete lung fields using a two-class supervised pattern classification approach to distinguish between normal texture and texture affected by interstitial lung disease. {A}nalysis is done pixel-wise and produces a probability map for an image where each pixel in the lung fields is assigned a probability of being abnormal. {I}nterstitial lesions are often subtle and ill defined on x-rays and hence difficult to detect, even for expert radiologists. {T}herefore a new, semiautomatic method is proposed for setting a reference standard for training and evaluating the {CAD} system. {T}he proposed method employs the fact that interstitial lesions are more distinct on a computed tomography ({CT}) scan than on a radiograph. {L}esion outlines, manually drawn on coronal slices of a {CT} scan of the same patient, are automatically transformed to corresponding outlines on the chest x-ray, using manually indicated correspondences for a small set of anatomical landmarks. {F}or the texture analysis, local structures are described by means of the multiscale {G}aussian filter bank. {T}he system performance is evaluated with {ROC} analysis on a database of digital chest radiographs containing 44 abnormal and 8 normal cases. {T}he best performance is achieved for the linear discriminant and support vector machine classifiers, with an area under the {ROC} curve ({A}z) of 0.78. {S}eparate {ROC} curves are built for classification of abnormalities of different degrees of subtlety versus normal class. {H}ere the best performance in terms of {A}z is 0.90 for differentiation between obviously abnormal and normal pixels. {T}he system is compared with two human observers, an expert chest radiologist and a chest radiologist in training, on evaluation of regions. {E}ach lung field is divided in four regions, and the reference standard and the probability maps are converted into region scores. 
{T}he system performance does not significantly differ from that of the observers, when the perihilar regions are excluded from evaluation, and reaches {A}z=0.85 for the system, with {A}z=0.88 for both observers.}, + file = {Arzh07.pdf:pdf\\Arzh07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {18196808}, + month = {11}, + gsid = {15574026353971813570}, + gscites = {42}, +} + +@conference{Arzh07a, + author = {Y. Arzhaeva and M. Prokop and P. A. de Jong and C. M. Schaefer-Prokop and B. van Ginneken}, + title = {Computer-aided detection of interstitial abnormalities in chest radiographs}, + booktitle = RSNA, + year = {2007}, + pages = {407}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Arzh07b, + author = {Y. Arzhaeva and K. Murphy and M. Prokop and C. M. Schaefer-Prokop and B. van Ginneken}, + title = {Application of computerized texture analysis of {CT} lung images for estimation of interstitial lung disease progression}, + booktitle = RSNA, + year = {2007}, + pages = {267}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Arzh07c, + author = {Y. Arzhaeva and E. M. van Rikxoort and B. van Ginneken}, + title = {Automated segmentation of caudate nucleus in {MR} brain images with voxel classification}, + booktitle = {3D Segmentation in the Clinic: A Grand Challenge}, + year = {2007}, + pages = {65-72}, + file = {Arzh07c.pdf:pdf\\Arzh07c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {14279526554979730903}, + gscites = {6}, + ss_id = {3aed0d4fafa12efe822da4f3614caa46da2cbcf1}, + all_ss_ids = {['3aed0d4fafa12efe822da4f3614caa46da2cbcf1']}, +} + +@article{Arzh09, + author = {Y. Arzhaeva and D. M. J. Tax and B. van Ginneken}, + title = {Dissimilarity-based classification in the absence of local ground truth: application to the diagnostic interpretation of chest radiographs}, + journal = PR, + year = {2009}, + volume = {42}, + pages = {1768--1776}, + doi = {10.1016/j.patcog.2009.01.016}, + abstract = {In this paper classification on dissimilarity representations is applied to medical imaging data with the task of discrimination between normal images and images with signs of disease. We show that dissimilarity-based classification is a beneficial approach in dealing with weakly labeled data, i.e. when the location of disease in an image is unknown and therefore local feature-based classifiers cannot be trained. A modification to the standard dissimilarity-based approach is proposed that makes a dissimilarity measure multi-valued, hence, able to retain more information. A multi-valued dissimilarity between an image and a prototype becomes an image representation vector in classification. Several classification outputs with respect to different prototypes are further integrated into a final image decision. Both standard and proposed methods are evaluated on data sets of chest radiographs with textural abnormalities and compared to several feature-based region classification approaches applied to the same data. On a tuberculosis data set the multi-valued dissimilarity-based classification performs as well as the best region classification method applied to the fully labeled data, with an area under the receiver operating characteristic (ROC) curve (Az) of 0.82. The standard dissimilarity-based classification yields Az=0.80. 
On a data set with interstitial abnormalities both dissimilarity-based approaches achieve Az=0.98 which is closely behind the best region classification method.}, + file = {Arzh09.pdf:pdf\\Arzh09.pdf:PDF}, + gsid = {16382712958622308082}, + optnote = {DIAG, RADIOLOGY, TB}, + number = {9}, + month = {9}, + gscites = {35}, + ss_id = {445c06e086a985f3b08982ec6cc126bd92e40d11}, + all_ss_ids = {['445c06e086a985f3b08982ec6cc126bd92e40d11']}, +} + +@inproceedings{Arzh09b, + author = {Y. Arzhaeva and L. Hogeweg and P. A. de Jong and M. A. Viergever and B. van Ginneken}, + title = {Global and {L}ocal {M}ulti-valued {D}issimilarity-{B}ased {C}lassification: {A}pplication to {C}omputer-{A}ided {D}etection of {T}uberculosis}, + booktitle = MICCAI, + year = {2009}, + series = LNCS, + pages = {724--731}, + doi = {10.1007/978-3-642-04271-3_88}, + abstract = {{I}n many applications of computer-aided detection {(CAD)} it is not possible to precisely localize lesions or affected areas in images that are known to be abnormal. {I}n this paper a novel approach to computer-aided detection is presented that can deal effectively with such weakly labeled data. {O}ur approach is based on multi-valued dissimilarity measures that retain more information about underlying local image features than single-valued dissimilarities. {W}e show how this approach can be extended by applying it locally as well as globally, and by merging the local and global classification results into an overall opinion about the image to be classified. {T}he framework is applied to the detection of tuberculosis {(TB)} in chest radiographs. {T}his is the first study to apply a {CAD} system to a large database of digital chest radiographs obtained from a {TB} screening program, including normal cases, suspect cases and cases with proven {TB.} {T}he global dissimilarity approach achieved an area under the {ROC} curve of 0.81. {T}he combination of local and global classifications increased this value to 0.83.}, + file = {Arzh09b.pdf:pdf\\Arzh09b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + number = {5762}, + gsid = {14232725066672225972}, + gscites = {25}, + ss_id = {e007b22f9cd225c0831cafe69b2074c5ca2a6da1}, + all_ss_ids = {['e007b22f9cd225c0831cafe69b2074c5ca2a6da1']}, +} + +@phdthesis{Arzh09c, + author = {Y. Arzhaeva}, + title = {Computer-aided detection and quantification of interstitial lung disease from computed tomography and chest radiography}, + year = {2009}, + url = {http://igitur-archive.library.uu.nl/dissertations/2009-0923-200142/UUindex.html}, + abstract = {{T}his thesis contributes to the development of pattern classification methods employed by computer-aided detection ({CAD}) systems, with the application of these methods to the automated analysis of interstitial lung disease ({ILD}) and tuberculosis ({TB}). {T}hree different applications are considered. {I}n {C}hapters 2, 3 and 6, we aim at detecting the presence of disease in chest radiographs. {T}he purpose of a {CAD} system described in {C}hapter 4 is to pinpoint the locations of abnormalities in radiographs. {I}n {C}hapter 5, a {CAD} system is described that estimates the progression of {ILD} in serial computed tomography ({CT}) scans. {I}n many applications of {CAD} it is not possible to precisely localize lesions or affected areas in images that are known to be abnormal. {T}herefore, a {CAD} system has to be trained with weakly labeled data. 
{I}n {C}hapters 2 and 3, two novel classification approaches are presented that deal with such data better than traditional classifiers. {I}n {C}hapter 2, a classifier distinguishes between normal and affected radiographs by explicitly optimizing the area under the receiver operating characteristic ({ROC}) curve. {T}his chapter introduces a novel heuristics for subsampling problem constraints which is shown to improve classification time and performance. {C}hapter 3 proposes to represent an image by comparing it with another image. {A} dissimilarity between two images consists of dissimilarities between the distributions of various local texture measurements. {B}y comparing each image with different prototype images, several different classification results can be obtained and combined into a final image decision. {T}he performance of this classification approach is similar to that of methods applied to fully labeled radiographs. {I}n {C}hapter 4, a new method to set a reference standard for interstitial abnormalities in chest radiographs is described. {A}bnormality outlines are manually delineated in selected coronal slices of a chest {CT} scan and automatically mapped to a radiograph of the same patient. {T}he {CAD} system performs pixel-wise classification of the lung fields and produces a color-coded probability map accentuating areas highly probable of being abnormal. {T}he system is shown to perform not significantly different from two radiologists on severely and moderately abnormal regions. {I}n {C}hapter 5, a novel {CAD} application is introduced. {I}t compares corresponding 2{D} axial sections from the baseline and follow-up chest {CT} scans and yields an opinion whether this pair of sections represents regression, progression or unchanged disease. {I}n the first stage of classification, image pairs exhibiting any change in the state of disease are separated from unchanged cases. {I}n the second stage, the direction of an estimated change is classified into regression or progression. {T}he accuracy of our system is not significantly different from that of two radiologists. {I}n {C}hapter 6, the approach described in {C}hapter 3 is applied to the analysis of radiographs from {TB} screening programs. {T}he dissimilarity-based approach is extended by applying it to fixed lung partitions, as well as to the complete lung fields, and merging the local and global classification results into a single image decision. {T}he {CAD} system yields a probability for an image to contain {TB}-related abnormalities.}, + copromotor = {B. van Ginneken}, + file = {Arzh09c.pdf:pdf\\Arzh09c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + promotor = {M. A. Viergever and W. M. Prokop}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Arzh10, + author = {Y. Arzhaeva and M. Prokop and K. Murphy and E. M. van Rikxoort and P. A. de Jong and H. A. Gietema and M. A. Viergever and B. van Ginneken}, + title = {Automated estimation of progression of interstitial lung disease in {CT} images}, + journal = MP, + year = {2010}, + volume = {37}, + pages = {63--73}, + doi = {10.1118/1.3264662}, + abstract = {{PURPOSE}: {A} system is presented for automated estimation of progression of interstitial lung disease in serial thoracic {CT} scans. {METHODS}: {T}he system compares corresponding 2{D} axial sections from baseline and follow-up scans and concludes whether this pair of sections represents regression, progression, or unchanged disease status. 
{T}he correspondence between serial {CT} scans is achieved by intrapatient volumetric image registration. {T}he system classification function is trained with two different feature sets. {F}eatures in the first set represent the intensity distribution of a difference image between the baseline and follow-up {CT} sections. {F}eatures in the second set represent dissimilarities computed between the baseline and follow-up images filtered with a bank of general purpose texture filters. {RESULTS}: {I}n an experiment on 74 scan pairs, the system classification accuracies were 76.1\% and 79.5\% for the two feature sets, respectively, while the accuracies of two observer radiologist were 78.5\% and 82\%, respectively. {T}he agreements of the system with the reference standard, measured by weighted kappa statistics, were 0.611 and 0.683 for the two feature sets, respectively. {CONCLUSIONS}: {T}he system employing the second feature set showed good agreement with the reference standard, and its accuracy approached that of two radiologists.}, + file = {Arzh10.pdf:pdf\\Arzh10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {20175467}, + month = {12}, + gsid = {10305933231621539339}, + gscites = {24}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88430}, + ss_id = {e9a1022bef95937ea3a55df1ea1d5fd3aec2ebf5}, + all_ss_ids = {['e9a1022bef95937ea3a55df1ea1d5fd3aec2ebf5']}, +} + +@conference{Aswo19, + author = {Aswolinskiy, W. AND Horlings, H. AND Mulder, L. AND van der Laak, J. AND Wesseling, J. AND Lips, E. AND Ciompi, F.}, + booktitle = {European Congress of Pathology}, + title = {Potential of an AI-based digital biomarker to predict neoadjuvant chemotherapy response from preoperative biopsies of Luminal-B breast cancer}, + abstract = {Background & objectives: Invasive breast cancer (IBC) is increasingly treated with neoadjuvant chemotherapy. Yet, only 15-20% of Luminal-B patients achieve pathological complete response (pCR). We developed an AI-based biomarker to predict pCR of Luminal-B IBC from preoperative biopsies stained with H&E. + + Methods: First, we trained a deep learning model on a multi-centric dataset of n=277 manually annotated breast cancer H&E-stained histopathology images to segment tumour, lymphocytes and other tissue. Second, we applied the segmentation model to an independent set of n=297 Luminal-B pre-treatment biopsies. For each case, we computed our biomarker: the proportion of tumour within 80mm distance from lymphocyte regions. + + Results: From the Luminal-B cohort, 32/297 cases (11%) were labelled as "pCR" when no remaining cancer cells were reported for the post-operative surgical resection. The biomarker showed significant (p<<0.01) correlation with pCR with a point biserial correlation coefficient of 0.27. Setting a cut-off value based on the optimal operating point of the ROC curve (AUC=0.69), we reached a sensitivity of 0.53 and a specificity of 0.74. + + Conclusion: The developed deep-learning based biomarker quantifies the proportion of inflammatory tumour regions. 
It shows promising results for predicting pCR for Luminal-B breast cancer from pre-treatment biopsies stained with H&E.}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, +} + +@inproceedings{Aswo21, + author = {Witali Aswolinskiy and David Tellez and Gabriel Raya and Lieke van der Woude and Monika Looijen-Salamon and Jeroen van der Laak and Katrien Grunberg and Francesco Ciompi}, + title = {{Neural image compression for non-small cell lung cancer subtype classification in H&E stained whole-slide images}}, + volume = {11603}, + booktitle = {Medical Imaging 2021: Digital Pathology}, + publisher = {SPIE}, + pages = {1 -- 7}, + year = {2021}, + doi = {10.1117/12.2581943}, + ss_id = {6b71ee48bb52c6aab084e7325669e0116dda292b}, + all_ss_ids = {['6b71ee48bb52c6aab084e7325669e0116dda292b']}, + gscites = {8}, +} + +@article{Aubr22, + author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof A. and Klopfleisch, Robert and Ter Hoeve, Natalie and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. and Maier, Andreas and Breen, Jack and Ravikumar, Nishant and Chung, Youjin and Park, Jinah and Nateghi, Ramin and Pourakpour, Fattaneh and Fick, Rutger H. J. and Ben Hadj, Saima and Jahanifar, Mostafa and Shephard, Adam and Dexl, Jakob and Wittenberg, Thomas and Kondo, Satoshi and Lafarge, Maxime W. and Koelzer, Viktor H. and Liang, Jingtang and Wang, Yubo and Long, Xi and Liu, Jingxin and Razavi, Salar and Khademi, April and Yang, Sen and Wang, Xiyue and Erber, Ramona and Klang, Andrea and Lipnik, Karoline and Bolfa, Pompei and Dark, Michael J. and Wasinger, Gabriel and Veta, Mitko and Breininger, Katharina}, + title = {Mitosis domain generalization in histopathology images - The MIDOG challenge.}, + doi = {10.1016/j.media.2022.102699}, + pages = {102699}, + volume = {84}, + abstract = {The density of mitotic figures (MF) within tumor tissue is known to be highly correlated with tumor proliferation and thus is an important marker in tumor grading. Recognition of MF by pathologists is subject to a strong inter-rater bias, limiting its prognostic value. State-of-the-art deep learning methods can support experts but have been observed to strongly deteriorate when applied in a different clinical environment. The variability caused by using different whole slide scanners has been identified as one decisive component in the underlying domain shift. The goal of the MICCAI MIDOG 2021 challenge was the creation of scanner-agnostic MF detection algorithms. The challenge used a training set of 200 cases, split across four scanning systems. As test set, an additional 100 cases split across four scanning systems, including two previously unseen scanners, were provided. In this paper, we evaluate and compare the approaches that were submitted to the challenge and identify methodological factors contributing to better performance. The winning algorithm yielded an F score of 0.748 (CI95: 0.704-0.781), exceeding the performance of six experts on the same task.}, + file = {Aubr22.pdf:pdf\\Aubr22.pdf:PDF}, + journal = {Medical Image Analysis}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36463832}, + year = {2022}, + ss_id = {18e784586cb41d5dfc28e84f4f1c390086c6516d}, + all_ss_ids = {['18e784586cb41d5dfc28e84f4f1c390086c6516d']}, + gscites = {45}, +} + +@book{Aubr23, + author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof A. and Klopfleisch, Robert and Hoeve, Natalie ter and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. 
and Maier, Andreas and Veta, Mitko and Breininger, Katharina}, + title = {Abstract: the MIDOG Challenge 2021}, + doi = {10.1007/978-3-658-41657-7_26}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-658-41657-7_26}, + file = {Aubr23.pdf:pdf\Aubr23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Bildverarbeitung f\"{u}r die Medizin, Workshop}, + citation-count = {0}, + automatic = {yes}, + pages = {115-115}, +} + +@inproceedings{Avoi13, + author = {van der Avoird, Andre and Lin, Ning and van Ginneken, Bram and Manniesing, Rashindra}, + title = {A Hardware Implementation of a Levelset Algorithm for Carotid Lumen Segmentation in {CTA}}, + booktitle = MI, + year = {2013}, + series = SPIE, + doi = {10.1117/12.2007231}, + abstract = {This work presents a novel hardware implementation of a levelset algorithm for carotid lumen segmentation in computed tomography. We propose to use a field programmable gate array (FPGA) to iteratively solve the underlying finite difference scheme. A FPGA processor can be programmed to have a dedicated hardware architecture including specific data path and processor core design with different types of parallelizations which is fully tailored and optimized toward its application. The method has been applied to ten carotid bifurcations of six stroke patients and the results have been compared to the results obtained from the same method implemented in C++. Visual inspections revealed similar segmentation results. The average computation time in software was 1663 +/- 86 seconds, the computation time on the FPGA processor was 28 seconds yielding approximately a 60-fold speed-up which to our knowledge has been unmatched before for this class of algorithms.}, + file = {Avoi13.pdf:pdf\\Avoi13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + ss_id = {cbbe7f568d45425dc6410a27c56468f9fdfefab1}, + all_ss_ids = {['cbbe7f568d45425dc6410a27c56468f9fdfefab1']}, + gscites = {1}, +} + +@article{Ayat20, + author = {Ayatollahi, Fazael and Shokouhi, Shahriar B. and Teuwen, Jonas}, + title = {Differentiating Benign and Malignant Mass and non-Mass Lesions in Breast {DCE-MRI} using Normalized Frequency-based Features}, + doi = {10.1007/s11548-019-02103-z}, + issue = {2}, + pages = {297-307}, + volume = {15}, + abstract = {Purpose: In this study we propose a new computer-aided diagnosis (CADx) to distinguish between malignant and benign mass and non-mass lesions in breast DCE-MRI. For this purpose, we introduce new frequency textural features. + Methods: In this paper we propose novel normalized frequency-based features. These are obtained by applying the dual-tree complex wavelet transform to MRI slices containing a lesion for specific decomposition levels. The low-pass and band-pass frequency coefficients of the dual-tree complex wavelet transform represent the general shape and texture features respectively of the lesion. The extraction of these features is computationally efficient. We employ a support vector machine (SVM) to classify the lesions, and investigate modified cost functions and under- and oversampling strategies to handle the class imbalance. + Results: The proposed method has been tested on a dataset of 80 patients containing 103 lesions. An area under the curve (AUC) of 0.98 for the mass and 0.94 for the non-mass lesions is obtained. Similarly, accuracies of 96.9% and 89.8%, sensitivities of 93.8% and 84.6% and specificities of 98% and 92.3% are obtained for the mass and non-mass lesions respectively.
+ Conclusions: Normalized frequency-based features can characterize benign and malignant lesions efficiently in both mass and non-mass like lesions. Additionally, the combination of normalized frequency-based features and three dimensional shape descriptors improve the CADx performance.}, + file = {Ayat19.pdf:pdf\\Ayat19.pdf:PDF}, + journal = IJCARS, + optnote = {DIAG, RADIOLOGY}, + pmid = {31838643}, + year = {2020}, + month = {12}, + ss_id = {f435c3f782e98246413f30adcf8f78899650d23f}, + all_ss_ids = {['f435c3f782e98246413f30adcf8f78899650d23f']}, + gscites = {5}, +} + +@mastersthesis{Bagu18, + author = {Ines Correia Bagulho}, + title = {Reference Tissue Normalization of Prostate MRI with automatic Multi-Organ Deep Learning Pelvis segmentation}, + abstract = {Prostate cancer is the most common cancer among male patients and second leading cause of death + from cancer in men (excluding non-melanoma skin cancer). Magnetic Resonance Imaging (MRI) is + currently becoming the modality of choice for clinical staging of localized prostate cancer. However, + MRI lacks intensity quantification which hinders its diagnostic ability. The overall aim of this dissertation + is to automate a novel normalization method that can potentially quantify general MR intensities, thus + improving the diagnostic ability of MRI. + Two Prostate multi-parametric MRI cohorts, of 2012 and 2016, were used in this retrospective study. To + improve the diagnostic ability of T2-Weighted MRI, a novel multi-reference tissue normalization method + was tested and automated. This method consists of computing the average intensity of the referencetissues + and the corresponding normalized reference values to define a look-up-table through interpolation. + Since the method requires delineation of multiple reference tissues, an MRI-specific Deep Learning + model, Aniso-3DUNET, was trained on manual segmentations and tested to automate this segmentation + step. The output of the Deep Learning model, that consisted of automatic segmentations, was validated + and used in an automatic normalization approach. The effect of the manual and automatic normalization + approaches on diagnostic accuracy of T2-weighted intensities was determined with Receiver Operating + Characteristic (ROC) analyses. The Areas Under the Curve (AUC) were compared. + The automatic segmentation of multiple reference-tissues was validated with an average DICE score + higher than 0.8 in the test phase. Thereafter, the method developed demonstrated that the normalized + intensities lead to an improved diagnostic accuracy over raw intensities using the manual approach, with + an AUC going from 0.54 (raw) to 0.68 (normalized), and automatic approach, with an AUC going from + 0.68 to 0.73. + This study demonstrates that multi-reference tissue normalization improves quantification of T2-weighted + images and diagnostic accuracy, possibly leading to a decrease in radiologist's interpretation variability. 
+ It is also possible to conclude that this novel T2-weighted MRI normalization method can be automatized, + becoming clinically applicable.}, + file = {Bagu18.pdf:pdf/Bagu18.pdf:PDF}, + optnote = {DIAG}, + school = {Universidade De Lisboa}, + year = {2018}, + journal = {Master thesis}, +} + +@article{Baid18, + author = {Baidoshvili, Alexi and Bucur, Anca and van Leeuwen, Jasper and van der Laak, Jeroen and Kluin, Philip and van Diest, Paul J.}, + title = {Evaluating the benefits of digital pathology implementation: time savings in laboratory logistics}, + journal = Histopathology, + year = {2018}, + volume = {73}, + issue = {5}, + month = {11}, + pages = {784--794}, + doi = {10.1111/his.13691}, + abstract = {The benefits of digital pathology for workflow improvement and thereby cost savings in pathology, at least partly outweighing investment costs, are being increasingly recognised. Successful implementations in a variety of scenarios have started to demonstrate the cost benefits of digital pathology for both research and routine diagnosis, contributing to a sound business case encouraging further adoption. To further support new adopters, there is still a need for detailed assessment of the impact that this technology has on the relevant pathology workflows, with an emphasis on time-saving. To assess the impact of digital pathology adoption on logistic laboratory tasks (i.e. not including pathologists' time for diagnosis-making) in the Laboratorium Pathologie Oost Nederland, a large regional pathology laboratory in The Netherlands. To quantify the benefits of digitisation, we analysed the differences between the traditional analogue and new digital workflows, carried out detailed measurements of all relevant steps in key analogue and digital processes, and compared the time spent. We modelled and assessed the logistic savings in five workflows: (i) routine diagnosis; (ii) multidisciplinary meeting; (iii) external revision requests; (iv) extra stainings; and (v) external consultation. On average, >19 working hours were saved on a typical day by working digitally, with the highest savings in routine diagnosis and multidisciplinary meeting workflows. By working digitally, a significant amount of time could be saved in a large regional pathology laboratory with a typical case mix. We also present the data in each workflow per task and concrete logistic steps to allow extrapolation to the context and case mix of other laboratories.}, + file = {:pdf/Baid18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29924891}, + gsid = {4937409988134534335}, + gscites = {68}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/197167}, + ss_id = {c549a002b6c2d5849674288cdf82bc2b2c0629d7}, + all_ss_ids = {['c549a002b6c2d5849674288cdf82bc2b2c0629d7']}, +} + +@article{Baid18a, + author = {Baidoshvili, Alexi and Stathonikos, Nikolas and Freling, Gerard and Bart, Jos and 't Hart, Nils and van der Laak, Jeroen and Doff, Jan and van der Vegt, Bert and Kluin, Philip M. and van Diest, Paul J.}, + title = {Validation of a whole-slide image-based teleconsultation network}, + journal = Histopathology, + year = {2018}, + volume = {73}, + month = {6}, + pages = {777-783}, + doi = {10.1111/his.13673}, + abstract = {Aims: Most validation studies on digital pathology diagnostics have been performed in single institutes.
Because rapid consultation on cases with extramural experts is one of the most important uses for digital pathology laboratory networks, the aim of this study was to validate a whole-slide image-based teleconsultation network between three independent laboratories. Methods and results: Each laboratory contributed 30 biopsies and/or excisions, totalling 90 specimens (776 slides) of varying difficulty and covering a wide variety of organs and subspecialties. All slides were scanned centrally at x40 scanning magnification and uploaded, and subsequently assessed digitally by 16 pathologists using the same image management system and viewer. Each laboratory was excluded from digital assessment of their own cases. Concordance rates between the two diagnostic modalities (light microscopic versus digital) were compared. Loading speed of the images, zooming latency and focus quality were scored. Leaving out eight minor discrepancies without any clinical significance, the concordance rate between remote digital and original microscopic diagnoses was 97.8%. The two cases with a major discordance (for which the light microscopic diagnoses were deemed to be the better ones) resulted from a different interpretation of diagnostic criteria in one case and an image quality issue in the other case. Average scores for loading speed of the images, zooming latency and focus quality were 2.37 (on a scale up to 3), 2.39 (scale up to 3) and 3.06 (scale up to 4), respectively. Conclusions: This validation study demonstrates the suitability of a teleconsultation network for remote digital consultation using whole-slide images. Such networks may contribute to faster revision and consultation in pathology while maintaining diagnostic standards.}, + file = {Baid18a.pdf:pdf\\Baid18a.pdf:PDF}, + optnote = {DIAG}, + pmid = {29893996}, + gsid = {10398339840051479936}, + gscites = {16}, + ss_id = {4c56dbeda2cb93aadcc8e5c697ccb6f44283799b}, + all_ss_ids = {['4c56dbeda2cb93aadcc8e5c697ccb6f44283799b']}, +} + +@article{Bakk19, + author = {Bakker, Marije F and de Lange, Stephanie V and Pijnappel, Ruud M and Mann, Ritse M and Peeters, Petra H M and Monninkhof, Evelyn M and Emaus, Marleen J and Loo, Claudette E and Bisschops, Robertus H C and Lobbes, Marc B I and de Jong, Matthijn D F and Duvivier, Katya M and Veltman, Jeroen and Karssemeijer, Nico and de Koning, Harry J and van Diest, Paul J and Mali, Willem P T M and van den Bosch, Maurice A A J and Veldhuis, Wouter B and van Gils, Carla H and DENSE Trial Study Group}, + title = {Supplemental MRI Screening for Women with Extremely Dense Breast Tissue}, + journal = NEJM, + year = {2019}, + volume = {381}, + issue = {22}, + month = {11}, + pages = {2091--2102}, + doi = {10.1056/NEJMoa1903986}, + abstract = {Extremely dense breast tissue is a risk factor for breast cancer and limits the detection of cancer with mammography. Data are needed on the use of supplemental magnetic resonance imaging (MRI) to improve early detection and reduce interval breast cancers in such patients. In this multicenter, randomized, controlled trial in the Netherlands, we assigned 40,373 women between the ages of 50 and 75 years with extremely dense breast tissue and normal results on screening mammography to a group that was invited to undergo supplemental MRI or to a group that received mammography screening only. The groups were assigned in a 1:4 ratio, with 8061 in the MRI-invitation group and 32,312 in the mammography-only group.
The primary outcome was the between-group difference in the incidence of interval cancers during a 2-year screening period. The interval-cancer rate was 2.5 per 1000 screenings in the MRI-invitation group and 5.0 per 1000 screenings in the mammography-only group, for a difference of 2.5 per 1000 screenings (95% confidence interval [CI], 1.0 to 3.7; P<0.001). Of the women who were invited to undergo MRI, 59% accepted the invitation. Of the 20 interval cancers that were diagnosed in the MRI-invitation group, 4 were diagnosed in the women who actually underwent MRI (0.8 per 1000 screenings) and 16 in those who did not accept the invitation (4.9 per 1000 screenings). The MRI cancer-detection rate among the women who actually underwent MRI screening was 16.5 per 1000 screenings (95% CI, 13.3 to 20.5). The positive predictive value was 17.4% (95% CI, 14.2 to 21.2) for recall for additional testing and 26.3% (95% CI, 21.7 to 31.6) for biopsy. The false positive rate was 79.8 per 1000 screenings. Among the women who underwent MRI, 0.1% had either an adverse event or a serious adverse event during or immediately after the screening. The use of supplemental MRI screening in women with extremely dense breast tissue and normal results on mammography resulted in the diagnosis of significantly fewer interval cancers than mammography alone during a 2-year screening period. (Funded by the University Medical Center Utrecht and others; DENSE ClinicalTrials.gov number, NCT01315015.).}, + file = {Bakk19.pdf:pdf\\Bakk19.pdf:PDF}, + optnote = {DIAG}, + pmid = {31774954}, + gsid = {13397244904340425857}, + gscites = {288}, + ss_id = {60ba4be07cc7c44df3c4baf8ceece4b460bdb774}, + all_ss_ids = {['60ba4be07cc7c44df3c4baf8ceece4b460bdb774']}, +} + +@article{Bala05, + author = {Balassy, Csilla and Prokop, Mathias and Weber, Michael and Sailer, Johannes and Herold, Christian J. and Schaefer-Prokop, Cornelia}, + title = {Flat-panel display ({LCD}) versus high-resolution gray-scale display ({CRT}) for chest radiography: an observer preference study}, + journal = AJR, + year = {2005}, + volume = {184}, + pages = {752--756}, + abstract = {Our objective was to compare cathode ray tube (CRT) display with liquid crystal display (LCD) for soft-copy viewing of chest radiographs in a clinical setting.We displayed 80 posteroanterior digital chest radiographs side by side on a 5-megapixel CRT display and a 3-megapixel LCD. Gradation characteristics of both monitors were adjusted to DICOM display standards. Using a 4-point scale, seven radiologists ranked overall image quality and visibility of anatomic landmarks. Data analysis included Wilcoxon's rank sum test to assess the significance of preference for the different display modes and calculation of the percentage of images ranked equally by at least five of the seven radiologists.Wilcoxon's rank sum test found significant preferences (p < 0.001) for the CRT display for visualization of structures in low-attenuation areas of the thorax and for the LCD for visualization of structures in high-attenuation areas of the thorax. Overall image quality was ranked equal by at least five radiologists in 70\% of cases, whereas for the remaining images a significant preference was found for the CRT display.We conclude that, under subdued ambient lighting conditions and without use of windowing, for most images the overall quality is equal with high-resolution CRT display and LCD. 
In images judged preferentially, we found a significant superiority for LCD for delineating mediastinal structures and for CRT display for delineating structures in the lung.}, + file = {Bala05.pdf:pdf\\Bala05.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {3}, + pmid = {15728593}, + month = {3}, + gsid = {12511755905883910379}, + gscites = {48}, +} + +@article{Balk19, + author = {Balkenhol, Maschenka C A and Bult, Peter and Tellez, David and Vreuls, Willem and Clahsen, Pieter C and Ciompi, Francesco and van der Laak, Jeroen A W M}, + title = {Deep learning and manual assessment show that the absolute mitotic count does not contain prognostic information in triple negative breast cancer}, + journal = CELLO, + year = {2019}, + volume = {42}, + month = {4}, + pages = {4555-4569}, + doi = {10.1007/s13402-019-00445-z}, + abstract = {The prognostic value of mitotic count for invasive breast cancer is firmly established. As yet, however, limited studies have been aimed at assessing mitotic counts as a prognostic factor for triple negative breast cancers (TNBC). Here, we assessed the prognostic value of absolute mitotic counts for TNBC, using both deep learning and manual procedures. A retrospective TNBC cohort (n=298) was used. The absolute manual mitotic count was assessed by averaging counts from three independent observers. Deep learning was performed using a convolutional neural network on digitized H&E slides. Multivariable Cox regression models for relapse-free survival and overall survival served as baseline models. These were expanded with dichotomized mitotic counts, attempting every possible cut-off value, and evaluated by means of the c-statistic. We found that per 2 mm2 averaged manual mitotic counts ranged from 1 to 187 (mean 37.6, SD 23.4), whereas automatic counts ranged from 1 to 269 (mean 57.6; SD 42.2). None of the cut-off values improved the models' baseline c-statistic, for both manual and automatic assessments. Based on our results we conclude that the level of proliferation, as reflected by mitotic count, does not serve as a prognostic factor for TNBC. Therefore, TNBC patient management based on mitotic count should be discouraged.}, + file = {Balk19.pdf:pdf\\Balk19.pdf:PDF}, + optnote = {DIAG}, + pmid = {30989469}, + gsid = {12757377873820382256}, + gscites = {17}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/206059}, + all_ss_ids = {['91bcebba717670d740ef76097a0885a0bd3c9dde', '94962ef85aca6df5c8289874b56bcaa8722dc596']}, +} + +@article{Balk19b, + author = {Balkenhol, Maschenka C A and Tellez, David and Vreuls, Willem and Clahsen, Pieter C and Pinckaers, Hans and Ciompi, Francesco and Bult, Peter and van der Laak, Jeroen A W M}, + title = {Deep learning assisted mitotic counting for breast cancer}, + journal = LABINV, + year = {2019}, + month = {6}, + doi = {10.1038/s41374-019-0275-0}, + url = {https://www.nature.com/articles/s41374-019-0275-0}, + abstract = {As part of routine histological grading, for every invasive breast cancer the mitotic count is assessed by counting mitoses in the (visually selected) region with the highest proliferative activity. Because this procedure is prone to subjectivity, the present study compares visual mitotic counting with deep learning based automated mitotic counting and fully automated hotspot selection. Two cohorts were used in this study. Cohort A comprised 90 prospectively included tumors which were selected based on the mitotic frequency scores given during routine glass slide diagnostics. 
This pathologist additionally assessed the mitotic count in these tumors in whole slide images (WSI) within a preselected hotspot. A second observer performed the same procedures on this cohort. The preselected hotspot was generated by a convolutional neural network (CNN) trained to detect all mitotic figures in digitized hematoxylin and eosin (H&E) sections. The second cohort comprised a multicenter, retrospective TNBC cohort (n=298), of which the mitotic count was assessed by three independent observers on glass slides. The same CNN was applied on this cohort and the absolute number of mitotic figures in the hotspot was compared to the averaged mitotic count of the observers. Baseline interobserver agreement for glass slide assessment in cohort A was good (kappa 0.689; 95% CI 0.580-0.799). Using the CNN generated hotspot in WSI, the agreement score increased to 0.814 (95% CI 0.719-0.909). Automated counting by the CNN in comparison with observers counting in the predefined hotspot region yielded an average kappa of 0.724. We conclude that manual mitotic counting is not affected by assessment modality (glass slides, WSI) and that counting mitotic figures in WSI is feasible. Using a predefined hotspot area considerably improves reproducibility. Also, fully automated assessment of mitotic score appears to be feasible without introducing additional bias or variability.}, + file = {Balk19b.pdf:pdf\\Balk19b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31222166}, + gsid = {6608334460566471833}, + gscites = {65}, + ss_id = {66f70286594b27928fb0eea3b4a0f292261c292f}, + all_ss_ids = {['66f70286594b27928fb0eea3b4a0f292261c292f', '2125835bf1c4fd0646b5dd50855d647044c07658']}, +} + +@article{Balk20, + author = {Balkenhol, Maschenka C A and Vreuls, Willem and Wauters, Carla A P and Mol, Suzanne J J and van der Laak, Jeroen A W M and Bult, Peter}, + title = {Histological subtypes in triple negative breast cancer are associated with specific information on survival}, + journal = AODP, + year = {2020}, + volume = {46}, + month = jun, + pages = {151490}, + doi = {10.1016/j.anndiagpath.2020.151490}, + abstract = {Much research has focused on finding novel prognostic biomarkers for triple negative breast cancer (TNBC), whereas only scattered information about the relation between histopathological features and survival in TNBC is available. This study aims to explore the prognostic value of histological subtypes in TNBC. A multicenter retrospective TNBC cohort was established from five Dutch hospitals. All non-neoadjuvantly treated, stage I-III patients with estrogen receptor, progesterone receptor and human epidermal growth factor receptor 2 negative breast cancer diagnosed between 2006 and 2014 were included. Clinical and follow-up data (overall survival; OS, relapse free survival; RFS) were retrieved and a central histopathological review was performed. Of 597 patients included (median follow up 62.8 months, median age at diagnosis 56.0 years), 19.4% developed a recurrence. The most prevalent histological subtypes were carcinoma of no special type (NST) (88.4%), metaplastic carcinoma (4.4%) and lobular carcinoma (3.4%). Collectively, tumors of special type were associated with a worse RFS and OS compared to carcinoma NST (RFS HR 1.89; 95% CI 1.18-3.03; p = 0.008; OS HR 1.94; 95% CI 1.28-2.92; p = 0.002). Substantial differences in survival, however, were present between the different histological subtypes. 
In the presented TNBC cohort, special histological subtype was in general associated with less favorable survival. However, within the group of tumors of special type there were differences in survival between the different subtypes. Accurate histological examination can provide specific prognostic information that may potentially enable more personalized treatment and surveillance regimes for TNBC patients.}, + file = {Balk20.pdf:pdf\\Balk20.pdf:PDF}, + optnote = {DIAG}, + pmid = {32179443}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/221654}, + ss_id = {2c00230262e1e99862ab27743b725e222a74dcca}, + all_ss_ids = {['2c00230262e1e99862ab27743b725e222a74dcca']}, + gscites = {20}, +} + +@phdthesis{Balk20a, + author = {Maschenka C. A. Balkenhol}, + title = {Tissue-based biomarker assessment for predicting prognosis of triple negative breast cancer: the additional value of artificial intelligence}, + url = {https://repository.ubn.ru.nl/handle/2066/220344}, + abstract = {Despite much research, currently still about 1 in 4 patients with TNBC will develop a recurrence after which the survival outlook is very poor. To date, no prognostic markers are available for TNBC to adequately stratify patients for the risk of developing a recurrence. The emergence of powerful computer algorithms, in particular deep learning models, enable more in depth and more extensive biomarker exploration. In addition, these algorithms are objective and reproducible, in contrast to most human visual biomarker assessment. The first aim of this thesis was to establish a well-defined cohort of TNBC, consisting of tissue sections, clinical and pathology data as well as comprehensive follow up data. Secondly, we aimed to evaluate the prognostic value of the mitotic count, which has widespread clinical use as part of the Nottingham grading system. We studied mitotic count both via conventional manual assessment and automatic assessment, to see if we could find a cut-off value which is better tailored for TNBC. Our third aim was to evaluate the prognostic value of TILs, a promising biomarker not yet used in clinical practice. + + To study the prognostic value of biomarkers in TNBC, the following objectives were defined: + 1. Establish a multicentre TNBC cohort including tissue sections and follow up data (Chapter 2) + 2. Develop a baseline prognostic model for TNBC based on the currently known clinicopathological variables (Chapter 2) + 3. Establish a computer algorithm (Chapter 3) which can automatically find mitoses in WSI of breast cancer, and validate the algorithm (Chapter 4) + 4. Explore the prognostic value of the mitotic count for TNBC using manual and automatic assessment (Chapter 5) + 5. Optimize the assessment of tumour infiltrating lymphocytes using deep learning and study its prognostic value in TNBC (Chapter 6)}, + copromotor = {P. Bult and F. Ciompi}, + file = {Balk20a.pdf:pdf\\Balk20.pdf:PDF}, + optnote = {DIAG}, + promotor = {J.A.W.M. van der Laak and N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + year = {2020}, + journal = {PhD thesis}, +} + +@conference{Balk20b, + author = {M. Balkenhol and P. Bult and D. Tellez and W. Vreuls and P. Clahsen and F. Ciompi and J. Van der Laak}, + booktitle = {European Journal of Cancer}, + title = {Deep learning enables fully automated mitotic density assessment in breast cancer histopathology}, + abstract = {Background: Mitosis counting is an important part of breast cancer grading, yet known to suffer from observer variability. 
Advances in machine learning enable fully automated analysis of digitized glass slides. The present study evaluated automatic mitosis counting and demonstrated applicability on triple negative breast cancers (TNBC). + Material and Methods: In entire scanned H&E slides of 90 invasive breast tumours, a deep learning algorithm (DLA) fully automatically detected all mitoses and determined the hotspot (area with highest mitotic density). Subsequently, two independent observers assessed mitotic counts on glass slides according to routine practice, and in the computer-defined hotspot. + Next, automated mitotic counting was performed in our TNBC cohort (n = 597). Multivariable Cox regression survival models were expanded with dichotomized mitotic counts. The c-statistic was used to evaluate the additional prognostic value of every possible cut off value. + Results: Automatic counting showed excellent concordance with visual assessment in computer detected hotspots with intraclass correlation coefficients (ICC) of 0.895 (95% CI 0.845-0.930) and 0.888 (95% CI 0.783-0.936) for two observers, respectively. ICC of fully automated counting versus conventional glass slide assessment were 0.828 (95% CI 0.750-0.883 and 0.757 (95% CI 0.638-0.839), respectively. + In the TNBC cohort, automatic mitotic counts ranged from 1 to 269 (mean 57.6) in 2 mm2 hotspots. None of the cut off values improved the models' baseline c-statistic. + Conclusion: Automatic mitosis counting is a promising complementary aid for mitoses assessment. Our method was capable of fully automatically locating the mitotic hotspot in tumours, and was capable of processing a large series of TNBC, showing that mitotic count was not prognostic for TNBC even when attempting alternative cut off points.}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, +} + +@article{Balk21, + author = {Balkenhol, Maschenka Ca and Ciompi, Francesco and Swiderska-Chadaj, Zaneta and van de Loo, Rob and Intezar, Milad and Otte-Holler, Irene and Geijs, Daan and Lotz, Johannes and Weiss, Nick and de Bel, Thomas and Litjens, Geert and Bult, Peter and van der Laak, Jeroen Awm}, + title = {Optimized tumour infiltrating lymphocyte assessment for triple negative breast cancer prognostics.}, + volume = {56}, + pages = {78--87}, + doi = {10.1016/j.breast.2021.02.007}, + abstract = {The tumour microenvironment has been shown to be a valuable source of prognostic information for different cancer types. This holds in particular for triple negative breast cancer (TNBC), a breast cancer subtype for which currently no prognostic biomarkers are established. Although different methods to assess tumour infiltrating lymphocytes (TILs) have been published, it remains unclear which method (marker, region) yields the most optimal prognostic information. In addition, to date, no objective TILs assessment methods are available. For this proof of concept study, a subset of our previously described TNBC cohort (n = 94) was stained for CD3, CD8 and FOXP3 using multiplex immunohistochemistry and subsequently imaged by a multispectral imaging system. Advanced whole-slide image analysis algorithms, including convolutional neural networks (CNN) were used to register unmixed multispectral images and corresponding H&E sections, to segment the different tissue compartments (tumour, stroma) and to detect all individual positive lymphocytes. 
Densities of positive lymphocytes were analysed in different regions within the tumour and its neighbouring environment and correlated to relapse free survival (RFS) and overall survival (OS). We found that for all TILs markers the presence of a high density of positive cells correlated with an improved survival. None of the TILs markers was superior to the others. The results of TILs assessment in the various regions did not show marked differences between each other. The negative correlation between TILs and survival in our cohort is in line with previous studies. Our results provide directions for optimizing TILs assessment methodology.}, + file = {:pdf/Balk21.pdf:PDF}, + journal = {The Breast}, + month = apr, + optnote = {DIAG}, + pmid = {33640523}, + year = {2021}, + ss_id = {25890a20503e79f773f427c999fcb41387f0aab1}, + all_ss_ids = {['25890a20503e79f773f427c999fcb41387f0aab1']}, + gscites = {17}, +} + +@inproceedings{Balo11, + author = {Balocco, Simone and Gatta, Carlo and Ciompi, Francesco and Pujol, Oriol and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, + title = {Combining Growcut and temporal correlation for IVUS lumen segmentation}, + booktitle = PRIA, + year = {2011}, + publisher = {Springer}, + pages = {556--563}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-21257-4_69}, + abstract = {The assessment of arterial luminal area, performed by IVUS analysis, is a clinical index used to evaluate the degree of coronary artery disease. In this paper we propose a novel approach to automatically segment the vessel lumen, which combines model-based temporal information extracted from successive frames of the sequence, with spatial classification using the Growcut algorithm. The performance of the method is evaluated by an in vivo experiment on 300 IVUS frames. The automatic and manual segmentation performances in general vessel and stent frames are comparable. The average segmentation error in vessel, stent and bifurcation frames are 0.17 +/- 0.08 mm, 0.18 +/- 0.07 mm and 0.31 +/- 0.12 mm respectively.}, + file = {Balo11.pdf:pdf\\Balo11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {9479858170729570556}, + gscites = {25}, +} + +@article{Balo13, + author = {Balocco, Simone and Gatta, Carlo and Ciompi, Francesco and Wahle, Andreas and Radeva, Petia and Carlier, Stephane and Unal, Gozde and Sanidas, Elias and Mauri, Josepa and Carrillo, Xavier and Kovarnik, Tomas and Wang, Ching-Wei and Chen, Hsiang-Chou and Exarchos, Themis P. and Fotiadis, Dimitrios I. and Destrempes, Fran\c{c}ois and Cloutier, Guy and Pujol, Oriol and Alberti, Marina and Mendizabal-Ruiz, E Gerardo and Rivera, Mariano and Aksoy, Timur and Downe, Richard W. and Kakadiaris, Ioannis A.}, + title = {Standardized evaluation methodology and reference database for evaluating IVUS image segmentation}, + journal = CMIG, + year = {2014}, + volume = {38}, + pages = {70-90}, + doi = {10.1016/j.compmedimag.2013.07.001}, + abstract = {This paper describes an evaluation framework that allows a standardized and quantitative comparison of IVUS lumen and media segmentation algorithms. This framework has been introduced at the MICCAI 2011 Computing and Visualization for (Intra)Vascular Imaging (CVII) workshop, comparing the results of eight teams that participated. We describe the available data-base comprising of multi-center, multi-vendor and multi-frequency IVUS datasets, their acquisition, the creation of the reference standard and the evaluation measures.
The approaches address segmentation of the lumen, the media, or both borders; semi- or fully-automatic operation; and 2-D vs. 3-D methodology. Three performance measures for quantitative analysis have been proposed. The results of the evaluation indicate that segmentation of the vessel lumen and media is possible with an accuracy that is comparable to manual annotation when semi-automatic methods are used, as well as encouraging results can be obtained also in case of fully-automatic segmentation. The analysis performed in this paper also highlights the challenges in IVUS segmentation that remains to be solved.}, + file = {Balo13.pdf:pdf\\Balo13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {24012215}, + month = {3}, + gsid = {8816868787926476106}, + gscites = {88}, +} + +@book{Balo17, + author = {Balocco, Simone and Ciompi, Francesco and Rigla, Juan and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, + title = {Intra-coronary Stent Localization in Intravascular Ultrasound Sequences, A Preliminary Study}, + doi = {10.1007/978-3-319-67534-3_2}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-67534-3_2}, + file = {Balo17.pdf:pdf\Balo17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Lecture Notes in Computer Science}, + citation-count = {2}, + automatic = {yes}, + pages = {12-19}, +} + +@article{Balo18, + author = {Balocco, Simone and Ciompi, Francesco and Rigla, Juan and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, + title = {Assessment Of Intra-coronary Stent Location And Extension In Intravascular Ultrasound Sequences}, + journal = MP, + year = {2018}, + volume = {46}, + number = {2}, + pages = {484-493}, + doi = {10.1002/mp.13273}, + abstract = {An intraluminal coronary stent is a metal scaffold deployed in a stenotic artery during Percutaneous Coronary Intervention (PCI). In order to have an effective deployment, a stent should be optimally placed with regard to anatomical structures such as bifurcations and stenoses. Intravascular Ultrasound (IVUS) is a catheter-based imaging technique generally used for PCI guiding and assessing the correct placement of the stent. A novel approach that automatically detects the boundaries and the position of the stent along the IVUS pullback is presented. Such technique aims at optimizing the stent deployment. The method requires the identification of the stable frames of the sequence and the reliable detection of stent struts. Using this data, a measure of likelihood for a frame to contain a stent is computed. Then, a robust binary representation of the presence of the stent in the pullback is obtained applying an iterative and multi-scale quantization of the signal to symbols using the Symbolic Aggregate approXimation algorithm. The technique was extensively validated on a set of 103 IVUS of sequences of in-vivo coronary arteries containing metallic and bio-absorbable stents acquired through an international multi-centric collaboration across five clinical centers. The method was able to detect the stent position with an overall F-measure of 86.4%, a Jaccard index score of 75% and a mean distance of 2.5 mm from manually annotated stent boundaries, and in bio-absorbable stents with an overall F-measure of 88.6%, a Jaccard score of 77.7 and a mean distance of 1.5 mm from manually annotated stent boundaries. 
Additionally, a map indicating the distance between the lumen and the stent along the pullback is created in order to show the angular sectors of the sequence in which the malapposition is present. Results obtained comparing the automatic results versus the manual annotation of two observers shows that the method approaches the inter-observer variability. Similar performances are obtained on both metallic and bio-absorbable stents, showing the flexibility and robustness of the method. This article is protected by copyright. All rights reserved.}, + file = {:pdf/Balo18.pdf:PDF}, + optnote = {DIAG}, + pmid = {30383304}, + month = {12}, + gsid = {7215414788070039474}, + gscites = {4}, + ss_id = {3a3664f9b7e6c4258d9dc282c9c43a95054c648e}, + all_ss_ids = {['3a3664f9b7e6c4258d9dc282c9c43a95054c648e']}, +} + +@article{Balt18, + author = {Balta, Christiana and Bouwman, Ramona W and Sechopoulos, Ioannis and Broeders, Mireille J M and Karssemeijer, Nico and van Engen, Ruben E and Veldkamp, Wouter J H}, + title = {A model observer study using acquired mammographic images of an anthropomorphic breast phantom}, + journal = MP, + year = {2018}, + volume = {45}, + issue = {2}, + month = {2}, + pages = {655--665}, + doi = {10.1002/mp.12703}, + abstract = {To study the feasibility of a task-based framework composed of an anthropomorphic breast phantom and mathematical model observers (MOs) for the evaluation of system-processed mammographic images. A prototype anthropomorphic breast phantom with inserted gold discs of 0.1 mm and 0.25 mm diameter was imaged with two digital mammography systems (system A and B) at four different dose levels. From the acquired processed and unprocessed images, signal-present and signal-absent regions of interest (ROIs) were extracted. The ROIs were evaluated by a non-pre-whitening MO with eye filter (NPWE) and by three human observers in a two-alternative forced-choice experiment. We compared the human and the MO performance on a simple detection task of the calcification-like discs in ROIs with and without postprocessing. Proportion of correct responses of the human (PC ) and NPWE (PC ) experiments was calculated and the correlation between the two was analyzed using a mixed-effect regression model. Correlation results including the goodness of fit (r ) of PC and PC for all different parameters investigated were evaluated to determine whether NPWE MO can be used to predict human observer performance. PC and PC increased with dose for all conditions investigated (signal size, processing status, and different system). In case of the 0.1 mm discs, for system A, r between PC with PC was 0.81. For system B, r was 0.93. In case of the 0.25 mm discs, r in system A was 0.79 and for system B, r was 0.82. For the combined parameters investigated, and after excluding the 0.1 mm discs on system A because the results were influenced by aliasing, the overall r was 0.81. Image processing did not affect the detectability of calcification-like signals. No significant difference (P > 0.05) was found between the predicted PC by the MO and the PC for all different conditions. The framework seems promising to be used in objective image quality assessment. It was found to be relatively robust for the range of parameters investigated. 
However, further optimization of the anthropomorphic breast phantom and investigation of other MOs for a broader range of image quality assessment tasks is needed.}, + file = {:pdf/Balt18.pdf:PDF;:Balt18 - A model observer study using acquired mammographic images of an anthropomorphic breast phantom:}, + optnote = {AXTI, DIAG, RADIOLOGY}, + pmid = {29193129}, + gsid = {7340681759496680687}, + gscites = {15}, + ss_id = {393450e398ef7775bd9a0300974d1e31636b5a26}, + all_ss_ids = {['393450e398ef7775bd9a0300974d1e31636b5a26']}, +} + +@article{Balt19b, + author = {Balta, Christiana and Bouwman, Ramona W and Broeders, Mireille JM and Karssemeijer, Nico and Veldkamp, Wouter JH and Sechopoulos, Ioannis and van Engen, Ruben E}, + title = {Optimization of the difference-of-Gaussian channel sets for the channelized Hotelling observer}, + journal = JMI, + year = {2019}, + volume = {6}, + number = {3}, + pages = {035501}, + doi = {10.1117/1.JMI.6.3.035501}, + url = {https://www.spiedigitallibrary.org/journals/Journal-of-Medical-Imaging/volume-6/issue-3/035501/Optimization-of-the-difference-of-Gaussian-channel-sets-for-the/10.1117/1.JMI.6.3.035501.short?SSO=1}, + abstract = {The channelized-Hotelling observer (CHO) was investigated as a surrogate of human observers in task-based image quality assessment. The CHO with difference-of-Gaussian (DoG) channels has shown potential for the prediction of human detection performance in digital mammography (DM) images. However, the DoG channels employ parameters that describe the shape of each channel. The selection of these parameters influences the performance of the DoG CHO and needs further investigation. The detection performance of the DoG CHO was calculated and correlated with the detection performance of three humans who evaluated DM images in 2-alternative forced-choice experiments. A set of DM images of an anthropomorphic breast phantom with and without calcification-like signals was acquired at four different dose levels. For each dose level, 200 square regions-of-interest (ROIs) with and without signal were extracted. Signal detectability was assessed on ROI basis using the CHO with various DoG channel parameters and it was compared to that of the human observers. It was found that varying these DoG parameter values affects the correlation (r2) of the CHO with human observers for the detection task investigated. In conclusion, it appears that the optimal DoG channel sets that maximize the prediction ability of the CHO might be dependent on the type of background and signal of ROIs investigated.}, + file = {Balt19b.pdf:pdf\\Balt19b.pdf:PDF}, + optnote = {AXTI, DIAG}, + pmid = {31572746}, + publisher = {International Society for Optics and Photonics}, + month = {9}, + gsid = {8315844268599249140}, + gscites = {1}, + all_ss_ids = {['f131ef217543d179269018950bf3b6ba2b30f3b1', '20fe29e553477d9bd330f052e4d01c0c1c616b22']}, +} + +@phdthesis{Balt19c, + author = {Christiana Balta}, + title = {Objective image quality assessment in X-ray breast imaging}, + url = {https://repository.ubn.ru.nl/handle/2066/212645}, + abstract = {Breast cancer is the most common type of cancer in women. In the Netherlands breast cancer screening has been implemented for women between 50 and 75 years old. Participating women get a mammogram consisting of four digital mammography (DM) images every two years. These mammograms are reviewed by two radiologists independently.
On the images, breast cancer, among other manifestations, can appear as low-contrast soft-tissue lesions or very small calcifications. Depiction of these structures is technically challenging for the DM systems.}, + copromotor = {W. Veldkamp, M. Broeders, I. Sechopoulos}, + file = {Balt19c.pdf:pdf\\Balt19c.pdf:PDF}, + optnote = {AXTI, DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen, The Netherlands}, + year = {2019}, + journal = {PhD thesis}, +} + +@inproceedings{Balt20, + author = {Balta, Christiana and Rodriguez-Ruiz, Alejandro and Mieskes, Christoph and Karssemeijer, Nico and Heywang-K\"{o}brunner, Sylvia H.}, + title = {Going from double to single reading for screening exams labeled as likely normal by AI: what is the impact?}, + doi = {10.1117/12.2564179}, + year = {2020}, + abstract = {We investigated whether a deep learning-based artificial intelligence (AI) system can be used to improve breast cancer screening workflow efficiency by making a pre-selection of likely normal screening mammograms where double-reading could be safely replaced with single-reading. We collected 18,015 consecutively acquired screening exams, the independent reading assessments by each radiologist of the double reading process, and the information about whether the case was recalled and if so the recall outcome. The AI system assigned a 1-10 score to each screening exam denoting the likelihood of cancer. We simulated the impact on recall rate, cancer detection rate, and workload if single-reading would have been performed for the mammograms with the lowest AI scores. After evaluating all possible AI score thresholds, it was found that when AI scores 1 to 7 are single read instead of double read, the cancer detection rate would have remained the same (no screen-detected cancers missed - the AI score is low but the single-reader would recall the exam), recall rate would have decreased by 11.8% (from 5.35% to 4.79%), and screen reading workload would have decreased by 32.6%. In conclusion, using an AI system could improve breast cancer screening efficiency by pre-selecting likely normal exams where double-reading might not be needed.}, + url = {http://dx.doi.org/10.1117/12.2564179}, + file = {Balt20.pdf:pdf\Balt20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {15th International Workshop on Breast Imaging (IWBI2020)}, + citation-count = {11}, + automatic = {yes}, +} + +@inproceedings{Band17, + author = {P\'{e}ter B\'{a}ndi and Rob {van de Loo} and Milad Intezar and Daan Geijs and Francesco Ciompi and Bram {van Ginneken} and Jeroen {van der Laak} and Geert Litjens}, + title = {Comparison of Different Methods for Tissue Segmentation In Histopathological Whole-Slide Images}, + booktitle = ISBI, + pages = {591-595}, + doi = {10.1109/ISBI.2017.7950590}, + url = {https://arxiv.org/abs/1703.05990}, + abstract = {Tissue segmentation is an important pre-requisite for efficient and accurate diagnostics in digital pathology. However, it is well known that whole-slide scanners can fail in detecting all tissue regions, for example due to the tissue type, or due to weak staining because their tissue detection algorithms are not robust enough. In this paper, we introduce two different convolutional neural network architectures for whole slide image segmentation to accurately identify the tissue sections. We also compare the algorithms to a published traditional method.
We collected 54 whole slide images with differing stains and tissue types from three laboratories to validate our algorithms. We show that while the two methods do not differ significantly they outperform their traditional counterpart (Jaccard index of 0.937 and 0.929 vs. 0.870, p < 0.01).}, + file = {:pdf/Band17.pdf:PDF}, + gscites = {35}, + gsid = {564398722349753208}, + month = {4}, + optnote = {DIAG, PATHOLOGY}, + year = {2017}, + ss_id = {418354adcafbc948c266c55564feb5b12cc3dc21}, + all_ss_ids = {['418354adcafbc948c266c55564feb5b12cc3dc21']}, +} + +@article{Band18, + author = {P\'{e}ter B\'{a}ndi and Oscar Geessink and Quirine Manson and Marcory van Dijk and Maschenka Balkenhol and Meyke Hermsen and Babak Ehteshami Bejnordi and Byungjae Lee and Kyunghyun Paeng and Aoxiao Zhong and Quanzheng Li and Farhad Ghazvinian Zanjani and Svitlana Zinger and Keisuke Fukuta and Daisuke Komura and Vlado Ovtcharov and Shenghua Cheng and Shaoqun Zeng and Jeppe Thagaard and Anders B. Dahl and Huangjing Lin and Hao Chen and Ludwig Jacobsson and Martin Hedlund and Melih Cetin and Eren Halici and Hunter Jackson and Richard Chen and Fabian Both and Jorg Franke and Heidi Kusters-Vandevelde and Willem Vreuls and Peter Bult and Bram van Ginneken and Jeroen van der Laak and Geert Litjens}, + title = {From detection of individual metastases to classification of lymph node status at the patient level: the {CAMELYON}17 challenge}, + doi = {10.1109/tmi.2018.2867350}, + issue = {2}, + pages = {550--560}, + volume = {38}, + abstract = {Automated detection of cancer metastases in lymph nodes has the potential to improve assessment of prognosis for patients. To enable fair comparison between the algorithms for this purpose, we set up the CAMELYON17 challenge in conjunction with the IEEE International Symposium on Biomedical Imaging 2017 conference in Melbourne. Over 300 participants registered on the challenge website, of which 23 teams submitted a total of 37 algorithms before the initial deadline. Participants were provided with 899 whole-slide images for developing their algorithms.The developed algorithms were evaluated based on the test set encompassing 100 patients and 500 whole-slide images. The evaluation metric used was a quadratic weighted Cohen's kappa. We discuss the algorithmic details of the ten best preconference and two post-conference submissions. All these participants used convolutional neural networks in combination with pre- and postprocessing steps. Algorithms differed mostly in neural network architecture, training strategy and pre- and postprocessing methodology. Overall, the kappa metric ranged from 0.89 to -0.13 across all submissions. The best results were obtained with pre-trained architectures such as ResNet. Confusion matrix analysis revealed that all participants struggled with reliably identifying isolated tumor cells, the smallest type of metastasis, with detection rates below 40%. Qualitative inspection of the results of the top participants showed categories of false positives, such as nerves or contamination, which could be targets for further optimization. 
Last, we show that simple combinations of the top algorithms result in higher kappa metric values than any algorithm individually, with 0.93 for the best combination.}, + file = {:pdf/Band18.pdf:PDF}, + journal = TMI, + optnote = {DIAG, RADIOLOGY}, + pmid = {30716025}, + publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, + year = {2018}, + month = {2}, + gsid = {13054230002586416609}, + gscites = {369}, + ss_id = {1b27b9cfe0ce17950b6ea72f9ef8cf5a7459bccd}, + all_ss_ids = {['1b27b9cfe0ce17950b6ea72f9ef8cf5a7459bccd']}, +} + +@article{Band19a, + author = {B\'{a}ndi, P. and Balkenhol, Maschenka and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, + title = {Resolution-agnostic tissue segmentation in whole-slide histopathology images with convolutional neural networks}, + doi = {10.7717/peerj.8242}, + pages = {e8242}, + url = {https://peerj.com/articles/8242/}, + volume = {7}, + abstract = {Modern pathology diagnostics is being driven toward large scale digitization of microscopic tissue sections. A prerequisite for its safe implementation is the guarantee that all tissue present on a glass slide can also be found back in the digital image. Whole-slide scanners perform a tissue segmentation in a low resolution overview image to prevent inefficient high-resolution scanning of empty background areas. However, currently applied algorithms can fail in detecting all tissue regions. + In this study, we developed convolutional neural networks to distinguish tissue from background. We collected 100 whole-slide images of 10 tissue samples--staining categories from five medical centers for development and testing. Additionally, eight more images of eight unfamiliar categories were collected for testing only. We compared our fully-convolutional neural networks to three traditional methods on a range of resolution levels using Dice score and sensitivity. + We also tested whether a single neural network can perform equivalently to multiple networks, each specialized in a single resolution. Overall, our solutions outperformed the traditional methods on all the tested resolutions. The resolution-agnostic network achieved average Dice scores between 0.97 and 0.98 across the tested resolution levels, only 0.0069 less than the resolution-specific networks. Finally, its excellent generalization performance was demonstrated by achieving averages of 0.98 Dice score and 0.97 sensitivity on the eight unfamiliar images. A future study should test this network prospectively.}, + file = {Band19a.pdf:pdf\\Band19a.pdf:PDF}, + journal = PRJ, + optnote = {DIAG, RADIOLOGY}, + pmid = {31871843}, + year = {2019}, + month = {12}, + gsid = {12477234465933785268}, + gscites = {36}, + ss_id = {e1170679c6b5fe0addb40cfb62076979a3b3ebec}, + all_ss_ids = {['e1170679c6b5fe0addb40cfb62076979a3b3ebec']}, +} + +@article{Band23, + author = {B\'{a}ndi, P. and Balkenhol, Maschenka and van Dijk, Marcory and Kok, Michel and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, + title = {Continual learning strategies for cancer-independent detection of lymph node metastases}, + doi = {10.1016/j.media.2023.102755}, + pages = {102755}, + volume = {85}, + abstract = {Recently, large, high-quality public datasets have led to the development of convolutional neural networks that can detect lymph node metastases of breast cancer at the level of expert pathologists. Many cancers, regardless of the site of origin, can metastasize to lymph nodes. 
However, collecting and annotating high-volume, high-quality datasets for every cancer type is challenging. In this paper we investigate how to leverage existing high-quality datasets most efficiently in multi-task settings for closely related tasks. Specifically, we will explore different training and domain adaptation strategies, including prevention of catastrophic forgetting, for breast, colon and head-and-neck cancer metastasis detection in lymph nodes. + + Our results show state-of-the-art performance on colon and head-and-neck cancer metastasis detection tasks. We show the effectiveness of adaptation of networks from one cancer type to another to obtain multi-task metastasis detection networks. Furthermore, we show that leveraging existing high-quality datasets can significantly boost performance on new target tasks and that catastrophic forgetting can be effectively mitigated. Last, we compare different mitigation strategies.}, + file = {Band23.pdf:pdf\\Band23.pdf:PDF}, + journal = {Medical Image Analysis}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2023}, + all_ss_ids = {['4bcd672218ecec70473c84f6f1cc52c64031f3e5']}, + gscites = {4}, +} + +@article{Bank07, + author = {Alexander A Bankier and Cornelia Schaefer-Prokop and Viviane De Maertelaer and Denis Tack and Peter Jaksch and Walter Klepetko and Pierre Alain Gevenois}, + title = {Air trapping: comparison of standard-dose and simulated low-dose thin-section {CT} techniques}, + journal = Radiology, + year = {2007}, + volume = {242}, + pages = {898--906}, + file = {Bank07.pdf:pdf\\Bank07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, airtrapping}, + number = {3}, + pmid = {17244721}, + month = {3}, +} + +@article{Bank17, + author = {Bankier, Alexander A. and MacMahon, Heber and Goo, Jin Mo and Rubin, Geoffrey D. and Schaefer-Prokop, Cornelia M. and Naidich, David P.}, + title = {Recommendations for Measuring Pulmonary Nodules at CT: A Statement from the Fleischner Society}, + doi = {10.1148/radiol.2017162894}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.2017162894}, + file = {Bank17.pdf:pdf\Bank17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {203}, + automatic = {yes}, + pages = {584-600}, + volume = {285}, +} + +@article{Bare96, + author = {J. O. Barentsz and G. J. Jager and P. B. van Vierzen and J. A. Witjes and S. P. Strijk and H. Peters and N. Karssemeijer and S. H. Ruijs}, + title = {Staging urinary bladder cancer after transurethral biopsy: value of fast dynamic contrast-enhanced {MR} imaging}, + journal = Radiology, + year = {1996}, + volume = {201}, + pages = {185--193}, + file = {Bare96.pdf:pdf/Bare96.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {8816542}, + month = {10}, + gsid = {10907411171717953315}, + gscites = {222}, + ss_id = {328a63a444a23693caa473d92e726efc1503c680}, + all_ss_ids = {['328a63a444a23693caa473d92e726efc1503c680']}, +} + +@article{Bare99, + author = {Barentsz, J. O. and Engelbrecht, M. and Jager, G. J. and Witjes, J. A. and de LaRosette, J. and van Der Sanden, B. P. and Huisman, H. J. 
and Heerschap, A.}, + title = {Fast dynamic gadolinium-enhanced {MR} imaging of urinary bladder and prostate cancer}, + journal = JMRI, + year = {1999}, + volume = {10}, + pages = {295--304}, + doi = {10.1002/(SICI)1522-2586(199909)10:3<295::AID-JMRI10>3.0.CO;2-Z}, + url = {http://onlinelibrary.wiley.com/doi/10.1002/(SICI)1522-2586(199909)10:3%3C295::AID-JMRI10%3E3.0.CO;2-Z/abstract}, + abstract = {{A}mong the noninvasive imaging modalities, contrast enhanced magnetic resonance ({MR}) imaging is the most powerful tool with which to visualize vascularity. {C}ommon pathology only shows microvessel density, whereas dynamic {MR} imaging is sensitive to the total endothelial surface area of perfused vessels. {T}herefore, dynamic {MR} imaging may be of additional value in tumor staging and in evaluating therapies that affect the perfused microvessel density or surface area, such as chemo-, radiation, or anti-angiogenic therapy. {I}n urinary bladder cancer, this technique results in improved local and nodal staging, in improved separation of transurethral granulation tissue and edema from malignant tumor, and in improved evaluation of the effect of chemotherapy. {I}n prostate cancer, dynamic {MR} imaging may be of help in problematic cases. {T}his technique can assist in determining seminal vesicle infiltration, in depicting of minimal capsular penetration, and in recognizing tumors within the transitional zone. {A}lso, based on very rapid enhancement, very poorly differentiated tumors can be recognized. {E}valuation of the effects of therapy is another promising area, however a lot of research remain to be done. {T}his article reviews some basics of fast enhancement techniques, provides practical information, and shows recent developments, in using these fast techniques for staging and grading of bladder and prostate cancer, and for evaluating the effect of therapy.}, + file = {Bare99.pdf:pdf\\Bare99.pdf:PDF}, + optnote = {BioMR, DIAG, RADIOLOGY}, + number = {3}, + pmid = {10508289}, + month = {9}, + gsid = {9172514927993460045}, + gscites = {172}, + ss_id = {46b888d39bbf82a9ff78a5bfb581b4ded6df46bc}, + all_ss_ids = {['46b888d39bbf82a9ff78a5bfb581b4ded6df46bc']}, +} + +@article{Bart20, + author = {Jonas W. Bartstra and Fieke Draaisma and Sabine R. Zwakenberg and Nikolas Lessmann and Jelmer M. Wolterink and Yvonne T. van der Schouw and Pim A. de Jong and Joline W. J. Beulens}, + journal = {European Journal of Nutrition}, + title = {Six months vitamin {K} treatment does not affect systemic arterial calcification or bone mineral density in diabetes mellitus 2}, + year = {2021}, + volume = {60}, + pages = {1691-1699}, + doi = {10.1007/s00394-020-02412-z}, + pmid = {33068157}, + optnote = {DIAG, RADIOLOGY}, + file = {Bart20.pdf:pdf\\Bart20.pdf:PDF}, + abstract = {Vitamin K-dependent proteins are involved in (patho)physiological calcification of the vasculature and the bones. Type 2 diabetes mellitus (DM2) is associated with increased arterial calcification and increased fractures. 
This study investigates the effect of 6 months vitamin K2 supplementation on systemic arterial calcification and bone mineral density (BMD) in DM2 patients with a history of cardiovascular disease (CVD).}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235803}, + ss_id = {dd270749c2da64ae1d99baa1c40bc904b28b892f}, + all_ss_ids = {['dd270749c2da64ae1d99baa1c40bc904b28b892f']}, + gscites = {24}, +} + +@article{Beau22, + author = {Beauferris, Youssef and Teuwen, Jonas and Karkalousos, Dimitrios and Moriakov, Nikita and Caan, Matthan and Yiasemis, George and Rodrigues, L\'{i}via and Lopes, Alexandre and Pedrini, Helio and Rittner, Let\'{i}cia and Dannecker, Maik and Studenyak, Viktor and Gr\"{o}ger, Fabian and Vyas, Devendra and Faghih-Roohi, Shahrooz and Kumar Jethi, Amrit and Chandra Raju, Jaya and Sivaprakasam, Mohanasankar and Lasby, Mike and Nogovitsyn, Nikita and Loos, Wallace and Frayne, Richard and Souza, Roberto}, + title = {Multi-Coil MRI Reconstruction Challenge--Assessing Brain MRI Reconstruction Models and Their Generalizability to Varying Coil Configurations}, + doi = {10.3389/fnins.2022.919186}, + year = {2022}, + abstract = {Deep-learning-based brain magnetic resonance imaging (MRI) reconstruction methods have the potential to accelerate the MRI acquisition process. Nevertheless, the scientific community lacks appropriate benchmarks to assess the MRI reconstruction quality of high-resolution brain images, and evaluate how these proposed algorithms will behave in the presence of small, but expected data distribution shifts. The multi-coil MRI (MC-MRI) reconstruction challenge provides a benchmark that aims at addressing these issues, using a large dataset of high-resolution, three-dimensional, T1-weighted MRI scans. The challenge has two primary goals: (1) to compare different MRI reconstruction models on this dataset and (2) to assess the generalizability of these models to data acquired with a different number of receiver coils. In this paper, we describe the challenge experimental design and summarize the results of a set of baseline and state-of-the-art brain MRI reconstruction models. We provide relevant comparative information on the current MRI reconstruction state-of-the-art and highlight the challenges of obtaining generalizable models that are required prior to broader clinical adoption. The MC-MRI benchmark data, evaluation code, and current challenge leaderboard are publicly available. They provide an objective performance assessment for future developments in the field of brain MRI reconstruction.}, + url = {http://dx.doi.org/10.3389/fnins.2022.919186}, + file = {Beau22.pdf:pdf\Beau22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Frontiers in Neuroscience}, + citation-count = {7}, + automatic = {yes}, + volume = {16}, +} + +@conference{Beck16, + author = {Ruud Becks and Midas Meijs and Rashindra Manniesing and Jeroen Vister and Steven Schalekamp and Ritse Mann and Stefan Steens and Ewoud Smit and Ewoud {van Dijk} and Mathias Prokop and Frederick J.A. Meijer}, + title = {Additional Value of Brain {CT} Perfusion in The Detection of Intracranial Vessel Occlusion in Acute Ischemic Stroke: A (Multi Experience Level) Inter-Observer Study}, + booktitle = RSNA, + year = {2016}, + abstract = {PURPOSE: We aimed to evaluate the additional value of brain {CT} perfusion ({CTP}) for intracranial vessel occlusion detection in acute ischemic stroke for observers with different levels of experience. 
+ + METHOD AND MATERIALS: We retrospectively included all patients with symptoms of acute ischemic stroke (onset of less than 9 hours) who were scanned with non-enhanced {CT} ({NECT}), {CT} angiography ({CTA}) and {CTP} in the year 2015. Four observers with different levels of experience (neuroradiologist, non-neuroradiologist, two radiology residents) evaluated the imaging data with 2 imaging strategies. Method 1 included {NECT} and {CTA}. For method 2, additional {CTP} maps were provided for the evaluation of intracranial vessel occlusion on {CTA}. The observers were blinded to patient identity and clinical outcome. Receiver operating characteristic ({ROC}) was used for the evaluation of accuracy in intracranial vessel occlusion detection. The reference standard of vessel occlusion was set based on the evaluation by the four observers, and the judgment of an independent neuroradiologist serving as a referee in case of discrepancy. + + RESULTS: In total 110 patients were included, preliminary analyses included 94 patients. There was an increase of {AUC} in the overall detection of intracranial vessel occlusion for observer 1, 3 and 4, though only for observer 1 the increase in {AUC} was statistically significant (p=0.041). Increase of intracranial vessel occlusion detection mainly concerned distal vessel occlusions. No significant added value of {CTP} was found for proximal vessel occlusions, with already a high accuracy based on {NECT} and {CTA} for all experience levels with sensitivity ranging between 86-94% and specificity between 92-100%. + + CONCLUSION: Our study demonstrates that the use of {CTP} can aid in the detection of distal intracranial vessel occlusions on {CTA} in case {CTP} is integrated in the reading strategy. It is also demonstrated that {CTP} was not of added value for the detection of proximal intracranial vessel occlusions. Finally, there was no major difference in the diagnostic accuracy of intracranial vessel occlusion detection for the different levels in experience of the observers. + + CLINICAL RELEVANCE/APPLICATION: Our study demonstrated that brain {CT} perfusion can aid in the detection of distal intracranial vessel occlusions, which is clinically relevant for optimizing the imaging strategy in acute ischemic stroke.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Beck19a, + author = {Ruud J. Becks and Rashindra Manniesing and Jeroen Vister and Sjoert Pegge and Stefan C.A. Steens and Ewoud J. van Dijk and Mathias Prokop and Frederick J.A. Meijer}, + title = {Brain {CT} Perfusion Improves Intracranial Vessel Occlusion Detection on {CT} Angiography}, + journal = JNEURAD, + year = {2019}, + volume = {46}, + number = {2}, + pages = {124-129}, + doi = {10.1016/j.neurad.2018.03.003}, + abstract = {Background and purpose: To evaluate whether brain CT perfusion (CTP) aids in the detection of intracranial vessel occlusion on CT angiography (CTA) in acute ischemic stroke. + + Materials and methods: Medical-ethical committee approval of our hospital was obtained and informed consent was waived. Patients suspected of acute ischemic stroke who underwent non-contrast CT(NCCT), CTA and whole-brain CTP in our center in the year 2015 were included. Three observers with different levels of experience evaluated the imaging data of 110 patients for the presence or absence of intracranial arterial vessel occlusion with two strategies. In the first strategy, only NCCT and CTA were available. In the second strategy, CTP maps were provided in addition to NCCT and CTA. 
Receiver-operating-characteristic (ROC) analysis was used for the evaluation of diagnostic accuracy. + + Results: Overall, a brain perfusion deficit was scored present in 87-89% of the patients with an intracranial vessel occlusion, more frequently observed in the anterior than in the posterior circulation. Performance of intracranial vessel occlusion detection on CTA was significantly improved with the availability of CTP maps as compared to the first strategy (P = 0.023), due to improved detection of distal and posterior circulation vessel occlusions (P-values of 0.032 and 0.003 respectively). No added value of CTP was found for intracranial proximal vessel occlusion detection, with already high accuracy based on NCCT and CTA alone. + + Conclusion: The performance of intracranial vessel occlusion detection on CTA was improved with the availability of brain CT perfusion maps due to the improved detection of distal and posterior circulation vessel occlusions.}, + file = {pdf\\Beck19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29625153}, + month = {3}, + gsid = {6771048124161780993}, + gscites = {56}, + ss_id = {5a7fca54156869246045d31ebf54ff10408deccb}, + all_ss_ids = {['5a7fca54156869246045d31ebf54ff10408deccb']}, +} + +@book{Beic11, + author = {R. Beichel and M. de Bruijne and B. van Ginneken and S. Kabus and A. Kiraly and J. M. Kuhnigk and J. McClelland and K. Mori and E. M. van Rikxoort and S. Rit}, + title = {The {F}ourth {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, + year = {2011}, + publisher = {CreateSpace.com}, + url = {http://www.amazon.com/Fourth-International-Workshop-Pulmonary-Analysis/dp/1466200162/}, + abstract = {This was the fourth time that a satellite workshop solely devoted to pulmonary image analysis was held in conjunction with the Medical Image Computing and Computer Assisted Intervention (MICCAI) Conference. We have received many high quality submissions for the fourth edition of this workshop. All papers underwent a thorough peer review process. A team of 21 scientists performed peer-reviews, and each paper received at least three detailed reviews. Out of the 24 accepted papers, eight were selected for oral presentation, fourteen for poster presentation and two for software demonstration. In addition, the workshop included an invited keynote talk given by Heidi Roberts, Professor of Radiology at the University of Toronto. The proceedings of this workshop are organized into two parts. The first part consists of sixteen regular workshop papers that deal with different aspects of pulmonary image analysis, including topics like segmentation, registration, quantification, computer-aided detection/diagnosis of lung disease, and visualization. The second part of the proceedings is a collection of papers submitted for the international segmentation challenge called LOLA11 (LObe and Lung Analysis 2011, http://www.lola11.com). The goal of LOLA11 was to compare methods for (semi-) automatic segmentation of the lungs and lobes from chest computed tomography scans. Nine international teams participated in this challenge. We thank the organizers of MICCAI 2011 for handling the workshop logistics and all the colleagues that were involved in the peer-review process. 
We are grateful to Siemens Corporate Research, MedQIA, Philips Healthcare, and VIDA Diagnostics for the financial support of the Fourth International Workshop on Pulmonary Image Analysis.}, + file = {Beic11.pdf:pdf\\Beic11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@book{Beic13, + author = {R. Beichel and M. de Bruijne and S. Kabus and A. P. Kiraly and J. M. Kuhnigk and J. McClelland and T. Kitasaka and E. M. van Rikxoort and S. Rit}, + title = {The {F}ifth {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, + year = {2013}, + publisher = {CreateSpace.com}, + url = {http://www.amazon.com/Fifth-International-Workshop-Pulmonary-Analysis/dp/149218697X}, + abstract = {These are the proceedings of the fifth edition of the International Workshop on Pulmonary Image Analysis, held in conjunction with the Medical Image Computing and Computer Assisted Intervention (MICCAI) Conference 2013 in Nagoya, Japan. The International Workshop on Pulmonary Image Analysis brings together researchers in pulmonary image analysis to discuss new developments in the field. For the fifth edition of the workshop we have received a large number of high quality papers, which all received thorough reviews by at least three reviewers. In total, fourteen papers were accepted for presentation at the workshop, of which nine were selected for oral presentation and five for poster presentation. The presented papers deal with different aspects of pulmonary image analysis, including computer aided diagnosis, segmentation, and registration. In addition, we are very happy that Dr. Noboru Niki from the University of Tokushima agreed to give a keynote lecture. We would like to thank the organizers of MICCAI 2013 for hosting the fifth edition of the International Workshop on Pulmonary Image Analysis and for handling the logistics of the workshop, and all colleagues involved in the peer-review process.}, + file = {Beic13.pdf:pdf/Beic13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@book{Beic16, + author = {R. Beichel and K. Farahani and C. Jacobs and S. Kabus and A. P. Kiraly and J. M. Kuhnigk and J. McClelland and K. Mori and J. Petersen and S. Rit}, + title = {The {S}ixth {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, + publisher = {CreateSpace.com}, + url = {http://www.amazon.com/Sixth-International-Workshop-Pulmonary-Analysis/dp/1537038583}, + abstract = {These are the proceedings of the sixth edition of the International Workshop on Pulmonary Image Analysis, held in conjunction with the Medical Image Computing and Computer Assisted Intervention (MICCAI) Conference 2016 in Athens, Greece. The International Workshop on Pulmonary Image Analysis brings together researchers in pulmonary image analysis to discuss new developments in the field. For the sixth edition of the workshop, all submitted papers received thorough reviews by at least three reviewers. In total, eight papers were accepted for presentation at the workshop, of which five were selected for oral presentation and three for poster presentation. The presented papers deal with different aspects of pulmonary image analysis, including computer aided diagnosis, segmentation, and registration. + We would like to thank the organizers of MICCAI 2016 for hosting the sixth edition of the International Workshop on Pulmonary Image Analysis and for handling the logistics of the workshop, and all colleagues involved in the peer-review process.}, + file = {Beic16.pdf:pdf/Beic16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2016}, +} + +@inproceedings{Bejn13, + author = {Babak {Ehteshami Bejnordi} and Ramin Moshavegh and K Sujathan and Patrik Malm and Ewert Bengtsson and Andrew Mehnert}, + title = {Novel chromatin texture features for the classification of pap smears}, + booktitle = MI, + year = {2013}, + series = SPIE, + doi = {10.1117/12.2007185}, + abstract = {This paper presents a set of novel structural texture features for quantifying nuclear chromatin patterns in cells on a conventional Pap smear. The features are derived from an initial segmentation of the chromatin into bloblike texture primitives. The results of a comprehensive feature selection experiment, including the set of proposed structural texture features and a range of different cytology features drawn from the literature, show that two of the four top ranking features are structural texture features. They also show that a combination of structural and conventional features yields a classification performance of 0.954+-0.019 (AUC+-SE) for the discrimination of normal (NILM) and abnormal (LSIL and HSIL) slides. The results of a second classification experiment, using only normal-appearing cells from both normal and abnormal slides, demonstrates that a single structural texture feature measuring chromatin margination yields a classification performance of 0.815+-0.019. Overall the results demonstrate the efficacy of the proposed structural approach and that it is possible to detect malignancy associated changes (MACs) in Papanicolaou stain.}, + file = {Bejn13.pdf:pdf\\Bejn13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, +} + +@inproceedings{Bejn14, + author = {B. {Ehteshami Bejnordi} and N. Timofeeva and I. Otte-H\"oller and N. Karssemeijer and J. {van der Laak}}, + title = {Quantitative analysis of stain variability in histology slides and an algorithm for standardization}, + booktitle = MI, + year = {2014}, + series = SPIE, + doi = {10.1117/12.2043683}, + abstract = {This paper presents data on the sources of variation of the widely used hematoxylin and eosin (H&E) histological staining, as well as a new algorithm to reduce these variations in digitally scanned tissue sections. Experimental results demonstrate that staining protocols in different laboratories and staining on different days of the week are the major factors causing color variations in histopathological images. The proposed algorithm for standardizing histology slides is based on an initial clustering of the image into two tissue components having different absorption characteristics for different dyes. The color distribution for each tissue component is standardized by aligning the 2D histogram of color distribution in the hue-saturation-density (HSD) model. Qualitative evaluation of the proposed standardization algorithm shows that color constancy of the standardized images is improved. Quantitative evaluation demonstrates that the algorithm outperforms competing methods.
In conclusion, the paper demonstrates that staining variations, which may potentially hamper usefulness of computer assisted analysis of histopathological images, can be reduced considerably by applying the proposed algorithm.}, + file = {Bejn14.pdf:pdf\\Bejn14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {12432893429390034088}, + gscites = {39}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/132653}, + ss_id = {e39a5aa36977c617a9caecb6da5c87e27886ad6f}, + all_ss_ids = {['e39a5aa36977c617a9caecb6da5c87e27886ad6f']}, +} + +@inproceedings{Bejn15, + author = {Bejnordi, Babak Ehteshami and Litjens, Geert and Hermsen, Meyke and Karssemeijer, Nico and van der Laak, Jeroen A W M}, + title = {A multi-scale superpixel classification approach to the detection of regions of interest in whole slide histopathology images}, + booktitle = MI, + year = {2015}, + volume = {9420}, + series = SPIE, + pages = {94200H}, + doi = {10.1117/12.2081768}, + abstract = {This paper presents a new algorithm for automatic detection of regions of interest in whole slide histopathological images. The proposed algorithm generates and classifies superpixels at multiple resolutions to detect regions of interest. The algorithm emulates the way the pathologist examines the whole slide histopathology image by processing the image at low magnifications and performing more sophisticated analysis only on areas requiring more detailed information. However, instead of the traditional usage of fixed sized rectangular patches for the identification of relevant areas, we use superpixels as the visual primitives to detect regions of interest. Rectangular patches can span multiple distinct structures, thus degrade the classification performance. The proposed multi-scale superpixel classification approach yields superior performance for the identification of the regions of interest. For the evaluation, a set of 10 whole slide histopathology images of breast tissue were used. Empirical evaluation of the performance of our proposed algorithm relative to expert manual annotations shows that the algorithm achieves an area under the Receiver operating characteristic (ROC) curve of 0.958, demonstrating its efficacy for the detection of regions of interest.}, + file = {Bejn15.pdf:pdf\\Bejn15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/141326}, + ss_id = {49d5e6ed83859117cfd781612878ea93fa75c348}, + all_ss_ids = {['49d5e6ed83859117cfd781612878ea93fa75c348']}, + gscites = {46}, +} + +@inproceedings{Bejn17, + author = {Bejnordi, Babak Ehteshami and Jimmy Lin and Ben Glass and Maeve Mullooly and Gretchen Gierach and Mark Sherman and Nico Karssemeijer and Jeroen A.W.M. van der Laak and Andrew Beck}, + title = {Deep learning-based assessment of tumor-associated stroma for diagnosing breast cancer in histopathology images}, + booktitle = ISBI, + year = {2017}, + pages = {929-932}, + doi = {10.1109/ISBI.2017.7950668}, + url = {https://arxiv.org/abs/1702.05803}, + abstract = {Diagnosis of breast carcinomas has so far been limited to the morphological interpretation of epithelial cells and the assessment of epithelial tissue architecture. Consequently, most of the automated systems have focused on characterizing the epithelial regions of the breast to detect cancer. 
In this paper, we propose a system for classification of hematoxylin and eosin (H\&E) stained breast specimens based on convolutional neural networks that primarily targets the assessment of tumorassociated stroma to diagnose breast cancer patients. We evaluate the performance of our proposed system using a large cohort containing 646 breast tissue biopsies. Our evaluations show that the proposed system achieves an area under ROC of 0.92, demonstrating the discriminative power of previously neglected tumor associated stroma as a diagnostic biomarker.}, + file = {Bejn17.pdf:pdf\\Bejn17.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {28230532}, + month = {4}, + gsid = {574730569234442494}, + gscites = {62}, + ss_id = {3096798a92ab0829ceb1d601d002dd47dc49e156}, + all_ss_ids = {['3096798a92ab0829ceb1d601d002dd47dc49e156']}, +} + +@phdthesis{Bejn17a, + author = {Babak Ehteshami Bejnordi}, + title = {Histopathological diagnosis of breast cancer using machine learning}, + url = {https://repository.ubn.ru.nl/handle/2066/178907}, + abstract = {Application of machine learning to WSI is a promising yet largely unexplored field of research. The primary aim of the research described in this thesis was to develop automated systems for analysis of H&E stained breast histopathological images. This involved automatic detection of ductal carcinoma in-situ (DCIS), invasive, and metastatic breast cancer in whole-slide histopathological images. A secondary aim was to identify new diagnostic biomarkers for the detection of invasive breast cancer. To this end the research was undertaken with the following objectives: + + 1. Development of an algorithm for standardization of H&E stained WSIs; + 2. Detection, classification and segmentation of primary breast cancer; + 3. Evaluation of the state of the art of machine learning algorithms for automatic detection of lymph nodes metastases; + 4. Identifying and leveraging new stromal biomarkers to improve breast cancer diagnostics.}, + copromotor = {J.A.W.M. van der Laak and G. Litjens}, + file = {Bejn17a.pdf:pdf\\Bejn17a.pdf:PDF}, + optnote = {DIAG}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + year = {2017}, + journal = {PhD thesis}, +} + +@article{Bejn17b, + author = {Bejnordi, Babak Ehteshami and Zuidhof, Guido and Balkenhol, Maschenka and Hermsen, Meyke and Bult, Peter and van Ginneken, Bram and Karssemeijer, Nico and Litjens, Geert and van der Laak, Jeroen}, + title = {Context-aware stacked convolutional neural networks for classification of breast carcinomas in whole-slide histopathology images}, + journal = JMI, + year = {2017}, + volume = {4}, + issue = {4}, + month = {10}, + pages = {044504}, + doi = {10.1117/1.JMI.4.4.044504}, + abstract = {Currently, histopathological tissue examination by a pathologist represents the gold standard for breast lesion diagnostics. Automated classification of histopathological whole-slide images (WSIs) is challenging owing to the wide range of appearances of benign lesions and the visual similarity of ductal carcinoma in-situ (DCIS) to invasive lesions at the cellular level. Consequently, analysis of tissue at high resolutions with a large contextual area is necessary. We present context-aware stacked convolutional neural networks (CNN) for classification of breast WSIs into normal/benign, DCIS, and invasive ductal carcinoma (IDC). We first train a CNN using high pixel resolution to capture cellular level information. 
The feature responses generated by this model are then fed as input to a second CNN, stacked on top of the first. Training of this stacked architecture with large input patches enables learning of fine-grained (cellular) details and global tissue structures. Our system is trained and evaluated on a dataset containing 221 WSIs of hematoxylin and eosin stained breast tissue specimens. The system achieves an AUC of 0.962 for the binary classification of nonmalignant and malignant slides and obtains a three-class accuracy of 81.3% for classification of WSIs into normal/benign, DCIS, and IDC, demonstrating its potential for routine diagnostics.}, + file = {Bejn17b.pdf:pdf\\Bejn17b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29285517}, + gsid = {10616853499280907465}, + gscites = {144}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/181885}, + ss_id = {784906ab63c3743a5d26dca846a49c2edbf4dc6a}, + all_ss_ids = {['784906ab63c3743a5d26dca846a49c2edbf4dc6a']}, +} + +@article{Bejn18a, + author = {Bejnordi, Babak Ehteshami and Litjens, Geert and van der Laak, Jeroen AWM}, + title = {Machine Learning Compared With Pathologist Assessment-Reply}, + journal = JAMA, + year = {2018}, + volume = {319}, + issue = {16}, + month = {4}, + pages = {1726}, + doi = {10.1001/jama.2018.1478}, + file = {:pdf/Bejn18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29710158}, + gsid = {2786338949526955716}, + gscites = {5}, + ss_id = {afe7e342756fb74d5cf7e4ad171415470a5baa3b}, + all_ss_ids = {['afe7e342756fb74d5cf7e4ad171415470a5baa3b']}, +} + +@inproceedings{Bel18, + author = {de Bel, Thomas and Hermsen, Meyke and van der Laak, Jeroen and Litjens, Geert J. S. and Smeets, Bart and Hilbrands, Luuk}, + title = {Automatic segmentation of histopathological slides of renal tissue using deep learning}, + doi = {10.1117/12.2293717}, + year = {2018}, + abstract = {Diagnoses in kidney disease often depend on quantification and presence of specific structures in the tissue. The progress in the field of whole-slide imaging and deep learning has opened up new possibilities for automatic analysis of histopathological slides. An initial step for renal tissue assessment is the differentiation and segmentation of relevant tissue structures in kidney specimens. We propose a method for segmentation of renal tissue using convolutional neural networks. Nine structures found in (pathological) renal tissue are included in the segmentation task: glomeruli, proximal tubuli, distal tubuli, arterioles, capillaries, sclerotic glomeruli, atrophic tubuli, inflammatory infiltrate and fibrotic tissue. Fifteen whole slide images of normal cortex originating from tumor nephrectomies were collected at the Radboud University Medical Center, Nijmegen, The Netherlands. The nine classes were sparsely annotated by a PhD student, experienced in the field of renal histopathology (MH). Experiments were performed with three different network architectures: a fully convolutional network, a multi-scale fully convolutional network and a U-net. We assessed the added benefit of combining the networks into an ensemble. We performed four-fold cross validation and report the average pixel accuracy per annotation for each class.
Results show that convolutional neural networks are able to accurately perform segmentation tasks in renal tissue, with accuracies of 90% for most classes.}, + url = {http://dx.doi.org/10.1117/12.2293717}, + file = {Bel18.pdf:pdf\Bel18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Imaging 2018: Digital Pathology}, + citation-count = {22}, + automatic = {yes}, +} + +@inproceedings{Bel19, + author = {Thomas de Bel and Meyke Hermsen and Jesper Kers and Jeroen van der Laak and Geert Litjens}, + title = {Stain-Transforming Cycle-Consistent Generative Adversarial Networks for Improved Segmentation of Renal Histopathology}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=BkxJkgSlx4}, + abstract = {The performance of deep learning applications in digital histopathology can deteriorate significantly due to staining variations across centers. We employ cycle-consistent generative adversarial networks (cycleGANs) for unpaired image-to-image translation, facilitating between-center stain transformation. We find that modifications to the original cycleGAN architecture make it more suitable for stain transformation, creating artificially stained images of high quality. Specifically, changing the generator model to a smaller U-net-like architecture, adding an identity loss term, increasing the batch size and the learning rate all led to improved training stability and performance.
Furthermore, we propose a method for dealing with tiling artifacts when applying the network on whole slide images (WSIs). We apply our stain transformation method on two datasets of PAS-stained (Periodic Acid-Schiff) renal tissue sections from different centers. We show that stain transformation is beneficial to the performance of cross-center segmentation, raising the Dice coefficient from 0.36 to 0.85 and from 0.45 to 0.73 on the two datasets.}, + file = {Bel19.pdf:pdf\\Bel19.pdf:PDF}, + optnote = {DIAG}, + gsid = {8777609378172049761}, + gscites = {70}, + ss_id = {ffab09ba8100420dbd066f31c830f7d3e984696b}, + all_ss_ids = {['ffab09ba8100420dbd066f31c830f7d3e984696b']}, +} + +@article{Bel21, + author = {de Bel, Thomas and Bokhorst, John-Melle and van der Laak, Jeroen and Litjens, Geert}, + title = {Residual cyclegan for robust domain transformation of histopathological tissue slides.}, + doi = {10.1016/j.media.2021.102004}, + pages = {102004}, + volume = {70}, + abstract = {Variation between stains in histopathology is commonplace across different medical centers. This can have a significant effect on the reliability of machine learning algorithms. In this paper, we propose to reduce performance variability by using cycle-consistent generative adversarial (CycleGAN) networks to remove staining variation. We improve upon the regular CycleGAN by incorporating residual learning. We comprehensively evaluate the performance of our stain transformation method and compare its usefulness in addition to extensive data augmentation to enhance the robustness of tissue segmentation algorithms. Our steps are as follows: first, we train a model to perform segmentation on tissue slides from a single source center, while heavily applying augmentations to increase robustness to unseen data. Second, we evaluate and compare the segmentation performance on data from other centers, both with and without applying our CycleGAN stain transformation. We compare segmentation performances in a colon tissue segmentation and kidney tissue segmentation task, covering data from 6 different centers. We show that our transformation method improves the overall Dice coefficient by 9% over the non-normalized target data and by 4% over traditional stain transformation in our colon tissue segmentation task. For kidney segmentation, our residual CycleGAN increases performance by 10% over no transformation and around 2% compared to the non-residual CycleGAN.}, + file = {:pdf/Bel21.pdf:PDF}, + journal = MIA, + month = may, + pmid = {33647784}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/233603}, + ss_id = {455c14205e23d9cb709371ceb605f92e4fbbda2f}, + all_ss_ids = {['455c14205e23d9cb709371ceb605f92e4fbbda2f']}, + gscites = {50}, +} + +@article{Bel22, + author = {de Bel, Thomas and Litjens, Geert and Ogony, Joshua and Stallings-Mann, Melody and Carter, Jodi M. and Hilton, Tracy and Radisky, Derek C. and Vierkant, Robert A. and Broderick, Brendan and Hoskin, Tanya L. and Winham, Stacey J. and Frost, Marlene H. and Visscher, Daniel W. and Allers, Teresa and Degnim, Amy C. and Sherman, Mark E. and van der Laak, Jeroen A. W. M.}, + title = {Automated quantification of levels of breast terminal duct lobular (TDLU) involution using deep learning}, + doi = {10.1038/s41523-021-00378-7}, + year = {2022}, + abstract = {Convolutional neural networks (CNNs) offer the potential to generate comprehensive quantitative analysis of histologic features.
Diagnostic reporting of benign breast disease (BBD) biopsies is usually limited to subjective assessment of the most severe lesion in a sample, while ignoring the vast majority of tissue features, including involution of background terminal duct lobular units (TDLUs), the structures from which breast cancers arise. Studies indicate that increased levels of age-related TDLU involution in BBD biopsies predict lower breast cancer risk, and therefore its assessment may have potential value in risk assessment and management. However, assessment of TDLU involution is time-consuming and difficult to standardize and quantitate. Accordingly, we developed a CNN to enable automated quantitative measurement of TDLU involution and tested its performance in 174 specimens selected from the pathology archives at Mayo Clinic, Rochester, MN. The CNN was trained and tested on a subset of 33 biopsies, delineating important tissue types. Nine quantitative features were extracted from delineated TDLU regions. Our CNN reached an overall dice-score of 0.871 (+-0.049) for tissue classes versus reference standard annotation. Consensus of four reviewers scoring 705 images for TDLU involution demonstrated substantial agreement with the CNN method (unweighted kappa = 0.747 +- 0.01). Quantitative involution measures showed anticipated associations with BBD histology, breast cancer risk, breast density, menopausal status, and breast cancer risk prediction scores (p < 0.05). Our work demonstrates the potential to improve risk prediction for women with BBD biopsies by applying CNN approaches to generate automated quantitative evaluation of TDLU involution.}, + url = {http://dx.doi.org/10.1038/s41523-021-00378-7}, + file = {Bel22.pdf:pdf\Bel22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {npj Breast Cancer}, + citation-count = {5}, + automatic = {yes}, + volume = {8}, +} + +@article{Bemp22, + author = {Van den Bempt, Maxim and Vinayahalingam, Shankeeth and Han, Michael D. and Berge, Stefaan J. and Xi, Tong}, + title = {The role of muscular traction in the occurrence of skeletal relapse after advancement bilateral sagittal split osteotomy (BSSO): A systematic review.}, + doi = {10.1111/ocr.12488}, + issue = {1}, + pages = {1--13}, + volume = {25}, + abstract = {The aim of this systematic review was (i) to determine the role of muscular traction in the occurrence of skeletal relapse after advancement BSSO and (ii) to investigate the effect of advancement BSSO on the perimandibular muscles. This systematic review reports in accordance with the recommendations proposed by the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) statement. Electronic database searches were performed in the databases MEDLINE, Embase and Cochrane Library. Inclusion criteria were as follows: assessment of relapse after advancement BSSO; assessment of morphological and functional change of the muscles after advancement BSSO; and clinical studies on human subjects. Exclusion criteria were as follows: surgery other than advancement BSSO; studies in which muscle activity/traction was not investigated; and case reports with a sample of five cases or fewer, review articles, meta-analyses, letters, congress abstracts or commentaries. Of the initial 1006 unique articles, 11 studies were finally included. In four studies, an intervention involving the musculature was performed with subsequent assessment of skeletal relapse. The changes in the morphological and functional properties of the muscles after BSSO were studied in seven studies. The findings of this review demonstrate that the perimandibular musculature plays a role in skeletal relapse after advancement BSSO and may serve as a target for preventive strategies to reduce this complication. However, further research is necessary to (i) develop a better understanding of the role of each muscle group, (ii) to develop new therapeutic strategies and (iii) to define criteria that allow identification of patients at risk.}, + file = {Bemp22.pdf:pdf\\Bemp22.pdf:PDF}, + journal = {Orthodontics & craniofacial research}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33938136}, + year = {2022}, +} + +@article{Berb23, + author = {Berb\'{i}s, M. Alvaro and McClintock, David S. and Bychkov, Andrey and Van der Laak, Jeroen and Pantanowitz, Liron and Lennerz, Jochen K. and Cheng, Jerome Y. and Delahunt, Brett and Egevad, Lars and Eloy, Catarina and Farris, Alton B.
and Fraggetta, Filippo and Garc\'{i}a del Moral, Raimundo and Hartman, Douglas J. and Herrmann, Markus D. and Hollemans, Eva and Iczkowski, Kenneth A. and Karsan, Aly and Kriegsmann, Mark and Salama, Mohamed E. and Sinard, John H. and Tuthill, J. Mark and Williams, Bethany and Casado-S\'{a}nchez, C\'{e}sar and S\'{a}nchez-Turri\'{o}n, V\'{i}ctor and Luna, Antonio and Aneiros-Fern\'{a}ndez, Jos\'{e} and Shen, Jeanne}, + title = {Computational pathology in 2030: a Delphi study forecasting the role of AI in pathology within the next decade}, + doi = {10.1016/j.ebiom.2022.104427}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ebiom.2022.104427}, + file = {Berb23.pdf:pdf\Berb23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {eBioMedicine}, + citation-count = {13}, + automatic = {yes}, + pages = {104427}, + volume = {88}, +} + +@article{Berg18, + author = {Bergkamp, Mayra I and Wissink, Joost G J and van Leijsen, Esther M C and Ghafoorian, Mohsen and Norris, David G and van Dijk, Ewoud J and Platel, Bram and Tuladhar, Anil M and de Leeuw, Frank-Erik}, + title = {Risk of Nursing Home Admission in Cerebral Small Vessel Disease}, + journal = Stroke, + year = {2018}, + volume = {49}, + issue = {11}, + month = {11}, + pages = {2659--2665}, + doi = {10.1161/STROKEAHA.118.021993}, + url = {https://www.ahajournals.org/doi/pdf/10.1161/STROKEAHA.118.021993}, + abstract = {Background and Purpose- Since cerebral small vessel disease (SVD) is associated with cognitive and motor impairment and both might ultimately lead to nursing home admission, our objective was to investigate the association of SVD markers with nursing home admission. Methods- The RUN DMC study (Radboud University Nijmegen Diffusion Tensor and Magnetic Resonance Cohort) is a prospective cohort of 503 independent living individuals with SVD. Date of nursing home admission was retrieved from the Dutch municipal personal records database. Risk of nursing home admission was calculated using a competing risk analysis, with mortality as a competing risk. Results- During follow-up (median 8.7 years, interquartile range 8.5-8.9), 31 participants moved to a nursing home. Before nursing home admission, 19 participants were diagnosed with dementia, 6 with parkinsonism, and 10 with stroke. Participants with the lowest white matter volume had an 8-year risk of nursing home admission of 13.3% (95% CI, 8.6-18.9), which was significantly different from participants with middle or highest white matter volume (respectively, 4.8% [95% CI, 2.3-8.8] and 0%; P<0.001). After adjusting for baseline age and living condition, the association of white matter volume and total brain volume with nursing home admission was significant, with, respectively, hazard ratios of 0.88 [95% CI, 0.84-0.95] ( P value 0.025) and 0.92 [95% CI, 0.85-0.98] ( P<0.001) per 10 mL. The association of white matter hyperintensities and lacunes with nursing home admission was not significant. Conclusions- This study demonstrates that in SVD patients, independent from age and living condition, a lower white matter volume and a lower total brain volume is associated with an increased risk of nursing home admission. 
Nursing home admission is a relevant outcome in SVD research since it might be able to combine both cognitive and functional consequences of SVD in 1 outcome.}, + file = {Berg18.pdf:pdf\\Berg18.pdf:PDF}, + optnote = {DIAG}, + pmid = {30355195}, + ss_id = {966107ec26f4d315bb9b7cbbbb0e60a4534e998c}, + all_ss_ids = {['966107ec26f4d315bb9b7cbbbb0e60a4534e998c']}, + gscites = {3}, +} + +@article{Berg22, + author = {Bergshoeff, Verona E. and Balkenhol, Maschenka C. A. and Haesevoets, Annick and Ruland, Andrea and Chenault, Michelene N. and Nelissen, Rik C. and Peutz, Carine J. and Clarijs, Ruud and Van der Laak, Jeroen A. W. M. and Takes, Robert P. and Van den Brekel, Michiel W. and Van Velthuysen, Marie-Louise F. and Ramaekers, Frans C. S. and Kremer, Bernd and Speel, Ernst-Jan M.}, + title = {Evaluation Criteria for Chromosome Instability Detection by FISH to Predict Malignant Progression in Premalignant Glottic Laryngeal Lesions}, + doi = {10.3390/cancers14133260}, + year = {2022}, + abstract = {Background: The definition of objective, clinically applicable evaluation criteria for FISH 1c/7c in laryngeal precursor lesions for the detection of chromosome instability (CI). Copy Number Variations (CNV) for chromosomes 1 and 7 reflect the general ploidy status of premalignant head and neck lesions and can therefore be used as a marker for CI. Methods: We performed dual-target FISH for chromosomes 1 and 7 centromeres on 4 um formalin-fixed, paraffin-embedded tissue sections of 87 laryngeal premalignancies to detect CNVs. Thirty-five normal head and neck squamous cell samples were used as a control. First, the chromosome 7:1 ratio (CR) was evaluated per lesion. The normal range of CRs (>=0.84 <= 1.16) was based on the mean CR +/- 3 x SD found in the normal population. Second, the percentage of aberrant nuclei, harboring > 2 chromosomes of chromosome 1 and/or 7 (PAN), was established (cut-off value for abnormal PAN >= 10%). Results: PAN showed a stronger correlation with malignant progression than CR (resp. OR 5.6, p = 0.001 and OR 3.8, p = 0.009). PAN combined with histopathology resulted in a prognostic model with an area under the ROC curve (AUC) of 0.75 (s.e. 0.061, sensitivity 71%, specificity 70%). Conclusions: evaluation criteria for FISH 1c/7c based on PAN >= 10% provide the best prognostic information on the risk of malignant progression of premalignant laryngeal lesions as compared with criteria based on the CR. FISH 1c/7c detection can be applied in combination with histopathological assessment.}, + url = {http://dx.doi.org/10.3390/cancers14133260}, + file = {Berg22.pdf:pdf\Berg22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancers}, + citation-count = {0}, + automatic = {yes}, + pages = {3260}, + volume = {14}, +} + +@article{Berg23, + title = {ChatGPT and Generating a Differential Diagnosis Early in an Emergency Department Presentation}, + journal = {Annals of Emergency Medicine}, + doi = {https://doi.org/10.1016/j.annemergmed.2023.08.003}, + url = {https://www.sciencedirect.com/science/article/pii/S019606442300642X}, + author = {Hidde ten Berg and Bram van Bakel and Lieke van de Wouw and Kim E. Jie and Anoeska Schipper and Henry Jansen and Rory D. O'Connor and Bram van Ginneken and Steef Kurstjens}, + citation-count = {0}, + optnote = {DIAG, RADIOLOGY}, + pmid = {37690022}, + year = {2023}, +} + +@conference{Berk22, + author = {van den Berk, I.A.H. and Jacobs, C. and Kanglie, M.M.N.P. and Mets, O.M. and Snoeren, M. and Montauban van Swijndregt, A.D. and Taal, E.M. 
and van Engelen, T.S.R. and Prins, J. and Bipat, S. and Bossuyt, P.M.M. and Stoker, J.}, + title = {Added value of artificial intelligence for the detection and analysis of lung nodules on ultra-low-dose CT in an emergency setting}, + booktitle = RSNA, + year = {2022}, + abstract = {PURPOSE: To analyze the added value of an artificial intelligence (AI) algorithm for lung nodule detection on ultra-low-dose CT's (ULDCT) acquired at the emergency department (ED). MATERIALS AND METHODS: In the OPTIMACT trial 873 patients with suspected non-traumatic pulmonary disease underwent ULDCT at the ED, 870 patients were available for analysis. During the trial clinical reading of the ULDCT's was done by the radiologist on call and clinical relevant incidental lung nodules were reported in the radiology report. All ULDCT's were processed using CIRRUS Lung Nodule AI, a deep learning based algorithm for detection of non-calcified lung nodules >= 6 mm. Three chest radiologists independently reviewed the lung nodules identified during the trial and all marks from the AI algorithm. Each AI mark was accepted or rejected for being a lung nodule. Accepted AI marks were classified as solid, part-solid, non-solid and were volumetrically measured using semi-automatic segmentation software. Incidental lung nodules that (i) were scored as a nodule by at least two of the three chest radiologists and (ii) met the Fleischner criteria for clinically relevant lung nodules were used as reference standard. We assessed differences in proportion of true positive and false positive findings detected during prospective evaluation by the radiologist on call versus a standalone AI reading. RESULTS: During the trial 59 clinical relevant incidental lung nodules in 35/870 (4.0%) patients had been reported. 24/59 of these nodules were scored by at least two chest radiologists and met the Fleischner criteria, leaving 35/59 false positives. In 458/870 (53%) ULDCT's one or more AI marks were found, 1862 marks in total. 104/1862 (5.6%) AI marks were scored as clinically relevant nodules by at least two chest radiologists, leaving 1758/1862 (94%) false positive marks. Overall, 4 times more (104 vs 24) lung nodules were detected with the use of AI, at the expense of 50 times more false positive findings. CONCLUSION: The use of AI on ULDCT in ED patients with pulmonary disease results in the detection of more clinically relevant incidental lung nodules but is limited by the high false positive rate. CLINICAL RELEVANCE: In the ED setting focus lies on the acute presentation. Artificial intelligence aids in the detection of clinical relevant incidental lung nodules but is limited by the high false positive rate.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Berk94, + author = {A. P. Berkhoff and H. J. Huisman and J. M. Thijssen and E. M. Jacobs and R. J. Homan}, + title = {Fast scan conversion algorithms for displaying ultrasound sector images}, + journal = UI, + year = {1994}, + volume = {16}, + pages = {87--108}, + doi = {10.1006/uimg.1994.1006}, + abstract = {{T}wo fast algorithms for interpolation of ultrasonic sector-scans were developed. {B}oth algorithms are based on line-drawing algorithms and are free from multiplications in the innermost loops. {T}he algorithms were compared to the following conventional interpolators: 2-{D} windowed sinc, bicubic spline, 4 x 4 point bicubic spline, bilinear, and nearest neighbor. {T}he most accurate of the two new algorithms is about eight times faster than nearest neighbor interpolation. 
{T}he quantitative errors are of the same order as the errors of the nearest neighbor interpolator. {T}he subjective image quality is between nearest neighbor and bilinear interpolation.}, + file = {Berk94.pdf:pdf\\Berk94.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {2}, + pmid = {7974911}, + month = {4}, + gsid = {6038926458136740781}, + gscites = {47}, +} + +@article{Bian18, + author = {Bian, Z and Charbonnier, J-P and Liu, J and Zhao, D and Lynch, D A and van Ginneken, B}, + title = {Small airway segmentation in thoracic computed tomography scans: a machine learning approach}, + journal = PMB, + year = {2018}, + volume = {63}, + issue = {15}, + month = {8}, + pages = {155024}, + doi = {10.1088/1361-6560/aad2a1}, + abstract = {Small airway obstruction is a main cause for chronic obstructive pulmonary disease (COPD). We propose a novel method based on machine learning to extract the airway system from a thoracic computed tomography (CT) scan. The emphasis of the proposed method is on including the smallest airways that are still visible on CT. We used an optimized sampling procedure to extract airway and non-airway voxel samples from a large set of scans for which a semi-automatically constructed reference standard was available. We created a set of features which represent tubular and texture properties that are characteristic for small airway voxels. A random forest classifier was used to determine for each voxel if it belongs to the airway class. Our method was validated on a set of 20 clinical thoracic CT scans from the COPDGene study. Experiments show that our method is effective in extracting the full airway system and in detecting a large number of small airways that were missed by the semi-automatically constructed reference standard.}, + file = {Bian18.pdf:pdf\\Bian18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29995646}, + gsid = {10454150924096992775}, + gscites = {15}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/194505}, + ss_id = {6e733d61fef6612626c4c5350f143fc61db9a0da}, + all_ss_ids = {['6e733d61fef6612626c4c5350f143fc61db9a0da']}, +} + +@article{Bili19, + author = {Patrick Bilic and Patrick Ferdinand Christ and Eugene Vorontsov and Grzegorz Chlebus and Hao Chen and Qi Dou and Chi-Wing Fu and Xiao Han and Pheng-Ann Heng and Jurgen Hesser and Samuel Kadoury and Tomasz Konopczynski and Miao Le and Chunming Li and Xiaomeng Li and Jana Lipkova and John Lowengrub and Hans Meine and Jan Hendrik Moltz and Chris Pal and Marie Piraud and Xiaojuan Qi and Jin Qi and Markus Rempfler and Karsten Roth and Andrea Schenk and Anjany Sekuboyina and Eugene Vorontsov and Ping Zhou and Christian Hulsemeyer and Marcel Beetz and Florian Ettlinger and Felix Gruen and Georgios Kaissis and Fabian Lohofer and Rickmer Braren and Julian Holch and Felix Hofmann and Wieland Sommer and Volker Heinemann and Colin Jacobs and Humpire Mamani, Gabriel Efrain and Bram van Ginneken and Gabriel Chartrand and An Tang and Michal Drozdzal and Avi Ben-Cohen and Eyal Klang and Marianne M. Amitai and Eli Konen and Hayit Greenspan and Johan Moreau and Alexandre Hostettler and Luc Soler and Refael Vivanti and Adi Szeskin and Naama Lev-Cohain and Jacob Sosna and Leo Joskowicz and Bjoern H.
Menze}, + title = {The {L}iver {T}umor {S}egmentation {B}enchmark ({L}i{TS})}, + journal = {arXiv:1901.04056}, + year = {2019}, + abstract = {In this work, we report the set-up and results of the Liver Tumor Segmentation Benchmark (LITS) organized in conjunction with the IEEE International Symposium on Biomedical Imaging (ISBI) 2016 and International Conference On Medical Image Computing Computer Assisted Intervention (MICCAI) 2017. Twenty four valid state-of-the-art liver and liver tumor segmentation algorithms were applied to a set of 131 computed tomography (CT) volumes with different types of tumor contrast levels (hyper-/hypo-intense), abnormalities in tissues (metastasectomie) size and varying amount of lesions. The submitted algorithms have been tested on 70 undisclosed volumes. The dataset is created in collaboration with seven hospitals and research institutions and manually reviewed by independent three radiologists. We found that not a single algorithm performed best for liver and tumors. The best liver segmentation algorithm achieved a Dice score of 0.96(MICCAI) whereas for tumor segmentation the best algorithm evaluated at 0.67(ISBI) and 0.70(MICCAI). The LITS image data and manual annotations continue to be publicly available through an online evaluation system as an ongoing benchmarking resource.}, + optnote = {DIAG, RADIOLOGY}, + month = {1}, + gsid = {10996095861110041445}, + gscites = {539}, + all_ss_ids = {['0655dcaa39cf41a3609974840f91300d73b4aed1']}, +} + +@article{Bili22, + title = {The Liver Tumor Segmentation Benchmark (LiTS)}, + journal = MIA, + pages = {102680}, + year = {2022}, + doi = {https://doi.org/10.1016/j.media.2022.102680}, + url = {https://www.sciencedirect.com/science/article/pii/S1361841522003085}, + author = {Patrick Bilic and Patrick Christ and Hongwei Bran Li and Eugene Vorontsov and Avi Ben-Cohen and Georgios Kaissis and Adi Szeskin and Colin Jacobs and Gabriel Efrain Humpire Mamani and Gabriel Chartrand and Fabian Lohofer and Julian Walter Holch and Wieland Sommer and Felix Hofmann and Alexandre Hostettler and Naama Lev-Cohain and Michal Drozdzal and Michal Marianne Amitai and Refael Vivanti and Jacob Sosna and Ivan Ezhov and Anjany Sekuboyina and Fernando Navarro and Florian Kofler and Johannes C. Paetzold and Suprosanna Shit and Xiaobin Hu and Jana Lipkova and Markus Rempfler and Marie Piraud and Jan Kirschke and Benedikt Wiestler and Zhiheng Zhang and Christian Hulsemeyer and Marcel Beetz and Florian Ettlinger and Michela Antonelli and Woong Bae and Miriam Bellver and Lei Bi and Hao Chen and Grzegorz Chlebus and Erik B. 
Dam and Qi Dou and Chi-Wing Fu and Bogdan Georgescu and Xavier Giro-i-Nieto and Felix Gruen and Xu Han and Pheng-Ann Heng and Jurgen Hesser and Jan Hendrik Moltz and Christian Igel and Fabian Isensee and Paul Jager and Fucang Jia and Krishna Chaitanya Kaluva and Mahendra Khened and Ildoo Kim and Jae-Hun Kim and Sungwoong Kim and Simon Kohl and Tomasz Konopczynski and Avinash Kori and Ganapathy Krishnamurthi and Fan Li and Hongchao Li and Junbo Li and Xiaomeng Li and John Lowengrub and Jun Ma and Klaus Maier-Hein and Kevis-Kokitsi Maninis and Hans Meine and Dorit Merhof and Akshay Pai and Mathias Perslev and Jens Petersen and Jordi Pont-Tuset and Jin Qi and Xiaojuan Qi and Oliver Rippel and Karsten Roth and Ignacio Sarasua and Andrea Schenk and Zengming Shen and Jordi Torres and Christian Wachinger and Chunliang Wang and Leon Weninger and Jianrong Wu and Daguang Xu and Xiaoping Yang and Simon Chun-Ho Yu and Yading Yuan and Miao Yue and Liping Zhang and Jorge Cardoso and Spyridon Bakas and Rickmer Braren and Volker Heinemann and Christopher Pal and An Tang and Samuel Kadoury and Luc Soler and Bram van Ginneken and Hayit Greenspan and Leo Joskowicz and Bjoern Menze}, + abstract = {In this work, we report the set-up and results of the Liver Tumor Segmentation Benchmark (LiTS), which was organized in conjunction with the IEEE International Symposium on Biomedical Imaging (ISBI) 2017 and the International Conferences on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2017 and 2018. The image dataset is diverse and contains primary and secondary tumors with varied sizes and appearances with various lesion-to-background levels (hyper-/hypo-dense), created in collaboration with seven hospitals and research institutions. Seventy-five submitted liver and liver tumor segmentation algorithms were trained on a set of 131 computed tomography (CT) volumes and were tested on 70 unseen test images acquired from different patients. We found that not a single algorithm performed best for both liver and liver tumors in the three events. The best liver segmentation algorithm achieved a Dice score of 0.963, whereas, for tumor segmentation, the best algorithms achieved Dices scores of 0.674 (ISBI 2017), 0.702 (MICCAI 2017), and 0.739 (MICCAI 2018). Retrospectively, we performed additional analysis on liver tumor detection and revealed that not all top-performing segmentation algorithms worked well for tumor detection. The best liver tumor detection method achieved a lesion-wise recall of 0.458 (ISBI 2017), 0.515 (MICCAI 2017), and 0.554 (MICCAI 2018), indicating the need for further research. LiTS remains an active benchmark and resource for research, e.g., contributing the liver-related segmentation tasks in http://medicaldecathlon.com/. 
In addition, both data and online evaluation are accessible via www.lits-challenge.com.}, + pmid = {36481607}, + volume = {84}, + ss_id = {0655dcaa39cf41a3609974840f91300d73b4aed1}, + all_ss_ids = {['0655dcaa39cf41a3609974840f91300d73b4aed1']}, + gscites = {539}, +} + +@article{Blek19, + author = {Jeroen Bleker and Thomas Kwee and Rudi Dierckx and Igle Jan de Jong and Henkjan Huisman and Derya Yakar}, + title = {Multiparametric {MRI} and auto-fixed volume of interest-based radiomics signature for clinically significant peripheral zone prostate cancer}, + journal = ER, + year = {2019}, + month = {9}, + doi = {https://doi.org/10.1007/s00330-019-06488-y}, + abstract = {Objectives + To create a radiomics approach based on multiparametric magnetic resonance imaging (mpMRI) features extracted from an auto-fixed volume of interest (VOI) that quantifies the phenotype of clinically significant (CS) peripheral zone (PZ) prostate cancer (PCa). + + Methods + This study included 206 patients with 262 prospectively called mpMRI prostate imaging reporting and data system 3-5 PZ lesions. Gleason scores > 6 were defined as CS PCa. Features were extracted with an auto-fixed 12-mm spherical VOI placed around a pin point in each lesion. The value of dynamic contrast-enhanced imaging (DCE), multivariate feature selection and extreme gradient boosting (XGB) vs. univariate feature selection and random forest (RF), expert-based feature pre-selection, and the addition of image filters was investigated using the training (171 lesions) and test (91 lesions) datasets. + + Results + The best model with features from T2-weighted (T2-w) + diffusion-weighted imaging (DWI) + DCE had an area under the curve (AUC) of 0.870 (95% CI 0.980-0.754). Removal of DCE features decreased AUC to 0.816 (95% CI 0.920-0.710), although not significantly (p = 0.119). Multivariate and XGB outperformed univariate and RF (p = 0.028). Expert-based feature pre-selection and image filters had no significant contribution. + + Conclusions + The phenotype of CS PZ PCa lesions can be quantified using a radiomics approach based on features extracted from T2-w + DWI using an auto-fixed VOI. Although DCE features improve diagnostic performance, this is not statistically significant. Multivariate feature selection and XGB should be preferred over univariate feature selection and RF. The developed model may be a valuable addition to traditional visual assessment in diagnosing CS PZ PCa.}, + file = {:pdf/Blek19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31776744}, + gsid = {12882217133592383323}, + gscites = {39}, + ss_id = {06d09d8c1c217b9bfa0f89736258b8f3b4edc555}, + all_ss_ids = {['06d09d8c1c217b9bfa0f89736258b8f3b4edc555']}, +} + +@article{Blek21, + author = {Bleker, Jeroen and Yakar, Derya and van Noort, Bram and Rouw, Dennis and de Jong, Igle Jan and Dierckx, Rudi AJO and Kwee, Thomas C and Huisman, Henkjan}, + title = {Single-center versus multi-center biparametric MRI radiomics approach for clinically significant peripheral zone prostate cancer}, + journal = INSI, + year = {2021}, + volume = {12}, + number = {1}, + doi = {https://doi.org/10.1186/s13244-021-01099-y}, + optnote = {DIAG, RADIOLOGY}, + abstract = {Objectives + To investigate a previously developed radiomics-based biparametric magnetic resonance imaging (bpMRI) approach for discrimination of clinically significant peripheral zone prostate cancer (PZ csPCa) using multi-center, multi-vendor (McMv) and single-center, single-vendor (ScSv) datasets.
+ + Methods + This study's starting point was a previously developed ScSv algorithm for PZ csPCa whose performance was demonstrated in a single-center dataset. A McMv dataset was collected, and 262 PZ PCa lesions (9 centers, 2 vendors) were selected to identically develop a multi-center algorithm. The single-center algorithm was then applied to the multi-center dataset (single-multi-validation), and the McMv algorithm was applied to both the multi-center dataset (multi-multi-validation) and the previously used single-center dataset (multi-single-validation). The areas under the curve (AUCs) of the validations were compared using bootstrapping. + + Results + Previously the single-single validation achieved an AUC of 0.82 (95% CI 0.71-0.92), a significant performance reduction of 27.2% compared to the single-multi-validation AUC of 0.59 (95% CI 0.51-0.68). The new multi-center model achieved a multi-multi-validation AUC of 0.75 (95% CI 0.64-0.84). Compared to the multi-single-validation AUC of 0.66 (95% CI 0.56-0.75), the performance did not decrease significantly (p value: 0.114). Bootstrapped comparison showed similar single-center performances and a significantly different multi-center performance (p values: 0.03, 0.012). + + Conclusions + A single-center trained radiomics-based bpMRI model does not generalize to multi-center data. Multi-center trained radiomics-based bpMRI models do generalize, have equal single-center performance and perform better on multi-center data.}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/239809}, + ss_id = {3d130766be579a65496f87ec07f51123206fe131}, + all_ss_ids = {['3d130766be579a65496f87ec07f51123206fe131']}, + gscites = {12}, +} + +@article{Blek22, + author = {Bleker, Jeroen and Kwee, Thomas C and Rouw, Dennis and Roest, Christian and Borstlap, Jaap and de Jong, Igle Jan and Dierckx, Rudi AJO and Huisman, Henkjan and Yakar, Derya}, + title = {A deep learning masked segmentation alternative to manual segmentation in biparametric MRI prostate cancer radiomics}, + journal = {European Radiology}, + pages = {1--10}, + year = {2022}, + publisher = {Springer}, + ss_id = {9d6b9f6c203c73af662853a1320659a62eb9be4b}, + all_ss_ids = {['9d6b9f6c203c73af662853a1320659a62eb9be4b']}, + gscites = {10}, +} + +@article{Blue10, + author = {Adriana M J Bluekens and Nico Karssemeijer and David Beijerinck and Jan J M Deurenberg and Ruben E van Engen and Mireille J M Broeders and Gerard J den Heeten}, + title = {Consequences of digital mammography in population-based breast cancer screening: initial changes and long-term impact on referral rates}, + journal = ER, + year = {2010}, + volume = {20}, + pages = {2067--2073}, + doi = {10.1007/s00330-010-1786-7}, + abstract = {OBJECTIVES: To investigate the referral pattern after the transition to full-field digital mammography (FFDM) in a population-based breast cancer screening programme. METHODS: Preceding the nationwide digitalisation of the Dutch screening programme, an FFDM feasibility study was conducted. Detection and referral rates for FFDM and screen-film mammography (SFM) were compared for first and subsequent screens. Furthermore, radiological characteristics of referrals in digital screening were assessed. RESULTS: A total of 312,414 screening mammograms were performed (43,913 digital and 268,501 conventional), with 4,473 consecutive referrals (966 following FFDM). 
Initially the FFDM referral rate peaked, and many false-positive results were noted as a consequence of pseudolesions and increased detection of (benign) microcalcifications. A higher overall referral rate was observed in FFDM screening in both first and subsequent examinations (p < .001), with a significant increase in cancer detection (p = .010). CONCLUSION: As a result of initial inexperience with digital screening images implementing FFDM in a population-based breast cancer screening programme may lead to a strong, but temporary increase in referral. Dedicated training in digital screening for radiographers and screening radiologists is therefore recommended. Referral rates decrease and stabilise (learning curve effect) at a higher level than in conventional screening, yet with significantly enhanced cancer detection.}, + file = {Blue10.pdf:pdf/Blue10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {20407901}, + month = {4}, + gsid = {2663774235436162653}, + gscites = {64}, + ss_id = {c05538614208ef1e78d54fa9325730adcad07b7d}, + all_ss_ids = {['c05538614208ef1e78d54fa9325730adcad07b7d']}, +} + +@article{Blue12, + author = {Bluekens, Adriana M J. and Holland, Roland and Karssemeijer, Nico and Broeders, Mireille J M. and den Heeten, Gerard J.}, + title = {Comparison of Digital Screening Mammography and Screen-Film Mammography in the Early Detection of Clinically Relevant Cancers: A Multicenter Study}, + journal = Radiology, + year = {2012}, + volume = {265}, + pages = {707-714}, + doi = {10.1148/radiol.12111461}, + abstract = {Purpose:To compare screen-film mammography with digital mammography in a breast cancer screening program, with a focus on the clinical relevance of detected cancers.Materials and Methods:The study was approved by the regional medical ethics review board. Informed consent was not required. Before the nationwide transition to digital mammography in the Dutch biennial screening program, the performance of digital mammography was studied in three screening regions. For initial screening examinations, mediolateral oblique and craniocaudal views were obtained of each breast. In subsequent examinations, the mediolateral oblique view was standard. A craniocaudal view was added if indicated. Screening outcomes obtained with screen-film mammography and digital mammography, including radiologic and pathologic characteristics, were compared for initial and subsequent examinations.Results:A total of 1 198 493 screening examinations were performed between 2003 and 2007. Recall was indicated in 18 896 cases (screen-film mammography: 2.6\% at initial examinations, 1.3\% at subsequent examinations; digital mammography: 4.4\% at initial examinations, 2.1\% at subsequent examinations; P < .001 for both). Breast cancer was diagnosed in 6410 women (detection rate per 1000 women with screen-film mammography: 5.6 at initial examinations, 5.2 at subsequent examinations; detection rate per 1000 women with digital mammography: 6.8 at initial examinations, 6.1 at subsequent examinations; P = .02 and P < .001, respectively). Digital mammography depicted significantly more ductal carcinoma in situ (DCIS) lesions, irrespective of screening round. Invasive carcinoma was detected significantly more often in subsequent examinations, particularly when associated with microcalcifications (P = .047). The distribution of the histopathologic differentiation grades for DCIS and invasive carcinoma were similar with both modalities. 
However, with digital mammography more high-grade DCIS lesions were detected at subsequent examinations (P = .013).Conclusion:In a population-based breast screening program, the performance of digital mammography in the detection of DCIS and invasive carcinoma was substantially better than that of screen-film mammography. There is no sign of an increase in detection of low-grade DCIS lesions-indicative of possible overdiagnosis-with digital breast cancer screening. Rather, digital mammography appears to add to the detection of high-grade DCIS. © RSNA, 2012.}, + file = {Blue12.pdf:pdf\\Blue12.pdf:PDF}, + optnote = {DIAG}, + pmid = {23033499}, + month = {12}, + gsid = {1545261100148519203}, + gscites = {115}, +} + +@article{Blue15, + author = {Bluekens, Adriana Mj and Veldkamp, Wouter Jh and Schuur, Klaas H. and Karssemeijer, Nico and Broeders, Mireille Jm and den Heeten, Gerard J.}, + title = {The potential use of ultra-low radiation dose images in digital mammography - a clinical proof-of-concept study in craniocaudal views}, + journal = BJR, + year = {2015}, + volume = {88}, + pages = {20140626}, + doi = {10.1259/bjr.20140626}, + abstract = {Objective: To estimate the potential of low-dose images in digital mammography by analysing the effect of substantial dose reduction in craniocaudal (CC) views on clinical performance. Methods: At routine mammography, additional CC views were obtained with about 10\% of the standard dose. Five radiologists retrospectively read the standard [mediolateral oblique (MLO) + CC] and combination low-dose mammograms (standard MLO+low-dose CC). If present, lesion type, conspicuity, and suggested work-up were recorded. Final diagnoses were made by histology or follow-up. A t-test or Chi-square test was used to compare results. Results: 421 cases were included, presenting 5 malignancies, 66 benign lesions and multiple non-specific radiologic features. Using MLO with low-dose CC, all lesions were detected by at least one reader, but altogether less often than with standard mammography (sensitivity 73.9\% versus 81.5\%). Missed lesions concerned all types. Lesions detected with both protocols were described similarly (p=0.084) with comparable work-up recommendations (p=0.658). Conclusion: Mammography with ultra-low-dose CC images particularly influences detection. While sensitivity decreased, specificity was unaffected. In this proof-of-concept study a lower limit was to be determined that is not intended nor applicable for clinical practice. This should facilitate further research in optimisation of a low-dose approach, which has potential in a relatively young and largely asymptomatic population. Advances in Knowledge: Tungsten/silver-acquired mammography images might facilitate substantial dose reduction. Ultra-low-dose CC images reduce sensitivity, but not specificity.
Low-dose images have potential in a largely young and asymptomatic population; a baseline is set for further research in optimization of a low-dose approach.}, + file = {Blue15.pdf:pdf\\Blue15.pdf:PDF}, + optnote = {DIAG}, + number = {1047}, + pmid = {25571915}, + month = {3}, + gsid = {7320665817091407981}, + gscites = {3}, + ss_id = {5447ca0a02102769866deb7a4cccbeb0dcd9ad46}, + all_ss_ids = {['5447ca0a02102769866deb7a4cccbeb0dcd9ad46']}, +} + +@mastersthesis{Boer18, + author = {Marjolijn den Boer}, + title = {Automated structure segmentation and lymphocyte detection in kidney transplant whole slide images using a convolutional neural network}, + abstract = {Much research is done to understand and prevent renal allograft rejection. Where acute rejection is brought back to a minimum, late rejection still occurs too frequent. Renal allografts with declined function are classified into rejection types by pathologists to improve treatment. This classification is based upon various grading criteria in stained pathological slides. Criteria depend on the different structures present in the kidney and quantification of inflammation. Since the digitization of pathological slides into whole slide images (WSIs), computer aided analyses have proven useful. Artificial neural networks have shown consistent in automated image analyses with quality comparable to trained experts. Here, we examine a convolutional neural networks as a means to aid analysis of renal allograft WSIs on two subjects: segmentation of kidney structures and segmentation of lymphocytes. A dataset of renal allograft biopsies from the Radboud University Medical Centre, Nijmegen, the Netherlands was used. Overall structure segmentation performance is good with a dice coefficient of 0.88. Segmentation of certain specific structures could be increased, but overall the segmentation maps are presumably valuable. Performance of lymphocyte segmentation proves more difficult with a dice coefficient of 0.60 and a large amount of false positives in lymphocyte instance detection. Segmentation performance of the model is currently not sufficient for the difficult task, but indicates future possibilities for lymphocyte segmentation using convolutional neural networks.}, + file = {Boer18.pdf:pdf/Boer18.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2018}, + journal = {Master thesis}, +} + +@mastersthesis{Boer19, + author = {Tim Boers}, + title = {Interactive Residual 3D U-net for the Segmentation of the Pancreas in Computed Tomography Scans}, + abstract = {The thesis is divided into two main parts. Part A will focus on the clinical background, theoretical background of deep learning and will also denote a prior experiment which did not end up in the paper. Part B contains the main product of this thesis and will be provided in paper format.}, + file = {Boer19.pdf:pdf/Boer19.pdf:PDF}, + optnote = {DIAG}, + school = {University of Twente}, + year = {2019}, + journal = {Master thesis}, +} + +@article{Boer20, + author = {Boers, Tim and Hu, Yipeng and Gibson, Eli and Barratt, Dean and Bonmati, Ester and Krdzalic, Jasenko and van der Heijden, Ferdi and Hermans, John and Huisman, Henkjan}, + title = {Interactive 3D U-net for the Segmentation of the Pancreas in Computed Tomography Scans}, + doi = {10.1088/1361-6560/ab6f99}, + number = {6}, + pages = {065002}, + volume = {65}, + abstract = {The increasing incidence of pancreatic cancer will make it the second deadliest cancer in 2030. 
Artificial intelligence can help provide and improve widespread diagnostic expertise and accurate interventional image interpretation. Accurate segmentation of the pancreas is essential to create annotated data sets to train AI, and for computer assisted interventional guidance. Automated deep learning segmentation performance in pancreas CT imaging is low due to poor grey value contrast and complex anatomy. A good solution seemed a recent interactive deep learning segmentation framework for brain CT that helped strongly improve initial automated segmentation with minimal user input. This method yielded no satisfactory results for pancreas CT, possibly due to a sub-optimal neural architecture. We hypothesize that a state-of-the-art U-net neural architecture is better because it can produce a better initial segmentation and is likely to be extended to work in an interactive approach. We implemented the existing interactive method, iFCN, and developed an interactive version of U-net method we call iUnet. The iUnet is fully trained to produce the best possible initial segmentation. In interactive mode it is additionally trained on a partial set of layers on user generated scribbles. We compare initial segmentation performance of iFCN and iUnet on a 100CT dataset using DSC analysis. Secondly, we assessed the performance gain in interactive use with three observers on segmentation quality and time. Average automated baseline performance was 78\% (iUnet) vs 72\% (FCN). Manual and semi-automatic segmentation performance was: 87\% in 15 min. for manual, and 86\% in 8 min. for iUNet. We conclude that iUnet provides a better baseline than iFCN and can reach expert manual performance significantly faster than manual segmentation in case of pancreas CT. Our novel iUnet architecture is modality and organ agnostic and can be a potential novel solution for semi-automatic medical imaging segmentation in general.}, + file = {Boer20.pdf:pdf\\Boer20.pdf:PDF}, + journal = PMB, + optnote = {DIAG, RADIOLOGY}, + pmid = {31978921}, + year = {2020}, + month = {3}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/217643}, + ss_id = {4992f23e29e711eec8b92e17ce35610da3758ecd}, + all_ss_ids = {['4992f23e29e711eec8b92e17ce35610da3758ecd']}, + gscites = {27}, +} + +@mastersthesis{Boer2020, + author = {Tristan de Boer}, + title = {A feasibility study for Deep Learning Image Guided Guidewire Tracking for Image-guided Interventions}, + abstract = {A feasibility study for Deep Learning Image Guided Guidewire Tracking for Image-guided Interventions + A current challenge in real-time magnetic resonance imaging (MRI) guided minimally invasive images is needle tracking and planning. We propose a pipeline for automatic object detection using a state-of-the-art object detection network. Predictions by the object detection network were used to translate the MRI plane to keep a guidewire tip in a plane. We evaluated the pipeline on displacement error between the prediction and the actual location of the guidewire tip in a setup with an anthropomorphic blood vessel. For this setup, we hypothesized that the network should be able to correctly predict the actual location within a margin of 10 mm, at least within 1000 ms. + Results show that the pipeline can accurately track the guidewire tip in real-time (within 458 ms), with a mean displacement error of 7 mm (s = 4). 
Based on this evidence, we have demonstrated the feasibility of deep learning assisted image-guided interventions, creating possibilities for other deep learning guided interventions. Our proposed method shows potential for cryoablation. During these types of minimally invasive procedures tracking needles can be a challenge.}, + file = {:pdf/Boer20a.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + year = {2020}, + journal = {Master thesis}, +} + +@article{Boga22, + author = {Joep M. A. Bogaerts and Miranda P. Steenbeek and Majke H. D. van Bommel and Johan Bulten and Jeroen A. W. M. van der Laak and Joanne A. de Hullu and Michiel Simons}, + title = {Recommendations for diagnosing {STIC}: a systematic review and meta-analysis}, + doi = {10.1007/s00428-021-03244-w}, + number = {4}, + pages = {725--737}, + volume = {480}, + abstract = {Our understanding of the oncogenesis of high-grade serous cancer of the ovary and its precursor lesions, such as serous tubal intraepithelial carcinoma (STIC), has significantly increased over the last decades. Adequate and reproducible diagnosis of these precursor lesions is important. Diagnosing STIC can have prognostic consequences and is an absolute requirement for safely offering alternative risk reducing strategies, such as risk reducing salpingectomy with delayed oophorectomy. However, diagnosing STIC is a challenging task, possessing only moderate reproducibility. In this review and meta-analysis, we look at how pathologists come to a diagnosis of STIC. We performed a literature search identifying 39 studies on risk reducing salpingo-oophorectomy in women with a known BRCA1/2 PV, collectively reporting on 6833 patients. We found a pooled estimated proportion of STIC of 2.8% (95% CI, 2.0-3.7). We focused on reported grossing protocols, morphological criteria, level of pathologist training, and the use of immunohistochemistry. The most commonly mentioned morphological characteristics of STIC are (1) loss of cell polarity, (2) nuclear pleomorphism, (3) high nuclear to cytoplasmic ratio, (4) mitotic activity, (5) pseudostratification, and (6) prominent nucleoli. The difference in reported incidence of STIC between studies who totally embedded all specimens and those who did not was 3.2% (95% CI, 2.3-4.2) versus 1.7% (95% CI, 0.0-6.2) (p 0.24). 
We provide an overview of diagnostic features and present a framework for arriving at an adequate diagnosis, consisting of the use of the SEE-FIM grossing protocol, evaluation by a subspecialized gynecopathologist, rational use of immunohistochemical staining, and obtaining a second opinion from a colleague.}, + file = {Boga22a.pdf:pdf\\Boga22a.pdf:PDF}, + month = {12}, + optnote = {DIAG}, + publisher = {Springer Science and Business Media {LLC}}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/251830}, + ss_id = {a5c0ae9b50b0058f6e16bdc59a95e489b897e430}, + all_ss_ids = {['a5c0ae9b50b0058f6e16bdc59a95e489b897e430']}, + gscites = {10}, +} + +@article{Bogu19, + author = {Bogunovic, Hrvoje and Venhuizen, Freerk and Klimscha, Sophie and Apostolopoulos, Stefanos and Bab-Hadiashar, Alireza and Bagci, Ulas and Beg, Mirza Faisal and Bekalo, Loza and Chen, Qiang and Ciller, Carlos and Gopinath, Karthik and Gostar, Amirali K and Jeon, Kiwan and Ji, Zexuan and Kang, Sung Ho and Koozekanani, Dara D and Lu, Donghuan and Morley, Dustin and Parhi, Keshab K and Park, Hyoung Suk and Rashno, Abdolreza and Sarunic, Marinko and Shaikh, Saad and Sivaswamy, Jayanthi and Tennakoon, Ruwan and Yadav, Shivin and De Zanet, Sandro and Waldstein, Sebastian M and Gerendas, Bianca S and Klaver, Caroline and S\'{a}nchez, Clara I and Schmidt-Erfurth, Ursula}, + title = {RETOUCH: The Retinal OCT Fluid Detection and Segmentation Benchmark and Challenge}, + doi = {10.1109/TMI.2019.2901398}, + issue = {8}, + pages = {1858--1874}, + volume = {38}, + abstract = {Retinal swelling due to the accumulation of fluid is associated with the most vision-threatening retinal diseases. Optical coherence tomography (OCT) is the current standard of care in assessing the presence and quantity of retinal fluid and image-guided treatment management. Deep learning methods have made their impact across medical imaging, and many retinal OCT analysis methods have been proposed. However, it is currently not clear how successful they are in interpreting the retinal fluid on OCT, which is due to the lack of standardized benchmarks. To address this, we organized a challenge RETOUCH in conjunction with MICCAI 2017, with eight teams participating. The challenge consisted of two tasks: fluid detection and fluid segmentation. It featured for the first time: all three retinal fluid types, with annotated images provided by two clinical centers, which were acquired with the three most common OCT device vendors from patients with two different retinal diseases. The analysis revealed that in the detection task, the performance on the automated fluid detection was within the inter-grader variability. 
However, in the segmentation task, fusing the automated methods produced segmentations that were superior to all individual methods, indicating the need for further improvements in the segmentation performance.}, + file = {Bogu19.pdf:pdf\\Bogu19.pdf:PDF}, + journal = TMI, + month = {8}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30835214}, + year = {2019}, + gsid = {11669534821534873919}, + gscites = {135}, + ss_id = {6fd044b6c49661a5a1db4b49543c9a6ca4b108fb}, + all_ss_ids = {['6fd044b6c49661a5a1db4b49543c9a6ca4b108fb']}, +} + +@article{Bohn22, + author = {Bohner, Lauren and Vinayahalingam, Shankeeth and Kleinheinz, Johannes and Hanisch, Marcel}, + title = {Digital Implant Planning in Patients with Ectodermal Dysplasia: Clinical Report.}, + doi = {10.3390/ijerph19031489}, + issue = {3}, + volume = {19}, + abstract = {Ectodermal dysplasia may severely affect the development of jaw growth and facial appearance. This case report describes the treatment of two patients suffering from ectodermal dysplasia, both treated with dental implant-fixed restorations by means of computer-guided surgery. Two patients presented to our clinic with congenital malformation of the jaw as a manifestation of ectodermal dysplasia, showing oligodontia and alveolar ridge deficit. Clinical examination revealed multiple unattached teeth and a need for prosthetic therapy. For both cases, dental implants were placed based on a computer-guided planning. A surgical guide was used to determine the positioning of the dental implants according to the prosthetic planning, which allowed for a satisfactory aesthetic and functional outcome. Computer-guided implant placement allowed predictable treatment of complex cases with satisfactory aesthetic and functional results. Adequate surgical and prosthetic planning is considered critical for treatment success.}, + file = {Bohn22.pdf:pdf\\Bohn22.pdf:PDF}, + journal = {International journal of environmental research and public health}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35162510}, + year = {2022}, +} + +@article{Bois12, + author = {Boiselle, Phillip M. and Goodman, Lawrence R. and Litmanovich, Diana and R\'{e}my-Jardin, Martine and Schaefer-Prokop, Cornelia}, + title = {Expert opinion: {CT} pulmonary angiography in pregnant patients with suspected pulmonary embolism}, + journal = JTI, + year = {2012}, + volume = {27}, + pages = {5}, + doi = {10.1097/RTI.0b013e31824008ad}, + file = {Bois12.pdf:pdf\\Bois12.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {22189243}, + month = {1}, + ss_id = {69ee456f69165c71d2c731334f93039c7cf21f9b}, + all_ss_ids = {['69ee456f69165c71d2c731334f93039c7cf21f9b']}, + gscites = {3}, +} + +@inproceedings{Bokh18a, + author = {Bokhorst, John-Melle and Rijstenberg, Lucia and Goudkade, Danny and Nagtegaal, Iris and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Automatic Detection of Tumor Budding in Colorectal Carcinoma with Deep Learning}, + booktitle = COMPAY, + year = {2018}, + publisher = {Springer}, + month = {1}, + doi = {10.1007/978-3-030-00949-6_16}, + url = {http://dx.doi.org/10.1007/978-3-030-00949-6_16}, + abstract = {Colorectal cancer patients would benefit from a valid, reliable and efficient detection of Tumor Budding (TB), as this is a proven prognostic biomarker. We explored the application of deep learning techniques to detect TB in Hematoxylin and Eosin (H&E) stained slides, and used convolutional neural networks to classify image patches as containing tumor buds, tumor glands and background. 
As a reference standard for training we stained slides both with H&E and immunohistochemistry (IHC), where one pathologist first annotated buds in IHC and then transferred the obtained annotations to the corresponding H&E image. We show the effectiveness of the proposed three-class approach, which allows to substantially reduce the amount of false positives, especially when combined with a hard-negative mining technique. Finally we report the results of an observer study aimed at investigating the correlation between pathologists at detecting TB in IHC and H&E.}, + file = {Bokh18a.pdf:pdf\\Bokh18a.pdf:PDF}, + optnote = {DIAG}, + gsid = {18087021687980029988}, + gscites = {10}, + ss_id = {bf16e7e46d23a0e83bd2efe2d13e7ba7b0c47b51}, + all_ss_ids = {['bf16e7e46d23a0e83bd2efe2d13e7ba7b0c47b51']}, +} + +@inproceedings{Bokh19, + author = {Bokhorst, John-Melle and Pinckaers, Hans and van Zwam, Peter and Nagtegaal, Iris and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Learning from sparsely annotated data for semantic segmentation in histopathology images}, + booktitle = MIDL, + year = {2019}, + volume = {102}, + series = {Proceedings of Machine Learning Research}, + pages = {81-94}, + url = {http://proceedings.mlr.press/v102/bokhorst19a.html}, + abstract = {We investigate the problem of building convolutional networks for semantic segmentation in histopathology images when weak supervision in the form of sparse manual annotations is provided in the training set. We propose to address this problem by modifying the loss function in order to balance the contribution of each pixel of the input data. We introduce and compare two approaches of loss balancing when sparse annotations are provided, namely (1) instance based balancing and (2) mini-batch based balancing. We also consider a scenario of full supervision in the form of dense annotations, and compare the performance of using either sparse or dense annotations with the proposed balancing schemes. Finally, we show that using a bulk of sparse annotations and a + small fraction of dense annotations allows to achieve performance comparable to full supervision.}, + file = {Bokh19.pdf:pdf\\Bokh19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {9237806221216165855}, + gscites = {34}, + ss_id = {e58b84e96868eb5f165ab710ef43668c4d8bbf74}, + all_ss_ids = {['e58b84e96868eb5f165ab710ef43668c4d8bbf74']}, +} + +@conference{Bokh19a, + author = {Bokhorst, John-Melle and Dawson, Heather and Blank, Annika and Zlobec, Inti and Lugli, Alessandro and Vieth, Michael and Kirsch, Richard and Urbanowicz, Maria and Brockmoeller, Scarlet and Flejou, Jean-Francois and Rijstenberg, Lucia and van der Laak, Jeroen and Ciompi, Francesco and Nagtegaal, Iris}, + title = {Assessment of tumor buds in colorectal cancer. A large-scale international digital observer study}, + booktitle = ECP, + year = {2019}, + abstract = {Tumor budding (TB) is a promising and cost-effective biomarker with strong prognostic value in colorectal cancer. We previously found moderate agreement between two pathologists scoring TB using the International Tumor Budding Consensus Conference (ITBCC) guidelines, while considerable discrepancy in identifying individual tumor buds was observed. To explore this issue further, we performed a large-scale international digital observer study on the assessment of individual tumor buds. We extracted 3000 tumor bud candidates by application of digital image analysis algorithms.
For every candidate, an image patch (size 256x256um) was extracted from pan-cytokeratinstained whole-slide images of 36 patients with reported TB. Members of a tumor budding consortium were invited to categorize each individual object as (1) tumor bud, (2) poorly differentiated cluster, or (3) none of the previous, based on best practice and current definitions. Agreement was assessed with Cohen's and Fleiss Kappa. Cohen's and Fleiss Kappa showed a fair to moderate overall agreement between observers (range 0.24-0.65 and 0.37 respectively) when asked to score 3000 individual objects. Despite adequate agreement between observers in the assessment of TB on patient level, the agreement on individual tumor bud level using immunohistochemistry is only fair. To better understand the causes of this disagreement, more research is needed involving H&E stained images. A machine learning approach may prove especially useful for a more robust assessment of individual tumor buds.}, + optnote = {DIAG}, +} + +@article{Bokh19b, + author = {Bokhorst, John-Melle and Blank, A. and Lugli, A. and Zlobec, I. and Dawson, H. and Vieth, M. and Rijstenberg, L. L. and Brockmoeller, S. and Urbanowicz, M. and Flejou, J. F. and Kirsch, R. and Ciompi, F. and van der Laak, J. A. W. M. and Nagtegaal, I. D.}, + title = {Assessment of individual tumor buds using keratin immunohistochemistry: moderate interobserver agreement suggests a role for machine learning}, + journal = MODP, + year = {2019}, + doi = {10.1038/s41379-019-0434-2}, + url = {https://www.nature.com/articles/s41379-019-0434-2}, + abstract = {Tumor budding is a promising and cost-effective biomarker with strong prognostic value in colorectal cancer. However, challenges related to interobserver variability persist. Such variability may be reduced by immunohistochemistry and computer-aided tumor bud selection. Development of computer algorithms for this purpose requires unequivocal examples of individual tumor buds. As such, we undertook a large-scale, international, and digital observer study on individual tumor bud assessment. From a pool of 46 colorectal cancer cases with tumor budding, 3000 tumor bud candidates were selected, largely based on digital image analysis algorithms. For each candidate bud, an image patch (size 256 x 256 um) was extracted from a pan cytokeratin-stained whole-slide image. Members of an International Tumor Budding Consortium (n = 7) were asked to categorize each candidate as either (1) tumor bud, (2) poorly differentiated cluster, or (3) neither, based on current definitions. Agreement was assessed with Cohen's and Fleiss Kappa statistics. Fleiss Kappa showed moderate overall agreement between observers (0.42 and 0.51), while Cohen's Kappas ranged from 0.25 to 0.63. Complete agreement by all seven observers was present for only 34% of the 3000 tumor bud candidates, while 59% of the candidates were agreed on by at least five of the seven observers. Despite reports of moderate-to-substantial agreement with respect to tumor budding grade, agreement with respect to individual pan cytokeratin-stained tumor buds is moderate at most. 
A machine learning approach may prove especially useful for a more robust assessment of individual tumor buds.}, + file = {Bokh19b.pdf:pdf\\Bokh19b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31844269}, + month = {12}, + gsid = {15779257869957117093}, + gscites = {28}, + all_ss_ids = {['aff9f3cd8024fb64c7ea36d396a36ebf33d01f5b', 'c3e98e4b01c27affe1b6c161172c5e27e8141618']}, +} + +@conference{Bokh20, + author = {Bokhorst, John-Melle and Ciompi, Francesco and Zlobec, Inti and Lugli, Alessandro and Vieth, Michael and Kirsch, Richard and van der Laak, Jeroen and Nagtegaal, Iris}, + booktitle = ECP, + title = {Computer-assisted hot-spot selection for tumor budding assessment in colorectal cancer}, + abstract = {Background & objectives + Tumor budding (TB) is an established prognosticator for colorectal cancer. Detection of the hot-spot to score TB is based on visual inspection, hindering reproducibility of this important factor. We present an algorithm that can potentially assist pathologists in this task. + + Methods + We used a previously developed algorithm for the detection of tumor buds in pan-cytokeratin stained whole slide images, calculating the number of buds for each location using a circle with 0.785mm2 surface area. From these numbers, density heatmaps were produced. The algorithm was applied to 270 slides from Bern University hospital, in which hot-spots and tumor buds were visually identified. + + Results + Heat maps were created and we located the hand-selected hotspot and noted the associated TB number. The differences and similarities between computer identified and manually selected hot-spots were visually assessed as well as via histograms. Preliminary results show that the heatmaps are helpful, as locations with the highest TB density (the top 15%) also include the hand-selected hotspots. The full results will be presented during the conference. + + Conclusion + The presented algorithm can assist the pathologist in selecting the hot-spot with the highest tumor bud count with more ease at low magnification and can help to reduce the high interobserver variability among pathologists in scoring tumor budding.}, + optnote = {DIAG}, + year = {2020}, +} + +@conference{Bokh20a, + author = {Bokhorst, John-Melle and Nagtegaal, Iris and Zlobec, Inti and Lugli, Alessandro and Vieth, Michael and Kirsch, Richard and van der Laak, Jeroen and Ciompi, Francesco}, + booktitle = ECP, + title = {Deep learning based tumor bud detection in pan-cytokeratin stained colorectal cancer whole-slide images}, + abstract = {Background & objectives + Tumor budding (TB) is an established prognosticator for colorectal cancer. Deep learning based TB assessment has the potential to improve diagnostic reproducibility and efficiency. We developed an algorithm that can detect individual tumor buds in pan-cytokeratin stained colorectal cancer slides + + Methods + Tumor-bud candidates (n=1765, collected from 58 whole slide images; WSI) were labeled by seven experts as either TB, poorly differentiated cluster, or neither. The 58 slides were randomly split into a training (49) and test-set (9). A deep learning (DL) model was trained using the buds identified by the experts in the training set. + + Results + The algorithm was tested on the nine remaining WSI and 270 WSI from pan-cytokeratin stained slides from Bern University hospital, in which hot spots and TB were manually scored. An F1 score of 0.82 was found for correspondence at the bud level between experts and DL.
A correlation of 0.745 was found between the manually counted buds within the hotspots and the automated method in the 270 WSIs. + + Conclusion + Assessment of tumor budding as a prognostic factor for colorectal cancer can be automated using deep learning. At the level of individual tumor buds, correspondence between DL and experts is high and comparable to the inter-rater variability. However, compared to the manual procedure, the algorithm yields higher counts for cases with relatively high bud densities (>15). Follow-up studies will focus on the assessment of TB in H&E stained slides.}, + optnote = {DIAG}, + year = {2020}, +} + +@article{Bokh23, + author = {J. Bokhorst and I. Nagtegaal and F. Fraggetta and S. Vatrano and W. Mesker and Michael Vieth and J. A. van der Laak and F. Ciompi}, + title = {Deep learning for multi-class semantic segmentation enables colorectal cancer detection and classification in digital pathology images}, + abstract = {In colorectal cancer (CRC), artificial intelligence (AI) can alleviate the laborious task of characterization and reporting on resected biopsies, including polyps, the numbers of which are increasing as a result of CRC population screening programs ongoing in many countries all around the globe. Here, we present an approach to address two major challenges in the automated assessment of CRC histopathology whole-slide images. We present an AI-based method to segment multiple (n=14) tissue compartments in the H &E-stained whole-slide image, which provides a different, more perceptible picture of tissue morphology and composition. We test and compare a panel of state-of-the-art loss functions available for segmentation models, and provide indications about their use in histopathology image segmentation, based on the analysis of (a) a multi-centric cohort of CRC cases from five medical centers in the Netherlands and Germany, and (b) two publicly available datasets on segmentation in CRC. We used the best performing AI model as the basis for a computer-aided diagnosis system that classifies colon biopsies into four main categories that are relevant pathologically. We report the performance of this system on an independent cohort of more than 1000 patients. The results show that with a good segmentation network as a base, a tool can be developed which can support pathologists in the risk stratification of colorectal cancer patients, among other possible uses. We have made the segmentation model available for research use on https://grand-challenge.org/algorithms/colon-tissue-segmentation/.}, + file = {Bokh23.pdf:pdf\\Bokh23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY}, + pmid = {37225743}, + year = {2023}, + journal = NATSCIREP, + volume = {13}, + pages = {8398}, + doi = {10.1038/s41598-023-35491-z}, + ss_id = {f7c9d9a217387d3a73a39b232a3bc14e6706b533}, + all_ss_ids = {['f7c9d9a217387d3a73a39b232a3bc14e6706b533']}, + gscites = {1}, +} + +@article{Bokh23a, + author = {J. Bokhorst and I. Nagtegaal and I. Zlobec and H. Dawson and K. Sheahan and F. Simmer and R. Kirsch and Michael Vieth and A. Lugli and J. A. van der Laak and F. Ciompi}, + title = {Semi-Supervised Learning to Automate Tumor Bud Detection in Cytokeratin-Stained Whole-Slide Images of Colorectal Cancer}, + url = {https://www.semanticscholar.org/paper/be741d6f455a941f206de7fecb44f81678f385bf}, + abstract = {Simple Summary Tumor budding is a promising and cost-effective histological biomarker with strong prognostic value in colorectal cancer. 
It is defined by the presence of single tumor cells or small clusters of cells within the tumor or at the tumor-invasion front. Deep learning based tumor bud assessment can potentially improve diagnostic reproducibility and efficiency. This study aimed to develop a deep learning algorithm to detect tumor buds in cytokeratin-stained images automatically. We used a semi-supervised learning technique to overcome the limitations of a small dataset. Validation of our model showed a sensitivity of 91% and a fairly strong correlation between a human annotator and our deep learning method. We demonstrate that the automated tumor bud count achieves a prognostic value similar to visual estimation. We also investigate new metrics for quantifying buds, such as density and dispersion, and report on their predictive value. Abstract Tumor budding is a histopathological biomarker associated with metastases and adverse survival outcomes in colorectal carcinoma (CRC) patients. It is characterized by the presence of single tumor cells or small clusters of cells within the tumor or at the tumor-invasion front. In order to obtain a tumor budding score for a patient, the region with the highest tumor bud density must first be visually identified by a pathologist, after which buds will be counted in the chosen hotspot field. The automation of this process will expectedly increase efficiency and reproducibility. Here, we present a deep learning convolutional neural network model that automates the above procedure. For model training, we used a semi-supervised learning method, to maximize the detection performance despite the limited amount of labeled training data. The model was tested on an independent dataset in which human- and machine-selected hotspots were mapped in relation to each other and manual and machine detected tumor bud numbers in the manually selected fields were compared. We report the results of the proposed method in comparison with visual assessment by pathologists. We show that the automated tumor bud count achieves a prognostic value comparable with visual estimation, while based on an objective and reproducible quantification. We also explore novel metrics to quantify buds such as density and dispersion and report their prognostic value. 
We have made the model available for research use on the grand-challenge platform.}, + file = {Bokh23a.pdf:pdf\\Bokh23a.pdf:PDF}, + optnote = {DIAG, PATHOLOGY}, + journal = {Cancers}, + volume = {15}, + issue = {7}, + pages = {2079}, + pmid = {37046742}, + year = {2023}, + doi = {10.3390/cancers15072079}, + ss_id = {be741d6f455a941f206de7fecb44f81678f385bf}, + all_ss_ids = {['be741d6f455a941f206de7fecb44f81678f385bf']}, + gscites = {1}, +} + +@article{Bokh23b, + author = {Bokhorst, John-Melle and Ciompi, Francesco and \"{O}zt\"{u}rk, Sonay Kus and Oguz Erdogan, Ayse Selcen and Vieth, Michael and Dawson, Heather and Kirsch, Richard and Simmer, Femke and Sheahan, Kieran and Lugli, Alessandro and Zlobec, Inti and van der Laak, Jeroen and Nagtegaal, Iris D.}, + title = {Fully Automated Tumor Bud Assessment in Hematoxylin and Eosin-Stained Whole Slide Images of Colorectal Cancer}, + doi = {10.1016/j.modpat.2023.100233}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.modpat.2023.100233}, + file = {Bokh23b.pdf:pdf\Bokh23b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Modern Pathology}, + citation-count = {0}, + automatic = {yes}, + pages = {100233}, + volume = {36}, + ss_id = {0e7ef01ded474485f8f715342f18907f451526c5}, + all_ss_ids = {['0e7ef01ded474485f8f715342f18907f451526c5']}, + gscites = {0}, +} + +@inproceedings{Bomm22, + author = {van Bommel, Majke and Bogaerts, Joep and Hermens, Rosella and Steenbeek, Miranda and de Hullu, Joanne and van der Laak, Jeroen and Simons, Michiel}, + title = {2022-RA-646-ESGO Consensus based recommendations for the diagnosis of serous tubal intraepithelial carcinoma, an international delphi study}, + doi = {10.1136/ijgc-2022-esgo.790}, + year = {2022}, + abstract = {Introduction/Background Reliable diagnosis of precursor lesions to high grade serous cancer (HGSC) is crucial, for individual patient care, for better understanding its oncogenesis and for research regarding novel strategies to prevent ovarian cancer. These precursor lesions, serous tubal intraepithelial carcinoma (STIC), are difficult to diagnose: the lesion is small, rare, and clear diagnostic criteria are lacking. We aim to optimize STIC diagnosis by providing recommendations for STIC diagnosis, based on international consensus from gynecopathologists. Methodology A three-round Delphi study was conducted to systematically explore current clinical practice and to reach consensus regarding STIC diagnosis. First, an expert panel consisting of international gynecopathologists was formed. This panel was asked to provide information regarding all relevant aspects of STIC diagnostics, which was used to form a set of statements. Second, the panel rated their agreement on those statements. Third, statements without consensus, according to predefined rules, were rated again by the panel members in the light of the anonymous responses to round 2 of the other panel members. Finally, each expert was asked to either approve or disapprove the set of consensus statements. Results A panel of 34 gynecopathologists from 11 countries rated their agreement on 64 statements. A total of 27 statements (42%) reached consensus. This set reflects the entire diagnostic workup for pathologists, regarding processing and macroscopy, microscopy, immunohistochemistry, interpretation and reporting. The final set of consensus statements was approved by 76% of the experts. 
Conclusion A set of 27 statements regarding STIC diagnosis reached consensus by an international expert panel of gynecopathologists. Those consensus statements contribute to a basis for international standards for STIC diagnosis, which are urgently needed for better understanding of HGSC, for better counselling of patients, and for safely investigating novel preventive strategies for women at high risk of ovarian cancer.}, + url = {http://dx.doi.org/10.1136/ijgc-2022-esgo.790}, + file = {Bomm22.pdf:pdf\Bomm22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Pathology}, + citation-count = {0}, + automatic = {yes}, +} + +@article{Boo09, + author = {D. W. de Boo and M. Prokop and M. Uffmann and B. van Ginneken and C. M. Schaefer-Prokop}, + title = {Computer-aided detection ({CAD}) of lung nodules and small tumours on chest radiographs}, + journal = EJR, + year = {2009}, + volume = {72}, + pages = {218--225}, + doi = {10.1016/j.ejrad.2009.05.062}, + abstract = {{D}etection of focal pulmonary lesions is limited by quantum and anatomic noise and highly influenced by variable perception capacity of the reader. {M}ultiple studies have proven that lesions - missed at time of primary interpretation - were visible on the chest radiographs in retrospect. {C}omputer-aided diagnosis ({CAD}) schemes do not alter the anatomic noise but aim at decreasing the intrinsic limitations and variations of human perception by alerting the reader to suspicious areas in a chest radiograph when used as a 'second reader'. {M}ultiple studies have shown that the detection performance can be improved using {CAD} especially for less experienced readers at a variable amount of decreased specificity. {T}here seem to be a substantial learning process for both, experienced and inexperienced readers, to be able to optimally differentiate between false positive and true positive lesions and to build up sufficient trust in the capabilities of these systems to be able to use them at their full advantage. {S}tudies so far focussed on stand-alone performance of the {CAD} schemes to reveal the magnitude of potential impact or on retrospective evaluation of {CAD} as a second reader for selected study groups. {F}urther research is needed to assess the performance of these systems in clinical routine and to determine the trade-off between performance increase in terms of increased sensitivity and decreased inter-reader variability and loss of specificity and secondary indicated follow-up examinations for further diagnostic workup.}, + file = {Boo09.pdf:pdf\\Boo09.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + number = {2}, + pmid = {19747791}, + month = {11}, + gsid = {5227608991913046865}, + gscites = {38}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/81498}, + ss_id = {40cc7c929546e4422263fa844a8cd6f47d266ead}, + all_ss_ids = {['40cc7c929546e4422263fa844a8cd6f47d266ead']}, +} + +@article{Boo11, + author = {D. W. de Boo and M. Uffmann and M. Weber and S. Bipat and E. F. Boorsma and M. J. Scheerder and N. J. Freling and C. M. Schaefer-Prokop}, + title = {Computer-aided Detection of Small Pulmonary Nodules in Chest Radiographs An Observer Study}, + journal = AR, + year = {2011}, + volume = {18}, + pages = {1507--1514}, + doi = {10.1016/j.acra.2011.08.008}, + abstract = {RATIONALE AND OBJECTIVES: To evaluate the impact of computer-aided detection (CAD, IQQA-Chest; EDDA Technology, Princeton Junction, NJ) used as second reader on the detection of small pulmonary nodules in chest radiography (CXR). 
MATERIALS AND METHODS: A total of 113 patients (mean age 62 years) with CT and CXR within 6 weeks were selected. Fifty-nine patients showed 101 pulmonary nodules (diameter 5-15mm); the remaining 54 patients served as negative controls. Six readers of varying experience individually evaluated the CXR without and with CAD as second reader in two separate reading sessions. The sensitivity per lesion, figure of merit (FOM), and mean false positive per image (mFP) were calculated. Institutional review board approval was waived. RESULTS: With CAD, the sensitivity increased for inexperienced readers (39\% vs. 45\%, P < .05) and remained unchanged for experienced readers (50\% vs. 51\%). The mFP nonsignificantly increased for both inexperienced and experienced readers (0.27 vs. 0.34 and 0.16 vs. 0.21). The mean FOM did not significantly differ for readings without and with CAD irrespective of reader experience (0.71 vs. 0.71 and 0.84 vs. 0.87). All readers together dismissed 33\% of true-positive CAD candidates. False-positive candidates by CAD provoked 40\% of all false-positive marks made by the readers. CONCLUSION: CAD improves the sensitivity of inexperienced readers for the detection of small nodules at the expense of loss of specificity. Overall performance by means of FOM was therefore not affected. To use CAD more beneficial, readers need to improve their ability to differentiate true from false-positive CAD candidates.}, + file = {Boo11.pdf:pdf\\Boo11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {21963532}, + month = {12}, + ss_id = {3363021df95dabbccfb6e95762195b67c9d8a259}, + all_ss_ids = {['3363021df95dabbccfb6e95762195b67c9d8a259']}, + gscites = {25}, +} + +@article{Boo11a, + author = {D. W. de Boo and M. Uffmann and S. Bipat and E. F. A. Boorsma and M. J. Scheerder and M. Weber and C. M. Schaefer-Prokop}, + title = {Gray-Scale Reversal for the Detection of Pulmonary Nodules on a PACS Workstation}, + journal = AJR, + year = {2011}, + volume = {197}, + pages = {1096--1100}, + doi = {10.2214/AJR.11.6625}, + abstract = {The purpose of this article is to evaluate the impact of gray-scale reversal on the detection of small pulmonary nodules in two-view chest radiography.One hundred twenty-eight patients (mean age, 62 years) who underwent CT and chest radiography within 6 weeks were retrospectively selected for this study. Seventy-three percent of patients showed variable degrees of radiographic findings of a "dirty lung." A total of 129 solid pulmonary nodules were present in 74 patients (nodule diameter range, 5-30 mm; mean diameter, 13 mm). The remaining 54 patients served as negative control subjects. Six readers with varying experience levels evaluated the images without and with the availability of gray-scale reversal in two separate reading sessions. Figure of merit (FOM), sensitivity per lesion, mean number of false-positive marks per image, and accuracy were calculated.Five of the six readers showed a slight increase in sensitivity with the use of gray-scale reversal, but on average, the difference was not significant (48\% vs 50\%; p > 0.05). The mean number of false-positive marks per image also nonsignificantly increased from 0.20 to 0.23. The increases in both sensitivity and the mean number of false-positive marks per image translated into nonsignificant decreases in average FOM (0.79 vs 0.77) and accuracy (72\% vs 71\%). 
Data analysis of subgroups of nodules or different reader groups, depending on level of experience, did not reveal significant differences.Using PACS display of digital chest radiographs, gray-scale reversal does not help the radiologists in detecting pulmonary nodules.}, + file = {Boo11a.pdf:pdf\\Boo11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {22021501}, + month = {11}, + ss_id = {271cc27f0f6b1b4a87e43eed7c85df9c729b3279}, + all_ss_ids = {['271cc27f0f6b1b4a87e43eed7c85df9c729b3279']}, + gscites = {0}, +} + +@article{Boo11b, + author = {D. W. de Boo and M. Weber and E. E. Deurloo and G. J. Streekstra and N. J. Freling and D. A. Dongelmans and C. M. Schaefer-Prokop}, + title = {Computed radiography versus mobile direct radiography for bedside chest radiographs: impact of dose on image quality and reader agreement}, + journal = CLINR, + year = {2011}, + volume = {66}, + pages = {826--832}, + doi = {10.1016/j.crad.2011.03.013}, + abstract = {To assess the image quality and potential for dose reduction of mobile direct detector (DR) chest radiography as compared with computed radiography (CR) for intensive care unit (ICU) chest radiographs (CXR).Three groups of age-, weight- and disease-matched ICU patients (n=114 patients; 50 CXR per acquisition technique) underwent clinically indicated bedside CXR obtained with either CR (single read-out powder plates) or mobile DR (GOS-TFT detectors) at identical or 50\% reduced dose (DR(50\%)). Delineation of anatomic structures and devices used for patient monitoring, overall image quality and disease were scored by four readers. In 12 patients pairs of follow-up CR and DR images were available, and in 15 patients pairs of CR and DR(50\%) images were available. In these pairs the overall image quality was also compared side-by-side.Delineation of anatomy in the mediastinum was scored better with DR or DR(50\%) than with CR. Devices used for patient monitoring were seen best with DR, with DR(50\%) being superior to CR. In the side-by-side comparison, the overall image quality of DR and DR(50\%) was rated better than CR in 96\% (46/48) and 87\% (52/60), respectively. Inter-observer agreement for the assessment of pathology was fair for CR and DR(50\%) (kappa = 0.33 and kappa = 0.39, respectively) and moderate for DR (kappa = 0.48).Mobile DR units offer better image quality than CR for bedside chest radiography and allow for 50\% dose reduction. Inter-observer agreement increases with image quality and is superior with DR, while DR(50\%) and CR are comparable.}, + file = {Boo11b.pdf:pdf\\Boo11b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {21570679}, + month = {9}, + ss_id = {23bcee96f8bb86f925bb2d3d0d3b0fb3d870fcee}, + all_ss_ids = {['23bcee96f8bb86f925bb2d3d0d3b0fb3d870fcee']}, + gscites = {18}, +} + +@phdthesis{Boo12, + author = {D. W. de Boo}, + title = {Advances in digital chest radiography: impact on reader performance}, + year = {2012}, + url = {http://dare.uva.nl/record/1/385863}, + abstract = {This thesis demonstrates that - The introduction of mobile direct radiography at the bedside allows for 50% dose reduction, as compared to computed radiography, without loss of clinically relevant image quality. Alternatively, the improved image quality obtained at unaltered dose can be used to uniform diagnostic performance. - Using PACS display of digital chest radiographs, gray-scale reversal does not help the radiologists in detecting small pulmonary nodules.
- The potential of CAD to reduce detection errors by radiologists is not fully established. - Despite short-term observer training, radiologists still have difficulties differentiating true positive from false positive CAD candidates.}, + copromotor = {C. M. Schaefer-Prokop}, + file = {Boo12.pdf:pdf\\Boo12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {J. S. Lam\'{e}ris}, + school = {University of Amsterdam}, + journal = {PhD thesis}, +} + +@article{Boo12a, + author = {De Boo, Diederick W. and van Hoorn, Fran\c{c}ois and van Schuppen, Joost and Schijf, Laura and Scheerder, Maeke J. and Freling, Nicole J. and Mets, Onno and Weber, Michael and Schaefer-Prokop, Cornelia M.}, + title = {Observer training for computer-aided detection of pulmonary nodules in chest radiography}, + journal = ER, + year = {2012}, + volume = {22}, + pages = {1659--1664}, + doi = {10.1007/s00330-012-2412-7}, + abstract = {To assess whether short-term feedback helps readers to increase their performance using computer-aided detection (CAD) for nodule detection in chest radiography.The 140 CXRs (56 with a solitary CT-proven nodules and 84 negative controls) were divided into four subsets of 35; each were read in a different order by six readers. Lesion presence, location and diagnostic confidence were scored without and with CAD (IQQA-Chest, EDDA Technology) as second reader. Readers received individual feedback after each subset. Sensitivity, specificity and area under the receiver-operating characteristics curve (AUC) were calculated for readings with and without CAD with respect to change over time and impact of CAD.CAD stand-alone sensitivity was 59 \% with 1.9 false-positives per image. Mean AUC slightly increased over time with and without CAD (0.78 vs. 0.84 with and 0.76 vs. 0.82 without CAD) but differences did not reach significance. The sensitivity increased (65 \% vs. 70 \% and 66 \% vs. 70 \%) and specificity decreased over time (79 \% vs. 74 \% and 80 \% vs. 77 \%) but no significant impact of CAD was found.Short-term feedback does not increase the ability of readers to differentiate true- from false-positive candidate lesions and to use CAD more effectively. - Computer-aided detection (CAD) is increasingly used as an adjunct for many radiological techniques. - Short-term feedback does not improve reader performance with CAD in chest radiography. - Differentiation between true- and false-positive CAD for low conspicuous possible lesions proves difficult. - CAD can potentially increase reader performance for nodule detection in chest radiography.}, + file = {Boo12a.pdf:pdf\\Boo12a.pdf:PDF}, + optnote = {DIAG}, + number = {8}, + pmid = {22447377}, + month = {3}, + ss_id = {d67d1dbabc705e5b2d7cb675e8737ff249c1a88f}, + all_ss_ids = {['d67d1dbabc705e5b2d7cb675e8737ff249c1a88f']}, + gscites = {33}, +} + +@inproceedings{Boom12, + author = {R. van den Boom and M. Oei and S. Lafebre and L.J. Oostveen and A. Meijer and S. Steens and M. Prokop and B. van Ginneken and R. Manniesing}, + title = {Brain Tissue Segmentation in {4D CT} Images Using Voxel Classification}, + booktitle = MI, + year = {2012}, + volume = {8314}, + series = SPIE, + pages = {83144B-83144B-6}, + doi = {10.1117/12.911189}, + abstract = {A method is proposed to segment anatomical regions from 4D computer tomography (CT) cerebral patient data. The method consist of a three step voxel classification scheme, each step focusing on structures that are increasingly difficult to segment.
The first step classifies air and bone, the second step classifies vessels and the third step classifies white matter, gray matter and cerebrospinal fluid. The method has been applied to ten 4D CT cerebral patient data. A leave-one-out experiment showed consistent and accurate segmentation results.}, + file = {:./pdf/Boom12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + ss_id = {4d53bd36934e6f17b2e3eb58a8764d03340402e6}, + all_ss_ids = {['4d53bd36934e6f17b2e3eb58a8764d03340402e6']}, + gscites = {2}, +} + +@conference{Boom12a, + author = {R. van den Boom and M.T.H. Oei and L.J. Oostveen and S.J. Lafebre and B. van Ginneken and R. Manniesing and M. Prokop}, + title = {Effect of radiation exposure on quantitative evaluation of cerebral {CT} perfusion maps: results from a hybrid digital phantom}, + booktitle = ECR, + year = {2012}, + abstract = {Purpose: To use a hybrid digital phantom to study the effect of mAs on quantitative evaluation of cerebral CT perfusion scans. Material and Methods: The hybrid digital phantom consists of scans of a homogeneous skull phantom on which circular objects (10-15-20mm) are digitally superimposed. Tissue perfusion curves derived from patient data were superimposed on the background (white matter) and on the objects (grey matter). We tested a CTP sequence using 30 scans every 2s over 60s (80kV, 5mm thickness) derived from a 320-row CT scanner. The mAs values per CTP sequence were varied per scan (10-230mAs). A synthetic dataset without noise was the gold standard. CBF maps for each mAs setting and for the synthetic dataset were calculated using the ASIST CT program. Noise in CBF maps was measured in the background; contrast between background and objects was measured in circular ROIs with a diameter of half the object diameter. Size- and dose-dependence of absolute values and CNR in CBF maps was calculated. Results: Above 55 mAs per scan, the CBV values for the 20mm objects were within 12% of the gold standard. At 10 mAs CBF was overestimated by >100%. With smaller objects the estimation of CBF became less precise. CNR for CBF in 20mm decreased rapidly: CNR at 230 mAs was 4.2 and decreased to 2.4 at 100 mAs and 0.4 at 10 mAs. Conclusion: Absolute CBF values require sufficient dose to be correct: values at low dose are overestimated. In addition, CNR rapidly decreases with lower dose.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Boom13, + author = {R. van den Boom and M.T.H. Oei and L.J. Oostveen and H.O.A. Laue and B. van Ginneken and R. Manniesing and M. Prokop}, + title = {Effect of dose reduction on cerebral {CT} perfusion maps: results from a hybrid digital perfusion phantom}, + booktitle = ECR, + year = {2013}, + abstract = {Purpose: To quantitatively evaluate the effect of dose reduction of a standard head CTP protocol on the perfusion maps. Material and Methods: A hybrid digital perfusion phantom was used to simulate dose reduction in a CT perfusion acquisition. Noise patterns were obtained by scanning an epoxy filled human skull phantom on multiple mAs levels on a 320-row scanner and combined with tissue attenuation curves obtained from 5 patient data for grey matter (GM), white matter (WM), artery and vein. We simulated a real clinical CTP head protocol available on the Toshiba CT console (1 scan at 200 mAs, 13 scans at 100 mAs, 5 scans at 75 mAs and all at 80 kV) with a total duration of 55s (5mm slab thickness) and artificially reduced the dose from 100% to 10% in steps of 10%. 
CBF, CBV and MTT maps were calculated using PMA (ASIST-JAPAN). Mean values of GM and WM were evaluated. Results: Absolute CBF and CBV values stay within 20% of the values at original dose for CTP acquisitions at more than 50% of the original dose for both GM and WM. At 10% of the original dose, WM CBV values were overestimated with 24-107%, GM CBV values with 4-32%, and WM CBF values with 42-106%. GM CBF values were within 20% of the values at 100% of original dose. MTT values were less affected by dose reduction. Conclusion: Absolute CBF and CBV values require sufficient dose to be correct: values at low dose are overestimated.}, + optnote = {DIAG}, +} + +@article{Boom14, + author = {van den Boom, Rieneke and Manniesing, Rashindra and Oei, Marcel T. H. and van der Woude, Willem-Jan and Smit, Ewoud J. and Laue, Hendrik O. A. and van Ginneken, Bram and Prokop, Mathias}, + title = {A {4D} Digital Phantom for Patient-Specific Simulation of Brain {CT} Perfusion Protocols}, + journal = MP, + year = {2014}, + volume = {41}, + pages = {071907-1 -- 071907-9}, + doi = {10.1118/1.4881520}, + abstract = {Purpose Optimizing CT brain perfusion protocols is a challenge because of the complex interaction between image acquisition, calculation of perfusion data and patient hemodynamics. Several digital phantoms have been developed to avoid unnecessary patient exposure or suboptimum choice of parameters. We expand this idea by using realistic noise patterns and measured tissue attenuation curves representing patient-specific hemodynamics. The purpose of this work is to validate that this approach can realistically simulate mean perfusion values and noise on perfusion data for individual patients. Materials and Methods The proposed 4D digital phantom consists of three major components: 1) a definition of the spatial structure of various brain tissues within the phantom, 2) measured tissue attenuation curves and 3) measured noise patterns. Tissue attenuation curves were measured in patient data using regions of interest (ROIs) in gray matter and white matter. By assigning the tissue attenuation curves to the corresponding tissue curves within the phantom, patient-specific CTP acquisitions were retrospectively simulated. Noise patterns were acquired by repeatedly scanning an anthropomorphic skull phantom at various exposure settings. We selected 20 consecutive patients that were scanned for suspected ischemic stroke and constructed patient-specific 4D digital phantoms using the individual patients' hemodynamics. We compared the perfusion maps of the patient data with the digital phantom data. Agreement between phantom- and patient-derived data was determined for mean perfusion values and for standard deviation in the perfusion data using intra-class correlation coefficients (ICCs) and a linear fit. Results ICCs ranged between 0.92-0.99 for mean perfusion values. ICCs for the standard deviation in perfusion maps were between 0.86-0.93. Linear fitting yielded slope values between 0.90-1.06.
Conclusions A patient-specific 4D digital phantom allows for realistic simulation of mean values and standard deviation in perfusion data and makes it possible to retrospectively study how the interaction of patient hemodynamics and scan parameters affects CT perfusion values.}, + file = {Boom14.pdf:pdf\\Boom14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {24989385}, + month = {6}, + gsid = {1905732569550541986}, + gscites = {5}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/136581}, + ss_id = {608a5409afa9a5f535d5b3f4776b75ae92ec9e26}, + all_ss_ids = {['608a5409afa9a5f535d5b3f4776b75ae92ec9e26']}, +} + +@article{Bort21, + title = {Adversarial Attack Vulnerability of Medical Image Analysis Systems: Unexplored Factors}, + journal = MIA, + year = {2021}, + pmid = {34246850}, + pages = {102141}, + doi = {https://doi.org/10.1016/j.media.2021.102141}, + url = {https://arxiv.org/abs/2006.06356}, + author = {Bortsova, Gerda and Gonz\'{a}lez-Gonzalo, Cristina and Wetstein, Suzanne C. and Dubost, Florian and Katramados, Ioannis and Hogeweg, Laurens and Liefers, Bart and van Ginneken, Bram and Pluim, Josien P.W. and Veta, Mitko and S\'{a}nchez, Clara I. and de Bruijne, Marleen}, + abstract = {Adversarial attacks are considered a potentially serious security threat for machine learning systems. Medical image analysis (MedIA) systems have recently been argued to be vulnerable to adversarial attacks due to strong financial incentives and the associated technological infrastructure. In this paper, we study previously unexplored factors affecting adversarial attack vulnerability of deep learning MedIA systems in three medical domains: ophthalmology, radiology, and pathology. We focus on adversarial black-box settings, in which the attacker does not have full access to the target model and usually uses another model, commonly referred to as surrogate model, to craft adversarial examples that are then transferred to the target model. We consider this to be the most realistic scenario for MedIA systems. Firstly, we study the effect of weight initialization (pre-training on ImageNet or random initialization) on the transferability of adversarial attacks from the surrogate model to the target model, i.e., how effective attacks crafted using the surrogate model are on the target model. Secondly, we study the influence of differences in development (training and validation) data between target and surrogate models. We further study the interaction of weight initialization and data differences with differences in model architecture. All experiments were done with a perturbation degree tuned to ensure maximal transferability at minimal visual perceptibility of the attacks. Our experiments show that pre-training may dramatically increase the transferability of adversarial examples, even when the target and surrogate's architectures are different: the larger the performance gain using pre-training, the larger the transferability. Differences in the development data between target and surrogate models considerably decrease the performance of the attack; this decrease is further amplified by difference in the model architecture. We believe these factors should be considered when developing security-critical MedIA systems planned to be deployed in clinical practice. We recommend avoiding using only standard components, such as pre-trained architectures and publicly available datasets, as well as disclosure of design specifications, in addition to using adversarial defense methods. 
When evaluating the vulnerability of MedIA systems to adversarial attacks, various attack scenarios and target-surrogate differences should be simulated to achieve realistic robustness estimates. The code and all trained models used in our experiments are publicly available. + + (The first three authors contributed equally to this work.)}, + publisher = {Elsevier}, + optnote = {DIAG}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/238599}, + all_ss_ids = {['0f779d6ca748e8a73a78f2e1dadaba3161e80950']}, + gscites = {39}, +} + +@conference{Bosma21a, + author = {Joeran S. Bosma and Anindo Saha and Matin Hosseinzadeh and Henkjan Huisman}, + booktitle = RSNA, + title = {Augmenting AI with Automated Segmentation of Report Findings Applied to Prostate Cancer Detection in Biparametric MRI}, + abstract = {PURPOSE: To improve risk stratification of clinically significant prostate cancer (csPCa) with a 3D deep learning detection model, by augmenting the largest prostate MRI dataset reported in literature to date. MATERIALS AND METHODS: This retrospective study included 6,866 prostate bpMRI studies (axial T2- weighted, high b-value (>= 1400) and apparent diffusion coefficient scans) from 5,646 consecutive patients, with elevated PSA or clinical suspicion of PCa, between 2014-2020 for training. All studies were evaluated by experienced radiologists during clinical routine. The findings in 2,659 studies were manually segmented. We propose a hybrid framework to augment our detection model with more training data. After training with expert csPCa segmentations, we generate automatic cancer segmentations by masking our model predictions with the number of csPCa lesions automatically extracted from radiology reports. An augmented detection model is then trained jointly on manual and automatically generated csPCa segmentations. Models trained on expert or augmented cancer segmentations are evaluated on 296 visits of 296 consecutive men with elevated PSA or clinical suspicion of prostate cancer from an independent, external, centre. Ground truth is provided by MR-guided and TRUS-guided biopsies, and radical prostatectomy when available. Studies are considered positive if they have at least one Gleason grade group >= 2 lesion (csPCa). We trained our models on expert and augmented segmentations with 5-fold cross-validation and 5 restarts. Diagnostic performance was evaluated using the Area under the Receiver Operating Characteristic curve (AUROC). We performed a permutation test to obtain the statistical significance level. RESULTS: Augmenting the training set with automatic csPCa segmentations guided by report findings, sig- nificantly improves the case-level risk stratification of csPCa. For the external test set, the AUROC increased from 85.7 +- 2.5% to 89.0 +- 1.2% (P < 0.0001). On the external test set, experienced radiologists had a sensitivity of 91.4+-2.7% at 78.3+-2.4% specificity. At the same sensitivity, adding automatic segmentations improved the model's specificity from 46.8 +- 12.8% to 62.0 +- 7.1% (P < 0.0001). CONCLUSION: Augmenting training with automatic report driven segmentations of prostate cancer in bpMRI significantly improved the risk-stratification of the deep learning prostate cancer detection model. CLINICAL RELEVANCE: Risk stratification for clinically significant prostate cancer using prostate MRI is in- strumental to reduce over-treatment. 
Deep learning can assist, but requires expensive expert segmentations.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@mastersthesis{Bosma21b, + author = {Joeran S. Bosma and Anindo Saha and Matin Hosseinzadeh and Henkjan Huisman}, + title = {Augmenting AI with Automated Segmentation of Report Findings Applied to Prostate Cancer Detection in Biparametric MRI}, + abstract = {Prostate MRI interpreted by expert radiologists provides the best non-invasive diagnosis of clinically significant prostate cancer (csPCa), but is a limited resource. Deep learning has the potential to assist, but requires expensive expert annotations. We developed an automatic labelling procedure guided by radiology reports from clinical routine, capable of generating high quality voxel-level annotations. First, we parse the radiology report to extract the number of clinically significant findings (nsig), and then generate annotation by keeping the nsig most confident lesion candidates from a csPCa segmentation model. We included 7,756 prostate bpMRI studies (axial T2, high b-value and ADC scans), of which 3,052 were manually annotated and 4,704 were automatically annotated, resulting in the largest prostate MRI dataset reported in literature. We evaluated the automatic annotation procedure using the manual annotations: our score extraction correctly identified nsig for 99.3% of the visits, our prostate cancer segmentation model correctly localised 83.7 +- 3.0% of the lesions, and the automatic annotations of correctly localised lesions have good spatial congruence, with Dice similarity coefficients of 0.71+-0.15. Augmenting the training set with automatically labelled visits significantly improved prostate cancer detection performance, as evaluated on 296 visits from an independent, external, centre with ground truth provided by MR-guided and TRUS-guided biopsies, or radical prostatectomy when available. Patient-based diagnostic AUROC increased from 86.6 +- 1.8% to 88.7 +- 1.1% (P = 0.047) and lesion-based diagnostic pAUC increased from 1.940+-0.082 to 2.050+-0.031 (P = 0.016), with mean+-std. over 25 independent runs.}, + file = {Bosma21b.pdf:pdf\\Bosma21b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Radboud University Medical Center}, + year = {2021}, + journal = {Master thesis}, +} + +@article{Bosma23, + title = {Semi-supervised Learning with Report-guided Pseudo Labels for Deep Learning-based Prostate Cancer Detection Using Biparametric MRI}, + author = {Bosma, Joeran S. and Saha, Anindo and Hosseinzadeh, Matin and Slootweg, Ivan and de Rooij, Maarten and Huisman, Henkjan}, + journal = {Radiology: Artificial Intelligence}, + optnote = {DIAG, RADIOLOGY}, + doi = {10.1148/ryai.230031}, + url = {https://pubs.rsna.org/doi/full/10.1148/ryai.230031}, + pages = {e230031}, + year = {2023}, + publisher = {Radiological Society of North America}, + ss_id = {99f201aa5e15e2b52ca9090bae355dfca22577be}, + all_ss_ids = {['99f201aa5e15e2b52ca9090bae355dfca22577be']}, + gscites = {4}, +} + +@inproceedings{Bosma23a, + author = {Joeran S.
Bosma and Dr{\'e} Peeters and Nat{\'a}lia Alves and Anindo Saha and Zaigham Saghir and Colin Jacobs and Henkjan Huisman}, + booktitle = {Medical Imaging with Deep Learning}, + title = {Reproducibility of Training Deep Learning Models for Medical Image Analysis}, + url = {https://openreview.net/forum?id=MR01DcGST9}, + abstract = {Performance of deep learning algorithms varies due to their development data and training method, but also due to several stochastic processes during training. Due to these random factors, a single training run may not accurately reflect the performance of a given training method. Statistical comparisons in literature between different deep learning training methods typically ignore this performance variation between training runs and incorrectly claim significance of changes in training method. We hypothesize that the impact of such performance variation is substantial, such that it may invalidate biomedical competition leaderboards and some scientific papers. To test this, we investigate the reproducibility of training deep learning algorithms for medical image analysis. We repeated training runs from prior scientific studies: three diagnostic tasks (pancreatic cancer detection in CT, clinically significant prostate cancer detection in MRI, and lung nodule malignancy risk estimation in low-dose CT) and two organ segmentation tasks (pancreas segmentation in CT and prostate segmentation in MRI). A previously published top-performing algorithm for each task was trained multiple times to determine the variance in model performance. For all three diagnostic algorithms, performance variation from retraining was significant compared to data variance. Statistically comparing independently trained algorithms from the same training method using the same dataset should follow the null hypothesis, but we observed claimed significance with a p-value below 0.05 in + of comparisons with conventional testing (paired bootstrapping). We conclude that variance in model performance due to retraining is substantial and should be accounted for.}, + file = {Bosma23a.pdf:pdf\\Bosma23a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + +@article{Boss01, + author = {E. A. Boss and L. F. Massuger and L. A. Pop and L. C. Verhoef and H. J. Huisman and H. Boonstra and J. O. Barentsz}, + title = {Post-radiotherapy contrast enhancement changes in fast dynamic {MRI} of cervical carcinoma}, + journal = JMRI, + year = {2001}, + volume = {13}, + pages = {600--606}, + doi = {10.1002/jmri.1084}, + abstract = {{T}his pilot study determines fast dynamic gadolinium enhanced {MRI} contrast enhancement parameters (onset of enhancement and time to peak enhancement) before and after radiotherapy in 10 cervical carcinoma patients. {B}efore radiotherapy, onset of enhancement and time to peak enhancement were early, with a median of 4.5 and 5.2 seconds, respectively. {H}igh-grade tumors showed early enhancement, compared with low-grade. {A}fter radiotherapy, contrast enhancement patterns differed. {I}n survivors, onset of enhancement after radiotherapy was later than before radiotherapy. {I}n non-survivors, onset of enhancement after radiotherapy was still early. {T}he median difference in onset of enhancement before and after radiotherapy in survivors and non-survivors was an increase of 3.2 and a decrease of 1.1 seconds, respectively. 
{E}arly onset of enhancement after radiotherapy was a better predictor for survival than a high-signal intensity zone on post radiotherapy unenhanced {T}1/{T}2-weighted {MRI}. {I}t is concluded that enhancement parameters from fast dynamic {G}d-enhanced {MR} images can provide additional functional information with regard to tumor vascularization, and may have prognostic significance. {I}t complements clinical examination and unenhanced {MRI} in determining the effectiveness of radiotherapy treatment in cervical carcinoma. {F}uture studies will focus on the clinical utility and improvements of the estimation of contrast-enhanced parameters with this new technique.}, + file = {Boss01.pdf:pdf\\Boss01.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {11276105}, + gsid = {1003186607531091259}, + gscites = {49}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122189}, + ss_id = {9836dc01306350e2f0d22817e0c67b07e7858fa8}, + all_ss_ids = {['9836dc01306350e2f0d22817e0c67b07e7858fa8']}, +} + +@mastersthesis{Botr22, + author = {Michel Botros}, + title = {Automated Detection and Assessment of Vertebral Fractures in CT Images}, + abstract = {Osteoporotic vertebral fractures are associated with an increased mortality, morbidity, a decrease in quality of life and are strong predictors of future osteoporotic fractures. Many guidelines on management of osteoporosis recommend pharmacological intervention when vertebral fractures are present. However, they remain largely underdiagnosed, due to frequently occurring asymptomatic and often not being reported by clinicians when present in computed tomography (CT) scans. The aim of this study is to explore the use of computer-aided diagnosis (CAD) systems to assist clinicians with detection and assessment of vertebral fractures based on CT images, thereby enabling preventive treatment and improving the outcome for osteoporosis patients. In the first part of this work we developed an automated method that performs Genant's semiquantitative method, one of the most widely used methods for assessment of vertebral fractures today. Results on a test set of 12 patients (116 vertebrae) show that the method is able to grade vertebral fractures with substantial agreement with two radiologists, with a quadratic weighted kappa value of 0.92. In the second part of this work we address the most notable limitations of Genant's semiquantitative method. We developed an automated scoring method that aims to bring both convenience and consistency to the field. The scoring method compares the shape of the vertebral body with the expected shape to calculate an abnormality score, which quantifies collapse of the vertebra and can be used to identify fractures. 
We again evaluated the scoring method on a test set of 12 patients (116 vertebrae) and show that the abnormality score corresponds well with assessment of two radiologists, with a quadratic weighted kappa value of 0.85 while addressing the main limitations of Genant's semiquantitative method.}, + file = {Botr22.pdf:pdf\\Botr22.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + year = {2022}, + journal = {Master thesis}, +} + +@article{Boue18, + author = {Boueiz, Adel and Chang, Yale and Cho, Michael H and Washko, George R and San Jose Estepar, Raul and Bowler, Russell P and Crapo, James D and DeMeo, Dawn L and Dy, Jennifer G and Silverman, Edwin K and Castaldi, Peter J and {COPDGene Investigators}}, + title = {Lobar Emphysema Distribution Is Associated With 5-Year Radiological Disease Progression}, + journal = Chest, + year = {2018}, + volume = {153}, + number = {1}, + pages = {65-76}, + doi = {10.1016/j.chest.2017.09.022}, + abstract = {Emphysema has considerable variability in its regional distribution. Craniocaudal emphysema distribution is an important predictor of the response to lung volume reduction. However, there is little consensus regarding how to define upper lobe-predominant and lower lobe-predominant emphysema subtypes. Consequently, the clinical and genetic associations with these subtypes are poorly characterized. We sought to identify subgroups characterized by upper-lobe or lower-lobe emphysema predominance and comparable amounts of total emphysema by analyzing data from 9,210 smokers without alpha-1-antitrypsin deficiency in the Genetic Epidemiology of COPD (COPDGene) cohort. CT densitometric emphysema was measured in each lung lobe. Random forest clustering was applied to lobar emphysema variables after regressing out the effects of total emphysema. Clusters were tested for association with clinical and imaging outcomes at baseline and at 5-year follow-up. Their associations with genetic variants were also compared. Three clusters were identified: minimal emphysema (n = 1,312), upper lobe-predominant emphysema (n = 905), and lower lobe-predominant emphysema (n = 796). Despite a similar amount of total emphysema, the lower-lobe group had more severe airflow obstruction at baseline and higher rates of metabolic syndrome compared with subjects with upper-lobe predominance. The group with upper-lobe predominance had greater 5-year progression of emphysema, gas trapping, and dyspnea. Differential associations with known COPD genetic risk variants were noted. Subgroups of smokers defined by upper-lobe or lower-lobe emphysema predominance exhibit different functional and radiological disease progression rates, and the upper-lobe predominant subtype shows evidence of association with known COPD genetic risk variants. 
These subgroups may be useful in the development of personalized treatments for COPD.}, + file = {Boue18.pdf:pdf\\Boue18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28943279}, + month = {1}, + gsid = {17459489425823189277}, + gscites = {44}, + ss_id = {51081160a16f05064f518aa0a5af1a5b075f43e3}, + all_ss_ids = {['51081160a16f05064f518aa0a5af1a5b075f43e3']}, +} + +@conference{Boul22a, + author = {Luuk Boulogne and Bram van Ginneken}, + booktitle = RSNA, + title = {Automatically Generated CT Severity Scores for COVID-19 Predict Death or Intubation at 1-Month Follow-Up}, + abstract = {PURPOSE: To evaluate the ability of CORADS-AI, an automatic system that was originally developed for scoring the presence and current extent of a COVID19 infection from chest Computed Tomography (CT), to predict death or intubation after one month for positive COVID-19 patients. METHODS AND MATERIALS: CORADS-AI was developed in a previous study using CT scans from COVID-19 patients in the Netherlands. From a CT scan, CORADS-AI automatically segments each pulmonary lobe and produces a CT Severity Score (CTSS) at the lobe and patient level. This score is based on the percentage of affected tissue and has been used in routine clinical practice. We applied CORADS-AI to all 1205 patients that were involved in the STOIC study, had a positive COVID-19 RT-PCR result, and for which a CT scan was publicly available. 301 of them had died or had to be intubated at 1-month follow-up. We applied logistic regression with 5-fold cross validation on patient sex, age, and the patient level CTSS output of CORADS-AI to predict death or intubation after one month. We compared our method with the logistic regression model from the original STOIC study. This model received age, sex, several clinical variables, and manual CT annotations for lung disease extent as input. It was developed with all 4238 patients from the STOIC study that were positive for COVID-19 both according to RT-PCR testing and CT reading. RESULTS: Our model obtained an AUC of 0.72+-0.02 (mean+-std. dev.) for predicting death or intubation after one month. When using solely the patient level CTSS output, we obtained an AUC of 0.68+-0.03. In comparison, the original STOIC study reported an AUC of their model of 0.69 (95% CI: 0.67, 0.71). CONCLUSIONS: We showed that CORADS-AI can predict death or intubation after one month for positive COVID-19 patients. Adding age and sex information to the model improved its results. The performance was comparable to that of the model developed in the original STOIC study, which used additional clinical variables and manual CT annotations. This comparison should be interpreted carefully, since that model was evaluated on a different subset of the STOIC cohort. CLINICAL RELEVANCE/APPLICATION: We showed the potential of CORADS-AI, of which the output format is already used in clinical practice, for aiding radiologists in predicting the course of a COVID-19 infection.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@inproceedings{Boze12, + author = {J. Bozek and M. G. J. Kallenberg and M. Grgic and N. 
Karssemeijer}, + title = {Comparison of Lesion Size Using Area and Volume in Full Field Digital Mammograms}, + booktitle = {IWDM '12: Proceedings of the 11th International Workshop on Breast Imaging}, + year = {2012}, + volume = {7361}, + series = LNCS, + pages = {96--103}, + doi = {10.1007/978-3-642-31271-7_13}, + abstract = {The size of a lesion is a feature often used in computer-aided detection systems for classification between benign and malignant lesions. However, size of a lesion presented by its area might not be as reliable as volume of a lesion. Volume is more independent of the view (CC or MLO) since it represents three dimensional information, whereas area refers only to the projection of a lesion on a two dimensional plane. Furthermore, volume might be better than area for comparing lesion size in two consecutive exams and for evaluating temporal change to distinguish benign and malignant lesions. We have used volumetric breast density estimation in digital mammograms to obtain thickness of dense tissue in regions of interest in order to compute volume of lesions. The dataset consisted of 382 mammogram pairs in CC and MLO views and 120 mammogram pairs for temporal analysis. The obtained correlation coefficients between the lesion size in the CC and MLO views were 0.70 (0.64-0.76) and 0.83 (0.79-0.86) for area and volume, respectively. Two- tailed z-test showed a significant difference between two correlation coefficients (p=0.0001). The usage of area and volume in temporal analysis of mammograms has been evaluated using ROC analysis. The obtained values of the area under the curve (AUC) were 0.73 and 0.75 for area and volume, respectively. Although a higher AUC value for volume was found, this difference was not significant (p=0.16).}, + file = {Boze12.pdf:pdf/Boze12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {3825556773874521565}, + gscites = {2}, + ss_id = {52f2a5c63c6baad0b75adbd0667f6d63f0e7b134}, + all_ss_ids = {['52f2a5c63c6baad0b75adbd0667f6d63f0e7b134']}, +} + +@article{Boze14, + author = {Bozek, Jelena and Kallenberg, Michiel and Grgic, Mislav and Karssemeijer, Nico}, + title = {Use of volumetric features for temporal comparison of mass lesions in full field digital mammograms}, + journal = MP, + year = {2014}, + volume = {41}, + pages = {021902}, + doi = {10.1118/1.4860956}, + abstract = {Temporal comparison of lesions might improve classification between benign and malignant lesions in full-field digital mammograms (FFDM). The authors compare the use of volumetric features for lesion classification, which are computed from dense tissue thickness maps, to the use of mammographic lesion area. Use of dense tissue thickness maps for lesion characterization is advantageous, since it results in lesion features that are invariant to acquisition parameters.The dataset used in the analysis consisted of 60 temporal mammogram pairs comprising 120 mediolateral oblique or craniocaudal views with a total of 65 lesions, of which 41 were benign and 24 malignant. The authors analyzed the performance of four volumetric features, area, and four other commonly used features obtained from temporal mammogram pairs, current mammograms, and prior mammograms. The authors evaluated the individual performance of all features and of different feature sets. 
The authors used linear discriminant analysis with leave-one-out cross validation to classify different feature sets.Volumetric features from temporal mammogram pairs achieved the best individual performance, as measured by the area under the receiver operating characteristic curve (Az value). Volume change (Az = 0.88) achieved higher Az value than projected lesion area change (Az = 0.78) in the temporal comparison of lesions. Best performance was achieved with a set that consisted of a set of features extracted from the current exam combined with four volumetric features representing changes with respect to the prior mammogram (Az = 0.90). This was significantly better (p = 0.005) than the performance obtained using features from the current exam only (Az = 0.77).Volumetric features from temporal mammogram pairs combined with features from the single exam significantly improve discrimination of benign and malignant lesions in FFDM mammograms compared to using only single exam features. In the comparison with prior mammograms, use of volumetric change may lead to better performance than use of lesion area change.}, + file = {Boze14.pdf:pdf\\Boze14.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {24506623}, + month = {1}, + gsid = {13159064489659255149}, + gscites = {8}, + ss_id = {d300479cd834d7c2a32ef4db2825903170d74ce5}, + all_ss_ids = {['d300479cd834d7c2a32ef4db2825903170d74ce5']}, +} + +@article{Bozo16, + author = {Bozovic, G. and Steen, S. and Sj{\"o}berg, T. and Schaefer-Prokop, C. and Verschakelen, J. and Liao, Q. and H{\"o}glund, P. and Siemund, R. and Bj{\"o}rkman-Burtscher, I. M.}, + title = {Circulation stabilizing therapy and pulmonary high-resolution computed tomography in a porcine brain-dead model}, + journal = ACTANASCA, + year = {2016}, + volume = {60}, + pages = {93--102}, + doi = {10.1111/aas.12595}, + abstract = {Currently 80\% of donor lungs are not accepted for transplantation, often due to fluid overload. Our aim was to investigate if forced fluid infusion may be replaced by a new pharmacological therapy to stabilize circulation after brain death in an animal model, and to assess therapy effects on lung function and morphology trough blood gas parameters and state-of-the-art High-resolution CT (HRCT).Brain death was caused by surgical decapitation. To maintain mean aortic pressure > 60 mmHg, pigs were treated with forced electrolyte solution infusion (GI; n = 6) or the pharmacological therapy (GII; n = 11). GIII (n = 11) were non-decapitated controls. Lung function was investigated with blood gases and lung morphology with HRCT.GI pigs became circulatory instable 4-6 h after brain death in spite of forced fluid infusion, five pigs showed moderate to severe pulmonary edema on HRCT and median final PaO2 /FiO2 was 29 kPa (Q1; Q3; range 26; 40; 17-76). GII and GIII were circulatory stable (mean aortic pressure > 80 mmHg) and median final PaO2 /FiO2 after 24 h was 72 kPa (Q1; Q3; range 64; 76; 53-91) (GII) and 66 kPa (55; 78; 43-90) (GIII). On HRCT, only two pigs in GII had mild pulmonary edema and none in GIII. More than 50\% of HRCT exams revealed unexpected lung disease even in spite of PaO2 /FiO2 > 40 kPa.Pharmacological therapy but not forced fluid infusion prevented circulatory collapse and extensive HRCT verified pulmonary edema after acute brain death. 
HRCT was useful to evaluate lung morphology and revealed substantial occult parenchymal changes justifying efforts toward a more intense use of HRCT in the pre-transplant evaluation.}, + file = {Bozo16.pdf:pdf\\Bozo16.pdf:PDF}, + optnote = {DIAG}, + pmid = {26251260}, + month = {8}, + ss_id = {15c5713c4b4c87c587b2df572c32fd121cf0d081}, + all_ss_ids = {['15c5713c4b4c87c587b2df572c32fd121cf0d081']}, + gscites = {6}, +} + +@article{Bozo17, + author = {Bozovic, Gracijela and Adlercreutz, Catharina and H\"{o}glund, Peter and Bj\"{o}rkman-Burtscher, Isabella and Reinstrup, Peter and Ingemansson, Richard and Schaefer-Prokop, Cornelia and Siemund, Roger and Geijer, Mats}, + title = {Imaging of the Lungs in Organ Donors and its Clinical Relevance}, + doi = {10.1097/rti.0000000000000255}, + year = {2017}, + abstract = { + Purpose: + The aim of the study was to retrospectively evaluate the diagnostic imaging that potential lung donors undergo, the reader variability of image interpretation and its relevance for donation, and the potential information gained from imaging studies not primarily intended for lung evaluation but partially including them. + + + Materials and Methods: + Bedside chest radiography and computed tomography (CT), completely or incompletely including the lungs, of 110 brain-dead potential organ donors in a single institution during 2007 to 2014 were reviewed from a donation perspective. Two chest radiologists in consensus analyzed catheters and cardiovascular, parenchymal, and pleural findings. Clinical reports and study review were compared for substantial differences in findings that could have led to a treatment change, triggered additional examinations such as bronchoscopy, or were considered important for donation. + + + Results: + Among 136 bedside chest radiographs, no differences between clinical reports and study reviews were found in 37 (27%), minor differences were found in 28 (21%), and substantial differences were found in 71 (52%) examinations (P<0.0001). In 31 of 42 (74%) complete or incomplete CT examinations, 50 of 74 findings with relevance for lung donation were not primarily reported (P<0.0001). + + + Conclusions: + The majority of donor patients undergo only chest radiography. A targeted imaging review of abnormalities affecting the decision to use donor lungs may be useful in the preoperative stage. With a targeted list, substantial changes were made from initial clinical interpretations. CT can provide valuable information on donor lung pathology, even if the lungs are only partially imaged. + }, + url = {http://dx.doi.org/10.1097/RTI.0000000000000255}, + file = {Bozo17.pdf:pdf\Bozo17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Thoracic Imaging}, + citation-count = {7}, + automatic = {yes}, + pages = {107-114}, + volume = {32}, +} + +@article{Brak00, + author = {G. M. te Brake and N. Karssemeijer and J. H. Hendriks}, + title = {An automatic method to discriminate malignant masses from normal tissue in digital mammograms}, + journal = PMB, + year = {2000}, + volume = {45}, + pages = {2843--2857}, + file = {Brak00.pdf:pdf\\Brak00.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {11049175}, + month = {9}, + gsid = {11284350184411772588}, + gscites = {151}, + ss_id = {29dab22d41369fe0af3dd6bca85e0c5aa6859ded}, + all_ss_ids = {['29dab22d41369fe0af3dd6bca85e0c5aa6859ded']}, +} + +@phdthesis{Brak00a, + author = {G. M. 
te Brake}, + title = {Computer Aided Detection of Masses in Digital Mammograms}, + year = {2000}, + url = {http://repository.ubn.ru.nl/handle/2066/18825}, + abstract = {This thesis describes the components of an automated detection method for masses and architectural distortions, signs of infiltrating cancer. Masses and architectural distortions can be very subtle and are frequently missed by radiologists. Because the success of treatment of breast cancer depends largely on the stage of the tumor at the time of detection, early detection is very important. Masses have two main image characteristics that can be used for detection: a radiating pattern of spicules and a mass. Sometimes both characteristics are present, but often only spicules or just a faint mass is visible. To achieve high sensitivity on the whole spectrum of possible appearances of masses and distortions, detection of both characteristics is essential. Chapter 2 describes a sensitive method to detect radiating spicule patterns using statistical analysis of line orientations. However, many masses do not show clear spiculation, and must be detected by their mass. Chapter 3 describes how the spicule detection method can be transformed to a mass detection method. Instead of a map of line orientations, a map of gradient orientations is computed. Statistical analysis of this orientation map was used to detect masses. A large set of mammograms taken from the Nijmegen screening program was used to test a detection method based on spicules, a detection method based on masses, and a detection method that detects both spicules and masses. Best results were obtained when both the spiculation and mass features were used. Of all masses, 85% was detected at a specificity level of 1 false positive per image, 55% at 1 false positive per 10 images. The diameter of masses in mammograms varies from 5 mm to 5 cm, inspiring many research groups to use multi-scale approaches to detect masses. However, the benefit of applying their method in a multi-scale way is almost never compared to a single-scale version of their method. In Chapter 4, the mass detection method of Chapter 3 and two popular pattern recognition techniques to detect bright areas were applied in a single and multi-scale way to examine the possible gain of multi-scale detection. It appeared that the multi-scale versions of the mass detection method had similar performance as a single-scale approach if this scale was chosen appropriately. Of course, when the scale for the single-scale approach was chosen sub-optimally the performance was lower. This study shows that it is not self-evident that a multi-scale mass detection method gives better results than a single-scale version of the method. A multi-scale method is sensitive for masses over a range of sizes, but is also sensitive for false positives of different sizes. The specificity level that was achieved by the mass detection method described in Chapter 3 is not high enough for successful application in the clinic or in screening. To improve the specificity, a second stage was designed, that classifies each detected region based on regional criteria like contrast, shape, and texture. Based on such features, many normal tissue regions could be discriminated from real masses. To compute these features, a segmentation of the suspicious regions is required. In Chapter 5, a method is described to segment masses using a discrete dynamic contour model.
For each region a size estimate was available of the suspect region, and an appropriate initial starting contour was created that was fitted to the edge of the region. The method proved to be fast and robust, and outperformed a region growing approach. In Chapter 6, the contour model was used to segment regions that were found by the mass detection method of Chapter 3. A number of features were implemented that capture image characteristics that radiologists use to determine whether a suspicious region is a mass or dense normal tissue. Classification using these regional features gave a large reduction in false positives at each desired sensitivity level. On two large datasets a relatively high sensitivity was achieved even at high specificity levels. In Chapter 7, + all segmentation methods of Chapter 5 were used to segment and classify the detected regions. The adaptive discrete contour method that was used in Chapter 6 and the preprocessed probabilistic region growing method gave similar results. The experiments of Chapter 8 showed that a substantial number of the tumors that were missed by radiologists in a screening program despite double reading, were detected by the mass detection method of Chapter 3. Successful detection of missed tumors indicates that a CAD system can be a useful tool for radiologists if the prompts are sufficiently specific. Chapter 9 describes two experiments that were done using a commercially available prompting device. A large experiment showed that the specificity of radiologists does not decrease when they are prompted. This is an important result because some fear that the large number of false positive prompts of a CAD system might increases the recall rate. Results of a second experiment indicated that radiologists have much more difficulty with interpreting suspicious signs than is generally believed. It seems that many screening errors that are thought to be due to oversight, are due to misinterpretation. Both experiments showed large differences in the performance levels of radiologists. Detection of masses is reaching a level of performance where successful use in screening or clinical practice is possible. Approximately 75% of all masses are detected in at least one view at a specificity level of 0.1 false positives per image. Improvement of the mass and spicule features is still possible, and more sophisticated features can be used to remove false positives. Because the data sets that are used for training are becoming larger, better classifiers can be produced. A considerable improvement can be expected when suspicious regions in one view are correlated to suspicious regions in the other view. Many strong false positives are only present in one of the views, real lesions are most often visible in both. Together with asymmetry features and a method to detect temporal changes in mammograms, another considerable reduction in false positives seems possible}, + copromotor = {N. Karssemeijer}, + file = {Brak00a.pdf:pdf\\Brak00a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {C. C. A. M. Gielen}, + school = {Radboud University, Nijmegen}, + gsid = {13650896233409932468}, + gscites = {9}, + journal = {PhD thesis}, +} + +@article{Brak01, + author = {G. M. te Brake and N. 
Karssemeijer}, + title = {Segmentation of suspicious densities in digital mammograms}, + journal = MP, + year = {2001}, + volume = {28}, + pages = {259--266}, + doi = {10.1118/1.1339884}, + abstract = {{S}tate-of-the-art algorithms for detection of masses in mammograms are very sensitive but they also detect many normal regions with slightly suspicious features. {B}ased on segmentations of detected regions, shape and intensity features can be computed that discriminate between normal and abnormal regions. {T}hese features can be used to discard false positive detections and hence improve the specificity of the detection method. {I}n this work two different methods to segment suspect regions were examined. {A} number of different implementations of a region growing method were compared to a discrete dynamic contour method. {B}oth methods were applied to a consecutive data set of 132 mammograms containing masses and architectural distortions, taken from the {D}utch screening program. {E}valuation of the performance of the methods was done in two different ways. {I}n the first experiment, the segmentations of masses were compared to annotations made by the radiologist. {I}n the second experiment, a number of features were computed for all segmented areas, normal and abnormal, based on which regions were classified with a neural network. {T}he most sophisticated region growing method and the method using the dynamic contour model had a similar performance when evaluation was based on the overlap of the annotations. {T}he second experiment showed that the contours generated by the discrete dynamic contour model were more suited for computation of discriminating features. {C}ontrast features were especially useful to improve the performance of the detection method.}, + file = {Brak01.pdf:pdf\\Brak01.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {11243351}, + month = {2}, + gsid = {15867724720093407015}, + gscites = {109}, + ss_id = {e7aa02db15ac1c46249be1231b3fa693792844a6}, + all_ss_ids = {['e7aa02db15ac1c46249be1231b3fa693792844a6']}, +} + +@article{Brak98, + author = {G. M. te Brake and N. Karssemeijer and J. H. Hendriks}, + title = {Automated detection of breast carcinomas not detected in a screening program}, + journal = Radiology, + year = {1998}, + volume = {207}, + pages = {465--471}, + file = {Brak98.pdf:pdf/Brak98.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {9577496}, + month = {5}, + gsid = {3316279620693133833}, + gscites = {106}, + ss_id = {a0a0fcedd92107cc3f115f323ec0650012654994}, + all_ss_ids = {['a0a0fcedd92107cc3f115f323ec0650012654994']}, +} + +@article{Brak99, + author = {G. M. te Brake and N. Karssemeijer}, + title = {Single and multiscale detection of masses in digital mammograms}, + journal = TMI, + year = {1999}, + volume = {18}, + pages = {628--639}, + doi = {10.1109/42.790462}, + file = {Brak99.pdf:pdf\\Brak99.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {10504096}, + month = {7}, + gsid = {9014789287224804562}, + gscites = {161}, + ss_id = {b301e9c53ffbfa0df00feed1b50c6438d16c0c08}, + all_ss_ids = {['b301e9c53ffbfa0df00feed1b50c6438d16c0c08']}, +} + +@inproceedings{Brak99b, + author = {G. M. te Brake and M. J. Stoutjesdijk and N. 
Karssemeijer}, + title = {Discrete dynamic contour model for mass segmentation in digital mammograms}, + booktitle = MI, + year = {1999}, + volume = {3661}, + series = SPIE, + pages = {911-919}, + doi = {10.1117/12.348651}, + url = {http://link.aip.org/link/?PSI/3661/911/1}, + abstract = {In recent years, deformable models have become popular in the field of medical image analysis. We have applied a member of this family, a discrete dynamic contour model, to the task of mass segmentation in digital mammograms. The method was compared to a recently published region growing method on a dataset of 214 mammograms. Both methods need a starting point. In a first experiment, for each mass the center of gravity of the annotation was used. In a second experiment, a pixel-based initial detection step was used to generate starting points. The latter starting points are often located less proper for good segmentation, requiring the methods to be robust. The performance was measured using an overlap criterion based on the annotation made by an experienced radiologist and the segmented region. The discrete contour model proved to be a robust method to segment masses, and outperformed a probabilistic region growing method. However, just like for the region growing methods, a good choice for the seed point appeared to be of great importance.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {5415364043687958742}, + gscites = {9}, + ss_id = {e08c326631cc5359840d77beb1de4f6927a30390}, + all_ss_ids = {['e08c326631cc5359840d77beb1de4f6927a30390']}, +} + +@article{Bran11, + author = {S. Brandt and G. Karemore and N. Karssemeijer and M. Nielsen}, + title = {An Anatomically Oriented Breast Coordinate System for Mammogram Analysis}, + journal = TMI, + year = {2011}, + volume = {30}, + pages = {1841--1851}, + doi = {10.1109/TMI.2011.2155082}, + abstract = {We have developed a breast coordinate system that is based on breast anatomy to register female breasts into a common coordinate frame in 2D mediolateral (ML) or mediolateral oblique (MLO) view mammograms. The breasts are registered according to the location of the pectoral muscle and the nipple and the shape of the breast boundary because these are the most robust features independent of the breast size and shape. On the basis of these landmarks, we have constructed a nonlinear mapping between the parameter frame and the breast region in the mammogram. This mapping makes it possible to identify the corresponding positions and orientations among all of the ML or MLO mammograms, which facilitates an implicit use of the registration, i.e., no explicit image warping is needed. We additionally show how the coordinate transform can be used to extract Gaussian derivative features so that the feature positions and orientations are registered and extracted without non-linearly deforming the images. We use the proposed breast coordinate transform in a cross-sectional breast cancer risk assessment study of 490 women, in which we attempt to learn breast cancer risk factors from mammograms that were taken prior to when the breast cancer became visible to a radiologist. The coordinate system provides both the relative position and orientation information on the breast region from which the features are derived. In addition, the coordinate system can be used in temporal studies to pin-point anatomically equivalent locations between the mammograms of each woman and among the mammograms of all of the women in the study. 
The results of the cross-sectional study show that the classification into cancer and control groups can be improved by using the new coordinate system, compared to other systems evaluated. Comparisons were performed using the area-under-the-receiver-operating-characteristic-curve (AUC) score. In general, the new coordinate system makes an accurate anatomical registration of breasts possible, which suggests its wide applicability wherever 2D mammogram registration is required.}, + file = {Bran11.pdf:pdf/Bran11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {21609879}, + month = {10}, + gsid = {18263174567335478043}, + gscites = {28}, + ss_id = {aaa9f7fc2e216f4f0ce40ef000d1ac9ed3a991e9}, + all_ss_ids = {['aaa9f7fc2e216f4f0ce40ef000d1ac9ed3a991e9']}, +} + +@inproceedings{Brec08, + author = {Brecheisen, R. and Vilanova Bartroli, A. and Platel, B. and ter Haar Romeny, B. M.}, + title = {Flexible GPU-Based Multi-Volume Ray-Casting}, + booktitle = {Vision Modeling and Visualization}, + year = {2008}, + pages = {303--312}, + abstract = {Using combinations of different volumetric datasets is becoming more common in scientific applications, especially medical environments such as neurosurgery where multiple imaging modalities are required to provide insight to both anatomical and functional structures in the brain. Such data sets are usually in different orientations and have different resolutions. Furthermore, it is often interesting, e.g. for surgical planning or intraoperative applications to add the visualization of foreign objects (e.g., surgical tools, reference grids, 3D measurement widgets). We propose a flexible framework based on GPU-accelerated ray-casting and depth peeling, that allows volume rendering of multiple, arbitrarily positioned volumes intersected with opaque or translucent geometric objects. These objects can also be used as convex or concave clipping shapes. We consider the main contribution of our work to be the flexible combination of the above-mentioned features in a single framework. As such, it can serve as a basis for neurosurgery applications but also for other fields where multi-volume rendering is important.}, + file = {Brec08.pdf:pdf/Brec08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6251607912237041435}, + gscites = {33}, +} + +@article{Brec09, + author = {Ralph Brecheisen and Bram Platel and Anna Vilanova and B. M. ter Haar Romeny}, + title = {Parameter sensitivity visualization for {DTI} fiber tracking}, + journal = TVCG, + year = {2009}, + volume = {15}, + pages = {1441--1448}, + doi = {10.1109/TVCG.2009.170}, + abstract = {Fiber tracking of Diffusion Tensor Imaging (DTI) data offers a unique insight into the three-dimensional organisation of white matter structures in the living brain. However, fiber tracking algorithms require a number of user-defined input parameters that strongly affect the output results. Usually the fiber tracking parameters are set once and are then re-used for several patient datasets. However, the stability of the chosen parameters is not evaluated and a small change in the parameter values can give very different results. The user remains completely unaware of such effects. Furthermore, it is difficult to reproduce output results between different users. We propose a visualization tool that allows the user to visually explore how small variations in parameter values affect the output of fiber tracking.
With this knowledge the user can not only assess the stability of commonly used parameter values but also evaluate in a more reliable way the output results between different patients. Existing tools do not provide such information. A small user evaluation of our tool has been done to show the potential of the technique.}, + file = {Brec09.pdf:pdf/Brec09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {19834219}, + month = {11}, + gsid = {16104233345726047543}, + gscites = {66}, +} + +@article{Brec12, + author = {R. Brecheisen and B. Platel and B. M. ter Haar Romeny and A. Vilanova}, + title = {Illustrative uncertainty visualization of {DTI} fiber pathways}, + journal = VISCOM, + year = {2013}, + volume = {29}, + pages = {297-309}, + doi = {10.1007/s00371-012-0733-9}, + abstract = {Diffusion Tensor Imaging (DTI) and fiber tracking provide unique insight into the 3D structure of fibrous tissues in the brain. However, the output of fiber tracking contains a significant amount of uncertainty accumulated in the various steps of the processing pipeline. Existing DTI visualization methods do not present these uncertainties to the end-user. This creates a false impression of precision and accuracy that can have serious consequences in applications that rely heavily on risk assessment and decision-making, such as neurosurgery. On the other hand, adding uncertainty to an already complex visualization can easily lead to information overload and visual clutter. In this work, we propose Illustrative Confidence Intervals to reduce the complexity of the visualization and present only those aspects of uncertainty that are of interest to the user. We look specifically at the uncertainty in fiber shape due to noise and modeling errors. To demonstrate the flexibility of our framework, we compute this uncertainty in two different ways, based on (1) fiber distance and (2) the probability of a fiber connection between two brain regions. We provide the user with interactive tools to define multiple confidence intervals, specify visual styles and explore the uncertainty with a Focus+Context approach. Finally, we have conducted a user evaluation with three neurosurgeons to evaluate the added value of our visualization.}, + file = {Brec12.pdf:pdf\\Brec12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {13640138231313490925}, + gscites = {21}, +} + +@article{Breu14, + author = {Breuninger, Marianne and van Ginneken, Bram and Philipsen, Rick H H M. and Mhimbira, Francis and Hella, Jerry J. and Lwilla, Fred and van den Hombergh, Jan and Ross, Amanda and Jugheli, Levan and Wagner, Dirk and Reither, Klaus}, + title = {Diagnostic accuracy of computer-aided detection of pulmonary tuberculosis in chest radiographs: a validation study from sub-saharan {A}frica}, + journal = PLOSONE, + year = {2014}, + volume = {9}, + pages = {e106381}, + doi = {10.1371/journal.pone.0106381}, + abstract = {Chest radiography to diagnose and screen for pulmonary tuberculosis has limitations, especially due to inter-reader variability. Automating the interpretation has the potential to overcome this drawback and to deliver objective and reproducible results. The CAD4TB software is a computer-aided detection system that has shown promising preliminary findings.
Evaluation studies in different settings are needed to assess diagnostic accuracy and practicability of use. CAD4TB was evaluated on chest radiographs of patients with symptoms suggestive of pulmonary tuberculosis enrolled in two cohort studies in Tanzania. All patients were characterized by sputum smear microscopy and culture including subsequent antigen or molecular confirmation of Mycobacterium tuberculosis (M.tb) to determine the reference standard. Chest radiographs were read by the software and two human readers, one expert reader and one clinical officer. The sensitivity and specificity of CAD4TB was depicted using receiver operating characteristic (ROC) curves, the area under the curve calculated and the performance of the software compared to the results of human readers. Of 861 study participants, 194 (23\%) were culture-positive for M.tb. The area under the ROC curve of CAD4TB for the detection of culture-positive pulmonary tuberculosis was 0.84 (95\% CI 0.80-0.88). CAD4TB was significantly more accurate for the discrimination of smear-positive cases against non TB patients than for smear-negative cases (p-value<0.01). It differentiated better between TB cases and non TB patients among HIV-negative compared to HIV-positive individuals (p<0.01). CAD4TB significantly outperformed the clinical officer, but did not reach the accuracy of the expert reader (p = 0.02), for a tuberculosis specific reading threshold. CAD4TB accurately distinguished between the chest radiographs of culture-positive TB cases and controls. Further studies on cost-effectiveness, operational and ethical aspects should determine its place in diagnostic and screening algorithms.}, + file = {Breu14.pdf:pdf\\Breu14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {25192172}, + month = {9}, + gsid = {7079580458158799529}, + gscites = {75}, + ss_id = {7e2cc3c4c648d38b74c4fe802672198067ddebf7}, + all_ss_ids = {['7e2cc3c4c648d38b74c4fe802672198067ddebf7']}, +} + +@article{Bria13, + author = {Bria, A. and Karssemeijer, N. and Tortorella, F.}, + title = {Learning from unbalanced data: A cascade-based approach for detecting clustered microcalcifications}, + journal = MIA, + year = {2013}, + volume = {18}, + pages = {241--252}, + doi = {10.1016/j.media.2013.10.014}, + abstract = {Finding abnormalities in diagnostic images is a difficult task even for expert radiologists because the normal tissue locations largely outnumber those with suspicious signs which may thus be missed or incorrectly interpreted. For the same reason the design of a Computer-Aided Detection (CADe) system is very complex because the large predominance of normal samples in the training data may hamper the ability of the classifier to recognize the abnormalities on the images. In this paper we present a novel approach for computer-aided detection which faces the class imbalance with a cascade of boosting classifiers where each node is trained by a learning algorithm based on ranking instead of classification error. Such approach is used to design a system (CasCADe) for the automated detection of clustered microcalcifications (μCs), which is a severely unbalanced classification problem because of the vast majority of image locations where no μC is present. The proposed approach was evaluated with a dataset of 1599 full-field digital mammograms from 560 cases and compared favorably with the Hologic R2CAD ImageChecker, one of the most widespread commercial CADe systems.
In particular, at the same lesion sensitivity of R2CAD (90\%) on biopsy proven malignant cases, CasCADe and R2CAD detected 0.13 and 0.21 false positives per image (FPpi), respectively (p-value=0.09), whereas at the same FPpi of R2CAD (0.21), CasCADe and R2CAD detected 93\% and 90\% of true lesions respectively (p-value=0.11) thus showing that CasCADe can compete with high-end CADe commercial systems.}, + file = {Bria13.pdf:pdf\\Bria13.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {24292553}, + month = {2}, + gsid = {4547748588204966796}, + gscites = {76}, + ss_id = {3679f6f835dac7059cf470c5251b6e7f3e748359}, + all_ss_ids = {['3679f6f835dac7059cf470c5251b6e7f3e748359']}, +} + +@inproceedings{Bria16a, + author = {Bria, A. and Marrocco, C. and Mordang, J. J. and Karssemeijer, N. and Molinara, M. and Tortorella, F.}, + title = {LUT-QNE: Look-Up-Table Quantum Noise Equalization in Digital Mammograms}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {27-34}, + doi = {10.1007/978-3-319-41546-8_4}, + abstract = {Abstract. Quantum noise is a signal-dependent, Poisson-distributed noise and the dominant noise source in digital mammography. Quantum noise removal or equalization has been shown to be an important step in the automatic detection of microcalcifications. However, it is often limited by the difficulty of robustly estimating the noise parameters on the images. In this study, a nonparametric image intensity transformation method that equalizes quantum noise in digital mammograms is described. A simple Look-Up-Table for Quantum Noise Equalization (LUT-QNE) is determined based on the assumption that noise properties do not vary significantly across the images. This method was evaluated on a dataset of 252 raw digital mammograms by comparing noise statistics before and after applying LUT-QNE. Performance was also tested as a preprocessing step in two microcalcification detection schemes. Results show that the proposed method statistically significantly improves microcalcification detection performance.}, + file = {:pdf/Bria16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {10449433380463654551}, + gscites = {5}, + ss_id = {2af2d2173999744b98665922265bb69a8bccf6ef}, + all_ss_ids = {['2af2d2173999744b98665922265bb69a8bccf6ef']}, +} + +@inproceedings{Bria16b, + author = {Bria, A. and Marrocco, C. and Karssemeijer, N. and Molinara, M. and Tortorella, F.}, + title = {Deep Cascade Classifiers to Detect Clusters of Microcalcifications}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {415-422}, + doi = {10.1007/978-3-319-41546-8_52}, + abstract = {Abstract. Recent advances in Computer-Aided Detection (CADe) for the automatic detection of clustered microcalcifications on mammograms show that cascade classifiers can compete with high-end commercial systems. In this paper, we introduce a deep cascade detector where the learning algorithm of each binary pixel classifier has been redesigned in the early stopping mechanism conventionally used to avoid overfitting to the training data. 
In this way, we strongly increase the number of features considered in each stage of the cascade (hence the term "deep"), yet we still benefit from the cascade framework by obtaining a very fast processing of mammograms (less than one second per image). We evaluated the proposed approach on a database of full-field digital mammograms; the experiments revealed a statistically significant improvement of deep cascade with respect to the traditional cascade framework. We also obtained statistically significantly higher performance than one of the most wide-spread commercial CADe systems, the Hologic R2CAD ImageChecker. Specifically, at the same number of false positives per image of R2CAD (0.21), the deep cascade detected 96 % of true lesions against the 90 % of R2CAD, whereas at the same lesion sensitivity of R2CAD (90 %), we obtained 0.05 false positives per image for the deep cascade against the 0.21 of R2CAD.}, + file = {:pdf/Bria16b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6212175193964795667}, + gscites = {16}, + ss_id = {3e8abcfa3851f8cbb06dacc2a7ea96305d0714e9}, + all_ss_ids = {['3e8abcfa3851f8cbb06dacc2a7ea96305d0714e9']}, +} + +@article{Bria18, + author = {Bria, Alessandro and Marrocco, Claudio and Borges, Lucas R and Molinara, Mario and Marchesi, Agnese and Mordang, Jan-Jurre and Karssemeijer, Nico and Tortorella, Francesco}, + title = {Improving the Automated Detection of Calcifications using Adaptive Variance Stabilization}, + journal = TMI, + year = {2018}, + volume = {37}, + number = {8}, + month = {8}, + pages = {1857-1864}, + doi = {10.1109/TMI.2018.2814058}, + abstract = {In this work, we analyze how stabilizing the variance of intensity-dependent quantum noise in digital mammograms can significantly improve the computerized detection of microcalcifications (MCs). These lesions appear on mammograms as tiny deposits of calcium smaller than 20 pixels in diameter. At this scale, high frequency image noise is dominated by quantum noise, which in raw mammograms can be described with a squareroot noise model. Under this assumption, we derive an adaptive variance stabilizing transform (VST) that stabilizes the noise to unitary standard deviation in all the images. This is achieved by estimating the noise characteristics from the image at hand. We tested the adaptive VST as a preprocessing stage for four existing computerized MC detection methods on three datasets acquired with mammographic units from different manufacturers. In all the test cases considered, MC detection performance on transformed mammograms was statistically significantly higher than on unprocessed mammograms. Results were also superior in comparison with a 'fixed' (nonparametric) VST previously proposed for digital mammograms.}, + file = {:pdf/Bria18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29994062}, + gsid = {7371071681059227535}, + gscites = {10}, + ss_id = {3f632517cafc2e8159b1c4c0c6f5375248dfe8db}, + all_ss_ids = {['3f632517cafc2e8159b1c4c0c6f5375248dfe8db']}, +} + +@conference{Brin13, + author = {Brink, M. and {van Rikxoort}, E. M. and Charbonnier, JP. and {van Riel}, S. J. and Schaefer-Prokop, C. M. and Prokop, M.}, + title = {Iodine Mapping of the Lung Using Subtraction Imaging for Pulmonary Embolism: Technique and Initial Clinical Experience}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE/AIM To describe a technique of deriving iodine maps of the lungs using subtraction CTA. 
To provide our initial clinical experience of this technique and comparison to iodine maps derived from dual energy imaging CONTENT ORGANIZATION A. Principles B. Data acquisition C. Postprocessing: Registration, motion correction and filtering D. Clinical examples E. Comparison with dual energy CT F. Advantages and limitations SUMMARY Iodine maps derived from dual energy CT have been successfully used for assessment of acute pulmonary embolism. We present technical principles and initial results of a technique deriving iodine maps from subtracting precontrast scans from pulmonary CTA. Accurate image registration is crucial: Artifacts may arise from pulsation, motion or differences in inspiratory level. With good registration, partially obstructed vessels can be distinguished from complete occlusions and correlated with perfusion defects in the lung parenchyma. Color coding of lung parenchyma can enhance perfusion differences at a quality that is superior to iodine maps from dual energy imaging. Radiation dose can be kept below 5 mSv for standard size patients. This subtraction technique allows for excellent evaluation of lung parenchyma and pulmonary vessels and achieves a more than 3 times higher contrast-to-noise ratio than dual energy images at identical dose.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Broe04, + author = {E. L. van den Broek and E. M. van Rikxoort}, + title = {Evaluation of color representation for texture analysis}, + booktitle = {Proceedings of the 16th Belgian Dutch Artificial Intelligence Conference (BNAIC)}, + year = {2004}, + pages = {35--42}, + url = {https://research.utwente.nl/en/publications/evaluation-of-color-representation-for-texture-analysis}, + abstract = {Since more than 50 years texture in image material is a topic of research. Hereby, color was ignored mostly. This study compares 70 different configurations for texture analysis, using four features. For the configurations we used: (i) a gray value texture descriptor: the co-occurrence matrix and a color texture descriptor: the color correlogram, (ii) six color spaces, and (iii) several quantization schemes. A three classifier combination was used to classify the output of the configurations on the {VisTex} texture database. The results indicate that the use of a coarse {HSV} color space quantization can substantially improve texture recognition compared to various other gray and color quantization schemes.}, + file = {Broe04.pdf:pdf/Broe04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {1383911289569758304}, + gscites = {51}, +} + +@inproceedings{Broe05, + author = {E. L. van den Broek and E. M. van Rikxoort and Th.E. Schouten}, + title = {Human-centered object-based image retrieval}, + booktitle = {Proceedings of the Third International Conference on Advances in Pattern Recognition}, + year = {2005}, + series = LNCS, + pages = {492--501}, + doi = {10.1007/11552499_55}, + abstract = {A new object-based image retrieval ({OBIR}) scheme is introduced. The images are analyzed using the recently developed, human-based 11 colors quantization scheme and the color correlogram. Their output served as input for the image segmentation algorithm: agglomerative merging, which is extended to color images. From the resulting coarse segments, boundaries are extracted by pixelwise classification, which are smoothed by erosion and dilation operators. The resulting features of the extracted shapes, completed the data for a -vector. 
Combined with the intersection distance measure, this vector is used for {OBIR}, as are its components. Although shape matching by itself provides good results, the complete vector outperforms its components, with up to 80% precision. Hence, a unique, excellently performing, fast, on human perception based, {OBIR} scheme is achieved.}, + file = {Broe05.pdf:pdf/Broe05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {117183584984567400}, + gscites = {19}, +} + +@inproceedings{Broe05a, + author = {E. L. van den Broek and E. M. van Rikxoort}, + title = {Parallel-Sequential Texture Analysis}, + booktitle = {Proceedings of the Third International Conference on Advances in Pattern Recognition}, + year = {2005}, + series = LNCS, + pages = {532--541}, + doi = {10.1007/11552499_59}, + abstract = {Color induced texture analysis is explored, using two texture analysis techniques: the co-occurrence matrix and the color correlogram as well as color histograms. Several quantization schemes for six color spaces and the human-based 11 color quantization scheme have been applied. The {VisTex} texture database was used as test bed. A new color induced texture analysis approach is introduced: the parallel-sequential approach; i.e., the color correlogram combined with the color histogram. This new approach was found to be highly successful (up to 96% correct classification). Moreover, the 11 color quantization scheme performed excellent (94% correct classification) and should, therefore, be incorporated for real-time image analysis. In general, the results emphasize the importance of the use of color for texture analysis and of color as global image feature. Moreover, it illustrates the complementary character of both features.}, + file = {Broe05a.pdf:pdf/Broe05a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {11930911287994883502}, + gscites = {9}, +} + +@inproceedings{Broe06, + author = {E. L. van den Broek and E. M. van Rikxoort and T. Kok and Th. E. Schouten}, + title = {{M-HinTS}: Mimicking Humans in Texture Sorting}, + booktitle = {Human Vision and Electronic Imaging XI}, + year = {2006}, + series = SPIE, + pages = {332--343}, + doi = {10.1117/12.643797}, + abstract = {Various texture analysis algorithms have been developed the last decades. However, no computational model has arisen that mimics human texture perception adequately. In 2000, Payne, Hepplewhite, and Stoneham and in 2005, Van Rikxoort, Van den Broek, and Schouten achieved mappings between humans and artificial classifiers of respectively around 29% and 50%. In the current research, the work of Van Rikxoort et al. was replicated, using the newly developed, online card sorting experimentation platform {M-HinTS}: {http://eidetic.ai.ru.nl/M-HinTS/}. In two separate experiments, color and gray scale versions of 180 textures, drawn from the {OuTex} and {VisTex} texture databases were clustered by 34 subjects. The mutual agreement among these subjects was 51% and 52% for, respectively, the experiments with color and gray scale textures. The average agreement between the k-means algorithm and the participants was 36%, where k-means approximated some participants up to 60%. Since last year's results were not replicated, an additional data analysis was developed, which uses the semantic labels available in the database. This analysis shows that semantics play an important role in human texture clustering and once more illustrate the complexity of texture recognition.
The current findings, the introduction of {M-HinTS}, and the set of analyzes discussed, are the start of a next phase in unraveling human texture recognition.}, + file = {Broe06.pdf:pdf/Broe06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {729595948648210024}, + gscites = {3}, +} + +@book{Brow08a, + author = {M. Brown and M. de Bruijne and B. van Ginneken and A. Kiraly and J.-M. Kuhnigk and C. Lorenz and K. Mori and J. Reinhardt}, + title = {The {F}irst {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, + year = {2008}, + publisher = {Lulu.com}, + url = {http://www.amazon.com/First-International-Workshop-Pulmonary-Analysis/dp/1435759524/}, + abstract = {Both the quantity and quality of image data available to study the pulmonary system have increased enormously in the last decade. The goal of this workshop is to bring together researchers in pulmonary image analysis and discuss recent advances in this rapidly developing field. We invited papers on all aspects of image analysis of pulmonary image data, including segmentation, registration, quantification, modeling of the image acquisition process, visualization, statistical modeling, biophysical modeling of the lungs (computational anatomy), and novel applications. In addition, we want to address the effective use of these methodologies for diagnosis and therapy in clinical applications, bringing together theory and practice, by including a hands-on demo session focusing on clinical workstations for pulmonary analysis. We received many high quality submissions covering a broad spectrum of issues in pulmonary image analysis. All papers underwent a thorough review process with 3-4 reviews per paper by members of the program committee and additional reviewers. We finally accepted 12 papers for oral presentation, 16 poster presentations, and 3 papers describing software systems which will be demonstrated during the poster and demo session.}, + file = {Brow08a.pdf:pdf\\Brow08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['57cc1d7dea1252afd3b287fce6551430c87226ff']}, + gscites = {12}, +} + +@book{Brow09, + author = {M. Brown and M. de Bruijne and B. van Ginneken and A. Kiraly and J.-M. Kuhnigk and C. Lorenz and J. R. McClelland and K. Mori and A. Reeves and J. Reinhardt}, + title = {The {S}econd {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, + year = {2009}, + publisher = {Createspace.com}, + url = {http://www.amazon.com/Second-International-Workshop-Pulmonary-Analysis/dp/1448680891/}, + abstract = {After the successful first edition of the International Workshop on Pulmonary Image Analysis at MICCAI 2008 in New York City, the entire organizing team volunteered to organize the second edition of this event, aimed at bringing together researchers in pulmonary image analysis to discuss recent advances in this rapidly developing field. The Second International Workshop on Pulmonary Image Analysis will be held on September 20, 2009 in London, UK, again as a workshop of the MICCAI conference. Two researchers later joined the organizing team. We received many high quality submissions for this workshop. All papers underwent a thorough review process with two to four reviews per paper by members of the program committee and additional reviewers. The proceedings of this workshop consist of three parts. 
There are fifteen regular papers, dealing with various aspects of image analysis of pulmonary image data, including segmentation, registration, and quantification of abnormalities in various modalities, with the focus in most studies on computed tomography, but also with papers on the analysis of MRI and X-ray scans. Next to these regular papers, we invited researchers to join in two comparative studies where algorithms were applied to a common data set, and submit a paper to the workshop about their system. The first of these challenges is EXACT09, on the extraction of the pulmonary airway tree from CT data. The second one, VOLCANO'09, is on the analysis of size changes in pulmonary nodules from consecutive CT scans. The results of these challenges are described in two overview papers that can be found in these proceedings. Moreover, fifteen papers describe systems that participated in the EXACT09 challenge and three papers describe algorithms that were used for the VOLCANO'09 challenge. That challenge attracted thirteen participating teams who applied algorithms, often previously published and not described in these proceedings, to the challenge data.}, + file = {Brow09.pdf:pdf\\Brow09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['858f620084a0676d77b2b52e983b2632e92b7def']}, + gscites = {11}, +} + +@book{Brow10, + author = {M. Brown and M. de Bruijne and B. van Ginneken and A. Kiraly and J.-M. Kuhnigk and C. Lorenz and K. Mori and J. Reinhardt}, + title = {The {T}hird {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, + year = {2010}, + publisher = {Createspace.com}, + url = {http://www.amazon.com/Third-International-Workshop-Pulmonary-Analysis/dp/1453776001/}, + abstract = {These are the proceedings of the third edition of the International Workshop on Pulmonary Image Analysis held on September 20, 2010 at MICCAI 2010 in Beijing. The two first events, organized also as workshops in conjunction with the MICCAI conferences in 2008 in New York and in 2009 in London, were very successful in bringing together researchers in pulmonary image analysis to discuss new developments in this growing field. For this edition, we received a large number of high quality papers that received a detailed review by two to four reviewers from the organizing team or from guest reviewers. A total of eight papers have been accepted for oral presentation at the workshop, in sessions on segmentation, classification and quantification, and registration. Six papers and one software were selected for poster presentation. Together these papers cover a wide range of topics within the field of pulmonary image analysis. We are also very happy that Eric Hoffman, Professor of Radiology, Medicine and Biomedical Engineering at the University of Iowa and director of the Iowa Comprehensive Lung Imaging Center, has agreed to give the keynote lecture of the third International Workshop on Pulmonary Image Analysis. Dr Hoffman was involved in the development of CT in the very early days and continues to lead forefront research in unravelling the mechanisms of pulmonary ventilation and lung diseases.}, + file = {Brow10.pdf:pdf\\Brow10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Brui02, + author = {M. de Bruijne and B. van Ginneken and W. J. Niessen and J. B. A. Maintz and M. A.
Viergever}, + title = {Active shape model based segmentation of abdominal aortic aneurysms in {CTA} images}, + booktitle = MI, + year = {2002}, + volume = {4684}, + series = SPIE, + pages = {463-474}, + doi = {10.1117/12.467188}, + abstract = {{A}n automated method for the segmentation of thrombus in abdominal aortic aneurysms from {CTA} data is presented. {T}he method is based on {A}ctive {S}hape {M}odel ({ASM}) fitting in sequential slices, using the contour obtained in one slice as the initialisation in the adjacent slice. {T}he optimal fit is defined by maximum correlation of grey value profiles around the contour in successive slices, in contrast to the original {ASM} scheme as proposed by {C}ootes and {T}aylor, where the correlation with profiles from training data is maximised. {A}n extension to the proposed approach prevents the inclusion of low-intensity tissue and allows the model to refine to nearby edges. {T}he applied shape models contain either one or two image slices, the latter explicitly restricting the shape change from slice to slice. {T}o evaluate the proposed methods a leave-one-out experiment was performed, using six datasets containing 274 slices to segment. {B}oth adapted {ASM} schemes yield significantly better results than the original scheme (p<0.0001). {T}he extended slice correlation fit of a one-slice model showed best overall performance. {U}sing one manually delineated image slice as a reference, on average a number of 29 slices could be automatically segmented with an accuracy within the bounds of manual inter-observer variability.}, + file = {Brui02.pdf:pdf\\Brui02.pdf:PDF}, + gsid = {10630122810979271113}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gscites = {61}, + ss_id = {1c04dc70e53afab5482a7adef2ddc5f707383deb}, + all_ss_ids = {['1c04dc70e53afab5482a7adef2ddc5f707383deb']}, +} + +@inproceedings{Brui03, + author = {M. de Bruijne and B. van Ginneken and M. A. Viergever and W. J. Niessen}, + title = {Adapting {A}ctive {S}hape {M}odels for {3D} segmentation of tubular structures in medical images}, + booktitle = IPMI, + year = {2003}, + volume = {2732}, + series = LNCS, + pages = {136--147}, + doi = {10.1007/b11820}, + abstract = {{A}ctive {S}hape {M}odels ({ASM}) have proven to be an effective approach for image segmentation. {I}n some applications however, the linear model of gray level appearance around a contour that is used in {ASM} is not sufficient for accurate boundary localization. {F}urthermore, the statistical shape model may be too restricted if the training set is limited. {T}his paper describes modifications to both the shape and the appearance model of the original {ASM} formulation. {S}hape model flexibility is increased, for tubular objects, by modeling the axis deformation independent of the cross-sectional deformation, and by adding supplementary cylindrical deformation modes. {F}urthermore, a novel appearance modeling scheme that effectively deals with a highly varying background is developed. {I}n contrast with the conventional {ASM} approach, the new appearance model is trained on both boundary and non-boundary points, and the probability that a given point belongs to the boundary is estimated non-parametrically. {T}he methods are evaluated on the complex task of segmenting thrombus in abdominal aortic aneurysms ({AAA}). {S}hape approximation errors were successfully reduced using the two shape model extensions. 
{S}egmentation using the new appearance model significantly outperformed the original {ASM} scheme; average volume errors are 5.1\% and 45\% respectively.}, + file = {Brui03.pdf:pdf\\Brui03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {15344453}, + gsid = {2360432872690580120}, + gscites = {185}, + ss_id = {0778bee7108adc686ab78dcd98758c76bf14bd36}, + all_ss_ids = {['0778bee7108adc686ab78dcd98758c76bf14bd36']}, +} + +@inproceedings{Brui03a, + author = {M. de Bruijne and B. van Ginneken and L. W. Bartels and M. J. van der Laan and J. D. Blankensteijn and W. J. Niessen and M. A. Viergever}, + title = {Automated {S}egmentation of {A}bdominal {A}ortic {A}neurysms in {M}ulti-spectral {MR} {I}mages}, + booktitle = MICCAI, + year = {2003}, + volume = {2879}, + series = LNCS, + pages = {538-545}, + doi = {10.1007/b93811}, + abstract = {{A}n automated method for segmenting the outer boundary of abdominal aortic aneurysms in {MR} images is presented. {T}he method is based on the well known {A}ctive {S}hape {M}odels ({ASM}), which fit a global landmark-based shape model on the basis of local boundary appearance models. {T}he original three-dimensional {ASM} scheme is modified to deal with multi-spectral image information and inconsistent boundary appearance in a principled way, with only a limited amount of training data. {I}n addition, a framework for user interaction is proposed. {I}f required, the obtained segmentation can be corrected in an interactive manner by indicating points on the desired boundary. {T}he methods are evaluated in leave-one-out experiments on 21 datasets. {A} segmentation scheme combining gray level information from two or three {MR} sequences produces significantly better results than a single-scan model. {A}verage volume errors with respect to the manual segmentation are 4.0%, in 19 out of 21 datasets. {I}n the cases in which the obtained error is large, results can easily be improved using the interactive scheme.}, + file = {Brui03a.pdf:pdf\\Brui03a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {10995536154405183851}, + gscites = {18}, + all_ss_ids = {['1ee7b2b9e546979b9985722ee3e1353abc199731']}, +} + +@inproceedings{Brui03b, + author = {M. de Bruijne and B. van Ginneken and W. J. Niessen and M. Loog and M. A. Viergever}, + title = {Model-based segmentation of abdominal aortic aneurysms in {CTA} images}, + booktitle = MI, + year = {2003}, + volume = {5032}, + series = SPIE, + pages = {1560-1571}, + doi = {10.1117/12.481367}, + abstract = {Segmentation of thrombus in abdominal aortic aneurysms is complicated by regions of low boundary contrast and by the presence of many neighboring structures in close proximity to the aneurysm wall. This paper presents an automated method that is similar to the well known Active Shape Models (ASM), which combine a three-dimensional shape model with a one-dimensional boundary appearance model. Our contribution is twofold: First, we show how the generalizability of a shape model of curvilinear objects can be improved by modeling the objects axis deformation independent of its cross-sectional deformation. Second, a non-parametric appearance modeling scheme that effectively deals with a highly varying background is presented. In contrast with the conventional ASM approach, the new appearance model trains on both true and false examples of boundary profiles. The probability that a given image profile belongs to the boundary is obtained using k nearest neighbor (kNN) probability density estimation. 
The performance of this scheme is compared to that of original ASMs, which minimize the Mahalanobis distance to the average true profile in the training set. A set of leave-one-out experiments is performed on 23 datasets. Modeling the axis and cross-section separately reduces the shape reconstruction error in all cases. The average reconstruction error was reduced from 2.2 to 1.6 mm. Segmentation using the kNN appearance model significantly outperforms the original ASM scheme; average volume errors are 5.9% and 46% respectively.}, + file = {Brui03b.pdf:pdf\\Brui03b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {5651884635784583934}, + gscites = {25}, + ss_id = {00559146cc1276c6c98401e2aa35e591ebccca76}, + all_ss_ids = {['00559146cc1276c6c98401e2aa35e591ebccca76']}, +} + +@article{Brui04, + author = {M. de Bruijne AND B. van Ginneken AND M. A. Viergever AND W. J. Niessen}, + title = {Interactive segmentation of abdominal aortic aneurysms in {CTA} images}, + journal = MIA, + year = {2004}, + volume = {8}, + pages = {127-138}, + doi = {10.1016/j.media.2004.01.001}, + abstract = {{A} model-based approach to interactive segmentation of abdominal aortic aneurysms from {CTA} data is presented. {A}fter manual delineation of the aneurysm sac in the first slice, the method automatically detects the contour in subsequent slices, using the result from the previous slice as a reference. {I}f an obtained contour is not sufficiently accurate, the user can intervene and provide an additional manual reference contour. {T}he method is inspired by the active shape model ({ASM}) segmentation scheme (), in which a statistical shape model, derived from corresponding landmark points in manually labeled training images, is fitted to the image in an iterative manner. {I}n our method, a shape model of the contours in two adjacent image slices is progressively fitted to the entire volume. {T}he contour obtained in one slice thus constrains the possible shapes in the next slice. {T}he optimal fit is determined on the basis of multi-resolution gray level models constructed from gray value patches sampled around each landmark. {W}e propose to use the similarity of adjacent image slices for this gray level model, and compare these to single-slice features that are more generally used with {ASM}. {T}he performance of various image features is evaluated in leave-one-out experiments on 23 data sets. {F}eatures that use the similarity of adjacent image slices outperform measures based on single-slice features in all cases. {T}he average number of slices in our datasets is 51, while on average eight manual initializations are required, which decreases operator segmentation time by a factor of 6.}, + file = {Brui04.pdf:pdf\\Brui04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {15063862}, + month = {6}, + gsid = {14139207650278234060}, + gscites = {128}, + ss_id = {c6736d66d1088989faf12ba082dae19132baed4d}, + all_ss_ids = {['c6736d66d1088989faf12ba082dae19132baed4d']}, +} + +@inproceedings{Brun07, + author = {Ellen J L Brunenberg and Anna Vilanova and Veerle Visser-Vandewalle and Yasin Temel and Linda Ackermans and Bram Platel and B. M. ter Haar Romeny}, + title = {Automatic trajectory planning for deep brain stimulation: a feasibility study}, + booktitle = MICCAI, + year = {2007}, + volume = {10}, + series = LNCS, + pages = {584--592}, + abstract = {DBS for Parkinson's disease involves an extensive planning to find a suitable electrode implantation path to the selected target. 
We have investigated the feasibility of improving the conventional planning with an automatic calculation of possible paths in 3D. This requires the segmentation of anatomical structures. Subsequently, the paths are calculated and visualized. After selection of a suitable path, the settings for the stereotactic frame are determined. A qualitative evaluation has shown that automatic avoidance of critical structures is feasible. The participating neurosurgeons estimate the time gain to be around 30 minutes.}, + file = {Brun07.pdf:pdf/Brun07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {Pt 1}, + pmid = {18051106}, + gsid = {5894165833264999381}, + gscites = {61}, +} + +@conference{Brun09, + author = {Brunenberg, E. and Prckovska, V. and Platel, B. and Strijkers, G. and ter Haar Romeny, B. M.}, + title = {Untangling a fiber bundle knot - {P}reliminary results on {STN} connectivity using {DTI} and {HARDI} on rat brains}, + booktitle = {Proceedings of the International Society for Magnetic Resonance in Medicine}, + year = {2009}, + volume = {17}, + pages = {740}, + abstract = {Deep Brain Stimulation of the subthalamic nucleus (STN) for Parkinsons Disease improves the motor function, but cognitive and emotional side effects occur. These can be minimized by an accurate location of the STN motor part, analyzing the connections with other brain areas. In this study, we compare DTI and HARDI glyphs around the rat STN. The HARDI glyphs are more heterogeneous than the mostly oblate DTI glyphs. We observe crossings in the lateral STN, while a linear configuration can be seen medially. We may thus distinguish the lateral motor part and the medial cognitive/emotional part of the rat STN.}, + file = {Brun09.pdf:pdf/Brun09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {15103257933820165441}, + gscites = {10}, +} + +@inproceedings{Brun10, + author = {Ellen Brunenberg and Remco Duits and B. M. ter Haar Romeny and Bram Platel}, + title = {A Sobolev norm based distance measure for HARDI clustering: a feasibility study on phantom and real data}, + booktitle = MICCAI, + year = {2010}, + volume = {13}, + series = LNCS, + pages = {175--182}, + abstract = {Dissimilarity measures for DTI clustering are abundant. However, for HARDI, the L2 norm has up to now been one of only few practically feasible measures. In this paper we propose a new measure, that not only compares the amplitude of diffusion profiles, but also rewards coincidence of the extrema. We tested this on phantom and real brain data. In both cases, our measure significantly outperformed the L2 norm.}, + file = {Brun10.pdf:pdf/Brun10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {Pt 1}, + pmid = {20879229}, + ss_id = {f5a131da8468342f2af75863a762a56ab95ab8a1}, + all_ss_ids = {['f5a131da8468342f2af75863a762a56ab95ab8a1']}, + gscites = {4}, +} + +@article{Brun11, + author = {Ellen J L Brunenberg and Bram Platel and Paul A M Hofman and B.M. ter Haar Romeny and Veerle Visser-Vandewalle}, + title = {Magnetic resonance imaging techniques for visualization of the subthalamic nucleus}, + journal = JNSUR, + year = {2011}, + volume = {115}, + pages = {971--984}, + doi = {10.3171/2011.6.JNS101571}, + abstract = {The authors reviewed 70 publications on MR imaging-based targeting techniques for identifying the subthalamic nucleus (STN) for deep brain stimulation in patients with Parkinson disease. Of these 70 publications, 33 presented quantitatively validated results. 
There is still no consensus on which targeting technique to use for surgery planning; methods vary greatly between centers. Some groups apply indirect methods involving anatomical landmarks, or atlases incorporating anatomical or functional data. Others perform direct visualization on MR imaging, using T2-weighted spin echo or inversion recovery protocols. The combined studies do not offer a straightforward conclusion on the best targeting protocol. Indirect methods are not patient specific, leading to varying results between cases. On the other hand, direct targeting on MR imaging suffers from lack of contrast within the subthalamic region, resulting in a poor delineation of the STN. These deficiencies result in a need for intraoperative adaptation of the original target based on test stimulation with or without microelectrode recording. It is expected that future advances in MR imaging technology will lead to improvements in direct targeting. The use of new MR imaging modalities such as diffusion MR imaging might even lead to the specific identification of the different functional parts of the STN, such as the dorsolateral sensorimotor part, the target for deep brain stimulation.}, + file = {Brun11.pdf:pdf/Brun11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21800960}, + month = {11}, + ss_id = {4304be3b3a04f1effbd488d408c812fcb8c44358}, + all_ss_ids = {['4304be3b3a04f1effbd488d408c812fcb8c44358']}, + gscites = {53}, +} + +@phdthesis{Brun11a, + author = {Brunenberg, E.}, + title = {Hitting the right target : noninvasive localization of the subthalamic nucleus motor part for specific deep brain stimulation}, + year = {2011}, + url = {http://repository.tue.nl/715250}, + abstract = {Deep brain stimulation of the subthalamic nucleus (STN) has gained momentum as a therapy for advanced Parkinson's disease. The stimulation effectively alleviates the patients' typical motor symptoms on a long term, but can give rise to cognitive and psychiatric adverse effects as well. Based on primate studies, the STN has been divided into three functionally different parts, which were distinguished by their afferent and efferent connections. The largest part is the motor area, followed by an associative and a limbic area. The serious adverse effects on cognition and behavior occurring after deep brain stimulation are assumed to be caused by electrical current spread to the associative and limbic areas of the STN. Therefore, selective stimulation of the motor part of the STN seems crucial, both to obtain the best possible therapeutic effect on the motor symptoms and to minimize the debilitating effects on cognition and behavior. However, current medical imaging techniques do not yet facilitate the required accurate identification of the STN itself, let alone its different functional areas. The final target for DBS is still often adjusted using intraoperative electrophysiology. Therefore, in this thesis we aimed to improve imaging for deep brain stimulation using noninvasive MRI protocols, in order to identify the STN and its motor part. We studied the advantages and drawbacks of already available noninvasive methods to target the STN. This review did not lead to a straightforward conclusion; identification of the STN motor part remained an open question. In follow-up on this question, we investigated the possibility to distinguish the different functional STN parts based on their connectivity information. Three types of information were carefully analyzed in this thesis.
First, we looked into the clustering of local diffusion information within the STN region. We visually inspected the complex diffusion profiles, derived from postmortem rat brain data with high angular resolution, and augmented this manual segmentation method using k-means and graph cuts clustering. Because the weighing of different orders of diffusion information in the traditionally used L2 norm on the orientation distribution functions (ODFs) remained an open issue, we developed a specialized distance measure, the so-called Sobolev norm. This norm does not only take into account the amplitudes of the diffusion profiles, but also their extrema. We showed it to perform better than the L2 norm on synthetic phantom data and real brain (thalamus) data. The research done on this topic facilitates better classification by clustering of gray matter structures in the (deep) brain. Secondly, we were the first to analyze the STN's full structural connectivity, based on probabilistic fiber tracking in diffusion MRI data of healthy volunteers. The results correspond well to topical literature on STN projections. Furthermore, we assessed the structural connectivity per voxel of the STN seed region and discovered a gradient in connectivity to the premotor cortex within the STN. While going from the medial to the lateral part of the STN, the connectivity increases, confirming the expected lateral location of the STN motor part. Finally, the connectivity analysis produced evidence for the existence of a 'hyperdirect' pathway between the motor cortex and the STN in humans, which is very useful for future research into stimulation targets. The results of these experiments indicate that it is possible to find the motor part of the STN as specific target for deep brain stimulation using structural connectivity information acquired in a noninvasive way. Third and last, we studied functional connectivity using resting state functional MRI data of healthy volunteers. The resulting significant clusters provided us with the first complete description of the STN's resting state functional connectivity, which corresponds with the expectations based on available literature. Moreover, we performed a reverse regression procedure with the average time + series signals in motor and limbic areas as principal regressors. The results were analyzed for each STN voxel separately and also showed mediolateral gradients in functional connectivity within the STN. The lateral STN part exhibited more motor connectivity, while the medial part seemed to be more functionally connected to limbic brain areas, as described in neuronal tracer studies. These results show that functional connectivity analysis also is a viable noninvasive method to find the motor part of the STN. The work on noninvasive MRI methods for identification of the STN and its functional parts, as presented in this thesis, thus contributes to future specific stimulation of the motor part of the STN for deep brain stimulation in patients with Parkinson's disease. This may help to maximize the motor effects and minimize severe cognitive and psychiatric side effects.}, + copromotor = {B. Platel}, + file = {Brun11a.pdf:pdf/Brun11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. M. Ter Haar Romeny and V. Visser Vandewalle}, + school = {Eindhoven University of Technology}, + journal = {PhD thesis}, +} + +@article{Brun12, + author = {Brunenberg, Ellen J L. and Moeskops, Pim and Backes, Walter H.
and Pollo, Claudio and Cammoun, Leila and Vilanova, Anna and Janssen, Marcus L F. and Visser-Vandewalle, Veerle E R M. and Ter Haar Romeny, Bart M. and Thiran, Jean-Philippe and Platel, Bram}, + title = {Structural and Resting State Functional Connectivity of the Subthalamic Nucleus: Identification of Motor STN Parts and the Hyperdirect Pathway}, + journal = PLOSONE, + year = {2012}, + volume = {7}, + pages = {e39061}, + doi = {10.1371/journal.pone.0039061}, + abstract = {Deep brain stimulation (DBS) for Parkinson's disease often alleviates the motor symptoms, but causes cognitive and emotional side effects in a substantial number of cases. Identification of the motor part of the subthalamic nucleus (STN) as part of the presurgical workup could minimize these adverse effects. In this study, we assessed the STN's connectivity to motor, associative, and limbic brain areas, based on structural and functional connectivity analysis of volunteer data. For the structural connectivity, we used streamline counts derived from HARDI fiber tracking. The resulting tracks supported the existence of the so-called "hyperdirect" pathway in humans. Furthermore, we determined the connectivity of each STN voxel with the motor cortical areas. Functional connectivity was calculated based on functional MRI, as the correlation of the signal within a given brain voxel with the signal in the STN. Also, the signal per STN voxel was explained in terms of the correlation with motor or limbic brain seed ROI areas. Both right and left STN ROIs appeared to be structurally and functionally connected to brain areas that are part of the motor, associative, and limbic circuit. Furthermore, this study enabled us to assess the level of segregation of the STN motor part, which is relevant for the planning of STN DBS procedures.}, + file = {Brun12.pdf:pdf/Brun12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {22768059}, + month = {6}, + gsid = {7953209599105730739}, + gscites = {133}, + ss_id = {3d346a616566ceafe3a09acdd4831df76c408b33}, + all_ss_ids = {['3d346a616566ceafe3a09acdd4831df76c408b33']}, +} + +@conference{Buka17, + author = {Bukala, Joris and Humpire Mamani, Gabriel Efrain and Scholten, Ernst and Prokop, Mathias and van Ginneken, Bram and Jacobs, Colin}, + title = {Fully Automatic Measurement of the Splenic Volume in CT with U-Net Convolutional Neural Networks}, + booktitle = RSNA, + year = {2017}, + abstract = {Purpose: To develop a fully automatic deep learning method for 3D segmentation of the spleen on computed tomography (CT) scans and to compare the automatically measured spleen volume with the standard splenic index approximation formula that requires three 2D manual measurements. Method and Materials:145 CT thorax-abdomen scans were collected from our institute. All scans were contrast enhanced and acquired with a slice thickness of 1 or 2 mm. The spleens were manually segmented in 3D by trained human observers in all scans. We used 100 scans for training and 45 scans as an independent test set. In the test set, the standard approximation formula was applied by a human observer to get an estimation of the splenic volume. The system fully analyzes the entire thorax-abdomen CT scan to segment the exact location of the spleen, without any need for pre-processing. Multiple U-net convolutional neural networks were trained for different orthogonal directions using the training data set. 
A validation set consisting of 30% of the training data was used to optimize the hyperparameters of the neural network. A dedicated hard mining selection strategy was employed to improve the learning process. The predictions of the U-nets were averaged and subsequently thresholded to obtain a 3D spleen segmentation. The mean absolute error of the splenic volume was used to measure the accuracy of the deep learning approach and the standard approximation formula in comparison to the manual reference standard. The performance of the deep learning approach was also evaluated by computing the Dice similarity coefficient on the test set. Results: The deep learning approach resulted in a mean absolute error of 8.5% (SD 11.6) in the splenic volume while the approximation formula gave a significantly higher (p<0.01) mean absolute error of 17.7% (SD 14.7). The average Dice score between the deep learning segmentations and the reference segmentations was 0.91 (SD 0.08). Conclusion: Splenic volume can be fully automatically assessed using a U-net deep learning approach, with an accuracy that is substantially better than the clinically widely used approximation formula. Clinical relevance/Application: An accurate splenic volume measurement can be used for assessing splenomegaly and for detecting changes in splenic volume over time.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Bult18, + author = {Wouter Bulten and Christina A. Hulsbergen-van de Kaa and Jeroen van der Laak and Geert J. S. Litjens}, + title = {Automated segmentation of epithelial tissue in prostatectomy slides using deep learning}, + booktitle = MI, + year = {2018}, + volume = {10581}, + series = SPIE, + pages = {105810S}, + doi = {10.1117/12.2292872}, + abstract = {Prostate cancer is generally graded by pathologists based on hematoxylin and eosin (H\&E) stained slides. Because of the large size of the tumor areas in radical prostatectomies (RP), this task can be tedious and error prone with known high interobserver variability. Recent advancements in deep learning have enabled development of automated systems that may assist pathologists in prostate diagnostics. As prostate cancer originates from glandular tissue, an important prerequisite for development of such algorithms is the possibility to automatically differentiate between glandular tissue and other tissues. In this paper, we propose a method for automatically segmenting epithelial tissue in digitally scanned prostatectomy slides based on deep learning. We collected 30 single-center whole mount tissue sections, with reported Gleason growth patterns ranging from 3 to 5, from 27 patients that underwent RP. Two different network architectures, U-Net and regular fully convolutional networks with varying depths, were trained using a set of sparsely annotated slides. We evaluated the trained networks on exhaustively annotated regions from a separate test set. The test set contained both healthy and cancerous epithelium with different Gleason growth patterns. The results show the effectiveness of our approach given a pixel-based AUC score of 0.97. Our method contains no prior assumptions on glandular morphology, does not directly rely on the presence of lumina and all features are learned by the network itself. 
The generated segmentation can be used to highlight regions of interest for pathologists and to improve cancer annotations to further enhance an automatic cancer grading system.}, + file = {Bult18.pdf:pdf\\Bult18.pdf:PDF}, + optnote = {DIAG}, + month = {3}, + gsid = {14179800442563998624}, + gscites = {16}, + ss_id = {add18010e1af63998bae7573f4cd5d2843eeb5bb}, + all_ss_ids = {['add18010e1af63998bae7573f4cd5d2843eeb5bb']}, +} + +@inproceedings{Bult18a, + author = {Bulten, Wouter and Litjens, Geert}, + title = {Unsupervised Prostate Cancer Detection on H\&E using Convolutional Adversarial Autoencoders}, + booktitle = MIDL, + year = {2018}, + url = {https://openreview.net/forum?id=Syoj0k2iG}, + abstract = {We propose an unsupervised method using self-clustering convolutional adversarial autoencoders to classify prostate tissue as tumor or non-tumor without any labeled training data. The clustering method is integrated into the training of the autoencoder and requires only little post-processing. Our network trains on hematoxylin and eosin (H\&E) input patches and we tested two different reconstruction targets, H&E and immunohistochemistry (IHC). We show that antibody-driven feature learning using IHC helps the network to learn relevant features for the clustering task. Our network achieves a F1 score of 0.62 using only a small set of validation labels to assign classes to clusters.}, + file = {Bult18a.pdf:pdf\\Bult18a.pdf:PDF}, + optnote = {DIAG}, + gsid = {18442552638759628545}, + gscites = {24}, + ss_id = {eed96a595b9c988affacac34c57128bd115020f0}, + all_ss_ids = {['eed96a595b9c988affacac34c57128bd115020f0']}, +} + +@article{Bult19, + author = {Wouter Bulten and P\'{e}ter B\'{a}ndi and Jeffrey Hoven and Rob {van de Loo} and Johannes Lotz and Nick Weiss and Jeroen {van der Laak} and Bram {van Ginneken} and Christina {Hulsbergen-van de Kaa} and Geert Litjens}, + title = {Epithelium segmentation using deep learning in H\&E-stained prostate specimens with immunohistochemistry as reference standard}, + journal = NATSCIREP, + year = {2019}, + volume = {9}, + number = {864}, + issue = {1}, + doi = {10.1038/s41598-018-37257-4}, + url = {https://arxiv.org/abs/1808.05883}, + abstract = {Given the importance of gland morphology in grading prostate cancer (PCa), automatically differentiating between epithelium and other tissues is an important prerequisite for the development of automated methods for detecting PCa. We propose a new deep learning method to segment epithelial tissue in digitised hematoxylin and eosin (H\&E) stained prostatectomy slides using immunohistochemistry (IHC) as reference standard. We used IHC to create a precise and objective ground truth compared to manual outlining on H&E slides, especially in areas with high-grade PCa. 102 tissue sections were stained with H&E and subsequently restained with P63 and CK8/18 IHC markers to highlight epithelial structures. Afterwards each pair was co-registered. First, we trained a U-Net to segment epithelial structures in IHC using a subset of the IHC slides that were preprocessed with color deconvolution. Second, this network was applied to the remaining slides to create the reference standard used to train a second U-Net on H\&E. Our system accurately segmented both intact glands and individual tumour epithelial cells. The generalisation capacity of our system is shown using an independent external dataset from a different centre. 
We envision this segmentation as the first part of a fully automated prostate cancer grading pipeline.}, + file = {:pdf/Bult19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30696866}, + month = {1}, + gsid = {10102326358237382134}, + gscites = {120}, + ss_id = {d5b8fba71671814445de80d36a2d563f43eb9d07}, + all_ss_ids = {['d5b8fba71671814445de80d36a2d563f43eb9d07']}, +} + +@conference{Bult19a, + author = {Bulten, Wouter and Pinckaers, Hans and {Hulsbergen-van de Kaa}, Christina and Litjens, Geert}, + title = {Automated Gleason Grading of Prostate Biopsies Using Deep Learning}, + booktitle = {United States and Canadian Academy of Pathology (USCAP) 108th Annual Meeting}, + year = {2019}, + abstract = {Grading prostate cancer is a time-consuming process and suffers from high inter- and intra-observer variability. Advances in computer-aided diagnosis have shown promise in improving histopathological diagnosis. We trained a deep learning system using data retrieved from the patients records to grade digitized prostate biopsies. Our system is the first that can automatically classify background, benign epithelium, Gleason 3, 4, and 5 on a gland-by-gland level in prostate biopsies. 532 glass slides containing 2162 prostate biopsies, evaluated by an experienced urogenital pathologist were collected and scanned. 596 biopsies were kept separate for evaluation, the remaining 1576 were used to train the deep learning algorithm (see table for Gleason grade distribution). A single label denoting the Gleason score (e.g. 3+4=7) was available for each biopsy, without information on tumor location or volume. To generate detailed annotations for training we used two previously trained deep learning networks to first segment the epithelium and, subsequently, to detect cancer. The Gleason grade from the patient record was assigned to the cancerous epithelium. These generated weakly annotated regions of tumor were then used to train a Gleason grading system. To evaluate, the system was applied to the biopsies in the test set. We used the total predicted surface area of each growth pattern to determine the Gleason score of the biopsy. Predicted tumor areas smaller than 15% of total epithelial tissue were considered unreliable (e.g. incomplete glands at the edges of the biopsy) and ignored for slide level classification. For predicted grades only areas larger than 5% of all epithelial tissue were considered, which is also common in clinical practice. Predicting whether a biopsy contains tumor resulted in an accuracy of 86% (linear weighted kappa (k) of 0.73, area under the ROC curve of 0.96). We compared the predicted primary Gleason grade to the one from the pathologists' report. Our system achieved an accuracy of 75% (k 0.64). On predicting the Grade Group (using primary and secondary pattern), our system achieved an accuracy of 67% (k 0.57). Misclassifications of more than one grade are rare. Our deep learning system automatically identifies Gleason patterns and benign tissue on a gland-by-gland basis. This can be used to determine the biopsy-level Grade Group and Gleason score, and show which parts of the tissue contribute to this prediction. 
Improvements need to be made to decrease misclassifications, for example in areas with inflammation.}, + optnote = {DIAG}, + gsid = {14775949012876589282}, + gscites = {125}, + all_ss_ids = {8ce6b544554a79e077e5fc52f55ba8234ce606d4}, +} + +@article{Bult20, + author = {Bulten, Wouter and Pinckaers, Hans and van Boven, Hester and Vink, Robert and de Bel, Thomas and van Ginneken, Bram and van der Laak, Jeroen and Hulsbergen-van de Kaa, Christina and Litjens, Geert}, + title = {Automated deep-learning system for Gleason grading of prostate cancer using biopsies: a diagnostic study}, + journal = LANCETO, + year = {2020}, + volume = {21}, + issue = {2}, + pages = {233--241}, + doi = {10.1016/S1470-2045(19)30739-9}, + url = {https://arxiv.org/abs/1907.07980}, + algorithm = {https://grand-challenge.org/algorithms/gleason-grading-of-prostate-biopsies/}, + abstract = {BACKGROUND: + The Gleason score is the strongest correlating predictor of recurrence for prostate cancer, but has substantial inter-observer variability, limiting its usefulness for individual patients. Specialised urological pathologists have greater concordance; however, such expertise is not widely available. Prostate cancer diagnostics could thus benefit from robust, reproducible Gleason grading. We aimed to investigate the potential of deep learning to perform automated Gleason grading of prostate biopsies. + + METHODS: + In this retrospective study, we developed a deep-learning system to grade prostate biopsies following the Gleason grading standard. The system was developed using randomly selected biopsies, sampled by the biopsy Gleason score, from patients at the Radboud University Medical Center (pathology report dated between Jan 1, 2012, and Dec 31, 2017). A semi-automatic labelling technique was used to circumvent the need for manual annotations by pathologists, using pathologists' reports as the reference standard during training. The system was developed to delineate individual glands, assign Gleason growth patterns, and determine the biopsy-level grade. For validation of the method, a consensus reference standard was set by three expert urological pathologists on an independent test set of 550 biopsies. Of these 550, 100 were used in an observer experiment, in which the system, 13 pathologists, and two pathologists in training were compared with respect to the reference standard. The system was also compared to an external test dataset of 886 cores, which contained 245 cores from a different centre that were independently graded by two pathologists. + + FINDINGS: + We collected 5759 biopsies from 1243 patients. The developed system achieved a high agreement with the reference standard (quadratic Cohen's kappa 0.918, 95% CI 0.891-0.941) and scored highly at clinical decision thresholds: benign versus malignant (area under the curve 0.990, 95% CI 0.982-0.996), grade group of 2 or more (0.978, 0.966-0.988), and grade group of 3 or more (0.974, 0.962-0.984). In an observer experiment, the deep-learning system scored higher (kappa 0.854) than the panel (median kappa 0.819), outperforming 10 of 15 pathologist observers. On the external test dataset, the system obtained a high agreement with the reference standard set independently by two pathologists (quadratic Cohen's kappa 0.723 and 0.707) and within inter-observer variability (kappa 0.71). + + INTERPRETATION: + Our automated deep-learning system achieved a performance similar to pathologists for Gleason grading and could potentially contribute to prostate cancer diagnosis.
The system could potentially assist pathologists by screening biopsies, providing second opinions on grade group, and presenting quantitative measurements of volume percentages. + + FUNDING: + Dutch Cancer Society.}, + file = {:pdf/Bult20.pdf:PDF}, + optnote = {DIAG}, + pmid = {31926805}, + month = {2}, + gsid = {11015417795282172712}, + gscites = {433}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/217309}, + ss_id = {39a52867ea3d60ec4f28182e0e618b762d3d7909}, + all_ss_ids = {['39a52867ea3d60ec4f28182e0e618b762d3d7909', '24b8cee45431f633d2fa6e3c05670f62b1e41e7e', 'c310babca20446de2ee7d8857abe239e0fe261a0']}, +} + +@article{Bult20a, + title = {Artificial Intelligence Assistance Significantly Improves Gleason Grading of Prostate Biopsies by Pathologists}, + author = {Bulten, Wouter and Balkenhol, Maschenka and Belinga, Jean-Jo\"{e}l Awoumou and Brilhante, Am\'{e}rico and \c{C}ak\i{}r, Asl\i{} and Egevad, Lars and Eklund, Martin and Farr\'{e}, Xavier and Geronatsiou, Katerina and Molini\'{e}, Vincent and Pereira, Guilherme and Roy, Paromita and Saile, G\"{u}nter and Salles, Paulo and Schaafsma, Ewout and Tschui, Jo\"{e}lle and Vos, Anne-Marie and Delahunt, Brett and Samaratunga, Hemamali and Grignon, David J. and Evans, Andrew J. and Berney, Daniel M. and Pan, Chin-Chen and Kristiansen, Glen and Kench, James G. and Oxley, Jon and Leite, Katia R. M. and McKenney, Jesse K. and Humphrey, Peter A. and Fine, Samson W. and Tsuzuki, Toyonori and Varma, Murali and Zhou, Ming and Comperat, Eva and Bostwick, David G. and Iczkowski, Kenneth A. and Magi-Galluzzi, Cristina and Srigley, John R. and Takahashi, Hiroyuki and van der Kwast, Theo and van Boven, Hester and Vink, Robert and van der Laak, Jeroen and Hulsbergen-van der Kaa, Christina and Litjens, Geert}, + year = {2020}, + month = {8}, + pmid = {32759979}, + abstract = {The Gleason score is the most important prognostic marker for prostate cancer patients, but it suffers from significant observer variability. Artificial intelligence (AI) systems based on deep learning can achieve pathologist-level performance at Gleason grading. However, the performance of such systems can degrade in the presence of artifacts, foreign tissue, or other anomalies. Pathologists integrating their expertise with feedback from an AI system could result in a synergy that outperforms both the individual pathologist and the system. Despite the hype around AI assistance, existing literature on this topic within the pathology domain is limited. We investigated the value of AI assistance for grading prostate biopsies. A panel of 14 observers graded 160 biopsies with and without AI assistance. Using AI, the agreement of the panel with an expert reference standard increased significantly (quadratically weighted Cohen's kappa, 0.799 vs. 0.872; p = 0.019). On an external validation set of 87 cases, the panel showed a significant increase in agreement with a panel of international experts in prostate pathology (quadratically weighted Cohen's kappa, 0.733 vs. 0.786; p = 0.003). In both experiments, on a group-level, AI-assisted pathologists outperformed the unassisted pathologists and the standalone AI system. 
Our results show the potential of AI systems for Gleason grading, but more importantly, show the benefits of pathologist-AI synergy.}, + journal = MODP, + doi = {10.1038/s41379-020-0640-y}, + url = {https://doi.org/10.1038/s41379-020-0640-y}, + file = {:pdf/Bult20a.pdf:PDF}, + optnote = {DIAG}, + ss_id = {06bcda243a29be0892422c60b663f80cff21978a}, + all_ss_ids = {['06bcda243a29be0892422c60b663f80cff21978a']}, + gscites = {87}, +} + +@article{Bult22, + author = {Bulten, Wouter and Kartasalo, Kimmo and Chen, Po-Hsuan Cameron and Strom, Peter and Pinckaers, Hans and Nagpal, Kunal and Cai, Yuannan and Steiner, David F. and van Boven, Hester and Vink, Robert and Hulsbergen-van de Kaa, Christina and van der Laak, Jeroen and Amin, Mahul B. and Evans, Andrew J. and van der Kwast, Theodorus and Allan, Robert and Humphrey, Peter A. and Gronberg, Henrik and Samaratunga, Hemamali and Delahunt, Brett and Tsuzuki, Toyonori and Hakkinen, Tomi and Egevad, Lars and Demkin, Maggie and Dane, Sohier and Tan, Fraser and Valkonen, Masi and Corrado, Greg S. and Peng, Lily and Mermel, Craig H. and Ruusuvuori, Pekka and Litjens, Geert and Eklund, Martin and Brilhante, Americo and Cakir, Asli and Farre, Xavier and Geronatsiou, Katerina and Molinie, Vincent and Pereira, Guilherme and Roy, Paromita and Saile, Gunter and Salles, Paulo G. O. and Schaafsma, Ewout and Tschui, Joelle and Billoch-Lima, Jorge and Pereira, Emiio M. and Zhou, Ming and He, Shujun and Song, Sejun and Sun, Qing and Yoshihara, Hiroshi and Yamaguchi, Taiki and Ono, Kosaku and Shen, Tao and Ji, Jianyi and Roussel, Arnaud and Zhou, Kairong and Chai, Tianrui and Weng, Nina and Grechka, Dmitry and Shugaev, Maxim V. and Kiminya, Raphael and Kovalev, Vassili and Voynov, Dmitry and Malyshev, Valery and Lapo, Elizabeth and Campos, Manuel and Ota, Noriaki and Yamaoka, Shinsuke and Fujimoto, Yusuke and Yoshioka, Kentaro and Juvonen, Joni and Tukiainen, Mikko and Karlsson, Antti and Guo, Rui and Hsieh, Chia-Lun and Zubarev, Igor and Bukhar, Habib S. T. and Li, Wenyuan and Li, Jiayun and Speier, William and Arnold, Corey and Kim, Kyungdoc and Bae, Byeonguk and Kim, Yeong Won and Lee, Hong-Seok and Park, Jeonghyuk and {the PANDA challenge consortium}}, + title = {Artificial intelligence for diagnosis and {Gleason} grading of prostate cancer: the {PANDA} challenge}, + doi = {10.1038/s41591-021-01620-2}, + url = {https://doi.org/10.1038/s41591-021-01620-2}, + abstract = {Artificial intelligence (AI) has shown promise for diagnosing prostate cancer in biopsies. However, results have been limited to individual studies, lacking validation in multinational settings. Competitions have been shown to be accelerators for medical imaging innovations, but their impact is hindered by lack of reproducibility and independent validation. With this in mind, we organized the PANDA challenge--the largest histopathology competition to date, joined by 1,290 developers--to catalyze development of reproducible AI algorithms for Gleason grading using 10,616 digitized prostate biopsies. We validated that a diverse set of submitted algorithms reached pathologist-level performance on independent cross-continental cohorts, fully blinded to the algorithm developers. On United States and European external validation sets, the algorithms achieved agreements of 0.862 (quadratically weighted k, 95% confidence interval (CI), 0.840-0.884) and 0.868 (95% CI, 0.835-0.900) with expert uropathologists. 
Successful generalization across different patient populations, laboratories and reference standards, achieved by a variety of algorithmic approaches, warrants evaluating AI-based Gleason grading in prospective clinical trials.}, + file = {:pdf/Bult22.pdf:PDF}, + journal = NATMED, + month = jan, + optnote = {DIAG, PATHOLOGY}, + pmid = {35027755}, + year = {2022}, + ss_id = {551a32d6bb196127b75d256e8547b81ef67a7ad3}, + all_ss_ids = {['551a32d6bb196127b75d256e8547b81ef67a7ad3']}, + gscites = {141}, +} + +@phdthesis{Bult22a, + author = {Wouter Bulten}, + title = {Artificial intelligence as a digital fellow in pathology: Human-machine synergy for improved prostate cancer diagnosis}, + year = {2022}, + url = {https://repository.ubn.ru.nl/handle/2066/241550}, + abstract = {The histological grading of prostate biopsies is a crucial element in the diagnostic pathway of prostate cancer. The known high inter- and intraobserver variability show potential and a need for assisting pathologists in this task. Furthermore, a global shortage of pathologists stresses the demand for reproducible, more efficient, and easily accessible diagnostic solutions. This thesis's primary aim was to investigate and design an AI-based system to detect and grade prostate cancer in biopsies. A second aim was to evaluate the potential clinical merits of AI-assisted grading when such systems are embedded in the pathologist's workflow. To this extent, the following objectives were undertaken as part of this thesis: + + 1. The development of an automated system that can distinguish epithelial tissue from other tissue types within H&E stained prostate specimens (Chapter 2); + 2. The development and validation of an automated system for grading prostate biopsies using the Gleason grading system (Chapter 3); + 3. A multi-center independent evaluation of state-of-the-art algorithms for automated Gleason grading sourced through a large-scale medical AI competition (Chapter 4); + 4. The investigation of the potential merits of AI-assisted grading of prostate cancer through an observer study (Chapter 5).}, + copromotor = {G. Litjens and C. Hulsbergen-van de Kaa}, + file = {Bult22a.pdf:pdf\\Bult22a.pdf:PDF}, + optnote = {DIAG}, + promotor = {J.A.W.M. van der Laak and B. van Ginneken}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Busc16, + author = {Busch, Robert and Han, MeiLan K and Bowler, Russell P and Dransfield, Mark T and Wells, J Michael and Regan, Elizabeth A and Hersh, Craig P and {COPDGene Investigators}}, + title = {Risk factors for COPD exacerbations in inhaled medication users: the COPDGene study biannual longitudinal follow-up prospective cohort}, + journal = BMCPM, + year = {2016}, + volume = {16}, + pages = {28}, + doi = {10.1186/s12890-016-0191-7}, + url = {http://dx.doi.org/10.1186/s12890-016-0191-7}, + abstract = {Despite inhaled medications that decrease exacerbation risk, some COPD patients experience frequent exacerbations. We determined prospective risk factors for exacerbations among subjects in the COPDGene Study taking inhaled medications. 2113 COPD subjects were categorized into four medication use patterns: triple therapy with tiotropium (TIO) plus long-acting beta-agonist/inhaled-corticosteroid (ICS), tiotropium alone, ICS, and short-acting bronchodilators. Self-reported exacerbations were recorded in telephone and web-based longitudinal follow-up surveys.
Associations with exacerbations were determined within each medication group using four separate logistic regression models. A head-to-head analysis compared exacerbation risk among subjects using tiotropium vs. ICS. In separate logistic regression models, the presence of gastroesophageal reflux, female gender, and higher scores on the St. George's Respiratory Questionnaire were significant predictors of exacerbator status within multiple medication groups (reflux: OR 1.62-2.75; female gender: OR 1.53 - OR 1.90; SGRQ: OR 1.02-1.03). Subjects taking either ICS or tiotropium had similar baseline characteristics, allowing comparison between these two groups. In the head-to-head comparison, tiotropium users showed a trend towards lower rates of exacerbations (OR=0.69 [95 \% CI 0.45, 1.06], p=0.09) compared with ICS users, especially in subjects without comorbid asthma (OR=0.56 [95\% CI 0.31, 1.00], p=0.05). Each common COPD medication usage group showed unique risk factor patterns associated with increased risk of exacerbations, which may help clinicians identify subjects at risk. Compared to similar subjects using ICS, those taking tiotropium showed a trend towards reduced exacerbation risk, especially in subjects without asthma. ClinicalTrials.gov NCT00608764, first received 1/28/2008.}, + file = {Busc16.pdf:pdf\\Busc16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26861867}, + month = {2}, +} + +@inproceedings{Caba18a, + author = {Caballo, M. and Teuwen, J. and Mann, R. and Sechopoulos, I.}, + title = {Breast parenchyma analysis and classification for breast masses detection using texture feature descriptors and neural networks in dedicated breast CT images}, + booktitle = MI, + year = {2019}, + series = SPIE, + doi = {10.1117/12.2511718}, + abstract = {We propose a method to identify breast parenchyma regions containing mass-like abnormalities in dedicated breast CT images using texture feature descriptors. From 53 complete patient breast CT scans (29 of which containing masses), we first isolated the breast parenchyma through automatic segmentation, and we collected a total of 14,751 normal 2-D image patches (negatives), and 525 containing a breast mass (positives). We extracted 141 texture features (10 first-order descriptors, 6 Haralick features, 20 run-length features, 45 structural and pattern descriptors, 60 Gabor features), which we then analyzed through multivariate analysis of variance (MANOVA) and linear discriminant analysis, and which resulted in an area under the curve (AUC) of 0.92. We finally trained and validated a neural network using these data, dividing them into multiple batches, with each batch always containing the whole set of positive cases, and as many different negative examples. To avoid the possible bias due to the high skewness in class proportion, the training was performed on all these batches independently, without re-initializing the network weights after each training. The network was tested using an additional independent 18 patient breast CT scans (8 normal and 10 containing a mass), on a total of 6,635 image patches (213 positives, 6,422 negatives) which were not used during the training/validation phase, and resulted in a precision of 94.8%, a recall of 93.9%, and an AUC of 0.99.
Our results suggest that the proposed approach could be further evaluated and expanded for computer-aided detection tasks in breast CT imaging.}, + file = {Caba18a.pdf:pdf\\Caba18a.pdf:PDF}, + optnote = {AXTI, DIAG, RADIOLOGY}, + month = {3}, + ss_id = {26b1aec641be34aa1a7a57362b04bd187ee75aa2}, + all_ss_ids = {['26b1aec641be34aa1a7a57362b04bd187ee75aa2']}, + gscites = {5}, +} + +@article{Caba21, + author = {Caballo, Marco and Hernandez, Andrew M. and Lyu, Su Hyun and Teuwen, Jonas and Mann, Ritse M. and van Ginneken, Bram and Boone, John M. and Sechopoulos, Ioannis}, + title = {Computer-aided diagnosis of masses in breast computed tomography imaging: deep learning model with combined handcrafted and convolutional radiomic features}, + doi = {10.1117/1.jmi.8.2.024501}, + year = {2021}, + abstract = {Purpose: A computer-aided diagnosis (CADx) system for breast masses is proposed, which incorporates both handcrafted and convolutional radiomic features embedded into a single deep learning model. + Approach: The model combines handcrafted and convolutional radiomic signatures into a multi-view architecture, which retrieves three-dimensional (3D) image information by simultaneously processing multiple two-dimensional mass patches extracted along different planes through the 3D mass volume. Each patch is processed by a stream composed of two concatenated parallel branches: a multi-layer perceptron fed with automatically extracted handcrafted radiomic features, and a convolutional neural network, for which discriminant features are learned from the input patches. All streams are then concatenated together into a final architecture, where all network weights are shared and the learning occurs simultaneously for each stream and branch. The CADx system was developed and tested for diagnosis of breast masses (N = 284) using image datasets acquired with independent dedicated breast computed tomography systems from two different institutions. The diagnostic classification performance of the CADx system was compared against other machine and deep learning architectures adopting handcrafted and convolutional approaches, and three board-certified breast radiologists. + Results: On a test set of 82 masses (45 benign, 37 malignant), the proposed CADx system performed better than all other model architectures evaluated, with an increase in the area under the receiver operating characteristics curve (AUC) of 0.05 +- 0.02, and achieving a final AUC of 0.947, outperforming the three radiologists (AUC = 0.814 - 0.902). + Conclusions: In conclusion, the system demonstrated its potential usefulness in breast cancer diagnosis by improving mass malignancy assessment.}, + url = {http://dx.doi.org/10.1117/1.JMI.8.2.024501}, + file = {Caba21.pdf:pdf\Caba21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Medical Imaging}, + citation-count = {4}, + automatic = {yes}, + volume = {8}, +} + +@inproceedings{Call19, + title = {Handling label noise through model confidence and uncertainty: application to chest radiograph classification}, + author = {E. Calli and E. Sogancioglu and E. Th. Scholten and K. Murphy and B. van Ginneken}, + booktitle = MI, + year = {2019}, + series = SPIE, + abstract = {In this work we analyze the effect of label noise in training and test data when performing classification experi- + ments on chest radiographs (CXRs) with modern deep learning architectures. We use ChestXRay14, the largest + publicly available CXR dataset.
We simulate situs inversus by horizontal flipping of the CXRs, allowing us to + precisely control the amount of label noise. We also perform experiments in classifying emphysema using the + ChestXRay14 provided labels that are known to be noisy. Our situs inversus experiments confirm results from + the computer vision literature that deep learning architectures are relatively robust but not completely insensi- + tive to label noise in the training data: without or with very low noise, classification results are near perfect; 16% + and 32% training label noise only lead to a 1.5% and 4.6% drop in accuracy. We investigate two metrics that + could be used to identify test samples that have an incorrect label: model confidence and model uncertainty. We + show, in an observer study with an experienced chest radiologist, that both measures are effective in identifying + samples in ChestXRay14 that are erroneously labeled for the presence of emphysema.}, + file = {Call19.pdf:pdf\\Call19.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + doi = {10.1117/12.2514290}, + month = {3}, + gsid = {1458347346763809084}, + gscites = {16}, + ss_id = {58662a2da377ad2a9b250b514128988009a2a0b3}, + all_ss_ids = {['58662a2da377ad2a9b250b514128988009a2a0b3']}, +} + +@inproceedings{Call19b, + title = {FRODO: Free rejection of out-of-distribution samples: application to chest x-ray analysis}, + author = {E. Calli and K. Murphy and E. Sogancioglu and B. van Ginneken}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=H1e7kWD794}, + abstract = {In this work, we propose a method to reject out-of-distribution samples which can be adapted to any network architecture and requires no additional training data. Publicly available chest x-ray data (38,353 images) is used to train a standard ResNet-50 model to detect emphysema. Feature activations of intermediate layers are used as descriptors defining the training data distribution. A novel metric, FRODO, is measured by using the Mahalanobis distance of a new test sample to the training data distribution. The method is tested using a held-out test dataset of 21,176 chest x-rays (in-distribution) and a set of 14,821 out-of-distribution x-ray images of incorrect orientation or anatomy. In classifying test samples as in or out-of distribution, our method achieves an AUC score of 0.99.}, + file = {:pdf/Call19b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13448494044754660685}, + gscites = {16}, + ss_id = {45c2090f06bae6402a0b0d4c77c5f5ee3fd7a513}, + all_ss_ids = {['45c2090f06bae6402a0b0d4c77c5f5ee3fd7a513']}, +} + +@article{Call21, + author = {E. Calli and K. Murphy and S. Kurstjens and T. Samson and R. Herpers and H. Smits and M. Rutten and B. van Ginneken}, + title = {Deep learning with robustness to missing data: {A} novel approach to the detection of {COVID}-19}, + doi = {10.1371/journal.pone.0255301}, + number = {7}, + pages = {e0255301}, + url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0255301}, + volume = {16}, + abstract = {In the context of the current global pandemic and the limitations of the RT-PCR test, we propose a novel deep learning architecture, DFCN (Denoising Fully Connected Network). Since medical facilities around the world differ enormously in what laboratory tests or chest imaging may be available, DFCN is designed to be robust to missing input data. An ablation study extensively evaluates the performance benefits of the DFCN as well as its robustness to missing inputs.
Data from 1088 patients with confirmed RT-PCR results are obtained from two independent medical facilities. The data includes results from 27 laboratory tests and a chest x-ray scored by a deep learning model. Training and test datasets are taken from different medical facilities. Data is made publicly available. The performance of DFCN in predicting the RT-PCR result is compared with 3 related architectures as well as a Random Forest baseline. All models are trained with varying levels of masked input data to encourage robustness to missing inputs. Missing data is simulated at test time by masking inputs randomly. DFCN outperforms all other models with statistical significance using random subsets of input data with 2-27 available inputs. When all 28 inputs are available DFCN obtains an AUC of 0.924, higher than any other model. Furthermore, with clinically meaningful subsets of parameters consisting of just 6 and 7 inputs respectively, DFCN achieves higher AUCs than any other model, with values of 0.909 and 0.919.}, + file = {:Call21.pdf:PDF}, + journal = PLOSONE, + publisher = {Public Library of Science}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/238626}, + ss_id = {58f03af16be931f9a56c8499ed30efbb9b9c3592}, + all_ss_ids = {['58f03af16be931f9a56c8499ed30efbb9b9c3592']}, + gscites = {2}, +} + +@article{Call21a, + author = {E. Calli and E. Sogancioglu and B. van Ginneken and K. van Leeuwen and K. Murphy}, + title = {Deep learning for chest {X}-ray analysis: {A} survey}, + doi = {10.1016/j.media.2021.102125}, + pages = {102125}, + volume = {72}, + abstract = {Recent advances in deep learning have led to a promising performance in many medical image analysis tasks. As the most commonly performed radiological exam, chest radiographs are a particularly important modality for which a variety of applications have been researched. The release of multiple, large, publicly available chest X-ray datasets in recent years has encouraged research interest and boosted the number of publications. In this paper, we review all studies using deep learning on chest radiographs published before March 2021, categorizing works by task: image-level prediction (classification and regression), segmentation, localization, image generation and domain adaptation. Detailed descriptions of all publicly available datasets are included and commercial systems in the field are described. A comprehensive discussion of the current state of the art is provided, including caveats on the use of public datasets, the requirements of clinically useful systems and gaps in the current literature.}, + file = {:Call21a.html:URL}, + journal = MIA, + pmid = {34171622}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235735}, + ss_id = {6cc01a49101001e1a490645e941b3d2311dd4265}, + all_ss_ids = {['6cc01a49101001e1a490645e941b3d2311dd4265']}, + gscites = {168}, +} + +@article{Call22, + author = {Calli, Erdi and Murphy, Keelin and Scholten, Ernst T. and Schalekamp, Steven and van Ginneken, Bram}, + title = {Explainable emphysema detection on chest radiographs with deep learning}, + doi = {10.1371/journal.pone.0267539}, + number = {7}, + pages = {e0267539}, + volume = {17}, + abstract = {We propose a deep learning system to automatically detect four explainable emphysema signs on frontal and lateral chest radiographs. Frontal and lateral chest radiographs from 3000 studies were retrospectively collected. 
Two radiologists annotated these with 4 radiological signs of pulmonary emphysema identified from the literature. A patient with >=2 of these signs present is considered emphysema positive. Using separate deep learning systems for frontal and lateral images we predict the presence of each of the four visual signs and use these to determine emphysema positivity. The ROC and AUC results on a set of 422 held-out cases, labeled by both radiologists, are reported. Comparison with a black-box model which predicts emphysema without the use of explainable visual features is made on the annotations from both radiologists, as well as the subset that they agreed on. DeLong's test is used to compare with the black-box model ROC and McNemar's test to compare with radiologist performance. In 422 test cases, emphysema positivity was predicted with AUCs of 0.924 and 0.946 using the reference standard from each radiologist separately. Setting model sensitivity equivalent to that of the second radiologist, our model has a comparable specificity (p = 0.880 and p = 0.143 for each radiologist respectively). Our method is comparable with the black-box model with AUCs of 0.915 (p = 0.407) and 0.935 (p = 0.291), respectively. On the 370 cases where both radiologists agreed (53 positives), our model achieves an AUC of 0.981, again comparable to the black-box model AUC of 0.972 (p = 0.289). Our proposed method can predict emphysema positivity on chest radiographs as well as a radiologist or a comparable black-box method. It additionally produces labels for four visual signs to ensure the explainability of the result. The dataset is publicly available at https://doi.org/10.5281/zenodo.6373392.}, + file = {PubMed entry:http\://www.ncbi.nlm.nih.gov/pubmed/35900979:text/html}, + journal = PLOSONE, + pmid = {35900979}, + year = {2022}, + ss_id = {e6376c22784f8360aa3d054fbfecb113265c032a}, + all_ss_ids = {['e6376c22784f8360aa3d054fbfecb113265c032a']}, + gscites = {1}, +} + +@article{Call22a, + author = {Calli, Erdi and Van Ginneken, Bram and Sogancioglu, Ecem and Murphy, Keelin}, + title = {FRODO: An in-depth analysis of a system to reject outlier samples from a trained neural network.}, + doi = {10.1109/TMI.2022.3221898}, + volume = {PP}, + abstract = {An important limitation of state-of-the-art deep learning networks is that they do not recognize when their input is dissimilar to the data on which they were trained and proceed to produce outputs that will be unreliable or nonsensical. In this work, we describe FRODO (Free Rejection of Out-of-Distribution), a publicly available method that can be easily employed for any trained network to detect input data from a different distribution than is expected. FRODO uses the statistical distribution of intermediate layer outputs to define the expected in-distribution (ID) input image properties. New samples are judged based on the Mahalanobis distance (MD) of their layer outputs from the defined distribution. The method can be applied to any network, and we demonstrate the performance of FRODO in correctly rejecting OOD samples on three distinct architectures for classification, localization, and segmentation tasks in chest X-rays. A dataset of 21,576 X-ray images with 3,655 in-distribution samples is defined for testing. The remaining images are divided into four OOD categories of varying levels of difficulty, and performance at rejecting each type is evaluated using receiver operating characteristic (ROC) analysis. 
FRODO achieves areas under the ROC (AUC) of between 0.815 and 0.999 in distinguishing OOD samples of different types. This is shown to be comparable with the best-performing state-of-the-art method tested, with the substantial advantage that FRODO integrates seamlessly with any network and requires no extra model to be constructed and trained.}, + file = {Call22a.pdf:pdf\\Call22a.pdf:PDF}, + journal = {IEEE transactions on medical imaging}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36374875}, + year = {2022}, + ss_id = {5cf064abf52e73426c32a6c4725f0de26e0c1c5a}, + all_ss_ids = {['5cf064abf52e73426c32a6c4725f0de26e0c1c5a']}, + gscites = {4}, +} + +@phdthesis{Call23, + author = {Erdi \c{C}all\i}, + title = {Deep learning methods towards clinically applicable Chest X-ray interpretation systems}, + url = {https://repository.ubn.ru.nl/handle/2066/292983}, + abstract = {Chest X-Ray is a very commonly acquired, low-cost imaging examination which is sensitive for multiple pathologies while exposing the patient to very little ionizing radiation. The number of medical imaging examinations being acquired increases year-on-year while there is a global shortage of radiologists qualified to interpret the images. This research presents findings on the use of deep learning for interpretation of chest X-Ray images. An in-depth review of the current state-of-the-art is provided as well as investigation of handling of label noise, outlier detection, explainable identification of emphysema and detection of COVID-19 with robustness to missing data.}, + copromotor = {K. Murphy}, + file = {:pdf/Calli23.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken}, + school = {Radboud University, Nijmegen}, + year = {2023}, +} + +@book{Cana23, + author = {Canalini, Luca and Klein, Jan and Gerken, Annika and Heldmann, Stefan and Hering, Alessa and Hahn, Horst K.}, + title = {Iterative Method to Register Longitudinal MRI Acquisitions in Neurosurgical Context}, + doi = {10.1007/978-3-031-33842-7_23}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-031-33842-7_23}, + file = {Cana23.pdf:pdf\Cana23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries}, + citation-count = {0}, + automatic = {yes}, + pages = {262-272}, +} + +@article{Carr11, + author = {Carrillo, Xavier and Fernandez-Nofrerias, Eduard and Ciompi, Francesco and Rodriguez-Leor, Oriol and Radeva, Petia and Salvatella, Neus and Pujol, Oriol and Mauri, Josepa and Bayes-Genis, Antoni}, + title = {Changes in radial artery volume assessed using intravascular ultrasound: a comparison of two vasodilator regimens in transradial coronary interventions}, + journal = JINVC, + year = {2011}, + volume = {23}, + pages = {401--404}, + pmid = {21972156}, + url = {http://europepmc.org/abstract/MED/21972156}, + abstract = {OBJECTIVES: This study used intravascular ultrasound (IVUS) to evaluate radial artery volume changes after intraarterial administration of nitroglycerin and/or verapamil. BACKGROUND: Radial artery spasm, which is associated with radial artery size, is the main limitation of the transradial approach in percutaneous coronary interventions (PCI). METHODS: This prospective, randomized study compared the effect of two intra-arterial vasodilator regimens on radial artery volume: 0.2 mg of nitroglycerin plus 2.5 mg of verapamil (Group 1; n = 15) versus 2.5 mg of verapamil alone (Group 2; n = 15). 
Radial artery lumen volume was assessed using IVUS at two time points: at baseline (5 minutes after sheath insertion) and post-vasodilator (1 minute after drug administration). The luminal volume of the radial artery was computed using ECOC Random Fields (ECOC-RF), a technique used for automatic segmentation of luminal borders in longitudinal cut images from IVUS sequences. RESULTS: There was a significant increase in arterial lumen volume in both groups, with an increase from 451 +- 177 mm3 to 508 +- 192 mm3 (p = 0.001) in Group 1 and from 456 +- 188 mm3 to 509 +- 170 mm3 (p = 0.001) in Group 2. There were no significant differences between the groups in terms of absolute volume increase (58 mm3 versus 53 mm3, respectively; p = 0.65) or in relative volume increase (14% versus 20%, respectively; p = 0.69). CONCLUSIONS: Administration of nitroglycerin plus verapamil or verapamil alone to the radial artery resulted in similar increases in arterial lumen volume according to ECOC-RF IVUS measurements.}, + file = {Carr11.pdf:pdf\\Carr11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + gsid = {7010776494810916960,7948323122588209355}, + gscites = {20}, +} + +@article{Cast17, + author = {Castells-Nobau, Anna and Nijhof, Bonnie and Eidhof, Ilse and Wolf, Louis and Scheffer-de Gooyert, Jolanda M. and Monedero, Ignacio and Torroja, Laura and van der Laak, Jeroen A. W. M. and Schenck, Annette}, + title = {Two Algorithms for High-throughput and Multi-parametric Quantification of Drosophila Neuromuscular Junction Morphology}, + doi = {10.3791/55395}, + issue = {e55395}, + pages = {1-13}, + url = {https://www.jove.com/video/55395}, + volume = {123}, + abstract = {Synaptic morphology is tightly related to synaptic efficacy, and in many cases morphological synapse defects ultimately lead to synaptic malfunction. The Drosophila larval neuromuscular junction (NMJ), a well-established model for glutamatergic synapses, has been extensively studied for decades. Identification of mutations causing NMJ morphological defects revealed a repertoire of genes that regulate synapse development and function. Many of these were identified in large-scale studies that focused on qualitative approaches to detect morphological abnormalities of the Drosophila NMJ. A drawback of qualitative analyses is that many subtle players contributing to NMJ morphology likely remain unnoticed. Whereas quantitative analyses are required to detect the subtler morphological differences, such analyses are not yet commonly performed because they are laborious. This protocol describes in detail two image analysis algorithms "Drosophila NMJ Morphometrics" and "Drosophila NMJ Bouton Morphometrics", available as Fiji-compatible macros, for quantitative, accurate and objective morphometric analysis of the Drosophila NMJ. This methodology is developed to analyze NMJ terminals immunolabeled with the commonly used markers Dlg-1 and Brp. Additionally, its wider application to other markers such as Hrp, Csp and Syt is presented in this protocol.
The macros are able to assess nine morphological NMJ features: NMJ area, NMJ perimeter, number of boutons, NMJ length, NMJ longest branch length, number of islands, number of branches, number of branching points and number of active zones in the NMJ terminal.}, + file = {Cast17.pdf:pdf\\Cast17.pdf:PDF}, + journal = JOVE, + pmid = {28518121}, + month = {5}, + optnote = {DIAG}, + year = {2017}, + gsid = {17547073303920483171}, + gscites = {10}, + ss_id = {d20888ebb2f710492747031f387c4efce4f2c2dd}, + all_ss_ids = {['d20888ebb2f710492747031f387c4efce4f2c2dd']}, +} + +@article{Cele20, + author = {Csilla Celeng and Richard A.P. Takx and Nikolas Lessmann and P{\'{a}}l Maurovich-Horvat and Tim Leiner and Ivana I{\v{s}}gum and Pim A. de Jong}, + title = {The association between marital status, coronary computed tomography imaging biomarkers, and mortality in a lung cancer screening population}, + journal = JTI, + year = {2020}, + volume = {35}, + pages = {204--209}, + doi = {10.1097/rti.0000000000000457}, + pmid = {31651690}, + optnote = {DIAG, RADIOLOGY}, + file = {Cele20.pdf:pdf\\Cele20.pdf:PDF}, + ss_id = {b8c7e6fcff579b9332541d2cef25b6960906096e}, + all_ss_ids = {['b8c7e6fcff579b9332541d2cef25b6960906096e']}, + gscites = {8}, +} + +@inproceedings{Char13, + author = {Charbonnier, JP. and Smit, E. J. and Viergever, M. A. and Velthuis, B. K. and Vos, P. C.}, + title = {Computer-aided diagnosis of acute ischemic stroke based on cerebral hypoperfusion using 4{D} {CT} angiography}, + booktitle = MI, + year = {2013}, + volume = {8670}, + series = SPIE, + doi = {10.1117/12.2007735}, + file = {:pdf\\Char13.pdf:PDF}, + abstract = {The presence of collateral blood flow is found to be a strong predictor of patient outcome after acute ischemic stroke. Collateral blood flow is defined as an alternative way to provide oxygenated blood to ischemic cerebral tissue. Assessment of collateral blood supply is currently performed by visual inspection of a Computed Tomography Angiogram (CTA) which introduces inter-observer variability and depends on the grading scale. Furthermore, variations in the arterial contrast arrival time may lead to underestimation of collateral blood supply in a CTA which exerts a negative influence on the prediction of patient outcome. In this study, the feasibility of a Computer-aided Diagnosis system is investigated capable of objectively predicting patient outcome. We present a novel automatic method for quantitative assessment of cerebral hypoperfusion in timing-invariant (i.e. delay insensitive) CTA (TI-CTA). The proposed Vessel Density Symmetry algorithm automatically generates descriptive maps based on hemispheric asymmetry of blood vessels. Intensity and symmetry based features are extracted from these descriptive maps and subjected to a best-first-search feature selection. Linear Discriminant Analysis is performed to combine selected features into a likelihood of good patient outcome. Receiver operating characteristic (ROC) analysis is conducted to evaluate the diagnostic performance of the CAD by leave-one- patient-out cross validation. A Positive Predicting Value of 1 was obtained at a sensitivity of 25% with an area under the ROC-curve of 0.86. The results show that the CAD is feasible to objectively predict patient outcome. The presented CAD could make an important contribution to acute ischemic stroke diagnosis and treatment.}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, +} + +@article{Char15, + author = {Charbonnier, JP. and Brink, M. and Ciompi, F. and Scholten, E. 
and Schaefer-Prokop, C. and {van Rikxoort}, E.}, + title = {Automatic Pulmonary Artery-Vein Separation and Classification in Computed Tomography Using Tree Partitioning and Peripheral Vessel Matching}, + journal = TMI, + year = {2016}, + month = {11}, + pages = {882-892}, + doi = {10.1109/TMI.2015.2500279}, + url = {http://dx.doi.org/10.1109/TMI.2015.2500279}, + abstract = {We present a method for automatic separation and classification of pulmonary arteries and veins in computed tomography. Our method takes advantage of local information to separate segmented vessels, and global information to perform the artery-vein classification. Given a vessel segmentation, a geometric graph is constructed that represents both the topology and the spatial distribution of the vessels. All nodes in the geometric graph where arteries and veins are potentially merged are identified based on graph pruning and individual branching patterns. At the identified nodes, the graph is split into subgraphs that each contain only arteries or veins. Based on the anatomical information that arteries and veins approach a common alveolar sag, an arterial subgraph is expected to be intertwined with a venous subgraph in the periphery of the lung. This relationship is quantified using periphery matching and is used to group subgraphs of the same artery-vein class. Artery-vein classification is performed on these grouped subgraphs based on the volumetric difference between arteries and veins. A quantitative evaluation was performed on 55 publicly available non-contrast {CT} scans. In all scans, two observers manually annotated randomly selected vessels as artery or vein. Our method was able to separate and classify arteries and veins with a median accuracy of 89\%, closely approximating the inter-observer agreement. All {CT} scans used in this study, including all results of our system and all manual annotations, are publicly available at http://arteryvein.grand-challenge.org.}, + file = {Char15.pdf:pdf\\Char15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26584489}, + gsid = {7303540606188063974}, + gscites = {51}, + ss_id = {4db42d98f611269628eb8ddc4171c4a5f412506a}, + all_ss_ids = {['4db42d98f611269628eb8ddc4171c4a5f412506a']}, +} + +@conference{Char15a, + author = {Charbonnier, JP. and Hogeweg, L. and Kuhnigk, J. and Lynch, D. A. and {Van Rikxoort}, E. M.}, + title = {Sensitivity of airway wall thickness measurements: influence of small airways}, + booktitle = RSNA, + year = {2015}, + url = {http://rsna2015.rsna.org/program/}, + abstract = {PURPOSE + Changes in the morphology of the airways contributes to lung function impairment in chronic obstructive pulmonary disease (COPD). Measurements of airway morphology might be influenced by the quality of the airway segmentation. In this study we investigate the stability of a commonly used airway measurement (Pi10) from CT scans for varying segmentation depths of the airways. + METHOD AND MATERIALS + Inspiratory low-dose thoracic CT scans of 267 subjects, well distributed over GOLD stages, were selected for this study. Airways were automatically extracted by a state-of-the-art segmentation method and manually corrected to ensure a leakage free segmentation. Airway wall thickness quantification was performed in orthogonal cross-sections every 1mm throughout the entire airway tree using an intensity-integration technique which accounts for partial volume effects. 
Using regression on all cross-sectional measurements, airway morphology was expressed as the square root of wall area at airways with a perimeter of 10mm (Pi10). To determine the sensitivity of the Pi10 measurement to the length of the segmented airway tree, sensitivity analysis was performed on Pi10 by leaving-out wall measurements of the smallest airways and recalculating the Pi10. For each subject, Pi10 regression analysis was repeated excluding airways with a lumen perimeter below 6mm, 8mm or 10mm. The recalculated Pi10 measurements were compared to the baseline Pi10. + RESULTS + The segmented airway trees consisted for 55% of airways with lumen diameters below 10mm, 19% below 8mm, and 1% below 6mm. The average baseline Pi10 of all subjects was 2.43 +/- 0.56 (range [1.40, 4.36]), which corresponds to an average airway wall thickness (for an airway with a lumen perimeter of 10mm) of 0.52mm +/- 0.21mm. By excluding airways with a lumen perimeter below 6, 8 or 10mm from the regression analysis, absolute changes in Pi10 were 0.003 +/- 0.004 (0.11%), 0.035 +/- 0.023 (1.46%), and 0.107 +/- 0.087 (4.6%), respectively, corresponding to changes in airway wall thickness (at 10mm lumen perimeter) of 0.001, 0.013, and 0.039mm. + CONCLUSION + The commonly used Pi10 measurement to express airway morphology from a CT scan is insensitive to the exclusion of smaller airways in the computation. + CLINICAL RELEVANCE/APPLICATION + When expressing airway morphology as Pi10, there is no need to (manually) adjust automatic airway segmentation methods to include smaller airways in order to obtain an accurate Pi10 measurement.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Char15b, + author = {Charbonnier, JP. and Brink, M. and Ciompi, F. and Scholten, E. T. and Schaefer-Prokop, C. M. and {Van Rikxoort}, E. M.}, + title = {Automatic Separation and Classification of Arteries and Veins in Non-Contrast Thoracic {CT} Scans}, + booktitle = RSNA, + year = {2015}, + url = {http://rsna2015.rsna.org/program/}, + abstract = {PURPOSE + Automated classification of pulmonary arteries and veins in thoracic CT scans is an unsolved problem which is important for e.g. CAD of pulmonary embolisms and treatment planning. This study presents and validates a new anatomy-based method to automatically classify arteries and veins in non-contrast chest CT scans. + METHOD AND MATERIALS + A set of 55 full inspiration non-contrast low dose chest CT scans (16x0.75mm, 120-140kVp, 30mAs) with variable severity of emphysema and interstitial lung diseases, were taken from a lung cancer screening trial. In all state-of-the-art vessel segmentation algorithms, arteries and veins are attached at locations where they cross, since these algorithms are not designed to distinguish between bifurcating and crossing vessels. This method starts with automatic vessel segmentation, followed by pruning the vessel segmentation to detect locations that are inconsistent with the topology of a tree structure. By disconnecting the vessels at these locations, the vessel segmentation is separated into subtrees that fulfill a tree structure and are assumed to be of an arterial or venous label. Next, subtrees are grouped using anatomical knowledge that arterial and venous capillaries meet each other at the alveoli, which implies that the corresponding peripheral arteries and veins go towards similar regions. By analyzing the peripheral vessels in each subtree, subtrees of the same artery-vein label are grouped without knowing the actual label.
To extract the final artery-vein labels of the grouped subtrees, classification is performed using the fact that veins have an overall larger volume compared to arteries. For quantitative evaluation, two human observers manually labeled a total of 2750 randomly selected arteries and veins from all 55 scans. The accuracy and Cohen's kappa between the observers and between the method and observers were used for evaluation. + RESULTS + Inter-observer Cohen's kappa was 0.84 with 93% accuracy. The proposed method achieved a mean accuracy of 88% and a Cohen's kappa of 0.76. + CONCLUSION + A new concept for artery-vein separation and classification was presented that uses anatomical information from peripheral arteries and veins. The performance of the presented method closely approximated the inter-observer agreement. + CLINICAL RELEVANCE/APPLICATION + Automatic artery-vein classification is essential for investigating pulmonary hypertension, COPD and for improving CAD systems for pulmonary embolisms.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Char16, + author = {Charbonnier, JP. and Lynch, D. and Humphries, S. and Strand, M. and {van Rikxoort}, E.}, + title = {Relative Contributions of Quantitative CT Measures to Airflow Obstruction in Cigarette Smokers}, + booktitle = ATS, + year = {2016}, + abstract = {{RATIONALE:} + To evaluate the relative contributions of quantitative {CT} ({QCT}) measures of emphysema, air trapping, and airway wall thickening and narrowing, to airflow obstruction in cigarette smokers with and without chronic obstructive lung disease ({COPD}). + {METHOD:} + 2000 cigarette smokers participating in the {COPDG}ene study were evaluated, 818 subjects were excluded because of missing {QCT}. Thirona Lung Quantification software was used to extract {QCT} measures from inspiratory and expiratory {CT} scans for each subject, including emphysema (%{LAA}-950, defined as the percentage of low attenuation areas ({LAA}) below -950{HU} in inspiratory scans), gas trapping (%{LAA}-856, defined as the percentage of {LAA} below -856{HU} in expiration), and an index score for airway wall thickening and/or narrowing (Pi10, defined as the root of the wall area of a hypothetical airway of 10-mm internal perimeter). The evaluated spirometry measures included the ratio of forced expiratory volume in 1 second ({FEV}1) and forced vital capacity ({FVC}), and the predicted percentage of {FEV}1 ({FEV}1%-predicted). + {QCT} measures were correlated to {FEV}1/{FVC} and {FEV}1%-predicted using Pearson correlation. In addition, multiple linear regression analysis was used to evaluate the predicted value of the {QCT} measures on both {FEV}1/{FVC} and {FEV}1%-predicted. For these models, the spirometry measures were log10-transformed to ensure a distribution of residuals closer to normal. + {RESULTS:} + The 1183 subjects were divided over {GOLD} stages 0 to 4: 478, 100, 279, 143 and 47. 136 subjects were unclassified by {GOLD}. %{LAA}-950, %{LAA}-856, and Pi10 correlated significantly with both {FEV}1/{FVC} (p<0.0001, r= -0.758, r=-0.829, and r=-0.423, respectively) and {FEV}1%-predicted (p<0.0001, r= -0.628, r=-0.728, and r=-0.547, respectively). In the regression model for {FEV}1/{FVC}, the combination of the three {QCT} measures accounted for 74.5% of the variation in {FEV}1/{FVC}, with a relative contribution of 68.7% for %{LAA}-856, 3.1% for Pi10, and 2.6% for %{LAA}-950.
In the regression model for {FEV}1%-predicted, the combination of the three {QCT} measures accounted for 65.8% of the variation in {FEV}1%-predicted, with a relative contribution of 52.9% for %{LAA}-856, 11.6% for Pi10, and 1.3% for %{LAA}-950. + {CONCLUSION:} + Gas trapping and airway wall thickening and/or narrowing are the major contributors to airflow obstruction in cigarette smokers.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Char16a, + author = {Charbonnier, JP. and Pompe, E. and Moore, C. and Humphries, S. and {van Ginneken}, B. and Lynch, D. and Make, B. and {van Rikxoort}, E.}, + title = {Airway Dimensions in Current and Former smokers: an independent predictor of Airflow Obstruction and Respiratory Quality of Life in Cigarette Smokers}, + booktitle = RSNA, + year = {2016}, + abstract = {PURPOSE: We investigated the relationship between airway dimensions and airflow obstruction and respiratory quality of life in current and former cigarette smokers. METHOD AND MATERIALS: + Cigarette smokers were studied that enrolled in the COPDGene study. Spirometry assessment included forced expiratory volume in 1 sec (FEV1), forced vital capacity (FVC), % predicted FEV1 (FEV1%-p), % predicted FVC (FVC%-p), and peak expiratory flow (PEF). Respiratory quality of life was assessed by the St George's Respiratory Questionnaire (SGRQ) score and 6 Minute Walking Distance (SMWD). Inspiratory CT was available to extract the airways, the amount of emphysema, and the total lung capacity (TLC). Lumen perimeters and airway wall areas were automatically extracted perpendicular to the airways. Linear regression was performed on these measurements to calculate an index score of airway wall thickness, expressed as the square root of wall area at airways with a perimeter of 10mm (Pi10). Emphysema was defined as the percentage of low-attenuation area below -950 HU (LAA%-950). Multiple linear regression was used to determine the predictive value of Pi10 and smoking status on airflow obstruction and respiratory quality of life. An interaction was included in the model to investigate if the effect of Pi10 differed by smoking status. All models were adjusted for age, gender, body mass index, pack years, bronchodilator responsiveness, TLC, and LAA%-950. RESULTS: 1544 cigarette smokers (894 former smokers) were included, with a mean age of 60.7 +/- 8.9 years and a mean Pi10 of 2.23 +/- 0.57mm. Pi10 was significantly associated with all airflow obstruction and respiratory quality of life measures (all p<0.001). The interaction between Pi10 and smoking status was significant for all measures except FVC%-p (p=0.30) and SGRQ score (p=0.064). This indicates that the effect of Pi10 on FEV1%-p, PEF, FEV1/FVC and SMWD was significantly reduced in current smokers compared to former smokers. CONCLUSION: Pi10 independently contributes to airflow obstruction and respiratory quality of life. This effect is stronger in former smokers as compared to current smokers. CLINICAL RELEVANCE/APPLICATION: Pi10 is an independent marker for airflow obstruction and respiratory quality of life and may be more strongly associated with these outcomes in former smokers than current smokers.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Char16b, + author = {Charbonnier, JP. and Han, M. and Pompe, E. and Moore, C. and Humphries, S. and Lynch, D. and {van Ginneken}, B. and Make, B.
and {van Rikxoort}, E.}, + title = {CT-based models for prediction of chronic obstructed pulmonary disease and smoking-related morbidity in cigarette smokers}, + booktitle = RSNA, + year = {2016}, + abstract = {PURPOSE: To predict COPD and smoking-related morbidity in cigarette smokers using quantitative CT (QCT) measures. METHOD AND MATERIALS: 1544 subjects were included from the COPDGene study. COPD was defined by a ratio of forced expiratory volume in 1 sec. (FEV1) and forced vital capacity (FVC) < 0.7. Smoking-related morbidity was defined as FEV1/FVC < 0.70 with either a St George's Respiratory Questionnaire score >= 25 or an exacerbation frequency >= 2/year. On inspiratory CT, multiple cross-sectional lumen perimeters and airway wall areas were extracted from the airways. Using linear regression, airway wall thickness was defined as the square root of wall area of an airway with a perimeter of 10mm (Pi10). Total lung capacity (TLC) and emphysema were measured on inspiratory CT, where emphysema was defined as the % of low-attenuation areas (LAA%) < -950HU (LAA%-950). Air-trapping was defined on expiratory CT as LAA% < -856HU (LAA%-856). Six logistic regression models were fitted for both the prediction of COPD and smoking-related morbidity using a random subset of 761 subjects. Model 1 included only age, gender, BMI, pack years, smoking status, and TLC, while models 2 to 6 additionally included: LAA%-950 (model 2), LAA%-856 (model 3), Pi10 (model 4), LAA%-950 + Pi10 (model 5), and LAA%-950 + LAA%-856 + Pi10 (model 6). The models were validated on a separate set (810 subjects) using the area under the receiver operating curve (AUC). RESULTS: + The validation set consisted of 369 subjects with and 441 without COPD. QCT measures were independent predictors of COPD in all models (p<0.001), with AUC values for models 1 to 6 of 0.77, 0.85, 0.90, 0.87, 0.91, and 0.93, respectively. The validation set consisted of 216 subjects with and 594 without smoking-related morbidity. QCT measures were independent predictors of smoking-related morbidity in all models (p<0.001, except for LAA%-950 in model 5), with AUC values for models 1 to 6 of 0.72, 0.83, 0.87, 0.83, 0.88, and 0.89, respectively. CONCLUSION: LAA%-950, LAA%-856, and Pi10 are independent predictors of COPD and smoking-related morbidity. The model including only inspiratory QCT predictors has similar predictive value to the model that also includes expiratory air-trapping. CLINICAL RELEVANCE/APPLICATION: Since LAA%-950 and Pi10 can be readily extracted from inspiratory images, these measures may be useful to predict smoking related morbidity in lung cancer screening.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Char16c, + author = {Charbonnier, J.P. and {van Rikxoort}, E.M. and Setio, A.A.A. and Schaefer-Prokop, C. and {van Ginneken}, B. and Ciompi, F.}, + title = {Improving Airway Segmentation in Computed Tomography using Leak Detection with Convolutional Networks}, + journal = MIA, + year = {2017}, + volume = {36}, + pages = {52-60}, + doi = {10.1016/j.media.2016.11.001}, + url = {http://dx.doi.org/10.1016/j.media.2016.11.001}, + abstract = {We propose a novel method to improve airway segmentation in thoracic computed tomography (CT) by detecting and removing leaks. Leak detection is formulated as a classification problem, in which a convolutional network (ConvNet) is trained in a supervised fashion to perform the classification task.
In order to increase the segmented airway tree length, we take advantage of the fact that multiple segmentations can be extracted from a given airway segmentation algorithm by varying the parameters that influence the tree length and the amount of leaks. We propose a strategy in which the combination of these segmentations after removing leaks can increase the airway tree length while limiting the amount of leaks. This strategy therefore largely circumvents the need for parameter fine-tuning of a given airway segmentation algorithm. + + The ConvNet was trained and evaluated using a subset of inspiratory thoracic CT scans taken from the COPDGene study. Our method was validated on a separate independent set of the EXACT'09 challenge. We show that our method significantly improves the quality of a given leaky airway segmentation, achieving a higher sensitivity at a low false-positive rate compared to all the state-of-the-art methods that entered in EXACT'09, and approaching the performance of the combination of all of them.}, + file = {Char16c.pdf:pdf\\Char16c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27842236}, + publisher = {Elsevier {BV}}, + month = {2}, + gsid = {10119306392018768738}, + gscites = {83}, + ss_id = {d884880febbc8e6c5f86c1040c009174632569c4}, + all_ss_ids = {['d884880febbc8e6c5f86c1040c009174632569c4']}, +} + +@phdthesis{Char17, + author = {J. P. Charbonnier}, + title = {Segmentation \& quantification of airways and blood vessels in chest CT}, + year = {2017}, + url = {https://repository.ubn.ru.nl/bitstream/handle/2066/178922/178922.pdf?sequence=1}, + abstract = {This thesis describes several methods for automated CT analysis of pulmonary arteries, pulmonary veins, and airways.}, + copromotor = {E. M. van Rikxoort and F. Ciompi}, + file = {Char17.pdf:pdf/Char17.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken and C. M. Schaefer-Prokop}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Char18, + author = {Charbonnier, Jean-Paul and Chung, Kaman and Scholten, Ernst T and van Rikxoort, Eva M and Jacobs, Colin and Sverzellati, Nicola and Silva, Mario and Pastorino, Ugo and van Ginneken, Bram and Ciompi, Francesco}, + title = {Automatic segmentation of the solid core and enclosed vessels in subsolid pulmonary nodules}, + journal = NATSCIREP, + year = {2018}, + volume = {8}, + issue = {1}, + month = {1}, + pages = {646}, + doi = {10.1038/s41598-017-19101-3}, + abstract = {Subsolid pulmonary nodules are commonly encountered in lung cancer screening and clinical routine. Compared to other nodule types, subsolid nodules are associated with a higher malignancy probability for which the size and mass of the nodule and solid core are important indicators. However, reliably measuring these characteristics on computed tomography (CT) can be hampered by the presence of vessels encompassed by the nodule, since vessels have similar CT attenuation as solid cores. This can affect treatment decisions and patient management. We present a method based on voxel classification to automatically identify vessels and solid cores in given subsolid nodules on CT. Three experts validated our method on 170 screen-detected subsolid nodules from the Multicentric Italian Lung Disease trial. The agreement between the proposed method and the observers was substantial for vessel detection and moderate for solid core detection, which was similar to the inter-observer agreement.
We found a relatively high variability in the inter-observer agreement and low method-observer agreements for delineating the borders of vessels and solid cores, illustrating the difficulty of this task. However, 92.4% of the proposed vessel and 80.6% of the proposed solid core segmentations were labeled as usable in clinical practice by the majority of experts.}, + file = {Char18.pdf:pdf\\Char18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29330380}, + gsid = {8466773781330734644}, + gscites = {16}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/183711}, + ss_id = {3ed451d2830a2afcbbd254f0015eeafa796ba1b7}, + all_ss_ids = {['3ed451d2830a2afcbbd254f0015eeafa796ba1b7']}, +} + +@article{Char19, + author = {Jean-Paul Charbonnier and Esther Pompe and Camille Moore and Stephen Humphries and Bram van Ginneken and Barry Make and Elizabeth Regan and James D. Crapo and Eva M. van Rikxoort and David A. Lynch}, + title = {Airway wall thickening on {CT}: Relation to smoking status and severity of {COPD}}, + journal = RM, + year = {2019}, + volume = {146}, + pages = {36-41}, + doi = {10.1016/j.rmed.2018.11.014}, + abstract = {Airway wall thickening in cigarette smokers is thought to be a result of inflammatory changes and airway remodeling. This study investigates if CT-derived airway wall thickening associates to disease severity in smokers with and without COPD and if airway wall thickening is reversible by smoking cessation. We examined 2000 smokers and 46 never-smokers who returned for a 5-year follow-up visit in the COPDGene-study. Multivariable regression analyses were performed at visit 1 to associate airway wall thickness (expressed as Pi10) with percent predicted forced expiratory volume in 1 s (FEV1%-predicted), 6-min walking distance (6MWD), and St. George Respiratory Questionnaire (SGRQ). Longitudinal analyses were performed to assess the effect of smoking cessation on Pi10 using linear mixed models. A higher Pi10 was significantly associated with worse FEV1%-predicted, 6MWD, and SGRQ in all GOLD-stages. Longitudinal analyses showed that subjects that quit smoking significantly decreased in Pi10 (ΔPi10 = -0.18 mm, p < 0.001). Subjects that started smoking had a significant increase in Pi10 (ΔPi10 = 0.14 mm, p < 0.001). Pi10 is a clinically relevant biomarker of smoking-related airway injury in smokers with and without COPD. The change in Pi10 with change in smoking status suggests that it can quantify a reversible component of smoking-related airway inflammation.}, + file = {Char19.pdf:pdf\\Char19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30665516}, + month = {1}, + gsid = {2044101552450383649}, + gscites = {48}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/201027}, + ss_id = {4c1db92618a758e92b0964e7ed65ea7ec48af0f0}, + all_ss_ids = {['4c1db92618a758e92b0964e7ed65ea7ec48af0f0']}, +} + +@article{Chav93, + author = {Chavan, A. and Galanski, M. and Jandeleit, K. and Prokop, M. and Schaefer, C.}, + title = {The kissing balloons technique. Simultaneous dilatation of stenoses of branch arteries at the bifurcation of the renal artery}, + journal = ACTR, + year = {1993}, + volume = {34}, + pages = {486--488}, + doi = {10.1177/028418519303400512}, + abstract = {Whereas dilatation of a single stenosis of the main renal artery is a routine intervention, a narrowing of the renal artery at its bifurcation extending into the branch arteries poses a more complex problem.
The procedural risks of renal artery dilatation are compounded by the danger of occlusion of one of the branch arteries during maneuvers to dilate the other. Long-term results of surgical vascular reconstruction at a renal artery bifurcation are not satisfactory. Untreated, these stenoses lead to intractable hypertension and impairment or loss of renal function. We present a patient with hypertension and impaired function of the right kidney on account of a renal arterial bifurcation stenosis. Using a bilateral transfemoral arterial approach 2 balloon catheters were simultaneously placed into the origins of the renal artery branches and both arteries were successfully dilated by the kissing balloons technique. This method provides an elegant alternative to difficult surgery to save an endangered kidney.}, + file = {Chav93.pdf:pdf\\Chav93.pdf:PDF}, + optnote = {DIAG}, + number = {5}, + pmid = {8369186}, + month = {9}, + gsid = {13444770511480943963}, + gscites = {16}, +} + +@inproceedings{Chel22, + author = {Eduard Chelebian and Francesco Ciompi}, + booktitle = {Learning Meaningful Representations of Life, NeurIPS 2022}, + title = {Seeded iterative clustering for histology region identification}, + abstract = {Annotations are necessary to develop computer vision algorithms for histopathology, but dense annotations at a high resolution are often time-consuming to make. Deep learning models for segmentation are a way to alleviate the process, but require large amounts of training data, training times and computing power. To address these issues, we present seeded iterative clustering to produce a coarse segmentation densely and at the whole slide level. The algorithm uses precomputed representations as the clustering space and a limited amount of sparse interactive annotations as seeds to iteratively classify image patches. We obtain a fast and effective way of generating dense annotations for whole slide images and a framework that allows the comparison of neural network latent representations in the context of transfer learning.}, + file = {Chel22.pdf:pdf\\Chel22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, + ss_id = {892d4f3ee2cc9ff2dcf1c308fae68473dcf787d2}, + all_ss_ids = {['892d4f3ee2cc9ff2dcf1c308fae68473dcf787d2']}, + gscites = {0}, +} + +@article{Chen22, + author = {Chen, Yun-Fang and Vinayahalingam, Shankeeth and Berge, Stefaan and Liao, Yu-Fang and Maal, Thomas and Xi, Tong}, + title = {Is the pattern of mandibular asymmetry in mild craniofacial microsomia comparable to non-syndromic class II asymmetry?}, + doi = {10.1007/s00784-022-04429-6}, + issue = {6}, + pages = {4603--4613}, + volume = {26}, + abstract = {To compare the characteristics of mandibular asymmetry in patients with unilateral craniofacial microsomia (CFM) and class II asymmetry. Pretreatment cone-beam computed tomography of consecutive adults with Pruzansky-Kaban type I and IIA CFM (CFM group) was analyzed by 3D cephalometry. Fourteen mandibular landmarks and two dental landmarks were identified. The mandibular size and positional asymmetry were calculated by using landmark-based linear and volumetric measurements, in terms of asymmetry ratios (affected/non-affected side) and absolute differences (affected - non-affected side). Results were compared with non-syndromic class II with matched severity of chin deviation (Class II group). Statistical analyses included independent t test, paired t test, chi-square test, and ANOVA. 
CFM group (n, 21; mean age, 20.4 +/- 2.5 years) showed significantly larger size asymmetry in regions of mandibular body, ramus, and condyle compared to Class II group (n, 21; mean age, 27.8 +/- 5.9 years) (p < 0.05). The curvature of mandibular body was asymmetric in CFM. Regarding the positional asymmetry of mandibular body, while a comparable transverse shift and a negligible yaw rotation were found among the two groups, the roll rotation in CFM was significantly greater as well as the occlusal (6.06° vs. 4.17°) and mandibular (7.84° vs. 2.80°) plane cants (p < 0.05). Mild CFM showed significantly more severe size asymmetry and roll rotation in mandible than non-CFM class II asymmetry. To improve the mandibular size and positional asymmetry in CFM, adjunct hard tissue augmentation or reduction in addition to OGS orthodontics with a meticulous roll and yaw planning is compulsory, which is expected to be distinct from treating non-CFM class II asymmetry.}, + file = {Chen22.pdf:pdf\\Chen22.pdf:PDF}, + journal = {Clinical oral investigations}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35218426}, + year = {2022}, +} + +@article{Chle18, + author = {Chlebus, Grzegorz and Schenk, Andrea and Moltz, Jan Hendrik and van Ginneken, Bram and Hahn, Horst Karl and Meine, Hans}, + title = {Automatic liver tumor segmentation in {CT} with fully convolutional neural networks and object-based postprocessing}, + journal = NATSCIREP, + year = {2018}, + volume = {8}, + issue = {1}, + month = {10}, + pages = {15497}, + doi = {10.1038/s41598-018-33860-7}, + abstract = {Automatic liver tumor segmentation would have a big impact on liver therapy planning procedures and follow-up assessment, thanks to standardization and incorporation of full volumetric information. In this work, we develop a fully automatic method for liver tumor segmentation in CT images based on a 2D fully convolutional neural network with an object-based postprocessing step. We describe our experiments on the LiTS challenge training data set and evaluate segmentation and detection performance. Our proposed design cascading two models working on voxel- and object-level allowed for a significant reduction of false positive findings by 85% when compared with the raw neural network output. In comparison with the human performance, our approach achieves a similar segmentation quality for detected tumors (mean Dice 0.69 vs. 0.72), but is inferior in the detection performance (recall 63% vs. 92%).
Finally, we describe how we participated in the LiTS challenge and achieved state-of-the-art performance.}, + file = {Chle18.pdf:pdf\\Chle18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30341319}, + gsid = {1182212609291447328}, + gscites = {160}, + ss_id = {27cebb08aca785444166209593fc0ec740469b1b}, + all_ss_ids = {['27cebb08aca785444166209593fc0ec740469b1b']}, +} + +@article{Chle19, + author = {Chlebus, Grzegorz and Meine, Hans and Thoduka, Smita and Abolmaali, Nasreddin and van Ginneken, Bram and Hahn, Horst Karl and Schenk, Andrea}, + title = {Reducing inter-observer variability and interaction time of MR liver volumetry by combining automatic CNN-based liver segmentation and manual corrections}, + journal = PLOSONE, + year = {2019}, + volume = {14}, + issue = {5}, + pages = {e0217228}, + doi = {10.1371/journal.pone.0217228}, + abstract = {To compare manual corrections of liver masks produced by a fully automatic segmentation method based on convolutional neural networks (CNN) with manual routine segmentations in MR images in terms of inter-observer variability and interaction time. For testing, patient's precise reference segmentations that fulfill the quality requirements for liver surgery were manually created. One radiologist and two radiology residents were asked to provide manual routine segmentations. We used our automatic segmentation method Liver-Net to produce liver masks for the test cases and asked a radiologist assistant and one further resident to correct the automatic results. All observers were asked to measure their interaction time. Both manual routine and corrected segmentations were compared with the reference annotations. The manual routine segmentations achieved a mean Dice index of 0.95 and a mean relative error (RVE) of 4.7%. The quality of liver masks produced by the Liver-Net was on average 0.95 Dice and 4.5% RVE. Liver masks resulting from manual corrections of automatically generated segmentations compared to routine results led to a significantly lower inter-observer variability (mean per case absolute RVE difference across observers 0.69%) when compared to manual routine ones (2.75%). The mean interaction time was 2 min for manual corrections and 10 min for manual routine segmentations. The quality of automatic liver segmentations is on par with those from manual routines. Using automatic liver masks in the clinical workflow could lead to a reduction of segmentation time and a more consistent liver volume estimation across different observers.}, + file = {Chle19.pdf:pdf\\Chle19.pdf:PDF}, + optnote = {DIAG}, + pmid = {31107915}, + month = {5}, + gsid = {3883884374712278395}, + gscites = {38}, + ss_id = {5bdb1a2667cbf2ce2f6142f14218df298a9eb090}, + all_ss_ids = {['5bdb1a2667cbf2ce2f6142f14218df298a9eb090']}, +} + +@conference{Chle19a, + author = {Chlebus, Grzegorz and Humpire Mamani, Gabriel Efrain and Schenk, Andrea and van Ginneken, Bram and Meine, Hans}, + title = {Mimicking radiologists to improve the robustness of deep-learning based automatic liver segmentation}, + booktitle = RSNA, + year = {2019}, + abstract = {Radiologists delineating organ contours on a CT slice typically consider a couple of neighboring slices while taking into account the whole in-plane context in order to distinguish the organ boundary from surrounding structures. We present a new 3D deep-learning model that mimics the way radiologists interpret images on the example of liver segmentation. 
To evaluate its performance, the model is compared with a standard 3D neural network.}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Chle22, + author = {Grzegorz Chlebus}, + title = {Deep Learning-Based Segmentation in Multimodal Abdominal Imaging}, + url = {https://repository.ubn.ru.nl/handle/2066/251471}, + abstract = {This thesis is devoted to the applications of deep learning segmentation algorithms to multimodal abdominal imaging. It focuses on the segmentation of liver, prostate, and liver tumors in CT and MRI images. + It aims not only to propose and evaluate new segmentation architectures, but also to investigate aspects such as the required time for the correction of automatic segmentation results, the impact on the inter-observer variability, and the optimization of annotation effort. + The following objectives were undertaken as part of this thesis: + + 1. The development of a two-stage cascade system for liver and liver tumor segmentation in CT images (Chapter 2); + 2. The development of an ensemble of three orthogonal 2D CNNs for liver segmentation in late-phase T1W MRI images (Chapter 3); + 3. The investigation of various active learning strategies to optimally select a set of CT slices to obtain the best possible liver segmentation method in CT without the need to manually annotate a large amount of training data (Chapter 4); + 4. The development of a novel multi-planar 3D anisotropic CNN architecture for prostate segmentation in multi-planar T2W MRI images (Chapter 5).}, + copromotor = {H. Meine and A. Schenk}, + file = {Chle22.pdf:pdf/Chle22.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and H. K. Hahn}, + school = {Radboud University, Nijmegen}, + year = {2022}, +} + +@inproceedings{Chon11, + author = {D. Chong and E. M. van Rikxoort and H. J. Kim and J. G. Goldin and M. S. Brown}, + title = {Scan-rescan reproducibility of {CT} densitometric measures of emphysema}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {796339-1--796339-6}, + doi = {10.1117/12.878240}, + abstract = {{T}his study investigated the reproducibility of {HRCT} densitometric measures of emphysema in patients scanned twice one week apart. 24 emphysema patients from a multicenter study were scanned at full inspiration {(TLC)} and expiration {(RV)}, then again a week later for four scans total. Scans for each patient used the same scanner and protocol, except for tube current in three patients. Lung segmentation with gross airway removal was performed on the scans. Volume, weight, mean lung density {(MLD)}, relative area under -950{HU} ({RA}-950), and 15th percentile ({PD}-15) were calculated for {TLC}, and volume and an airtrapping mask ({RA}-air) between -950 and -850{HU} for {RV}. For each measure, absolute differences were computed for each scan pair, and linear regression was performed against volume difference in a subgroup with volume difference <500mL. Two {TLC} scan pairs were excluded due to segmentation failure. The mean lung volumes were 5802 +/- 1420m{L} for {TLC}, 3878 +/- 1077m{L} for {RV}. The mean absolute differences were 169m{L} for {TLC} volume, 316mL for {RV} volume, 14.5g for weight, 5.0{HU} for {MLD}, 0.66p.p. for {RA}-950, 2.4{HU} for {PD}-15, and 3.1p.p. for {RA}-air. The <500m{L} subgroup had 20 scan pairs for {TLC} and {RV}. The {R}2 values were 0.8 for weight, 0.60 for {MLD}, 0.29 for {RA}-950, 0.31 for {PD}-15, and 0.64 for {RA}-air. 
Our results indicate that considerable variability exists in densitometric measures over one week that cannot be attributed to breathhold or physiology. This has implications for clinical trials relying on these measures to assess emphysema treatment efficacy.}, + file = {Chon11.pdf:pdf\\Chon11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, +} + +@conference{Chon11a, + author = {D. Chong and H.J. Kim and E.M. van Rikxoort and M. Galperin and K. Yang and Y. Jung and M.F. McNitt-Gray and J.G. Goldin and M.S. Brown}, + title = {Reproducibility of densitometric measures of emphysema in computed tomography scans one week apart: the effect of breathhold and scanner calibration}, + booktitle = ATS, + year = {2011}, + abstract = {{RATIONALE:} {T}o examine the reproducibility of {H}igh-resolution {C}omputed {T}omography ({HRCT}) densitometric measures of emphysema in patients scanned twice one week apart, and to assess the effects of breathhold (measured by {CT} lung volume) and scanner calibration (measured by separate phantom scans) on reproducibility. {METHODS:} {CT} scans were obtained from our anonymized research database for 27 emphysema patients scanned at full inspiration on two separate visits (median 7 days, standard deviation 2.4 days). Patients were instructed on proper breathholding before scanning to maximize breathhold reproducibility. For each patient, the same scanner and protocol was used for both visits. An air and water phantom was scanned on the same scanner within 24 hours of each patient. For each scan, lung segmentation was performed including removal of gross airway structure. Lung volume ({V}), relative area below -950{HU} ({RA}950) and 15th percentile density ({PD}15) were computed, and mean {H}ounsfield {U}nit ({HU}) of water ({P}water) and air ({P}air) were obtained from the corresponding phantom scan. Difference between visits was computed for each quantity ({ΔV}, {ΔRA}950, {ΔPD}15, {ΔP}water, {ΔP}air), and {B}land-{A}ltman analysis was used to assess reproducibility for {V}, {RA}950, and {PD}15. This analysis was repeated for a subgroup with {|ΔV|}<250mL (representing excellent breathhold reproducibility). Next, linear regression was performed for {ΔRA}950 versus {ΔV}, {ΔP}water, and {ΔP}air, and also for {ΔPD}15 versus {ΔV}, {ΔP}water, and {ΔP}air. {RESULTS:} {T}able 1 summarizes the quantities in this study. Table 2 displays the results of {B}land-{A}ltman analysis. {R}eproducibility of breathhold and {RA}950 were high, with limits of agreement within 500mL and 1.5 percentage points (p.p.), respectively. {R}eproducibility for {PD}15 was poorer overall but improved noticeably in the <250mL subgroup, from 7 to 3{HU}. This effect was not observed for {RA}950. For {ΔRA}950, linear regression revealed an {R^2} of 0.43, 0.00, and 0.00 versus {ΔV}, {ΔP}water, and {ΔP}air, respectively. {ΔPD}15 exhibited an {R^2} of 0.74, 0.03, and 0.01 versus {ΔV}, {ΔP}water, and {ΔP}air. Linear regression against {ΔV} is displayed in {F}igures 1 and 2. {CONCLUSION:} This study consists of a population of patients with high breathhold reproducibility (within 500m{L}) imaged on scanners with good calibration. Under these conditions, our data suggests that the precision of {RA}950 is within 1.5p.p. {PD}15 demonstrated greater sensitivity to breathhold, and a precision of 3{HU} was achieved by requiring breathhold reproducibility of 250m{L}.
Our analysis revealed no relationship between scanner calibration and density measures, although this finding may change for a wider range of calibrations.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, +} + +@article{Chon11b, + author = {D. Chong and M. S. Brown and H. J. Kim and E. M. van Rikxoort and L. Guzman and M. F. McNitt-Gray and M. Khatonabadi and M. Galperin-Aizenberg and K. Yang and Y. Jung and J. G. Goldin}, + title = {Reproducibility of volume and densitometric measures of emphysema on repeat computed tomography with an interval of 1 week}, + journal = ER, + year = {2012}, + pages = {287-294}, + doi = {10.1007/s00330-011-2277-1}, + abstract = {Objectives. The reproducibilities of {CT} lung volume and densitometric emphysema measures were assessed over one week. The influence of breathhold on reproducibility was assessed. Methods. {HRCT} was performed on 45 subjects at inspiration on two visits with a seven-day interval. {CT} lung volume, relative area below {-950HU} ({RA950}-raw), and 15th percentile density ({PD}15-raw) were computed. Volume correction was used to obtain {RA}950-adj and {PD}15-adj. Reproducibilities between visits were assessed using concordance correlation coefficient ({CCC}) and repeatability coefficient ({RC}). Reproducibilities were compared between raw and adjusted measures. Correlations were computed for density differences versus volume difference. Subgroup analysis was performed using a 0.25L volume difference threshold. Results. High {CCC} were observed for all measures in full group ({CCC}>0.97). Reproducibilities of volume ({RC}=0.67L), {RA}950-raw ({RC}=2.3%), and {PD}15-raw ({RC}=10.6{HU}) were observed. Volume correction significantly improved {PD}15 ({RC}=3.6{HU}) but not {RA}950 ({RC}=1.7%). {RA}950-raw and {PD}15-raw had significantly better {RC} in the <0.25{L} subgroup than the >=0.25{L} subgroup. Significant correlations with volume were observed for {RA}950-raw and {PD}15-raw ({R}2>0.71), but not {RA}950-adj or {PD}15-adj ({R}2<0.11). Conclusions. Good breathhold and {RA}950 reproducibilities were achieved. {PD}15 was less reproducible but improved with volume correction or superior breathhold reproduction. Careful consideration of both signal and noise is necessary during study design.}, + file = {Chon11b.pdf:pdf\\Chon11b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22011903}, + month = {10}, + gsid = {16974625732689706339}, + gscites = {22}, +} + +@conference{Chun15, + author = {K. Chung and E.T. Scholten and S.J. van Riel and F. Ciompi and P. de Jong and M.M. Wille and M. Prokop and B. van Ginneken and C.M. Schaefer-Prokop}, + title = {Differentiation of persistent and transient subsolid nodules: does morphology help?}, + booktitle = ECR, + year = {2015}, + volume = {85}, + pages = {648-652}, + abstract = {Purpose: Current recommendations propose a three month follow-up CT scan to differentiate between persistent and transient subsolid lesions leading to a substantial number of short term follow-ups. Purpose of the study was to look for morphological features to differentiate persistent from transient lesions. Methods and Materials: Transient and persistent subsolid nodules were randomly selected from the NELSON and DLCST screening trial. An experienced chest radiologist assessed a number of predefined morphological features. Likelihood of persistence was assessed using a continuous scale between 0-100.
MANOVA statistics were used to assess the discriminating power of morphological features (p<0.05 was considered significant), ROC analysis was applied to assess ability to differentiate persistent from transient lesions. Results: A total of 122 nodules were assessed (median 9.7mm). Size distribution was equivalent for the two groups. The ROC area under the curve (AUC) for differentiating was 0.68 for all lesions and 0.82 for lesions >10mm. MANOVA revealed no significant discriminative features for all lesions, but yielded significance (p=0.02) for lesions >5mm. Multiplicity (p=0.046) and margin characteristics (p=0.006) had significant discriminative power. Demarcation by interlobular septum was predictive for transient nodules (74% vs. 26%) while spiculation was predictive for persistence (100% vs. 0%). Conclusion: There are morphological features with significant predictive power to differentiate persistent from transient subsolid nodules. They may be used in risk stratification models for subsolid nodules and serve as input for computerized classification systems.}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, +} + +@article{Chun17, + author = {Chung, Kaman and Jacobs, Colin and Scholten, Ernst T and Goo, Jin Mo and Prosch, Helmut and Sverzellati, Nicola and Ciompi, Francesco and Mets, Onno M and Gerke, Paul K and Prokop, Mathias and van Ginneken, Bram and Schaefer-Prokop, Cornelia M}, + title = {Lung-RADS Category 4X: Does It Improve Prediction of Malignancy in Subsolid Nodules?}, + journal = Radiology, + year = {2017}, + volume = {284}, + number = {1}, + pages = {264--271}, + doi = {10.1148/radiol.2017161624}, + abstract = {Purpose To evaluate the added value of Lung CT Screening Reporting and Data System (Lung-RADS) assessment category 4X over categories 3, 4A, and 4B for differentiating between benign and malignant subsolid nodules (SSNs). Materials and Methods SSNs on all baseline computed tomographic (CT) scans from the National Lung Cancer Trial that would have been classified as Lung-RADS category 3 or higher were identified, resulting in 374 SSNs for analysis. An experienced screening radiologist volumetrically segmented all solid cores and located all malignant SSNs visible on baseline scans. Six experienced chest radiologists independently determined which nodules to upgrade to category 4X, a recently introduced category for lesions that demonstrate additional features or imaging findings that increase the suspicion of malignancy. Malignancy rates of purely size-based categories and category 4X were compared. Furthermore, the false-positive rates of category 4X lesions were calculated and observer variability was assessed by using Fleiss kappa statistics. Results The observers upgraded 15%-24% of the SSNs to category 4X. The malignancy rate for 4X nodules varied from 46% to 57% per observer and was substantially higher than the malignancy rates of categories 3, 4A, and 4B SSNs without observer intervention (9%, 19%, and 23%, respectively). On average, the false-positive rate for category 4X nodules was 7% for category 3 SSNs, 7% for category 4A SSNs, and 19% for category 4B SSNs. Of the falsely upgraded benign lesions, on average 27% were transient. The agreement among the observers was moderate, with an average kappa value of 0.535 (95% confidence interval: 0.509, 0.561).
Conclusion The inclusion of a 4X assessment category for lesions suspicious for malignancy in a nodule management tool is of added value and results in high malignancy rates in the hands of experienced radiologists. Proof of the transient character of category 4X lesions at short-term follow-up could avoid unnecessary invasive management.}, + file = {Chun17.pdf:pdf\\Chun17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28339311}, + month = {7}, + gsid = {6175266469384712630}, + gscites = {48}, + ss_id = {bc86e211f26bd5c68e9548c1c215fa9605c6daa2}, + all_ss_ids = {['bc86e211f26bd5c68e9548c1c215fa9605c6daa2']}, +} + +@article{Chun17a, + author = {Chung, Kaman and Jacobs, Colin and Scholten, Ernst T and Mets, Onno M and Dekker, Irma and Prokop, Mathias and van Ginneken, Bram and Schaefer-Prokop, Cornelia M}, + title = {Malignancy estimation of {Lung-RADS} criteria for subsolid nodules on {CT}: accuracy of low and high risk spectrum when using {NLST} nodules}, + journal = ER, + year = {2017}, + volume = {27}, + pages = {4672-4679}, + doi = {10.1007/s00330-017-4842-8}, + abstract = {Lung-RADS proposes malignancy probabilities for categories 2 (<1%) and 4B (>15%). The purpose of this study was to quantify and compare malignancy rates for Lung-RADS 2 and 4B subsolid nodules (SSNs) on a nodule base. We identified all baseline SSNs eligible for Lung-RADS 2 and 4B in the National Lung Screening Trial (NLST) database. Solid cores and nodule locations were annotated using in-house software. Malignant SSNs were identified by an experienced radiologist using NLST information. Malignancy rates and percentages of persistence were calculated. Of the Lung-RADS 2 SSNs, 94.3% (1790/1897) could be located on chest CTs. Likewise, 95.1% (331/348) of part-solid nodules larger or equal to 6 mm in diameter could be located. Of these, 120 had a solid core larger or equal to 8 mm, corresponding to category 4B. Category 2 SSNs showed a malignancy rate of 2.5%, exceeding slightly the proposed rate of <1%. Category 4B SSNs showed a malignancy rate of 23.9%. In both categories one third of benign lesions were transient. Malignancy probabilities for Lung-RADS 2 and 4B generally match malignancy rates in SSNs. An option to include also category 2 SSNs for upgrade to 4X designed for suspicious nodules might be useful in the future. Integration of short-term follow-up to confirm persistence would prevent unnecessary invasive work-up in 4B SSNs. Malignancy probabilities for Lung-RADS 2/4B generally match malignancy risks in SSNs. Transient rate between low-risk Lung-RADS 2 and high-risk 4B lesions were similar. Upgrade of highly suspicious Lung-RADS 2 SSNs to Lung-RADS 4X might be useful. Up to one third of the benign high-risk Lung-RADS 4B lesions were transient. Short-term follow-up confirming persistence would avoid unnecessary invasive work-up of 4B lesions.}, + file = {Chun17a.pdf:pdf\\Chun17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28439653}, + month = {4}, + gsid = {10492992913475401535}, + gscites = {14}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/181940}, + ss_id = {abdee0f36ad16d6c3064aee8e98c4b2bd6c691a0}, + all_ss_ids = {['abdee0f36ad16d6c3064aee8e98c4b2bd6c691a0']}, +} + +@article{Chun18, + author = {Chung, K. and Ciompi, F. and Scholten, E. Th. and Goo, J. M. and Prokop, M. and Jacobs, C. and van Ginneken, B. and Schaefer-Prokop, C.
M.}, + title = {Visual Discrimination of Screen-detected Persistent from Transient Subsolid Nodules: an Observer Study}, + journal = PLOSONE, + year = {2018}, + volume = {13}, + issue = {2}, + pages = {e0191874}, + doi = {10.1371/journal.pone.0191874}, + abstract = {PURPOSE: To evaluate whether, and to which extent, experienced radiologists are + able to visually correctly differentiate transient from persistent subsolid + nodules from a single CT examination alone and to determine CT morphological + features to make this differentiation. + MATERIALS AND METHODS: We selected 86 transient and 135 persistent subsolid + nodules from the National Lung Screening Trial (NLST) database. Four experienced + radiologists visually assessed a predefined list of morphological features and + gave a final judgment on a continuous scale (0-100). To assess observer + performance, area under the receiver operating characteristic (ROC) curve was + calculated. Statistical differences of morphological features between transient + and persistent lesions were calculated using Chi-square. Inter-observer agreement + of morphological features was evaluated by percentage agreement. + RESULTS: Forty-nine lesions were excluded by at least 2 observers, leaving 172 + lesions for analysis. On average observers were able to differentiate transient + from persistent subsolid nodules >= 10 mm with an area under the curve of 0.75 + (95% CI 0.67-0.82). Nodule type, lesion margin, presence of a well-defined + border, and pleural retraction showed significant differences between transient + and persistent lesions in two observers. Average pair-wise percentage agreement + for these features was 81%, 64%, 47% and 89% respectively. Agreement for other + morphological features varied from 53% to 95%. + CONCLUSION: The visual capacity of experienced radiologists to differentiate + persistent and transient subsolid nodules is moderate in subsolid nodules larger + than 10 mm. Performance of the visual assessment of CT morphology alone is not + sufficient to generally abandon a short-term follow-up for subsolid nodules.}, + file = {Chun18.pdf:pdf/Chun18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29438443}, + month = {2}, + gsid = {6862237226851140169}, + gscites = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/189883}, + ss_id = {ef537c1ce62c69018575d2c90b70024a32256ef5}, + all_ss_ids = {['ef537c1ce62c69018575d2c90b70024a32256ef5']}, +} + +@phdthesis{Chun18a, + author = {Kaman Chung}, + title = {Malignancy risk estimation of subsolid nodules}, + year = {2018}, + url = {https://hdl.handle.net/2066/191598}, + abstract = {Subsolid nodules represent a spectrum from pre-invasive to invasive adenocarcinomas. To make the differentiation, management guidelines advise long-term follow-up for many years to determine longitudinal changes. Risk prediction models would help in guiding the process. In this thesis we address a few issues with respect to human and computer-enhanced characterization and risk prediction of subsolid nodules that give more insight in optimizing and ideally individualizing management of this subset of lesions. Since subsolid nodules are seen less frequent than solid nodules, we took the advantage of the largest publicly available screening data base of the NLST for our studies.}, + copromotor = {C. Jacobs}, + file = {:pdf/Chun18a.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken and C. M. 
Schaefer-Prokop}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Chun18b, + author = {Chung, Kaman and Mets, Onno M and Gerke, Paul K and Jacobs, Colin and den Harder, Annemarie M and Scholten, Ernst T and Prokop, Mathias and de Jong, Pim A and van Ginneken, Bram and Schaefer-Prokop, Cornelia M}, + title = {Brock malignancy risk calculator for pulmonary nodules: validation outside a lung cancer screening population}, + journal = Thorax, + year = {2018}, + volume = {73}, + number = {9}, + pages = {857-863}, + doi = {10.1136/thoraxjnl-2017-211372}, + abstract = {To assess the performance of the Brock malignancy risk model for pulmonary nodules detected in routine clinical setting. In two academic centres in the Netherlands, we established a list of patients aged >=40 years who received a chest CT scan between 2004 and 2012, resulting in 16 850 and 23 454 eligible subjects. Subsequent diagnosis of lung cancer until the end of 2014 was established through linking with the National Cancer Registry. A nested case-control study was performed (ratio 1:3). Two observers used semiautomated software to annotate the nodules. The Brock model was separately validated on each data set using ROC analysis and compared with a solely size-based model. After the annotation process the final analysis included 177 malignant and 695 benign nodules for centre A, and 264 malignant and 710 benign nodules for centre B. The full Brock model resulted in areas under the curve (AUCs) of 0.90 and 0.91, while the size-only model yielded significantly lower AUCs of 0.88 and 0.87, respectively (p<0.001). At 10% malignancy risk, the threshold suggested by the British Thoracic Society, sensitivity of the full model was 75% and 81%, specificity was 85% and 84%, positive predictive values were 14% and 10% at negative predictive value (NPV) of 99%. The optimal threshold was 6% for centre A and 8% for centre B, with NPVs >99%. The Brock model shows high predictive discrimination of potentially malignant and benign nodules when validated in an unselected, heterogeneous clinical population. The high NPV may be used to decrease the number of nodule follow-up examinations.}, + file = {Chun18b.pdf:pdf\\Chun18b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29777062}, + month = {5}, + gsid = {13012009912748302045}, + gscites = {30}, + ss_id = {1cc0b06d80d21082d2610f11daa488cc6817b207}, + all_ss_ids = {['1cc0b06d80d21082d2610f11daa488cc6817b207']}, +} + +@conference{Ciet11, + author = {P. Ciet and P. Wielopolski and S. Lever and R. Manniesing and M. {de Bruijne} and M.H. Lequin and H.A.W.M. Tiddens}, + title = {{MRI} Tracheomalacia Assessment in Pediatric Patients: Feasibility Study}, + booktitle = ECR, + year = {2011}, + abstract = {Purpose: TM is an excessive narrowing of the intrathoracic part of the trachea. TM is a common congenital pediatric anomaly, but it's often not recognized due to its unspecific clinical presentation. The aims of our study are: 1) to develop cine-MRI sequences to visualize central airways in static and dynamic conditions in patients that were able to follow specific breathing manoeuvres; 2) to develop post-processing tools for image analysis. Methods and Materials: To date 10 subjects (7 males; 2 adults) were enrolled in the pilot study: mean age 15, (range 6 to 30yrs). Volunteers were trained to perform spirometry controlled breathing maneuvers (peak flow and coughing) using a MRI compatible spirometer. "Static"
13-second breath-hold scans covering the entire thoracic region were acquired at end-inspiration and end-expiration using a 3D GRE with TR/TE=1.2/0.5 ms, flip angle = 2°, sagittal isotropic volume (2.8) x 3mm3 voxels. "Dynamic" scans were performed with the same parameters but covering only the central thorax (1/3 volume), temporal resolution was 500 ms per volume using the TRICKS. In-house developed software for segmentation and analysis was used. Results: All subjects managed to follow the required breathing maneuvers. Images of central airways during static and dynamic conditions were acquired and could be analyzed. Three out of the 8 children had a TM just above the carina during forced expiration, confirmed by bronchoscopy. Conclusion: This pilot study shows that Dynamic-MRI is feasible in pediatric population and allows avoiding radiation exposure and bronchoscopy for the evaluation of central airway dimensions.}, + file = {Ciet11.pdf:pdf/Ciet11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Ciet14, + author = {Ciet, P. and Wielopolski, P. and Manniesing, R. and Lever, S. and De Bruijne, M. and Morana, G. and Muzzio, P.C. and Lequin, M.H. and Tiddens, H.A.W.M.}, + title = {Spirometer-controlled Cine-Magnetic Resonance Imaging for Diagnosis of Tracheobronchomalacia in Pediatric Patients}, + journal = ERJ, + year = {2014}, + volume = {43}, + pages = {115-124}, + doi = {10.1183/09031936.00104512}, + abstract = {Tracheobronchomalacia (TBM) is defined as an excessive collapse of the intrathoracic trachea. Bronchoscopy is the gold standard to diagnose TBM, but bronchoscopy has major disadvantages, such as general anaesthesia. Cine-CT is a non-invasive alternative to diagnose TBM, but its use in children is restricted by ionizing radiation. Our aim was to evaluate the feasibility of spirometer-controlled cine-MRI as alternative to cine-CT in a retrospective study. 12 children (mean 12 years, range 7-17), suspected to have TBM, underwent cine-MRI. Static scans were acquired at end-inspiration and expiration covering the thorax using a 3D SPGR sequence. 3D-Dynamic-scans were performed covering only the central airways. TBM was defined as a decrease of the trachea or bronchi diameter greater than 50% at end-expiration in the static and dynamic scans. The success rate of the cine-MRI protocol was 92%. Cine-MRI was compared with bronchoscopy or chest-CT in 7 subjects. TBM was diagnosed by cine-MRI in 7 out of 12 children (58%) and was confirmed by bronchoscopy or CT. In 4 patients, cine-MRI demonstrated tracheal narrowing that was not present in the static scans.
Spirometer-controlled cine-MRI is a promising technique to assess TBM in children and has the potential to replace bronchoscopy.}, + file = {Ciet14.pdf:pdf\\Ciet14.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {23598953}, + month = {4}, + ss_id = {f94e09771e679cce8593b18228915cb5b7474141}, + all_ss_ids = {['f94e09771e679cce8593b18228915cb5b7474141']}, + gscites = {37}, +} + +@inproceedings{Ciom09, + author = {Ciompi, Francesco and Pujol, Oriol and Fernandez-Nofrerias, Eduard and Mauri, Josepa and Radeva, Petia}, + title = {{ECOC} random fields for lumen segmentation in radial artery {IVUS} sequences}, + booktitle = MICCAI, + year = {2009}, + series = LNCS, + publisher = {Springer}, + pages = {869--876}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-04271-3_105}, + abstract = {The measure of lumen volume on radial arteries can be used to evaluate the vessel response to different vasodilators. In this paper, we present a framework for automatic lumen segmentation in longitudinal cut images of radial artery from Intravascular ultrasound sequences. The segmentation is tackled as a classification problem where the contextual information is exploited by means of Conditional Random Fields (CRFs). A multi-class classification framework is proposed, and inference is achieved by combining binary CRFs according to the Error-Correcting-Output-Code technique. The results are validated against manually segmented sequences. Finally, the method is compared with other state-of-the-art classifiers.}, + file = {Ciom09.pdf:pdf\\Ciom09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {8616859384592445149}, + gscites = {24}, +} + +@inproceedings{Ciom09a, + author = {Ciompi, Francesco and Pujol, Oriol and Leor, Oriol Rodriguez and Gatta, Carlo and Vida, Angel Serrano and Radeva, Petia}, + title = {Enhancing in-vitro IVUS data for tissue characterization}, + booktitle = PRIA, + year = {2009}, + publisher = {Springer}, + pages = {241--248}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-02172-5_32}, + abstract = {Intravascular Ultrasound (IVUS) data validation is usually performed by comparing post-mortem (in-vitro) IVUS data and corresponding histological analysis of the tissue, obtaining a reliable ground truth. The main drawback of this method is the few number of available study cases due to the complex procedure of histological analysis. In this work we propose a novel semi-supervised approach to enhance the in-vitro training set by including examples from in-vivo coronary plaques data set. For this purpose, a Sequential Floating Forward Selection method is applied on in-vivo data and plaque characterization performances are evaluated by Leave-One-Patient-Out cross-validation technique. Supervised data inclusion improves global classification accuracy from 89.39% to 91.82%.}, + file = {Ciom09a.pdf:pdf\\Ciom09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {1585581466834494042}, + gscites = {6}, +} + +@article{Ciom10, + author = {Ciompi, Francesco and Pujol, Oriol and Gatta, Carlo and Rodriguez-Leor, Oriol and Mauri-Ferre, Josepa and Radeva, Petia}, + title = {Fusing in-vitro and in-vivo intravascular ultrasound data for plaque characterization}, + journal = IJCI, + year = {2010}, + volume = {26}, + pages = {763--779}, + pmid = {20091123}, + url = {http://link.springer.com/article/10.1007/s10554-009-9543-1}, + abstract = {Accurate detection of in-vivo vulnerable plaque in coronary arteries is still an open problem.
Recent studies show that it is highly related to tissue structure and composition. Intravascular Ultrasound (IVUS) is a powerful imaging technique that gives a detailed cross-sectional image of the vessel, allowing to explore arteries morphology. IVUS data validation is usually performed by comparing post-mortem (invitro) IVUS data and corresponding histological analysis of the tissue. The main drawback of this method is the few number of available case studies and validated data due to the complex procedure of histological analysis of the tissue. On the other hand, IVUS data from in-vivo cases is easy to obtain but it can not be histologically validated. In this work, we propose to enhance the in-vitro training data set by selectively including examples from in-vivo plaques. For this purpose, a Sequential Floating Forward Selection method is reformulated in the context of plaque characterization. The enhanced classifier performance is validated on in-vitro data set, yielding an overall accuracy of 91.59% in discriminating among fibrotic, lipidic and calcified plaques, while reducing the gap between in-vivo and in-vitro data analysis. Experimental results suggest that the obtained classifier could be properly applied on in-vivo plaque characterization and also demonstrate that the common hypothesis of assuming the difference between in-vivo and in-vitro as negligible is incorrect.}, + file = {Ciom10.pdf:pdf\\Ciom10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + publisher = {Springer}, + gsid = {9740655195131479380}, + gscites = {28}, +} + +@inproceedings{Ciom10a, + author = {Ciompi, Francesco and Pujol, Oriol and Radeva, Petia}, + title = {A meta-learning approach to conditional random fields using error-correcting output codes}, + booktitle = ICPR, + year = {2010}, + pages = {710--713}, + url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5596027}, + abstract = {We present a meta-learning framework for the design of potential functions for Conditional Random Fields. The design of both node potential and edge potential is formulated as a classification problem where margin classifiers are used. The set of state transitions for the edge potential is treated as a set of different classes, thus defining a multiclass learning problem. The Error-Correcting Output Codes (ECOC) technique is used to deal with the multi-class problem. Furthermore, the point defined by the combination of margin classifiers in the ECOC space is interpreted in a probabilistic manner, and the obtained distance values are then converted into potential values. The proposed model exhibits very promising results when applied to two real detection problems.}, + file = {Ciom10a.pdf:pdf\\Ciom10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {8}, + gsid = {8339407405073558184}, + gscites = {5}, +} + +@inproceedings{Ciom11, + author = {Ciompi, Francesco and Pujol, Oriol and Gatta, Carlo and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, + title = {A holistic approach for the detection of media-adventitia border in IVUS}, + booktitle = MICCAI, + year = {2011}, + series = LNCS, + publisher = {Springer}, + pages = {411--419}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-23626-6_51}, + abstract = {In this paper we present a methodology for the automatic detection of media-adventitia border (MAb) in Intravascular Ultrasound. A robust computation of the MAb is achieved through a holistic approach where the position of the MAb with respect to other tissues of the vessel is used. 
A learned quality measure assures that the resulting MAb is optimal with respect to all other tissues. The mean distance error computed through a set of 140 images is 0.2164 (±0.1326) mm.}, + file = {Ciom11.pdf:pdf\\Ciom11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {253415321777801169}, + gscites = {11}, +} + +@article{Ciom12, + author = {Ciompi, Francesco and Pujol, Oriol and Gatta, Carlo and Alberti, Marina and Balocco, Simone and Carrillo, Xavier and Mauri-Ferre, Josepa and Radeva, Petia}, + title = {HoliMAb: A holistic approach for Media--Adventitia border detection in intravascular ultrasound}, + journal = MIA, + year = {2012}, + pmid = {22854037}, + url = {http://www.sciencedirect.com/science/article/pii/S1361841512000898}, + abstract = {We present a fully automatic methodology for the detection of the Media-Adventitia border (MAb) in human coronary artery in Intravascular Ultrasound (IVUS) images. A robust border detection is achieved by means of a holistic interpretation of the detection problem where the target object, i.e. the media layer, is considered as part of the whole vessel in the image and all the relationships between tissues are learnt. A fairly general framework exploiting multi-class tissue characterization as well as contextual information on the morphology and the appearance of the tissues is presented. The methodology is (i) validated through an exhaustive comparison with both Inter-observer variability on two challenging databases and (ii) compared with state-of-the-art methods for the detection of the MAb in IVUS. The obtained averaged values for the mean radial distance and the percentage of area difference are 0.211 mm and 10.1%, respectively. The applicability of the proposed methodology to clinical practice is also discussed.}, + file = {Ciom12.pdf:pdf\\Ciom12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + publisher = {Elsevier}, + month = {8}, + gsid = {1017006820364634001}, + gscites = {53}, +} + +@phdthesis{Ciom12a, + author = {Ciompi, Francesco}, + title = {Multi-Class Learning for Vessel Characterization in Intravascular Ultrasound}, + year = {2012}, + url = {http://www.cvc.uab.es/?p=361}, + abstract = {In this thesis we tackle the problem of automatic characterization of human coronary vessel in Intravascular Ultrasound (IVUS) image modality. The basis for the whole characterization process is machine learning applied to multiclass problems. In all the presented approaches, the Error-Correcting Output Codes (ECOC) framework is used as central element for the design of multi-class classifiers. Two main topics are tackled in this thesis. First, the automatic detection of the vessel borders is presented. For this purpose, a novel context-aware classifier for multi-class classification of the vessel morphology is presented, namely ECOC-DRF. Based on ECOC-DRF, the lumen border and the media-adventitia border in IVUS are robustly detected by means of a novel holistic approach, achieving an error comparable with inter-observer variability and with state of the art methods. The two vessel borders define the atheroma area of the vessel. In this area, tissue characterization is required. For this purpose, we present a framework for automatic plaque characterization by processing both texture in IVUS images and spectral information in raw Radio Frequency data. Furthermore, a novel method for fusing in-vivo and in-vitro IVUS data for plaque characterization is presented, namely pSFFS.
The method demonstrates to effectively fuse data generating a classifier that improves the tissue characterization in both in-vitro and in-vivo datasets. A novel method for automatic video summarization in IVUS sequences is also presented. The method aims to detect the key frames of the sequence, i.e., the frames representative of morphological changes. This novel method represents the basis for video summarization in IVUS as well as the markers for the partition of the vessel into morphological and clinically interesting events. Finally, multi-class learning based on ECOC is applied to lung tissue characterization in Computed Tomography. The novel proposed approach, based on supervised and unsupervised learning, achieves accurate tissue classification on a large and heterogeneous dataset.}, + file = {Ciom12a.pdf:pdf\\Ciom12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {University of Barcelona, Spain}, + journal = {PhD thesis}, +} + +@inproceedings{Ciom13, + author = {Ciompi, Francesco and Balocco, Simone and Caus, Carles and Mauri, Josepa and Radeva, Petia}, + title = {Stent Shape Estimation through a Comprehensive Interpretation of Intravascular Ultrasound Images}, + booktitle = MICCAI, + year = {2013}, + series = LNCS, + pages = {345-352}, + abstract = {We present a method for automatic struts detection and stent shape estimation in cross-sectional intravascular ultrasound images. A stent shape is first estimated through a comprehensive interpretation of the vessel morphology, performed using a supervised context-aware multi-class classification scheme. Then, the successive strut identification exploits both local appearance and the defined stent shape. The method is tested on 589 images obtained from 80 patients, achieving a F-measure of 74.1% and an averaged distance between manual and automatic struts of 0.10 mm.}, + file = {Ciom13.pdf:pdf\\Ciom13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {14808204315824444400}, + gscites = {6}, + ss_id = {499ba2c155d9968756e92cf2ba8107dfa1e8c618}, + all_ss_ids = {['499ba2c155d9968756e92cf2ba8107dfa1e8c618']}, +} + +@inproceedings{Ciom13a, + author = {Ciompi, Francesco and Hua, Rui and Balocco, Simone and Alberti, Marina and Pujol, Oriol and Caus, Carles and Mauri, Josepa and Radeva, Petia}, + title = {Learning to Detect Stent Struts in Intravascular Ultrasound}, + booktitle = PRIA, + year = {2013}, + publisher = {Springer}, + pages = {575--583}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-38628-2_68}, + abstract = {In this paper we tackle the automatic detection of struts elements (metallic braces of a stent device) in Intravascular Ultrasound (IVUS) sequences. The proposed method is based on context-aware classification of IVUS images, where we use Multi-Class Multi-Scale Stacked Sequential Learning (M2SSL). Additionally, we introduce a novel technique to reduce the amount of required contextual features. The comparison with binary and multi-class learning is also performed, using a dataset of IVUS images with struts manually annotated by an expert.
The best performing configuration reaches an F-measure F = 63.97%.}, + file = {Ciom13a.pdf:pdf\\Ciom13a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {14811347107651563370}, + gscites = {6}, + ss_id = {a4e7fa7efd9d50e18411b053402aab5bcbc9ff99}, + all_ss_ids = {['a4e7fa7efd9d50e18411b053402aab5bcbc9ff99']}, +} + +@article{Ciom14, + author = {Francesco Ciompi and Oriol Pujol and Petia Radeva}, + title = {ECOC-DRF: Discriminative Random Fields based on Error-Correcting Output Codes}, + journal = PR, + year = {2014}, + volume = {47}, + pages = {2193-2204}, + doi = {10.1016/j.patcog.2013.12.007}, + abstract = {We present ECOC-DRF, a framework where potential functions for Discriminative Random Fields are formulated as an ensemble of classifiers. We introduce the label trick, a technique to express transitions in the pairwise potential as meta-classes. This allows to independently learn any possible transition between labels without assuming any pre-defined model. The Error Correcting Output Codes matrix is used as ensemble framework for the combination of margin classifiers. We apply ECOC-DRF to a large set of classification problems, covering synthetic, natural and medical images for binary and multi-class cases, outperforming state-of-the-art in almost all the experiments.}, + file = {Ciom14.pdf:pdf\\Ciom14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {6}, + gsid = {2279805513688046695}, + gscites = {5}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/139326}, + ss_id = {a5416d72cf39f365e4f12ea31fcba1a314e73567}, + all_ss_ids = {['a5416d72cf39f365e4f12ea31fcba1a314e73567']}, +} + +@conference{Ciom14a, + author = {Francesco Ciompi and Bartjan de Hoop and Colin Jacobs and Mathias Prokop and Pim A de Jong and Bram van Ginneken}, + title = {Automatic Classification of Perifissural Pulmonary Nodules in Thoracic {CT} Images}, + booktitle = RSNA, + year = {2014}, + abstract = {Title. Automatic Classification of Perifissural Pulmonary Nodules in Thoracic CT Images. Purpose. Up to one third of pulmonary nodules detected in heavy smokers are perifissural nodules (PFNs) that do not require follow-up. An automatic method is presented to distinguish PFNs from solid nodules. Materials and Methods. We used all baseline scans with a pulmonary nodule from one of the sites of the NELSON trial. All participants were either current or former heavy smokers (age between 50 and 75 years), and underwent low-dose CT (Mx8000 IDT 16; Philips Medical Systems, Cleveland, Ohio). Human experts annotated non-calcified solid nodules in 1,729 scans, and classified these as PFN (788) and non-PFN (3,038). We formulated PFN classification as a machine learning problem where a classifier is trained to automatically label nodules as PFN or non-PFN. Given the characteristic triangular-like shape of PFNs, a novel descriptor encoding information on nodule morphology was designed. The descriptor is based on frequency analysis of intensity profiles sampled in the CT image. Given a detected nodule, spherical surfaces up to a maximum radius R are considered, centered on the center of mass of the nodule. For each sphere, the image intensity is sampled along C circular profiles on the surface of each sphere at constant angular distance. The profiles are interpreted as a periodic signal, and their spectrum is obtained using a Fast Fourier Transform. Each spectrum encodes information on nodule morphology through a set of characteristic frequencies.
A set of K spectral signatures is computed applying K-means on the collected set of spectra. A compact nodule descriptor is obtained as the histogram of spectral signatures along the spheres. A Random Forests classifier with 100 trees was used for supervised learning. A 10-fold cross-validation scheme was applied to evaluate the method on the 3,826 nodules, using C=128, K=100. Since the range of PFNs diameters is 2.8-10.6 mm, we used R = 7.5 mm. Results. We obtained a value of area under the ROC curve of 0.85, with an optimal operating point of 77% sensitivity and 79% specificity. Misclassified PFNs were often close to the pleura or to other vascular structures. Conclusion. Classification of pulmonary nodules as PFN is feasible and has the potential to be used as an automatic tool in CAD. Clinical Relevance. PFNs rarely turn out to be malignant, even though their growth rate is similar to that of malignant nodules. Automatic recognition of PFNs could reduce the number of unnecessary follow-up CT exams.}, + file = {:pdf\\Ciom14a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Ciom14c, + author = {Ciompi, Francesco and Jacobs, Colin and Scholten, Ernst and Winkler Wille, Mathilde and de Jong, Pim and Prokop, Mathias and van Ginneken, Bram}, + title = {Bag of frequencies: a descriptor of pulmonary nodules in Computed Tomography images}, + journal = TMI, + year = {2015}, + volume = {34}, + pages = {1-12}, + doi = {10.1109/TMI.2014.2371821}, + abstract = {We present a novel descriptor for the characterization of pulmonary nodules in computed tomography (CT) images. The descriptor encodes information on nodule morphology and has scale-invariant and rotation-invariant properties. Information on nodule morphology is captured by sampling intensity profiles along circular patterns on spherical surfaces centered on the nodule, in a multi-scale fashion. Each intensity profile is interpreted as a periodic signal, where the Fourier transform is applied, obtaining a spectrum. A library of spectra is created and labeled via unsupervised clustering, obtaining a Bag-of-Frequencies, which is used to assign each spectrum a label. The descriptor is obtained as the histogram of labels along all the spheres. Additional contributions are a technique to estimate the nodule size, based on the sampling strategy, as well as a technique to choose the most informative plane to cut a 2-D view of the nodule in the 3-D image. We evaluate the descriptor on several nodule morphology classification problems, namely discrimination of nodules versus vascular structures and characterization of spiculation. We validate the descriptor on data from European screening trials NELSON and DLCST and we compare it with state-of-the-art approaches for 3-D shape description in medical imaging and computer vision, namely SPHARM and 3-D SIFT, outperforming them in all the considered experiments.}, + file = {Ciom14c.pdf:pdf\\Ciom14c.pdf:PDF}, + optnote = {DIAG}, + number = {4}, + pmid = {25420257}, + month = {4}, + gsid = {4309508126026672345}, + gscites = {54}, + ss_id = {c46078f5f85c01090f2ee949a0d332de7d6e4463}, + all_ss_ids = {['c46078f5f85c01090f2ee949a0d332de7d6e4463']}, +} + +@inproceedings{Ciom15, + author = {Francesco Ciompi and Colin Jacobs and Ernst Th Scholten and Sarah van Riel and Mathilde M. W.
Wille and Mathias Prokop and Bram van Ginneken}, + title = {Automatic detection of spiculation of pulmonary nodules in Computed Tomography images}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + doi = {10.1117/12.2081426}, + abstract = {We present a fully automatic method for the assessment of spiculation of pulmonary nodules in low-dose Computed Tomography (CT) images. Spiculation is considered as one of the indicators of nodule malignancy and an important feature to assess in order to decide on a patient-tailored follow-up procedure. For this reason, lung cancer screening scenario would benefit from the presence of a fully automatic system for the assessment of spiculation. The presented framework relies on the fact that spiculated nodules mainly differ from non-spiculated ones in their morphology. In order to discriminate the two categories, information on morphology is captured by sampling intensity profiles along circular patterns on spherical surfaces centered on the nodule, in a multi-scale fashion. Each intensity profile is interpreted as a periodic signal, where the Fourier transform is applied, obtaining a spectrum. A library of spectra is created by clustering data via unsupervised learning. The centroids of the clusters are used to label back each spectrum in the sampling pattern. A compact descriptor encoding the nodule morphology is obtained as the histogram of labels along all the spherical surfaces and used to classify spiculated nodules via supervised learning. We tested our approach on a set of nodules from the Danish Lung Cancer Screening Trial (DLCST) dataset. Our results show that the proposed method outperforms other 3-D descriptors of morphology in the automatic assessment of spiculation.}, + file = {Ciom15.pdf:pdf\\Ciom15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {941409}, + month = {3}, + gsid = {1720898603461111674}, + gscites = {6}, + ss_id = {671d9c99335001133ebf2125167aa2f2bcee6efa}, + all_ss_ids = {['671d9c99335001133ebf2125167aa2f2bcee6efa']}, +} + +@article{Ciom15a, + author = {Francesco Ciompi and Bartjan de Hoop and Sarah J. van Riel and Kaman Chung and Ernst Th. Scholten and Matthijs Oudkerk and Pim A de Jong and Mathias Prokop and Bram van Ginneken}, + title = {Automatic classification of pulmonary peri-fissural nodules in computed tomography using an ensemble of {2D} views and a convolutional neural network out-of-the-box}, + journal = MIA, + year = {2015}, + volume = {26}, + pages = {195-202}, + doi = {10.1016/j.media.2015.08.001}, + url = {http://www.medicalimageanalysisjournal.com/article/S1361-8415(15)00125-5/}, + abstract = {In this paper, we tackle the problem of automatic classification of pulmonary peri-fissural nodules (PFNs). The classification problem is formulated as a machine learning approach, where detected nodule candidates are classified as PFNs or non-PFNs. Supervised learning is used, where a classifier is trained to label the detected nodule. The classification of the nodule in 3D is formulated as an ensemble of classifiers trained to recognize PFNs based on 2D views of the nodule. In order to describe nodule morphology in 2D views, we use the output of a pre-trained convolutional neural network known as OverFeat. 
We compare our approach with a recently presented descriptor of pulmonary nodule morphology, namely Bag of Frequencies, and illustrate the advantages offered by the two strategies, achieving performance of AUC = 0.868, which is close to the one of human experts.}, + file = {Ciom15a.pdf:pdf\\Ciom15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {26458112}, + month = {12}, + gsid = {14568521204042342742}, + gscites = {256}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/154608}, + ss_id = {622791cfb4e509158fbc858e9a970283de0f8045}, + all_ss_ids = {['622791cfb4e509158fbc858e9a970283de0f8045']}, +} + +@article{Ciom16a, + author = {Ciompi, Francesco and Balocco, Simone and Rigla, Juan and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, + title = {Computer-aided detection of intracoronary stent in intravascular ultrasound sequences}, + journal = MP, + year = {2016}, + volume = {43}, + issue = {10}, + month = {10}, + pages = {5616}, + doi = {10.1118/1.4962927}, + abstract = {An intraluminal coronary stent is a metal mesh tube deployed in a stenotic artery during percutaneous coronary intervention (PCI), in order to prevent acute vessel occlusion. The identification of struts location and the definition of the stent shape is relevant for PCI planning and for patient follow-up. The authors present a fully automatic framework for computer-aided detection (CAD) of intracoronary stents in intravascular ultrasound (IVUS) image sequences. The CAD system is able to detect stent struts and estimate the stent shape. The proposed CAD uses machine learning to provide a comprehensive interpretation of the local structure of the vessel by means of semantic classification. The output of the classification stage is then used to detect struts and to estimate the stent shape. The proposed approach is validated using a multicentric data-set of 1,015 images from 107 IVUS sequences containing both metallic and bioabsorbable stents. The method was able to detect struts in both metallic stents with an overall F-measure of 77.7% and a mean distance of 0.15 mm from manually annotated struts, and in bioabsorbable stents with an overall F-measure of 77.4% and a mean distance of 0.09 mm from manually annotated struts. The results are close to the interobserver variability and suggest that the system has the potential of being used as a method for aiding percutaneous interventions.}, + file = {Ciom16a.pdf:pdf\\Ciom16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27782708}, + gsid = {13722029973076101949}, + gscites = {15}, + ss_id = {271e85d19b4cd1997a110525ffb66df7a8fa7fe6}, + all_ss_ids = {['de1fc90d61276094416b430605d47ee5f8cc7b4d', '271e85d19b4cd1997a110525ffb66df7a8fa7fe6']}, +} + +@conference{Ciom16b, + author = {Francesco Ciompi and Kaman Chung and Arnaud A. A. Setio and Sarah J van Riel and Ernst Th. Scholten and Paul K. Gerke and Colin Jacobs and Ugo Pastorino and Alfonso Marchiano and Mathilde M. W. Wille and Mathias Prokop and Bram van Ginneken}, + title = {Pulmonary nodule type classification with convolutional networks}, + booktitle = MICCAI, + year = {2016}, + abstract = {Classification of detected pulmonary nodules is a key task in deciding the optimal follow-up strategy for patients in lung cancer screening. We propose a framework based on Convolutional Networks (ConvNets) to automatically assess nodule type for lesions detected in CT scans. 
The proposed ConvNet processes nodules in 3D scans through a combination of several 2D views and classifies it as solid, part-solid, non-solid and calcified. We validated the method on data from the lung cancer screening trials DLCST and MILD.}, + file = {Ciom16b.pdf:pdf\\Ciom16b.pdf:PDF}, + optnote = {DIAG}, +} + +@inproceedings{Ciom17, + author = {Francesco Ciompi and Oscar G. F. Geessink and Babak Ehteshami Bejnordi and Gabriel Silva de Souza and Alexi Baidoshvili and Geert Litjens and B. van Ginneken and Iris D. Nagtegaal and Jeroen A.W.M. van der Laak}, + title = {The importance of stain normalization in colorectal tissue classification with convolutional networks}, + booktitle = ISBI, + year = {2017}, + pages = {160-163}, + doi = {10.1109/ISBI.2017.7950492}, + url = {https://arxiv.org/abs/1702.05931}, + abstract = {The development of reliable imaging biomarkers for the analysis of colorectal cancer (CRC) in hematoxylin and eosin (H\&E) stained histopathology images requires an accurate and reproducible classification of the main tissue components in the image. In this paper, we propose a system for CRC tissue classification based on convolutional networks (ConvNets). We investigate the importance of stain normalization in tissue classification of CRC tissue samples in H&E-stained images. Furthermore, we report the performance of ConvNets on a cohort of rectal cancer samples and on an independent publicly available dataset of colorectal H\&E images.}, + file = {Ciom17.pdf:pdf\\Ciom17.pdf:PDF}, + optnote = {DIAG}, + month = {4}, + gsid = {4102008958123816219}, + gscites = {171}, + ss_id = {87b612323f759cea5cb224331bfacf59a0f335b6}, + all_ss_ids = {['87b612323f759cea5cb224331bfacf59a0f335b6']}, +} + +@article{Ciom17a, + author = {F. Ciompi and K. Chung and S. J. van Riel and A. A. A. Setio and P. K. Gerke and C. Jacobs and E. Th. Scholten and C. M. Schaefer-Prokop and M. M. W. Wille and A. Marchiano and U. Pastorino and M. Prokop and B. van Ginneken}, + title = {Towards automatic pulmonary nodule management in lung cancer screening with deep learning}, + journal = NATSCIREP, + year = {2017}, + series = {7}, + number = {46479}, + doi = {10.1038/srep46479}, + url = {https://arxiv.org/abs/1610.09157}, + abstract = {The introduction of lung cancer screening programs will produce an unprecedented amount of chest CT scans in the near future, which radiologists will have to read in order to decide on a patient follow-up strategy. According to the current guidelines, the workup of screen-detected nodules strongly relies on nodule size and nodule type. In this paper, we present a deep learning system based on multi-stream multi-scale convolutional networks, which automatically classifies all nodule types relevant for nodule workup. The system processes raw CT data containing a nodule without the need for any additional information such as nodule segmentation or nodule size and learns a representation of 3D data by analyzing an arbitrary number of 2D views of a given nodule. The deep learning system was trained with data from the Italian MILD screening trial and validated on an independent set of data from the Danish DLCST screening trial. 
We analyze the advantage of processing nodules at multiple scales with a multi-stream convolutional network architecture, and we show that the proposed deep learning system achieves performance at classifying nodule type that surpasses the one of classical machine learning approaches and is within the inter-observer variability among four experienced human observers.}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28880026}, + month = {4}, + gsid = {8567006734401252147}, + gscites = {305}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/174848}, + all_ss_ids = {['86b9d39fb026746752f95f87f87fd26b8512c913', 'fd0901b1f2121506391a7859de8fb695d159a393']}, +} + +@article{Ciom21, + author = {Francesco Ciompi and Mitko Veta and Jeroen van der Laak and Nasir Rajpoot}, + title = {Editorial Computational Pathology}, + doi = {10.1109/jbhi.2021.3052029}, + number = {2}, + pages = {303--306}, + volume = {25}, + abstract = {IEEE Journal of Biomedical and Health Informatics;2021;25;2;10.1109/JBHI.2021.3052029}, + file = {Ciom21.pdf:pdf\\Ciom21.pdf:PDF}, + journal = {{IEEE} Journal of Biomedical and Health Informatics}, + month = {2}, + optnote = {DIAG}, + publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, + year = {2021}, + ss_id = {f8208405f959304ba68f3d11d7f880c305ac7b39}, + all_ss_ids = {['f8208405f959304ba68f3d11d7f880c305ac7b39']}, + gscites = {0}, +} + +@article{Cohe16, + author = {Cohen, Julien G. and Goo, Jin Mo and Yoo, Roh-Eul and Park, Chang Min and Lee, Chang Hyun and {van Ginneken}, Bram and Chung, Doo Hyun and Kim, Young Tae}, + title = {Software performance in segmenting ground-glass and solid components of subsolid nodules in pulmonary adenocarcinomas}, + journal = ER, + year = {2016}, + volume = {26}, + number = {12}, + pages = {4465-4474}, + doi = {10.1007/s00330-016-4317-3}, + url = {http://dx.doi.org/10.1007/s00330-016-4317-3}, + abstract = {To evaluate the performance of software in segmenting ground-glass and solid components of subsolid nodules in pulmonary adenocarcinomas. Seventy-three pulmonary adenocarcinomas manifesting as subsolid nodules were included. Two radiologists measured the maximal axial diameter of the ground-glass components on lung windows and that of the solid components on lung and mediastinal windows. Nodules were segmented using software by applying five (-850 HU to -650 HU) and nine (-130 HU to -500 HU) attenuation thresholds. We compared the manual and software measurements of ground-glass and solid components with pathology measurements of tumour and invasive components. Segmentation of ground-glass components at a threshold of -750 HU yielded mean differences of +0.06 mm (p=0.83, 95% limits of agreement, 4.51 to 4.67) and -2.32 mm (p<0.001, -8.27 to 3.63) when compared with pathology and manual measurements, respectively. For solid components, mean differences between the software (at -350 HU) and pathology measurements and between the manual (lung and mediastinal windows) and pathology measurements were -0.12 mm (p=0.74, -5.73 to 5.55), 0.15 mm (p=0.73, -6.92 to 7.22), and -1.14 mm (p<0.001, -7.93 to 5.64), respectively. Software segmentation of ground-glass and solid components in subsolid nodules showed no significant difference with pathology. - Software can effectively segment ground-glass and solid components in subsolid nodules. - Software measurements show no significant difference with pathology measurements.
- Manual measurements are more accurate on lung windows than on mediastinal windows.}, + file = {Cohe16.pdf:pdf\\Cohe16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27048527}, + month = {4}, + gsid = {3518501568376626554}, + gscites = {40}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/165779}, + ss_id = {2639e166ecb1dc63dcfe61eefe66b3e2213f17f6}, + all_ss_ids = {['2639e166ecb1dc63dcfe61eefe66b3e2213f17f6']}, +} + +@article{Cohe16a, + author = {Cohen, Julien G. and Goo, Jin Mo and Yoo, Roh-Eul and Park, Su Bin and {van Ginneken}, Bram and Ferretti, Gilbert R. and Lee, Chang Hyun and Park, Chang Min}, + title = {The effect of late-phase contrast enhancement on semi-automatic software measurements of CT attenuation and volume of part-solid nodules in lung adenocarcinomas}, + journal = EJR, + year = {2016}, + volume = {85}, + number = {6}, + month = {6}, + pages = {1174--1180}, + doi = {10.1016/j.ejrad.2016.03.027}, + url = {http://dx.doi.org/10.1016/j.ejrad.2016.03.027}, + abstract = {To evaluate the differences in semi-automatic measurements of CT attenuation and volume of part-solid nodules (PSNs) between unenhanced and enhanced CT scans.CT scans including unenhanced and enhanced phases (slice thickness 0.625 and 1.25mm, respectively) for 53 adenocarcinomas presenting as PSNs in 50 patients were retrospectively evaluated. For each nodule, semi-automatic segmentation provided the diameter, mean attenuation, mass, and volume of a whole nodule and its solid component. Interscan variability and statistical significance of the differences in those measures according to the adenocarcinoma category were evaluated by one reader.All parameters except for the mean attenuation of the solid components, were significantly increased on enhanced CT (p<0.05). For the whole nodule, the mean relative differences were as follows: the longest diameter, 1.4\% (limits of agreement, -6.2-9.1); volume, 2.4\% (-26.7-31.4); mass, 7.0\% (-11.3-25.2); mean attenuation, 2.7\% (-5.6-11). For the nodule's solid component, those differences were as follow: the longest diameter, 6.9\% (-34.4-48.2); volume, 17.9\% (-77.8-113.7); mass, 18.8\% (-77.8-115.4). 
The differences of measures between the unenhanced and enhanced CT were not significantly different between two groups of adenocarcinoma in situ/minimally invasive adenocarcinomas and invasive adenocarcinomas (p>0.05). As most volumetric and attenuation measurements changed significantly after contrast enhancement, care should be taken in comparing unenhanced and enhanced CT in the evaluation of PSNs.}, + file = {Cohe16a.pdf:pdf\\Cohe16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27161068}, + gsid = {13638511362314366120}, + gscites = {15}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/171720}, + ss_id = {670931e779bb4fe0dbb9a9bf3c00e5a53ce0f26b}, + all_ss_ids = {['670931e779bb4fe0dbb9a9bf3c00e5a53ce0f26b']}, +} + +@article{Cohe17, + author = {Cohen, Julien G and Kim, Hyungjin and Park, Su Bin and van Ginneken, Bram and Ferretti, Gilbert R and Lee, Chang Hyun and Goo, Jin Mo and Park, Chang Min}, + title = {Comparison of the effects of model-based iterative reconstruction and filtered back projection algorithms on software measurements in pulmonary subsolid nodules}, + journal = ER, + year = {2017}, + volume = {27}, + month = {8}, + pages = {3266-3274}, + doi = {10.1007/s00330-016-4716-5}, + abstract = {To evaluate the differences between filtered back projection (FBP) and model-based iterative reconstruction (MBIR) algorithms on semi-automatic measurements in subsolid nodules (SSNs). Unenhanced CT scans of 73 SSNs obtained using the same protocol and reconstructed with both FBP and MBIR algorithms were evaluated by two radiologists. Diameter, mean attenuation, mass and volume of whole nodules and their solid components were measured. Intra- and interobserver variability and differences between FBP and MBIR were then evaluated using the Bland-Altman method and Wilcoxon tests. Longest diameter, volume and mass of nodules and those of their solid components were significantly higher using MBIR (p<0.05). Semi-automatic measurements of SSNs significantly differed between FBP and MBIR; however, the differences were within the range of measurement variability. - Intra- and interobserver reproducibility of measurements did not differ between FBP and MBIR. - Differences in SSNs' semi-automatic measurement induced by reconstruction algorithms were not clinically significant. - Semi-automatic measurement may be conducted regardless of reconstruction algorithm. - SSNs' semi-automated classification agreement (pure vs. part-solid) did not significantly differ between algorithms.}, + file = {Cohe17.pdf:pdf\\Cohe17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28058482}, + gsid = {10862932001783753179}, + gscites = {15}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/178219}, + ss_id = {5f74400c97e716a85f20871b4b477dbca9680664}, + all_ss_ids = {['5f74400c97e716a85f20871b4b477dbca9680664']}, +} + +@article{Dale04, + author = {J. A. van Dalen and W. Vogel and H. J. Huisman and W. J. G. Oyen and G. J. Jager and N.
Karssemeijer}, + title = {Accuracy of rigid {CT}-{FDG}-{PET} image registration of the liver}, + journal = PMB, + year = {2004}, + volume = {49}, + pages = {5393--5405}, + doi = {10.1088/0031-9155/49/23/014}, + file = {Dale04.pdf:pdf/Dale04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TRACER}, + number = {23}, + pmid = {15656285}, + month = {11}, + gsid = {1953127824396335218}, + gscites = {26}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/57635}, + ss_id = {1536728fbcefe01ebf1d6f670b2308fac4565910}, + all_ss_ids = {['1536728fbcefe01ebf1d6f670b2308fac4565910']}, +} + +@article{Dale07, + author = {J. A. van Dalen and A. L. Hoffmann and V. Dicken and W. V. Vogel and B. Wiering and T. J. Ruers and N. Karssemeijer and W. J. G. Oyen}, + title = {A novel iterative method for lesion delineation and volumetric quantification with {FDG} {PET}}, + journal = NUCMC, + year = {2007}, + volume = {28}, + pages = {485--493}, + doi = {10.1097/MNM.0b013e328155d154}, + file = {Dale07.pdf:pdf\\Dale07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TRACER}, + number = {6}, + pmid = {17460540}, + month = {6}, + gsid = {9089375917348682570}, + gscites = {123}, + ss_id = {98607603fde29967bbef46cb8e5915c2d0b2d739}, + all_ss_ids = {['98607603fde29967bbef46cb8e5915c2d0b2d739']}, +} + +@conference{Dalm15, + author = {M.U. Dalmis and A. Gubern-M\'{e}rida and S. Vreemann and B. Platel and R. Mann and N. Karssemeijer}, + title = {Is Late Phase Information Necessary for Dynamic Evaluation of Breast Cancer?}, + booktitle = ECR, + year = {2015}, + abstract = {Purpose: Novel breast MRI protocols evaluate early contrast uptake of lesions, using a short acquisition time and high temporal resolution (tRes). However, these acquisitions do not provide information about late phase enhancement (curve-type). In this study we investigated whether adding late phase information to early contrast uptake dynamics improves the differentiation between benign and malignant lesions as measured with a computer aided diagnosis (CADx) system. Methods and Materials: 106 malignant and 44 benign lesions were evaluated, which were imaged with a 4.3 seconds tRes sequence for the initial 100 seconds and a high spatial resolution sequence for the subsequent 510 seconds. The locations of the lesions were marked manually. The CAD system automatically segmented the lesions and extracted six features that describe the early contrast uptake. These features were used in a random forest classifier for malignant/benign classification. In the second step, washout rate (WR) of the lesions obtained from the late phase acquisitions were added to these features and classification was repeated. The classification accuracies of both methods were compared using ROC analysis. Results: The area under the curve (AUC) was 0.816 with high tRes early contrast uptake dynamics features alone. Adding the WR feature slightly increased the AUC value to 0.824, where the difference was not statistically significant. Conclusion: Addition of late phase information on top of high tRes early uptake dynamics did not increase the diagnostic performance significantly. This result supports the feasibility of shorter MRI protocols for classification of breast lesions.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Dalm15a, + author = {M.U. Dalmis and A. Gubern-M\'{e}rida and S. Vreemann and R. Mann and N. Karssemeijer and B.
Platel}, + title = {Early Phase Contrast Enhancement Dynamics of Breast Lesions of Different Molecular Subtypes Characterized by a Computer-Aided-Diagnosis System}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE: To evaluate early-phase contrast enhancement biomarkers computed by a computer-aided diagnosis (CADx) system to distinguish between different molecular subtypes of invasive breast lesions, imaged with a high spatiotemporal resolution view-sharing Dynamic Contrast Enhancement (DCE)-MRI protocol. METHOD AND MATERIALS: We collected images of 164 (21 basal, 24 HER2+ and 119 ER/PR+) invasive breast cancer lesions from 145 patients. Our MRI protocol provided 20 images of contrast-agent uptake each 4.3 seconds apart. A CADx system was developed to characterize the lesions based on early-phase contrast uptake dynamics. This CADx system was composed of four main steps. First, the lesion was segmented using manually placed seed points. Second, the aorta was automatically detected in the MR volume and initial time of enhancement in the aorta was computed. Third, for each voxel, contrast uptake data was fit to an exponential model and six dynamic features were computed based on this curve. These features were computed with respect to the first time point where the aorta starts to enhance. For each feature, we computed mean and standard deviation values in the entire segmented lesion. Additionally these features were computed in an automatically selected small hot-spot of the lesion (without standard deviation). This resulted in a total of 18 features characterizing the early phase dynamics. Finally, random-forests classifiers were trained and tested in a leave-one-out fashion in the study dataset in 3 different ways, to distinguish: (1) basal-type lesions from others, (2) HER2+ lesions from others and (3) ER/PR+ lesions from other type lesions. We evaluated performance for each classification using ROC analysis. We computed 5 percent confidence intervals for area under curve (AUC) values using a bootstrapping method. RESULTS: The AUC values for classification of basal-type/others, HER2+/others and ER/PR+/others were 0.68 (0.58-0.77), 0.62 (0.52-0.73) and 0.58 (0.49-0.68), respectively. CONCLUSION: A CADx system was used to distinguish between three different molecular subtypes of invasive breast lesions, where the highest performance was found in classification of basal type lesions from others. CLINICAL RELEVANCE/APPLICATION: Early phase contrast enhancement dynamics measured with DCE-MRI of the breast can be used to give an indication of molecular subtype for invasive breast cancer lesions. CADx systems can be used for this purpose.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Dalm16, + author = {M.U. Dalmis and A. Gubern-M\'{e}rida and S. Vreemann and N. Karssemeijer and R. Mann and B. Platel}, + title = {A Computer-Aided Diagnosis System for Breast {DCE-MRI} at High Spatiotemporal Resolution}, + journal = MP, + year = {2016}, + volume = {43}, + number = {1}, + pages = {84--94}, + doi = {10.1118/1.4937787}, + abstract = {With novel MRI sequences, high spatiotemporal resolution has become available in dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) of the breast. Since benign structures in the breast can show enhancement similar to malignancies in DCE-MRI, characterization of detected lesions is an important problem. 
The purpose of this study is to develop a computer-aided diagnosis (CADx) system for characterization of breast lesions imaged with high spatiotemporal resolution DCE-MRI.The developed CADx system is composed of four main parts: semiautomated lesion segmentation, automated computation of morphological and dynamic features, aorta detection, and classification between benign and malignant categories. Lesion segmentation is performed by using a "multiseed smart opening" algorithm. Five morphological features were computed based on the segmentation of the lesion. For each voxel, contrast enhancement curve was fitted to an exponential model and dynamic features were computed based on this fitted curve. Average and standard deviations of the dynamic features were computed over the entire segmented area, in addition to the average value in an automatically selected smaller "most suspicious region." To compute the dynamic features for an enhancement curve, information of aortic enhancement is also needed. To keep the system fully automated, the authors developed a component which automatically detects the aorta and computes the aortic enhancement time. The authors used random forests algorithm to classify benign lesions from malignant. The authors evaluated this system in a dataset of breast MRI scans of 325 patients with 223 malignant and 172 benign lesions and compared its performance to an existing approach. The authors also evaluated the classification performances for ductal carcinoma in situ (DCIS), invasive ductal carcinoma (IDC), and invasive lobular carcinoma (ILC) lesions separately. The classification performances were measured by receiver operating characteristic (ROC) analysis in a leave-one-out cross validation scheme.The area under the ROC curve (AUC) obtained by the proposed CADx system was 0.8543, which was significantly higher (p = 0.007) than the performance obtained by the previous CADx system (0.8172) on the same dataset. The AUC values for DCIS, IDC, and ILC lesions were 0.7924, 0.8688, and 0.8650, respectively.The authors developed a CADx system for high spatiotemporal resolution DCE-MRI of the breast. This system outperforms a previously proposed system in classifying benign and malignant lesions, while it requires less user interactions.}, + file = {Dalm16.pdf:pdf\\Dalm16.pdf:PDF}, + optnote = {DIAG}, + pmid = {26745902}, + month = {12}, + gsid = {1749839565429400652}, + gscites = {30}, + ss_id = {2662ff8ddd7135754bc72ca833eacfc1f307aeb1}, + all_ss_ids = {['2662ff8ddd7135754bc72ca833eacfc1f307aeb1', '14198a817d2c0a800bfb0a0a36baf5097fe22054']}, +} + +@inproceedings{Dalm16a, + author = {Ufuk Dalmi\c{s}, Mehmet and Gubern-M\'{e}rida, Albert and Borelli, Cristina and Vreemann, Suzan and Mann, Ritse M. and Karssemeijer, Nico}, + title = {A fully automated system for quantification of background parenchymal enhancement in breast DCE-MRI}, + doi = {10.1117/12.2211640}, + year = {2016}, + abstract = {Background parenchymal enhancement (BPE) observed in breast dynamic contrast enhanced magnetic resonance imaging (DCE-MRI) has been identified as an important biomarker associated with risk for developing breast cancer. In this study, we present a fully automated framework for quantification of BPE. We initially segmented fibroglandular tissue (FGT) of the breasts using an improved version of an existing method. 
Subsequently, we computed BPEabs (volume of the enhancing tissue), BPErf (BPEabs divided by FGT volume) and BPErb (BPEabs divided by breast volume), using different relative enhancement threshold values between 1% and 100%. To evaluate and compare the previous and improved FGT segmentation methods, we used 20 breast DCE-MRI scans and we computed Dice similarity coefficient (DSC) values with respect to manual segmentations. For evaluation of the BPE quantification, we used a dataset of 95 breast DCE-MRI scans. Two radiologists, in individual reading sessions, visually analyzed the dataset and categorized each breast into minimal, mild, moderate and marked BPE. To measure the correlation between automated BPE values and the radiologists' assessments, we converted these values into ordinal categories and we used Spearman's rho as a measure of correlation. According to our results, the new segmentation method obtained an average DSC of 0.81 ± 0.09, which was significantly higher (p<0.001) compared to the previous method (0.76 ± 0.10). The highest correlation values between automated BPE categories and radiologists' assessments were obtained with the BPErf measurement (r=0.55, r=0.49, p<0.001 for both), while the correlation between the scores given by the two radiologists was 0.82 (p<0.001). The presented framework can be used to systematically investigate the correlation between BPE and risk in large screening cohorts.}, + url = {http://dx.doi.org/10.1117/12.2211640}, + file = {Dalm16a.pdf:pdf\Dalm16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Imaging 2016: Computer-Aided Diagnosis}, + citation-count = {2}, + automatic = {yes}, +} + +@article{Dalm17, + author = {Dalmis, Mehmet Ufuk and Litjens, Geert and Holland, Katharina and Setio, Arnaud and Mann, Ritse and Karssemeijer, Nico and Gubern-M\'{e}rida, Albert}, + title = {Using deep learning to segment breast and fibroglandular tissue in {MRI} volumes}, + journal = MP, + year = {2017}, + volume = {44}, + issue = {2}, + month = {2}, + pages = {533--546}, + doi = {10.1002/mp.12079}, + abstract = {Automated segmentation of breast and fibroglandular tissue (FGT) is required for various computer-aided applications of breast MRI. Traditional image analysis and computer vision techniques, such as atlas, template matching, or edge and surface detection, have been applied to solve this task. However, applicability of these methods is usually limited by the characteristics of the images used in the study datasets, while breast MRI varies with respect to the different MRI protocols used, in addition to the variability in breast shapes. All this variability, in addition to various MRI artifacts, makes it a challenging task to develop a robust breast and FGT segmentation method using traditional approaches. Therefore, in this study, we investigated the use of a deep-learning approach known as "U-net." We used a dataset of 66 breast MRI's randomly selected from our scientific archive, which includes five different MRI acquisition protocols and breasts from four breast density categories in a balanced distribution. To prepare reference segmentations, we manually segmented breast and FGT for all images using an in-house developed workstation. We experimented with the application of U-net in two different ways for breast and FGT segmentation.
In the first method, following the same pipeline used in traditional approaches, we trained two consecutive (2C) U-nets: first for segmenting the breast in the whole MRI volume and the second for segmenting FGT inside the segmented breast. In the second method, we used a single 3-class (3C) U-net, which performs both tasks simultaneously by segmenting the volume into three regions: nonbreast, fat inside the breast, and FGT inside the breast. For comparison, we applied two existing and published methods to our dataset: an atlas-based method and a sheetness-based method. We used Dice Similarity Coefficient (DSC) to measure the performances of the automated methods, with respect to the manual segmentations. Additionally, we computed Pearson's correlation between the breast density values computed based on manual and automated segmentations. The average DSC values for breast segmentation were 0.933, 0.944, 0.863, and 0.848 obtained from 3C U-net, 2C U-nets, atlas-based method, and sheetness-based method, respectively. The average DSC values for FGT segmentation obtained from 3C U-net, 2C U-nets, and atlas-based methods were 0.850, 0.811, and 0.671, respectively. The correlation between breast density values based on 3C U-net and manual segmentations was 0.974. This value was significantly higher than 0.957 as obtained from 2C U-nets (P < 0.0001, Steiger's Z-test with Bonferoni correction) and 0.938 as obtained from atlas-based method (P = 0.0016). In conclusion, we applied a deep-learning method, U-net, for segmenting breast and FGT in MRI in a dataset that includes a variety of MRI protocols and breast densities. Our results showed that U-net-based methods significantly outperformed the existing algorithms and resulted in significantly more accurate breast density computation.}, + file = {:pdf/Dalm17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28035663}, + gsid = {9993343296257816404}, + gscites = {171}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/173062}, + ss_id = {11f7a41abab06b8792057fa46cd3de08b22ac7d0}, + all_ss_ids = {['11f7a41abab06b8792057fa46cd3de08b22ac7d0']}, +} + +@article{Dalm18, + author = {Dalmis, Mehmet Ufuk and Vreemann, Suzan and Kooi, Thijs and Mann, Ritse M and Karssemeijer, Nico and Gubern-Merida, Albert}, + title = {Fully automated detection of breast cancer in screening MRI using convolutional neural networks}, + journal = JMI, + year = {2018}, + volume = {5}, + issue = {1}, + month = {1}, + pages = {014502}, + doi = {10.1117/1.JMI.5.1.014502}, + abstract = {Current computer-aided detection (CADe) systems for contrast-enhanced breast MRI rely on both spatial information obtained from the early-phase and temporal information obtained from the late-phase of the contrast enhancement. However, late-phase information might not be available in a screening setting, such as in abbreviated MRI protocols, where acquisition is limited to early-phase scans. We used deep learning to develop a CADe system that exploits the spatial information obtained from the early-phase scans. This system uses three-dimensional (3-D) morphological information in the candidate locations and the symmetry information arising from the enhancement differences of the two breasts. We compared the proposed system to a previously developed system, which uses the full dynamic breast MRI protocol. For training and testing, we used 385 MRI scans, containing 161 malignant lesions. Performance was measured by averaging the sensitivity values between 1/8-eight false positives. 
In our experiments, the proposed system obtained a significantly ([Formula: see text]) higher average sensitivity ([Formula: see text]) compared with that of the previous CADe system ([Formula: see text]). In conclusion, we developed a CADe system that is able to exploit the spatial information obtained from the early-phase scans and can be used in screening programs where abbreviated MRI protocols are used.}, + file = {:pdf/Dalm18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29340287}, + gsid = {14025750273420770243}, + gscites = {54}, + ss_id = {c752bf23b7c12e6f81b6d2b014e29597095a8f94}, + all_ss_ids = {['c752bf23b7c12e6f81b6d2b014e29597095a8f94']}, +} + +@article{Dalm19, + author = {Dalmis, Mehmet Ufuk and Gubern-M{\'{e}}rida, Albert and Vreemann, Suzan and Bult, Peter and Karssemeijer, Nico and Mann, Ritse and Teuwen, Jonas}, + title = {{Artificial Intelligence Based Classification of Breast Lesions Imaged With a Multi-Parametric Breast MRI Protocol With ultrafast DCE-MRI, T2 and DWI}}, + journal = IR, + year = {2019}, + volume = {56}, + issue = {6}, + pages = {325-332}, + doi = {10.1097/RLI.0000000000000544}, + abstract = {{OBJECTIVES: We investigated artificial intelligence (AI)-based classification of benign and malignant breast lesions imaged with a multiparametric breast magnetic resonance imaging (MRI) protocol with ultrafast dynamic contrast-enhanced MRI, T2-weighted, and diffusion-weighted imaging with apparent diffusion coefficient mapping. MATERIALS AND METHODS: We analyzed 576 lesions imaged with MRI, including a consecutive set of biopsied malignant (368) and benign (149) lesions, and an additional set of 59 benign lesions proven by follow-up. We used deep learning methods to interpret ultrafast dynamic contrast-enhanced MRI and T2-weighted information. A random forests classifier combined the output with patient information (PI; age and BRCA status) and apparent diffusion coefficient values obtained from diffusion-weighted imaging to perform the final lesion classification. We used receiver operating characteristic (ROC) analysis to evaluate our results. Sensitivity and specificity were compared with the results of the prospective clinical evaluation by radiologists. RESULTS: The area under the ROC curve was 0.811 when only ultrafast dynamics was used. The final AI system that combined all imaging information with PI resulted in an area under the ROC curve of 0.852, significantly higher than the ultrafast dynamics alone (P = 0.002). When operating at the same sensitivity level of radiologists in this dataset, this system produced 19 less false-positives than the number of biopsied benign lesions in our dataset. CONCLUSIONS: Use of adjunct imaging and PI has a significant contribution in diagnostic performance of ultrafast breast MRI. The developed AI system for interpretation of multiparametric ultrafast breast MRI may improve specificity.}}, + file = {Dalm19.pdf:pdf\\Dalm19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30652985}, + month = {6}, + gsid = {7865697668757504203}, + gscites = {69}, + ss_id = {78a25fcd0825f006d8326a932d681e9420a9b77f}, + all_ss_ids = {['78a25fcd0825f006d8326a932d681e9420a9b77f']}, +} + +@phdthesis{Dalm19a, + author = {Mehmet Ufuk Dalmis}, + title = {Automated Analysis of Breast MRI From traditional methods into deep learning}, + url = {https://repository.ubn.ru.nl/handle/2066/203897}, + abstract = {Breast MRI is an indispensable modality in breast imaging with its high sensitivity for detecting breast cancers. 
Although benign lesions also appear in breast MRI, it is possible to distinguish them from malignant lesions by interpreting them based on their morphology, contrast-enhancement dynamics and other adjunct information such as their diffusion properties imaged with DWI and their appearance on T2w images.}, + copromotor = {R. Mann, A. Gubern M\'{e}rida, J. Teuwen}, + file = {:pdf/Dalm19a.pdf:PDF}, + optnote = {DIAG}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + year = {2019}, + journal = {PhD thesis}, +} + +@article{Dama22, + author = {Marina D'Amato and Przemys{\l}aw Szostak and Benjamin Torben-Nielsen}, + title = {A Comparison Between Single- and Multi-Scale Approaches for Classification of Histopathology Images}, + doi = {10.3389/fpubh.2022.892658}, + volume = {10}, + abstract = {Whole slide images (WSIs) are digitized histopathology images.}, + file = {Dama22.pdf:pdf\\Dama22.pdf:PDF}, + journal = {Frontiers in Public Health}, + month = {7}, + optnote = {DIAG}, + publisher = {Frontiers Media {SA}}, + year = {2022}, +} + +@conference{Dama23, + author = {Marina D'Amato and Maschenka Balkenhol and Mart van Rijthoven and Jeroen van der Laak and Francesco Ciompi}, + booktitle = {MIDL}, + title = {On the robustness of regressing tumor percentage as an explainable detector in histopathology whole-slide images}, + abstract = {In recent years, Multiple Instance Learning (MIL) approaches have gained popularity to address the task of weakly-supervised tumor detection in whole-slide images (WSIs). + However, standard MIL relies on classification methods for tumor detection that require negative control, i.e., tumor-free cases, which are challenging to obtain in real-world clinical scenarios, especially when considering surgical resection specimens. + Inspired by recent work, in this paper we tackle tumor detection via a MIL-like weakly-supervised regression approach to predict the percentage of tumor present in WSIs, a clinically available target that allows to overcome the problem of need for manual annotations or presence of tumor-free slides. + We characterize the quality of such a target by investigating its robustness in the presence of noise on regression percentages and provide explainability through attention maps. We test our approach on breast cancer data from primary tumor and lymph node metastases.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@inproceedings{Dana97, + author = {K. J. Dana and B. van Ginneken and S. K. Nayar and J. J. Koenderink}, + title = {Reflectance and texture of real-world surfaces}, + booktitle = CVPR, + year = {1997}, + pages = {151--157}, + doi = {10.1109/CVPR.1997.609313}, + abstract = {{I}n this work, we investigate the visual appearance of real-world surfaces and the dependence of appearance on imaging conditions. {W}e present a {BRDF} (bidirectional reflectance distribution function) database with reflectance measurements for over 60 different samples, each observed with over 200 different combinations of viewing and source directions. {W}e fit the {BRDF} measurements to two recent models to obtain a {BRDF} parameter database. {T}hese {BRDF} parameters can be directly used for both image analysis and image synthesis. {F}inally, we present a {BTF} (bidirectional texture function) database with image textures from over 60 different samples, each observed with over 200 different combinations of viewing and source directions.
{E}ach of these unique databases has important implications for a variety of vision algorithms and each is made publicly available.}, + file = {Dana97.pdf:pdf\\Dana97.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {1}, + ss_id = {8a07de049e417cc61442d8c1ecc576155f6777a8}, + all_ss_ids = {['8a07de049e417cc61442d8c1ecc576155f6777a8']}, + gscites = {84}, +} + +@article{Dana99, + author = {K. J. Dana and B. van Ginneken and S. K. Nayar and J. J. Koenderink}, + title = {Reflectance and texture of real world surfaces}, + journal = ACMTG, + year = {1999}, + volume = {18}, + pages = {1-34}, + doi = {10.1145/300776.300778}, + abstract = {{I}n this work, we investigate the visual appearance of real-world surfaces and the dependence of appearance on the geometry of imaging conditions. {W}e discuss a new texture representation called the {BTF} (bidirectional texture function) which captures the variation in texture with illumination and viewing direction. {W}e present a {BTF} database with image textures from over 60 different samples, each observed with over 200 different combinations of viewing and illumination directions. {W}e describe the methods involved in collecting the database as well as the importance and uniqueness of this database for computer graphics. {A} related quantity to the {BTF} is the familiar {BRDF} (bidirectional reflectance distribution function). {T}he measurement methods involved in the {BTF} database are conducive to a simultaneous measurement of the {BRDF}. {A}ccordingly, we also present a {BRDF} database with reflectance measurements for over 60 different samples, each observed with over 200 different combinations of viewing and illumination directions. {B}oth of these unique databases are publicly available and have important implications for computer graphics.}, + file = {Dana99.pdf:pdf\\Dana99.pdf:PDF}, + gsid = {10703879051793209252,17798667214681108922}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {1}, + gscites = {1745}, + ss_id = {1e0bf0fbefbb0404c3c18740123a969091c67ef4}, + all_ss_ids = {['1e0bf0fbefbb0404c3c18740123a969091c67ef4']}, +} + +@article{Davi18, + author = {Davide Belli and Shi Hu and Ecem Sogancioglu and Bram van Ginneken}, + title = {Chest X-Rays Image Inpainting with Context Encoders}, + journal = {arXiv:1812.00964}, + year = {2018}, + abstract = {Chest X-rays are one of the most commonly used technologies for medical diagnosis. Many deep learning models have been proposed to improve and automate the abnormality detection task on this type of data. In this paper, we propose a different approach based on image inpainting under adversarial training first introduced by Goodfellow et al. We configure the context encoder model for this task and train it over 1.1M 128x128 images from healthy X-rays. The goal of our model is to reconstruct the missing central 64x64 patch. Once the model has learned how to inpaint healthy tissue, we test its performance on images with and without abnormalities. We discuss and motivate our results considering PSNR, MSE and SSIM scores as evaluation metrics. In addition, we conduct a 2AFC observer study showing that in half of the times an expert is unable to distinguish real images from the ones reconstructed using our model. 
By computing and visualizing the pixel-wise difference between source and reconstructed images, we can highlight abnormalities to simplify further detection and classification tasks.}, + optnote = {DIAG}, + month = {12}, + ss_id = {cb8d5a5a21c99bdf5ad5734742c04229c978401d}, + all_ss_ids = {['cb8d5a5a21c99bdf5ad5734742c04229c978401d']}, + gscites = {0}, +} + +@article{DeSo19, + author = {deSouza, Nandita M and Achten, Eric and Alberich-Bayarri, Angel and Bamberg, Fabian and Boellaard, Ronald and Clement, Olivier and Fournier, Laure and Gallagher, Ferdia and Golay, Xavier and Heussel, Claus Peter and Jackson, Edward F and Manniesing, Rashindra and Mayerhofer, Marius E and Neri, Emanuele and O'Connor, James and Oguz, Kader Karli and Persson, Anders and Smits, Marion and van Beek, Edwin J R and Zech, Christoph J and European Society of Radiology}, + title = {Validated imaging biomarkers as decision-making tools in clinical trials and routine practice: current status and recommendations from the EIBALL* subcommittee of the European Society of Radiology (ESR)}, + doi = {10.1186/s13244-019-0764-0}, + issue = {1}, + pages = {87}, + volume = {10}, + abstract = {Observer-driven pattern recognition is the standard for interpretation of medical images. To achieve global parity in interpretation, semi-quantitative scoring systems have been developed based on observer assessments; these are widely used in scoring coronary artery disease, the arthritides and neurological conditions and for indicating the likelihood of malignancy. However, in an era of machine learning and artificial intelligence, it is increasingly desirable that we extract quantitative biomarkers from medical images that inform on disease detection, characterisation, monitoring and assessment of response to treatment. Quantitation has the potential to provide objective decision-support tools in the management pathway of patients. Despite this, the quantitative potential of imaging remains under-exploited because of variability of the measurement, lack of harmonised systems for data acquisition and analysis, and crucially, a paucity of evidence on how such quantitation potentially affects clinical decision-making and patient outcome. This article reviews the current evidence for the use of semi-quantitative and quantitative biomarkers in clinical settings at various stages of the disease pathway including diagnosis, staging and prognosis, as well as predicting and detecting treatment response. It critically appraises current practice and sets out recommendations for using imaging objectively to drive patient management decisions.}, + file = {:pdf\\DeSo19.pdf:PDF}, + journal = INSI, + month = {8}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31468205}, + year = {2019}, + gsid = {16227468342087800522}, + gscites = {61}, + ss_id = {4d1a108034c95b70c51ffcc73cec398e312b3446}, + all_ss_ids = {['4d1a108034c95b70c51ffcc73cec398e312b3446']}, +} + +@inproceedings{Deba10, + author = {O. A. Debats and N. Karssemeijer and J. O. Barentsz and H. J. Huisman}, + title = {Automated classification of lymph nodes in {USPIO}-enhanced {MR}-images: a comparison of three segmentation methods}, + booktitle = MI, + year = {2010}, + volume = {7624}, + series = SPIE, + pages = {76240Q}, + doi = {10.1117/12.845640}, + url = {http://link.aip.org/link/?PSI/7624/76240Q/1}, + abstract = {Computer assisted detection (CAD) of lymph node metastases may help reduce reading time and improve interpretation of the large amount of image data in an MR-lymphography exam. 
We compared the influence of using different segmentation methods on the performance of a CAD system for classification of normal and metastasized lymph nodes. Our database consisted of T1 and T2*-weighted pelvic MR images of 603 lymph nodes, enhanced by USPIO contrast medium. For each lymph node, one seed point was manually defined Three automated segmentation methods were compared: 1. Confidence Connected segmentation, extended with automated Bandwidth Factor selection; 2. Conventional Graph Cut segmentation; 3. Pseudo-segmentation by selecting a sphere around the seed point. All lymph nodes were also manually segmented by a radiologist. The resulting segmentations were used to calculate 2 features (mean T1 and T2* signal intensity). Linear discriminant analysis was used for classification. The diagnostic accuracy (AUC at ROC-analysis) was: 0.95 (Confidence- Connected); 0.95 (Graph-Cut); 0.85 (spheres); and 0.95 (manual segmentations). The CAD performance of both the Confidence Connected and Graph Cut methods was as good as the manual segmentation. The substantially lower performance of the sphere segmentations demonstrates the need for accurate segmentations, even in USPIO-enhanced images.}, + file = {Deba10.pdf:pdf/Deba10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {18035026185759165920}, + gscites = {2}, + ss_id = {6d0c6c2a9a8c9d13e071fe9b6d76e57f3c301a33}, + all_ss_ids = {['6d0c6c2a9a8c9d13e071fe9b6d76e57f3c301a33']}, +} + +@article{Deba11, + author = {Oscar A. Debats AND G. Litjens AND Jelle O. Barentsz AND N. Karssemeijer AND H.J. Huisman}, + title = {Automated 3-Dimensional Segmentation of Pelvic Lymph Nodes in Magnetic Resonance Images}, + journal = MP, + year = {2011}, + volume = {38}, + pages = {6178--6187}, + doi = {10.1118/1.3654162}, + abstract = {PURPOSE: Computer aided diagnosis (CAD) of lymph node metastases may help reduce reading time and improve interpretation of the large amount of image data in a 3-D pelvic MRI exam. The purpose of this study was to develop an algorithm for automated segmentation of pelvic lymph nodes from a single seed point, as part of a CAD system for the classification of normal vs metastatic lymph nodes, and to evaluate its performance compared to other algorithms. METHODS: The authors' database consisted of pelvic MR images of 146 consecutive patients, acquired between January 2008 and April 2010. Each dataset included four different MR sequences, acquired after infusion of a lymph node specific contrast medium based on ultrasmall superparamagnetic particles of iron oxide. All data sets were analyzed by two expert readers who, reading in consensus, annotated and manually segmented the lymph nodes. The authors compared four segmentation algorithms: confidence connected region growing (CCRG), extended CCRG (ECC), graph cut segmentation (GCS), and a segmentation method based on a parametric shape and appearance model (PSAM). The methods were ranked based on spatial overlap with the manual segmentations, and based on diagnostic accuracy in a CAD system, with the experts' annotations as reference standard. RESULTS: A total of 2347 manually annotated lymph nodes were included in the analysis, of which 566 contained a metastasis. The mean spatial overlap (Dice similarity coefficient) was: 0.35 (CCRG), 0.57 (ECC), 0.44 (GCS), and 0.46 (PSAM). When combined with the classification system, the area under the ROC curve was: 0.805 (CCRG), 0.890 (ECC), 0.807 (GCS), 0.891 (PSAM), and 0.935 (manual segmentation). 
CONCLUSIONS: We identified two segmentation methods, ECC and PSAM, that achieve a high diagnostic accuracy when used in conjunction with a CAD system for classification of normal vs metastatic lymph nodes. The manual segmentations still achieve the highest diagnostic accuracy.}, + file = {Deba11.pdf:pdf\\Deba11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {22047383}, + month = {10}, + gsid = {1254525851921315650}, + gscites = {18}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/96662}, + ss_id = {5fd64cf068745acfc9df060a4aecb7ff6e68ead4}, + all_ss_ids = {['5fd64cf068745acfc9df060a4aecb7ff6e68ead4']}, +} + +@conference{Deba11a, + author = {O.A. Debats and T. Hambrock and G. Litjens and H.J. Huisman and J.O. Barentsz}, + title = {Detection of Lymph Node Metastases with Ferumoxtran-10 vs Ferumoxytol}, + booktitle = RSNA, + year = {2011}, + abstract = {PURPOSE: Magnetic Resonance Lymphography (MRL) with ferumoxtran-10 as contrast agent has a high diagnostic accuracy in the detection of lymph node metastases in prostate cancer (PCa) patients, but ferumoxtran-10 is no longer available. Ferumoxytol may be an alternative. Our purpose was to compare the diagnostic accuracy of ferumoxytol-based MRL with ferumoxtran-10-based MRL. METHOD AND MATERIALS: Our dataset contained the MRL images from patients who had had two MRLs as part of their clinical work-up in 2009 or 2010: one with ferumoxtran-10 and one with ferumoxytol as contrast agent. Each MRL consisted of at least a 3D T1-weighted sequence and a 3D T2*-weighted sequence. Two expert readers (an experienced radiologist and an experienced researcher), reading in consensus, interpreted each lymph node as normal or metastasized, based on the contrast uptake pattern in the T2*-weighted images, and delineated the lymph nodes in both types of MRL. Based on these delineations, quantitative measurements of the contrast-induced signal decrease were performed using histogram analysis. Measurements of signal decrease inside the lymph nodes were used to discriminate metastatic from normal nodes. The areas under the receiver operating characteristic curves (AUC) were computed to compare diagnostic accuracy, and the statistical significance of the difference between the two contrast agents was tested using the R Project for Statistical Computing and the ROCKIT software package. RESULTS: A total of 57 lymph nodes from four patients were included in the analysis, of which 7 were metastatic. The AUC was 0.96 for ferumoxtran-10-MRL, which is similar to results found in previous studies, and 0.87 for ferumoxytol-MRL. The difference between ferumoxtran-10 and ferumoxytol was statistically significant (p<0.05). The difference between ferumoxytol-enhanced and non-enhanced imaging was also significant (p<0.01). CONCLUSION: Although the number of lymph nodes included in this pilot study is limited, our results show that the performance of ferumoxytol-MRL is significantly lower than ferumoxtran-10-MRL. However, ferumoxytol-MRL has still a significantly higher discriminative performance compared to plain MRI. CLINICAL RELEVANCE/APPLICATION: While ferumoxtran-10 performs best in the detection of nodal metastases but is not available, ferumoxytol improves accuracy compared to plain MRI and is recommended prior to PCa treatment planning.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Deba16, + author = {Debats, Oscar A. and Meijs, Midas and Litjens, Geert J. S. 
and Huisman, Henkjan J.}, + title = {Automated multistructure atlas-assisted detection of lymph nodes using pelvic MR lymphography in prostate cancer patients}, + journal = MP, + year = {2016}, + volume = {43}, + number = {6}, + month = {6}, + pages = {3132}, + doi = {10.1118/1.4951726}, + url = {http://dx.doi.org/10.1118/1.4951726}, + abstract = {To investigate whether atlas-based anatomical information can improve a fully automated lymph node detection system for pelvic MR lymphography (MRL) images of patients with prostate cancer.Their data set contained MRL images of 240 prostate cancer patients who had an MRL as part of their clinical work-up between January 2008 and April 2010, with ferumoxtran-10 as contrast agent. Each MRL consisted of at least a 3D T1-weighted sequence, a 3D T2*-weighted sequence, and a FLASH-3D sequence. The reference standard was created by two expert readers, reading in consensus, who annotated and interactively segmented the lymph nodes in all MRL studies. A total of 5089 lymph nodes were annotated. A fully automated computer-aided detection (CAD) system was developed to find lymph nodes in the MRL studies. The system incorporates voxel features based on image intensities, the Hessian matrix, and spatial position. After feature calculation, a GentleBoost-classifier in combination with local maxima detection was used to identify lymph node candidates. Multiatlas based anatomical information was added to the CAD system to assess whether this could improve performance. Using histogram analysis and free-receiver operating characteristic analysis, this was compared to a strategy where relative position features were used to encode anatomical information.Adding atlas-based anatomical information to the CAD system reduced false positive detections both visually and quantitatively. Median likelihood values of false positives decreased significantly in all annotated anatomical structures. The sensitivity increased from 53\% to 70\% at 10 false positives per lymph node.Adding anatomical information through atlas registration significantly improves an automated lymph node detection system for MRL images.}, + file = {Deba16.pdf:pdf\\Deba16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27277059}, + gsid = {4915040609318816659}, + gscites = {2}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/171995}, + ss_id = {70079b1af89534ad1caa342fdac269789d31459c}, + all_ss_ids = {['70079b1af89534ad1caa342fdac269789d31459c']}, +} + +@article{Deba16a, + author = {Debats, Oscar A and Fortuin, Ansje S and Meijer, Hanneke J M and Hambrock, Thomas and Litjens, Geert J S and Barentsz, Jelle O and Huisman, Henkjan J}, + title = {Intranodal signal suppression in pelvic MR lymphography of prostate cancer patients: a quantitative comparison of ferumoxtran-10 and ferumoxytol}, + journal = PRJ, + year = {2016}, + volume = {4}, + pages = {e2471}, + doi = {10.7717/peerj.2471}, + abstract = {The key to MR lymphography is suppression of T2* MR signal in normal lymph nodes, while retaining high signal in metastatic nodes. Our objective is to quantitatively compare the ability of ferumoxtran-10 and ferumoxytol to suppress the MR signal in normal pelvic lymph nodes. In 2010, a set of consecutive patients who underwent intravenous MR Lymphography (MRL) were included. Signal suppression in normal lymph nodes in T2*-weighted images due to uptake of USPIO (Ultra-Small Superparamagnetic Particles of Iron Oxide) was quantified. 
Signal suppression by two USPIO contrast agents, ferumoxtran-10 and ferumoxytol was compared using Wilcoxon's signed rank test. Forty-four patients were included, of which all 44 had a ferumoxtran-10 MRL and 4 had additionally a ferumoxytol MRL. A total of 684 lymph nodes were identified in the images, of which 174 had been diagnosed as metastatic. USPIO-induced signal suppression in normal lymph nodes was significantly stronger in ferumoxtran-10 MRL than in ferumoxytol MRL (p < 0.005). T2* signal suppression in normal pelvic lymph nodes is significantly stronger with ferumoxtran-10 than with ferumoxytol, which may affect diagnostic accuracy.}, + file = {Deba16a.pdf:pdf\\Deba16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27781154}, + month = {10}, + gsid = {14959316295455027146}, + gscites = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/165687}, + ss_id = {7b919a690fea032f8b55339c2a0f4abc56add55b}, + all_ss_ids = {['7b919a690fea032f8b55339c2a0f4abc56add55b']}, +} + +@article{Deba19, + author = {Debats, Oscar A. and Litjens, Geert J. S. and Huisman, Henkjan J.}, + title = {Lymph node detection in MR Lymphography: false positive reduction using multi-view convolutional neural networks}, + journal = PRJ, + year = {2019}, + volume = {7}, + pages = {e8052}, + doi = {10.7717/peerj.8052}, + abstract = {To investigate whether multi-view convolutional neural networks can improve a fully automated lymph node detection system for pelvic MR Lymphography (MRL) images of patients with prostate cancer. A fully automated computer-aided detection (CAD) system had been previously developed to detect lymph nodes in MRL studies. The CAD system was extended with three types of 2D multi-view convolutional neural networks (CNN) aiming to reduce false positives (FP). A 2D multi-view CNN is an efficient approximation of a 3D CNN, and three types were evaluated: a 1-view, 3-view, and 9-view 2D CNN. The three deep learning CNN architectures were trained and configured on retrospective data of 240 prostate cancer patients that received MRL images as the standard of care between January 2008 and April 2010. The MRL used ferumoxtran-10 as a contrast agent and comprised at least two imaging sequences: a 3D T1-weighted and a 3D T2*-weighted sequence. A total of 5089 lymph nodes were annotated by two expert readers, reading in consensus. A first experiment compared the performance with and without CNNs and a second experiment compared the individual contribution of the 1-view, 3-view, or 9-view architecture to the performance. The performances were visually compared using free-receiver operating characteristic (FROC) analysis and statistically compared using partial area under the FROC curve analysis. Training and analysis were performed using bootstrapped FROC and 5-fold cross-validation. Adding multi-view CNNs significantly ( < 0.01) reduced false positive detections. The 3-view and 9-view CNN outperformed ( < 0.01) the 1-view CNN, reducing FP from 20.6 to 7.8/image at 80% sensitivity. Multi-view convolutional neural networks significantly reduce false positives in a lymph node detection system for MRL images, and three orthogonal views are sufficient. 
At the achieved level of performance, CAD for MRL may help speed up finding lymph nodes and assessing them for potential metastatic involvement.}, + file = {:pdf/Deba19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31772836}, + month = {11}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/215826}, + ss_id = {663b19914b7fe99a2c3cc1641ba8d47109d63db2}, + all_ss_ids = {['663b19914b7fe99a2c3cc1641ba8d47109d63db2']}, + gscites = {11}, +} + +@article{Dekk20, + author = {Mirthe Dekker and Farahnaz Waissi and Ingrid E.M. Bank and Nikolas Lessmann and Ivana I{\v{s}}gum and Birgitta K. Velthuis and Asbj{\o}rn M. Scholtens and Geert E. Leenders and Gerard Pasterkamp and Dominique P.V. de Kleijn and Leo Timmers and Arend Mosterd}, + title = {Automated calcium scores collected during myocardial perfusion imaging improve identification of obstructive coronary artery disease}, + journal = IJCARDHV, + year = {2020}, + volume = {26}, + pages = {100434}, + doi = {10.1016/j.ijcha.2019.100434}, + pmid = {31768415}, + optnote = {DIAG, RADIOLOGY}, + file = {Dekk20.pdf:pdf\\Dekk20.pdf:PDF}, + abstract = {Myocardial perfusion imaging (MPI) is an accurate noninvasive test for patients with suspected obstructive coronary artery disease (CAD) and coronary artery calcium (CAC) score is known to be a powerful predictor of cardiovascular events. Collection of CAC scores simultaneously with MPI is unexplored. We aimed to investigate whether automatically derived CAC scores during myocardial perfusion imaging would further improve the diagnostic accuracy of MPI to detect obstructive CAD. We analyzed 150 consecutive patients without a history of coronary revascularization with suspected obstructive CAD who were referred for 82Rb PET/CT and available coronary angiographic data. Myocardial perfusion was evaluated both semi quantitatively as well as quantitatively according to the European guidelines. CAC scores were automatically derived from the low-dose attenuation correction CT scans using previously developed software based on deep learning. Obstructive CAD was defined as stenosis >70% (or >50% in the left main coronary artery) and/or fractional flow reserve (FFR) <=0.80. In total 58% of patients had obstructive CAD of which seventy-four percent were male. Addition of CAC scores to MPI and clinical predictors significantly improved the diagnostic accuracy of MPI to detect obstructive CAD. The area under the curve (AUC) increased from 0.87 to 0.91 (p: 0.025). Sensitivity and specificity analysis showed an incremental decrease in false negative tests with our MPI+CAC approach (n=14 to n=4), as a consequence an increase in false positive tests was seen (n=11 to n=28). CAC scores collected simultaneously with MPI improve the detection of obstructive coronary artery disease in patients without a history of coronary revascularization.}, + ss_id = {583fa25f21581a7bf39510373038cb9d69004232}, + all_ss_ids = {['583fa25f21581a7bf39510373038cb9d69004232']}, + gscites = {13}, +} + +@article{Dekk21, + title = {High Levels of Osteoprotegerin Are Associated with Coronary Artery Calcification in Patients Suspected of a Chronic Coronary Syndrome}, + author = {Dekker, Mirthe and Waissi, Farahnaz and Silvis, Max J. M. and Bennekom, Joelle V. and Schoneveld, Arjan H. and {de Winter}, Robbert J. and Isgum, Ivana and Lessmann, Nikolas and Velthuis, Birgitta K. and Pasterkamp, Gerard and Mosterd, Arend and Timmers, Leo and {de Kleijn}, Dominique P. 
V.}, + year = {2021}, + journal = NATSCIREP, + volume = {11}, + number = {1}, + pages = {18946}, + pmid = {34556709}, + doi = {10.1038/s41598-021-98177-4}, + abstract = {Plasma osteoprotegerin (OPG) and vascular smooth muscle cell (VSMC) derived extracellular vesicles (EVs) are important regulators in the process of vascular calcification (VC). In population studies, high levels of OPG are associated with events. In animal studies, however, high OPG levels result in reduction of VC. VSMC-derived EVs are assumed to be responsible for OPG transport and VC but this role has not been studied. For this, we investigated the association between OPG in plasma and circulating EVs with coronary artery calcium (CAC) as surrogate for VC in symptomatic patients. We retrospectively assessed 742 patients undergoing myocardial perfusion imaging (MPI). CAC scores were determined on the MPI-CT images using a previously developed automated algorithm. Levels of OPG were quantified in plasma and two EV-subpopulations (LDL and TEX), using an electrochemiluminescence immunoassay. Circulating levels of OPG were independently associated with CAC scores in plasma; OR 1.39 (95\% CI 1.17\textendash 1.65), and both EV populations; EV-LDL; OR 1.51 (95\% CI 1.27\textendash 1.80) and EV-TEX; OR 1.21 (95\% CI 1.02\textendash 1.42). High levels of OPG in plasma were independently associated with CAC scores in this symptomatic patient cohort. High levels of EV-derived OPG showed the same positive association with CAC scores, suggesting that EV-derived OPG mirrors the same pathophysiological process as plasma OPG.}, + optnote = {DIAG, RADIOLOGY}, + algorithm = {https://grand-challenge.org/algorithms/calcium-scoring/}, + ss_id = {f8fd6c688a6da313e69e95559c1db7cfa1fb04d0}, + all_ss_ids = {['f8fd6c688a6da313e69e95559c1db7cfa1fb04d0']}, + gscites = {9}, +} + +@article{Delc18, + author = {Delcourt, C\'{e}cile and Le Goff, M\'{e}lanie and von Hanno, Therese and Mirshahi, Alireza and Khawaja, Anthony P. and Verhoeven, Virginie J.M. and Hogg, Ruth E. and Anastosopoulos, Eleftherios and Cachulo, Maria Luz and H\"{o}hn, Ren\'{e} and Wolfram, Christian and Bron, Alain and Miotto, Stefania and Carri\`{e}re, Isabelle and Colijn, Johanna M. and Buitendijk, Gabri\"{e}lle H.S. and Evans, Jennifer and Nitsch, Dorothea and Founti, Panayiota and Yip, Jennifer L.Y. and Pfeiffer, Norbert and Creuzot-Garcher, Catherine and Silva, Rufino and Piermarocchi, Stefano and Topouzis, Fotis and Bertelsen, Geir and Foster, Paul J. and Fletcher, Astrid and Klaver, Caroline C.W. 
and Korobelnik, Jean-Fran\c{c}ois and Acar, Niyazi and Anastosopoulos, Eleftherios and Azuara-Blanco, Augusto and Berendschot, Tos and Bergen, Arthur and Bertelsen, Geir and Binquet, Christine and Bird, Alan and Bobak, Martin and Boon, Camiel and Br\'{e}tillon, Lionel and Broe, Rebecca and Bron, Alain and Buitendijk, Gabrielle and Cachulo, Maria Luz and Capuano, Vittorio and Carri\`{e}re, Isabelle and Chakravarthy, Usha and Chan, Michelle and Chang, Petrus and Colijn, Johanna and Cougnard-Gr\'{e}goire, Audrey and Cree, Angela and Creuzot-Garcher, Catherine and Cumberland, Phillippa and Cunha-Vaz, Jos\'{e} and Daien, Vincent and De Jong, Eiko and Deak, Gabor and Delcourt, C\'{e}cile and Delyfer, Marie-No\"{e}lle and den Hollander, Anneke and Dietzel, Martha and Erke, Maja Gran and Faria, Pedro and Farinha, Claudia and Fauser, Sascha and Finger, Robert and Fletcher, Astrid and Foster, Paul and Founti, Panayiota and Gorgels, Theo and Grauslund, Jakob and Grus, Franz and Hammond, Christopher and Hansen, Morten and Helmer, Catherine and Hense, Hans-Werner and Hermann, Manuel and Hoehn, Ren\'{e} and Hogg, Ruth and Holz, Frank and Hoyng, Carel and Jansonius, Nomdo and Janssen, Sarah and Kersten, Eveline and Khawaja, Anthony and Klaver, Caroline and Korobelnik, Jean-Fran\c{c}ois and Lamparter, Julia and Le Goff, M\'{e}lanie and Lechanteur, Yara and Lehtim\"{a}ki, Terho and Leung, Irene and Lotery, Andrew and Mauschitz, Matthias and Meester, Magda and Merle, B\'{e}n\'{e}dicte and Meyer zu Westrup, Verena and Midena, Edoardo and Miotto, Stefania and Mirshahi, Alireza and Mohan-Sa\"{i}d, Sadek and Mueller, Michael and Muldrew, Alyson and Murta, Joaquim and Nickels, Stefan and Nunes, Sandrina and Owen, Christopher and Peto, Tunde and Pfeiffer, Norbert and Piermarocchi, Stefano and Prokofyeva, Elena and Rahi, Jugnoo and Raitakari, Olli and Rauscher, Franziska and Ribeiro, Luisa and Rougier, Marie-B\'{e}n\'{e}dicte and Rudnicka, Alicja and Sahel, Jos\'{e} and Salonikiou, Aggeliki and Sanchez, Clarisa and Schmitz-Valckenberg, Steffen and Schouten, Johannes and Schuster, Alexander and Schweitzer, C\'{e}dric and Segato, Tatiana and Shehata, Jasmin and Silva, Rufino and Silvestri, Giuliana and Simader, Christian and Souied, Eric and Speckauskas, Martynas and Springelkamp, Henriet and Tapp, Robyn and Topouzis, Fotis and van Leeuwen, Elisa and Verhoeven, Virginie and Verzijden, Timo and Von Hanno, Therese and Vujosevic, Stela and Wiedemann, Peter and Williams, Katie and Wolfram, Christian and Yip, Jennifer and Zerbib, Jennyfer}, + title = {The Decreasing Prevalence of Nonrefractive Visual Impairment in Older Europeans}, + doi = {10.1016/j.ophtha.2018.02.005}, + year = {2018}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ophtha.2018.02.005}, + file = {Delc18.pdf:pdf\Delc18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ophthalmology}, + citation-count = {15}, + automatic = {yes}, + pages = {1149-1159}, + volume = {125}, +} + +@article{Demi09, + author = {Demirci, M. Fatih and Platel, Bram and Shokoufandeh, Ali and Florack, Luc L. and Dickinson, Sven J.}, + title = {The Representation and Matching of Images Using Top Points}, + journal = JMIV, + year = {2009}, + volume = {35}, + issue = {2}, + pages = {103--116}, + doi = {10.1007/s10851-009-0157-y}, + url = {http://portal.acm.org/citation.cfm?id=1574521.1574528}, + abstract = {In previous work, singular points (or top points) in the scale space representation of generic images have proven valuable for image matching. 
In this paper, we propose a novel construction that encodes the scale space description of top points in the form of a directed acyclic graph. This representation allows us to utilize coarse-to-fine graph matching algorithms for comparing images represented in terms of top point configurations instead of using solely the top points and their features in a point matching algorithm, as was done previously. The nodes of the graph represent the critical paths together with their top points. The edge set captures the neighborhood distribution of vertices in scale space, and is constructed through a hierarchical tessellation of scale space using a Delaunay triangulation of the top points. We present a coarse-to-fine many-to-many matching algorithm for comparing such graph-based representations. The algorithm is based on a metric-tree representation of labeled graphs and their low-distortion embeddings into normed vector spaces via spherical encoding. This is a two-step transformation that reduces the matching problem to that of computing a distribution-based distance measure between two such embeddings. To evaluate the quality of our representation, four sets of experiments are performed. First, the stability of this representation under Gaussian noise of increasing magnitude is examined. Second, a series of recognition experiments is run on a face database. Third, a set of clutter and occlusion experiments is performed to measure the robustness of the algorithm. Fourth, the algorithm is compared to a leading interest point-based framework in an object recognition experiment.}, + file = {:pdf/Demi09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + publisher = {Kluwer Academic Publishers}, + month = {5}, + gsid = {17753494853780604466}, + gscites = {21}, +} + +@mastersthesis{Derc19, + author = {Koen Dercksen}, + title = {Prostate Cancer Classification and Label Scarcity}, + year = {2019}, + file = {:pdf/Derc19.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + journal = {Master thesis}, +} + +@inproceedings{Derck19, + author = {Koen Dercksen and Wouter Bulten and Geert Litjens}, + title = {Dealing with Label Scarcity in Computational Pathology: A Use Case in Prostate Cancer Classification}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=SJlq_10N94}, + abstract = {Large amounts of unlabelled data are commonplace for many applications in computational pathology, whereas labelled data is often expensive, both in time and cost, to acquire. We investigate the performance of unsupervised and supervised deep learning methods when few labelled data are available. Three methods are compared: clustering autoencoder latent vectors (unsupervised), a single layer classifier combined with a pre-trained autoencoder (semi-supervised), and a supervised CNN. We apply these methods on hematoxylin and eosin (H\&E) stained prostatectomy images to classify tumour versus non-tumour tissue. Results show that semi-/unsupervised methods have an advantage over supervised learning when few labels are available. Additionally, we show that incorporating immunohistochemistry (IHC) stained data provides an increase in performance over only using H\&E.}, + file = {Derck19.pdf:pdf\\Derck19.pdf:PDF}, + optnote = {DIAG}, + gsid = {12320966003094629420}, + gscites = {6}, + ss_id = {8da1812001d6d2753f2ef55fa5e50e39a648030a}, + all_ss_ids = {['8da1812001d6d2753f2ef55fa5e50e39a648030a']}, +} + +@conference{Dese09, + author = {W. Deserno AND O. A. Debats AND Y. Hoogeveen AND J. O. 
Barentsz}, + title = {Effect of {MR} Lymphography on the Probability for Lymph Node Involvement in Patients with Prostate Cancer}, + booktitle = RSNA, + year = {2009}, + abstract = {CLINICAL RELEVANCE/APPLICATION: In prostate cancer patients, the presence of lymph node metastasis is an important factor when evaluating treatment options. MRL may help reduce the need of invasive staging by PLND. PURPOSE: To investigate the effect of the results of MR Lymphography (MRL), compared to using the Nodal Risk Formula (NRF), on the probability of lymph node metastases. METHOD AND MATERIALS: Our database contained 375 consecutive patients with prostate cancer, included retrospectively, who had a serum PSA level >10 ng/ml or a Gleason Sum (GS) > 6. All patients had been assessed by MR Lymphography (MRL) between 4/8/2003 and 4/19/2005, and had undergone pelvic lymph node dissection (PLND) or fine-needle aspiration biopsy. A contrast agent based on ultrasmall superparamagnetic particles of iron oxide (USPIO) was used to discriminate lymph node metastases from normal lymphatic tissue. All MRL images were analysed by a radiologist. The risk of lymph node metastasis (Rm) was estimated using the Roach Nodal Risk Formula (NRF): Rm=(2/3)*PSA+[(GS-6)*10], where an outcome >15 is considered high risk. Two logistic regression models were constructed. In both, the result of histopathology of the lymph nodes was the dependent variable and the NRF result was taken as independent variable. In one of the models, the result of MRL was added as a second independent variable. The impact of interactive terms was also estimated. RESULTS: Positive lymph nodes were detected by histology in 61 out of 375 patients (16%). A total of 21 out of 312 patients with a negative MRL result had metastatic lymph nodes. Of the 63 patients with a positive MRL result, 40 had metastatic lymph nodes. A total of 13 out of 131 with low risk according to NRF had metastatic lymph nodes. Of the 244 patients with a high risk according to NRF, only 48 had metastatic lymph nodes. In the regression analysis, interactive terms were not statistically significant and were excluded from the model. After including the result of MRL in the logistic regression model, the outcome of the NRF was no longer significant. The result of MRL was significant in the model (p<0.001). CONCLUSION: MRL has a significant effect on the probability of lymph node involvement. As soon as the result of MRL is known, computing the nodal risk with the Nodal Risk Formula is no longer appropriate.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Dese11, + author = {Willem M L L G Deserno and Oscar A Debats and Tom Rozema and Ansje S Fortuin and Roel A M Heesakkers and Yvonne Hoogeveen and Petronella G M Peer and Jelle O Barentsz and Emile N J T van Lin}, + title = {Comparison of Nodal Risk Formula and {MR} Lymphography for Predicting Lymph Node Involvement in Prostate Cancer}, + journal = IJROBP, + year = {2011}, + volume = {81}, + pages = {8--15}, + doi = {10.1016/j.ijrobp.2010.05.043}, + abstract = {PURPOSE: To compare the nodal risk formula (NRF) as a predictor for lymph node (LN) metastasis in patients with prostate cancer with magnetic resonance lymphography (MRL) using Ultrasmall Super-Paramagnetic particles of Iron Oxide (USPIO) and with histology as gold standard. 
METHODS AND MATERIALS: Logistic regression analysis was performed with the results of histopathological evaluation of the LN as dependent variable and the nodal risk according to the NRF and the result of MRL as independent input variables. Receiver operating characteristic (ROC) analysis was performed to assess the performance of the models. RESULTS: The analysis included 375 patients. In the single-predictor regression models, the NRF and MRL results were both significantly (p <0.001) predictive of the presence of LN metastasis. In the models with both predictors included, NRF was nonsignificant (p = 0.126), but MRL remained significant (p <0.001). For NRF, sensitivity was 0.79 and specificity was 0.38; for MRL, sensitivity was 0.82 and specificity was 0.93. After a negative MRL result, the probability of LN metastasis is 4\% regardless of the NRF result. After a positive MRL, the probability of having LN metastasis is 68\%. CONCLUSIONS: MRL is a better predictor of the presence of LN metastasis than NRF. Using only the NRF can lead to a significant overtreatment on the pelvic LN by radiation therapy. When the MRL result is available, the NRF is no longer of added value.}, + file = {Dese11.pdf:pdf\\Dese11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {20800390}, + month = {9}, +} + +@article{Desl14, + author = {Deslee, Ga\"etan and Klooster, Karin and Hetzel, Martin and Stanzel, Franz and Kessler, Romain and Marquette, Charles-Hugo and Witt, Christian and Blaas, Stefan and Gesierich, Wolfgang and Herth, Felix J F. and Hetzel, Juergen and van Rikxoort, Eva M. and Slebos, Dirk-Jan}, + title = {Lung volume reduction coil treatment for patients with severe emphysema: a {E}uropean multicentre trial}, + journal = Thorax, + year = {2014}, + volume = {69}, + pages = {980-986}, + doi = {10.1136/thoraxjnl-2014-205221}, + abstract = {The lung volume reduction ({LVR}) coil is a minimally invasive bronchoscopic nitinol device designed to reduce hyperinflation and improve elastic recoil in severe emphysema. We investigated the feasibility, safety and efficacy of {LVR} coil treatment in a prospective multicentre cohort trial in patients with severe emphysema. Patients were treated in 11 centres. Safety was evaluated by recording all adverse events, efficacy by the St George's Respiratory Questionnaire ({SGRQ}) as primary endpoint, and pulmonary function testing, modified Medical Research Council dyspnoea score ({mMRC}) and 6-min walk distance ({6MWD}) up to 12 months after the final treatment. Sixty patients (60.9 +/- 7.5 years, forced expiratory volume in 1 s ({FEV1}) 30.2 +/- 6.3\% pred) were bronchoscopically treated with coils (55 bilateral, 5 unilateral), with a median of 10 (range 5-15) coils per lobe. Within 30 days post-treatment, seven chronic obstructive pulmonary disease exacerbations (6.1\%), six pneumonias (5.2\%), four pneumothoraces (3.5\%) and one haemoptysis (0.9\%) occurred as serious adverse events. At 6 and 12 months, respectively, Delta-{SGRQ} was -12.1 +/- 12.9 and -11.1 +/- 13.3 points, Delta-{6MWD} was +29.7 +/- 74.1 m and +51.4 +/- 76 m, Delta-{FEV1} was +0.11 +/- 0.20 {L} and +0.11 +/- 0.30 {L}, and Delta-{RV} (residual volume) was -0.65 +/- 0.90 {L} and -0.71 +/- 0.81 {L} (all p<0.01). 
Post hoc analyses showed significant responses for {SGRQ}, {6MWD} and {RV} in patients with both heterogeneous and homogeneous emphysema. {LVR} coil treatment results in significant clinical improvements in patients with severe emphysema, with a good safety profile and sustained results for up to 1 year. {NCT01328899.}}, + file = {Desl14.pdf:pdf\\Desl14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {24891327}, + month = {6}, + gsid = {2996768261833148300}, + gscites = {147}, +} + +@article{Dett14, + author = {Dettmer, Sabine and Peters, Lars and de Wall, Claudia and Schaefer-Prokop, Cornelia and Schmidt, Michael and Warnecke, Gregor and Gottlieb, Jens and Wacker, Frank and Shin, Hoen-Oh}, + title = {Bronchial wall measurements in patients after lung transplantation: evaluation of the diagnostic value for the diagnosis of bronchiolitis obliterans syndrome}, + journal = PLOSONE, + year = {2014}, + volume = {9}, + pages = {e93783}, + doi = {10.1371/journal.pone.0093783}, + abstract = {To prospectively evaluate quantitative airway wall measurements of thin-section {CT} for the diagnosis of Bronchiolitis Obliterans Syndrome ({BOS}) following lung transplantation. In 141 {CT} examinations, bronchial wall thickness ({WT}), the wall area percentage ({WA%}) calculated as the ratio of the bronchial wall area and the total area (sum of bronchial wall area and bronchial lumen area) and the difference of the WT on inspiration and expiration ({WT}diff) were automatically measured in different bronchial generations. The measurements were correlated with the lung function parameters. {WT} and {WA%} in {CT} examinations of patients with (n = 25) and without (n = 116) {BOS}, were compared using the unpaired t-test and univariate analysis of variance, while also considering the differing lung volumes. Measurements could be performed in 2,978 bronchial generations. {WT}, {WA%}, and {WT}diff did not correlate with the lung function parameters (r<0.5). The {WA%} on inspiration was significantly greater in patients with BOS than in patients without {BOS}, even when considering the dependency of the lung volume on the measurements. WT on inspiration and expiration and {WA%} on expiration did not show significant differences between the groups. {WA%} on inspiration was significantly greater in patients with than in those without {BOS}. However, {WA%} measurements were significantly dependent on lung volume and showed a high variability, thus not allowing the sole use of bronchial wall measurements to differentiate patients with from those without {BOS}.}, + file = {Dett14.pdf:pdf/Dett14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {24713820}, + month = {4}, + ss_id = {8b8211800c55a1cf2cb5b9809294d09bc21b8168}, + all_ss_ids = {['8b8211800c55a1cf2cb5b9809294d09bc21b8168']}, + gscites = {9}, +} + +@inproceedings{Deva11, + author = {Pandu R. Devarakota AND Dinesh M. Siddu AND Pragnya Maduskar AND Siddharth Vikal AND Laks Raghupathi}, + title = {Automatic lung nodule detection in thick slice {CT}: a comparative study of different gating schemes in {CAD}}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {79630E}, + doi = {10.1117/12.878404}, + abstract = {Common lung nodule detection workflows use 5mm slice thickness protocol whereas existing CAD systems require <=2mm data. A major challenge for widespread Lung CAD clinical use is thick and thin reconstruction availability for radiologist and CAD respectively. 
This is not always possible and applying current CAD algorithms on thick data outside their designed acquisition parameters may result in sensitivity degradation and high false-positives, hence clinically unacceptable. We propose a multi-stage classifier CAD system which works directly on thick scans. Exploring gating systems using wall-attachment and lesion location, we show significant improvement of CAD sensitivity at much better false positive rates.}, + file = {Deva11.pdf:pdf/Deva11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, +} + +@article{Deva17, + author = {Devaraj, Anand and van Ginneken, Bram and Nair, Arjun and Baldwin, David}, + title = {Use of Volumetry for Lung Nodule Management: Theory and Practice}, + journal = Radiology, + year = {2017}, + volume = {284}, + issue = {3}, + month = {9}, + pages = {630--644}, + doi = {10.1148/radiol.2017151022}, + abstract = {A consistent feature of many lung nodule management guidelines is the recommendation to evaluate nodule size by using diameter measurements and electronic calipers. Traditionally, the use of nodule volumetry applications has primarily been reserved for certain lung cancer screening trials rather than clinical practice. However, even before the first nodule management guidelines were published more than a decade ago, research has been ongoing into the use of nodule volumetry as a means of measuring nodule size, and this research has accelerated in recent years. This article aims to provide radiologists with an up-to-date review of the most recent literature on volumetry and volume doubling times in lung nodule management, outlining their benefits and drawbacks. A brief technical review of typical volumetry applications is also provided.}, + file = {Deva17.pdf:pdf\\Deva17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28825886}, + gsid = {2824529338802696200}, + gscites = {105}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/181824}, + ss_id = {f2ea47c7c53d8728b0ba4f4f0edb945edf877d4c}, + all_ss_ids = {['f2ea47c7c53d8728b0ba4f4f0edb945edf877d4c']}, +} + +@inproceedings{Diez14, + author = {Y. Diez and A. Gubern-M\'{e}rida and L. Wang and S. Diekmann and J. Mart\'{i} and B. Platel and J. Kramme and R. Mart\'{i}}, + title = {Comparison of Methods for Current-to-Prior Registration of Breast {DCE}-{MRI}}, + booktitle = {IWDM '14: Proceedings of the 12th international workshop on Digital Mammography}, + year = {2014}, + volume = {8539}, + publisher = {Springer-Verlag}, + pages = {689--695}, + doi = {10.1007/978-3-642-13666-5_67}, + file = {Diez14.pdf:pdf\\Diez14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2740440396846849245}, + gscites = {10}, + ss_id = {5ca3d1d79becff9d2683985814dee4fc47e882c0}, + all_ss_ids = {['5ca3d1d79becff9d2683985814dee4fc47e882c0']}, +} + +@article{Dijc11, + author = {J. A. A. M. van Dijck and J. D. M. Otten and N. Karssemeijer and P. Kenemans and A. L. M. Verbeek and M. J. van der Mooren}, + title = {Less mammographic density after nasal versus oral administration of postmenopausal hormone therapy}, + journal = Climacteric, + year = {2011}, + volume = {14}, + pages = {683--688}, + doi = {10.3109/13697137.2011.586752}, + abstract = {Objective: Nasal administration gives a more acute but shorter rise in serum hormone levels than oral administration and may therefore have less effect on the fibroglandular tissue in the breasts. We studied the change in mammographic breast density after nasal vs. oral administration of postmenopausal hormone therapy (PHT). 
Methods: We studied participants in a randomized, controlled trial on the impact of nasal vs. oral administration of PHT (combined 17beta-estradiol plus norethisterone) for 1 year. Two radiologists classified mammographic density at baseline and after 1 year into four categories. Also, the percentage density was calculated by a computer-based method. The main outcome measure was the difference in the proportion of women with an increase in mammographic density category after 1 year between the nasal and oral groups. Also, the change in the percentage density was calculated. Results: The study group comprised 112 healthy postmenopausal women (mean age 56 years), of whom 53 received oral and 59 intranasal PHT. An increase in mammographic density category after 1 year was seen in 20\% of the women in the nasal group and in 34\% of the oral group. This resulted in a non-significant difference in the proportion of women in whom mammographic breast density had increased by -14\% (95\% confidence interval (CI) -30\% to 2.7\%). The mean change in percentage density was -1.2\% in the nasal group and +1.2\% in the oral group, yielding a -2.4\% differential effect (95\% CI -7.3\% to 2.5\%). Conclusions: One year of nasal PHT gave a smaller, although not statistically significant, increase in mammographic density than oral PHT. Remaining issues are the relation between the route of administration of PHT and breast complaints and breast cancer risk.}, + file = {Dijc11.pdf:pdf\\Dijc11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {21942620}, + month = {9}, + ss_id = {de4199466e65c57d27343bcc67a9261037d304cc}, + all_ss_ids = {['de4199466e65c57d27343bcc67a9261037d304cc']}, + gscites = {1}, +} + +@conference{Dijk10b, + author = {J. P. van Dijk and W. J. M. van de Ven and D. F. Stegeman}, + title = {Evaluating the motor unit number index ({MUNIX}) as a measure for motor unit loss}, + booktitle = {International Society of Electrophysiology and Kinesiology}, + year = {2010}, + abstract = {AIM: With the increased number of potential therapeutic agents, monitoring of motor neuron loss in patients is of great importance. Functional measures are masked by motor neuron loss because of collateral reinnervation. A more direct measure is required. In this study, we evaluate a recent technique called MUNIX to determine motor neuron loss. METHODS: A model was constructed to study the effect of denervation and collateral reinnervation on surface motor unit potentials (MUPs). A small muscle containing 200 motor units was simulated. The process of motor neuron denervation is simulated by removing one motor neuron at a time leaving all its fibres orphaned. As muscle fibre loss after denervation is counteracted by reinnervation, the orphaned fibres could be reinnervated by a motor unit (MU) with a fibre adjacent to this fibre. MUNIX is calculated from the surface interference pattern (SIP) together with the maximal compound muscle action potential (CMAP) as described by Nandedkar et al. (2003). Briefly, SIP area and SIP power are calculated for five levels of contractions. The motor unit count (ICMUC) is defined as ICMUC=(CMAPpower*SIParea)/(CMAParea*SIPpower). The ICMUC provides the real number of MUs if all MUs would be equal in size and no phase cancellation would occur. 
Since this is not the case, the relation between ICMUC and SIP area is determined as ICMUC=B*(SIParea)^alpha, where B and alpha are determined by means of nonlinear regression and MUNIX is calculated as MUNIX=B*(20)^alpha (inset figure 1). To test how well this method can follow motor neuron loss, we used the above denervation model and a MU firing pattern generated based on the model of Matthews et al. (1996). The size principle was applied creating the surface interference patterns, so that small MUs (i.e. small number of fibres) are recruited before and had a lower firing rate than large MUs. RESULTS: The MUNIX and CMAP results were evaluated in steps of 5% MU loss. We used different firing patterns and slightly different contraction levels (i.e. number of MUs) per run to obtain a measure of variability. Figure 1 shows that the MUNIX (blue) does not significantly decline until at least 40% of all MUs are lost and seems strongly related to the change in the CMAP amplitude (red). CONCLUSION: This preliminary simulation study shows that MUNIX follows motor neuron loss only slightly better than CMAP amplitude. The results after severe denervation should be interpreted with care as it is likely that the model is incorrect at this level.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Dijk13, + author = {Dijkstra, Akkelies E. and Postma, Dirkje S. and Ten Hacken, Nick and Vonk, Judith M. and Oudkerk, Matthijs and van Ooijen, Peter Ma and Zanen, Pieter and Mohamed Hoesein, F. A. A. and van Ginneken, Bram and Schmidt, Michael and Groen, Harry Jm}, + title = {Low-dose {CT} measurements of airway dimensions and emphysema associated with airflow limitation in heavy smokers: a cross sectional study}, + journal = RESPR, + year = {2013}, + volume = {14}, + pages = {1--9}, + doi = {10.1186/1465-9921-14-11}, + abstract = {ABSTRACT: BACKGROUND: Increased airway wall thickness (AWT) and parenchymal lung destruction both contribute to airflow limitation. Advances in computed tomography (CT) post-processing imaging allow to quantify these features. The aim of this Dutch population study is to assess the relationships between AWT, lung function, emphysema and respiratory symptoms. METHODS: AWT and emphysema were assessed by low-dose CT in 500 male heavy smokers, randomly selected from a lung cancer screening population. AWT was measured in each lung lobe in cross-sectionally reformatted images with an automated imaging program at locations with an internal diameter of 3.5 mm, and validated in smaller cohorts of patients. The 15th percentile method (Perc15) was used to assess the severity of emphysema. Information about respiratory symptoms and smoking behavior was collected by questionnaires and lung function by spirometry. RESULTS: Median AWT in airways with an internal diameter of 3.5 mm (AWT3.5) was 0.57 (0.44 - 0.74) mm. Median AWT in subjects without symptoms was 0.52 (0.41-0.66) and in those with dyspnea and/or wheezing 0.65 (0.52-0.81) mm (p<0.001). In the multivariate analysis only AWT3.5 and emphysema independently explained 31.1\% and 9.5\% of the variance in FEV1\%predicted, respectively, after adjustment for smoking behavior. CONCLUSIONS: Post processing standardization of airway wall measurements provides a reliable and useful method to assess airway wall thickness. 
Increased airway wall thickness contributes more to airflow limitation than emphysema in a smoking male population even after adjustment for smoking behavior.}, + file = {Dijk13.pdf:pdf\\Dijk13.pdf:PDF}, + optnote = {DIAG}, + number = {11}, + pmid = {23356533}, + gsid = {9922086244292779435}, + gscites = {33}, +} + +@article{Dijk15, + author = {Dijkstra, Akkelies E. and Postma, Dirkje S. and van Ginneken, Bram and Wielp\"utz, Mark O. and Schmidt, Michael and Becker, Nikolaus and Owsijewitsch, Michael and Kauczor, Hans-Ulrich and de Koning, Harry J. and Lammers, Jan W. and Oudkerk, Matthijs and Brandsma, Corry-Anke and Boss\'{e}, Yohan and Nickle, David C. and Sin, Don D. and Hiemstra, Pieter S. and Wijmenga, Ciska and Smolonska, Joanna and Zanen, Pieter and Vonk, Judith M. and van den Berge, Maarten and Boezen, H Marike and Groen, Harry J M.}, + title = {Novel Genes for Airway Wall Thickness Identified with Combined Genome Wide Association and Expression Analyses}, + journal = AJRCCM, + year = {2015}, + volume = {191}, + pages = {547-556}, + doi = {10.1164/rccm.201405-0840OC}, + abstract = {Rationale Airway wall thickness (AWT) is affected by both environmental and genetic factors and is strongly associated with airflow limitation in smaller airways. Objectives The aim of our study was to investigate its genetic component. Methods AWT was measured on low-dose CT-scans in male heavy smokers participating in a lung cancer screening study (n = 2,640). Genome wide association studies on AWT were performed under an additive model using linear regression (adjusted for pack-years, lung volume), followed by meta-analysis. An independent cohort was used for validation of the most strongly associated single nucleotide polymorphisms (SNPs). The functional relevance of significant SNPs was evaluated. Measurements and main results Three significant loci on chromosomes 2q (rs734556, p = 6.2x10-7) and 10q (rs10794108, p = 8.6x10-8; rs7078439, p = 2.3x10-7) were associated with AWT and confirmed in the meta-analysis in cohorts with comparable lung function: p-values 4.6x10-8, 7.4x10-8 and 7.5x10-8, respectively. SNP rs734556 was associated with decreased lung tissue expression of SERPINE2, a susceptibility gene for emphysema. Two nominally significant SNPs showed effects with similar direction: rs10251504 in MAGI2 (p = 5.8x10-7) and rs4796712 in NT5C3B (p = 3.1x10-6). Higher MAGI2 expression in bronchial biopsies of COPD patients was significantly associated with lower inflammatory cell numbers, lower NT5C3B expression with worse lung function. The NT5C3B risk allele was associated with higher lung tissue expression (p = 1.09x10-41). Conclusions Genetic variants contribute to AWT. 
Amongst others, the identified genes are involved in emphysema, airway obstruction and bronchial inflammation.}, + file = {Dijk15.pdf:pdf\\Dijk15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {25517131}, + month = {3}, + gsid = {13303851726201550707}, + gscites = {25}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/153524}, + ss_id = {32cfbed76bd55d2aa3c2fa54652941d01994dc35}, + all_ss_ids = {['32cfbed76bd55d2aa3c2fa54652941d01994dc35']}, +} + +@article{Dilz19, + author = {Dilz, Roeland and Schr\"oder, Lukas and Moriakov, Nikita and Sonke, Jan-Jakob and Teuwen, Jonas}, + title = {Learned SIRT for Cone Beam Computed Tomography Reconstruction}, + journal = {arXiv:1908.10715}, + year = {2019}, + abstract = {We introduce the learned simultaneous iterative reconstruction technique (SIRT) for tomographic reconstruction. The learned SIRT algorithm is a deep learning based reconstruction method combining model knowledge with a learned component. The algorithm is trained by mapping raw measured data to the reconstruction results over several iterations. The Learned SIRT algorithm is applied to a cone beam geometry on a circular orbit, a challenging problem for learned methods due to its 3D geometry and its inherent inability to completely capture the patient anatomy. A comparison of 2D reconstructions is shown, where the learned SIRT approach produces reconstructions with superior peak signal to noise ratio (PSNR) and structural similarity (SSIM), compared to FBP, SIRT and U-net post-processing and similar PSNR and SSIM compared to the learned primal dual algorithm. Similar results are shown for cone beam geometry reconstructions of a 3D Shepp Logan phantom, where we obtain between 9.9 and 28.1 dB improvement over FBP with a substantial improvement in SSIM. Finally we show that our algorithm scales to clinically relevant problems, and performs well when applied to measurements of a physical phantom.}, + optnote = {DIAG}, + month = {8}, + ss_id = {b484f95f94ef39029ba3acd3f481bc729172eef5}, + all_ss_ids = {['b484f95f94ef39029ba3acd3f481bc729172eef5']}, + gscites = {1}, +} + +@conference{Dine10, + author = {Dinesh, M. S. AND Laks Raghupathi AND Pandu Devarakota AND Pragnya Maduskar AND Marcos Salganicoff AND Luca Bogoni}, + title = {Thick-slice lung nodule detection: {CAD} prototype performance evaluation}, + booktitle = ECR, + year = {2010}, + abstract = {Purpose: In many clinical situations, only thick slice CT lung studies are available due to storage, transmission or acquisition constraints. However, it is still desirable to have CAD available as a tool. Unfortunately, most commercial Lung CAD systems operate only on thin slice (< 2.5mm) input data. We evaluate the detection capability of a prototype thick slice Lung Nodule CAD system. Methods and Materials: The system was evaluated using 131 unseen cases, from institutions in the United States and Europe, using various vendors' scanners. Group 1 (G1, n=60) included cases from Siemens' scanners (VolumeZoom=12, Sensation16=31, Sensation64=17) at 5mmq5mm. Group 2 (G2, n=35) with cases from Philip's scanner (Mx8000 16) at slice thickness 6.5mmq5mm slice interval. Group 3 (G3, n=36) with cases from GE's scanner (LightSpeed 16) at 5mmq5mm. The prototype's performance was evaluated on all nodules with diameter range 6-25mm. Group G1 had 42 solid and part-solid pulmonary nodules, G2 had 40 and G3 had 59. Results: For G1, per-nodule sensitivity was 90.4% with an average FP of 4/case. 
For G2 per-nodule sensitivity was 72.5% with an average FP of 3.05/case and for G3 per-nodule sensitivity was 78% with average FP of 2/case. Conclusion: The prototype generalized across scanners from multiple vendors, models, and different sites. The system has the potential to extend the practical range of acquisition data over which Lung CAD may be applied to help overcome existing constraints on CAD utilization.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Dong10, + author = {E. van Dongen and B. van Ginneken}, + title = {Automatic segmentation of pulmonary vasculature in thoracic {CT} scans with local thresholding and airway wall removal}, + booktitle = ISBI, + year = {2010}, + pages = {668--671}, + doi = {10.1109/ISBI.2010.5490088}, + abstract = {{A} system for the automatic segmentation of the pulmonary vasculature in thoracic {CT} scans is presented. {T}he method is based on a vesselness filter and includes a local thresholding procedure to accurately segment vessels of varying diameters. {T}he output of an automatic segmentation of the airways is used to remove false positive detections in the airway walls. {T}he algorithm is tested with a quantitative evaluation framework based on manual classification of well-dispersed local maxima and random points on ten axial sections in a scan. {T}he algorithm has been applied to ten low dose {CT} scans annotated by two observers. {R}esults show that local thresholding and airway wall removal both improve segmentation performance and that the accuracy of the proposed method approaches the interobserver variability.ss}, + file = {Dong10.pdf:pdf\\Dong10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {17657664511689579052}, + gscites = {28}, + ss_id = {644ca5e97840d8532c7bdcf142cb49e398ae4ea0}, + all_ss_ids = {['644ca5e97840d8532c7bdcf142cb49e398ae4ea0']}, +} + +@article{Donn19, + author = {Donnelly, J Peter and Chen, Sharon C and Kauffman, Carol A and Steinbach, William J and Baddley, John W and Verweij, Paul E and Clancy, Cornelius J and Wingard, John R and Lockhart, Shawn R and Groll, Andreas H and Sorrell, Tania C and Bassetti, Matteo and Akan, Hamdi and Alexander, Barbara D and Andes, David and Azoulay, Elie and Bialek, Ralf and Bradsher, Robert W and Bretagne, Stephane and Calandra, Thierry and Caliendo, Angela M and Castagnola, Elio and Cruciani, Mario and Cuenca-Estrella, Manuel and Decker, Catherine F and Desai, Sujal R and Fisher, Brian and Harrison, Thomas and Heussel, Claus Peter and Jensen, Henrik E and Kibbler, Christopher C and Kontoyiannis, Dimitrios P and Kullberg, Bart-Jan and Lagrou, Katrien and Lamoth, Fr\'{e}d\'{e}ric and Lehrnbecher, Thomas and Loeffler, Jurgen and Lortholary, Olivier and Maertens, Johan and Marchetti, Oscar and Marr, Kieren A and Masur, Henry and Meis, Jacques F and Morrisey, C Orla and Nucci, Marcio and Ostrosky-Zeichner, Luis and Pagano, Livio and Patterson, Thomas F and Perfect, John R and Racil, Zdenek and Roilides, Emmanuel and Ruhnke, Marcus and Prokop, Cornelia Schaefer and Shoham, Shmuel and Slavin, Monica A and Stevens, David A and Thompson, George R and Vazquez, Jose A and Viscoli, Claudio and Walsh, Thomas J and Warris, Adilia and Wheat, L Joseph and White, P Lewis and Zaoutis, Theoklis E and Pappas, Peter G}, + title = {Revision and Update of the Consensus Definitions of Invasive Fungal Disease From the European Organization for Research and Treatment of Cancer and the Mycoses Study Group Education and Research Consortium}, + doi = {10.1093/cid/ciz1008}, + year = {2019}, + abstract 
= {Abstract + + Background + Invasive fungal diseases (IFDs) remain important causes of morbidity and mortality. The consensus definitions of the Infectious Diseases Group of the European Organization for Research and Treatment of Cancer and the Mycoses Study Group have been of immense value to researchers who conduct clinical trials of antifungals, assess diagnostic tests, and undertake epidemiologic studies. However, their utility has not extended beyond patients with cancer or recipients of stem cell or solid organ transplants. With newer diagnostic techniques available, it was clear that an update of these definitions was essential. + + + Methods + To achieve this, 10 working groups looked closely at imaging, laboratory diagnosis, and special populations at risk of IFD. A final version of the manuscript was agreed upon after the groups' findings were presented at a scientific symposium and after a 3-month period for public comment. There were several rounds of discussion before a final version of the manuscript was approved. + + + Results + There is no change in the classifications of "proven," "probable," and "possible" IFD, although the definition of "probable" has been expanded and the scope of the category "possible" has been diminished. The category of proven IFD can apply to any patient, regardless of whether the patient is immunocompromised. The probable and possible categories are proposed for immunocompromised patients only, except for endemic mycoses. + + + Conclusions + These updated definitions of IFDs should prove applicable in clinical, diagnostic, and epidemiologic research of a broader range of patients at high-risk. + }, + url = {http://dx.doi.org/10.1093/cid/ciz1008}, + file = {Donn19.pdf:pdf\Donn19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Clinical Infectious Diseases}, + citation-count = {1173}, + automatic = {yes}, + pages = {1367-1376}, + volume = {71}, +} + +@article{Doop23, + author = {Dooper, Stephan and Pinckaers, Hans and Aswolinskiy, Witali and Hebeda, Konnie and Jarkman, Sofia and van der Laak, Jeroen and Litjens, Geert}, + title = {Gigapixel end-to-end training using streaming and attention}, + doi = {10.1016/j.media.2023.102881}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2023.102881}, + file = {Doop23.pdf:pdf\Doop23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + citation-count = {2}, + automatic = {yes}, + pages = {102881}, + volume = {88}, +} + +@conference{Durm12, + author = {B. Durmus and R. Gaillard and R. Manniesing and D.H.M. Heppe and A. Hofman and V.W.V. Jaddoe}, + booktitle = {International Society for Development Origins of Health and Disease}, + title = {Maternal Smoking During Pregnancy and Adiposity in School Age Children: {The Generation R Study}}, + abstract = {Several epidemiologic studies have suggested an association of maternal smoking during pregnancy with an increased risk of childhood overweight or obesity. These studies used body mass index as outcome measure. Not much is known about the associations of maternal smoking during pregnancy with more specific measures of body fat and body fat distribution. The aim of the present study is to assess the associations of active maternal smoking during pregnancy and cigarette dose with body mass index and measures of general and abdominal adiposity in children at the age of 6 years. 
This study is embedded in the Generation R study, a population-based prospective cohort study from early fetal life until adulthood. Information on maternal smoking status was collected by questionnaires sent during pregnancy. Body mass index (kg/m2) was calculated. Information on general adiposity was obtained by Dual-energy X-ray absorptiometry (DXA) and abdominal adiposity by ultrasound. Results will be presented during the congress.}, + optnote = {DIAG, RADIOLOGY}, + year = {2012}, +} + +@article{Durm14, + author = {Durmus, B. and Heppe, D.H.M. and Taal, H.R. and Manniesing, R. and Raat, H. and Hofman, A. and Steegers, E.A.P. and Gaillard, R. and Jaddoe, V.W.V.}, + title = {Parental Smoking During Pregnancy and Total and Abdominal Fat Distribution in School-age Children: the {Generation R} Study}, + journal = IJO, + year = {2014}, + volume = {38}, + pages = {966-972}, + doi = {10.1038/ijo.2014.9}, + abstract = {Objective:Fetal smoke exposure may influence growth and body composition later in life. We examined the associations of maternal and paternal smoking during pregnancy with total and abdominal fat distribution in school-age children.Methods:We performed a population-based prospective cohort study among 5243 children followed from early pregnancy onward in the Netherlands. Information about parental smoking was obtained by questionnaires during pregnancy. At the median age of 6.0 years (90% range: 5.7-7.4), we measured anthropometrics, total fat and android/gynoid fat ratio by dual-energy X-ray absorptiometry, and preperitoneal and subcutaneous abdominal fat were measured by ultrasound.Results:The associations of maternal smoking during pregnancy were only present among girls (P-value for sex interaction<0.05). Compared with girls from mothers who did not smoke during pregnancy, those from mothers who smoked during the first trimester only had a higher android/gynoid fat ratio (difference 0.23 (95% confidence interval (CI): 0.09-0.37) s.d. scores (SDS). Girls from mothers who continued smoking throughout pregnancy had a higher body mass index (difference: 0.24 (95% CI: 0.14-0.35) SDS), total fat mass (difference: 0.23 (95% CI: 0.14-0.33) SDS), android/gynoid fat ratio (difference: 0.34 (95% CI: 0.22-0.46) SDS), subcutaneous abdominal fat (difference: 0.22 (95% CI: 0.11-0.33) SDS) and preperitoneal abdominal fat (difference: 0.20 (95% CI: 0.08-0.31) SDS). Similar associations with body fat distribution outcomes were observed for paternal smoking during pregnancy. Both continued maternal and paternal smoking during pregnancy may be associated with an increased risk of childhood overweight. The corresponding odds ratios were 1.19 (95% CI: 0.98-1.46) and 1.32 (1.10-1.58), respectively.Conclusions:Maternal and paternal smoking during pregnancy are associated with an adverse body and abdominal fat distribution and increased risk of overweight in children. Similar effects of maternal and paternal smoking suggest that direct intrauterine mechanisms and common family-based lifestyle-related factors explain the associations.}, + file = {Durm14.pdf:pdf\\Durm14.pdf:PDF}, + optnote = {DIAG}, + number = {7}, + pmid = {24448598}, + month = {1}, + gsid = {14089140236204567566}, + gscites = {27}, + ss_id = {c20d695eb6eabe26c449e5e4a75d214c92b636dd}, + all_ss_ids = {['c20d695eb6eabe26c449e5e4a75d214c92b636dd']}, +} + +@article{Durm14a, + author = {Durmus, B\"usra and Heppe, Denise Hm and Gishti, Olta and Manniesing, Rashindra and Abrahamse-Berkeveld, Marieke and van der Beek, Eline M. 
and Hofman, Albert and Duijts, Liesbeth and Gaillard, Romy and Jaddoe, Vincent Wv}, + title = {General and abdominal fat outcomes in school-age children associated with infant breastfeeding patterns}, + journal = AJCN, + year = {2014}, + volume = {12}, + pages = {1351-1358}, + doi = {10.3945/ajcn.113.075937}, + abstract = {Breastfeeding may have a protective effect on the development of obesity in later life. Not much is known about the effects of infant feeding on more-specific fat measures.We examined associations of breastfeeding duration and exclusiveness and age at the introduction of solid foods with general and abdominal fat outcomes in children.We performed a population-based, prospective cohort study in 5063 children. Information about infant feeding was obtained by using questionnaires. At the median age of 6.0 y (90\% range: 5.7\%, 6.8\%), we measured childhood anthropometric measures, total fat mass and the android:gynoid fat ratio by using dual-energy X-ray absorptiometry and preperitoneal abdominal fat by using ultrasound.We observed that, in the models adjusted for child age, sex, and height only, a shorter breastfeeding duration, nonexclusive breastfeeding, and younger age at the introduction of solid foods were associated with higher childhood general and abdominal fat measures (P-trend < 0.05) but not with higher childhood body mass index. The introduction of solid foods at a younger age but not breastfeeding duration or exclusivity was associated with higher risk of overweight or obesity (OR: 2.05; 95\% CI: 1.41, 2.90). After adjustment for family-based sociodemographic, maternal lifestyle, and childhood factors, the introduction of solid food between 4-4.9 mo of age was associated with higher risks of overweight or obesity, but the overall trend was not significant.Associations of infant breastfeeding and age at the introduction of solid foods with general and abdominal fat outcomes are explained by sociodemographic and lifestyle-related factors. Whether infant dietary composition affects specific fat outcomes at older ages should be further studied.}, + file = {Durm14a.pdf:pdf\\Durm14a.pdf:PDF}, + optnote = {DIAG}, + number = {6}, + pmid = {24622802}, + month = {3}, + gsid = {1224010184676285656}, + gscites = {54}, + ss_id = {dc399a6d32501c1b878002ed27dfd7c54ab82965}, + all_ss_ids = {['dc399a6d32501c1b878002ed27dfd7c54ab82965']}, +} + +@mastersthesis{Eeke20, + author = {Leander van Eekelen}, + title = {Deep learning-based analysis of bone marrow histopathology images}, + abstract = {The topic of this thesis is analysis of digital bone marrow slides using deep learning. Chapter 2 details the development of a convolutional neural network for the automatic segmentation of six different cell/tissue types in bone marrow histopathology images. Using the segmentation output of this neural network, a classifier capable of classifying normocellular and aplastic bone marrow is trained. In Chapter 3, the neural network is applied to a cohort of 130 patients and the segmentation output is used for the automatic quantification of bone marrow cellularity. The age-related decrease of bone marrow cellularity is studied and compared to results in the literature. Also, the agreement between the cellularity quantification and visual estimation by a pathologist is measured. Chapter 4 covers preliminary experiments on the WSI-level classification of four different hematopathologies. 
Lastly, Chapter 5 summarizes and discusses the results of Chapters 2 through 4.}, + file = {:pdf/Eeke20.pdf:PDF}, + optnote = {DIAG}, + school = {Eindhoven University of Technology}, + year = {2020}, + journal = {Master thesis}, +} + +@inproceedings{Eeke20a, + author = {van Eekelen, Leander and Pinckaers, Hans and Hebeda, Konnie M and Litjens, Geert}, + booktitle = MI, + title = {Multi-class semantic cell segmentation and classification of aplasia in bone marrow histology images}, + doi = {10.1117/12.2549654}, + pages = {113200B}, + series = {SPIE}, + volume = {11320}, + file = {:pdf/Eeke20a.pdf:PDF}, + optnote = {DIAG}, + year = {2020}, + ss_id = {8bc7e464965aabd635e4bc2e76d186d6d2c04e01}, + all_ss_ids = {['8bc7e464965aabd635e4bc2e76d186d6d2c04e01']}, + gscites = {1}, +} + +@article{Eeke21, + author = {van Eekelen, Leander and Pinckaers, Hans and van den Brand, Michiel and Hebeda, Konnie M. and Litjens, Geert}, + journal = {Pathology}, + title = {Using deep learning for quantification of cellularity and cell lineages in bone marrow biopsies and comparison to normal age-related variation.}, + doi = {10.1016/j.pathol.2021.07.011}, + abstract = {Cellularity estimation forms an important aspect of the visual examination of bone marrow biopsies. In clinical practice, cellularity is estimated by eye under a microscope, which is rapid, but subjective and subject to inter- and intraobserver variability. In addition, there is little consensus in the literature on the normal variation of cellularity with age. Digital image analysis may be used for more objective quantification of cellularity. As such, we developed a deep neural network for the segmentation of six major cell and tissue types in digitized bone marrow trephine biopsies. Using this segmentation, we calculated the overall bone marrow cellularity in a series of biopsies from 130 patients across a wide age range. Using intraclass correlation coefficients (ICC), we measured the agreement between the quantification by the neural network and visual estimation by two pathologists and compared it to baseline human performance. We also examined the age-related changes of cellularity and cell lineages in bone marrow and compared our results to those found in the literature. The network was capable of accurate segmentation (average accuracy and dice score of 0.95 and 0.76, respectively). There was good neural network-pathologist agreement on cellularity measurements (ICC=0.78, 95% CI 0.58-0.85). We found a statistically significant downward trend for cellularity, myelopoiesis and megakaryocytes with age in our cohort. 
The mean cellularity began at approximately 50% in the third decade of life and then decreased +-2% per decade to 40% in the seventh and eighth decade, but the normal range was very wide (30-70%).}, + file = {:pdf/Eeke21.pdf:PDF}, + month = nov, + optnote = {DIAG, PATHOLOGY}, + pmid = {34772487}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/252113}, + ss_id = {191e15acc68311c29c38a4bbfa0769608fa48fbc}, + all_ss_ids = {['191e15acc68311c29c38a4bbfa0769608fa48fbc']}, + gscites = {5}, +} + +@conference{Eeke22, + author = {Leander van Eekelen and Enrico Munari and Ilaria Girolami and Albino Eccher and Jeroen van der Laak and Katrien Grunberg and Monika Looijen-Salamon and Shoko Vos and Francesco Ciompi}, + title = {Inter-rater agreement of pathologists on determining PD-L1 status in non-small cell lung cancer}, + booktitle = {ECP}, + year = {2022}, + abstract = {Artificial intelligence (AI) based quantification of cell-level PD-L1 status enables spatial analysis and allows reliable and reproducible assessment of the tumor proportion score. In this study, we assess the cell-level inter-pathologist agreement as human benchmark for AI development and validation. Three pathologists manually annotated the centers of all nuclei within 53 regions of interest in 12 whole-slide images (40X magnification) of NSCLC cases and classified them as PD-L1 negative/positive tumor cells, PD-L1 positive immune cells or other cells. Agreement was quantified using F1 score analysis, with agreement defined as annotations less than 10 um apart and of the same class. An average of 9044 nuclei (1550 negative, 2367 positive tumor cells, 1244 positive immune cells, 3881 other cells) were manually annotated by the three pathologists. The mean F1 score over pairs of pathologists at dataset level was 0.59 (range 0.54-0.65). When split across classes, the mean per-pair F1 scores stay approximately the same, indicating the readers perform similarly regardless of cell type. Besides human variability in manual point annotations with respect to the center of nuclei, lack of context contributed to disagreement: readers who reported they solely examined the ROIs tended to disagree more with readers that reported they also looked outside the ROIs for additional (morphological/density) information. + + In conclusion, agreement on determining the PD-L1 status of individual cells is only moderate, suggesting a role for AI. By quantifying the inter-rater agreement of pathologists, we have created a human benchmark which may serve as an upper bound (and could be combined via majority vote) for the validation of AI at cell level, something not done previously. Cell-level AI-based assessment of PD-L1 may supersede slide level scoring, adding significant information on the heterogeneity and spatial distribution over the tumor.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Eeke22a, + author = {Leander van Eekelen and Enrico Munari and Luca Dulce Meesters and Gabriel Silva de Souza and Muradije Demirel-Andishmand and Daan Zegers and Monika Looijen-Salamon and Shoko Vos and Francesco Ciompi}, + title = {Nuclei detection with YOLOv5 in PD-L1 stained non-small cell lung cancer whole slide images}, + booktitle = {ECP}, + year = {2022}, + abstract = {Nuclei detection in histopathology images is an important prerequisite step of downstream research and clinical analyses, such as counting cells and spatial interactions.
In this study, we developed an AI-based nuclei detector using the YOLOv5 framework in whole-slide NSCLC cases. Our dataset consisted of 42 PD-L1 stained cases (30 training, 12 test). Four trained (non-expert) readers manually annotated all nuclei (both positive/negative) within regions of interest (ROIs) viewed at 40X magnification. We trained a YOLOv5(s) network on annotations of one reader. Performance was measured using F1 score analysis; hits were defined as being less than 10 um away from annotations. + + We evaluate YOLOv5 on the test set by pairing it against all four readers separately. There, YOLOv5 performs excellently, falling within the interrater variability of the four readers: the mean F1 score over algorithm-reader pairs is 0.84 (range 0.76-0.92) while the mean F1 score over pairs of readers is 0.82 (range 0.76-0.86). When we determine the cell count (number of annotations/predictions) per ROI in the test set, agreement of algorithm-reader pairs and reader pairs is equally well aligned: 0.93 (range 0.90-0.97) versus 0.94 (range 0.92-0.96). Visual inspection indicates YOLOv5 performs equally well on PD-L1 positive and negative cells. + + In future work, we could extend this detector to additional tissues and immunohistochemistry stainings. Moreover, this detector could be used as an AI-assisted manual point annotation tool: while human readers perform the (context-driven) task of delineating homogeneous regions (e.g. clusters of PD-L1 positive stained cells), the detector performs the (local, yet laborious) task of identifying individual nuclei within these regions, providing labelled point annotations.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Eeke23, + author = {van Eekelen, Leander and Litjens, Geert and Hebeda, Konnie}, + year = {2023}, + month = {2}, + journal = PATHOB, + title = {Artificial intelligence in bone marrow histological diagnostics: potential applications and challenges.}, + doi = {10.1159/000529701}, + abstract = {The expanding digitalization of routine diagnostic histological slides holds a potential to apply artificial intelligence (AI) to pathology, including bone marrow (BM) histology. In this perspective we describe potential tasks in diagnostics that can be supported, investigations that can be guided and questions that can be answered by the future application of AI on whole slide images of BM biopsies. These range from characterization of cell lineages and quantification of cells and stromal structures to disease prediction. First glimpses show an exciting potential to detect subtle phenotypic changes with AI that are due to specific genotypes. The discussion is illustrated by examples of current AI research using BM biopsy slides.
In addition, we briefly discuss current challenges for implementation of AI-supported diagnostics.}, + file = {:pdf/Eeke23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {36791682}, + ss_id = {fc5eb96c03e65d357cb8425a5938f4a6b62be9bd}, + all_ss_ids = {['fc5eb96c03e65d357cb8425a5938f4a6b62be9bd']}, + gscites = {2}, +} + +@conference{Eerd18, + author = {Anke W van der Eerden and Thomas LA van den Heuvel and Bram H Geurts and Bram Platel and Thijs Vande Vyveree and Luc van den Hauwee and Teuntje MJC Andriessen and Bozena M Goraj and Rashindra Manniesing}, + title = {Automatic versus human detection of traumatic cerebral microbleeds on susceptibility weighted imaging}, + booktitle = ECR, + year = {2018}, + abstract = {Purpose: To evaluate the performance of computer-aided traumatic cerebral microbleed detection (CAD) with and without human interference. + Methods and Materials: 33 adult patients admitted to our emergency department with moderate or severe TBI (mean age 33 years, 21 males) underwent a standardized trauma 3T MRI-protocol at 28 weeks. The microbleeds in their SWI-scans were annotated by an expert. A CAD system was developed, based on this training set. Six experts, blind to the CAD-results, annotated a subset of ten patients. In two experiments, we compared the performance of the CAD system to each of these six experts, using the majority voting results of the other five experts as the reference standard for the calculation of performance characteristics (paired t-test). In the first experiment, the performance of fully automatic microbleed detection was assessed. In the second experiment, one expert removed CAD-annotations she considered false positives from the automatically detected microbleeds, and briefly screened the CAD-annotated SWI-scans to complete the dataset with missed definite microbleeds. + Results: Fully manual evaluation took one hour per patient with an average sensitivity of 77% (SD 12.4%). The sensitivity of fully automatic detection of candidate microbleeds was 89% (SD 0.8%). Evaluation of the CAD results by an expert took 13 minutes per patient with a sensitivity of 93% (SD 1.0%) (p < 0.05 versus fully manual evaluation). + Conclusion: This CAD system allows detecting more microbleeds in a reduced reading time. This may facilitate the execution of otherwise too time-consuming large studies on the clinical relevance of microbleeds.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Eerd21, + author = {van der Eerden, A.W. and van den Heuvel, T.L. and Perlbarg, V. and Vart, P. and Vos, P.E. and Puybasset, L. and Galanaud, D. and Platel, B. and Manniesing, R. and Goraj, B.M.}, + title = {Traumatic Cerebral Microbleeds in the Subacute Phase Are Practical and Early Predictors of Abnormality of the Normal-Appearing White Matter in the Chronic Phase}, + doi = {10.3174/ajnr.a7028}, + year = {2021}, + abstract = {BACKGROUND AND PURPOSE: In the chronic phase after traumatic brain injury, DTI findings reflect WM integrity. DTI interpretation in the subacute phase is less straightforward. Microbleed evaluation with SWI is straightforward in both phases. We evaluated whether the microbleed concentration in the subacute phase is associated with the integrity of normal-appearing WM in the chronic phase. MATERIALS AND METHODS: Sixty of 211 consecutive patients 18 years of age or older admitted to our emergency department <=24 hours after moderate to severe traumatic brain injury matched the selection criteria. 
Standardized 3T SWI, DTI, and T1WI were obtained 3 and 26 weeks after traumatic brain injury in 31 patients and 24 healthy volunteers. At baseline, microbleed concentrations were calculated. At follow-up, mean diffusivity (MD) was calculated in the normal-appearing WM in reference to the healthy volunteers (MDz). Through linear regression, we evaluated the relation between microbleed concentration and MDz in predefined structures. RESULTS: In the cerebral hemispheres, MDz at follow-up was independently associated with the microbleed concentration at baseline (left: B = 38.4 [95% CI 7.5-69.3], P = .017; right: B = 26.3 [95% CI 5.7-47.0], P = .014). No such relation was demonstrated in the central brain. MDz in the corpus callosum was independently associated with the microbleed concentration in the structures connected by WM tracts running through the corpus callosum (B = 20.0 [95% CI 24.8-75.2], P < .000). MDz in the central brain was independently associated with the microbleed concentration in the cerebral hemispheres (B = 25.7 [95% CI 3.9-47.5], P = .023). CONCLUSIONS: SWI-assessed microbleeds in the subacute phase are associated with DTI-based WM integrity in the chronic phase. These associations are found both within regions and between functionally connected regions. Abbreviations: B: linear regression coefficient; Bcmb-conc: linear regression coefficient with microbleed concentration as independent variable; Bcmb-nr: linear regression coefficient with microbleed number as independent variable; MD: mean diffusivity; MDz: Z-score of mean diffusivity, normalized to the healthy control participants; t1: 3 (2-5) weeks after TBI; t2: 26 (25-28) weeks after TBI; TAI: traumatic axonal injury; TBI: traumatic brain injury; FA: fractional anisotropy; MARS: Microbleed Anatomical Rating Scale; GCS: Glasgow Coma Scale}, + url = {http://dx.doi.org/10.3174/ajnr.A7028}, + file = {Eerd21.pdf:pdf\Eerd21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {American Journal of Neuroradiology}, + citation-count = {3}, + automatic = {yes}, + pages = {861-867}, + volume = {42}, +} + +@article{Ehte16, + author = {Bejnordi, Babak Ehteshami and Litjens, Geert and Timofeeva, Nadya and Otte-Holler, Irene and Homeyer, Andre and Karssemeijer, Nico and van der Laak, Jeroen}, + title = {Stain specific standardization of whole-slide histopathological images}, + journal = TMI, + year = {2016}, + volume = {35}, + number = {2}, + month = {9}, + pages = {404--415}, + doi = {10.1109/TMI.2015.2476509}, + url = {http://dx.doi.org/10.1109/TMI.2015.2476509}, + abstract = {Variations in the color and intensity of hematoxylin and eosin (H&E) stained histological slides can potentially hamper the effectiveness of quantitative image analysis. This paper presents a fully automated algorithm for standardization of whole-slide histopathological images to reduce the effect of these variations. The proposed algorithm, called whole-slide image color standardizer (WSICS), utilizes color and spatial information to classify the image pixels into different stain components. The chromatic and density distributions for each of the stain components in the hue-saturation-density color model are aligned to match the corresponding distributions from a template whole-slide image (WSI). The performance of the WSICS algorithm was evaluated on two datasets. The first originated from 125 H&E stained WSIs of lymph nodes, sampled from 3 patients, and stained in 5 different laboratories on different days of the week.
The second comprised 30 H&E stained WSIs of rat liver sections. The result of qualitative and quantitative evaluations using the first dataset demonstrate that the WSICS algorithm outperforms competing methods in terms of achieving color constancy. The WSICS algorithm consistently yields the smallest standard deviation and coefficient of variation of the normalized median intensity measure. Using the second dataset, we evaluated the impact of our algorithm on the performance of an already published necrosis quantification system. The performance of this system was significantly improved by utilizing the WSICS algorithm. The results of the empirical evaluations collectively demonstrate the potential contribution of the proposed standardization algorithm to improved diagnostic accuracy and consistency in computer-aided diagnosis for histopathology data.}, + file = {Ehte16.pdf:pdf\\Ehte16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26353368}, + gsid = {11832967785880346265}, + gscites = {242}, + ss_id = {2729d2918978d5ed602aa843fbdd027d83e0036f}, + all_ss_ids = {['2729d2918978d5ed602aa843fbdd027d83e0036f']}, +} + +@article{Ehte16a, + author = {Bejnordi, B. Ehteshami and Balkenhol, M. and Litjens, G. and Holland, R. and Bult, P. and Karssemeijer, N. and van der Laak, J.}, + title = {Automated Detection of {DCIS} in Whole-Slide {H\&E} Stained Breast Histopathology Images}, + journal = TMI, + year = {2016}, + volume = {35}, + number = {9}, + month = {9}, + pages = {2141-2150}, + doi = {10.1109/TMI.2016.2550620}, + url = {http://dx.doi.org/10.1109/TMI.2016.2550620}, + abstract = {This paper presents and evaluates a fully automatic method for detection of ductal carcinoma in situ (DCIS) in digitized hematoxylin and eosin (H&E) stained histopathological slides of breast tissue. The proposed method applies multi-scale superpixel classification to detect epithelial regions in whole-slide images (WSIs). Subsequently, spatial clustering is utilized to delineate regions representing meaningful structures within the tissue such as ducts and lobules. A region-based classifier employing a large set of features including statistical and structural texture features and architectural features is then trained to discriminate between DCIS and benign/normal structures. The system is evaluated on two datasets containing a total of 205 WSIs of breast tissue. Evaluation was conducted both on the slide and the lesion level using FROC analysis. The results show that to detect at least one true positive in every DCIS containing slide, the system finds 2.6 false positives per WSI. The results of the per-lesion evaluation show that it is possible to detect 80% and 83% of the DCIS lesions in an abnormal slide, at an average of 2.0 and 3.0 false positives per WSI, respectively. Collectively, the result of the experiments demonstrate the efficacy and accuracy of the proposed method as well as its potential for application in routine pathological diagnostics. To the best of our knowledge, this is the first DCIS detection algorithm working fully automatically on WSIs.}, + file = {Ehte16a.pdf:pdf\\Ehte16a.pdf:PDF}, + optnote = {DIAG}, + pmid = {27076354}, + gsid = {4689762899907422351}, + gscites = {82}, + ss_id = {7e39820fb261f367ce8bf362de991ebff4400083}, + all_ss_ids = {['7e39820fb261f367ce8bf362de991ebff4400083']}, +} + +@article{Ehte17, + author = {Ehteshami Bejnordi, Babak and Veta, Mitko and P. J. 
van Diest and van Ginneken, Bram and Karssemeijer, Nico and Litjens, Geert and van der Laak, Jeroen A W M and the CAMELYON16 Consortium and Hermsen, Meyke and Manson, Quirine F and Balkenhol, Maschenka and Geessink, Oscar and Stathonikos, Nikolaos and van Dijk, Marcory Crf and Bult, Peter and Beca, Francisco and Beck, Andrew H and Wang, Dayong and Khosla, Aditya and Gargeya, Rishab and Irshad, Humayun and Zhong, Aoxiao and Dou, Qi and Li, Quanzheng and Chen, Hao and Lin, Huang-Jing and Heng, Pheng-Ann and Ha{\ss}, Christian and Bruni, Elia and Wong, Quincy and Halici, Ugur and \"Oner, Mustafa \"Umit and Cetin-Atalay, Rengul and Berseth, Matt and Khvatkov, Vitali and Vylegzhanin, Alexei and Kraus, Oren and Shaban, Muhammad and Rajpoot, Nasir and Awan, Ruqayya and Sirinukunwattana, Korsuk and Qaiser, Talha and Tsang, Yee-Wah and Tellez, David and Annuscheit, Jonas and Hufnagl, Peter and Valkonen, Mira and Kartasalo, Kimmo and Latonen, Leena and Ruusuvuori, Pekka and Liimatainen, Kaisa and Albarqouni, Shadi and Mungal, Bharti and George, Ami and Demirci, Stefanie and Navab, Nassir and Watanabe, Seiryo and Seno, Shigeto and Takenaka, Yoichi and Matsuda, Hideo and Ahmady Phoulady, Hady and Kovalev, Vassili and Kalinovsky, Alexander and Liauchuk, Vitali and Bueno, Gloria and Fernandez-Carrobles, M Milagro and Serrano, Ismael and Deniz, Oscar and Racoceanu, Daniel and Ven\^ancio, Rui}, + title = {Diagnostic Assessment of Deep Learning Algorithms for Detection of Lymph Node Metastases in Women With Breast Cancer}, + journal = JAMA, + year = {2017}, + volume = {318}, + issue = {22}, + month = {12}, + pages = {2199--2210}, + doi = {10.1001/jama.2017.14585}, + abstract = {Application of deep learning algorithms to whole-slide pathology images can potentially improve diagnostic accuracy and efficiency. Assess the performance of automated deep learning algorithms at detecting metastases in hematoxylin and eosin-stained tissue sections of lymph nodes of women with breast cancer and compare it with pathologists' diagnoses in a diagnostic setting. Researcher challenge competition (CAMELYON16) to develop automated solutions for detecting lymph node metastases (November 2015-November 2016). A training data set of whole-slide images from 2 centers in the Netherlands with (n = 110) and without (n = 160) nodal metastases verified by immunohistochemical staining were provided to challenge participants to build algorithms. Algorithm performance was evaluated in an independent test set of 129 whole-slide images (49 with and 80 without metastases). The same test set of corresponding glass slides was also evaluated by a panel of 11 pathologists with time constraint (WTC) from the Netherlands to ascertain likelihood of nodal metastases for each slide in a flexible 2-hour session, simulating routine pathology workflow, and by 1 pathologist without time constraint (WOTC). Deep learning algorithms submitted as part of a challenge competition or pathologist interpretation. The presence of specific metastatic foci and the absence vs presence of lymph node metastasis in a slide or image using receiver operating characteristic curve analysis. The 11 pathologists participating in the simulation exercise rated their diagnostic confidence as definitely normal, probably normal, equivocal, probably tumor, or definitely tumor. The area under the receiver operating characteristic curve (AUC) for the algorithms ranged from 0.556 to 0.994. 
The top-performing algorithm achieved a lesion-level, true-positive fraction comparable with that of the pathologist WOTC (72.4% [95% CI, 64.3%-80.4%]) at a mean of 0.0125 false-positives per normal whole-slide image. For the whole-slide image classification task, the best algorithm (AUC, 0.994 [95% CI, 0.983-0.999]) performed significantly better than the pathologists WTC in a diagnostic simulation (mean AUC, 0.810 [range, 0.738-0.884]; P < .001). The top 5 algorithms had a mean AUC that was comparable with the pathologist interpreting the slides in the absence of time constraints (mean AUC, 0.960 [range, 0.923-0.994] for the top 5 algorithms vs 0.966 [95% CI, 0.927-0.998] for the pathologist WOTC). In the setting of a challenge competition, some deep learning algorithms achieved better diagnostic performance than a panel of 11 pathologists participating in a simulation exercise designed to mimic routine pathology workflow; algorithm performance was comparable with an expert pathologist interpreting whole-slide images without time constraints. Whether this approach has clinical utility will require evaluation in a clinical setting.}, + file = {Ehte17.pdf:pdf\\Ehte17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29234806}, + gsid = {6260116032142865268}, + gscites = {1000}, + ss_id = {ba913e2c03ece1c75f0af4d16dd11c7ffbc6e3ba}, + all_ss_ids = {['ba913e2c03ece1c75f0af4d16dd11c7ffbc6e3ba']}, +} + +@article{Ehte18, + author = {Ehteshami Bejnordi, Babak and Mullooly, Maeve and Pfeiffer, Ruth M and Fan, Shaoqi and Vacek, Pamela M and Weaver, Donald L and Herschorn, Sally and Brinton, Louise A and van Ginneken, Bram and Karssemeijer, Nico and Beck, Andrew H and Gierach, Gretchen L and van der Laak, Jeroen A W M and Sherman, Mark E}, + title = {Using deep convolutional neural networks to identify and classify tumor-associated stroma in diagnostic breast biopsies}, + journal = MODP, + year = {2018}, + volume = {31}, + number = {10}, + month = {10}, + pages = {1502-1512}, + doi = {10.1038/s41379-018-0073-z}, + abstract = {The breast stromal microenvironment is a pivotal factor in breast cancer development, growth and metastases. Although pathologists often detect morphologic changes in stroma by light microscopy, visual classification of such changes is subjective and non-quantitative, limiting its diagnostic utility. To gain insights into stromal changes associated with breast cancer, we applied automated machine learning techniques to digital images of 2387 hematoxylin and eosin stained tissue sections of benign and malignant image-guided breast biopsies performed to investigate mammographic abnormalities among 882 patients, ages 40-65 years, that were enrolled in the Breast Radiology Evaluation and Study of Tissues (BREAST) Stamp Project. Using deep convolutional neural networks, we trained an algorithm to discriminate between stroma surrounding invasive cancer and stroma from benign biopsies. In test sets (928 whole-slide images from 330 patients), this algorithm could distinguish biopsies diagnosed as invasive cancer from benign biopsies solely based on the stromal characteristics (area under the receiver operator characteristics curve = 0.962). Furthermore, without being trained specifically using ductal carcinoma in situ as an outcome, the algorithm detected tumor-associated stroma in greater amounts and at larger distances from grade 3 versus grade 1 ductal carcinoma in situ. 
Collectively, these results suggest that algorithms based on deep convolutional neural networks that evaluate only stroma may prove useful to classify breast biopsies and aid in understanding and evaluating the biology of breast lesions.}, + file = {:pdf/Ehte18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29899550}, + gsid = {5374110760369104933}, + gscites = {147}, + ss_id = {a58015a59562caf325a3f05288147704c055ce8d}, + all_ss_ids = {['a58015a59562caf325a3f05288147704c055ce8d']}, +} + +@article{Eile08, + author = {A. L. Eilertsen and N. Karssemeijer and P. Skaane and E. Qvigstad and P. M. Sandset}, + title = {Differential impact of conventional and low-dose oral hormone therapy, tibolone and raloxifene on mammographic breast density, assessed by an automated quantitative method}, + journal = BJOG, + year = {2008}, + volume = {115}, + pages = {773--779}, + doi = {10.1111/j.1471-0528.2008.01690.x}, + abstract = {{OBJECTIVE}: {T}o evaluate impact of different postmenopausal hormone therapy ({HT}) regimens and raloxifene on mammographic breast density. {DESIGN}: {O}pen, randomised, comparative clinical trial. {SETTING}: {W}omen were recruited through local newspapers and posters. {T}hey were examined at the {D}epartments of {H}aematology, {G}ynaecology, and {R}adiology in a {U}niversity {H}ospital. {POPULATION}: {A} total of 202 healthy postmenopausal women between the age of 45 and 65 years. {METHODS}: {W}omen were randomly assigned to receive daily treatment for 12 weeks with tablets containing low-dose {HT} containing 1 mg 17 beta-estradiol + 0.5 mg norethisterone acetate ({NETA}) (n = 50), conventional-dose {HT} containing 2 mg 17 beta-estradiol and 1 mg {NETA} (n = 50), 2.5 mg tibolone (n = 51), or 60 mg raloxifene (n = 51). {M}ammographic density was determined at baseline and after 12 weeks by an automated technique in full-field digital mammograms. {MAIN} {OUTCOME} {MEASURES}: {M}ammographic density was expressed as volumetric breast density estimations. {RESULTS}: {M}ammographic breast density increased significantly and to a similar degree in both the conventional- and low-dose {HT} groups. {A} small reduction in mammographic breast density was seen in the raloxifene group, whereas those allocated to tibolone treatment only showed minor changes. {CONCLUSIONS}: {O}ur findings demonstrated a significant difference in impact on mammographic breast density between the regimens. 
{A}lthough these results indicate a differential effect of these regimens on breast tissue, the relation to breast cancer risk remains unresolved.}, + file = {Eile08.pdf:pdf\\Eile08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {18355366}, + month = {5}, + gsid = {1711316046672219902}, + gscites = {37}, +} + +@article{Eise03, + author = {Eisenhuber, Edith and Stadler, Alfred and Prokop, Mathias and Fuchsjager, Michael and Weber, Michael and Schaefer-Prokop, Cornelia}, + title = {Detection of monitoring materials on bedside chest radiographs with the most recent generation of storage phosphor plates: dose increase does not improve detection performance}, + journal = Radiology, + year = {2003}, + volume = {227}, + pages = {216--221}, + doi = {10.1148/radiol.2271020045}, + abstract = {To evaluate the performance of the most recent generation of storage phosphor plates for the detection of low-contrast catheter material on bedside chest radiographs.In 10 patients in the intensive care unit, bedside chest radiographs were obtained with a 400-speed conventional screen-film system and with storage phosphor plates with exposure levels comparable to a 200-, 400-, or 800-speed conventional system. The chest radiograph was divided into 20 regions, 60\% of which were superimposed with low-contrast catheter fragments. Six observers independently assessed the presence of catheter fragments by using a receiver operating characteristic (ROC) methodology.Detection performance (mean area under the ROC curve [Az]) with the storage phosphor plates was significantly superior to that with the screen-film system (Az = 0.76) at all three dose levels (Az = 0.88, 0.87, and 0.83 for 200-, 400-, and 800-speed doses, respectively; P <.05). Increasing the dose to a 200-speed system did not significantly increase detection performance compared with that with the 400-speed digital radiographs (Az = 0.88 vs 0.87). Dose reduction to 800 speed significantly deteriorated the detection performance (Az = 0.83) compared with that with the 400- and 200-speed digital radiographs, respectively.The most recent generation of storage phosphor plates is superior to a 400-speed screen-film system for the detection of catheter material, even at an exposure level of 800 speed.}, + file = {Eise03.pdf:pdf\\Eise03.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {1}, + pmid = {12668747}, + month = {4}, + gsid = {7034525693920076958}, + gscites = {12}, +} + +@article{Eise12, + author = {Eisenhuber, Edith and Schaefer-Prokop, Cornelia M. and Prosch, Helmut and Schima, Wolfgang}, + title = {Bedside chest radiography}, + journal = RESPC, + year = {2012}, + volume = {57}, + pages = {427--443}, + doi = {10.4187/respcare.01712}, + abstract = {The bedside chest x-ray (CXR) is an indispensible diagnostic tool for monitoring seriously ill patients in the intensive care unit. The CXR often reveals abnormalities that may not be detected clinically. In addition, bedside CXRs are an irreplaceable tool with which to detect the malposition of tubes and lines and to identify associated complications. Although the image quality is often limited, bedside CXRs still provide valuable diagnostic information. The interpretation of the bedside CXRs is often challenging, and requires extensive radiologic experience to avoid misinterpretation of the wide spectrum of pleural and pulmonary disease. 
The clinical information is of substantial value for the interpretation of the frequently nonspecific findings.}, + file = {Eise12.pdf:pdf\\Eise12.pdf:PDF}, + optnote = {DIAG}, + number = {3}, + pmid = {22391269}, + month = {3}, + ss_id = {bb07c9b92d8d59595eb0a5e135c3f54ebae9e43b}, + all_ss_ids = {['bb07c9b92d8d59595eb0a5e135c3f54ebae9e43b']}, + gscites = {66}, +} + +@article{Emau15, + author = {Emaus, Marleen J. and Bakker, Marije F. and Peeters, Petra H M. and Loo, Claudette E. and Mann, Ritse M. and {de Jong}, Mathijn D F. and Bisschops, Robertus H C. and Veltman, Jeroen and Duvivier, Katya M. and Lobbes, Marc B I. and Pijnappel, Ruud M. and Karssemeijer, Nico and {de Koning}, Harry J. and {van den Bosch}, Maurice A A J. and Monninkhof, Evelyn M. and Mali, Willem P Th M. and Veldhuis, Wouter B. and {van Gils}, Carla H.}, + title = {{MR} Imaging as an Additional Screening Modality for the Detection of Breast Cancer in Women Aged 50-75 Years with Extremely Dense Breasts: The {DENSE} Trial Study Design}, + journal = Radiology, + year = {2015}, + volume = {277}, + number = {2}, + month = {11}, + pages = {527--537}, + doi = {10.1148/radiol.2015141827}, + url = {http://dx.doi.org/10.1148/radiol.2015141827}, + abstract = {Women with extremely dense breasts have an increased risk of breast cancer and lower mammographic tumor detectability. Nevertheless, in most countries, these women are currently screened with mammography only. Magnetic resonance (MR) imaging has the potential to improve breast cancer detection at an early stage because of its higher sensitivity. However, MR imaging is more expensive and is expected to be accompanied by an increase in the number of false-positive results and, possibly, an increase in overdiagnosis. To study the additional value of MR imaging, a randomized controlled trial (RCT) design is needed in which one group undergoes mammography and the other group undergoes mammography and MR imaging. With this design, it is possible to determine the proportion of interval cancers within each study arm. For this to be an effective screening strategy, the additional cancers detected at MR imaging screening must be accompanied by a subsequent reduction in interval cancers. The Dense Tissue and Early Breast Neoplasm Screening, or DENSE, trial is a multicenter RCT performed in the Dutch biennial population-based screening program (subject age range, 50-75 years). The study was approved by the Dutch Minister of Health, Welfare and Sport. In this study, mammographic density is measured by using a fully automated volumetric method. Participants with extremely dense breasts (American College of Radiology breast density category 4) and a negative result at mammography (Breast Imaging Recording and Data System category 1 or 2) are randomly assigned to undergo additional MR imaging (n = 7237) or to be treated according to current practice (n = 28 948). Participants provide written informed consent before the MR imaging examination, which consists of dynamic breast MR imaging with gadolinium-based contrast medium and is intended to be performed for three consecutive screening rounds. The primary outcome is the difference in the proportions of interval cancers between the study arms. Secondary outcomes are the number of MR imaging screening-detected cancers, proportions of false-positive results, diagnostic yield of MR imaging, tumor characteristics, quality of life, and cost effectiveness. 
(c) RSNA, 2015.}, + file = {Emau15.pdf:pdf\\Emau15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26110667}, + gsid = {11553070516898932123}, + gscites = {88}, + all_ss_ids = {['fb1084b454e9b6f203c860e6f89ca4389cd1a8e9', 'fc7b70a4d154e3d2dfabe4393809dfb29cc36a7a']}, +} + +@article{Emau19, + author = {Emaus, Marleen J. and I{\v{s}}gum, Ivana and van Velzen, Sanne G. M. and van den Bongard, H. J. G. Desir{\'{e}}e and Gernaat, Sofie A. M. and Lessmann, Nikolas and Sattler, Margriet G. A. and Teske, Arco J. and Penninkhof, Joan and Meijer, Hanneke and Pignol, Jean-Philippe and Verkooijen, Helena M.}, + title = {Bragatston study protocol: a multicentre cohort study on automated quantification of cardiovascular calcifications on radiotherapy planning {CT} scans for cardiovascular risk prediction in patients with breast cancer}, + journal = BMJO, + year = {2019}, + volume = {9}, + pages = {e028752}, + doi = {10.1136/bmjopen-2018-028752}, + optnote = {DIAG, RADIOLOGY}, + file = {Emau19.pdf:pdf\\Emau19.pdf:PDF}, + ss_id = {88d7b3243c9083be8542ce1fb171241c8e069948}, + all_ss_ids = {['88d7b3243c9083be8542ce1fb171241c8e069948']}, + gscites = {13}, +} + +@inproceedings{Enge01, + author = {S. van Engeland and N. Karssemeijer}, + title = {Matching breast lesions in multiple mammographic views}, + booktitle = MICCAI, + year = {2001}, + volume = {2208/2010}, + series = LNCS, + publisher = {Springer Berlin / Heidelberg}, + pages = {1172-1173}, + doi = {10.1007/3-540-45468-3_149}, + abstract = {By combining information from multiple mammographic views (temporal, mediolateral oblique (MLO) and cranio-caudal (CC), or bilateral) it should be possible to improve the accuracy of computer-aided diagnosis (CAD) methods. In literature various approaches have been described to establish correspondence between multiple views. Highnam et al. [1] used a model-based method to find a curve in the MLO view which corresponds to the potential positions of a point in the CC view. Kok-Wiles et al. [2] used a representation of the nested structure of 'salient' bright regions to match mammogram pairs. Karssemeijer et al. [3] and Lau et al. [4] both used a set of landmarks and applied a nonlinear interpolation to align the skin line of two breast images. Almost all matching approaches are based on acquiring a set of landmarks. In a mammogram the nipple is the most obvious landmark. Radiologists use the distance to the nipple to correlate a lesion in MLO and CC view. It is generally believed that this distance remains fairly constant. The goal of this paper is twofold: first, to investigate to what extent this distance remains constant in multiple views, and second, to investigate if the accuracy of automated detection of the nipple is sufficient to use the distance to the nipple as a reliable measure for matching.
For this purpose we used an annotated database which contained 327 corresponding mammogram pairs from the Dutch breast cancer screening program.}, + file = {Enge01.pdf:pdf\\Enge01.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {9965525877276769229}, + gscites = {6}, + ss_id = {2c91e8d6e8f91f58003862001c4d9b65ac2e966e}, + all_ss_ids = {['2c91e8d6e8f91f58003862001c4d9b65ac2e966e']}, +} + +@article{Enge02, + author = {Engelke, Christoph and Schaefer-Prokop, Cornelia and Schirg, Eckart and Freihorst, Joachim and Grubnic, Sisa and Prokop, Mathias}, + title = {High-resolution {CT} and {CT} angiography of peripheral pulmonary vascular disorders}, + journal = Radiographics, + year = {2002}, + volume = {22}, + pages = {739--764}, + abstract = {Peripheral pulmonary vascular disorders that can be evaluated with computed tomography (CT) include various disease entities with overlapping imaging features and a wide range of clinical manifestations. The overall accuracy of CT in the diagnosis of pulmonary vascular disorders increases with improved spatial resolution, administration of a high-flow contrast material bolus, and the use of cardiac gating. The integration of high-resolution CT and CT angiographic techniques into one scanning protocol has important clinical implications for multisection CT and makes it the imaging modality of choice in the evaluation of this complex group of disorders.}, + file = {Enge02.pdf:pdf\\Enge02.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {12110707}, + month = {7}, + gsid = {7114789308053633858}, + gscites = {84}, +} + +@article{Enge03, + author = {S. van Engeland and P. Snoeren and J. Hendriks and N. Karssemeijer}, + title = {A comparison of methods for mammogram registration}, + journal = TMI, + year = {2003}, + volume = {22}, + pages = {1436--1444}, + doi = {10.1109/TMI.2003.819273}, + abstract = {{M}ammogram registration is an important technique to optimize the display of cases on a digital viewing station, and to find corresponding regions in temporal pairs of mammograms for computer-aided diagnosis algorithms. {F}our methods for mammogram registration were tested and results were compared. {T}he performance of all registration methods was measured by comparing the distance between annotations of abnormalities in the previous and current view before and after registration. {R}egistration by mutual information outperformed alignment based on nipple location, alignment based on center of mass of breast tissue, and warping.}, + file = {Enge03.pdf:pdf\\Enge03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {14606677}, + month = {11}, + gsid = {9254307573267034363}, + gscites = {101}, + ss_id = {047a1a51dff311bfdbf5eed4ab11805d059bdf84}, + all_ss_ids = {['047a1a51dff311bfdbf5eed4ab11805d059bdf84']}, +} + +@article{Enge03a, + author = {Engelbrecht, Marc R. and Huisman, Henkjan J. and Laheij, Robert J F. and Jager, Gerrit J. and van Leenders, Geert J L H. and Hulsbergen-van de Kaa, Christina A. and de la Rosette, Jean J M C H. and Blickman, Johan G. 
and Barentsz, Jelle O.}, + title = {Discrimination of prostate cancer from normal peripheral zone and central gland tissue by using dynamic contrast-enhanced {MR} imaging}, + journal = Radiology, + year = {2003}, + volume = {229}, + pages = {248--254}, + doi = {10.1148/radiol.2291020200}, + abstract = {PURPOSE {T}o evaluate which parameters of dynamic magnetic resonance ({MR}) imaging and {T}2 relaxation rate would result in optimal discrimination of prostatic carcinoma from normal peripheral zone ({PZ}) and central gland ({CG}) tissues and to correlate these parameters with tumor stage, {G}leason score, patient age, and tumor markers. MATERIALS AND METHODS {O}f 58 patients with prostatic carcinoma, 36 were included for analysis. {P}atients underwent {MR} imaging at 1.5 {T} with an endorectal-pelvic phased-array coil and subsequently underwent prostatectomy. {A} {T}2-weighted turbo spin-echo sequence, an intermediate-weighted sequence, and a fast {T}1-weighted gradient-echo sequence (seven sections in 2.03 seconds) during bolus injection of 0.1 mmol gadopentetate dimeglumine per kilogram of body weight were performed. {C}ontrast agent concentration-time curves were obtained for prostatic carcinoma and normal {PZ} and {CG} tissue by using whole-mount sections to guide placement of regions of interest. {O}nset time, time to peak, peak enhancement, relative peak enhancement, washout, and {T}2 relaxation rates were calculated. {M}ultivariate receiver operating characteristic analysis was performed with and without relative peak enhancement. RESULTS {R}esults of multivariate receiver operating characteristic analysis showed that relative peak enhancement demonstrated the highest area under the receiver operating characteristic curve ({AUC}) in the {PZ} and the {CG} ({AUC} = 0.93, 0.82). {R}esults of multivariate analysis without relative peak enhancement showed that relative peak enhancement in the {PZ} and washout in the {CG} demonstrated the highest {AUC} ({AUC} = 0.9, 0.81). {P}earson correlation coefficients between the dynamic parameters or {T}2 relaxation rates in carcinoma and the tumor stage, {G}leason score, patient age, and tumor markers ranged between 0.02 and 0.44. CONCLUSION {T}he optimal parameter for discrimination of prostatic carcinoma in the {PZ} and {CG} was relative peak enhancement. {I}f relative peak enhancement was not used, then peak enhancement was optimal in the {PZ}, and washout was optimal in the {CG}. {P}oor-to-moderate correlation was present between the dynamic parameters or {T}2 relaxation rate in carcinoma and the tumor stage, {G}leason score, patient age, tumor volume, and prostate-specific antigen.}, + file = {Enge03a.pdf:pdf/Enge03a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {12944607}, + month = {10}, + gsid = {2675577220654358659,13589576213391752314}, + gscites = {461}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122329}, + ss_id = {3887e2f1a64b3558388508be00169a39379c473a}, + all_ss_ids = {['3887e2f1a64b3558388508be00169a39379c473a']}, +} + +@inproceedings{Enge03b, + author = {S. van Engeland and P. R. Snoeren and N. Karssemeijer and J. H. C. L. 
Hendriks}, + title = {Optimized perception of lesion growth in mammograms using digital display}, + booktitle = MI, + year = {2003}, + volume = {5034}, + series = SPIE, + pages = {25-31}, + doi = {10.1117/12.480074}, + url = {http://link.aip.org/link/?PSI/5034/25/1}, + abstract = {In this study we investigate two ways of presenting prior and current mammograms on a mammography workstation: next to each other and alternating at the same display (toggle). The experiment consisted of 420 trials with prior-current mammogram pairs, displayed on a dedicated mammography workstation. In a two-alternative forced-choice (2AFC) experiment, observers were asked to select the image containing the largest lesion. The stimuli were created by pasting extracted lesions into normal mammograms. Results showed that the observers performed more accurately in selecting the largest lesion when using the toggle option.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {2357872457316668238}, + gscites = {8}, + ss_id = {a77f68203e9f4e3b0bb83aea1fdc085ee50af4d4}, + all_ss_ids = {['a77f68203e9f4e3b0bb83aea1fdc085ee50af4d4']}, +} + +@inproceedings{Enge05, + author = {S. van Engeland and C. Varela and S. Timp and P. Snoeren and N. Karssemeijer}, + title = {Using context for mass detection and classification in mammograms}, + booktitle = MI, + year = {2005}, + volume = {5794}, + series = SPIE, + pages = {94-102}, + doi = {10.1117/12.594483}, + abstract = {In mammography, computer-aided diagnosis (CAD) techniques for mass detection and classification mainly use local image information to determine whether a region is abnormal or not. There is a lot of interest in developing CAD methods that use context, asymmetry, and multiple view information. However, it is not clear to what extent this may improve CAD results. In this study, we made use of human observers to investigate the potential benefit of using context information for CAD. We investigated to what extent human readers make use of context information derived from the whole breast area and from asymmetry for the tasks of mass detection and classification. Results showed that context information can be used to improve CAD programs for mass detection. However, there is still a lot to be gained from improvement of local feature extraction and classification. This is demonstrated by the fact that the observers did much better in classifying true positive (TP) and false positive (FP) regions than the CAD program. For classification of benign and malignant masses context seems to be less important.}, + file = {Enge05.pdf:pdf\\Enge05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {6185505303724264209}, + gscites = {5}, + ss_id = {bc98a58c4ad70fe8e10233541b8b7174239c649f}, + all_ss_ids = {['bc98a58c4ad70fe8e10233541b8b7174239c649f']}, +} + +@inproceedings{Enge05b, + author = {S. van Engeland and N. Karssemeijer}, + title = {Regrouping initial {CAD} mass detections to facilitate classification of suspicious regions in mammography}, + booktitle = MI, + year = {2005}, + volume = {5747}, + series = SPIE, + pages = {975-986}, + doi = {10.1117/12.595205}, + url = {http://link.aip.org/link/?PSI/5747/975/1}, + abstract = {There is a lot of interest in developing computer-aided detection (CAD) techniques for mammography that use multiple view information. During the development of such techniques we have noticed that they are hampered by the phenomenon that mass lesions are sometimes detected by multiple regions.
This has encouraged us to develop a technique to regroup initial CAD detections to facilitate the final classification of suspicious regions. The regrouping technique searches for detections that belong to the same structure. Therefore, it takes into account the distance between the detections and the image structure along a path between the detections. When correspondence is found, the two detections are replaced by a new detection in between the initial detections. Our regrouping technique correctly regrouped the detections in 48 percent of the masses initially detected by multiple regions. Of the false positive detections two percent were combined, and the percentage of true positive - false positive combinations was one. Incorporation of the algorithm into our CAD scheme resulted in a slight increase in detection performance. In addition, in our multiple view scheme it also resulted in a decrease in the number of incorrectly linked regions in corresponding mammographic views.}, + file = {Enge05b.pdf:pdf/Enge05b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {8156651029382882086}, + gscites = {2}, + ss_id = {2ef80dbd4ef99216073e7287dfd6f785d1a15877}, + all_ss_ids = {['2ef80dbd4ef99216073e7287dfd6f785d1a15877']}, +} + +@article{Enge06, + author = {S. van Engeland and S. Timp and N. Karssemeijer}, + title = {Finding corresponding regions of interest in mediolateral oblique and craniocaudal mammographic views}, + journal = MP, + year = {2006}, + volume = {33}, + pages = {3203--3212}, + doi = {10.1118/1.2230359}, + abstract = {{I}n this paper we present a method to link potentially suspicious mass regions detected by a {C}omputer-{A}ided {D}etection ({CAD}) scheme in mediolateral oblique ({MLO}) and craniocaudal ({CC}) mammographic views of the breast. {F}or all possible combinations of mass candidate regions, a number of features are determined. {T}hese features include the difference in the radial distance from the candidate regions to the nipple, the gray scale correlation between both regions, and the mass likelihood of the regions determined by the single view {CAD} scheme. {L}inear {D}iscriminant {A}nalysis ({LDA}) is used to discriminate between correct and incorrect links. {T}he method was tested on a set of 412 cancer cases. {I}n each case a malignant mass, architectural distortion, or asymmetry was annotated. {I}n 92% of these cases the candidate mass detections by {CAD} included the cancer regions in both views. {I}t was found that in 82% of the cases a correct link between the true positive regions in both views could be established by our method. {P}ossible applications of the method may be found in multiple view analysis to improve {CAD} results, and for the presentation of {CAD} results to the radiologist on a mammography workstation.}, + file = {Enge06.pdf:pdf\\Enge06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {17022213}, + month = {8}, + gsid = {2233176143093901288}, + gscites = {53}, +} + +@article{Enge06a, + author = {S. van Engeland and P. R. Snoeren and H. Huisman and C. Boetes and N. Karssemeijer}, + title = {Volumetric breast density estimation from full-field digital mammograms}, + journal = TMI, + year = {2006}, + volume = {25}, + pages = {273--282}, + doi = {10.1109/TMI.2005.862741}, + abstract = {{A} method is presented for estimation of dense breast tissue volume from mammograms obtained with full-field digital mammography ({FFDM}).
{T}he thickness of dense tissue mapping to a pixel is determined by using a physical model of image acquisition. {T}his model is based on the assumption that the breast is composed of two types of tissue, fat and parenchyma. {E}ffective linear attenuation coefficients of these tissues are derived from empirical data as a function of tube voltage (k{V}p), anode material, filtration, and compressed breast thickness. {B}y employing these, tissue composition at a given pixel is computed after performing breast thickness compensation, using a reference value for fatty tissue determined by the maximum pixel value in the breast tissue projection. {V}alidation has been performed using 22 {FFDM} cases acquired with a {GE} {S}enographe 2000{D} by comparing the volume estimates with volumes obtained by semi-automatic segmentation of breast magnetic resonance imaging ({MRI}) data. {T}he correlation between {MRI} and mammography volumes was 0.94 on a per image basis and 0.97 on a per patient basis. {U}sing the dense tissue volumes from {MRI} data as the gold standard, the average relative error of the volume estimates was 13.6%.}, + file = {Enge06a.pdf:pdf\\Enge06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {16524084}, + month = {3}, + gsid = {12818555548460097494}, + gscites = {250}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/51174}, + ss_id = {878d8de48dd9eafa5ec823e17d918b56c856eed9}, + all_ss_ids = {['878d8de48dd9eafa5ec823e17d918b56c856eed9']}, +} + +@phdthesis{Enge06b, + author = {van Engeland, S.}, + title = {Detection of mass lesions in mammograms by using multiple views}, + year = {2006}, + url = {http://repository.ubn.ru.nl/handle/2066/50316}, + abstract = {Computer-aided Detection (CAD) and image processing techniques for mammography are being developed to aid radiologists during screening and to increase the detection rate. During interpretation, radiologists use information from all available views, i.e. previous examinations and projections from different angles. Most current CAD systems, on the other hand, use information from only one view at the time. The research described in this thesis concerns the development of multiple view CAD techniques to increase the performance of CAD for the detection of masses in mammograms. The first part of this thesis addresses the comparison of temporal mammogram pairs. The second part concerns the combination of information from two projections of the same breast. The final chapter describes a method for accurate quantitative estimation of the dense tissue volume from mammograms. Such a measure for breast density can be used for instance for comparisons of the left and right breast.}, + copromotor = {N. Karssemeijer}, + file = {Enge06b.pdf:pdf/Enge06b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {C. C. A. M. Gielen}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Enge07, + author = {S. van Engeland and N. Karssemeijer}, + title = {Combining two mammographic projections in a computer aided mass detection method}, + journal = MP, + year = {2007}, + volume = {34}, + pages = {898--905}, + doi = {10.1118/1.2436974}, + abstract = {{A} method is presented to improve computer aided detection ({CAD}) results for masses in mammograms by fusing information obtained from two views of the same breast. {I}t is based on a previously developed approach to link potentially suspicious regions in mediolateral oblique ({MLO}) and craniocaudal ({CC}) views. 
{U}sing correspondence between regions, we extended our {CAD} scheme by building a cascaded multiple-classifier system, in which the last stage computes suspiciousness of an initially detected region conditional on the existence and similarity of a linked candidate region in the other view. {W}e compared the two-view detection system with the single-view detection method using free-response receiver operating characteristic ({FROC}) analysis and cross validation. {T}he dataset used in the evaluation consisted of 948 four-view mammograms, including 412 cancer cases with a mass, architectural distortion, or asymmetry. {A} statistically significant improvement was found in the lesion based detection performance. {A}t a false positive ({FP}) rate of 0.1 {FP}/image, the lesion sensitivity improved from 56% to 61%. {C}ase based sensitivity did not improve.}, + file = {Enge07.pdf:pdf\\Enge07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {17441235}, + month = {2}, + gsid = {1290158304389131269}, + gscites = {73}, + ss_id = {e5991fe901cb59f561010d99925459a0a99cb9ac}, + all_ss_ids = {['e5991fe901cb59f561010d99925459a0a99cb9ac']}, +} + +@conference{Enge19, + author = {Engelberts, Jonne and Gonz\'{a}lez-Gonzalo, Cristina and S\'{a}nchez, Clara I. and van Grinsven, Mark J.}, + booktitle = ARVO, + title = {Automatic Segmentation of Drusen and Exudates on Color Fundus Images using Generative Adversarial Networks}, + abstract = {Purpose: The presence of drusen and exudates, visible as bright lesions on color fundus images, is one of the early signs of visual threatening diseases such as Age-related Macular Degeneration and Diabetic Retinopathy. Accurate detection and quantification of these lesions during screening can help identify patients that would benefit from treatment. We developed a method based on generative adversarial networks (GANs) to segment bright lesions on color fundus images. + + Methods: We used 4179 color fundus images that were acquired during clinical routine. The images were contrast enhanced to increase the contrast between bright lesions and the background. All bright lesions were manually annotated by marking the center point of the lesions. The GAN was trained to estimate the image without bright lesions. The final segmentation was obtained by taking the difference between the input image and the estimated output. + + Results: This method was applied to an independent test set of 52 color fundus images with non-advanced stages of AMD from the European Genetic Database, which were fully segmented for bright lesions by two trained human observers. The method achieved Dice scores of 0.4862 and 0.4849 when compared to the observers, whereas the inter-observer Dice score was 0.5043. The total segmented bright lesion area per image was evaluated using the intraclass correlation (ICC). The method scored 0.8537 and 0.8352 when compared to the observers, whereas the inter-observer ICC was 0.8893. + + Conclusions: The results show the performance is close to the agreement between trained observers. 
This automatic segmentation of bright lesions can help early diagnosis of visual threatening diseases and opens the way for large scale clinical trials.}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, + all_ss_ids = {34559bb0d95c5166625945eef9b53b21a30838fa}, + gscites = {1}, +} + +@article{Esch22, + author = {Eschert, Tim and Schwendicke, Falk and Krois, Joachim and Bohner, Lauren and Vinayahalingam, Shankeeth and Hanisch, Marcel}, + title = {A Survey on the Use of Artificial Intelligence by Clinicians in Dentistry and Oral and Maxillofacial Surgery.}, + doi = {10.3390/medicina58081059}, + issue = {8}, + volume = {58}, + abstract = {Applications of artificial intelligence (AI) in medicine and dentistry have been on the rise in recent years. In dental radiology, deep learning approaches have improved diagnostics, outperforming clinicians in accuracy and efficiency. This study aimed to provide information on clinicians' knowledge and perceptions regarding AI. A 21-item questionnaire was used to study the views of dentistry professionals on AI use in clinical practice. In total, 302 questionnaires were answered and assessed. Most of the respondents rated their knowledge of AI as average (37.1%), below average (22.2%) or very poor (23.2%). The participants were largely convinced that AI would improve and bring about uniformity in diagnostics (mean Likert +- standard deviation 3.7 +- 1.27). Among the most serious concerns were the responsibility for machine errors (3.7 +- 1.3), data security or privacy issues (3.5 +- 1.24) and the divestment of healthcare to large technology companies (3.5 +- 1.28). : Within the limitations of this study, insights into the acceptance and use of AI in dentistry are revealed for the first time.}, + file = {Esch22.pdf:pdf\\Esch22.pdf:PDF}, + journal = {Medicina (Kaunas, Lithuania)}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36013526}, + year = {2022}, +} + +@conference{Estr12, + author = {L. Gallardo-Estrella and P. A. de Jong and O. M. Mets and B. van Ginneken and E. M. van Rikxoort}, + title = {Automatic classification of pulmonary function in {COPD} patients using trachea analysis in chest {CT} scans}, + booktitle = ESTI, + year = {2012}, + abstract = {{OBJECTIVES}:Tracheal morphologic change during breathing may be related to {COPD} severity. We investigated if automatic shape analysis of the trachea from inspiration and expiration computed tomography ({CT}) scans can improve {CT}-based classification of {COPD} patients into {GOLD} stages. {MATERIALS AND METHODS}: A database of 187 subjects of {GOLD} stages 0 through 4 was constructed. The trachea was automatically segmented on an automatically determined axial section 1 cm above the carina from both the inspiration and expiration scans. The tracheal shape ({TS}), encoded by the length of rays cast from the center of the trachea, and emphysema score ({ES}) at -950 {HU} on the inspiration scan, were used as features for classification. A nearest mean statistical classifier was trained to assign subjects to {GOLD} stage based on three sets of features: {ES}, {TS}, and {ES}+{TS}. {RESULTS}: Accuracy of {GOLD} stage classification was 42%, 41%, and 51% for {ES}, {TS}, and {ES}+{TS}, respectively. For distinguishing non-{COPD} subjects ({GOLD} 0) versus {COPD} patients ({GOLD} 1-4), accuracies were 67%, 72% and 80%. {CONCLUSIONS}: Tracheal shape can be extracted automatically from {CT} scans and is related to pulmonary function. 
Including tracheal shape features together with density mask scores improves {CT}-based detection and quantification of {COPD}.}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Estr19, + author = {Leticia Gallardo Estrella}, + title = {Quantification of COPD biomarkers in thoracic CT scans}, + year = {2019}, + url = {https://repository.ubn.ru.nl/handle/2066/201194}, + abstract = {In this thesis we present an automatic method to quantify tracheal abnormalities in thoracic CT images from COPD patients and validated a CT standardization protocol that allows for a robust quantification of emphysema that is independent of reconstruction settings. This section contains a general summary of every chapter in this thesis.}, + copromotor = {E. van Rikxoort}, + file = {:pdf/Estr19.pdf:PDF;:png/publications/Estr19-1.png:PNG image}, + optnote = {DIAG}, + promotor = {B. van Ginneken and M. Prokop}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Exte20, + author = {den Exter, Paul L. and Kroft, Lucia J.M. and Gonsalves, Carol and Le Gal, Gregoire and Schaefer-Prokop, Cornelia M. and Carrier, Marc and Huisman, Menno V. and Klok, Frederikus A.}, + title = {Establishing diagnostic criteria and treatment of subsegmental pulmonary embolism: A Delphi analysis of experts}, + doi = {10.1002/rth2.12422}, + year = {2020}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1002/rth2.12422}, + file = {Exte20.pdf:pdf\Exte20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Research and Practice in Thrombosis and Haemostasis}, + citation-count = {15}, + automatic = {yes}, + pages = {1251-1261}, + volume = {4}, +} + +@article{Fait21, + author = {Faita, Francesco and Oranges, Teresa and Di Lascio, Nicole and Ciompi, Francesco and Vitali, Saverio and Aringhieri, Giacomo and Janowska, Agata and Romanelli, Marco and Dini, Valentina}, + title = {Ultra-high-frequency ultrasound and machine learning approaches for the differential diagnosis of melanocytic lesions.}, + doi = {10.1111/exd.14330}, + abstract = {Malignant melanoma (MM) is one of the most dangerous skin cancers. The aim of this study was to present a potential new method for the differential diagnosis of MM from melanocytic naevi (MN). We examined 20 MM and 19 MN with a new ultra-high-frequency ultrasound (UHFUS) equipped with a 70 MHz linear probe. Ultrasonographic images were processed for calculating 8 morphological parameters (area, perimeter, circularity, area ratio, standard deviation of normalized radial range, roughness index, overlap ratio and normalized residual mean square value) and 122 texture parameters. Colour Doppler images were used to evaluate the vascularization. Features reduction was implemented by means of principal component analysis (PCA), and 23 classification algorithms were tested on the reduced features using histological response as ground-truth. Best results were obtained using only the first component of the PCA and the weighted k-nearest neighbour classifier; this combination led to an accuracy of 76.9%, area under the ROC curve of 83%, sensitivity of 84% and specificity of 70%.
The histological analysis still remains the gold-standard, but the UHFUS images processing using a machine learning approach could represent a new non-invasive approach.}, + journal = _Experimental_Dermatology_, + month = {3}, + optnote = {DIAG}, + pmid = {33738861}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/249929}, + ss_id = {dda2624afa1a1e32c91322c192e397913fe2fc7d}, + all_ss_ids = {['dda2624afa1a1e32c91322c192e397913fe2fc7d']}, + gscites = {11}, +} + +@mastersthesis{Fary20, + author = {Faryna, Khrystyna}, + title = {Brain MRI synthesis via pathology factorization and adversarial cycle-consistent learning for data augmentation}, + year = {2020}, + abstract = {Identifying pathology in medical imaging data is a crucial step for patient diagnosis, treatment and prognosis. Deep learning, particularly convolutional neural networks, has led to breakthroughs in computer-aided diagnosis and detection. Nonetheless, these methods are heavily dependent on large number of training samples, which is not often available in medical imaging field. Moreover, while state-of-art supervised segmentation methods rely on precise voxel-wise annotations, manual lesion delineation in medical images is extremely laborious and time consuming task. Recent advancements in the field of generative adversarial networks (GAN) show promising results in generating realistic data samples for the purpose of augmenting datasets for downstream tasks, however the quality of samples generated by GANs also depends on the variability and size of the training set, particularly for large images. Unlike the majority of recent GAN methods, which focus on generation of either unlabeled samples or data restricted to particular classes, we propose a framework for controllable pathological image synthesis. Our approach is inspired by CycleGAN, where instead of generating images from random noise, we perform cycle-consistent image-to-image translation between two domains: healthy and pathological. Guided by a semantic map, an adversarially trained generator synthesizes pathology on a healthy image in the specified location. We demonstrate our approach in two distinct applications: a public dataset for brain tumors segmentation (BraTS2018) and an institutional dataset of cerebral microbleeds in traumatic brain injury patients. We subsequently utilize synthetic images generated with our method for data augmentation for the detection of cerebral microbleeds. Enriching the training dataset with synthetic images produced by our method exhibits the potential to increase sensitivity of cerebral microbleeds in traumatic brain injury detection system. The model trained only on real samples achieves an average sensitivity of 88% at 20 false positives per patient, after augmenting the training set with synthetic samples the model achieves an average sensitivity of 92% at the same rate of false positives per patient.}, + file = {Fary20.pdf:pdf\\Fary20.pdf:PDF}, + optnote = {DIAG}, + school = {MAIA Master Program}, + journal = {Master thesis}, +} + +@inproceedings{Fary20a, + author = {Khrystyna Faryna and Fakrul I. Tushar and Vincent M. D'Anniballe and Rui Hou and Geoffrey D. Rubin and Joseph Y. 
Lo}, + title = {{Attention-guided classification of abnormalities in semi-structured computed tomography reports}}, + booktitle = MI, + series = SPIE, + year = {2020}, + volume = {11314}, + pages = {397 -- 403}, + doi = {10.1117/12.2551370}, + url = {https://doi.org/10.1117/12.2551370}, + abstract = {Lack of annotated data is a major challenge to machine learning algorithms, particularly in the field of radiology. Algorithms that can efficiently extract labels in a fast and precise manner are in high demand. Weak supervision is a compromise solution, particularly, when dealing with imaging modalities like Computed Tomography (CT), where the number of slices can reach 1000 per case. Radiology reports store crucial information about clinicians' findings and observations in CT slices. Automatic generation of labels from CT reports is not a trivial task due to the complexity of sentences and diversity of expression in free-text narration. In this study, we focus on abnormality classification in lungs, liver and kidneys. Firstly, a rule-based model is used to extract weak labels at the case level. Afterwards, attention guided recurrent neural network (RNN) is trained to perform binary classification of radiology reports in terms of whether the organ is normal or abnormal. Additionally, a multi-label RNN with attention mechanism is trained to perform binary classification by aggregating its output for four representative diseases (lungs: emphysema, mass-nodule, effusion and atelectasis-pneumonia; liver: dilatation, fatty infiltration-steatosis, calcification-stone-gallstone, lesion-mass; kidneys: atrophy, cyst, stone-calculi, lesion) into a single abnormal class. Performance has been evaluated using the receiver operating characteristic (ROC) area under the curve (AUC) on 274, 306 and 278 reports for lungs, liver and kidneys correspondingly, manually annotated by radiology experts. The change in performance was evaluated for different sizes of training dataset for lungs. The AUCs of multi-label pretrained models: lungs - 0.929, liver - 0.840, kidney - 0.844; multi-label models: lungs - 0.903, liver - 0.848, kidney - 0.906; binary pretrained models: lungs - 0.922, liver - 0.826, kidneys - 0.928.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Fary20b, + author = {Khrystyna Faryna and Kevin Koschmieder and Marcella M. Paul and Thomas van den Heuvel and Anke van der Eerden and Rashindra Manniesing and Bram van Ginneken}, + title = {Adversarial cycle-consistent synthesis of cerebral microbleeds for data augmentation}, + booktitle = {Medical Imaging Meets NeurIPS Workshop - 34th Conference on Neural Information Processing Systems (NeurIPS)}, + year = {2020}, + url = {https://arxiv.org/abs/2101.06468}, + abstract = {We propose a novel framework for controllable pathological image synthesis for data augmentation. Inspired by CycleGAN, we perform cycle-consistent image-to-image translation between two domains: healthy and pathological. Guided by a semantic mask, an adversarially trained generator synthesizes pathology on a healthy image in the specified location. We demonstrate our approach on an institutional dataset of cerebral microbleeds in traumatic brain injury patients. We utilize synthetic images generated with our method for data augmentation in cerebral microbleeds detection. 
Enriching the training dataset with synthetic images exhibits the potential to increase detection performance for cerebral microbleeds in traumatic brain injury patients.}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {4c4f04d1af903f4eabe7767b47439a79e0f6a711}, + all_ss_ids = {['4c4f04d1af903f4eabe7767b47439a79e0f6a711']}, + gscites = {2}, +} + +@inproceedings{Fary21, + author = {Faryna, Khrystyna and van der Laak, Jeroen and Litjens, Geert}, + title = {Tailoring automated data augmentation to H\&E-stained histopathology}, + booktitle = MIDL, + url = {https://openreview.net/forum?id=JrBfXaoxbA2}, + file = {:pdf/Fary21.pdf:PDF}, + optnote = {DIAG}, + year = {2021}, + ss_id = {79224099fea90dd0316719579f0c635540a3af7e}, + all_ss_ids = {['79224099fea90dd0316719579f0c635540a3af7e']}, + gscites = {26}, +} + +@article{Fehe22, + author = {Feher, Balazs and Kuchler, Ulrike and Schwendicke, Falk and Schneider, Lisa and Cejudo Grano de Oro, Jose Eduardo and Xi, Tong and Vinayahalingam, Shankeeth and Hsu, Tzu-Ming Harry and Brinz, Janet and Chaurasia, Akhilanand and Dhingra, Kunaal and Gaudin, Robert Andre and Mohammad-Rahimi, Hossein and Pereira, Nielsen and Perez-Pastor, Francesc and Tryfonos, Olga and Uribe, Sergio E. and Hanisch, Marcel and Krois, Joachim}, + title = {Emulating Clinical Diagnostic Reasoning for Jaw Cysts with Machine Learning.}, + doi = {10.3390/diagnostics12081968}, + issue = {8}, + volume = {12}, + abstract = {The detection and classification of cystic lesions of the jaw is of high clinical relevance and represents a topic of interest in medical artificial intelligence research. The human clinical diagnostic reasoning process uses contextual information, including the spatial relation of the detected lesion to other anatomical structures, to establish a preliminary classification. Here, we aimed to emulate clinical diagnostic reasoning step by step by using a combined object detection and image segmentation approach on panoramic radiographs (OPGs). We used a multicenter training dataset of 855 OPGs (all positives) and an evaluation set of 384 OPGs (240 negatives). We further compared our models to an international human control group of ten dental professionals from seven countries. The object detection model achieved an average precision of 0.42 (intersection over union (IoU): 0.50, maximal detections: 100) and an average recall of 0.394 (IoU: 0.50-0.95, maximal detections: 100). The classification model achieved a sensitivity of 0.84 for odontogenic cysts and 0.56 for non-odontogenic cysts as well as a specificity of 0.59 for odontogenic cysts and 0.84 for non-odontogenic cysts (IoU: 0.30). The human control group achieved a sensitivity of 0.70 for odontogenic cysts, 0.44 for non-odontogenic cysts, and 0.56 for OPGs without cysts as well as a specificity of 0.62 for odontogenic cysts, 0.95 for non-odontogenic cysts, and 0.76 for OPGs without cysts. Taken together, our results show that a combined object detection and image segmentation approach is feasible in emulating the human clinical diagnostic reasoning process in classifying cystic lesions of the jaw.}, + file = {Fehe22.pdf:pdf\\Fehe22.pdf:PDF}, + journal = {Diagnostics (Basel, Switzerland)}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36010318}, + year = {2022}, +} + +@article{Fens13, + author = {Fens, Niki and van Rossum, Annelot G J. and Zanen, Pieter and van Ginneken, Bram and van Klaveren, Rob J. and Zwinderman, Aeilko H. 
and Sterk, Peter J.}, + title = {Subphenotypes of Mild-to-Moderate COPD by Factor and Cluster Analysis of Pulmonary Function, {CT} Imaging and Breathomics in a Population-Based Survey}, + journal = COPD, + year = {2013}, + volume = {10}, + pages = {277-285}, + doi = {10.3109/15412555.2012.744388}, + abstract = {Abstract Introduction: Classification of COPD is currently based on the presence and severity of airways obstruction. However, this may not fully reflect the phenotypic heterogeneity of COPD in the (ex-) smoking community. We hypothesized that factor analysis followed by cluster analysis of functional, clinical, radiological and exhaled breath metabolomic features identifies subphenotypes of COPD in a community-based population of heavy (ex-) smokers. Methods: Adults between 50-75 years with a smoking history of at least 15 pack-years derived from a random population-based survey as part of the NELSON study underwent detailed assessment of pulmonary function, chest CT scanning, questionnaires and exhaled breath molecular profiling using an electronic nose. Factor and cluster analyses were performed on the subgroup of subjects fulfilling the GOLD criteria for COPD (post-BD FEV1/FVC < 0.70). Results: Three hundred subjects were recruited, of which 157 fulfilled the criteria for COPD and were included in the factor and cluster analysis. Four clusters were identified: cluster 1 (n = 35; 22\%): mild COPD, limited symptoms and good quality of life. Cluster 2 (n = 48; 31\%): low lung function, combined emphysema and chronic bronchitis and a distinct breath molecular profile. Cluster 3 (n = 60; 38\%): emphysema predominant COPD with preserved lung function. Cluster 4 (n = 14; 9\%): highly symptomatic COPD with mildly impaired lung function. In a leave-one-out validation analysis an accuracy of 97.4\% was reached. Conclusions: This unbiased taxonomy for mild to moderate COPD reinforces clusters found in previous studies and thereby allows better phenotyping of COPD in the general (ex-) smoking population.}, + file = {Fens13.pdf:pdf\\Fens13.pdf:PDF}, + optnote = {DIAG}, + pmid = {23536961}, + month = {3}, + gsid = {14645718639357321343}, + gscites = {33}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/140583}, + ss_id = {14629625274f335f1c62fddf4caf435dde0489f6}, + all_ss_ids = {['14629625274f335f1c62fddf4caf435dde0489f6']}, +} + +@inproceedings{Ferr08, + author = {N. de Carvalho Ferreira and M.Velikova and P. Lucas}, + title = {Bayesian Modelling of Multi-View Mammography}, + booktitle = {ICML workshop: Machine Learning for Health Care Applications}, + year = {2008}, + file = {Ferr08.pdf:pdf/Ferr08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Fick21, + author = {Rutger H.J. Fick and Brice Tayart and Capucine Bertrand and Solene Chan Lang and Tina Rey and Francesco Ciompi and Cyprien Tilmant and Isabelle Farre and Saima Ben Hadj}, + booktitle = {2021 43rd Annual International Conference of the {IEEE} Engineering in Medicine and Biology Society ({EMBC})}, + title = {A Partial Label-Based Machine Learning Approach For Cervical Whole-Slide Image Classification: The Winning {TissueNet} Solution}, + doi = {10.1109/embc46164.2021.9631009}, + publisher = {{IEEE}}, + abstract = {Cervical cancer is the fourth most common cancer in women worldwide. To determine early treatment for patients, it is critical to accurately classify the cervical intraepithelial lesion status based on a microscopic biopsy. 
Lesion classification is a 4-class problem, with biopsies being designated as benign or increasingly malignant as class 1-3, with 3 being invasive cancer. Unfortunately, traditional biopsy analysis by a pathologist is time-consuming and subject to intra- and inter-observer variability. For this reason, it is of interest to develop automatic analysis pipelines to classify lesion status directly from a digitalized whole slide image (WSI). The recent TissueNet Challenge was organized to find the best automatic detection pipeline for this task, using a dataset of 1015 annotated WSI slides. In this work, we present our winning end-to-end solution for cervical slide classification composed of a two-step classification model: First, we classify individual slide patches using an ensemble CNN, followed by an SVM-based slide classification using statistical features of the aggregated patch-level predictions. Importantly, we present the key innovation of our approach, which is a novel partial label-based loss function that allows us to supplement the supervised WSI patch annotations with weakly supervised patches based on the WSI class. This led to us not requiring additional expert tissue annotation, while still reaching the winning score of 94.7%. Our approach is a step towards the clinical inclusion of automatic pipelines for cervical cancer treatment planning. Clinical relevance-The explanation of the winning TissueNet AI algorithm for automated cervical cancer classification, which may provide insights for the next generation of computer assisted tools in digital pathology.}, + file = {Fick21.pdf:pdf\\Fick21.pdf:PDF}, + month = {11}, + optnote = {DIAG}, + year = {2021}, + ss_id = {bb3eedbda08fa1290810666ec202624593e6e2bc}, + all_ss_ids = {['bb3eedbda08fa1290810666ec202624593e6e2bc']}, + gscites = {2}, +} + +@article{Fing19, + author = {Finger, Robert P. and Schmitz-Valckenberg, Steffen and Schmid, Matthias and Rubin, Gary S. and Dunbar, Hannah and Tufail, Adnan and Crabb, David P. and Binns, Alison and S\'{a}nchez, Clara I. and Margaron, Philippe and Normand, Guillaume and Durbin, Mary K. and Luhmann, Ulrich F. O. and Zamiri, Parisa and Cunha-Vaz, Jose and Asmus, Friedrich and Holz, Frank G. and on behalf of the MACUSTAR consortium}, + title = {MACUSTAR: Development and Clinical Validation of Functional, Structural, and Patient-Reported Endpoints in Intermediate Age-Related Macular Degeneration}, + journal = Ophthalmologica, + year = {2019}, + volume = {241}, + issue = {2}, + pages = {61--72}, + doi = {10.1159/000491402}, + url = {https://www.karger.com/Article/FullText/491402}, + abstract = {Currently, no outcome measures are clinically validated and accepted as clinical endpoints by regulatory agencies for drug development in intermediate age-related macular degeneration (iAMD). The MACUSTAR Consortium, a public-private research group funded by the European Innovative Medicines Initiative intends to close this gap. Development of study protocol and statistical analysis plan including predictive modelling of multimodal endpoints based on a review of the literature and expert consensus. This observational study consists of a cross-sectional and a longitudinal part. Functional outcome measures assessed under low contrast and low luminance have the potential to detect progression of visual deficit within iAMD and to late AMD. Structural outcome measures will be multimodal and investigate topographical relationships with function.
Current patient-reported outcome measures (PROMs) are not acceptable to regulators and may not capture the functional deficit specific to iAMD with needed precision, justifying development of novel PROMs for iAMD. The total sample size will be n = 750, consisting mainly of subjects with iAMD (n = 600). As clinical endpoints currently accepted by regulators cannot detect functional loss or patient-relevant impact in iAMD, we will clinically validate novel candidate endpoints for iAMD.}, + file = {Fing19.pdf:pdf\\Fing19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30153664}, + month = {8}, + gsid = {3052267128114152032}, + gscites = {66}, + ss_id = {14d5f7dc42e1af0d92d9e3e3124619a0cc0e395d}, + all_ss_ids = {['14d5f7dc42e1af0d92d9e3e3124619a0cc0e395d']}, +} + +@conference{Firo08, + author = {A. Firouzian and R. Manniesing and A. {van der Lugt} and H.Z. Flach and W.J. Niessen}, + title = {Intracranial Aneurysm Segmentation in {CTA}: Method and Quantitative Validation}, + booktitle = RSNA, + year = {2008}, + abstract = {PURPOSE: Automated and accurate segmentation of intracranial aneurysms from Computed Tomography Angiography (CTA) for characterizing aneurysm volume and shape. RESULTS: The segmentation method successfully segmented the aneurysm and surrounding vasculature. The average surface distances between the method and the observers were 0.53 +- 0.19 mm and 0.61 +- 0.22 mm respectively, which was smaller than the interobserver average surface distance (0.95 +- 0.33 mm). A paired t-test revealed that there was no significant difference in volume between the automatic segmentation and two observers (p=0.66, p=0.41). CONCLUSION: A method for semi-automated aneurysm segmentation has been presented which can replace manual segmentation.}, + file = {Firo08.pdf:pdf\\Firo08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Firo10, + author = {A. Firouzian and R. Manniesing and H. Z. Flach and R. Risselada and F. {van Kooten} and M.C. Sturkenboom and A. {van der Lugt} and W.J. Niessen}, + title = {Intracranial Aneurysm Segmentation in {3D} {CT} Angiography: Method and Quantitative Validation with and without Prior Noise Filtering}, + journal = EJR, + year = {2011}, + volume = {79}, + pages = {299--304}, + doi = {10.1016/j.ejrad.2010.02.015}, + url = {http://www.ncbi.nlm.nih.gov/pubmed/20346606}, + abstract = {Intracranial aneurysm volume and shape are important factors for predicting rupture risk, for pre-surgical planning and for follow-up studies. To obtain these parameters, manual segmentation can be employed; however, this is a tedious procedure, which is prone to inter- and intra-observer variability. Therefore there is a need for an automated method, which is accurate, reproducible and reliable. This study aims to develop and validate an automated method for segmenting intracranial aneurysms in Computed Tomography Angiography (CTA) data. Also, it is investigated whether prior smoothing improves segmentation robustness and accuracy. The proposed segmentation method is implemented in the level set framework, more specifically Geodesic Active Surfaces, in which a surface is evolved to capture the aneurysmal wall via an energy minimization approach. The energy term is composed of three different image features, namely intensity, gradient magnitude and intensity variance. The method requires minimal user interaction, i.e.
a single seed point inside the aneurysm needs to be placed, based on which image intensity statistics of the aneurysm are derived and used in defining the energy term. The method has been evaluated on 15 aneurysms in 11 CTA data sets by comparing the results to manual segmentations performed by two expert radiologists. Evaluation measures were Similarity Index, Average Surface Distance and Volume Difference. The results show that the automated aneurysm segmentation method is reproducible, and performs in the range of inter-observer variability in terms of accuracy. Smoothing by nonlinear diffusion with appropriate parameter settings prior to segmentation, slightly improves segmentation accuracy.}, + file = {Firo10.pdf:pdf\\Firo10.pdf:PDF;Firo10.png:png\\Firo10.png:PNG image}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {20346606}, + month = {8}, + gsid = {19274435147675320}, + gscites = {53}, + ss_id = {2f7749b662adea99d4231d993319f7f24afb844b}, + all_ss_ids = {['2f7749b662adea99d4231d993319f7f24afb844b']}, +} + +@inproceedings{Firo10a, + author = {A. Firouzian and R. Manniesing and H.Z. Flach and R. Risselada and F. {van Kooten} and M.C.J.M. Sturkenboom and A. {van der Lugt} and W.J. Niessen}, + title = {Intracranial Aneurysm Segmentation in {3D} {CT} Angiography: Method and Quantitative Validation}, + booktitle = MI, + year = {2010}, + volume = {7623}, + series = SPIE, + pages = {76233M-1--76233M-8}, + doi = {10.1117/12.843674}, + url = {http://spie.org/x648.html?product_id=843674}, + abstract = {Accurately quantifying aneurysm shape parameters is of clinical importance, as it is an important factor in choosing the right treatment modality (i.e. coiling or clipping), in predicting rupture risk and operative risk and for pre-surgical planning. The first step in aneurysm quantification is to segment it from other structures that are present in the image. As manual segmentation is a tedious procedure and prone to inter- and intra-observer variability, there is a need for an automated method which is accurate and reproducible. In this paper a novel semi-automated method for segmenting aneurysms in Computed Tomography Angiography (CTA) data based on Geodesic Active Contours is presented and quantitatively evaluated. Three different image features are used to steer the level set to the boundary of the aneurysm, namely intensity, gradient magnitude and variance in intensity. The method requires minimum user interaction, i.e. clicking a single seed point inside the aneurysm which is used to estimate the vessel intensity distribution and to initialize the level set. The results show that the developed method is reproducible, and performs in the range of interobserver variability in terms of accuracy.}, + file = {Firo10a.pdf:pdf\\Firo10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {6944444081743431375}, + gscites = {1}, + ss_id = {ee26d8460390b683da4bbdf90ce004257b400878}, + all_ss_ids = {['ee26d8460390b683da4bbdf90ce004257b400878']}, +} + +@inproceedings{Firo12, + author = {A. Firouzian and R. Manniesing and C.T. Metz and S. Klein and B. K. Velthuis and GJE Rinkel and A. van der Lugt and W.J. Niessen}, + title = {Intracranial aneurysm growth quantification on {CTA}}, + booktitle = MI, + year = {2012}, + volume = {8314}, + series = SPIE, + pages = {831448-1--83148-9}, + doi = {10.1117/12.910713}, + abstract = {Next to aneurysm size, aneurysm growth over time is an important indicator for aneurysm rupture risk. 
In this paper a semi-automated method for quantifying aneurysm volume growth over time in CTA data is presented. The method treats a series of longitudinal images as a 4D dataset. Using a 4D non-rigid registration, deformations with respect to baseline scan are determined. Combined with 3D aneurysm segmentation in baseline scan, volume change is assessed by inspecting the deformation field at the aneurysm wall. The results of applying the method to 10 patient data are in line with the assessment in the radiology reports.}, + file = {Firo12.pdf:pdf\\Firo12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + ss_id = {70cb5fd701a0abcb387281dcd399ba803df711b4}, + all_ss_ids = {['70cb5fd701a0abcb387281dcd399ba803df711b4']}, + gscites = {0}, +} + +@article{Firo12a, + author = {A. Firouzian and R. Manniesing and C. T. Metz and R. Risselada and S. Klein and F. {van Kooten} and M. C. J. M. Sturkenboom and and A. {van der Lugt} and W. J. Niessen}, + title = {Quantification of Intracranial Aneurysm Morphodynamics from {ECG}-gated {CT} Angiography}, + journal = AR, + year = {2013}, + volume = {20}, + pages = {52--58}, + doi = {10.1016/j.acra.2012.06.008}, + abstract = {Rationale and Objectives: Aneurysm morphodynamics is potentially relevant for assessing aneurysm rupture risk. A method is proposed for automated quantification and visualization of intracranial aneurysm morphodynamics from ECG-gated computed tomography angiography (CTA) data. Materials and Methods: A prospective study was performed in 19 aneurysms from 14 patients with diagnostic workup for recently discovered aneurysms (n=15) or follow-up of untreated known aneurysms (n=4). The study was approved by the IRB (Institutional Review Board) of the hospital and written informed consent was obtained from each patient. An image post-processing method was developed for quantifying aneurysm volume changes and visualizing local displacement of the aneurysmal wall over a heart cycle using multiphase ECG-gated (4D) CTA. Percentage volume changes over the heart cycle were determined for aneurysms, surrounding arteries and the skull. Results: Pulsation of the aneurysm and its surrounding vasculature during the heart cycle could be assessed from ECG-gated CTA data. The percentage aneurysmal volume change ranged from 3 -18%. Conclusion: ECG-gated CTA can be used to study morphodynamics of intracranial aneurysms. The proposed image analysis method is capable of quantifying the volume changes and visualizing local displacement of the vascular structures over the cardiac cycle.}, + file = {Firo12a.pdf:pdf\\Firo12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {22884403}, + month = {1}, + gsid = {12392195614729486756}, + gscites = {17}, + ss_id = {bad6b84ad1b5439be33b69909ff18b2553f2f403}, + all_ss_ids = {['bad6b84ad1b5439be33b69909ff18b2553f2f403']}, +} + +@phdthesis{Firo13, + author = {Azadeh Firouzian}, + title = {Automated Analysis of Intracranial Aneurysm Morphology and Dynamics from {CTA} Data}, + year = {2013}, + url = {http://repub.eur.nl/pub/38703}, + abstract = {The focus of the research presented in this book is on morphometry and morphodynamic analysis of intracranial aneurysms in {CTA} using automated methods. The first step for such analysis is segmentation of aneurysm from other structures present in the image. Therefore, a novel semi-automatic segmentation method, implemented in the level set frame work, has been developed and validated. 
Further, the dynamic behavior of the aneurysm has been analyzed over a heart cycle using {ECG}-gated {CTA}. To quantify this behavior, a combination of the previously developed segmentation method and {4D} registration was used. The results showed similar trend in aneurysm volume change as in arterial pulse wave and potential rupture areas on the aneurysm with large displacements were detected. This analysis provides additional information on the status of the aneurysmal wall, which, together with other geometrical information, may help to better estimate aneurysm growth and predict rupture risk. Dynamic behavior was also investigated over a longer period of time to quantify growth and growth rate in a longitudinal {CTA} study. Again a combination of the previously developed segmentation method and groupwise registration was used. The method uses all the information from all follow-up scans simultaneously and gives better results than when performing measurements on each scan individually. This method can benefit the clinical diagnosis and treatment planning in the sense that the results are objective and quantitative. For future studies, further attention is to be paid more to the areas of automated aneurysms detection and bone masking in {CTA}. Fusing these two tools with the previously developed ones will make a big step forward in intracranial aneurysms diagnosis and treatment.}, + copromotor = {R. Manniesing}, + file = {Firo13.pdf:pdf\\Firo13.pdf:PDF}, + optnote = {DIAG}, + promotor = {W. J. Niessen}, + school = {Erasmus University Rotterdam}, + journal = {PhD thesis}, +} + +@inproceedings{Fond13, + author = {Fondon, I. and van Grinsven, M. J. J. P. and S\'{a}nchez, C. I. and Saez, A.}, + title = {Perceptually adapted method for optic disc detection on retinal fundus images}, + booktitle = {Computer-Based Medical Systems (CBMS), 2013 IEEE 26th International Symposium on}, + year = {2013}, + pages = {279-284}, + doi = {10.1109/CBMS.2013.6627802}, + file = {Fond13.pdf:pdf\\Fond13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {6}, + gsid = {14225812249063646700}, + gscites = {10}, + ss_id = {13aa11f60bf53aee440facaee410163650dbedf3}, + all_ss_ids = {['13aa11f60bf53aee440facaee410163650dbedf3']}, +} + +@article{Fort12, + author = {Fortuin, Ansje S. and Deserno, Willem M L L G. and Meijer, Hanneke J M. and Jager, Gerrit J. and Takahashi, Satoru and Debats, Oscar A. and Reske, Sven N. and Schick, Christian and Krause, Bernd J. and van Oort, Inge and Witjes, Alfred J. and Hoogeveen, Yvonne L. and Th van Lin, Emile N J. and Barentsz, Jelle O.}, + title = {Value of {PET/CT} and {MR} Lymphography in Treatment of Prostate Cancer Patients with Lymph Node Metastases}, + journal = IJROBP, + year = {2012}, + volume = {84}, + pages = {712-718}, + doi = {10.1016/j.ijrobp.2011.12.093}, + abstract = {PURPOSE: To determine the clinical value of two novel molecular imaging techniques: (11)C-choline positron emission tomography (PET)/computed tomography (CT) and ferumoxtran-10 enhanced magnetic resonance imaging (magnetic resonance lymphography [MRL]) for lymph node (LN) treatment in prostate cancer (PCa) patients. Therefore, we evaluated the ability of PET/CT and MRL to assess the number, size, and location of LN metastases in patients with primary or recurrent PCa. METHODS AND MATERIALS: A total of 29 patients underwent MRL and PET/CT for LN evaluation. The MRL and PET/CT data were analyzed independently. The number, size, and location of the LN metastases were determined.
The location was described as within or outside the standard clinical target volume for elective pelvic irradiation as defined by the Radiation Therapy Oncology Group. Subsequently, the results from MRL and PET/CT were compared. RESULTS: Of the 738 LNs visible on MRL, 151 were positive in 23 of 29 patients. Of the 132 LNs visible on PET/CT, 34 were positive in 13 of 29 patients. MRL detected significantly more positive LNs (p < 0.001) in more patients than PET/CT (p = 0.002). The mean diameter of the detected suspicious LNs on MRL was significantly smaller than those detected by PET/CT, 4.9 mm and 8.4 mm, respectively (p < 0.0001). In 14 (61\%) of 23 patients, suspicious LNs were found outside the clinical target volume with MRL and in 4 (31\%) of 13 patients with PET/CT. CONCLUSION: In patients with PCa, both molecular imaging techniques, MRL and (11)C-choline PET/CT, can detect LNs suspicious for metastasis, irrespective of the existing size and shape criteria for CT and conventional magnetic resonance imaging. On MRL and PET/CT, 61\% and 31\% of the suspicious LNs were located outside the conventional clinical target volume. Therefore, these techniques could help to individualize treatment selection and enable image-guided radiotherapy for patients with PCa LN metastases.}, + file = {Fort12.pdf:pdf\\Fort12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22417806}, + month = {11}, +} + +@conference{Fransen22, + author = {S. J. Fransen and C. Roest and Q. Y. van Lohuizen and J. S. Bosma and T. C. Kwee and D. Yakar and H. Huisman}, + booktitle = RSNA, + title = {Diagnostic AI to speed up MRI protocols by identifying redundant sequences: are all diffusion-weighted prostate MRI sequences necessary?}, + abstract = {PURPOSE: To explore if an expert-level diagnostic AI can help speed up MRI by identifying redundant sequences in diffusion-weighted imaging (DWI) for the diagnostic detection of clinically significant prostate cancer (csPCa). + MATERIALS AND METHODS: Existing deep learning AI architectures detect csPCa based on bpMRI at expert level. We developed a method in which this AI assesses the added diagnostic value of a sequence. This retrospective study included 840 patients with a bi-parametric prostate MRI (bpMRI) for initial detection of csPCa. The bpMRI comprises a T2-weighted image and DWI with b-values of 50, 400, and 800 s/mm2 on a 3T scanner (Skyra and Prima). Our method entails computing ADC and b1400 maps based on different DWI combinations: 1) b800 excluded, 2) b400 excluded, 3) complete set. AI models for the various bpMRI combinations were trained 5-fold and statistically compared with receiver operating curve (ROC) analysis at patient and lesion level using respectively the DeLong's and permutation test. + RESULTS: The mean areas under the ROC of the three combinations were respectively 0.78 +-0.027 (SD), 0.76 +-0.051, and 0.77 +- 0.057. The partial area under the free ROC between 0.1 and 2.5 false positive lesions per patient was respectively 1.44 +- 0.22, 1.58 +- 0.18 and 1.50 +- 0.12. The slight difference in diagnostic performance (patient-level 0.01, lesion-level 0.06) when omitting the DWI b800 sequence is not significant (respectively p = 0.2 and p = 0.43). + CONCLUSION: We conclude that expert-level AI can identify redundant sequences in MRI. Furthermore, our method provides evidence that in DWI for csPCa detection, the b800 series can be omitted from the regular bpMRI protocol, decreasing total MRI scan time by 33\%.
These results can provide significant speed-up of any MRI.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@inproceedings{Froh23, + author = {Frohwitter, Nils and Hering, Alessa and M\"{o}ller, Ralf and Hartwig, Mattis}, + title = {Evaluating the Effects of a Priori Deep Learning Image Synthesis on Multi-Modal MR-to-CT Image Registration Performance}, + doi = {10.5220/0011669000003414}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.5220/0011669000003414}, + file = {Froh23.pdf:pdf\Froh23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Proceedings of the 16th International Joint Conference on Biomedical Engineering Systems and Technologies}, + citation-count = {0}, + automatic = {yes}, +} + +@article{Fuch03, + author = {Michael H Fuchsj\"ager and Cornelia M {Schaefer-Prokop} and Edith Eisenhuber and Peter Homolka and Michael Weber and Martin A Funovics and Mathias Prokop}, + title = {Impact of ambient light and window settings on the detectability of catheters on soft-copy display of chest radiographs at bedside}, + journal = AJR, + year = {2003}, + pmid = {14573447}, + doi = {10.2214/ajr.181.5.1811415}, + volume = {181}, + pages = {1415--1421}, + file = {Fuch03.pdf:pdf\\Fuch03.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {5}, + gsid = {8346239334545808443}, + gscites = {32}, +} + +@article{Fuet05, + author = {F\"utterer, Jurgen J. and Engelbrecht, Marc R. and Huisman, Henkjan J. and Jager, Gerrit J. and Hulsbergen-van de Kaa, Christina A. and Witjes, J Alfred and Barentsz, Jelle O.}, + title = {Staging prostate cancer with dynamic contrast-enhanced endorectal {MR} imaging prior to radical prostatectomy: experienced versus less experienced readers}, + journal = Radiology, + year = {2005}, + volume = {237}, + pages = {541--549}, + doi = {10.1148/radiol.2372041724}, + abstract = {{PURPOSE}: {T}o prospectively determine the accuracy of experienced and less experienced readers in the interpretation of combined {T}2-weighted fast spin-echo ({SE}) magnetic resonance ({MR}) images and dynamic contrast material-enhanced {MR} images compared with {T}2-weighted fast {SE} alone, with respect to differentiation of stage {T}2 versus stage {T}3 prostate carcinoma, with histologic analysis serving as the reference standard. {MATERIALS} {AND} {METHODS}: {I}nstitutional review board approval and informed consent were obtained, and 124 consecutive men (age range, 42-74 years; median age, 63 years) with biopsy-proved prostate cancer underwent {MR} imaging and were candidates for radical prostatectomy. {T}2-weighted fast {SE} {MR} images and multisection dynamic contrast-enhanced {MR} images with a 2-second time resolution for the whole prostate were obtained. {T}he {T}2-weighted and fused color-coded parametric dynamic contrast-enhanced {MR} images with {T}2-weighted images were evaluated prospectively and scored with regard to local extent by one experienced reader and evaluated retrospectively by two less experienced readers working in consensus by using a five-point scale; images with a score greater than or equal to four were considered indicative of {T}3 disease. {R}esults were correlated with whole-mount section histopathologic findings, and receiver operating characteristics analysis was performed. {RESULTS}: {T}wenty-five patients were excluded because of positive findings in the lymph nodes (n = 16), preoperative biopsy-proved seminal vesicle invasion (n = 5), and an absent dynamic dataset (n = 4). 
{N}inety-nine patients were included in this study. {T}he overall sensitivity, specificity, and accuracy of {MR} staging performance in prostate cancer with dynamic contrast-enhanced {MR} imaging was 69\% (24 of 35 patients), 97\% (62 of 64 patients), and 87\% (86 of 99 patients), respectively, for the experienced reader. {T}his difference was not significant ({P} = .48) when results were compared with results from the {T}2-weighted images. {S}taging performance for the less experienced readers with parametric dynamic contrast-enhanced {MR} imaging, however, resulted in significant improvement of the area under the receiver operating characteristics curve ({A}(z)) compared with {T}2-weighted {MR} imaging alone ({A}(z) = .66 and .82, respectively; {P} = .01). {CONCLUSION}: {T}he use of multisection dynamic contrast-enhanced {MR} imaging in staging prostate cancer showed significant improvement in staging performance for the less experienced readers but had no benefit for the experienced reader.}, + file = {Fuet05.pdf:pdf\\Fuet05.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + number = {2}, + pmid = {16244263}, + month = {11}, + gsid = {10790555521593470988,13094974208617290718}, + gscites = {253}, + ss_id = {f1b86085164de19137d89c94ca9a0acd90d9fb2f}, + all_ss_ids = {['f1b86085164de19137d89c94ca9a0acd90d9fb2f']}, +} + +@article{Fuet06, + author = {J. J. F\"utterer and S. W. T. P. J. Heijmink and T. W. J. Scheenen and J. Veltman and H. J. Huisman and P. Vos and C. A. {Hulsbergen-van de Kaa} and J. A. Witjes and P. F. M. Krabbe and A. Heerschap and J. O. Barentsz}, + title = {Prostate cancer localization with dynamic contrast-enhanced {MR} imaging and proton {MR} spectroscopic imaging}, + journal = Radiology, + year = {2006}, + volume = {241}, + pages = {449--458}, + doi = {10.1148/radiol.2412051866}, + abstract = {{PURPOSE}: {T}o prospectively determine the accuracies of {T}2-weighted magnetic resonance ({MR}) imaging, dynamic contrast material-enhanced {MR} imaging, and quantitative three-dimensional (3{D}) proton {MR} spectroscopic imaging of the entire prostate for prostate cancer localization, with whole-mount histopathologic section findings as the reference standard. {MATERIALS} {AND} {METHODS}: {T}his study was approved by the institutional review board, and informed consent was obtained from all patients. {T}hirty-four consecutive men with a mean age of 60 years and a mean prostate-specific antigen level of 8 ng/m{L} were examined. {T}he median biopsy {G}leason score was 6. {T}2-weighted {MR} imaging, dynamic contrast-enhanced {MR} imaging, and 3{D} {MR} spectroscopic imaging were performed, and on the basis of the image data, two readers with different levels of experience recorded the location of the suspicious peripheral zone and central gland tumor nodules on each of 14 standardized regions of interest ({ROI}s) in the prostate. {T}he degree of diagnostic confidence for each {ROI} was recorded on a five-point scale. {L}ocalization accuracy and {ROI}-based receiver operating characteristic ({ROC}) curves were calculated. {RESULTS}: {F}or both readers, areas under the {ROC} curve for {T}2-weighted {MR}, dynamic contrast-enhanced {MR}, and 3{D} {MR} spectroscopic imaging were 0.68, 0.91, and 0.80, respectively. {R}eader accuracy in tumor localization with dynamic contrast-enhanced imaging was significantly better than that with quantitative spectroscopic imaging ({P} < .01). 
{R}eader accuracy in tumor localization with both dynamic contrast-enhanced imaging and spectroscopic imaging was significantly better than that with {T}2-weighted imaging ({P} < .01). {CONCLUSION}: {C}ompared with use of {T}2-weighted {MR} imaging, use of dynamic contrast-enhanced {MR} imaging and 3{D} {MR} spectroscopic imaging facilitated significantly improved accuracy in prostate cancer localization.}, + file = {Fuet06.pdf:pdf\\Fuet06.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {2}, + pmid = {16966484}, + month = {11}, + gsid = {1735278023980059617}, + gscites = {603}, + ss_id = {ee32a128265b62aff8581e2a28bc9a7c8496cf0d}, + all_ss_ids = {['ee32a128265b62aff8581e2a28bc9a7c8496cf0d']}, +} + +@article{Fuet07, + author = {J. J. F\"utterer and T. W. J. Scheenen and S. W. T. P. J. Heijmink and H. J. Huisman and C. A. Hulsbergen-Van de Kaa and J. A. Witjes and A. Heerschap and J. O. Barentsz}, + title = {Standardized threshold approach using three-dimensional proton magnetic resonance spectroscopic imaging in prostate cancer localization of the entire prostate}, + journal = IR, + year = {2007}, + volume = {42}, + pages = {116--122}, + doi = {10.1097/01.rli.0000251541.03822.bb}, + abstract = {{OBJECTIVES}: {W}e sought to determine the localization accuracy using 3-dimensional (3{D}) proton magnetic resonance spectroscopic imaging ({MRSI}) of the entire prostate with a standardized thresholds approach in prostate cancer patients. {MATERIALS} {AND} {METHODS}: {I}n a prospective study, 32 consecutive patients were examined. {M}ean age and prostate specific antigen level were 61 years and 7.8 ng/m{L}, respectively. {M}edian biopsy {G}leason score was 6. {T}2-weighted {MRI} and 3{D} {MRSI} of the entire prostate were performed. {T}hree readers recorded the location of suspicious peripheral zone and central gland cancer nodules on a standardized division of the prostate (14 regions of interest [{ROI}]) using a standardized thresholds approach. {T}he degree of diagnostic confidence for each {ROI} was recorded on a 5-point scale. {R}econstructed whole-mount section histopathology was the standard of reference. {T}he sensitivity, specificity, positive, and negative predictive value, overall accuracy and interobserver agreement were calculated. {A}reas under the {ROI}-based receiver operating characteristic curve ({AUC}) and diagnostic performance parameters were determined. {RESULTS}: {T}he standardized thresholds approach had an accuracy of 81\% and an {AUC} of 0.85-0.86 for differentiation between benign and malignant {ROI}s in the peripheral zone and an accuracy of 87\% and an {AUC} of 0.86-0.91 for this differentiation in the central gland, respectively. {S}pecificities of 81\% to 88\% were achieved with accompanying sensitivities of 75\% to 92\% for both peripheral zone and central gland, respectively. {M}oderate to near-perfect interobserver agreement was demonstrated (kappa=0.42-0.91). 
{CONCLUSION}: {O}ur data indicate that a standardized zone-specific threshold approach in {MRSI} of the prostate is able to prospectively differentiate between benign and malignant tissues in the peripheral zone and the central gland with good accuracy and interobserver agreement.}, + file = {Fuet07.pdf:pdf\\Fuet07.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {2}, + pmid = {17220729}, + month = {2}, + gsid = {3858472456176980186}, + gscites = {86}, + ss_id = {6ee67f5869179ec04d752869d0fb9f7f41131fd8}, + all_ss_ids = {['6ee67f5869179ec04d752869d0fb9f7f41131fd8']}, +} + +@article{Fuett04, + author = {F\"utterer, Jurgen J. and Scheenen, Tom W J. and Huisman, Henkjan J. and Klomp, Dennis W J. and van Dorsten, Ferdi A. and Hulsbergen-van de Kaa, Christina A. and Witjes, J Alfred and Heerschap, Arend and Barentsz, Jelle O.}, + title = {Initial experience of 3 tesla endorectal coil magnetic resonance imaging and 1H-spectroscopic imaging of the prostate}, + journal = IR, + year = {2004}, + volume = {39}, + pages = {671--680}, + abstract = {RATIONALE AND OBJECTIVES: We sought to explore the feasibility of magnetic resonance imaging (MRI) of the prostate at 3T, with the knowledge of potential drawbacks of MRI at high field strengths. MATERIAL AND METHOD: MRI, dynamic MRI, and 1H-MR spectroscopic imaging were performed in 10 patients with prostate cancer on 1.5T and 3T whole-body scanners. Comparable scan protocols were used, and additional high-resolution measurements at 3T were acquired. For both field strengths the signal-to-noise ratio was calculated and image quality was assessed. RESULT: At 3T the signal-to-noise ratio improved. This resulted in increased spatial MRI resolution, which significantly improved anatomic detail. The increased spectral resolution improved the separation of individual resonances in MRSI. Contrast-enhanced time-concentration curves could be obtained with a doubled temporal resolution. CONCLUSIONS: Initial results of endorectal 3T 1H-MR spectroscopic imaging in prostate cancer patients showed potential advantages: the increase in spatial, temporal, and spectral resolution at higher field strength may result in an improved accuracy in delineating and staging prostate cancer.}, + file = {Fuett04.pdf:pdf\\Fuett04.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {11}, + pmid = {15486528}, + gsid = {4129980350114929790,6707800161905153786}, + gscites = {176}, + ss_id = {796c7d3bc7978fba27b164353a948395f88289f3}, + all_ss_ids = {['796c7d3bc7978fba27b164353a948395f88289f3']}, +} + +@article{Gal21, + author = {Gal, Roxanne and van Velzen, Sanne G. M. and Hooning, Maartje J. and Emaus, Marleen J. and van der Leij, Femke and Gregorowitsch, Madelijn L. and Blezer, Erwin L. A. and Gernaat, Sofie A. M. and Lessmann, Nikolas and Sattler, Margriet G. A. and Leiner, Tim and de Jong, Pim A. and Teske, Arco J. and Verloop, Janneke and Penninkhof, Joan J. and Vaartjes, Ilonca and Meijer, Hanneke and van Tol-Geerdink, Julia J. and Pignol, Jean-Philippe and van den Bongard, Desiree H. J. G. 
and Isgum, Ivana and Verkooijen, Helena M.}, + title = {Identification of Risk of Cardiovascular Disease by Automatic Quantification of Coronary Artery Calcifications on Radiotherapy Planning CT Scans in Patients With Breast Cancer}, + journal = {JAMA Oncology}, + doi = {10.1001/jamaoncol.2021.1144}, + pmid = {33956083}, + abstract = {Cardiovascular disease (CVD) is common in patients treated for breast cancer, especially in patients treated with systemic treatment and radiotherapy and in those with preexisting CVD risk factors. Coronary artery calcium (CAC), a strong independent CVD risk factor, can be automatically quantified on radiotherapy planning computed tomography (CT) scans and may help identify patients at increased CVD risk. To evaluate the association of CAC with CVD and coronary artery disease (CAD) in patients with breast cancer. In this multicenter cohort study of 15915 patients with breast cancer receiving radiotherapy between 2005 and 2016 who were followed until December 31, 2018, age, calendar year, and treatment-adjusted Cox proportional hazard models were used to evaluate the association of CAC with CVD and CAD. Overall CAC scores were automatically extracted from planning CT scans using a deep learning algorithm. Patients were classified into Agatston risk categories (0, 1-10, 11-100, 101-399, >400 units). Occurrence of fatal and nonfatal CVD and CAD were obtained from national registries. Of the 15915 participants included in this study, the mean (SD) age at CT scan was 59.0 (11.2; range, 22-95) years, and 15879 (99.8%) were women. Seventy percent (n=11179) had no CAC. Coronary artery calcium scores of 1 to 10, 11 to 100, 101 to 400, and greater than 400 were present in 10.0% (n=1584), 11.5% (n=1825), 5.2% (n=830), and 3.1% (n=497) respectively. After a median follow-up of 51.2 months, CVD risks increased from 5.2% in patients with no CAC to 28.2% in patients with CAC scores higher than 400. After adjustment, CVD risk increased with higher CAC score. Coronary artery calcium was particularly strongly associated with CAD. The association between CAC and CVD was strongest in patients treated with anthracyclines and patients who received a radiation boost. This cohort study found that coronary artery calcium on breast cancer radiotherapy planning CT scan results was associated with CVD, especially CAD. Automated CAC scoring on radiotherapy planning CT scans may be used as a fast and low-cost tool to identify patients with breast cancer at increased risk of CVD, allowing implementing CVD risk-mitigating strategies with the aim to reduce the risk of CVD burden after breast cancer.}, + year = {2021}, + volume = {7}, + number = {7}, + pages = {1024-1032}, + optnote = {DIAG, RADIOLOGY}, + algorithm = {https://grand-challenge.org/algorithms/calcium-scoring-in-ct-showing-the-heart/}, + file = {Gal21.pdf:pdf\\Gal21.pdf:PDF}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235298}, + ss_id = {3b4df694a48f1f01b83215e8be048e1f83ef9bb9}, + all_ss_ids = {['3b4df694a48f1f01b83215e8be048e1f83ef9bb9']}, + gscites = {25}, +} + +@article{Gala92, + author = {Galanski, M. and Prokop, M. and Thorns, E. and Oestmann, J. W. and Reichelt, S. and Haubitz, B. and Milbradt, H. and Gr\"aser, A. and Verner, L. 
and Schaefer, C.}, + title = {The visibility of a central venous catheter using digital luminescence radiography in intensive care radiology}, + journal = Rofo, + year = {1992}, + volume = {156}, + pages = {68--72}, + doi = {10.1055/s-2008-1032838}, + abstract = {The aim of the following study was to assess the impact of dose alterations on the detection of catheters. We compared the performance of well-exposed conventional and digital portable chest radiographs in the detection of thin catheters and tested the influences of dose alterations. Portable chest radiographs of 20 patients were obtained with conventional film/screen (FR) and with storage phosphors at 50\% (SRL), 100\% (SRN), and 250\% (SRH) of the conventionally required exposure dose. The region of the mediastinum was subdivided into an average of 18 fields, 50\% of which were superimposed with thin catheter segments. ROC analysis of 11,600 observations by 8 readers found only SRH equivalent to FR in catheter visualisation. Performance decreased significantly with SRN and SRL. Detection of low contrast catheters was found to be significantly decreased in storage phosphor radiographs obtained with standard exposure dose. A dose reduction is not feasible with current equipment if performance equivalent to conventional radiography is to be achieved.}, + file = {Gala92.pdf:pdf\\Gala92.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {1733477}, + month = {1}, + gsid = {7982434057463882154}, + gscites = {5}, +} + +@article{Gala92a, + author = {Galanski, M. and Schmoll, E. and Reichelt, S. and B\"ohmer, G. and Prokop, M. and Schaefer, C. and Sch\"uler, A. and Ringe, B. and Schmidt, F. W. and Schmoll, H. J.}, + title = {Chemoembolization of hepatocellular carcinoma in cases of isolated liver involvement}, + journal = Radiologe, + year = {1992}, + volume = {32}, + pages = {49--55}, + abstract = {Chemoembolization is an effective treatment for hepatocellular carcinoma, giving results equally as good as surgical therapy for T2 tumours. Survival can be prolonged and side-effects can be reduced by combining Lipiodol and Gelfoam for chemoembolization, employing a modified technique, with repeated procedures, and using appropriate follow-up treatment. The toxicity of the procedure is acceptable, but it requires supportive therapy necessitating an intense interdisciplinary co-operation.}, + optnote = {DIAG}, + number = {2}, + pmid = {1314400}, +} + +@article{Gala93, + author = {Galanski, M. and Prokop, M. and Chavan, A. and Schaefer, C. M. and Jandeleit, K. and Nischelsky, J. E.}, + title = {Renal arterial stenoses: spiral {CT} angiography}, + journal = Radiology, + year = {1993}, + volume = {189}, + pages = {185--192}, + abstract = {To evaluate the role of spiral computed tomography (CT) in diagnosis of renal arterial stenoses. In 22 patients with suspected renovascular hypertension, spiral CT angiography of the renal arteries was compared with arterial digital subtraction angiography (DSA). For each patient, the peak transit time t (from intravenous injection to maximum enhancement of the abdominal aorta) was determined with a bolus injection of 15 mL of contrast medium. Spiral scanning started at the level of the superior mesenteric artery with a delay of t + 5 seconds after the start of injection of 100-150 mL of contrast medium. Spiral CT angiography demonstrated multiple renal arteries in five patients. A renal artery stenosis or occlusion was found in 15 of 22 patients (22 of 54 arteries). 
All findings were confirmed with arterial DSA. Vessel contrast on spiral CT scans was good to excellent in 19 of 22 patients. For diagnosis, axial-section and multiplanar reformatted images were superior to three-dimensional surface reconstructions and maximum-intensity projections. Spiral CT angiography is a promising screening method for renal artery stenosis.}, + optnote = {DIAG}, + number = {1}, + pmid = {8372191}, + month = {10}, + gsid = {6100577287151225155}, + gscites = {326}, +} + +@article{Gala93a, + author = {Galanski, M. and Chavan, A. and Prokop, M. and Schaefer, C. and Harms, J.}, + title = {Current status of the imaging modalities in the pre- and postoperative diagnostic workup of liver transplant patients}, + journal = Bildgebung, + year = {1993}, + volume = {60}, + number = {2}, + pages = {56--62}, + abstract = {Before transplantation, imaging procedures are mainly used to exclude factors which may serve as contraindications, render surgery difficult, or necessitate a modification of the operative technique. In addition, assessment of liver volume is necessary before segmental liver transplantation. Sonography or other cross-sectional imaging modalities are sufficient for these purposes. After transplantation, imaging is principally required in cases with suspected complications. Sonography, including Duplex and Doppler sonography, is an excellent first investigative modality for this purpose in addition to assessment of the clinical and biochemical parameters. Sonography can diagnose not only vascular complications but also biliary and infective complications; the former may also be a concealed cause of the latter. In unclear infections sonography often has to be supplemented by other modalities like CT. In equivocal cases a puncture is indicated. Invasive procedures are necessary in only selected cases, in particular when an intervention is under consideration. At present MR plays a limited role in the diagnostic workup as it rarely offers additional information except in very few cases. The early diagnosis of rejection cannot be reliably made by any of the imaging modalities and is essentially dependent on the biopsy.}, + optnote = {DIAG}, + pmid = {8358213}, + gsid = {1218019720779703823}, + gscites = {1}, +} + +@article{Gala94, + author = {Galanski, M. and Prokop, M. and Chavan, A. and Schaefer, C. and Jandeleit, K. and Olbricht, C.}, + title = {[Accuracy of {CT} angiography in the diagnosis of renal artery stenosis]}, + journal = Rofo, + year = {1994}, + volume = {161}, + pages = {519--525}, + doi = {10.1055/s-2008-1032579}, + abstract = {Spiral CT in the form of CT angiography (CTA) provides a new method for the investigation of the large vessels. 52 patients with suspected renovascular hypertension were examined by CTA and arterial DSA. The computer tomographic and angiographic findings in 124 renal arteries were compared. Sensitivity of CTA for demonstrating renal artery stenosis was 95\%, and for relevant stenoses it actually reached 100\%. Specificity was 92\%. Positive/negative predictive value was 85\% and 100\%. CTA proved outstandingly good for demonstrating or excluding renal artery stenosis and was superior to other imaging methods in several respects. CTA requires high contrast values, optimal timing and careful evaluation. 
Amongst various forms of image reconstruction, interactive viewing of the primary axial images and multiplanar reconstruction in a cine mode are essential and MIP projections are valuable.}, + optnote = {DIAG}, + number = {6}, + pmid = {7803775}, + month = {12}, + gsid = {2112058975235645642}, + gscites = {35}, +} + +@inproceedings{Gall11b, + author = {Gallardo-Estrella, L. and Poncela, A.}, + title = {Human/Robot Interface for Voice Teleoperation of a Robotic Platform}, + booktitle = ADVCI, + year = {2011}, + volume = {6691}, + series = LNCS, + publisher = {Springer Science + Business Media}, + pages = {240-247}, + doi = {10.1007/978-3-642-21501-8_30}, + abstract = {Speech is the most natural way of human communication. If the interaction between humans and machines is accomplished through voice, humans will feel more comfortable. Thus, this paper presents a Human/Robot Interface to teleoperate a robot by means of voice commands. To that purpose, an acoustic model in Spanish has been developed to recognize voice commands with Julius. The model is user dependent and has been suited to the proposed set of commands to achieve a better recognition rate. One of the advantages of the proposed speech recognition mechanism is that it can be easily adapted to a new list of commands. A robot has been successfully teleoperated with voice. Results about the recognition rate are promising in using the proposed Human/Robot Interface for voice teleoperation.}, + file = {Gall11b.pdf:pdf\\Gall11b.pdf:PDF}, + journal = LNCS, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Gall12a, + author = {L. Gallardo-Estrella and B. van Ginneken and O. M. Mets and P. Zanen and P. A. de Jong and C. M. Schaefer-Prokop and E. M. van Rikxoort}, + title = {Trachea Shape Analysis from Inspiration and Expiration Thoracic Computed Tomography Scans}, + booktitle = RSNA, + year = {2012}, + abstract = {{PURPOSE} Changes in trachea shape during breathing may be related to chronic obstructive pulmonary disease ({COPD}). This study presents a method to automatically measure shape changes of the trachea from paired inspiration and expiration computed tomography ({CT}) scans and investigates the influence on {COPD} {GOLD} stage classification. {METHOD AND MATERIALS} A database of 184 subjects well distributed over {GOLD} stages 0 to 4 who received inspiration {CT} (16x0.75mm, 120-140 {kVp}, 30-160 {mAs}), expiration {CT} (90 {kVp}, 20 {mAs}) and pulmonary function testing on the same day was constructed. We developed software to automatically extract the lungs, the trachea and the carina in all scans based on region growing and morphological processing. The shape of the trachea ({TS}) was encoded in axial sections by the length of eight equiangular rays cast from the center of gravity of the trachea. {TS} was computed in the inspiration scan in three axial slices 1.5, 2.5, 3.5 cm above the carina, and in corresponding expiration sections obtained using elastic registration based on B-spline deformations and mutual information on the slices from the inspiration scan. The inspiration and expiration features were concatenated and normalized by dividing by the length of the longest ray in the inspiration scan. In addition, an emphysema score ({ES}) was computed as the percentage of lung voxels below -950 {HU} in inspiration scans. The database was divided into a training set and a test set with equal size and distribution of {GOLD} stages. 
A linear discriminant classifier was trained to classify subjects into {GOLD} stage based on {ES}, {TS} or {ES}+{TS}. For the last two feature sets, Principal Component Analysis was applied to reduce the number of features. {RESULTS} The percentages of subjects correctly classified were 35%, 33% and 44% for the feature sets {ES}, {TS} and {ES}+{TS}. Thus, including tracheal shape features improved performance by 9 percentage points compared to using only an emphysema measure. Using {ES}+{TS} the percentage of subjects assigned to either the correct class or a class neighbouring the correct one was 80%. {CONCLUSION} Tracheal morphology changes can be extracted automatically from {CT} scans. Combining the proposed trachea shape features with emphysema score, classification into {GOLD} stages improved substantially. {CLINICAL RELEVANCE/APPLICATION} Trachea morphology in inspiration and expiration scans can provide useful information for {GOLD} stage classification of {COPD}.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Gall13, + author = {L. Gallardo-Estrella and B. van Ginneken and E. M. van Rikxoort}, + title = {Normalization of {CT} scans reconstructed with different kernels to reduce variability in emphysema measurements}, + booktitle = MI, + year = {2013}, + volume = {8670}, + series = SPIE, + pages = {86700E}, + doi = {10.1117/12.2007962}, + abstract = {{C}hronic {O}bstructive {P}ulmonary {D}isease (COPD) is a lung disease characterized by progressive air flow limitation caused by emphysema and chronic bronchitis. Emphysema is quantified from chest computed tomography {(CT)} scans as the percentage of attenuation values below a fixed threshold. The emphysema quantification varies substantially between scans reconstructed with different kernels, limiting the possibilities to compare emphysema quantifications obtained from scans with different reconstruction parameters. In this paper we propose a method to normalize scans reconstructed with different kernels to have the same characteristics as scans reconstructed with a reference kernel and investigate if this normalization reduces the variability in emphysema quantification. The proposed normalization splits a {CT} scan into different frequency bands based on hierarchical unsharp masking. Normalization is performed by changing the energy in each frequency band to the average energy in each band in the reference kernel. A database of 15 subjects with {COPD} was constructed for this study. All subjects were scanned at total lung capacity and the scans were reconstructed with four different reconstruction kernels. The normalization was applied to all scans. Emphysema quantification was performed before and after normalization. It is shown that the emphysema score varies substantially before normalization but the variation diminishes after normalization.}, + file = {Gall13.pdf:pdf\\Gall13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {13232639958665219632}, + gscites = {4}, + ss_id = {a9085fd32666cee3c7dd7117d4654f4e09ad4337}, + all_ss_ids = {['a9085fd32666cee3c7dd7117d4654f4e09ad4337']}, +} + +@conference{Gall14, + author = {Gallardo-Estrella, L. and Pompe, E. and Mohamed Hoesein, F. A. and De Jong, P. and van Ginneken, B. and van Rikxoort, E. M. and de Koning, H. and Oudkerk, M. and Lammers, J.}, + title = {Relationship between Lung Function Parameters and {CT} Measurements of Emphysema, Airways, and Tracheal Collapse in Subjects with Mild {COPD}}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE. 
Excessive expiratory tracheal collapse may be related to chronic obstructive pulmonary disease (COPD). However, the relationship between tracheal changes, CT measurements, and lung function is not very well known. This study aims to assess the correlation between pulmonary function tests (PFT) and CT measurements of emphysema, airways, and tracheal collapse in patients with and without COPD. METHOD AND MATERIALS. A database of 1032 male subjects who received inspiration CT (16x0.75mm, 120-140 kVp, 30-160 mAs), expiration CT (90 kVp, 20 mAs) and PFT on the same day was constructed. 389 participants (38%) had COPD, defined as ratio of forced expiratory volume in 1 second (FEV1) to forced vital capacity (FVC) <70%. The number of patients per GOLD stage (0-3) was 643, 247, 122, 20. CT parameters assessed were tracheal collapsibility (TCo), defined as the highest ratio of cross-sectional area of the trachea in expiration and inspiration; emphysema score (ES), computed as the percentage of lung voxels below -950 HU in inspiration scans; air trapping (AT), defined as the ratio of mean lung density in expiration and inspiration; and square root of the wall area of a hypothetical airway of 10-mm internal perimeter of segmental and subsegmental airways (Pi10). Correlations were evaluated using the Spearman correlation coefficients and differences between normal and COPD subjects with a Student's t-test. RESULTS. ES, Pi10 and AT correlated (p < 0.01) with FEV1 (r= -0.17, r= -0.53, r= -0.36) and FEV1/FVC (r=-0.48, r=-0.34, r=-0.51). TCo did not correlate with FEV1 (r= 0.01), but with FEV1/FVC (r=-0.14). Mean ± standard deviation (SD) ES, Pi10, AT, and TCo in the no-COPD group were 0.82 ± 0.98, 2.29 ± 0.44, 81.57 ± 5.76, and 0.67 ± 0.13. Mean ± SD ES, Pi10, AT, and TCo in the COPD group were 2.80 ± 4.04, 2.65 ± 0.55, 87.18 ± 4.93, and 0.7 ± 0.14. These values were significantly different between both groups (p < 0.0001). CONCLUSION. ES, Pi10, and AT are significantly different between normal and COPD subjects and are correlated to PFT. TCo is also significantly different between normal and COPD subjects but does not show a strong correlation with PFT. CLINICAL RELEVANCE/APPLICATION. Emphysema, tracheal collapse, and airway morphology obtained from chest CT data may be used to discriminate between normal and mild COPD subjects.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Gall16, + author = {Gallardo-Estrella, L. and Lynch, D. A. and Prokop, M. and Stinson, D. and Zach, J. and Judy, P. F. and van Ginneken, B. and van Rikxoort, E. M.}, + title = {Normalizing computed tomography data reconstructed with different filter kernels: effect on emphysema quantification}, + journal = ER, + year = {2016}, + volume = {26}, + pages = {478--486}, + doi = {10.1007/s00330-015-3824-y}, + abstract = {To propose and evaluate a method to reduce variability in emphysema quantification among different computed tomography (CT) reconstructions by normalizing CT data reconstructed with varying kernels. We included 369 subjects from the COPDGene study. For each subject, spirometry and a chest CT reconstructed with two kernels were obtained using two different scanners. Normalization was performed by frequency band decomposition with hierarchical unsharp masking to standardize the energy in each band to a reference value. Emphysema scores (ES), the percentage of lung voxels below -950 HU, were computed before and after normalization. 
Bland-Altman analysis and correlation between ES and spirometry before and after normalization were compared. Two mixed cohorts, containing data from all scanners and kernels, were created to simulate heterogeneous acquisition parameters. The average difference in ES between kernels decreased for the scans obtained with both scanners after normalization (7.7 ± 2.7 to 0.3 ± 0.7; 7.2 ± 3.8 to -0.1 ± 0.5). Correlation coefficients between ES and FEV1, and FEV1/FVC increased significantly for the mixed cohorts. Normalization of chest CT data reduces variation in emphysema quantification due to reconstruction filters and improves correlation between ES and spirometry. • Emphysema quantification is sensitive to the reconstruction kernel used. • Normalization allows comparison of emphysema quantification from images reconstructed with varying kernels. • Normalization allows comparison of emphysema quantification obtained with scanners from different manufacturers. • Normalization improves correlation of emphysema quantification with spirometry. • Normalization can be used to compare data from different studies and centers.}, + file = {Gall16.pdf:pdf\\Gall16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26002132}, + month = {5}, + gsid = {15682115650147851893}, + gscites = {52}, + ss_id = {0586cbd2a31688a22dac13eafb768fa2edf56b47}, + all_ss_ids = {['0586cbd2a31688a22dac13eafb768fa2edf56b47']}, +} + +@article{Gall17, + author = {Gallardo Estrella, Leticia and Pompe, Esther and Kuhnigk, Jan-Martin and Lynch, David A and Bhatt, Surya P and van Ginneken, Bram and van Rikxoort, Eva Marjolein}, + title = {Computed tomography quantification of tracheal abnormalities in {COPD} and their influence on airflow limitation}, + journal = MP, + year = {2017}, + volume = {44}, + issue = {7}, + month = {7}, + pages = {3594--3603}, + doi = {10.1002/mp.12274}, + abstract = {To present a method to automatically quantify tracheal morphology changes during breathing and investigate its contribution to airflow impairment when adding CT measures of emphysema, airway wall thickness, air trapping and ventilation. Because tracheal abnormalities often occur localized, a method is presented that automatically determines the most abnormal trachea section based on automatically computed sagittal and coronal lengths. In this most abnormal section, trachea morphology is encoded using four equiangular rays from the center of the trachea and the normalized lengths of these rays are used as features in a classification scheme. Consequently, trachea measurements are used as input for classification into GOLD stages in addition to emphysema, air trapping and ventilation. A database of 200 subjects distributed across all GOLD stages is used to evaluate the classification with a k nearest neighbour algorithm. Performance is assessed in two experimental settings: (a) when only inspiratory scans are taken; (b) when both inspiratory and expiratory scans are available. Given only an inspiratory CT scan, measuring tracheal shape provides complementary information only to emphysema measurements. The best performing set in the inspiratory setting was a combination of emphysema and bronchial measurements. The best performing feature set in the inspiratory-expiratory setting includes measurements of emphysema, ventilation, air trapping, and trachea. 
Inspiratory and inspiratory-expiratory settings showed similar performance. The fully automated system presented in this study provides information on trachea shape at inspiratory and expiratory CT. Addition of tracheal morphology features improves the ability of emphysema and air trapping CT-derived measurements to classify COPD patients into GOLD stages and may be relevant when investigating different aspects of COPD.}, + file = {Gall17.pdf:pdf\\Gall17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28423189}, + gsid = {9797853204362969292,6492259953092023726}, + gscites = {4}, + all_ss_ids = {['d1e0a041fc417d82c584191ecf5b3b536127d9e0', 'e15e0cd21632aa9d6643b314a4d1d1a6580f2b39']}, +} + +@article{Gall17a, + author = {Gallardo-Estrella, Leticia and Pompe, Esther and de Jong, Pim A and Jacobs, Colin and van Rikxoort, Eva M and Prokop, Mathias and S\'{a}nchez, Clara I and van Ginneken, Bram}, + title = {Normalized emphysema scores on low dose CT: Validation as an imaging biomarker for mortality}, + journal = PLOSONE, + year = {2017}, + volume = {12}, + issue = {12}, + pages = {e0188902}, + doi = {10.1371/journal.pone.0188902}, + abstract = {The purpose of this study is to develop a computed tomography (CT) biomarker of emphysema that is robust across reconstruction settings, and evaluate its ability to predict mortality in patients at high risk for lung cancer. Data included baseline CT scans acquired between August 2002 and April 2004 from 1737 deceased subjects and 5740 surviving controls taken from the National Lung Screening Trial. Emphysema scores were computed in the original scans (origES) and after applying resampling, normalization and bullae analysis (normES). We compared the prognostic value of normES versus origES for lung cancer and all-cause mortality by computing the area under the receiver operator characteristic curve (AUC) and the net reclassification improvement (NRI) for follow-up times of 1-7 years. normES was a better predictor of mortality than origES. The 95% confidence intervals for the differences in AUC values indicated a significant difference for all-cause mortality for 2 through 6 years of follow-up, and for lung cancer mortality for 1 through 7 years of follow-up. 95% confidence intervals in NRI values showed a statistically significant improvement in classification for all-cause mortality for 2 through 7 years of follow-up, and for lung cancer mortality for 3 through 7 years of follow-up. Contrary to conventional emphysema score, our normalized emphysema score is a good predictor of all-cause and lung cancer mortality in settings where multiple CT scanners and protocols are used.}, + file = {Gall17a.pdf:pdf\\Gall17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29227997}, + month = {12}, + gsid = {12834685557055984441}, + gscites = {11}, + ss_id = {02a6b1ae18332136192bc7977c366e6340e5c62c}, + all_ss_ids = {['02a6b1ae18332136192bc7977c366e6340e5c62c']}, +} + +@conference{Galp11, + author = {M. Galperin-Aizenberg and E. M. van Rikxoort and M. S. Brown and H.J. Kim and F. Abtin and J. G. Goldin}, + title = {Automatic Fissural Integrity Quantification from Chest {CT} Predicts Lobar Atelectasis in Endobronchial Treatment}, + booktitle = ATS, + year = {2011}, + abstract = {{RATIONALE} {P}atients with emphysema are investigated for minimally invasive occlusive endobronchial treatment as an alternative to lung volume reduction surgery ({LVRS}). Presence of collateral flow through incomplete pulmonary fissure may affect the treatment outcome. 
{P}revious research has shown that visual fissure integrity score is associated with lobar collapse. {T}he purpose of this study is to evaluate if an automatic quantitative fissural integrity score from chest {CT} scans is predictive of achieving lobe volume reduction in patients with severe emphysema treated with minimally invasive treatment. {METHODS} {B}aseline and post treatment follow up {CT} scans from 18 subjects treated with occlusive endobronchial treatment were selected from an anonymized image database. {CT} imaging of the lung was performed at suspended total lung capacity with 1.25 mm slice thickness, 1mm slice spacing, at 120 {KV} and 90 m{A}s. The target lobe for lobar volume reduction was selected as the lobe with the most severe emphysema. {Q}uantification of lobar volumes was performed using in-house developed software that segments the lungs and lobes. {T}he differences between lobar volumes at the baseline and follow up were calculated. {I}n the baseline scans, fissures were automatically segmented and the fissural integrity was quantified as the percentage of the lobar border defined by a fissure. {T}he association between automatically computed fissure integrity percentage at baseline and lobar volume reduction at follow up was assessed using spearman rank test. {T}he odds of complete lobar collapse (volume change of more than 85%) in subjects with complete fissures (integrity percentage more than 90%) was assessed using logistic model. {RESULTS} {T}he correlation between fissure integrity percentage and percentage lobar collapse was 0.53 (p=0.023). The odds ratio of complete lobar collapse was 2.4 times higher for subjects with complete fissure than with incomplete fissure (p=0.035). {CONCLUSION} {A}n automated system to both detect and assess fissure integrity correlated with lobar volume reduction post endoluminal occlusion techniques. {A}dvantages of automatic fissural completeness quantification over visual assessment are robustness, precision, and time consumption.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, +} + +@inproceedings{Garc06, + author = {M. Garc\'{i}a and C. I. S\'{a}nchez and A. D\'{i}ez and M. I. L\'{o}pez and R. Hornero}, + title = {Detection of hard exudates based on neural networks as a diagnostic aid in the screening for diabetic retinopathy}, + booktitle = {Telemedicine in Future Health}, + year = {2006}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13839770616103893450}, + gscites = {5}, +} + +@inproceedings{Garc07, + author = {M. Garc\'{i}a and R. Hornero and C. I. S\'{a}nchez and M. I. L\'{o}pez and A. D\'{i}ez}, + title = {Feature extraction and selection for the automatic detection of hard exudates in retinal images}, + booktitle = EMBS, + year = {2007}, + pages = {4969--4972}, + doi = {10.1109/IEMBS.2007.4353456}, + abstract = {{D}iabetic {R}etinopathy ({DR}) is a common cause of visual impairment among people of working age in industrialized countries. {A}utomatic recognition of {DR} lesions, like hard exudates ({HE}s), in fundus images can contribute to the diagnosis and screening of this disease. {I}n this study, we extracted a set of features from image regions and selected the subset which best discriminates between {HE}s and the retinal background. {T}he selected features were then used as inputs to a multilayer perceptron ({MLP}) classifier to obtain a final segmentation of {HE}s in the image. {O}ur database was composed of 100 images with variable color, brightness, and quality. 
50 of them were used to train the {MLP} classifier and the remaining 50 to assess the performance of the method. {U}sing a lesion-based criterion, we achieved a mean sensitivity of 84.4\% and a mean positive predictive value of 62.7\%. {W}ith an image-based criterion, our approach reached a 100\% mean sensitivity, 84.0\% mean specificity and 92.0\% mean accuracy.}, + optnote = {DIAG, RADIOLOGY}, + pmid = {18003122}, + month = {8}, + gsid = {8987291218969393102}, + gscites = {59}, +} + +@inproceedings{Garc08a, + author = {M. Garc\'{i}a and C. I. S\'{a}nchez and M. I. L\'{o}pez and A. D\'{i}ez and R. Hornero}, + title = {Automatic detection of red lesions in retinal images using a multilayer perceptron neural network}, + booktitle = EMBS, + year = {2008}, + volume = {2008}, + pages = {5425--5428}, + doi = {10.1109/IEMBS.2008.4650441}, + abstract = {{D}iabetic {R}etinopathy ({DR}) is an important cause of visual impairment among people of working age in industrialized countries. {A}utomatic detection of {DR} clinical signs in retinal images would be an important contribution to the diagnosis and screening of the disease. {T}he aim of the present study is to automatically detect some of these clinical signs: red lesions ({RL}s), like hemorrhages ({HE}s) and microaneurysms ({MA}s). {B}ased on their properties, we extracted a set of features from image regions and selected the subset which best discriminated between these {RL}s and the retinal background. {A} multilayer perceptron ({MLP}) classifier was subsequently used to obtain the final segmentation of {RL}s. {O}ur database was composed of 100 images with variable color, brightness, and quality. 50 of them were used to obtain the examples to train the {MLP} classifier. {T}he remaining 50 images were used to test the performance of the method. {U}sing a lesion based criterion, we reached a mean sensitivity of 86.1\% and a mean positive predictive value of 71.4\%. {W}ith an image-based criterion, we achieved a 100\% mean sensitivity, 60.0\% mean specificity and 80.0\% mean accuracy.}, + file = {Garc08a.pdf:retina\\Garc08a.pdf:PDF;Garc08a.pdf:pdf\\Garc08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19163944}, + month = {8}, + gsid = {12824459198516979159}, + gscites = {54}, + ss_id = {56f820f835847a8d976f67889dce8d3904d838cf}, + all_ss_ids = {['56f820f835847a8d976f67889dce8d3904d838cf']}, +} + +@article{Garc09, + author = {M. Garc\'{i}a and C. I. S\'{a}nchez and J. Poza and M. I. L\'{o}pez and R. Hornero}, + title = {Detection of hard exudates in retinal images using a radial basis function classifier}, + journal = AOBE, + year = {2009}, + volume = {37}, + number = {7}, + pages = {1448--1463}, + doi = {10.1007/s10439-009-9707-0}, + abstract = {{D}iabetic retinopathy ({DR}) is one of the most important causes of visual impairment. {A}utomatic recognition of {DR} lesions, like hard exudates ({EX}s), in retinal images can contribute to the diagnosis and screening of the disease. {T}he aim of this study was to automatically detect these lesions in fundus images. {T}o achieve this goal, each image was normalized and the candidate {EX} regions were segmented by a combination of global and adaptive thresholding. {T}hen, a group of features was extracted from image regions and the subset which best discriminated between {EX}s and retinal background was selected by means of logistic regression ({LR}). {T}his optimal subset was subsequently used as input to a radial basis function ({RBF}) neural network. 
{T}o improve the performance of the proposed algorithm, some noisy regions were eliminated by an innovative postprocessing of the image. {T}he main novelty of the paper is the use of {LR} in conjunction with {RBF} and the proposed postprocessing technique. {O}ur database was composed of 117 images with variable color, brightness and quality. {T}he database was divided into a training set of 50 images (from {DR} patients) and a test set of 67 images (40 from {DR} patients and 27 from healthy retinas). {U}sing a lesion-based criterion (pixel resolution), a mean sensitivity of 92.1\% and a mean positive predictive value of 86.4\% were obtained. {W}ith an image-based criterion, a mean sensitivity of 100\%, mean specificity of 70.4\% and mean accuracy of 88.1\% were achieved. {T}hese results suggest that the proposed method could be a diagnostic aid for ophthalmologists in the screening for {DR}.}, + file = {Garc09.pdf:pdf\\Garc09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19430906}, + month = {5}, + gsid = {17642644843566285360}, + gscites = {71}, + ss_id = {8c75cf4de784901aba7d8671a1be37789073407b}, + all_ss_ids = {['8c75cf4de784901aba7d8671a1be37789073407b']}, +} + +@article{Garc09a, + author = {M. Garc\'{i}a and C. I. S\'{a}nchez and M. I. L\'{o}pez and D. Ab\'{a}solo and R. Hornero}, + title = {Neural network based detection of hard exudates in retinal images}, + journal = CMPB, + year = {2009}, + volume = {93}, + number = {1}, + pages = {9--19}, + doi = {10.1016/j.cmpb.2008.07.006}, + abstract = {{D}iabetic retinopathy ({DR}) is an important cause of visual impairment in developed countries. {A}utomatic recognition of {DR} lesions in fundus images can contribute to the diagnosis of the disease. {T}he aim of this study is to automatically detect one of these lesions, hard exudates ({EX}s), in order to help ophthalmologists in the diagnosis and follow-up of the disease. {W}e propose an algorithm which includes a neural network ({NN}) classifier for this task. {T}hree {NN} classifiers were investigated: multilayer perceptron ({MLP}), radial basis function ({RBF}) and support vector machine ({SVM}). {O}ur database was composed of 117 images with variable colour, brightness, and quality. 50 of them (from {DR} patients) were used to train the {NN} classifiers and 67 (40 from {DR} patients and 27 from healthy retinas) to test the method. {U}sing a lesion-based criterion, we achieved a mean sensitivity ({SE}(l)) of 88.14\% and a mean positive predictive value ({PPV}(l)) of 80.72\% for {MLP}. {W}ith {RBF} we obtained {SE}(l)=88.49\% and {PPV}(l)=77.41\%, while we reached {SE}(l)=87.61\% and {PPV}(l)=83.51\% using {SVM}. {W}ith an image-based criterion, a mean sensitivity ({SE}(i)) of 100\%, a mean specificity ({SP}(i)) of 92.59\% and a mean accuracy ({AC}(i)) of 97.01\% were obtained with {MLP}. {U}sing {RBF} we achieved {SE}(i)=100\%, {SP}(i)=81.48\% and {AC}(i)=92.54\%. {W}ith {SVM} the image-based results were {SE}(i)=100\%, {SP}(i)=77.78\% and {AC}(i)=91.04\%.}, + file = {Garc09a.pdf:pdf\\Garc09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {18778869}, + month = {1}, + gsid = {11747171028471696967}, + gscites = {193}, + ss_id = {87a7e292cd3b829687b776d158feb9ba2a64c6ac}, + all_ss_ids = {['87a7e292cd3b829687b776d158feb9ba2a64c6ac']}, +} + +@inproceedings{Garc15, + author = {Garcia, E. and Oliver, A. and Diez, Y. and Diaz O. and Georgii, J. and Gubern-M\'{e}rida, A. and Mart\'{i}, J. 
and Mart\'{i}, R.}, + title = {Comparing regional breast density using {F}ull-{F}ield {D}igital {M}ammograms and {M}agnetic {R}esonance {I}maging: A preliminary study}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2015}, + file = {Garc15.pdf:pdf\\Garc15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Garc16, + author = {Garc\'{i}a, E. and Oliver, A. and Diez, Y. and Diaz, O. and Gubern-M\'{e}rida, A. and Llad\'{o}, X. and Mart\'{i}, J.}, + title = {Comparison of Four Breast Tissue Segmentation Algorithms for Multi-modal MRI to X-ray Mammography Registration}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {493-500}, + doi = {10.1007/978-3-319-41546-8_62}, + abstract = {Breast MRI to X-ray mammography registration using patient-specific biomechanical models is one challenging task in medical imaging. To solve this problem, the accurate knowledge about internal and external factors of the breast, such as internal tissues distribution, is needed for modelling a suitable physical behavior. In this work, we compare four different tissue segmentation algorithms, two intensity-based segmentation algorithms (Fuzzy C-means and Gaussian mixture model) and two improvements that incorporate spatial information (Kernelized Fuzzy C-means and Markov Random Fields, respectively), and analyze their effect on the multi-modal registration. The overall framework consists of using a density estimation software (VolparaTM) to extract the glandular tissue from full-field digital mammograms, meanwhile, a biomechanical model is used to mimic the mammographic acquisition from the MRI, computing the glandular tissue traversed by the X-ray beam. Results with 40 patients show a high agreement between the amount of glandular tissue computed for each method.}, + file = {:pdf/Garc16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Garc20, + author = {Garc\'{i}a, Eloy and Diez, Yago and Oliver, Arnau and Karssemeijer, Nico and Mart\'{i}, Joan and Mart\'{i}, Robert and Diaz, Oliver}, + title = {Evaluation of elastic parameters for breast compression using a MRI-mammography registration approach}, + doi = {10.1117/12.2564155}, + year = {2020}, + abstract = {Patient-specific finite element (FE) models of the breast have received increasing attention due to the potential capability of fusing information from different image modalities. During the Magnetic Resonance Imaging (MRI) to X-ray mammography (MG) registration procedure, a FE model is compressed mimicking the mammographic acquisition. To develop an accurate model of the breast, the elastic properties and stress-strain relationship of breast tissues need to be properly defined. Several studies (in vivo and ex vivo experiments) have proposed a range of values associated to the mechanical properties of different tissues. This work analyses the elastic parameters (Young Modulus and Poisson ratio) obtained during the process of registering MRI to X-ray MG images. Position, orientation, elastic parameters and amount of compression are optimised using a simulated annealing algorithm, until the biomechanical model reaches a suitable position with respect to the corresponding mammogram. FE models obtained from 29 patients, 46 MRI-MG studies, were used to extract the optimal elastic parameters for breast compression. 
The optimal Young modulus obtained in the entire dataset corresponds to 4.46 ± 1.81 kPa for adipose and 16.32 ± 8.36 kPa for glandular tissue, while the average Poisson ratio was 0.0492 ± 0.004. Furthermore, we did not find a correlation between the elastic parameters and other patient-specific factors such as breast density or patient age.}, + url = {http://dx.doi.org/10.1117/12.2564155}, + file = {Garc20.pdf:pdf\Garc20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {15th International Workshop on Breast Imaging (IWBI2020)}, + citation-count = {0}, + automatic = {yes}, +} + +@inproceedings{Gatt09, + author = {Gatta, Carlo and Valencia, Juan Diego Gomez and Ciompi, Francesco and Leor, Oriol Rodriguez and Radeva, Petia}, + title = {Toward robust myocardial blush grade estimation in contrast angiography}, + booktitle = PRIA, + year = {2009}, + publisher = {Springer}, + pages = {249--256}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-02172-5_33}, + optnote = {DIAG, RADIOLOGY}, + gsid = {7247104007072235243}, + gscites = {2}, +} + +@inproceedings{Gatt10, + author = {Gatta, Carlo and Balocco, Simone and Ciompi, Francesco and Hemetsberger, Rayyan and Leor, Oriol Rodriguez and Radeva, Petia}, + title = {Real-time gating of IVUS sequences based on motion blur analysis: method and quantitative validation}, + booktitle = MICCAI, + year = {2010}, + doi = {10.1007/978-3-642-15745-5_8}, + series = LNCS, + publisher = {Springer}, + pages = {59--67}, + url = {http://link.springer.com/chapter/10.1007/978-3-642-15745-5_8}, + abstract = {Intravascular Ultrasound (IVUS) is an image-guiding technique for cardiovascular diagnostics, providing cross-sectional images of vessels. During the acquisition, the catheter is pulled back (pullback) at a constant speed in order to acquire spatially subsequent images of the artery. However, during this procedure, the heart twist produces a swinging fluctuation of the probe position along the vessel axis. In this paper we propose a real-time gating algorithm based on the analysis of motion blur variations during the IVUS sequence. Quantitative tests performed on an in-vitro ground truth database show that our method is superior to state of the art algorithms both in computational speed and accuracy.}, + file = {Gatt10.pdf:pdf\\Gatt10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16074478644414706388}, + gscites = {27}, +} + +@article{Gatt14, + author = {Gatta, Carlo and Ciompi, Francesco}, + title = {Stacked sequential scale-space {T}aylor context}, + journal = TPAMI, + year = {2014}, + volume = {36(8)}, + pages = {1694--1700}, + pmid = {26353349}, + doi = {10.1109/TPAMI.2013.2297706}, + url = {https://ieeexplore.ieee.org/document/6701326}, + abstract = {We analyze sequential image labeling methods that sample the posterior label field in order to gather contextual information. We propose an effective method that extracts local Taylor coefficients from the posterior at different scales. 
Results show that our proposal outperforms state-of-the-art methods on MSRC-21, CAMVID, eTRIMS8 and KAIST2 data sets.}, + file = {Gatt14.pdf:pdf\\Gatt14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + publisher = {IEEE}, + month = {8}, + gsid = {16487560082385627861}, + gscites = {11}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/136855}, + ss_id = {25c32af274aa8bed2e01063cc8d71c4d1ff238f7}, + all_ss_ids = {['25c32af274aa8bed2e01063cc8d71c4d1ff238f7']}, +} + +@article{Gees19, + author = {Geessink, Oscar and Baidoshvili, Alexi and Klaase, Joost and Ehteshami Bejnordi, Babak and Litjens, Geert and van Pelt, Gabi and Mesker, Wilma and Nagtegaal, Iris and Ciompi, Francesco and van der Laak, Jeroen}, + title = {Computer aided quantification of intratumoral stroma yields an independent prognosticator in rectal cancer}, + journal = CELLO, + year = {2019}, + pages = {1-11}, + doi = {10.1007/s13402-019-00429-z}, + abstract = {Purpose: Tumor-stroma ratio (TSR) serves as an independent prognostic factor in colorectal cancer and other solid malignancies. The recent introduction of digital pathology in routine tissue diagnostics holds opportunities for automated TSR analysis. We investigated the potential of computer-aided quantification of intratumoral stroma in rectal cancer whole-slide images. Methods: Histological slides from 129 rectal adenocarcinoma patients were analyzed by two experts who selected a suitable stroma hot-spot and visually assessed TSR. A semi-automatic method based on deep learning was trained to segment all relevant tissue types in rectal cancer histology and subsequently applied to the hot-spots provided by the experts. Patients were assigned to a 'stroma-high' or 'stroma-low' group by both TSR methods (visual and automated). This allowed for prognostic comparison between the two methods in terms of disease-specific and disease-free survival times. Results: With stroma-low as baseline, automated TSR was found to be prognostic independent of age, gender, pT-stage, lymph node status, tumor grade, and whether adjuvant therapy was given, both for disease-specific survival (hazard ratio = 2.48 (95% confidence interval 1.29-4.78)) and for disease-free survival (hazard ratio = 2.05 (95% confidence interval 1.11-3.78)). Visually assessed TSR did not serve as an independent prognostic factor in multivariate analysis. Conclusions: This work shows that TSR is an independent prognosticator in rectal cancer when assessed automatically in user-provided stroma hot-spots. The deep learning-based technology presented here may be a significant aid to pathologists in routine diagnostics.}, + file = {Gees19.pdf:pdf\\Gees19.pdf:PDF}, + timestamp = {01-03-2019}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30825182}, + month = {3}, + gsid = {14546191034356758304}, + gscites = {70}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/204300}, + all_ss_ids = {['2d7797e69a35b5ffe021a2762ef251764562dca4', 'c7ce6b90707871b922aca4cb6a8b16625980788b']}, +} + +@inproceedings{Geij18, + author = {Geijs, D.J. and Intezar, M. and van der Laak, J.A.W.M. and Litjens, G.J.S.}, + title = {Automatic color unmixing of {IHC} stained whole slide images}, + booktitle = MI, + year = {2018}, + volume = {10581}, + series = SPIE, + doi = {10.1117/12.2293734}, + abstract = {Assessment of immunohistochemically stained slides is often a crucial diagnostic step in clinical practice. However, as this assessment is generally performed visually by pathologists it can suffer from significant inter-observer variability. 
The introduction of whole slide scanners facilitates automated analysis of immunohistochemical slides. Color deconvolution (CD) is one of the most popular first steps in quantifying stain density in histopathological images. However, color deconvolution requires stain color vectors for accurate unmixing. Often it is assumed that these stain vectors are static. In practice, however, they are influenced by many factors. This can cause inferior CD unmixing and thus typically results in poor quantification. Some automated methods exist for color stain vector estimation, but most depend on a significant amount of each stain to be present in the whole slide images. In this paper we propose a method for automatically finding stain color vectors and unmixing IHC stained whole slide images, even when some stains are sparsely expressed. We collected 16 tonsil slides and stained them for different periods of time with hematoxylin and a DAB-colored proliferation marker Ki67. RGB pixels of WSI images were converted to the hue saturation density (HSD) color domain and subsequently K-means clustering was used to separate stains and calculate the stain color vectors for each slide. Our results show that staining time affects the stain vectors and that calculating a unique stain vector for each slide results in better unmixing results than using a standard stain vector.}, + file = {Geij18.pdf:pdf\\Geij18.pdf:PDF}, + optnote = {DIAG}, + month = {3}, + gsid = {12313121299719118304}, + gscites = {11}, + ss_id = {8e8b923c809ab504558fd4822a7696c6e2492cfe}, + all_ss_ids = {['8e8b923c809ab504558fd4822a7696c6e2492cfe']}, +} + +@mastersthesis{Geij19, + author = {Daan Geijs}, + title = {Tumor segmentation in fluorescent TNBC immunohistochemical multiplex images using deep learning}, + year = {2019}, + abstract = {Breast cancer is among females the most frequently diagnosed cancer and the leading cause of cancer death. A subtype of breast cancer called triple negative breast cancer (TNBC) is known to be more aggressive, generally occur at younger age and even very small (<1cm) node-negative TNBC show recurrence within 5 years in 15% of cases if left untreated. For TNBC, several studies showed that the number of tumor-infiltrating lymphocytes (TIL) in hematoxylin and eosin (H&E) stained sections strongly correlates with disease free survival. Subtyping of lymphocytes could strongly help finding more powerful prognostic markers. However, standard H&E stained sections do not permit specific subtyping of lymphocytes and immunohistochemistry (IHC) allows very limited subtyping. New scanning systems, staining protocols and medical imaging analysis algorithms allows to gather spatial information of lymphocyte subtypes and to determine subtype positioning of lymphocytes in peri- or intertumoral regions. A first step towards this goal is to detect tumor regions to determine whether a lymphocyte is positioned peri- or intertumoral. Therefore, the aim of this thesis was to investigate the performance of convolutional networks to segment tumor regions in TNBC whole-slide multiplex IHC slides. Multiple experiments were conducted to investigate and maximize the performance. The data used for training was investigated and it was concluded that training a FCNN (fully convolutional neural network) using the DAPI and CK8-18 data channels together with a resolution of 0.96 um/pix and patch size of 128x128 resulted in the highest segmentation performance. Enriching the dataset with hard mining had no positive effects on the performance.
Using the different architecture U-net resulted in similar results compared to that of a FCNN. A 'model averaging ensemble' resulted in the highest segmentation performance with an F1 score of 0.83. It can be concluded that fully convolutional networks were able to segment tumor regions in triple negative breast cancer tissue. This holds true for both FCNN and U-net architectures and can be used for the overarching aim of this research, namely extracting powerful prognostic information from intra- and peritumoral lymphocytes}, + file = {:pdf/Geij19.pdf:PDF}, + optnote = {DIAG}, + school = {University of Twente}, + journal = {Master thesis}, +} + +@inproceedings{Geij21, + author = {Geijs, DJ and Pinckaers, H and Amir, AL and Litjens, GJS}, + booktitle = MI, + title = {End-to-end classification on basal-cell carcinoma histopathology whole-slides images}, + doi = {10.1117/12.2581042}, + pages = {1160307}, + series = SPIE, + volume = {11603}, + abstract = {The high incidence of BCC skin cancer caused that the amount of work for pathologists has risen to unprecedented levels. Acquiring outlined annotations for training deep learning models classifying BCC is often tedious and time consuming. End-to-end learning provides relief in labelling data by using a single label to predict a clinical outcome. We compared multiple-instance-learning (MIL) and a streaming performance for detecting BCC in 420 slides collected from 72 BCC positive patients. This resulted in an ROC with AUC of 0.96 and 0.98 for respectively streaming and MIL. Saliency and probability maps showed that both methods were capable of classifying BCC in an end-to-end way with single labels.}, + file = {Geij21.pdf:pdf\\Geij21.pdf:PDF}, + year = {2021}, + ss_id = {4ea1f801a7d14b2bf284fef25d83bcc222c01629}, + all_ss_ids = {['4ea1f801a7d14b2bf284fef25d83bcc222c01629']}, + gscites = {2}, +} + +@article{Geld18, + author = {Ard de Gelder and Henkjan Huisman}, + title = {Autoencoders for Multi-Label Prostate MR Segmentation}, + journal = {arXiv:1806.08216}, + year = {2018}, + abstract = {Organ image segmentation can be improved by implementing prior knowledge about the anatomy. One way of doing this is by training an autoencoder to learn a low-dimensional representation of the segmentation. In this paper, this is applied in multi-label prostate MR segmentation, with some positive results.}, + optnote = {DIAG}, + month = {6}, + gsid = {10073906740286911860}, + gscites = {6}, + ss_id = {08509a76ccc2ef61e33ca17c13733ad2397780bb}, + all_ss_ids = {['08509a76ccc2ef61e33ca17c13733ad2397780bb']}, +} + +@conference{Genu22, + author = {E. A. J. van Genugten and B. Piet and G. Schreibelt and T. van Oorschot and G. van den Heuvel and F. Ciompi and C. Jacobs and J. de Vries and M. M. van den Heuvel and E. Aarntzen}, + title = {Imaging tumor-infiltrating CD8 (+) T-cells in non-small cell lung cancer patients upon neo-adjuvant treatment with durvalumab}, + booktitle = {European Molecular Imaging Meeting}, + year = {2022}, + abstract = {INTRODUCTION: Immune checkpoint inhibitors (ICI), like targeting programmed death receptor ligand 1 (PD-L1), have revolutionized anti-cancer treatments, including non-small cell lung cancer (NSCLC) [1, 2]. Assessment of PD-L1 expression on tumor biopsies is current practice, but there is a need for additional biomarkers correlating to the complex mechanism of action of ICI. The presence of tumor-infiltrating CD8+ T-cells (TILs) is a robust biomarker associated with immune therapy success [3-6].
Tools to track TILs in patients during ICI treatment would allow further development of immune-oncology drugs. METHODS: This ongoing single-center prospective study (NCT03853187) includes patients with histologically proven T1b-3N0-1M0 NSCLC eligible for resection. Exclusion criteria are previous anti-cancer therapy <6 months and immune disorders or suppression. Patients receive two courses neo-adjuvant durvalumab (750mg Q2W) and consecutive TIL imaging. Cohort 1 underwent apheresis and magnetic-activated cells sorting (CliniMACS) to isolate 100 x10e6 autologous CD8+ T-cells for ex vivo labeling with 111In-oxine. Re-injection was followed by 4h post-injection (p.i.) planar imaging, 70h p.i. SPECT imaging, standard-of-care surgery and 78h p.i. uSPECT of the resected lobe. Patients in cohort 2 (ongoing) receive 1.5mg 89Zr-Df-crefmirlimab followed by PET/CT 24h p.i. and 74h p.i. uPET/CT of the resected lobe. RESULTS/DISCUSSION: In cohort 1, 8/10 patients completed TIL imaging; one procedure was withdrawn due to COVID-19 restrictions and one due to unsuccessful T-cell isolation. CliniMACS yield ranged 240-714 x10e6 CD8+ T-cells, purity 84-97% and cell viability 92-100%. Labeling efficacy of 100 x10e6 cells for re-injection ranged 42-64% with injected activity of 22,4-36,7 MBq In-111. TIL imaging was completed by 2/3 patients in cohort 2, one subject discontinued neo-adjuvant treatment due to pneumonia. To determine the potential for visual assessment of TILs, we analyzed ratios between tumor uptake and contralateral lung. We observed large variations within cohort 1, dependent on tumor localization. Ratios between tumor and bloodpool activity were determined to quantify specific accumulation in the tumor. Our results favor quantification of T-cells on PET over SPECT given its higher sensitivity and spatial resolution. Correlation of imaging with CD8+ T-cells in the resected tumor is ongoing (will be presented). CONCLUSIONS: We implemented two methods for tracking CD8+ T-cells in early-stage NSCLC patients after neo-adjuvant durvalumab. Although ex vivo cell labeling perhaps more specifically targets migrating TILs into the tumor, 89Zr-Df-crefmirlimab has the potential to also target residing cells. Quantitative correlation with presence of TILs in the resected tumor will help to determine the role of these imaging tools in the development of immune-oncology drugs.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Geor16, + author = {Georgii, J. and Paetz, T. and Harz, M. and Stoecker, C. and Rothgang, M. and Colletta, J. and Schilling, K. and Schlooz-Vries, M. and Mann, R.M. and Hahn, H. K.}, + title = {Simulation and Visualization to Support Breast Surgery Planning}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {257-264}, + doi = {10.1007/978-3-319-41546-8_33}, + abstract = {Abstract. Today, breast surgeons plan their procedures using preoperatively placed metal clips or radioactive seeds and radiological images. These images show the breast in a positioning different from the one during surgery. We show a research prototype that eases the surgeon's planning task by providing 3D visualizations based on the radiological images. With a FEM-based deformation simulation, we mimic the real surgical scenario. 
In particular, we have developed a ligament model that increases the robustness of a fully automatic prone-supine deformation simulation, and we have developed specific visualization methods to aid intra-operative breast lesion localization.}, + file = {:pdf/Geor16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Gern18, + author = {Gernaat, Sofie A. M. and van Velzen, Sanne G. M. and Koh, Vicky and Emaus, Marleen J. and I{\v{s}}gum, Ivana and Lessmann, Nikolas and Moes, Shinta and Jacobson, Anouk and Tan, Poey W. and Grobbee, Diederick E. and van den Bongard, Desiree H. J. and Tang, Johann I. and Verkooijen, Helena M.}, + title = {Automatic quantification of calcifications in the coronary arteries and thoracic aorta on radiotherapy planning {CT} scans of {Western} and {Asian} breast cancer patients}, + journal = RTO, + year = {2018}, + volume = {127}, + pages = {487--492}, + doi = {10.1016/j.radonc.2018.04.011}, + optnote = {DIAG}, + file = {Gern18.pdf:pdf\\Gern18.pdf:PDF}, +} + +@mastersthesis{Geur23, + author = {Geurtjens, Ruben and Peeters, Dr\'{e} and Jacobs, Colin}, + title = {Self-supervised Out-of-Distribution detection for medical imaging}, + abstract = {Out-of-distribution (OOD) detection is an important aspect of deep learning-based medical imaging approaches for ensuring the safety and accuracy of diagnostic tools. In this paper, we investigate the effectiveness of three self-supervised learning techniques for OOD detection in both a labeled RadboudXR dataset and a clinical dataset with OOD data but no labels. Specifically, we explore two predictive self-supervised techniques and one contrastive self-supervised technique and evaluate their ability to detect OOD samples. Furthermore, we evaluate the performance of the state-of-the-art vision transformer model on medical data both as a standalone method and as the backbone of a self-supervised task. Our results indicate that the contrastive self-supervised method Bootstrap-Your-Own-latent (BYOL) and vision transformer model were not effective in detecting OOD samples. However, the predictive methods performed well on both 2D and 3D data, and demonstrated scalability in difficulty. These findings suggest the potential utility of self-supervised learning techniques for OOD detection in medical imaging. When determining an OOD cut-off value for clinical usage there are, however, problems with separation between datasets. These challenges suggest that further research is needed before these techniques can be adopted for clinical usage.}, + file = {Geur23.pdf:pdf\\Geur23.pdf:PDF}, + journal = {Master thesis}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2023}, +} + +@inproceedings{Ghaf13, + author = {Mohsen Ghafoorian and Nasrin Taghizadeh and Hamid Beigy}, + title = {Automatic Abstraction in Reinforcement Learning Using Ant System Algorithm}, + booktitle = {AAAI Spring Symposium: Lifelong Machine Learning}, + year = {2013}, + abstract = {Nowadays developing autonomous systems, which can act in various environments and interactively perform their assigned tasks, are intensively desirable. These systems would be ready to be applied in different fields such as medicine, controller robots and social life. Reinforcement learning is an attractive area of machine learning which addresses these concerns. In large scales, learning performance of an agent can be improved by using hierarchical Reinforcement Learning techniques and temporary extended actions.
The higher level of abstraction helps the learning agent approach lifelong learning goals. In this paper a new method is presented for discovering subgoal states and constructing useful skills. The method utilizes Ant System optimization algorithm to identify bottleneck edges, which act like bridges between different connected areas of the problem space. Using discovered subgoals, the agent creates temporal abstractions, which enable it to explore more effectively. Experimental Results show that the proposed method can significantly improve the learning performance of the agent.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Ghaf15, + author = {Mohsen Ghafoorian and Nico Karssemeijer and Frank Erik de Leeuw and Tom Heskes and Elena Marchiori and Bram Platel}, + title = {Small White Matter Lesion Detection in Cerebral Small Vessel Disease}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + pages = {941411}, + doi = {10.1117/12.2081597}, + abstract = {Cerebral small vessel disease (SVD) is a common finding on magnetic resonance images of elderly people. White matter lesions (WML) are important markers for not only the small vessel disease, but also neuro-degenerative diseases including multiple sclerosis, Alzheimer's disease and vascular dementia. Volumetric measurements such as the 'total lesion load', have been studied and related to these diseases. With respect to SVD we conjecture that small lesions are important, as they have been observed to grow over time and they form the majority of lesions in number. To study these small lesions they need to be annotated, which is a complex and time-consuming task. Existing (semi)automatic methods have been aimed at volumetric measurements and large lesions, and are not suitable for the detection of small lesions. In this research we established a supervised voxel classification CAD system, optimized and trained to exclusively detect small WMLs. To achieve this, several preprocessing steps were taken, which included a robust standardization of subject intensities to reduce inter-subject intensity variability as much as possible. A number of features that were found to be well identifying small lesions were calculated including multimodal intensities, tissue probabilities, several features for accurate location description, a number of second order derivative features as well as multi-scale annular filter for blobness detection. Only small lesions were used to learn the target concept via Adaboost using random forests as its basic classifiers. Finally the results were evaluated using Free-response receiver operating characteristic.}, + file = {Ghaf15.pdf:pdf\\Ghaf15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {18321616094304826808}, + gscites = {14}, + ss_id = {aa66d8655463fa432fd3ce023e6ad10eec2d337a}, + all_ss_ids = {['aa66d8655463fa432fd3ce023e6ad10eec2d337a']}, +} + +@inproceedings{Ghaf16, + author = {Mohsen Ghafoorian and Nico Karssemeijer and Tom Heskes and Inge W M van Uden and Frank-Erik de Leeuw and Elena Marchiori and Bram van Ginneken and Bram Platel}, + title = {Non-uniform patch sampling with deep convolutional neural networks for white matter hyperintensity segmentation}, + booktitle = ISBI, + year = {2016}, + pages = {1414--1417}, + doi = {10.1109/ISBI.2016.7493532}, + abstract = {Convolutional neural networks (CNN) have been widely used for visual recognition tasks including semantic segmentation of images.
While the existing methods consider uniformly sampled single- or multi-scale patches from the neighborhood of each voxel, this approach might be sub-optimal as it captures and processes unnecessary details far away from the center of the patch. We instead propose to train CNNs with non-uniformly sampled patches that allow a wider extent for the sampled patches. This results in more captured contextual information, which is in particular of interest for biomedical image analysis, where the anatomical location of imaging features are often crucial. We evaluate and compare this strategy for white matter hyperintensity segmentation on a test set of 46 MRI scans. We show that the proposed method not only outperforms identical CNNs with uniform patches of the same size (0.780 Dice coefficient compared to 0.736), but also gets very close to the performance of an independent human expert (0.796 Dice coefficient)}, + file = {Ghaf16.pdf:pdf\\Ghaf16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {13928783358122591393}, + gscites = {71}, + ss_id = {19ef79992e71a374d59f8be3feb93125fd7815d9}, + all_ss_ids = {['19ef79992e71a374d59f8be3feb93125fd7815d9']}, +} + +@article{Ghaf16a, + author = {Mohsen Ghafoorian and Nico Karssemeijer and Inge W M van Uden and Frank-Erik de Leeuw and Tom Heskes and Elena Marchiori and Bram Platel}, + title = {Automated Detection of White Matter Hyperintensities of All Sizes in Cerebral Small Vessel Disease}, + journal = MP, + year = {2016}, + volume = {43}, + number = {12}, + pages = {6246-6258}, + doi = {10.1118/1.4966029}, + abstract = {Purpose: + White matter hyperintensities (WMH) are seen on FLAIR-MRI in several neurological disorders, including multiple sclerosis, dementia, Parkinsonism, stroke and cerebral small vessel disease (SVD). WMHs are often used as biomarkers for prognosis or disease progression in these diseases, and additionally longitudinal quantification of WMHs is used to evaluate therapeutic strategies. Human readers show considerable disagreement and inconsistency on detection of small lesions. A multitude of automated detection algorithms for WMHs exists, but since most of the current automated approaches are tuned to optimize segmentation performance according to Jaccard or Dice scores, smaller WMHs often go undetected in these approaches. In this paper, the authors propose a method to accurately detect all WMHs, large as well as small. + + Methods: + A two-stage learning approach was used to discriminate WMHs from normal brain tissue. Since small and larger WMHs have quite a different appearance, the authors have trained two probabilistic classifiers: one for the small WMHs (<3 mm effective diameter) and one for the larger WMHs (>3 mm in-plane effective diameter). For each size-specific classifier, an Adaboost is trained for five iterations, with random forests as the basic classifier. The feature sets consist of 22 features including intensities, location information, blob detectors, and second order derivatives. The outcomes of the two first-stage classifiers were combined into a single WMH likelihood by a second-stage classifier. Their method was trained and evaluated on a dataset with MRI scans of 362 SVD patients (312 subjects for training and validation annotated by one and 50 for testing annotated by two trained raters). 
To analyze performance on the separate test set, the authors performed a free-response receiving operating characteristic (FROC) analysis, instead of using segmentation based methods that tend to ignore the contribution of small WMHs. + + Results: + Experimental results based on FROC analysis demonstrated a close performance of the proposed computer aided detection (CAD) system to human readers. While an independent reader had 0.78 sensitivity with 28 false positives per volume on average, their proposed CAD system reaches a sensitivity of 0.73 with the same number of false positives. + + Conclusions: + The authors have developed a CAD system with all its ingredients being optimized for a better detection of WMHs of all size, which shows performance close to an independent reader.}, + file = {Ghaf16a.pdf:pdf\\Ghaf16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27908171}, + month = {11}, + gsid = {2058349102085812132}, + gscites = {61}, + ss_id = {ea864dd65dfd1207fde07941ceaf1bda38ba9c1b}, + all_ss_ids = {['ea864dd65dfd1207fde07941ceaf1bda38ba9c1b']}, +} + +@article{Ghaf17a, + author = {Ghafoorian, Mohsen and Karssemeijer, Nico and Heskes, Tom and Bergkamp, Mayra and Wissink, Joost and Obels, Jiri and Keizer, Karlijn and de Leeuw, Frank-Erik and Ginneken, Bram van and Marchiori, Elena and Platel, Bram}, + title = {Deep multi-scale location-aware 3D convolutional neural networks for automated detection of lacunes of presumed vascular origin}, + journal = NEUICL, + year = {2017}, + volume = {14}, + pages = {391-399}, + doi = {10.1016/j.nicl.2017.01.033}, + abstract = {Lacunes of presumed vascular origin (lacunes) are associated with an increased risk of stroke, gait impairment, and dementia and are a primary imaging feature of the small vessel disease. Quantification of lacunes may be of great importance to elucidate the mechanisms behind neuro-degenerative disorders and is recommended as part of study standards for small vessel disease research. However, due to the different appearance of lacunes in various brain regions and the existence of other similar-looking structures, such as perivascular spaces, manual annotation is a difficult, elaborative and subjective task, which can potentially be greatly improved by reliable and consistent computer-aided detection (CAD) routines. In this paper, we propose an automated two-stage method using deep convolutional neural networks (CNN). We show that this method has good performance and can considerably benefit readers. We first use a fully convolutional neural network to detect initial candidates. In the second step, we employ a 3D CNN as a false positive reduction tool. As the location information is important to the analysis of candidate structures, we further equip the network with contextual information using multi-scale analysis and integration of explicit location features. We trained, validated and tested our networks on a large dataset of 1075 cases obtained from two different studies. Subsequently, we conducted an observer study with four trained observers and compared our method with them using a free-response operating characteristic analysis. Shown on a test set of 111 cases, the resulting CAD system exhibits performance similar to the trained human observers and achieves a sensitivity of 0.974 with 0.13 false positives per slice. 
A feasibility study also showed that a trained human observer would considerably benefit once aided by the CAD system.}, + file = {Ghaf17a.pdf:pdf\\Ghaf17a.pdf:PDF}, + optnote = {DIAG}, + pmid = {28271039}, + gsid = {14192982400070367057}, + gscites = {116}, + ss_id = {7f7990b0d4a5f7b356d06c7017a337b5ae3b31cf}, + all_ss_ids = {['7f7990b0d4a5f7b356d06c7017a337b5ae3b31cf']}, +} + +@inproceedings{Ghaf17b, + author = {Mohsen Ghafoorian and Alireza Mehrtash and Tina Kapur and Nico Karssemeijer and Elena Marchiori and Mehran Pesteie and Charles R. G. Guttmann and Frank-Erik de Leeuw and Clare M. Tempany and Bram van Ginneken and Andriy Fedorov and Purang Abolmaesumi and Bram Platel and William M. Wells}, + title = {Transfer Learning for Domain Adaptation in {MRI}: Application in Brain Lesion Segmentation}, + booktitle = MICCAI, + year = {2017}, + volume = {10435}, + series = LNCS, + pages = {516-524}, + doi = {10.1007/978-3-319-66179-7_59}, + url = {https://arxiv.org/abs/1702.07841}, + abstract = {Magnetic Resonance Imaging (MRI) is widely used in routine clinical diagnosis and treatment. However, variations in MRI acquisition protocols result in different appearances of normal and diseased tissue in the images. Convolutional neural networks (CNNs), which have shown to be successful in many medical image analysis tasks, are typically sensitive to the variations in imaging protocols. Therefore, in many cases, networks trained on data acquired with one MRI protocol, do not perform satisfactorily on data acquired with different protocols. This limits the use of models trained with large annotated legacy datasets on a new dataset with a different domain which is often a recurring situation in clinical settings. In this study, we aim to answer the following central questions regarding domain adaptation in medical image analysis: Given a fitted legacy model, 1) How much data from the new domain is required for a decent adaptation of the original network?; and, 2) What portion of the pre-trained model parameters should be retrained given a certain number of the new domain training samples? To address these questions, we conducted extensive experiments in white matter hyperintensity segmentation task. We trained a CNN on legacy MR images of brain and evaluated the performance of the domain-adapted network on the same task with images from a different domain. We then compared the performance of the model to the surrogate scenarios where either the same trained network is used or a new network is trained from scratch on the new dataset.The domain-adapted network tuned only by two training examples achieved a Dice score of 0.63 substantially outperforming a similar network trained on the same set of examples from scratch.}, + file = {Ghaf17b.pdf:pdf\\Ghaf17b.pdf:PDF}, + optnote = {DIAG}, + gsid = {14433108461414425390}, + gscites = {277}, + ss_id = {102a580a89b7e9d5a6d8e79864db183d6b5fb6ad}, + all_ss_ids = {['102a580a89b7e9d5a6d8e79864db183d6b5fb6ad']}, +} + +@article{Ghaf17c, + author = {Mohsen Ghafoorian and Nico Karssemeijer and Tom Heskes and Inge van Uden and Clara I. 
S\'{a}nchez and Geert Litjens and Frank-Erik de Leeuw and Bram van Ginneken and Elena Marchiori and Bram Platel}, + title = {Location Sensitive Deep Convolutional Neural Networks for Segmentation of White Matter Hyperintensities}, + journal = NATSCIREP, + year = {2017}, + volume = {7}, + number = {1}, + pages = {5110}, + doi = {10.1038/s41598-017-05300-5}, + url = {https://arxiv.org/abs/1610.04834}, + abstract = {The anatomical location of imaging features is of crucial importance for accurate diagnosis in many medical tasks. Convolutional neural networks (CNN) have had huge successes in computer vision, but they lack the natural ability to incorporate the anatomical location in their decision making process, hindering success in some medical image analysis tasks. In this paper, to integrate the anatomical location information into the network, we propose several deep CNN architectures that consider multi-scale patches or take explicit location features while training. We apply and compare the proposed architectures for segmentation of white matter hyperintensities in brain MR images on a large dataset. As a result, we observe that the CNNs that incorporate location information substantially outperform a conventional segmentation method with handcrafted features as well as CNNs that do not integrate location information. On a test set of 50 scans, the best configuration of our networks obtained a Dice score of 0.792, compared to 0.805 for an independent human observer. Performance levels of the machine and the independent human observer were not statistically significantly different (p-value = 0.06).}, + file = {Ghaf17c.pdf:pdf\\Ghaf17c.pdf:PDF}, + optnote = {DIAG}, + pmid = {28698556}, + month = {7}, + gsid = {5144190841808936245}, + gscites = {207}, + all_ss_ids = {['91feb0cb04d67a5c38d0a4e7c67fc7874491abc6', '925024223075dcc5b4db81f4b07eddf2cbdf4519']}, +} + +@inproceedings{Ghaf18, + author = {Mohsen Ghafoorian and Jonas Teuwen and Rashindra Manniesing and Frank-Erik de Leeuw and Bram van Ginneken and Nico Karssemeijer and Bram Platel}, + title = {Student Beats the Teacher: Deep Neural Networks for Lateral Ventricles Segmentation in Brain MR}, + booktitle = MI, + year = {2018}, + volume = {10574}, + series = SPIE, + pages = {105742U}, + doi = {10.1117/12.2293569}, + url = {https://arxiv.org/abs/1801.05040}, + abstract = {Ventricular volume and its progression are known to be linked to several brain diseases such as dementia and schizophrenia. Therefore accurate measurement of ventricle volume is vital for longitudinal studies on these disorders, making automated ventricle segmentation algorithms desirable. In the past few years, deep neural networks have shown to outperform the classical models in many imaging domains. However, the success of deep networks is dependent on manually labeled data sets, which are expensive to acquire especially for higher dimensional data in the medical domain. In this work, we show that deep neural networks can be trained on muchcheaper-to-acquire pseudo-labels (e.g., generated by other automated less accurate methods) and still produce more accurate segmentations compared to the quality of the labels. To show this, we use noisy segmentation labels generated by a conventional region growing algorithm to train a deep network for lateral ventricle segmentation. Then on a large manually annotated test set, we show that the network significantly outperforms the conventional region growing algorithm which was used to produce the training labels for the network. 
Our experiments report a Dice Similarity Coefficient (DSC) of 0.874 for the trained network compared to 0.754 for the conventional region growing algorithm (p < 0.001).}, + file = {:pdf/ghaf18.pdf:PDF}, + optnote = {DIAG}, + month = {3}, + gsid = {14380272279536530363}, + gscites = {17}, + ss_id = {2e11acb5e60b59af637cc7faea6e390ed6a9429b}, + all_ss_ids = {['2e11acb5e60b59af637cc7faea6e390ed6a9429b']}, +} + +@phdthesis{Ghaf18a, + author = {Mohsen Ghafoorian}, + title = {Machine Learning for Quantification of Small Vessel Disease Imaging Biomarkers}, + year = {2018}, + url = {https://repository.ubn.ru.nl/bitstream/handle/2066/183226/183226.pdf?sequence=1}, + abstract = {This thesis describes various methods for the quantification of white matter hyperintensities (WMH) and lacunes as two imaging biomarkers of small vessel disease (SVD) in MR images}, + copromotor = {B. Platel}, + file = {:pdf/Ghaf18a.pdf:PDF}, + optnote = {DIAG}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@book{Gibs15, + author = {Gibson, Eli and Huisman, Henkjan J. and Barratt, Dean C.}, + title = {Statistical Power in Image Segmentation: Relating Sample Size to Reference Standard Quality}, + doi = {10.1007/978-3-319-24574-4_13}, + year = {2015}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-24574-4_13}, + file = {Gibs15.pdf:pdf\Gibs15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Lecture Notes in Computer Science}, + citation-count = {0}, + automatic = {yes}, + pages = {105-113}, +} + +@article{Gibs17, + author = {Eli Gibson and Yipeng Hu and Henkjan Huisman and Dean Barratt}, + title = {Designing image segmentation studies: statistical power, sample size and reference standard quality}, + journal = MIA, + year = {2017}, + volume = {42}, + pages = {44-59}, + doi = {10.1016/j.media.2017.07.004}, + url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5666910/}, + abstract = {Segmentation algorithms are typically evaluated by comparison to an accepted reference standard. The cost of generating accurate reference standards for medical image segmentation can be substantial. Since the study cost and the likelihood of detecting a clinically meaningful difference in accuracy both depend on the size and on the quality of the study reference standard, balancing these trade-offs supports the efficient use of research resources. + + In this work, we derive a statistical power calculation that enables researchers to estimate the appropriate sample size to detect clinically meaningful differences in segmentation accuracy (i.e. the proportion of voxels matching the reference standard) between two algorithms. Furthermore, we derive a formula to relate reference standard errors to their effect on the sample sizes of studies using lower-quality (but potentially more affordable and practically available) reference standards. + + The accuracy of the derived sample size formula was estimated through Monte Carlo simulation, demonstrating, with 95% confidence, a predicted statistical power within 4% of simulated values across a range of model parameters. This corresponds to sample size errors of less than 4 subjects and errors in the detectable accuracy difference less than 0.6%. The applicability of the formula to real-world data was assessed using bootstrap resampling simulations for pairs of algorithms from the PROMISE12 prostate MR segmentation challenge data set. 
The model predicted the simulated power for the majority of algorithm pairs within 4% for simulated experiments using a high-quality reference standard and within 6% for simulated experiments using a low-quality reference standard. A case study, also based on the PROMISE12 data, illustrates using the formulae to evaluate whether to use a lower-quality reference standard in a prostate segmentation study.}, + file = {Gibs17.pdf:pdf\\Gibs17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28772163}, + month = {12}, + gsid = {10750453317236818615}, + gscites = {14}, + ss_id = {91628553dee20285dd23c7a77bcc1b9510abe0c8}, + all_ss_ids = {['91628553dee20285dd23c7a77bcc1b9510abe0c8']}, +} + +@inproceedings{Gibs18, + author = {Eli Gibson and Yipeng Hu and Nooshin Ghavami and Hashim U. Ahmed and Caroline Moore and Mark Emberton and Henkjan Huisman and Dean Barratt}, + title = {Inter-site variability in prostate segmentation accuracy using deep learning}, + booktitle = MICCAI, + year = {2018}, + doi = {10.1007/978-3-030-00937-3_58}, + abstract = {Deep-learning-based segmentation tools have yielded higher reported segmentation accuracies for many medical imaging applications. However, inter-site variability in image properties can challenge the translation of these tools to data from 'unseen' sites not included in the training data. This study quantifies the impact of inter-site variability on the accuracy of deep-learning-based segmentations of the prostate from magnetic resonance (MR) images, and evaluates two strategies for mitigating the reduced accuracy for data from unseen sites: training on multi-site data and training with limited additional data from the unseen site. Using 376 T2-weighted prostate MR images from six sites, we compare the segmentation accuracy (Dice score and boundary distance) of + three deep-learning-based networks trained on data from a single site and on various configurations of data from multiple sites. We found that the segmentation accuracy of a single-site network was substantially worse on data from unseen sites than on data from the training site. Training on multi-site data yielded marginally improved accuracy and robustness. However, including as few as 8 subjects from the unseen site, e.g. during commissioning of a new clinical system, yielded substantial improvement (regaining 75% of the difference in Dice score).}, + file = {Gibs18.pdf:pdf\\Gibs18.pdf:PDF}, + optnote = {DIAG}, + gsid = {6478096673123844302}, + gscites = {45}, + ss_id = {7920c85aa097d57a787de42802120aaaf3f94ae9}, + all_ss_ids = {['7920c85aa097d57a787de42802120aaaf3f94ae9']}, +} + +@article{Giet07a, + author = {H. A. Gietema and A. M. Schilham and B. van Ginneken and R. J. van Klaveren and J. W. J. Lammers and M. Prokop}, + title = {Monitoring of smoking-induced emphysema with {CT} in a lung cancer screening setting: detection of real increase in extent of emphysema}, + journal = Radiology, + year = {2007}, + volume = {244}, + pages = {890--897}, + doi = {10.1148/radiol.2443061330}, + abstract = {{PURPOSE}: {T}o retrospectively establish the minimum increase in emphysema score ({ES}) required for detection of real increased extent of emphysema with 95\% confidence by using multi-detector row computed tomography ({CT}) in a lung cancer screening setting. {MATERIALS} {AND} {METHODS}: {T}he study was a substudy of the {NELSON} project that was approved by the {D}utch {M}inistry of {H}ealth and the ethics committee of each participating hospital, with patient informed consent.
{F}or this substudy, original approval and informed consent allowed use of data for future research. {A}mong 1684 men screened with low-dose multi-detector row {CT} (30 m{A}s, 16 detector rows, 0.75-mm section thickness) between {A}pril 2004 and {M}arch 2005, only participants who underwent repeat multi-detector row {CT} with the same scanner after 3 months because of an indeterminate pulmonary nodule were included. {E}xtent of emphysema was considered to remain stable in this short period. {E}xtent of low-attenuation areas representing emphysema was computed for repeat and baseline scans as percentage of lung volume below three attenuation threshold values (-910 {HU}, -930 {HU}, -950 {HU}). {L}imits of agreement were determined with {B}land-{A}ltman approach; upper limits were used to deduce the minimum increase in {ES} required for detecting increased extent of emphysema with 95\% probability. {F}actors influencing the limits of agreement were determined. {RESULTS}: {I}n total, 157 men (mean age, 60 years) were included in the study. {L}imits of agreement for differences in total lung volume between repeat and baseline scans were -13.4\% to +12.6\% at -910 {HU}, -4.7\% to +4.2\% at -930 {HU}, and -1.3\% to +1.1\% at -950 {HU}. {D}ifferences in {ES} showed weak to moderate correlation with variation in level of inspiration (r=0.20-0.49, {P}<.05). {S}canner calibration could be excluded as a factor contributing to variation in {ES}. {CONCLUSION}: {I}ncrease in {ES} required to detect increased extent of smoking-related emphysema with 95\% probability varies between 1.1\% of total lung volume at -950 {HU} and 12.6\% at -910 {HU} for low-dose multi-detector row {CT}. {C}linical trial registration no. {ISRCTN}63545820.}, + file = {Giet07a.pdf:pdf\\Giet07a.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + number = {3}, + pmid = {17709835}, + month = {9}, + gsid = {9253661431333367415}, + gscites = {69}, + ss_id = {82a456a6d5fec100feaeeeffa1dbfc24754ccc12}, + all_ss_ids = {['82a456a6d5fec100feaeeeffa1dbfc24754ccc12']}, +} + +@conference{Giet07c, + author = {H. Gietema and P. Zanen and A. M. R. Schilham and B. van Ginneken and R. van Klaveren and M. Prokop}, + title = {Distribution of emphysema in heavy smokers: {I}mpact on extent of airflow limitation and gas exchange impairment}, + booktitle = RSNA, + year = {2007}, + pages = {610}, + file = {Giet07.pdf:pdf\\Giet07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Giet07d, + author = {H. A. Gietema}, + title = {Low {D}ose {C}omputed {T}omography of the {C}hest: {A}pplications and {L}imitations}, + url = {http://igitur-archive.library.uu.nl/dissertations/2007-0306-201724/index.htm}, + abstract = {In areas with a high intrinsic contrast such as the chest, radiation dose can be reduced for specific indications. Low dose chest CT is feasible and cannot only be applied for lung cancer screening, but also in daily routine and for early detection of lung destruction. We showed in a small sample of patients from the outpatient department of pulmonology that ultralow-dose CT is feasible and can provide more information than CXR in two directions, while radiation dose is similar. The main disadvantage of radiation dose reduction is the accompanying increase in image noise. The detection of abnormalities with a high contrast to the surrounding normal tissue is not limited by the amount of image noise on the CT scans performed with the current minimum radiation dose. 
However, when the structure of interest shows a low contrast to the surrounding normal tissue, image noise can hamper the detection and especially the automated size measurement of the abnormality. We demonstrated that an increase in image noise results in overestimation of the emphysema score compared to standard dose CT, but also that the application of a dedicated noise reduction filter to the reconstructed data before the automated quantification of emphysema can prevent this overestimations. This thesis is partly based on the Dutch Belgian Lung Cancer Screening project (NELSON), a randomized multi-center trial studying current and former smokers in order to detect lung cancer in an early, treatable stage. The detection of growth is the main feature to distinguish benign non-calcified nodules from potentially benign ones, but interscan and to a lesser extent interobserver variability can limit the detection of growth. We showed that the performance of the algorithm to segment the nodule completely was the most important factor contributing to the variability. Since chronic obstructive pulmonary disease (COPD) and lung cancer share smoking as main risk factor, lung cancer screening trials provide a good opportunity to study the early stages and natural progression of COPD. Emphysema can cause COPD and can easily be detected and automated be quantified on CT by highlighting voxels with an abnormally low X-ray attenuation. We provided the limits of agreement for three common attenuation thresholds for emphysema scores obtained in a lung cancer screening setting. Moreover, we studied the prevalence of emphysema and airflow impairment in participants of the NELSON-project. Many participants showed destructed parenchyma, but a preserved lung function. However, the gas exchange is often already impaired in these subjects, but gas exchange is not one of the hallmarks of COPD (yet). The diffusion of DNO is shown to be a marker of vascular changes, which can precede macroscopic lung tissue destruction. We showed that DNO can be impaired before CT can detect emphysema. Furthermore, we showed that the distribution pattern of emphysema has an impact on the extent of airflow impairment. Current and former smokers with an apical predominance of emphysema showed more severe airflow limitation than subjects with an equal emphysema score, but a basal predominance of lung destruction.}, + copromotor = {P. Zanen}, + file = {Giet07d.pdf:pdf\\Giet07d.pdf:PDF}, + optnote = {DIAG, LungCancerScreening, RADIOLOGY}, + promotor = {M. Prokop and J. W. J. Lammers}, + school = {Utrecht University}, + year = {2007}, + journal = {PhD thesis}, +} + +@article{Giet10, + author = {H. A. Gietema and P. Zanen and A. Schilham and B. van Ginneken and R. J. van Klaveren and M. Prokop and J. W.J. Lammers}, + title = {Distribution of emphysema in heavy smokers: impact on pulmonary function}, + journal = RESPM, + year = {2010}, + volume = {104}, + pages = {76--82}, + doi = {10.1016/j.rmed.2009.08.004}, + abstract = {{PURPOSE}: {T}o investigate impact of distribution of computed tomography ({CT}) emphysema on severity of airflow limitation and gas exchange impairment in current and former heavy smokers participating in a lung cancer screening trial. {MATERIALS} {AND} {METHODS}: {I}n total 875 current and former heavy smokers underwent baseline low-dose {CT} (30 m{A}s) in our center and spirometry and diffusion capacity testing on the same day as part of the {D}utch-{B}elgian {L}ung {C}ancer {S}creening {T}rial ({NELSON}). 
{E}mphysema was quantified for 872 subjects as the number of voxels with an apparent lowered {X}-ray attenuation coefficient. {V}oxels attenuated <-950 {HU} were categorized as representing severe emphysema ({ES}950), while voxels attenuated between -910 {HU} and -950 {HU} represented moderate emphysema ({ES}910). {I}mpact of distribution on severity of pulmonary function impairment was investigated with logistic regression, adjusted for total amount of emphysema. {RESULTS}: {F}or {ES}910 an apical distribution was associated with more airflow obstruction and gas exchange impairment than a basal distribution (both p<0.01). {T}he {FEV}(1)/{FVC} ratio was 1.6\% (95\% {CI} 0.42\% to 2.8\%) lower for apical predominance than for basal predominance, for {T}lco/{V}({A}) the difference was 0.12\% (95\% {CI} 0.076-0.15\%). {D}istribution of {ES}950 had no impact on {FEV}(1)/{FVC} ratio, while an apical distribution was associated with a 0.076\% (95\% {CI} 0.038-0.11\%) lower {T}lco/{V}({A}) (p<0.001). {CONCLUSION}: {I}n a heavy smoking population, an apical distribution is associated with more severe gas exchange impairment than a basal distribution; for moderate emphysema it is also associated with a lower {FEV}(1)/{FVC} ratio. {H}owever, differences are small, and likely clinically irrelevant.}, + file = {Giet10.pdf:pdf\\Giet10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {19734030}, + month = {1}, + gsid = {3368562612864940397}, + gscites = {30}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88840}, + ss_id = {2262ebcd92f9597d37813d0d3d27d397260cf674}, + all_ss_ids = {['2262ebcd92f9597d37813d0d3d27d397260cf674']}, +} + +@article{Giet10a, + author = {H. A. Gietema and P. Zanen and A. M. R. Schilham and B. van Ginneken and R. J. van Klaveren and M. Prokop and J. W. J. Lammers}, + title = {Reply to {H}ochheggar et al}, + journal = RESPM, + year = {2010}, + volume = {104}, + pages = {1074}, + doi = {10.1016/j.rmed.2010.02.027}, + file = {Giet10a.pdf:pdf\\Giet10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + month = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/89855}, + ss_id = {93eee9d038ddbc0564db2fffd697209ba44d2f86}, + all_ss_ids = {['93eee9d038ddbc0564db2fffd697209ba44d2f86']}, + gscites = {0}, +} + +@article{Gige01, + author = {M. L. Giger and N. Karssemeijer and S. G. Armato}, + title = {Computer-aided diagnosis in medical imaging}, + journal = TMI, + year = {2001}, + volume = {20}, + pages = {1205--1208}, + file = {Gige01.pdf:pdf\\Gige01.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {11811820}, + gsid = {2190644524997558672}, + gscites = {238}, + ss_id = {b8aaa440384a41171db801a9dfd7e0eee001fa2c}, + all_ss_ids = {['b8aaa440384a41171db801a9dfd7e0eee001fa2c']}, +} + +@article{Gige13, + author = {Giger, Maryellen L. and Karssemeijer, Nico and Schnabel, Julia A.}, + title = {Breast Image Analysis for Risk Assessment, Detection, Diagnosis, and Treatment of Cancer}, + journal = ARBE, + year = {2013}, + volume = {15}, + pages = {327-57}, + doi = {10.1146/annurev-bioeng-071812-152416}, + abstract = {The role of breast image analysis in radiologists' interpretation tasks in cancer risk assessment, detection, diagnosis, and treatment continues to expand. Breast image analysis methods include segmentation, feature extraction techniques, classifier design, biomechanical modeling, image registration, motion correction, and rigorous methods of evaluation. 
We present a review of the current status of these task-based image analysis methods, which are being developed for the various image acquisition modalities of mammography, tomosynthesis, computed tomography, ultrasound, and magnetic resonance imaging. Depending on the task, image-based biomarkers from such quantitative image analysis may include morphological, textural, and kinetic characteristics and may depend on accurate modeling and registration of the breast images. We conclude with a discussion of future directions. Expected final online publication date for the Annual Review of Biomedical Engineering Volume 15 is July 11, 2013. Please see http://www.annualreviews.org/catalog/pubdates.aspx for revised estimates.}, + file = {Gige13.pdf:pdf\\Gige13.pdf:PDF}, + optnote = {DIAG}, + pmid = {23683087}, + month = {7}, + gsid = {6411119075964017758}, + gscites = {194}, + ss_id = {7b178f081b9c64dae9141dc9d197f49934b00c58}, + all_ss_ids = {['7b178f081b9c64dae9141dc9d197f49934b00c58']}, +} + +@article{Gils99, + author = {C. H. van Gils and J. H. Hendriks and R. Holland and N. Karssemeijer and J. D. Otten and H. Straatman and A. L. Verbeek}, + title = {Changes in mammographic breast density and concomitant changes in breast cancer risk}, + journal = EJCP, + year = {1999}, + volume = {8}, + pages = {509--515}, + file = {Gils99.pdf:pdf/Gils99.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {10643940}, + month = {12}, + gsid = {12917222000575413863}, + gscites = {105}, + ss_id = {214568f4f1bcdbd4423f944a5c6b282201370750}, + all_ss_ids = {['214568f4f1bcdbd4423f944a5c6b282201370750']}, +} + +@inproceedings{Ginn00, + author = {B. van Ginneken and B. M. ter Haar Romeny}, + title = {Automatic delineation of ribs in frontal chest radiographs}, + booktitle = MI, + year = {2000}, + volume = {3979}, + series = SPIE, + pages = {825-836}, + doi = {10.1117/12.387746}, + abstract = {{A}n automatic method for the delineation of posterior ribs in frontal chest radiographs is presented. {W}e develop a statistical shape model for the complete rib cage. {C}ontrary to previous work, we fit the global rib cage directly to a radiograph, instead of detecting rib border candidates locally and applying rules to infer the rib cage from these candidates. {E}ach posterior rib is modeled by two parallel parabolas. {T}he full rib cage, from rib 2 up and including rib 10, therefore contains 72 parameters. {T}his number is reduced with principal component analysis: {I}t is demonstrated that 10 parameters explain over 98% of the variability in a training set of 35 chest radiographs. {T}he rib cage is fitted with {P}owell's direction set method for optimizing the model parameters, with a fit measure that gives high output when rib borders are located on edge pixels in the image. {T}he method is robust and fairly accurate: {O}n the 35 test images with a resolution of 512 by 512 pixels, rib borders are located with an accuracy of 3 pixels on average.}, + file = {Ginn00.pdf:pdf\\Ginn00.pdf:PDF}, + gsid = {10267710118205806385}, + optnote = {DIAG, RADIOLOGY}, + month = {6}, + gscites = {43}, + ss_id = {68c218a07341bf3a5fd2a587ef4cfb1845046798}, + all_ss_ids = {['68c218a07341bf3a5fd2a587ef4cfb1845046798']}, +} + +@article{Ginn00a, + author = {B. van Ginneken and B. M. 
ter Haar Romeny}, + title = {Automatic segmentation of lung fields in chest radiographs}, + journal = MP, + year = {2000}, + volume = {27}, + pages = {2445--2455}, + doi = {10.1118/1.1312192}, + abstract = {{T}he delineation of important structures in chest radiographs is an essential preprocessing step in order to automatically analyze these images, e.g., for tuberculosis screening support or in computer assisted diagnosis. {W}e present algorithms for the automatic segmentation of lung fields in chest radiographs. {W}e compare several segmentation techniques: a matching approach; pixel classifiers based on several combinations of features; a new rule-based scheme that detects lung contours using a general framework for the detection of oriented edges and ridges in images; and a hybrid scheme. {E}ach approach is discussed and the performance of nine systems is compared with interobserver variability and results available from the literature. {T}he best performance is obtained by the hybrid scheme that combines the rule-based segmentation algorithm with a pixel classification approach. {T}he combinations of two complementary techniques leads to robust performance; the accuracy is above 94\% for all 115 images in the test set. {T}he average accuracy of the scheme is 0.969 +/- 0.0080, which is close to the interobserver variability of 0.984 +/- 0.0048. {T}he methods are fast, and implemented on a standard {PC} platform.}, + file = {Ginn00a.pdf:pdf\\Ginn00a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + pmid = {11099215}, + month = {10}, + gsid = {17671026951302443772}, + gscites = {158}, + ss_id = {54b1c3215b4de092378641f88dcd39a773671cd1}, + all_ss_ids = {['54b1c3215b4de092378641f88dcd39a773671cd1', '9d9ab68f1203691be52eab07457ffb7bf6bfdc2b']}, +} + +@article{Ginn00b, + author = {B. van Ginneken and B. M. ter Haar Romeny}, + title = {Applications of locally orderless images}, + journal = JVCIR, + year = {2000}, + volume = {11}, + pages = {196-208}, + doi = {10.1006/jvci.1999.0445}, + abstract = {{I}n a recent work, {K}oenderink and {V}an {D}oorn consider a family of three intertwined scale-spaces coined the locally orderless image ({LOI}). {T}he {LOI} essentially represents the image, observed at inner scale sigma, as a local histogram with bin-width beta, at each location, with a {G}aussian-shape region of interest of extent alpha. {LOI}s form a natural and elegant extension of scale-space theory, show causal consistency and enable the smooth transition between pixels, histograms and isophotes. {T}he aim of this work is to demonstrate the wide applicability and versatility of {LOI}s. {W}e present applications for a range of image processing tasks, including new non-linear diffusion schemes, adaptive histogram equalization and variations, several methods for noise and scratch removal, texture rendering, classiffication and segmentation.}, + file = {Ginn00b.pdf:pdf\\Ginn00b.pdf:PDF}, + gsid = {14802806611808806892}, + optnote = {DIAG, RADIOLOGY}, + month = {6}, + gscites = {17}, +} + +@inproceedings{Ginn01, + author = {B. van Ginneken}, + title = {Towards a clinical chest workstation}, + booktitle = {Proceedings of VISIM}, + file = {:pdf\\Ginn01.pdf:PDF}, + year = {2001}, + abstract = {Finding similar images, or reference cases, is one way to aid a radiologist during daily clinical practice. Attracting his attention to possible sites of lesions is another one, and directly providing a diagnosis would be the most useful, but in most cases unattainable form of assistance. 
However, all of these types of support require a set of computer analysis methods that needs to be developed or adapted for each clinical problem. In this work I outline a set of methods for automatic analysis of standard chest radiographs. The focus is on texture analysis and the method is applied to mass chest screening for tuberculosis and to the detection interstitial disease. I discuss various ways to use this method in practice, as part of a clinical chest workstation and hint on the role of CBIR in such workstations.}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Ginn01a, + author = {B. van Ginneken}, + title = {Computer-aided diagnosis in chest radiography}, + year = {2001}, + url = {http://igitur-archive.library.uu.nl/dissertations/1954192/UUindex.html}, + abstract = {Chest radiographs account for more than half of all radiological examinations; the chest is the mirror of health and disease. {T}his thesis is about techniques for computer analysis of chest radiographs. {I}t describes methods for texture analysis and segmenting the lung fields and rib cage in a chest film. {I}t includes a description of an automatic system for detecting regions with abnormal texture, that is applied to a database of images from a tuberculosis screening program.}, + copromotor = {B. M. ter Haar Romeny}, + file = {Ginn01a.pdf:pdf\\Ginn01a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + promotor = {M. A. Viergever}, + school = {Utrecht University}, + month = {6}, + gsid = {3030326376977132859}, + gscites = {51}, + journal = {PhD thesis}, + all_ss_ids = {['febeaee15cc4cdf91f445ce50e7b59160e90302b']}, +} + +@inproceedings{Ginn01b, + author = {B. van Ginneken and A. F. Frangi and J. J. Staal and B. M. ter Haar Romeny and M. A. Viergever}, + title = {A non-linear gray-level appearance model improves active shape model segmentation}, + booktitle = {IEEE Workshop on Mathematical Models in Biomedical Image Analysis}, + year = {2001}, + pages = {205-212}, + doi = {10.1109/MMBIA.2001.991735}, + abstract = {{A}ctive {S}hape {M}odels ({ASM}s), a knowledge-based segmentation algorithm developed by {C}ootes and {T}aylor, have become a standard and popular method for detecting structures in medical images. {I}n {ASM}s - and various comparable approaches - the model of the object's shape and of its gray-level variations is based the assumption of linear distributions. {I}n this work, we explore a new way to model the gray-level appearance of the objects, using a k-nearest-neighbors (k{NN}) classifier and a set of selected features for each location and resolution of the {A}ctive {S}hape {M}odel. {T}he construction of the k{NN} classifier and the se-lection of features from training images is fully automatic. {W}e compare our approach with the standard {ASM}s on synthetic data and in four medical segmentation tasks. {I}n all cases, the new method produces significantly better results (p < 0.001).}, + file = {Ginn01b.pdf:pdf\\Ginn01b.pdf:PDF}, + gsid = {9058761510841848250}, + optnote = {DIAG, RADIOLOGY}, + gscites = {52}, + ss_id = {793cc33a9f29953685ca942d513e82870e9ccd13}, + all_ss_ids = {['793cc33a9f29953685ca942d513e82870e9ccd13']}, +} + +@article{Ginn01c, + author = {van Ginneken, B. and ter Haar Romeny, B. M. and Viergever, M. 
A.}, + title = {Computer-aided diagnosis in chest radiography: a survey}, + journal = TMI, + year = {2001}, + volume = {20}, + pages = {1228--1241}, + doi = {10.1109/42.974918}, + abstract = {{T}he traditional chest radiograph is still ubiquitous in clinical practice, and will likely remain so for quite some time. {Y}et, its interpretation is notoriously difficult. {T}his explains the continued interest in computer-aided diagnosis for chest radiography. {T}he purpose of this survey is to categorize and briefly review the literature on computer analysis of chest images, which comprises over 150 papers published in the last 30 years. {R}emaining challenges are indicated and some directions for future research are given.}, + file = {Ginn01c.pdf:pdf\\Ginn01c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {11811823}, + gsid = {6827224050908194764}, + gscites = {597}, + ss_id = {ceddfad717e3e521d337f35904a1c78c1d75c490}, + all_ss_ids = {['ceddfad717e3e521d337f35904a1c78c1d75c490']}, +} + +@article{Ginn02, + author = {B. van Ginneken and A. F. Frangi and J. J. Staal and B. M. ter Haar Romeny and M. A. Viergever}, + title = {Active shape model segmentation with optimal features}, + journal = TMI, + year = {2002}, + volume = {21}, + pages = {924--933}, + doi = {10.1109/TMI.2002.803121}, + abstract = {{A}n active shape model segmentation scheme is presented that is steered by optimal local features, contrary to normalized first order derivative profiles, as in the original formulation [{C}ootes and {T}aylor, 1995, 1999, and 2001]. {A} nonlinear k{NN}-classifier is used, instead of the linear {M}ahalanobis distance, to find optimal displacements for landmarks. {F}or each of the landmarks that describe the shape, at each resolution level taken into account during the segmentation optimization procedure, a distinct set of optimal features is determined. {T}he selection of features is automatic, using the training images and sequential feature forward and backward selection. {T}he new approach is tested on synthetic data and in four medical segmentation tasks: segmenting the right and left lung fields in a database of 230 chest radiographs, and segmenting the cerebellum and corpus callosum in a database of 90 slices from {MRI} brain images. {I}n all cases, the new method produces significantly better results in terms of an overlap error measure (p < 0.001 using a paired {T}-test) than the original active shape model scheme.}, + file = {Ginn02.pdf:pdf\\Ginn02.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {8}, + pmid = {12472265}, + month = {8}, + gsid = {565682893025109187}, + gscites = {652}, + ss_id = {99840ed7cc49c379273099b57bf58f55f5274e64}, + all_ss_ids = {['99840ed7cc49c379273099b57bf58f55f5274e64']}, +} + +@article{Ginn02a, + author = {van Ginneken, B. and Katsuragawa, S. and ter Haar Romeny, B. M. and Doi, K. and Viergever, M. A.}, + title = {Automatic detection of abnormalities in chest radiographs using local texture analysis}, + journal = TMI, + year = {2002}, + volume = {21}, + pages = {139--149}, + doi = {10.1109/42.993132}, + abstract = {{A} fully automatic method is presented to detect abnormalities in frontal chest radiographs which are aggregated into an overall abnormality score. {T}he method is aimed at finding abnormal signs of a diffuse textural nature, such as they are encountered in mass chest screening against tuberculosis ({TB}). {T}he scheme starts with automatic segmentation of the lung fields, using active shape models. 
{T}he segmentation is used to subdivide the lung fields into overlapping regions of various sizes. {T}exture features are extracted from each region, using the moments of responses to a multiscale filter bank. {A}dditional "difference features" are obtained by subtracting feature vectors from corresponding regions in the left and right lung fields. {A} separate training set is constructed for each region. {A}ll regions are classified by voting among the k nearest neighbors, with leave-one-out. {N}ext, the classification results of each region are combined, using a weighted multiplier in which regions with higher classification reliability weigh more heavily. {T}his produces an abnormality score for each image. {T}he method is evaluated on two databases. {T}he first database was collected from a {TB} mass chest screening program, from which 147 images with textural abnormalities and 241 normal images were selected. {A}lthough this database contains many subtle abnormalities, the classification has a sensitivity of 0.86 at a specificity of 0.50 and an area under the receiver operating characteristic ({ROC}) curve of 0.820. {T}he second database consists of 100 normal images and 100 abnormal images with interstitial disease. {F}or this database, the results were a sensitivity of 0.97 at a specificity of 0.90 and an area under the {ROC} curve of 0.986.}, + file = {Ginn02a.pdf:pdf\\Ginn02a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + number = {2}, + pmid = {11929101}, + gsid = {5477064095233485439,10626615429567403174}, + gscites = {262}, + ss_id = {cdc58c3da5a84aa0cb1bc0d38d35c094c3e34579}, + all_ss_ids = {['cdc58c3da5a84aa0cb1bc0d38d35c094c3e34579']}, +} + +@inproceedings{Ginn02b, + author = {B. van Ginneken and B. M. ter Haar Romeny and M. A. Viergever}, + title = {Automatic segmentation and texture analysis of {PA} chest radiographs to detect abnormalities related to interstitial disease and tuberculosis}, + booktitle = CARS, + year = {2002}, + pages = {685-688}, + doi = {10.1007/978-3-642-56168-9_114}, + optnote = {DIAG, RADIOLOGY, TB}, + gsid = {902421355768523937}, + gscites = {3}, +} + +@article{Ginn03, + author = {B. van Ginneken}, + title = {Computerized detection of interstitial disease in chest radiographs}, + journal = MIT, + year = {2003}, + abstract = {In this article the elements that constitute a computer-aided diagnosis system for the detection of interstitial disease in chest radiographs are described. First, because of the diffuse nature of interstitial abnormalities, a method for texture analysis is usually involved. A second element is the classifier: In the classification stage, one has to choose between multiclass or one-class classification. A third aspect is the selection of regions of interest for analysis, which is linked to the segmentation problem. Finally, there are several ways to combine the results of analyses of ROIs into a final diagnosis of the complete image. Results from the approach I have adopted will be presented and some directions for future research are given.}, + volume = {21}, + pages = {15-20}, + optnote = {DIAG, RADIOLOGY}, + doi = {10.11409/mit.21.15}, + file = {pdf\\Ginn03.pdf:PDF}, +} + +@inproceedings{Ginn03a, + author = {B. van Ginneken and M. de Bruijne and M. Loog and M. A.
Viergever}, + title = {Interactive {S}hape {M}odels}, + booktitle = MI, + year = {2003}, + volume = {5032}, + series = SPIE, + pages = {1206--1216}, + doi = {10.1117/12.480165}, + abstract = {{S}upervised segmentation methods in which a model of the shape of an object and its gray-level appearance is used to segment new images have become popular techniques in medical image segmentation. {H}owever, the results of these methods are not always accurate enough. {W}e show how to extend one of these segmentation methods, active shape models ({ASM}) so that user interaction can be incorporated. {I}n this interactive shape model (i{ASM}), a user drags points to their correct position thus guiding the segmentation process. {E}xperiments for three medical segmentation tasks are presented: segmenting lung fields in chest radiographs, hand outlines in hand radiographs and thrombus in abdominal aorta aneurysms from {CTA} data. {B}y only fixing a small number of points, the part of sufficiently accurate segmentations can be increased from 20-70% for no interaction to over 95%. {W}e believe that i{ASM} can be used in many clinical applications.}, + file = {Ginn03a.pdf:pdf\\Ginn03a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {5452437485381444577}, + gscites = {34}, + ss_id = {3d2b3cfbd2d98c33a3667c5f13b12344353582f8}, + all_ss_ids = {['3d2b3cfbd2d98c33a3667c5f13b12344353582f8']}, +} + +@article{Ginn03b, + author = {B. van Ginneken and B. M. ter Haar Romeny}, + title = {Multi-scale texture classification from generalized locally orderless images}, + journal = PR, + year = {2003}, + volume = {36}, + pages = {899-911}, + doi = {10.1016/S0031-3203(02)00118-8}, + abstract = {{L}ocally orderless images are families of three intertwined scale-spaces that describe local histograms. {W}e generalize locally orderless images by considering local histograms of a collection of filtered versions of the image, and by extending them to joint probability distributions. {T}hese constructions can be used to derive texture features and are shown to be a more general description of two established texture classification methods, viz. filter bank methods and cooccurrence matrices. {B}ecause all scale parameters are stated explicitly in this formulation, multi-resolution feature sets can be extracted in a systematic way. {T}his includes new types of multi-resolution analysis, not only based on the spatial scale, but on the window size and intensity scale as well. {E}ach multi-resolution approach improves texture classification performance, the best result being obtained if a multi-resolution approach for all scale parameters is used. {T}his is demonstrated in experiments on a large data set of 1152 images for 72 texture classes.}, + file = {Ginn03b.pdf:pdf/Ginn03b.pdf:PDF}, + gsid = {3590992795902461226}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gscites = {33}, + ss_id = {631209b8421b31dc7911d58a324a8afa4cb29dcd}, + all_ss_ids = {['631209b8421b31dc7911d58a324a8afa4cb29dcd']}, +} + +@inproceedings{Ginn04, + author = {B. van Ginneken and M. Loog}, + title = {Pixel {P}osition {R}egression - {A}pplication to medical image segmentation}, + booktitle = ICPR, + year = {2004}, + pages = {718--721}, + doi = {10.1109/ICPR.2004.1334629}, + abstract = {{P}ixel position regression ({PPR}), an automatic supervised method for image segmentation, is presented. {T}he method uses a set of corresponding points indicated in each train image. 
{F}or each point in this set, the mean position in all train images is determined. {B}y warping the set of corresponding points to their mean positions, one can associate with each position in each train image a reference position. {PPR} estimates the reference position from a rich set of local image features through k-nearest-neighbor regression. {T}he deformation field thus obtained determines the segmentation. {I}t is demonstrated that the deformation field estimate can be improved by (weighted) blurring and more sophisticated methods such as global modeling of the deformation field through principal component analysis and iterated regression. {T}he method is evaluated on a set of chest radiographs in which the lung fields, heart and clavicles are segmented.}, + file = {Ginn04.pdf:pdf\\Ginn04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2680329712311893870}, + gscites = {11}, + ss_id = {5213634acf39625658b52e14b18b2fcc322044c6}, + all_ss_ids = {['5213634acf39625658b52e14b18b2fcc322044c6']}, +} + +@conference{Ginn05, + author = {B. van Ginneken and A. M. R. Schilham and H. Gietema and M. Prokop}, + title = {Automatic correction for the influence of inspiration on pulmonary emphysema quantification from multidetector {CT} scans}, + booktitle = RSNA, + year = {2005}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Ginn05a, + author = {B. van Ginneken and I. C. Sluimer and M. Prokop and I. Hartmann}, + title = {Automatic detection of the presence of hyperlucencies, fibrosis, ground-glass, consolidations, and focal abnormalities in high resolution {CT (HRCT)} lung data}, + booktitle = RSNA, + year = {2005}, + pages = {308}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Ginn06, + author = {B. van Ginneken}, + title = {Supervised probabilistic segmentation of pulmonary nodules in {CT} scans}, + booktitle = MICCAI, + year = {2006}, + volume = {4191}, + series = LNCS, + pages = {912-919}, + doi = {10.1007/11866763_112}, + abstract = {{A}n automatic method for lung nodule segmentation from computed tomography ({CT}) data is presented that is different from previous work in several respects. {F}irstly, it is supervised; it learns how to obtain a reliable segmentation from examples in a training phase. {S}econdly, the method provides a soft, or probabilistic segmentation, thus taking into account the uncertainty inherent in this segmentation task. {T}he method is trained and tested on a public data set of 23 nodules for which soft labelings are available. {T}he new method is shown to outperform a previously published conventional method. {B}y merely changing the training data, non-solid nodules can also be segmented.}, + file = {Ginn06.pdf:pdf\\Ginn06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17354860}, + gsid = {11048173155976316259}, + gscites = {42}, +} + +@inproceedings{Ginn06a, + author = {van Ginneken, B. and Mendrik, A.}, + title = {Image {D}enoising with k-nearest {N}eighbor and {S}upport {V}ector {R}egression}, + booktitle = ICPR, + year = {2006}, + volume = {3}, + pages = {603--606}, + doi = {10.1109/ICPR.2006.685}, + abstract = {{D}enoising is an important application of image processing, especially for medical image data. {T}hese images tend to be very noisy when a low radiation dose, less harmful to the patient, is used for acquisition. {F}or computed tomography ({CT}) data, it is possible to simulate realistic low dose images from the raw scanner data. 
{W}e use this data to construct a supervised denoising system, that learns an optimal mapping from input features to denoised voxel values. {A}s input features we use several general filters and the output of existing standard noise reduction filters, notably non-linear diffusion schemes. {A}fter feature selection, these are mapped to the denoised values by k-nearest neighbor and support vector regression. {T}he resulting regression denoising systems are shown to perform significantly better than non-linear diffusion schemes, {G}aussian smoothing and median filtering in experiments on {CT} chest scans.}, + file = {Ginn06a.pdf:pdf\\Ginn06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6718720819333291168}, + gscites = {15}, + ss_id = {eb2b4becf4429230504117b0654db1b7760695d8}, + all_ss_ids = {['eb2b4becf4429230504117b0654db1b7760695d8']}, +} + +@article{Ginn06b, + author = {B. van Ginneken and M. B. Stegmann and M. Loog}, + title = {Segmentation of anatomical structures in chest radiographs using supervised methods: a comparative study on a public database}, + journal = MIA, + year = {2006}, + volume = {10}, + pages = {19-40}, + doi = {10.1016/j.media.2005.02.002}, + abstract = {{T}he task of segmenting the lung fields, the heart, and the clavicles in standard posterior-anterior chest radiographs is considered. {T}hree supervised segmentation methods are compared: active shape models, active appearance models and a multi-resolution pixel classification method that employs a multi-scale filter bank of {G}aussian derivatives and a k-nearest-neighbors classifier. {T}he methods have been tested on a publicly available database of 247 chest radiographs, in which all objects have been manually segmented by two human observers. {A} parameter optimization for active shape models is presented, and it is shown that this optimization improves performance significantly. {I}t is demonstrated that the standard active appearance model scheme performs poorly, but large improvements can be obtained by including areas outside the objects into the model. {F}or lung field segmentation, all methods perform well, with pixel classification giving the best results: a paired t-test showed no significant performance difference between pixel classification and an independent human observer. {F}or heart segmentation, all methods perform comparably, but significantly worse than a human observer. {C}lavicle segmentation is a hard problem for all methods; best results are obtained with active shape models, but human performance is substantially better. {I}n addition, several hybrid systems are investigated. {F}or heart segmentation, where the separate systems perform comparably, significantly better performance can be obtained by combining the results with majority voting. {A}s an application, the cardio-thoracic ratio is computed automatically from the segmentation results. {B}land and {A}ltman plots indicate that all methods perform well when compared to the gold standard, with confidence intervals from pixel classification and active appearance modeling very close to those of a human observer. {A}ll results, including the manual segmentations, have been made publicly available to facilitate future comparative studies.}, + file = {Ginn06b.pdf:pdf\\Ginn06b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {15919232}, + month = {2}, + gsid = {13219292505695896632}, + gscites = {410}, +} + +@conference{Ginn06c, + author = {B. van Ginneken and M. A. J. Klik and E. M. van Rikxoort and H. Gietema and J. 
Peters and M. Prokop}, + title = {Computer-aided diagnosis for distinguishing benign perifissural opacities from other pulmonary nodules in computed tomography chest scans}, + booktitle = RSNA, + year = {2006}, + pages = {598}, + abstract = {Purpose: Around a third of pulmonary nodules detected with CT lung cancer screening programs are perifissural opacities (PFOs). These nodules are flattened and attached to a pulmonary fissure and represent benign findings. A computer-aided diagnosis system is presented that automatically determines if a nodule is a PFO. Method: Data was obtained from a lung cancer screening program with low dose multidetector CT (Philips Mx8000IDT, 16 x 0.75 mm collimation, 30 mAs). Only nodules with a volume between 50 and 500 mm3 were considered. Scans from 221 patients containing at least one such nodule were randomly selected, resulting in a set of 284 nodules. A radiologist determined which of these findings represented PFOs. Around each nodule a volume of interest (VOI) of 60 x 60 x 60 mm was extracted. An automatic algorithm segmented all nodules. Another automatic algorithm detected voxels belonging to fissures in the VOI, using a plate detector based on density values and the directions of principal curvature and a grouping algorithm to remove false isolated responses. Hough transforms were applied to the fissure voxels and the nodule boundary voxels to determine if the nodule had a flattened side that coincided with an attached fissure. From this analysis a number of numerical features were extracted. In addition, a number of features describing the shape of the nodule were computed. A Parzen density classifier was used to infer the probability that a nodule was a PFO from these features. Cross validation was used to train and test the system. Results: From the 284 nodules, 99 (35%) were identified as PFOs by the radiologist. The system obtained an area under the ROC curve of 0.80. It could detect 40% of all PFOs without any false positive finding and 65% at 95% specificity. PFOs that were not well detected were usually attached to fissures that were barely visible in the low dose data. Conclusion: Computer-aided diagnosis can be used to identify a large amount of benign nodules from chest CT data. This can be used in the work-up of patients with pulmonary nodules and may prevent unnecessary repeat examinations.}, + file = {Ginn06c.pdf:pdf\\Ginn06c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Ginn07, + author = {B. van Ginneken and T. Heimann and M. Styner}, + title = {{3D} {S}egmentation in the {C}linic: {A} {G}rand {C}hallenge}, + booktitle = {3D Segmentation in the Clinic: A Grand Challenge}, + year = {2007}, + pages = {7-15}, + abstract = {{T}his paper describes the set-up of a segmentation competition for automatic and semi-automatic extraction of the liver from computed tomography scans and the caudate nucleus from brain {MRI} data. {T}his competition was held in the form of a workshop at the 2007 {M}edical {I}mage {C}omputing and {C}omputer {A}ssisted {I}ntervention conference. {T}he rationale for organizing the competition is discussed, the training and test data sets for both segmentation tasks are described and the scoring system used to evaluate the segmentation is presented.}, + file = {Ginn07.pdf:pdf\\Ginn07.pdf:PDF}, + gsid = {16549881996732767718}, + optnote = {DIAG, RADIOLOGY}, +} + +@book{Ginn07a, + author = {T. Heimann and M. Styner and B. 
van Ginneken}, + title = {{3D} Segmentation in the Clinic: A Grand Challenge}, + year = {2007}, + publisher = {MICCAI}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16549881996732767718,13002973049163846939,11855423073604549115}, + gscites = {190}, +} + +@inproceedings{Ginn08, + author = {B. van Ginneken and W. Baggerman and E. M. van Rikxoort}, + title = {Robust segmentation and anatomical labeling of the airway tree from thoracic {CT} scans}, + booktitle = MICCAI, + year = {2008}, + volume = {5241}, + series = LNCS, + pages = {219--226}, + doi = {10.1007/978-3-540-85988-8_27}, + abstract = {{A} method for automatic extraction and labeling of the airway tree from thoracic {CT} scans is presented and extensively evaluated on 150 scans of clinical dose, low dose and ultra-low dose data, in inspiration and expiration from both relatively healthy and severely ill patients. {T}he method uses adaptive thresholds while growing the airways and it is shown that this strategy leads to a substantial increase in the number, total length and number of correctly labeled airways extracted. {F}rom inspiration scans on average 170 branches are found, from expiration scans 59.}, + file = {Ginn08.pdf:pdf\\Ginn08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {18979751}, + gsid = {11381407062701827735}, + gscites = {95}, + ss_id = {d38f3d596081bef8ffe67e92d112e0633d0be0fe}, + all_ss_ids = {['d38f3d596081bef8ffe67e92d112e0633d0be0fe']}, +} + +@article{Ginn08a, + author = {B. van Ginneken}, + title = {Computer-{A}ided {D}iagnosis in {T}horacic {C}omputed {T}omography}, + journal = IDMRI, + year = {2008}, + volume = {12}, + pages = {11-22}, + doi = {10.1111/j.1617-0830.2009.00129.x}, + abstract = {{C}urrent computed tomography ({CT}) technology allows for isotropic, sub-millimetre resolution acquisition of the thorax in a few seconds. {T}hese thin-slice chest scans have become indispensable in thoracic radiology, but have also increased the time and effort required from radiologists for reporting. {I}ndustry has focused on the development of computer-aided diagnosis ({CAD}) tools to facilitate the interpretation of thoracic {CT} data. {I}n this paper {I} discuss the three most "senior" {CAD} applications for chest {CT}: nodule detection, nodule volumetry and quantification of emphysema. {A}re these applications ready for widespread application?}, + file = {Ginn08a.pdf:pdf\\Ginn08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + month = {9}, + gsid = {282439729250169033}, + gscites = {16}, + ss_id = {0c4d24dadb96b397af79b4d6ce7087548c797d72}, + all_ss_ids = {['0c4d24dadb96b397af79b4d6ce7087548c797d72']}, +} + +@inproceedings{Ginn08b, + author = {B. van Ginneken and A. Tan and K. Murphy and B. J. de Hoop and M. Prokop}, + title = {Automated detection of nodules attached to the pleural and mediastinal surface in low-dose {CT} scans}, + booktitle = MI, + year = {2008}, + volume = {6915}, + series = SPIE, + pages = {69150X1--69150X10}, + doi = {10.1117/12.772298}, + abstract = {{T}his paper presents a new computer-aided detection scheme for lung nodules attached to the pleural or mediastinal surface in low dose {CT} scans. {F}irst the lungs are automatically segmented and smoothed. {A}ny connected set of voxels attached to the wall - with each voxel above minus 500 {HU} and the total object within a specified volume range - was considered a candidate finding. {F}or each candidate, a refined segmentation was computed using morphological operators to remove attached structures.
{F}or each candidate, 35 features were defined, based on their position in the lung and relative to other structures, and the shape and density within and around each candidate. {I}n a training procedure an optimal set of 15 features was determined with a k-nearest-neighbor classifier and sequential floating forward feature selection. {T}he algorithm was trained with a data set of 708 scans from a lung cancer screening study containing 224 pleural nodules and tested on an independent test set of 226 scans from the same program with 58 pleural nodules. {T}he algorithm achieved a sensitivity of 52% with an average of 0.76 false positives per scan. {A}t 2.5 false positive marks per scan, the sensitivity increased to 80%.}, + file = {Ginn08b.pdf:pdf\\Ginn08b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, noduleDetectionCT}, + month = {3}, + ss_id = {1a0ce62f3c2a0c435c4493e3b287d9c481aef1c7}, + all_ss_ids = {['1a0ce62f3c2a0c435c4493e3b287d9c481aef1c7']}, + gscites = {2}, +} + +@conference{Ginn08c, + author = {B. van Ginneken and B. de Hoop and M. Prokop}, + title = {Automatic {E}stimation of {T}hree-{D}imensional {L}ung {V}olume from {P}osterior-{A}nterior and {L}ateral {C}hest {R}adiographs}, + booktitle = RSNA, + year = {2008}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Ginn08d, + author = {B. van Ginneken and B. de Hoop and E. M. van Rikxoort and K. Murphy and M. Prokop}, + title = {Relating {A}irway {M}orphology in {H}eavy {S}mokers to {P}ulmonary {F}unction {I}mpairment}, + booktitle = RSNA, + year = {2008}, + abstract = {PURPOSE We hypothesize that in some patients chronic obstructive pulmonary disease reveals itself in thoracic CT scans not only through the presence of emphysematous regions but also as visible changes in airway morphology. This study relates automatically extracted measures of airway morphology to lung function impairment in heavy smokers. METHOD AND MATERIALS As part of a lung cancer screening trial, 508 male current and former heavy smokers underwent low-dose CT (16x0.75mm, 120-140 kVp, 30 mAs) at full inspiration, ultra low-dose CT (90 kVp, 20 mAs) at full expiration and pulmonary function testing on the same day. In house developed software extracted and labeled the bronchial tree in both inspiration and expiration scans automatically. In each scan, the total number of extracted airway branches per generation, their lengths and average diameters were computed for the 32 central bronchi individually (up to the segmental level) and for the peripheral bronchi (beyond this level and up to 16th bronchial generation) pooled per generation. Ratios between measurements at inspiration and expiration were determined as well. Linear discriminant analysis was used to detect lung function impairment (defined as FEV1/FVC<0.7) from these airway measurements. Results are reported as sensitivity at a specificity level of 90%. RESULTS Lung function impairment was seen in 181 individuals (36%). It was mild (FEV1 > 80% of predicted value) in 61% and severe (FEV1 < 50%) in less than 7% of these cases. The linear discriminate analysis using all automated airway measurements was able to identify 47% of these 181 individuals. Sensitivity dropped to 40% and 20%, respectively if only measurements from inspiration or expirations scans were used. When only central or peripheral airways were evaluated, sensitivity was reduced to 36% and 38%, respectively. 
CONCLUSION A substantial percentage of heavy smokers with mildly impaired lung function display abnormal airway morphology that can be detected automatically from CT data. CLINICAL RELEVANCE/APPLICATION The results contribute to the knowledge about COPD. Quantitative measures of airway morphology, automatically extracted from CT data, may become a biomarker for severity and progression of COPD.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Ginn08e, + author = {B. van Ginneken and E. M. van Rikxoort and W. Baggerman and B. de Hoop and M. Prokop}, + title = {Automatic {E}xtraction and {A}natomical {L}abeling of the {A}irway {T}ree from {I}nspiration and {E}xpiration {T}horacic {CT} {S}cans}, + booktitle = RSNA, + year = {2008}, + abstract = {PURPOSE To develop and validate a robust technique for automatic extraction and labeling of the airway tree from thoracic CT scans. METHOD AND MATERIALS Three sets of 50 scans were used. For set 1, 50 heavy smokers underwent low-dose CT (16x0.75mm, 30 mAs, 120-140 kVp, full inspiration) for a lung cancer screening trial. For set 2, the same 50 subjects were scanned again in full expiration at ultra low-dose (20 mAs, 90 kVp). Set 3 comprised 50 scans, many including gross pulmonary opacifications, from patients with interstitial lung disease. These were acquired at clinical dose (120-170 mAs, 120 kVp), in full inspiration, with and without contrast material. The method starts in the trachea to grow the airways, using locally adaptive criteria for accepting voxels as part of a bronchus. While growing, the branch centerline, orientation and diameter are tracked and bifurcations are detected. A rule set based on diameter and orientation of the current segment and its parent determines if a potential segment is accepted, or discarded as leakage. When extraction is completed, the 32 central bronchi are labeled automatically based on parent-child relationships. Incorrect segments were visually identified. The number, generation and total length of extracted airways was evaluated. RESULTS The method required around 20 seconds of computation time and segmentations contained almost no false positive segments (<1%). In Set 1 and 3 on average 166 and 174 branches were extracted with an average total length of 2183 and 1949 mm. For the noisy expiration data in Set 2 this was substantially less: 59 segments and 789 mm. After the 6th generation, on average less segments were extracted but usually the tree included some segments up to the 10th generation. Central bronchi were found in 93% of all cases. CONCLUSION Fast and automatic extraction of airway tree including most central bronchi and many peripheral bronchi is feasible in inspiration CT scans, but challenging in expiration data. CLINICAL RELEVANCE/APPLICATION Quantitative descriptors of airway morphology are essential to measure progression of lung diseases such as COPD, asthma, CF, ILD. This requires robust automatic extraction of the airway tree.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Ginn09a, + author = {B. van Ginneken and L. Hogeweg and M. Prokop}, + title = {Computer-aided diagnosis in chest radiography: beyond nodules}, + journal = EJR, + year = {2009}, + volume = {72}, + pages = {226--230}, + doi = {10.1016/j.ejrad.2009.05.061}, + abstract = {{C}hest radiographs are the most common exam in radiology. {T}hey are essential for the management of various diseases associated with high mortality and morbidity and display a wide range of findings, many of them subtle. 
{I}n this survey we identify a number of areas beyond pulmonary nodules that could benefit from computer-aided detection and diagnosis ({CAD}) in chest radiography. {T}hese include interstitial infiltrates, catheter tip detection, size measurements, detection of pneumothorax and detection and quantification of emphysema. {R}ecent work in these areas is surveyed, but we conclude that the amount of research devoted to these topics is modest. {R}easons for the slow pace of {CAD} development in chest radiography beyond nodules are discussed.}, + file = {Ginn09a.pdf:pdf\\Ginn09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {19604661}, + month = {11}, + gsid = {4166719588344969996}, + gscites = {97}, + ss_id = {1bdcdb8e63f995041451642c53c434face9033f7}, + all_ss_ids = {['1bdcdb8e63f995041451642c53c434face9033f7']}, +} + +@conference{Ginn09b, + author = {B. van Ginneken and K. Murphy and E. M. van Rikxoort and I. I{\v{s}}gum and B. de Hoop and M. Prokop and P. A. de Jong and H. Gietema}, + title = {Quantification of {E}mphysema and {S}mall {A}irway {D}isease in {COPD} {P}atients from {L}obar {A}nalysis of {V}olumetric {I}nspiration and {E}xpiration {T}horacic {CT} {S}cans}, + booktitle = RSNA, + year = {2009}, + abstract = {PURPOSE In COPD patients, airflow obstruction can be caused by emphysema and/or small airways disease, leading to air trapping. We quantified both underlying causes of COPD from lobar analysis of paired inspiration-expiration CT scans. METHOD AND MATERIALS A database was collected of 231 patients and heavy smokers with GOLD stages 0 (at risk) to 4 who received volumetric inspiration CT (16x0.75mm, 120-140 kVp, 30-160 mAs), ultra low dose volumetric expiration CT (90 kVp, 20 mAs) and pulmonary function testing on the same day. In house developed software automatically extracted the five pulmonary lobes in all scans. Cases where segmentation failed in one scan in a pair were excluded. As a measure for emphysema, the percentages of lobar volume below -950 HU were computed in inspiration scans (IN-950). As a quantification of either emphysema or air-trapping, the percentages of lobar volume below -850 HU were computed in expiration scans (EX-850). A lobe was considered to be affected by emphysema if IN-950 was larger than the 95th percentile for that lobe in GOLD 0 subjects. Otherwise, the lobe was considered to be affected by air-trapping if EX-850 was larger than the 95th percentile for that lobe in GOLD 0 subjects. Otherwise, the lobe was considered normal. If four or five lobes were affected by emphysema, airtrapping or were normal, that subject was classified as emphysema dominant (ED), air trapping dominant (ATD) or normal; otherwise, the COPD phenotype was considered 'mixed'. RESULTS 213 pairs were available for analysis with GOLD stages 0-4: 49, 49, 50, 40, 25, respectively. The occurrence of normal, ED, ATD and mixed phenotypes for GOLD 0 subjects was 98%, 2% 0%, and 6%. For GOLD 1: 69%, 4%, 0%, 27%. For GOLD 2: 36%, 20%, 14%, 30%. For GOLD 3: 5%, 47%, 15%, 33%. For GOLD 4: 0%, 84%, 4%, 12%. CONCLUSION Lobar analysis of inspiration and expiration CT scan revealed different patterns in COPD patients. For the lower GOLD stages (1-3), a substantial subgroup of COPD patients have predominantly small airway disease, and a mixed pattern with emphysema and air trapping affecting different lobes is also a common finding. COPD patients in GOLD category 4 usually have four or all lobes affected by emphysema. 
CLINICAL RELEVANCE/APPLICATION This study indicates that the combination of inspiration and expiration CT can be used to identify COPD patients with predominantly small airway disease, for whom drug treatment could be effective.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2335580216503493786}, + gscites = {3}, +} + +@inproceedings{Ginn10, + author = {B. van Ginneken}, + title = {Computer-aided diagnosis in chest imaging: how to improve performance and avoid reinventing the wheel}, + booktitle = ISBI, + year = {2010}, + pages = {274}, + doi = {10.1109/ISBI.2010.5490361}, + file = {Ginn10.pdf:pdf\\Ginn10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2207935246817237524}, + gscites = {2}, +} + +@article{Ginn10a, + author = {B. van Ginneken and S. G. Armato and B. de Hoop and S. van de Vorst and T. Duindam and M. Niemeijer and K. Murphy and A. M. R. Schilham and A. Retico and M. E. Fantacci and N. Camarlinghi and F. Bagagli and I. Gori and T. Hara and H. Fujita and G. Gargano and R. Belloti and F. De Carlo and R. Megna and S. Tangaro and L. Bolanos and P. Cerello and S. C. Cheran and E. Lopez Torres and M. Prokop}, + title = {Comparing and combining algorithms for computer-aided detection of pulmonary nodules in computed tomography scans: the {ANODE09} study}, + journal = MIA, + year = {2010}, + volume = {14}, + pages = {707-722}, + doi = {10.1016/j.media.2010.05.005}, + abstract = {{N}umerous publications and commercial systems are available that deal with automatic detection of pulmonary nodules in thoracic computed tomography scans, but a comparative study where many systems are applied to the same data set has not yet been performed. {T}his paper introduces {ANODE}09 (http://anode09.isi.uu.nl), a database of 55 scans from a lung cancer screening program and a web-based framework for objective evaluation of nodule detection algorithms. {A}ny team can upload results to facilitate benchmarking. {T}he performance of six algorithms for which results are available are compared; five from academic groups and one commercially available system. {A} method to combine the output of multiple systems is proposed. {R}esults show a substantial performance difference between algorithms, and demonstrate that combining the output of algorithms leads to marked performance improvements.}, + file = {Ginn10a.pdf:pdf\\Ginn10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, noduleDetectionCT}, + pmid = {20573538}, + month = {12}, + gsid = {16898155382402394650}, + gscites = {287}, + ss_id = {39ae90f765547171cc6163ba3ab2e977c86c03d3}, + all_ss_ids = {['39ae90f765547171cc6163ba3ab2e977c86c03d3']}, +} + +@book{Ginn10b, + author = {B. van Ginneken and K. Murphy and T. Heimann and V. Pekar and X. Deng}, + title = {Medical {I}mage {A}nalysis for the {C}linic: a {G}rand {C}hallenge}, + year = {2010}, + publisher = {CreateSpace}, + url = {http://www.amazon.com/Medical-Image-Analysis-Clinic-Challenge/dp/1453759395/}, + abstract = {This book contains the proceedings of Medical Image Analysis for the Clinic - A Grand Challenge, a workshop that was part of MICCAI 2010, the 13th International Conference on Medical Image Computing and Computer Assisted Intervention, and was held in Beijing, China, on September 24, 2010. This is the fourth edition of the Grand Challenges in Medical Image Analysis workshops held during MICCAI since 2007. 
The proceedings contain papers on three challenges: EMPIRE10 (Evaluation of Methods for Pulmonary Image Registration 2010, http://empire10.isi.uu.nl); SKI10 (Segmentation of Knee Images 2010, http://www.ski10.org); Head & Neck Auto-segmentation Challenge 2010: Segmentation of the Parotid Glands (http://www.grand-challenge2010.ca/)}, + file = {Ginn10b.pdf:pdf\\Ginn10b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Ginn11, + author = {van Ginneken, B. and Schaefer-Prokop, C. M. and Prokop, M.}, + title = {Computer-aided Diagnosis: how to Move from the Laboratory to the Clinic}, + journal = Radiology, + year = {2011}, + volume = {261}, + number = {3}, + pages = {719--732}, + doi = {10.1148/radiol.11091710}, + abstract = {Computer-aided diagnosis (CAD), encompassing computer-aided detection and quantification, is an established and rapidly growing field of research. In daily practice, however, most radiologists do not yet use CAD routinely. This article discusses how to move CAD from the laboratory to the clinic. The authors review the principles of CAD for lesion detection and for quantification and illustrate the state-of-the-art with various examples. The requirements that radiologists have for CAD are discussed: sufficient performance, no increase in reading time, seamless workflow integration, regulatory approval, and cost efficiency. Performance is still the major bottleneck for many CAD systems. Novel ways of using CAD, extending the traditional paradigm of displaying markers for a second look, may be the key to using the technology effectively. The most promising strategy to improve CAD is the creation of publicly available databases for training and validation. This can identify the most fruitful new research directions, and provide a platform to combine multiple approaches for a single task to create superior algorithms. © RSNA, 2011.}, + file = {Ginn11.pdf:pdf\\Ginn11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22095995}, + month = {12}, + gsid = {11154494557308057950,14993629594919203131}, + gscites = {236}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/96751}, + ss_id = {cb1fabd765f28f4e61a5e6771b1468cb9a113fc4}, + all_ss_ids = {['cb1fabd765f28f4e61a5e6771b1468cb9a113fc4']}, +} + +@conference{Ginn12, + author = {B. van Ginneken and L. Hogeweg and P. Maduskar and L. Peters-Bax and R. Dawson and K. Dheda and H. Ayles and J. Melendez and C. I. S\'{a}nchez}, + title = {Performance of inexperienced and experienced observers in detection of active tuberculosis on digital chest radiographs with and without the use of computer-aided diagnosis}, + booktitle = RSNA, + year = {2012}, + abstract = {PURPOSE: Chest radiography is an important diagnostic test for the detection of tuberculosis (TB) but there are not enough experts to read chest radiographs (CXRs) in high burden countries. We compared the reading performance of inexperienced observers with and without the support of a computer-aided diagnosis (CAD) system with that of an experienced reader. METHOD AND MATERIALS: A set of 100 digital CXRs (Oldelca DR, Delft Imaging Systems, Veenendaal, The Netherlands) of TB suspects was collected from two sites in Sub-Saharan Africa. Sputum culture was used as the reference standard. All cases were scored on a scale of 0 to 100 for the presence of active TB by seven non-experts (undergraduate medical students) and one expert CRRS certified reader for reading CXRs for TB.
Prior to reading, the non-experts received one hour of general instruction from a thoracic radiologist and one hour of case reading training with another CRRS certified reader. Cases were also processed by a CAD system (CAD4TB, version 1.08, Diagnostic Image Analysis Group, Nijmegen, The Netherlands). Scores of human readers and CAD were independently combined by averaging. Performance was evaluated as area under the ROC curve (Az), multi-reader-multi-case (MRMC) analysis was used to compare performance with and without CAD and pairwise comparisons were made with bootstrap estimation. p<0.05 was considered significant. RESULTS: The data set contained 56 negative and 44 positive cases. The expert reader scored Az = 0.84. The non-experts scored on average Az = 0.80, range 0.69-0.86. CAD standalone scored Az = 0.82. With CAD, all readers improved performance (Az = 0.85 for the expert, for non-experts average Az = 0.82, range 0.73-0.87). For four out of seven non-experts, the increase was significant and MRMC indicated an overall significant increase in performance for reading with CAD. CONCLUSION: Diagnostic performance of non-experts for detection of active TB on digital CXRs is good and similar to that of an expert. Support from CAD further improves performance. Digital chest radiography with reading of non-experts with little training can be used for active TB case finding at reasonable sensitivity with very high specificity, or as a first line test with high sensitivity and reasonable specificity. CLINICAL RELEVANCE/APPLICATION: Digital chest radiography and reading by inexperienced readers supported by CAD can be a fast, simple and low-cost point-of-care diagnostic for TB.}, + optnote = {DIAG, RADIOLOGY, TB}, +} + +@conference{Ginn13, + author = {Bram van Ginneken and Eva M van Rikxoort and Sven J Lafebre and Colin Jacobs and Michael Schmidt and Jan-Martin Kuhnigk and Mathias Prokop and Cornelia M Schaefer-Prokop and Charbonnier, JP. and Laurens Hogeweg and Pragnya Maduskar and Leticia Gallardo-Estrella and Rick Philipsen and Bianca Lassen}, + title = {{CIRRUS} {L}ung: an optimized workflow for quantitative image analysis of thoracic computed tomography and chest radiography for major pulmonary diseases: chronic obstructive pulmonary disease, lung cancer and tuberculosis}, + booktitle = RSNA, + year = {2013}, + abstract = {BACKGROUND Lung diseases are among the most deadly disorders: chronic obstructive pulmonary disease ({COPD}), a devastating disease with 12 million people in the United States currently diagnosed, ranks #3 on the list of causes of death wordwide. Lung cancer, by far the most common and most deadly cancer in men and women worldwide, ranks #5. Tuberculosis ({TB}), despite the availability of a cheap and effective cure, ranks #10. Imaging is crucially important for early detection, diagnosis, follow-up, and treatment planning of {COPD}, lung cancer and {TB}. Chest radiography and computed tomography are the most important imaging modalities for the lung. METHODOLOGY/APPLICATION We present a flexible workstation for a quick and effective extraction of quantitative imaging parameters related to {COPD}, lung cancer and {TB}. The workstation loads an arbitrary number of {CT} and chest radiography studies of each subject simultaneously, allowing the user to instantly track the evolution of any lesion. Each {CT} scan is elastically registered to all prior {CT} scans of the same subject. 
Findings in prior scans have been automatically propagated and linked to findings in the current scan. All scans and processing results are preloaded in the background to ensure rapid reading. The {CIRRUS} {L}ung workstation has been developed jointly by the Diagnostic Image Analysis Group, Radboud University Nijmegen Medical Centre, Nijmegen The Netherlands, and Fraunhofer MEVIS, Bremen, Germany. It is based on the MeVisLab software platform. The workstation is available through research collaboration agreements and in active use in a variety of projects . {CIRRUS} {L}ung has a number of modes that will be demonstrated: 1) High throughput lung screening. Scan quality is automatically assessed; data with low quality, artifacts or underlying interstitial lung disease are flagged. High sensitivity computerized detection (CAD) of solid nodules and sub-solid nodules is included. High throughput reading with CAD as a first reader is supported. Each nodule is automatically characterized as solid, part-solid, non-solid, or benign (calcified lesions, perifissural lymph nodes). Volumetry, volume growth rate, mass and mass growth rate are automatically computed with advanced segmentation algorithms that have can handle sub-solid lesions and segment the solid core of part-solid nodules. Findings are summarized in a structured report. Follow-up recommendation according to Fleischner guidelines are included. 2) Clinical oncology work-up. This mode is similar to the screening mode, but includes completely automatic generation of {RECIST} workup. 3) Chest radiography lung screening. Chest radiographs can be annotated and viewed with various tools such as bone suppression and gray scale inversion. Computer-aided detection and interactive CAD reading are supported. 4) {COPD} quantification. Elastic registration between inspiration and expiration scans has been precomputed and allows for linked scrolling. Lungs, lobes, airways, fissures, and segments are automatically segmented for regional functional analysis. In case the user is not satisfied with the segmentation results, (s)he can quickly correct these with an intuitive interactive correction method. {CT} image standardization is included using a precomputed dedicated energy correction algorithm that makes quantifications less dependent on scan protocol (scanner model, kernel, iterative reconstruction). Once the segmentations have been approved, a range of quantifiable features can be visualized in the workstation: parenchyma features, airway features, and fissural completeness. Measurements are reported for both inspiration and expiration for the whole lung as well as per lobe and segment. Changes between inspiration and expiration are reported. After workup of a study of a {COPD} patient, a structured report is produced that contains screenshots, renderings, and all requested measurements. 5) {TB} Diagnostics. In this mode chest radiographs can be inspected and texture analysis that detects + abnormalities consistent with {TB} can be inspected. A novel symmetry analysis is available to facilitate contralateral comparisons. Detection and quantification of costophrenic angle bluntness is included. Cavities can be semi-automatically segmented. DEMONSTRATION STRATEGY The exhibit will be accompanied by an informational poster that will highlight the key features and algorithmic concepts that underlie the automated analysis. Attendees will be able to gain hands-on experience with the workstation and read cases. 
For each reading mode, extensive example datasets are available. In particular, the completely processed {LIDC/IDRI} database, including all {CT} scans and chest radiographs, is available for inspection. REFERENCES AND PUBLICATIONS The algorithms presented in the showcase are based on over 20 different journal publications. These are listed on http://cirrus.diagnijmegen.nl.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Ginn13a, + author = {van Ginneken, B. and Philipsen, R. H. H. M. and Hogeweg, L. and Maduskar, P. and Melendez, J. C. and S\'{a}nchez, C. I. and Maane, R. and dei Alorse, B. and {d'Alessandro}, U. and Adetifa, I. M. O.}, + title = {Automated Scoring of Chest Radiographs for Tuberculosis Prevalence Surveys: A Combined Approach}, + booktitle = {The Fifth International Workshop on Pulmonary Image Analysis}, + year = {2013}, + pages = {9--19}, + abstract = {Chest radiography is one of the key techniques for investigating suspected tuberculosis (TB). Computerized reading of chest radiographs (CXRs) is an appealing concept because there is a severe shortage of human experts trained to interpret CXRs in countries with a high prevalence of TB. This paper presents a comprehensive computerized system for the detection of abnormalities in CXRs and evaluates the system on digital data from a TB prevalence survey in The Gambia. The system contains algorithms to normalize the images, segment the lung fields, analyze the shape of the segmented lungs, detect textural abnormalities, measure bluntness of the costophrenic angles and quantify the asymmetry in the lung fields. These subsystems are combined with a Random Forest classifier into an overall score indicating the abnormality of the radiograph. The results approach the performance of an independent human reader.}, + file = {Ginn13a.pdf:pdf\\Ginn13a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {614866246313148987}, + gscites = {5}, + ss_id = {55fe10fb639ffc833c1ba84dcb2b673275ef3773}, + all_ss_ids = {['55fe10fb639ffc833c1ba84dcb2b673275ef3773']}, +} + +@conference{Ginn14, + author = {Bram van Ginneken and C. Jacobs and E. Th. Scholten and M. Prokop and de Jong, P. A.}, + title = {Feasibility of Rapid Reading of {CT} Lung Cancer Screening with Computer-Aided Detection Support}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE: The reading effort associated with CT lung cancer screening programs is substantial. We investigated the performance of rapid reading of chest CT scans with integrated CAD support, with the goal of quickly assigning a subject to either regular one-year follow-up, short-term follow-up or immediate work-up. METHOD AND MATERIALS: From the baseline round of a large randomized controlled low-dose CT lung cancer screening trial, randomly 23 cases were selected from each of the three categories used in the trial: 1) no significant nodules, 1 year follow-up CT; 2) nodule 50-500 mm3, 3 month follow-up CT; 3) nodule >500 mm3, referral to pulmonologist. All 69 cases were pre-processed with three different CAD systems aimed at detecting both solid and subsolid lesions and set to operate at high sensitivity. CAD marks were merged and presented in a prototype software environment optimized for rapid reading that includes one-click immediate volumetric segmentation and study preloading to navigate to the next case in the worklist without delay. Seven blinded readers read all cases in random order in a single session as follows. First, CAD marks were inspected and accepted or rejected. 
Next, readers quickly inspected the scan and added relevant nodules if CAD had not identified these. Finally, readers assigned the scan to one of the three categories of the screening protocol. RESULTS: Cases had 5.1 CAD marks on average. 73+/-7% of cases (range 58-80%) were assigned to the correct category. 94% of discordances were between category 1 versus 2, or category 2 versus 3. In most cases the reason was that the volume of the most suspicious nodule was very close to the cutpoints used in the screening protocol. Of the 23 cases in category 3, 14 contained lung cancer. None of these were put in category 1 by any reader; only two of these were placed in category 2, each by only 1/7 readers. 2/9 of the benign category 3 cases were put in category 1: one case by 4/7 readers (a relatively large pleural lesion missed by CAD), and one by 1/7 readers. Total median reading time per case was 67+/-17 seconds. CONCLUSION: With the support of highly effective CAD systems, nodule volumetry, and an optimized reading environment, it is possible to accurately read lung cancer CT scans in around one minute per case. CLINICAL RELEVANCE/APPLICATION: An optimized reading environment is presented that can be used for large scale implementation of lung CT screening.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Ginn15, + author = {Bram van Ginneken and Arnaud A. A. Setio and Colin Jacobs and Francesco Ciompi}, + title = {Off-the-shelf convolutional neural network features for pulmonary nodule detection in computed tomography scans}, + booktitle = ISBI, + year = {2015}, + pages = {286--289}, + doi = {10.1109/ISBI.2015.7163869}, + abstract = {Convolutional neural networks (CNNs) have emerged as the most powerful technique for a range of different tasks in computer vision. Recent work suggested that CNN features are generic and can be used for classification tasks outside the exact domain for which the networks were trained. In this work we use the features from one such network, OverFeat, trained for object detection in natural images, for nodule detection in computed tomography scans. We use 865 scans from the publicly available LIDC data set, read by four thoracic radiologists. Nodule candidates are generated by a state-of-the-art nodule detection system. We extract 2D sagittal, coronal and axial patches for each nodule candidate and extract 4096 features from the penultimate layer of OverFeat and classify these with linear support vector machines. We show for various configurations that the off-the-shelf CNN features perform surprisingly well, but not as good as the dedicated detection system. When both approaches are combined, significantly better results are obtained than either approach alone. We conclude that CNN features have great potential to be used for detection tasks in volumetric medical data.}, + file = {Ginn15.pdf:pdf\\Ginn15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {3459188085599203383}, + gscites = {259}, + ss_id = {81570ccbf4a1edb8898e8969f9befa3561e46318}, + all_ss_ids = {['81570ccbf4a1edb8898e8969f9befa3561e46318']}, +} + +@article{Ginn17, + author = {van Ginneken, Bram}, + title = {Fifty years of computer analysis in chest imaging: rule-based, machine learning, deep learning}, + journal = RPT, + year = {2017}, + volume = {10}, + number = {1}, + pages = {23-32}, + doi = {10.1007/s12194-017-0394-5}, + abstract = {Half a century ago, the term "computer-aided diagnosis" (CAD) was introduced in the scientific literature.
Pulmonary imaging, with chest radiography and computed tomography, has always been one of the focus areas in this field. In this study, I describe how machine learning became the dominant technology for tackling CAD in the lungs, generally producing better results than do classical rule-based approaches, and how the field is now rapidly changing: in the last few years, we have seen how even better results can be obtained with deep learning. The key differences among rule-based processing, machine learning, and deep learning are summarized and illustrated for various applications of CAD in the chest.}, + file = {Ginn17.pdf:pdf\\Ginn17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28211015}, + month = {2}, + gsid = {14217309155856769796}, + gscites = {155}, + all_ss_ids = {['46479bbea7749cb2db35b139206039531327053c', 'b69fe5a837277ddbea5215d6bacd3a902e9d11ce']}, +} + +@article{Ginn18, + author = {van Ginneken, Bram}, + title = {Deep Learning for Triage of Chest Radiographs: Should Every Institution Train Its Own System?}, + journal = Radiology, + year = {2019}, + volume = {290}, + month = {11}, + pages = {545-546}, + doi = {10.1148/radiol.2018182318}, + file = {:pdf/Ginn18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30422089}, + gsid = {3985594963871751836}, + gscites = {10}, + ss_id = {f46d34f0234422fb7db3837dd2b32efe03fc6e9a}, + all_ss_ids = {['f46d34f0234422fb7db3837dd2b32efe03fc6e9a', '2723ce1686eea776df179e362cd9a8b8e2bb7ff1']}, +} + +@conference{Ginn18a, + author = {B. van Ginneken}, + title = {Deep Machine Learning for Screening LDCT}, + booktitle = JTO, + year = {2018}, + volume = {13}, + pages = {S190}, + abstract = {The first computer algorithms to automatically detect pulmonary nodules in CT scans, based on classical machine learning approaches, were developed almost two decades ago. These systems appeared in commercially available computer-aided detection packages. However, a recent study concluded that such older software systems fail to flag a substantial number of cancerous lesions and have a fairly high false positive rate. Recently, algorithms based on deep learning, in particular, convolutional neural networks, have been developed that report high sensitivity with low false positive rates. Similar deep learning algorithms have been successful in classifying nodules as solid, subsolid or part-solid with accuracy comparable to radiologists, and in estimating the probability of malignancy of nodules. The 2017 Kaggle Data Science Bowl combined these tasks into a single challenge where 2000 teams developed ...}, + file = {:pdf/Ginn18a.pdf:PDF}, + optnote = {DIAG}, +} + +@conference{Ginn18b, + author = {Bram van Ginneken}, + title = {Real-Life Artificial Intelligence Applications}, + booktitle = JBELSORAD, + year = {2018}, + doi = {10.5334/jbsr.1656}, + abstract = {Artificial intelligence (AI), particularly deep learning, is currently at the top of the hype cycle. Application of this technology to the analysis of medical images is attracting a lot of attention worldwide. + + At the same time, the average radiologist is using very little to no AI tools in her daily practice. This lecture provides a brief explanation of deep learning and explains what makes this technology different from previous approaches and why it is so powerful. A number of AI applications, some in use that were developed and commercialized in our research group, are presented. 
These applications serve as examples to define a number of different types of AI products that differ in the way they are placed in (or outside) the workflow of radiologists. This lecture emphasizes how some of these tools replace (a small part of the work of) radiologists, while others augment radiologists, and yet others take the radiologists out of the loop in the care cycle of the patient. Finally, it is discussed how radiologists can, and should, be involved in the development of real-life AI applications.}, + file = {Ginn18b.pdf:pdf\\Ginn18b.pdf:PDF}, + optnote = {DIAG}, + gsid = {2647745699443927210}, + gscites = {1}, +} + +@article{Ginn20, + doi = {10.1148/radiol.2020204238}, + pmid = {33236962}, + year = {2020}, + pages = {204238}, + author = {Bram van Ginneken}, + title = {The Potential of Artificial Intelligence to Analyze Chest Radiographs for Signs of {COVID}-19 Pneumonia}, + journal = Radiology, + optnote = {DIAG, INPRESS}, + file = {Ginn20.pdf:pdf\\Ginn20.pdf:PDF}, + ss_id = {11cfab281e93fa7803931be8b589aa1f09d7af02}, + all_ss_ids = {['11cfab281e93fa7803931be8b589aa1f09d7af02']}, + gscites = {11}, +} + +@article{Ginn22, + doi = {10.1148/radiol.221769}, + author = {Bram van Ginneken}, + year = {2022}, + journal = Radiology, + title = {Tuberculosis Detection from Chest Radiographs: Stop Training Radiologists Now}, + pages = {1-2}, + volume = {00}, + file = {Ginn22.pdf:pdf\\Ginn22.pdf:PDF}, + ss_id = {13d81eb12871aa6a4f15fc5c9b127bc320f55179}, + all_ss_ids = {['13d81eb12871aa6a4f15fc5c9b127bc320f55179']}, + gscites = {1}, +} + +@article{Ginn98, + author = {B. van Ginneken and M. Stavridi and J. J. Koenderink}, + title = {Diffuse and specular reflectance from rough surfaces}, + journal = APOPT, + year = {1998}, + volume = {37}, + pages = {130-139}, + doi = {10.1364/AO.37.000130}, + abstract = {{W}e present a reflection model for isotropic rough surfaces that have both specular and diffuse components. {T}he surface is assumed to have a normal distribution of heights. {P}arameters of the model are the surface roughness given by the rms slope, the albedo, and the balance between diffuse and specular reflection. {T}he effect of roughness on diffuse reflection is taken into account, instead of our modeling this component as a constant {L}ambertian term. {T}he model includes geometrical effects such as masking and shadowing. {T}he model is compared with experimental data obtained from goniophotometric measurements on samples of tiles and bricks. {T}he model fits well to samples with very different reflection properties. {M}easurements of the sample profiles performed with a laser profilometer to determine the rms slope show that the assumed surface model is realistic. {T}he model could therefore be used in machine vision and computer graphics to approximate reflection characteristics of surfaces. {I}t could also be used to predict the texture of surfaces as a function of illumination and viewing angles.}, + file = {Ginn98.pdf:pdf\\Ginn98.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {18268570}, + month = {1}, + gsid = {16914516602261093772}, + gscites = {191}, +} + +@inproceedings{Ginn99, + author = {B. van Ginneken and B. M.
ter Haar Romeny}, + title = {Applications of locally orderless images}, + booktitle = {Scale-Space Theories in Computer Vision}, + year = {1999}, + volume = {1682}, + series = LNCS, + pages = {10-21}, + doi = {10.1007/3-540-48236-9_2}, + abstract = {{I}n a recent work, {K}oenderink and van {D}oorn consider a family of three intertwined scale-spaces coined the locally orderless image ({LOI}). {T}he {LOI} represents the image, observed at inner scale σ, as a local histogram with bin-width β, at each location, with a {G}aussian-shaped region of interest of extent α. {LOI}s form a natural and elegant extension of scale-space theory, show causal consistency and enable the smooth transition between pixels, histograms and isophotes. {T}he aim of this work is to demonstrate the wide applicability and versatility of {LOI}s. {W}e consider a range of image processing tasks, including variations of adaptive histogram equalization, several methods for noise and scratch removal, texture rendering, classification and segmentation.}, + file = {Ginn99.pdf:pdf\\Ginn99.pdf:PDF}, + gsid = {10753743025636204872}, + optnote = {DIAG, RADIOLOGY}, + month = {6}, + gscites = {29}, + ss_id = {b6c2b03aef8689cbd5bbe2cae322eaaea832b7f6}, + all_ss_ids = {['b6c2b03aef8689cbd5bbe2cae322eaaea832b7f6']}, +} + +@inproceedings{Ginn99a, + author = {B. van Ginneken and B. M. ter Haar Romeny}, + title = {Automatic segmentation of lung fields in chest radiographs}, + booktitle = MICCAI, + year = {1999}, + volume = {1679}, + series = LNCS, + pages = {184--191}, + doi = {10.1007/10704282_20}, + abstract = {{W}e present algorithms for the automatic delineation of lung fields in chest radiographs. {W}e first develop a rule-based scheme that detects lung contours using a general framework for the detection of oriented edges and ridges. {T}his algorithm is compared to several pixel classifiers using different combinations of features. {W}e propose a hybrid system that combines both approaches. {T}he performance of each system is compared with interobserver variability and results available from the literature. {O}ur hybrid scheme turns out to be accurate and robust; the accuracy is 0.969 +- 0.00803, and above 94% for all 115 test images.}, + file = {Ginn99a.pdf:pdf\\Ginn99a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {10}, + gsid = {8334497321397385275}, + gscites = {14}, + ss_id = {99042acbce8ec757d91749259821dce751a9a809}, + all_ss_ids = {['99042acbce8ec757d91749259821dce751a9a809']}, +} + +@article{Ginn99b, + author = {B. van Ginneken and J. J. Koenderink and K. J. Dana}, + title = {Texture histograms as a function of irradiation and viewing direction}, + journal = IJCV, + year = {1999}, + volume = {31}, + pages = {169-184}, + doi = {10.1023/A:1008018015948}, + abstract = {{T}he textural appearance of materials encountered in our daily environment depends on two directions, the irradiation and viewing direction. {W}e investigate the bidirectional grey level histograms of a large set of materials, obtained from a texture database. {W}e distinguish important categories, relate the various effects to physical mechanisms, and list material attributes that influence the bidirectional histograms. {W}e use a model for rough surfaces with locally diffuse and/or specular reflection properties, a class of materials that commonly occurs, to generate bidirectional histograms and obtain close agreement with experimental data. {W}e discuss several applications of bidirectional texture functions and histograms.
{I}n particular, we present a new approach to texture mapping based on bidirectional histograms. {F}or 3{D} texture, this technique is superior to standard 2{D} texture mapping at hardly any extra computational cost or memory requirements.}, + file = {Ginn99b.pdf:pdf\\Ginn99b.pdf:PDF}, + gsid = {5477947646226846288}, + optnote = {DIAG, RADIOLOGY}, + number = {2-3}, + gscites = {65}, + ss_id = {8ae7d6146b6d9ed23f6b35f8fa23d3b467bdecef}, + all_ss_ids = {['8ae7d6146b6d9ed23f6b35f8fa23d3b467bdecef']}, +} + +@article{Giro22, + author = {Girolami, Ilaria and Pantanowitz, Liron and Marletta, Stefano and Hermsen, Meyke and van der Laak, Jeroen and Munari, Enrico and Furian, Lucrezia and Vistoli, Fabio and Zaza, Gianluigi and Cardillo, Massimo and Gesualdo, Loreto and Gambaro, Giovanni and Eccher, Albino}, + title = {Artificial intelligence applications for pre-implantation kidney biopsy pathology practice: a systematic review.}, + doi = {10.1007/s40620-022-01327-8}, + abstract = {Transplant nephropathology is a highly specialized field of pathology comprising both the evaluation of organ donor biopsy for organ allocation and post-transplant graft biopsy for assessment of rejection or graft damage. The introduction of digital pathology with whole-slide imaging (WSI) in clinical research, trials and practice has catalyzed the application of artificial intelligence (AI) for histopathology, with development of novel machine-learning models for tissue interrogation and discovery. We aimed to review the literature for studies specifically applying AI algorithms to WSI-digitized pre-implantation kidney biopsy. A systematic search was carried out in the electronic databases PubMed-MEDLINE and Embase until 25th September, 2021 with a combination of the key terms "kidney", "biopsy", "transplantation" and "artificial intelligence" and their aliases. Studies dealing with the application of AI algorithms coupled with WSI in pre-implantation kidney biopsies were included. The main theme addressed was detection and quantification of tissue components. Extracted data were: author, year and country of the study, type of biopsy features investigated, number of cases, type of algorithm deployed, main results of the study in terms of diagnostic outcome, and the main limitations of the study. Of 5761 retrieved articles, 7 met our inclusion criteria. All studies focused largely on AI-based detection and classification of glomerular structures and to a lesser extent on tubular and vascular structures. Performance of AI algorithms was excellent and promising. All studies highlighted the importance of expert pathologist annotation to reliably train models and the need to acknowledge clinical nuances of the pre-implantation setting. Close cooperation between computer scientists and practicing as well as expert renal pathologists is needed, helping to refine the performance of AI-based models for routine pre-implantation kidney biopsy clinical practice.}, + file = {Giro22.pdf:pdf\\Giro22.pdf:PDF}, + journal = {Journal of nephrology}, + month = apr, + optnote = {DIAG}, + pmid = {35441256}, + year = {2022}, + ss_id = {8c882c8737d351dfe19e663228e4c3bd2cafa992}, + all_ss_ids = {['8c882c8737d351dfe19e663228e4c3bd2cafa992']}, + gscites = {22}, +} + +@article{Gish14, + author = {Gishti, Olta and Gaillard, Romy and Manniesing, Rashindra and Abrahamse-Berkeveld, Marieke and van der Beek, Eline M. and Heppe, Denise H.M. and Steegers, Eric A.P. 
and Hofman, Albert and Duijts, Liesbeth and Durmus, B\"usra and Jaddoe, Vincent W.V.}, + title = {Fetal and infant growth patterns associated with total and abdominal fat distribution in school-age children}, + journal = JCLINEM, + year = {2014}, + volume = {99}, + pages = {2557-2566}, + doi = {10.1210/jc.2013-4345}, + abstract = {Context: Higher infant growth rates are associated with an increased risk of obesity in later life. Objective: We examined the associations of longitudinally measured fetal and infant growth patterns with total and abdominal fat distribution in childhood. Design, Settings and participants: We performed a population-based prospective cohort study among 6,464 children. We measured growth characteristics in second and third trimester of pregnancy, at birth, and at 6, 12, and 24 months. Main Outcomes Measures: Body mass index, fat mass index (body fat mass/height^2), lean mass index (body lean mass/height^2) and android/gynoid fat ratio measured by Dual-energy X-ray Absorptiometry, and subcutaneous and preperitoneal abdominal fat measured by ultrasound at the median age of 6.0 years (90% range 5.7 - 7.4). Results: We observed that weight gain in second and third trimester of fetal life, and in early, mid and late infancy were independently and positively associated with childhood body mass index (p-values<0.05). Only infant weight gain was associated with higher fat mass index, android/gynoid fat ratio, and abdominal fat in childhood (p-values<0.05). Children with both fetal and infant growth acceleration had the highest childhood body mass index, fat mass index and subcutaneous abdominal fat, whereas children with fetal growth deceleration and infant growth acceleration had the highest value for android/gynoid fat ratio and the lowest value for lean mass index (p-values<0.05). Conclusions: Both growth in fetal life and infancy affects childhood body mass index, whereas only infant growth affects directly measured total body and abdominal fat. Fetal growth deceleration followed by infant growth acceleration may lead to an adverse body fat distribution in childhood.}, + file = {Gish14.pdf:pdf\\Gish14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {24712569}, + month = {7}, + gsid = {4001748312327312389}, + gscites = {76}, + ss_id = {cb1b0ac31149df13be8ea554792b8379f31e3d7f}, + all_ss_ids = {['cb1b0ac31149df13be8ea554792b8379f31e3d7f']}, +} + +@article{Glas23, + author = {Glaser, Naomi and Bosman, Shannon and Madonsela, Thandanani and van Heerden, Alastair and Mashaete, Kamele and Katende, Bulemba and Ayakaka, Irene and Murphy, Keelin and Signorell, Aita and Lynen, Lutgarde and Bremerich, Jens and Reither, Klaus}, + title = {Incidental radiological findings during clinical tuberculosis screening in Lesotho and South Africa: a case series}, + doi = {10.1186/s13256-023-04097-4}, + year = {2023}, + abstract = {Abstract + Background + Chest X-ray offers high sensitivity and acceptable specificity as a tuberculosis screening tool, but in areas with a high burden of tuberculosis, there is often a lack of radiological expertise to interpret chest X-ray. Computer-aided detection systems based on artificial intelligence are therefore increasingly used to screen for tuberculosis-related abnormalities on digital chest radiographies. The CAD4TB software has previously been shown to demonstrate high sensitivity for chest X-ray tuberculosis-related abnormalities, but it is not yet calibrated for the detection of non-tuberculosis abnormalities.
When screening for tuberculosis, users of computer-aided detection need to be aware that other chest pathologies are likely to be as prevalent as, or more prevalent than, active tuberculosis. However, non-tuberculosis chest X-ray abnormalities detected during chest X-ray screening for tuberculosis remain poorly characterized in the sub-Saharan African setting, with only minimal literature. + + Case presentation + In this case series, we report on four cases with non-tuberculosis abnormalities detected on CXR in TB TRIAGE + ACCURACY (ClinicalTrials.gov Identifier: NCT04666311), a study in adult presumptive tuberculosis cases at health facilities in Lesotho and South Africa to determine the diagnostic accuracy of two potential tuberculosis triage tests: computer-aided detection (CAD4TB v7, Delft, the Netherlands) and C-reactive protein (Alere Afinion, USA). The four Black African participants presented with the following chest X-ray abnormalities: a 59-year-old woman with pulmonary arteriovenous malformation, a 28-year-old man with pneumothorax, a 20-year-old man with massive bronchiectasis, and a 47-year-old woman with aspergilloma. + + Conclusions + Solely using chest X-ray computer-aided detection systems based on artificial intelligence as a tuberculosis screening strategy in sub-Saharan Africa comes with benefits, but also risks. Due to the limitation of CAD4TB for non-tuberculosis-abnormality identification, the computer-aided detection software may miss significant chest X-ray abnormalities that require treatment, as exemplified in our four cases. Increased data collection, characterization of non-tuberculosis anomalies and research on the implications of these diseases for individuals and health systems in sub-Saharan Africa is needed to help improve existing artificial intelligence software programs and their use in countries with high tuberculosis burden. + }, + url = {http://dx.doi.org/10.1186/s13256-023-04097-4}, + file = {Glas23.pdf:pdf\Glas23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Medical Case Reports}, + citation-count = {0}, + automatic = {yes}, + volume = {17}, +} + +@article{Gola21, + author = {Golatta, Michael and Pfob, Andr\'{e} and B\"{u}sch, Christopher and Bruckner, Thomas and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Duda, Volker and Goncalo, Manuela and Gruber, Ines and Hahn, Markus and Kapetas, Panagiotis and Ohlinger, Ralf and Rutten, Matthieu and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Rauch, Geraldine and Heil, J\"{o}rg and Barr, Richard G.}, + title = {The Potential of Shear Wave Elastography to Reduce Unnecessary Biopsies in Breast Cancer Diagnosis: An International, Diagnostic, Multicenter Trial}, + doi = {10.1055/a-1543-6156}, + year = {2021}, + abstract = {Abstract + Purpose In this prospective, multicenter trial we evaluated whether additional shear wave elastography (SWE) for patients with BI-RADS 3 or 4 lesions on breast ultrasound could further refine the assessment with B-mode breast ultrasound for breast cancer diagnosis. + Materials and Methods We analyzed prospective, multicenter, international data from 1288 women with breast lesions rated by conventional 2 D B-mode ultrasound as BI-RADS 3 to 4c and undergoing 2D-SWE. After reclassification with SWE the proportion of undetected malignancies should be < 2 %. All patients underwent histopathologic evaluation (reference standard). + Results Histopathologic evaluation showed malignancy in 368 of 1288 lesions (28.6 %). 
The assessment with B-mode breast ultrasound resulted in 1.39 % (6 of 431) undetected malignancies (malignant lesions in BI-RADS 3) and 53.80 % (495 of 920) unnecessary biopsies (biopsies in benign lesions). Re-classifying BI-RADS 4a patients with a SWE cutoff of 2.55 m/s resulted in 1.98 % (11 of 556) undetected malignancies and a reduction of 24.24 % (375 vs. 495) of unnecessary biopsies. + Conclusion A SWE value below 2.55 m/s for BI-RADS 4a lesions could be used to downstage these lesions to follow-up, and therefore reduce the number of unnecessary biopsies by 24.24 %. However, this would come at the expense of some additionally missed cancers compared to B-mode breast ultrasound (rate of undetected malignancies 1.98 %, 11 of 556, versus 1.39 %, 6 of 431) which would, however, still be in line with the ACR BI-RADS 3 definition (< 2 % of undetected malignancies).}, + url = {http://dx.doi.org/10.1055/a-1543-6156}, + file = {Gola21.pdf:pdf\Gola21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ultraschall in der Medizin - European Journal of Ultrasound}, + citation-count = {6}, + automatic = {yes}, + pages = {162-168}, + volume = {44}, +} + +@conference{Gome17, + author = {Juan Jose Gomez and Clara I. S\'{a}nchez and Bart Liefers and Freerk G. Venhuizen and Gianluca Fatti and Antonio Morilla-Grasa and Yasmin Cartagena and Alejandra Herranz Cabarcos and Andres Santos and Mar\'{i}a Jes\'{u}s Ledesma-Carbayo and Alfonso Anton-Lopez}, + title = {Automated Analysis of Retinal Images for detection of Glaucoma based on Convolutional Neural Networks}, + booktitle = ARVO, + year = {2017}, + abstract = {Purpose : To assess the performance of deep learning architectures based on convolutional neural networks (CNN) for the diagnosis of glaucoma in screening campaigns using color fundus images. + + Methods : Two independent data sets were used to develop and evaluate the proposed method. 1) 805 color fundus images with a field of view of 45 degrees, centered on the macula and including the optic disc (OD) from patients with age ranging from 55 to 86 years old included in a glaucoma detection campaign performed at Hospital Esperanza (Barcelona). Annotations were performed by eight observers having 8 to 26 years of clinical experience. 2) 101 images from the publicly available Drishti-GS retinal image dataset (http://cvit.iiit.ac.in/projects/mip/drishti-gs/mip-dataset2/Home.php). The total 906 images were further organized into a training, monitoring and test set according to a 60-20-20 split. The process to train and validate the CNN had 3 steps. 1) Preprocessing: the edges and the background were blurred to reduce the effect of the bright fringe and the border. Then patches centered at the OD of size 256x256x3 pixels were automatically segmented and scaled to values from 0 to 1. 2) Implementation: The architecture consisted of ten convolutional layers (32 filters 3x3 pixels size) followed by rectified linear units and spatial max-pooling. The network ends with a fully connected layer and a soft-max classifier which outputs a score from 0 to 1. The network was trained using stochastic gradient descent and a learning rate of 0.005. To avoid overfitting data augmentation was performed applying randomly translations, flipping and rotations during the training, and dropout with probability of 0.5. 3) Monitoring and evaluation: the training was completed after 50 epochs. 
To evaluate the classification capabilities of the algorithm, the area under the receiver operating characteristic curve (ROC) was calculated using the training set. + + Results : An automatic classification algorithm based on CNN was developed. The present method achieved an area under the ROC of 0.894. The accuracy to identify healthy and glaucoma cases was 0.884 and 0.781 respectively, using a threshold of 0.5. + + Conclusions : The good performance of the proposed CNN architecture suggests potential usefulness of these methods for an initial automatic classification of images in screening campaigns for glaucoma.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {11583482517657678688}, + gscites = {2}, + all_ss_ids = {8b92dcfb8d8b92314d63de92852a28880a81f4ea}, +} + +@article{Gome19, + author = {Juan J. G\'{o}mez-Valverde and Alfonso Ant\'{o}n and Gianluca Fatti and Bart Liefers and Alejandra Herranz and Andr\'{e}s Santos and Clara I. S\'{a}nchez and Mar\'{i}a J. Ledesma-Carbayo}, + title = {Automatic glaucoma classification using color fundus images based on convolutional neural networks and transfer learning}, + journal = BOE, + year = {2019}, + volume = {10}, + number = {2}, + month = {2}, + pages = {892--913}, + doi = {10.1364/BOE.10.000892}, + url = {http://www.osapublishing.org/boe/abstract.cfm?URI=boe-10-2-892}, + abstract = {Glaucoma detection in color fundus images is a challenging task that requires expertise and years of practice. In this study we exploited the application of different Convolutional Neural Networks (CNN) schemes to show the influence in the performance of relevant factors like the data set size, the architecture and the use of transfer learning vs newly defined architectures. We also compared the performance of the CNN based system with respect to human evaluators and explored the influence of the integration of images and data collected from the clinical history of the patients. We accomplished the best performance using a transfer learning scheme with VGG19 achieving an AUC of 0.94 with sensitivity and specificity ratios similar to the expert evaluators of the study. The experimental results using three different data sets with 2313 images indicate that this solution can be a valuable option for the design of a computer aid system for the detection of glaucoma in large-scale screening programs.}, + file = {Gome19.pdf:pdf\\Gome19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30800522}, + publisher = {OSA}, + gsid = {5163022841600007243}, + gscites = {126}, + ss_id = {b5f1ef811b206720957f6c4412c9a2ad9bf480c9}, + all_ss_ids = {['b5f1ef811b206720957f6c4412c9a2ad9bf480c9']}, +} + +@inproceedings{Gonz18, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Liefers, Bart and van Ginneken, Bram and S\'{a}nchez, Clara I.}, + title = {Improving weakly-supervised lesion localization with iterative saliency map refinement}, + booktitle = MIDL, + year = {2018}, + url = {https://openreview.net/forum?id=r15c8gnoG}, + abstract = {Interpretability of deep neural networks in medical imaging is becoming an important technique to understand network classification decisions and increase doctors' trust. Available methods for visual interpretation, though, tend to highlight only the most discriminant areas, which is suboptimal for clinical output. We propose a novel deep visualization framework for improving weakly-supervised lesion localization. 
The framework applies an iterative approach where, in each step, the interpretation maps focus on different, less discriminative areas of the images, but still important for the final classification, reaching a more refined localization of abnormalities. We evaluate the performance of the method for the localization of diabetic retinopathy lesions in color fundus images. The results show the obtained visualization maps are able to detect more lesions after the iterative procedure in the case of more severely affected retinas.}, + file = {:pdf/Gonz18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {14923642687050576388}, + gscites = {4}, + ss_id = {05078a4b34fbf98a940b3ca7f97773632c787397}, + all_ss_ids = {['05078a4b34fbf98a940b3ca7f97773632c787397']}, +} + +@conference{Gonz19a, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Liefers, Bart and Akshayaa Vaidyanathan and Harm van Zeeland and Klaver, Caroline C W and S\'{a}nchez, Clara I.}, + booktitle = ARVO, + url = {https://iovs.arvojournals.org/article.aspx?articleid=2746850}, + title = {Opening the "black box" of deep learning in automated screening of eye diseases}, + abstract = {Purpose: Systems based on deep learning (DL) have demonstrated to provide a scalable and high-performance solution for screening of eye diseases. However, DL is usually considered a "black box" due to lack of interpretability. We propose a deep visualization framework to explain the decisions made by a DL system, iteratively unveiling abnormalities responsible for referable predictions without needing lesion-level annotations. We apply the framework to automated screening of diabetic retinopathy (DR) in color fundus images (CFIs). + + Methods: The proposed framework consists of a baseline deep convolutional neural network to classify CFIs by DR stage. For each CFI classified as referable DR, the framework extracts initial visual evidence of the predicted stage by computing a saliency map, which indicates regions in the image that would contribute the most to changes in the prediction if modified. This provides localization of abnormalities that are then removed through selective inpainting. The image is again classified, expecting reduced referability. We iteratively apply this procedure to increase attention to less discriminative areas and generate refined visual evidence. The Kaggle DR database, with CFIs graded regarding DR severity (stages 0 and 1: non-referable DR, stages 2 to 4: referable DR), is used for training and validation of the image-level classification task. For validation of the obtained visual evidence, we used the DiaretDB1 dataset, which contains CFIs with manually-delineated areas for 4 types of lesions: hemorrhages, microaneurysms, hard and soft exudates. + + Results : The baseline classifier obtained an area under the Receiver Operating Characteristic (ROC) curve of 0.93 and a quadratic weighted kappa of 0.77 on the Kaggle test set (53576 CFIs). Free-response ROC (FROC) curves (Figure 2) analyze the correspondence between highlighted areas and each type of lesion for those images classified as referable DR in the DiaretDB1 dataset (62 CFIs), comparing between initial and refined visual evidence. + + Conclusions : The proposed framework provides visual evidence for the decisions made by a DL system, iteratively unveiling abnormalities in CFIs based on the prediction of a classifier trained only with image-level labels. This provides a "key" to open the "black box"
of artificial intelligence in screening of eye diseases, aiming to increase experts' trust and facilitate its integration in screening settings.}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, + all_ss_ids = {80af090645088134f058db53a708b7092dd28786}, + gscites = {0}, +} + +@article{Gonz19b, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Liefers, Bart and van Ginneken, Bram and S\'{a}nchez, Clara I}, + title = {Iterative augmentation of visual evidence for weakly-supervised lesion localization in deep interpretability frameworks}, + journal = {arXiv:1910.07373}, + year = {2019}, + abstract = {Interpretability of deep learning (DL) systems is gaining attention in medical imaging to increase experts' trust in the obtained predictions and facilitate their integration in clinical settings. We propose a deep visualization method to generate interpretability of DL classification tasks in medical imaging by means of visual evidence augmentation. The proposed method iteratively unveils abnormalities based on the prediction of a classifier trained only with image-level labels. For each image, initial visual evidence of the prediction is extracted with a given visual attribution technique. This provides localization of abnormalities that are then removed through selective inpainting. We iteratively apply this procedure until the system considers the image as normal. This yields augmented visual evidence, including less discriminative lesions which were not detected at first but should be considered for final diagnosis. We apply the method to grading of two retinal diseases in color fundus images: diabetic retinopathy (DR) and age-related macular degeneration (AMD). We evaluate the generated visual evidence and the performance of weakly-supervised localization of different types of DR and AMD abnormalities, both qualitatively and quantitatively. We show that the augmented visual evidence of the predictions highlights the biomarkers considered by the experts for diagnosis and improves the final localization performance. It results in a relative increase of 11.2+-2.0% per image regarding average sensitivity per average 10 false positives, when applied to different classification tasks, visual attribution techniques and network architectures. This makes the proposed method a useful tool for exhaustive visual support of DL classifiers in medical imaging.}, + optnote = {DIAG}, + month = {10}, +} + +@article{Gonz20, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Liefers, Bart and van Ginneken, Bram and S\'{a}nchez, Clara I.}, + title = {Iterative augmentation of visual evidence for weakly-supervised lesion localization in deep interpretability frameworks: application to color fundus images}, + journal = TMI, + doi = {10.1109/TMI.2020.2994463}, + year = {2020}, + pmid = {32746093}, + number = {11}, + pages = {3499--3511}, + volume = {39}, + abstract = {Interpretability of deep learning (DL) systems is gaining attention in medical imaging to increase experts' trust in the obtained predictions and facilitate their integration in clinical settings. We propose a deep visualization method to generate interpretability of DL classification tasks in medical imaging by means of visual evidence augmentation. The proposed method iteratively unveils abnormalities based on the prediction of a classifier trained only with image-level labels. For each image, initial visual evidence of the prediction is extracted with a given visual attribution technique. 
This provides localization of abnormalities that are then removed through selective inpainting. We iteratively apply this procedure until the system considers the image as normal. This yields augmented visual evidence, including less discriminative lesions which were not detected at first but should be considered for final diagnosis. We apply the method to grading of two retinal diseases in color fundus images: diabetic retinopathy (DR) and age-related macular degeneration (AMD). We evaluate the generated visual evidence and the performance of weakly-supervised localization of different types of DR and AMD abnormalities, both qualitatively and quantitatively. We show that the augmented visual evidence of the predictions highlights the biomarkers considered by experts for diagnosis and improves the final localization performance. It results in a relative increase of 11.2+-2.0% per image regarding sensitivity averaged at 10 false positives/image on average, when applied to different classification tasks, visual attribution techniques and network architectures. This makes the proposed method a useful tool for exhaustive visual support of DL classifiers in medical imaging.}, + file = {Gonz20.pdf:pdf\\Gonz20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {44cb242be984781ad380e9f785157c13384d0026}, + all_ss_ids = {['44cb242be984781ad380e9f785157c13384d0026']}, + gscites = {20}, +} + +@article{Gonz20a, + author = {Gonz\'{a}lez-Gonzalo, Cristina and S\'{a}nchez-Guti\'{e}rrez, Ver\'{o}nica and Hern\'{a}ndez-Mart\'{i}nez, Paula and Contreras, In\'{e}s and Lechanteur, Yara T and Domanian, Artin and van Ginneken, Bram and S\'{a}nchez, Clara I.}, + title = {Evaluation of a deep learning system for the joint automated detection of diabetic retinopathy and age-related macular degeneration}, + doi = {10.1111/aos.14306}, + number = {4}, + pages = {368--377}, + volume = {98}, + abstract = {To validate the performance of a commercially available, CE-certified deep learning (DL) system, RetCAD v.1.3.0 (Thirona, Nijmegen, The Netherlands), for the joint automatic detection of diabetic retinopathy (DR) and age-related macular degeneration (AMD) in colour fundus (CF) images on a dataset with mixed presence of eye diseases. Evaluation of joint detection of referable DR and AMD was performed on a DR-AMD dataset with 600 images acquired during routine clinical practice, containing referable and non-referable cases of both diseases. Each image was graded for DR and AMD by an experienced ophthalmologist to establish the reference standard (RS), and by four independent observers for comparison with human performance. Validation was furtherly assessed on Messidor (1200 images) for individual identification of referable DR, and the Age-Related Eye Disease Study (AREDS) dataset (133 821 images) for referable AMD, against the corresponding RS. Regarding joint validation on the DR-AMD dataset, the system achieved an area under the ROC curve (AUC) of 95.1% for detection of referable DR (SE = 90.1%, SP = 90.6%). For referable AMD, the AUC was 94.9% (SE = 91.8%, SP = 87.5%). Average human performance for DR was SE = 61.5% and SP = 97.8%; for AMD, SE = 76.5% and SP = 96.1%. Regarding detection of referable DR in Messidor, AUC was 97.5% (SE = 92.0%, SP = 92.1%); for referable AMD in AREDS, AUC was 92.7% (SE = 85.8%, SP = 86.0%). The validated system performs comparably to human experts at simultaneous detection of DR and AMD. 
This shows that DL systems can facilitate access to joint screening of eye diseases and become a quick and reliable support for ophthalmological experts.}, + file = {Gonz20a.pdf:pdf\\Gonz20a.pdf:PDF}, + journal = ACTOPH, + optnote = {DIAG, RADIOLOGY}, + pmid = {31773912}, + year = {2020}, + month = {11}, + gsid = {12212989465195722761}, + gscites = {59}, + ss_id = {d0760faf253e27b6c105d76d06acc4c6ab3674c7}, + all_ss_ids = {['d0760faf253e27b6c105d76d06acc4c6ab3674c7']}, +} + +@conference{Gonz20c, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Wetstein, Suzanne C. and Bortsova, Gerda and Liefers, Bart and van Ginneken, Bram and S\'{a}nchez, Clara I.}, + booktitle = EURETINA, + url = {https://www.euretina.org/congress/amsterdam-2020/virtual-2020-freepapers/}, + title = {Are adversarial attacks an actual threat for deep learning systems in real-world eye disease screening settings?}, + abstract = {Purpose: + Deep learning (DL) systems that perform image-level classification with convolutional neural networks (CNNs) have been shown to provide high-performance solutions for automated screening of eye diseases. Nevertheless, adversarial attacks have been recently screening settings, where there is restricted access to the systems and limited knowledge about certain factors, such as their CNN architecture or the data used for development. + Setting: + Deep learning for automated screening of eye diseases. + Methods: + We used the Kaggle dataset for diabetic retinopathy detection. It contains 88,702 manually-labelled color fundus images, which we split into test (12%) and development (88%). Development data were split into two equally-sized sets (d1 and d2); a third set (d3) was generated using half of the images in d2. In each development set, 80%/20% of the images were used for training/validation. All splits were done randomly at patient-level. As attacked system, we developed a randomly-initialized CNN based on the Inception-v3 architecture using d1. We performed the attacks (1) in a white-box (WB) setting, with full access to the attacked system to generate the adversarial images, and (2) in black-box (BB) settings, without access to the attacked system and using a surrogate system to craft the attacks. We simulated different BB settings, sequentially decreasing the available knowledge about the attacked system: same architecture, using d1 (BB-1); different architecture (randomly-initialized DenseNet-121), using d1 (BB-2); same architecture, using d2 (BB-3); different architecture, using d2 (BB-4); different architecture, using d3 (BB-5). In each setting, adversarial images containing non-perceptible noise were generated by applying the fast gradient sign method to each image of the test set and processed by the attacked system. + Results: + The performance of the attacked system to detect referable diabetic retinopathy without attacks and under the different attack settings was measured on the test set using the area under the receiver operating characteristic curve (AUC). Without attacks, the system achieved an AUC of 0.88. In each attack setting, the relative decrease in AUC with respect to the original performance was computed. In the WB setting, there was a 99.9% relative decrease in performance. In the BB-1 setting, the relative decrease in AUC was 67.3%. In the BB-2 setting, the AUC suffered a 40.2% relative decrease. In the BB-3 setting, the relative decrease was 37.9%. In the BB-4 setting, the relative decrease in AUC was 34.1%. 
Lastly, in the BB-5 setting, the performance of the attacked system decreased 3.8% regarding its original performance. + Conclusions: + The results obtained in the different settings show a drastic decrease of the attacked DL system's vulnerability to adversarial attacks when the access and knowledge about it are limited. The impact on performance is extremely reduced when restricting the direct access to the system (from the WB to the BB-1 setting). The attacks become slightly less effective when not having access to the same development data (BB-3), compared to not using the same CNN architecture (BB-2). Attacks' effectiveness further decreases when both factors are unknown (BB-4). If the amount of development data is additionally reduced (BB-5), the original performance barely deteriorates. This last setting is the most similar to realistic screening settings, since most systems are currently closed source and use additional large private datasets for development. In conclusion, these factors should be acknowledged for future development of robust DL systems, as well as considered when evaluating the vulnerability of currently-available systems to adversarial attacks. Having limited access and knowledge about the systems determines the actual threat these attacks pose. We believe awareness about this matter will increase experts' trust and facilitate the integration of DL systems in real-world settings.}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, + month = {9}, +} + +@conference{Gonz21, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and Liefers, Bart and de Vente, Coen and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.}, + booktitle = ARVO, + url = {https://iovs.arvojournals.org/article.aspx?articleid=2773295}, + title = {Hierarchical curriculum learning for robust automated detection of low-prevalence retinal disease features: application to reticular pseudodrusen}, + abstract = {Purpose: The low prevalence of certain retinal disease features compromises data collection for deep neural networks (DNN) development and, consequently, the benefits of automated detection. We robustify the detection of such features in scarce data settings by exploiting hierarchical information available in the data to learn from generic to specific, low-prevalence features. We focus on reticular pseudodrusen (RPD), a hallmark of intermediate age-related macular degeneration (AMD). + + Methods: Color fundus images (CFI) from the AREDS dataset were used for DNN development (106,994 CFI) and testing (27,066 CFI). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. In both datasets CFI were graded from generic to specific features. This allows to establish a hierarchy of binary classification tasks with decreasing prevalence: presence of AMD findings (AREDS prevalence: 88%; RS1-6: 77%), drusen (85%; 73%), large drusen (40%; 24%), RPD (1%; 4%). We created a hierarchical curriculum and developed a DNN (HC-DNN) that learned each task sequentially. We computed its performance for RPD detection in both test sets and compared it to a baseline DNN (B-DNN) that learned to detect RPD from scratch disregarding hierarchical information. We studied their robustness across datasets, while reducing the size of data available for development (same prevalences) + + Results: Area under the receiver operating characteristic curve (AUC) was used to measure RPD detection performance. 
When large development data were available, there was no significant difference between DNNs (100% data, HC-DNN: 0.96 (95% CI, 0.94-0.97) in AREDS, 0.82 (0.78-0.86) in RS1-6; B-DNN: 0.95 (0.94-0.96) in AREDS, 0.83 (0.79-0.87) in RS1-6). However, HC-DNN achieved better performance and robustness across datasets when development data were highly reduced (<50% data, p-values<0.05) (1% data, HC-DNN: 0.63 (0.60-0.66) in AREDS, 0.76 (0.72-0.80) in RS1-6; B-DNN: 0.53 (0.49-0.56) in AREDS, 0.48 (0.42-0.53) in RS1-6). + + Conclusions: Hierarchical curriculum learning allows for knowledge transfer from general, higher-prevalence features and becomes beneficial for the detection of low-prevalence retinal features, such as RPD, in scarce data settings. Moreover, exploiting hierarchical information improves DNN robustness across datasets.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@conference{Gonz21a, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and Liefers, Bart and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.}, + booktitle = EURETINA, + title = {Deep learning for automated stratification of ophthalmic images: Application to age-related macular degeneration and color fundus images}, + url = {https://euretina.org/resource/abstract_2021_deep-learning-for-automated-stratification-of-ophthalmic-images-application-to-age-related-macular-degeneration-and-color-fundus-images/}, + abstract = {Purpose: Deep learning (DL) systems based on convolutional neural networks (CNNs) have achieved expert-level performance in different classification tasks, and have shown the potential to reduce current experts' workload significantly. We explore this potential in the context of automated stratification of ophthalmic images. DL could accelerate the setup of clinical studies by filtering large amounts of images or patients based on specific inclusion criteria, as well as aid in patient selection for clinical trials. DL could also allow for automated categorization of entering images in busy clinical or screening settings, enhancing data triaging, searching, retrieval, and comparison. Automated stratification could also facilitate data collection and application of further DL-based phenotyping analysis, by generating useful sets of images for expert annotation, training, or testing of segmentation algorithms. In our work, we focus on the stratification of color fundus images (CFI) based on multiple features related to age-related macular degeneration (AMD) at different hierarchical levels. We further analyze the robustness of the automated stratification system when the amount of data available for development is limited. We performed our validation on two different population studies. + + Setting/Venue: Deep learning applied to ophthalmic imaging. + + Methods: Automated stratification of CFI was performed based on the presence or absence of the following AMD features, following a hierarchical tree with different branches (Bi) and levels (Hi) from generic features (H0) to specific features (H3): AMD findings (H0); B1: drusen (H1), large drusen (H2), reticular pseudodrusen (H3); B2: pigmentary changes (H1), hyperpigmentation (H2), hypopigmentation (H2); B3: late AMD (H1), geographic atrophy (H2), choroidal neovascularization (H2). The automated stratification system consisted of a set of CNNs (based on the Inception-v3 architecture) able to classify the multiple AMD features (presence/absence) at higher and lower levels. 
This allowed to automatically stratify incoming CFI into the hierarchical tree. CFI from the AREDS dataset were used for development (106,994 CFI) and testing (27,066 CFI) of the CNNs. We validated the robustness of the system to a gradual decrease in the amount of data available for development (100%, 75%, 50%, 25%, 10%, 5%, 2.5%, and 1% of development data). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. This allowed to validate the performance of the automated stratification across studies where different CFI grading protocols were used. + + Results: Area under the receiver operating characteristic curve (AUC) was used to measure the performance of each feature's classification within the automated stratification. The AUC averaged across AMD features when 100% of development data was available was 93.8% (95% CI, 93.4%-94.2%) in AREDS and 84.4% (82.1%-86.5%) in RS1-6. There was an average relative decrease in performance of 10.0+-4.7% between AREDS and the external test set, RS1-6. The performance of the system decreased gradually with each development data reduction. When only 1% of data was available for development, the average AUC was 81.9% (81.0%-82.8%) in AREDS and 74.0% (70.8%-77.0%) in RS1-6. This corresponded to an average relative decrease in performance of 12.7+-13.2% in AREDS and 12.6+-7.8% in RS1-6. + + Conclusions: The automated stratification system achieved overall high performance in the classification of different features independently of their hierarchical level. This shows the potential of DL systems to identify diverse phenotypes and to obtain an accurate automated stratification of CFI. The results showed that automated stratification was also robust to a dramatic reduction in the data available for development, maintaining the average AUC above 80%. This is a positive observation, considering that the amount of data available for DL development can be limited in some settings, and the gradings can be costly to obtain. Nevertheless, variability in performance across features could be observed, especially for those with very low prevalence, such as reticular pseudodrusen, where performance became more unstable when few data were available. The external validation showed these observations held when the automated stratification was applied in a different population study, with an expected (but not drastic) drop of performance due to differences between datasets and their grading protocols. In conclusion, our work supports that DL is a powerful tool for the filtering and stratification of ophthalmic images, and has the potential to reduce the workload of experts while supporting them in research and clinical settings.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@conference{Gonz21b, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Verbraak, F. and Schlingemann, R.O. and Klaver, Caroline C.W. and Lee, A.Y. and Tufail, A. and S\'{a}nchez, Clara I.}, + booktitle = {European Association for the Study of Diabetes Eye Complications Study Group}, + url = {https://journals.sagepub.com/doi/full/10.1177/11206721211047031}, + title = {Trustworthy AI: closing the gap between development and integration of AI in Ophthalmology}, + abstract = {Design: Descriptive study. + + Purpose: To identify the main aspects that currently complicate the integration of artificial intelligence (AI) in ophthalmic settings.
+ + Methods: Based on an extensive review of state-of-the-art literature of AI applied to Ophthalmology plus interviews with multidisciplinary, international experts, we identified the most relevant aspects to consider during AI design to generate trustworthy (i.e., transparent, robust, and sustainable) AI systems and, consequently, facilitate a subsequent successful integration in real-world ophthalmic settings. + + Results: Several essential aspects to consider were identified: + 1) The reliability of the human annotations that are used for establishing the reference standard an AI system learns from, or for setting robust observer studies that allow for fair human-AI performance comparison. + 2) The ability of an AI system to generalize across populations, ophthalmic settings, and data acquisition protocols in order to avoid the negative consequences of algorithmic bias and lack of domain adaptation. + 3) The integration of multimodal data for AI development to consider multiple contexts when available (phenotyping, genotyping, systemic variables, patient medical history...). + 4) The importance of providing interpretable AI-based predictions to open the "black box" and increase trust and clinical usability. + 5) A plan to monitor the impact of AI on the clinical workflow, i.e., the adaptation of healthcare providers and patients to the new technology, human-AI interaction, cost-benefit analyses... + 6) The necessity to update current regulations to accelerate and control AI integration and all related aspects, such as patient privacy, systems' updates, and liability. + + Conclusions: It is important that healthcare providers in Ophthalmology consider these aspects and their consequences when thinking of AI in practice. It is key that all involved stakeholders collaborate and interact from the beginning of the AI design process to ensure a good alignment with real-world clinical needs and settings. This way, it will be possible to generate trustworthy AI solutions and close the gap between development and deployment, so that the AI benefits currently shown on paper reach the final users.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@article{Gonz21c, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and Klaver, Caroline C.W. and Lee, A.Y. and Schlingemann, R.O. and Tufail, A. and Verbraak, F. and S\'{a}nchez, Clara I.}, + title = {Trustworthy AI: Closing the gap between development and integration of AI systems in ophthalmic practice}, + journal = PRER, + doi = {10.1016/j.preteyeres.2021.101034}, + pmid = {34902546}, + year = {2021}, + abstract = {An increasing number of artificial intelligence (AI) systems are being proposed in ophthalmology, motivated by the variety and amount of clinical and imaging data, as well as their potential benefits at the different stages of patient care. Despite achieving close or even superior performance to that of experts, there is a critical gap between development and integration of AI systems in ophthalmic practice. This work focuses on the importance of trustworthy AI to close that gap. We identify the main aspects or challenges that need to be considered along the AI design pipeline so as to generate systems that meet the requirements to be deemed trustworthy, including those concerning accuracy, resiliency, reliability, safety, and accountability.
We elaborate on mechanisms and considerations to address those aspects or challenges, and define the roles and responsibilities of the different stakeholders involved in AI for ophthalmic care, i.e., AI developers, reading centers, healthcare providers, healthcare institutions, ophthalmological societies and working groups or committees, patients, regulatory bodies, and payers. Generating trustworthy AI is not a responsibility of a sole stakeholder. There is an impending necessity for a collaborative approach where the different stakeholders are represented along the AI design pipeline, from the definition of the intended use to post-market surveillance after regulatory approval. This work contributes to establish such multi-stakeholder interaction and the main action points to be taken so that the potential benefits of AI reach real-world ophthalmic settings.}, + optnote = {DIAG, INPRESS}, + ss_id = {14ac8b3d719781be45a46e7d33fdee702eecce2d}, + all_ss_ids = {['14ac8b3d719781be45a46e7d33fdee702eecce2d']}, + gscites = {24}, +} + +@article{Goud20, + author = {Goudsmit, Jeroen and Teuwen, Jonas}, + title = {{Tussen data en theorie}}, + journal = {Tijdschrift voor Toezicht}, + year = {2020}, + volume = {11}, + number = {1}, + month = {4}, + pages = {48--53}, + doi = {10.5553/TvT/187987052020011001008}, + url = {https://www.bjutijdschriften.nl/doi/10.5553/TvT/187987052020011001008}, + file = {Goud20.pdf:pdf\\Goud20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {83307292dabff3b5669d5df253867f1d47f5363f}, + all_ss_ids = {['83307292dabff3b5669d5df253867f1d47f5363f']}, + gscites = {0}, +} + +@conference{Graa22, + title = {Segmentation of vertebrae and intervertebral discs in lumbar spine MR images with iterative instance segmentation}, + author = {van der Graaf, Jasper W and van Hooff, Miranda L and Buckens, Constantinus FM and Lessmann, Nikolas}, + booktitle = {Medical Imaging 2022: Image Processing}, + volume = {12032}, + pages = {909--913}, + year = {2022}, + abstract = {Segmentation of vertebrae and intervertebral discs (IVD) in MR images are important steps for automatic image analysis. This paper proposes an extension of an iterative vertebra segmentation method that relies on a 3D fully-convolutional neural network to segment the vertebrae one-by-one. We augment this approach with an additional segmentation step following each vertebra detection to also segment the IVD below each vertebra. To train and test the algorithm, we collected and annotated T2-weighted sagittal lumbar spine MR scans of 53 patients. The presented approach achieved a mean Dice score of 93 % +- 2 % for vertebra segmentation and 86 % +- 7 % for IVD segmentation. The method was able to cope with pathological abnormalities such as compression fractures, Schmorl's nodes and collapsed IVDs. In comparison, a similar network trained for IVD segmentation without knowledge of the adjacent vertebra segmentation result did not detect all IVDs (89 %) and also achieved a lower Dice score of 83 % +- 9 %. 
These results indicate that combining IVD segmentation with vertebra segmentation in lumbar spine MR images can help to improve the detection and segmentation performance compared with separately segmenting these structures.}, + doi = {10.1117/12.2611423}, + all_ss_ids = {a46c8e13b227cfc9d208915fdc79a6aff9fc58ea}, + gscites = {1}, +} + +@article{Graa23, + title = {MRI image features with an evident relation to low back pain: a narrative review}, + author = {van der Graaf, Jasper W and Kroeze, Robert Jan and Buckens, Constantinus FM and Lessmann, Nikolas and van Hooff, Miranda L}, + journal = {European Spine Journal}, + pages = {1--12}, + year = {2023}, + publisher = {Springer}, + abstract = {Purpose: Low back pain (LBP) is one of the most prevalent health conditions worldwide and responsible for the most years lived with disability, yet the etiology is often unknown. Magnetic resonance imaging (MRI) is frequently used for treatment decision even though it is often inconclusive. There are many different image features that could relate to low back pain. Conversely, multiple etiologies do relate to spinal degeneration but do not actually cause the perceived pain. This narrative review provides an overview of all possible relevant features visible on MRI images and determines their relation to LBP. Methods: We conducted a separate literature search per image feature. All included studies were scored using the GRADE guidelines. Based on the reported results per feature an evidence agreement (EA) score was provided, enabling us to compare the collected evidence of separate image features. The various relations between MRI features and their associated pain mechanisms were evaluated to provide a list of features that are related to LBP. Results: All searches combined generated a total of 4472 hits of which 31 articles were included. Features were divided into five different categories: 'discogenic', 'neuropathic', 'osseous', 'facetogenic', and 'paraspinal', and discussed separately. Conclusion: Our research suggests that type I Modic changes, disc degeneration, endplate defects, disc herniation, spinal canal stenosis, nerve compression, and muscle fat infiltration have the highest probability to be related to LBP. These can be used to improve clinical decision-making for patients with LBP based on MRI.}, + doi = {10.1007/s00586-023-07602-x}, +} + +@article{Graa23a, + author = {van der Graaf, Jasper W. and van Hooff, Miranda L. and Buckens, Constantinus F. M. and Rutten, Matthieu and van Susante, Job L. C. and Kroeze, Robert Jan and de Kleuver, Marinus and van Ginneken, Bram and Lessmann, Nikolas}, + title = {Lumbar spine segmentation in MR images: a dataset and a public benchmark}, + journal = {arXiv:2306.12217}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {aafec95246c8d87c5d49c368220e29c8fed9775a}, + all_ss_ids = {['aafec95246c8d87c5d49c368220e29c8fed9775a']}, + gscites = {0}, +} + +@article{Grae93, + author = {Graeter, T. and Schaefer, C. and Prokop, M. and Laas, J.}, + title = {Three-dimensional vascular imaging--an additional diagnostic tool}, + journal = THOCVS, + year = {1993}, + volume = {41}, + pages = {183--185}, + doi = {10.1055/s-2007-1013850}, + abstract = {Angiography is still the standard for imaging of vascular structures. However, since the number of projections is limited, complex pathoanatomy may not be sufficiently visible. This study presents two patients in whom 3D reconstruction from Spiral CT data revealed combined vascular lesions undisclosed by angiography.
In one case the combination of coarctation and chronic dissection type B, in another the combination of two aneurysms of the celiac trunk in series was disclosed. We conclude that 3D reconstruction can be a valuable asset in the diagnosis of complex vascular pathoanatomy.}, + optnote = {DIAG}, + number = {3}, + pmid = {8367872}, + month = {6}, + gsid = {14442378706909680802}, + gscites = {22}, +} + +@article{Gras07, + author = {A. Graser and F. T. Kolligs and T. Mang and C. Schaefer and S. Geisb\"usch and M. F. Reiser and C. R. Becker}, + title = {Computer-aided detection in {CT} colonography: initial clinical experience using a prototype system}, + journal = ER, + year = {2007}, + volume = {17}, + pages = {2608--2615}, + doi = {10.1007/s00330-007-0579-0}, + abstract = {Computer-aided detection (CAD) algorithms help to detect colonic polyps at CT colonography (CTC). The purpose of this study was to evaluate the accuracy of CAD versus an expert reader in CTC. One hundred forty individuals (67 men, 73 women; mean age, 59 years) underwent screening 64-MDCT colonography after full cathartic bowel cleansing without fecal tagging. One expert reader interpreted supine and prone scans using a 3D workstation with integrated CAD used as "second reader." The system's sensitivity for the detection of polyps, the number of false-positive findings, and its running time were evaluated. Polyps were classified as small (< or =5 mm), medium (6-9 mm), and large (> or =10 mm). A total of 118 polyps (small, 85; medium, 19; large, 14) were found in 56 patients. CAD detected 72 polyps (61\%) with an average of 2.2 false-positives. Sensitivity was 51\% (43/85) for small, 90\% (17/19) for medium, and 86\% (12/14) for large polyps. For all polyps, per-patient sensitivity was 89\% (50/56) for the radiologist and 73\% (41/56) for CAD. For large and medium polyps, per-patient sensitivity was 100\% for the radiologist, and 96\% for CAD. In conclusion, CAD shows high sensitivity in the detection of clinically significant polyps with acceptable false-positive rates.}, + file = {Gras07.pdf:pdf/Gras07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {17429646}, + month = {2}, +} + +@conference{Grau22a, + author = {M. J. J. De Grauw and B. Van Ginneken}, + title = {Semi-supervised 3D universal lesion segmentation in CT thorax-abdomen scans}, + booktitle = ECR, + year = {2022}, + abstract = {PURPOSE: Extracting volumetric information using automated lesion segmentation could allow for more accurate quantification of disease response in heterogeneous lesions. Using a single model for Universal Lesion Segmentation has the potential for faster inference times compared to multi-model approaches and allows for internal representation of lesion type features. METHODS: We compiled eight public datasets with segmentation masks for various lesion types in CT thorax-abdomen scans. Scans were resampled to 1mm isotropic voxel spacing and regions of interest were cropped centered on each lesion. A nnUnet was trained with 3213 lesions from 1481 studies and used to predict 3D segmentation masks for the circa 32,000 partially annotated lesions from the DeepLesion dataset. Masks were further refined by applying the GrabCut algorithm in three orthogonal directions based on the provided long and short-axis diameter measurements. We fine-tuned the nnUnet using the resulting masks and evaluated on a test set with full annotations. We experimented with epoch numbers and learning rate decay. 
All models were trained using 5-fold cross validation. RESULTS: Fine-tuning the model using the DeepLesion masks improved segmentation performance from 0.71 to 0.73 Dice compared to the baseline nnUnet. Segmentation performance ranged from 0.53, 0.61, 0.66, 0.77, 0.79 to 0.9 Dice for colon, pancreas, lymph node, lung, liver and kidney lesions. CONCLUSIONS: 3D universal lesion segmentation using large, aggregated datasets shows promise as an alternative to lesion specific models. By incorporating partially annotated data in a semi-supervised manner we can further increase data volume and model performance with minimal annotation effort. LIMITATIONS: This study used a small number of scans in the test set and did not evaluate model performance on out-of-distribution lesion types. FUNDING: This research was supported by the Eurostars PIANO project E113829.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Grau22b, + author = {M. J. J. De Grauw and B. Van Ginneken and B. Geisler and E. J. Smit and M. De Rooij and S. Schalekamp and M. Prokop}, + title = {Deep learning universal lesion segmentation for automated RECIST measurements on CT: comparison to manual assessment by radiologists}, + booktitle = ECR, + year = {2022}, + abstract = {PURPOSE: Automating aspects of RECIST evaluation can save time and potentially reduce inter-observer variability. We trained a 3D Universal Lesion Segmentation model (ULS) to estimate long and short axis diameters in CT exams based on a single click inside the lesion. METHODS: We used the nnUnet framework to train the ULS using 3213 lesions from 1481 studies collected from eight public challenge datasets. We fine-tuned the model using masks predicted for lesions from a subset of the public DeepLesion dataset. A reader study was conducted with 128 separate DeepLesion scans. Four radiologists manually measured long- and short-axis of lesions on axial CT slices and assessed whether a lesion was eligible as target lesion. RESULTS: For 85 out of 128 scans, all readers agreed that it contained a valid RECIST target lesion. For those lesions, the relative difference between the DeepLesion measurements and the radiologists was -4.2% +- 14.2 and -0.3% +- 13.2, for the long and short axis respectively. For ULS these measures were 6% +- 17 and -5.8% +- 18.9. The mean absolute differences were 2.5 +- 3 mm and 1.9 +- 2 mm for radiologists. For ULS these measures were 4.1 +- 5.8 mm and 2.8 +- 2.7mm. For 78.8% of lesions the absolute difference between DeepLesion and ULS measurements fell within a standard deviation of the inter-radiologist variability. CONCLUSIONS: Single-click measurement using ULS shows promise to simplify and speed-up RECIST evaluation in circa 80% oncological CT exams. LIMITATIONS: This study used a small number of lesions in the test set, and readers measured long and short axis in all lesions, which is not required by RECIST. FUNDING: This research was supported by the Eurostars PIANO project E113829.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Gray17, + author = {Gray, Ewan and Donten, Anna and Karssemeijer, Nico and van Gils, Carla and Evans, D. 
Gareth and Astley, Sue and Payne, Katherine}, + title = {Evaluation of a Stratified National Breast Screening Program in the United Kingdom: An Early Model-Based Cost-Effectiveness Analysis}, + doi = {10.1016/j.jval.2017.04.012}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.jval.2017.04.012}, + file = {Gray17.pdf:pdf\Gray17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Value in Health}, + citation-count = {44}, + automatic = {yes}, + pages = {1100-1109}, + volume = {20}, +} + +@article{Gree16, + author = {H. Greenspan and R. M. Summers and B. van Ginneken}, + title = {Deep Learning in Medical Imaging: Overview and Future Promise of an Exciting New Technique}, + journal = TMI, + year = {2016}, + volume = {35}, + number = {5}, + pages = {1153--1159}, + doi = {10.1109/TMI.2016.2553401}, + file = {Gree16.pdf:pdf\\Gree16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/172640}, +} + +@conference{Grin12, + author = {M. J. J. P. van Grinsven and J. P. H. van de Ven and Y. T. E. Lechanteur and B. van Ginneken and C. B. Hoyng and T. Theelen and C. I. S\'{a}nchez}, + title = {Automatic Drusen Detection and Quantification for Diagnosis of Age-Related Macular Degeneration}, + booktitle = ARVO, + year = {2012}, + abstract = {Purpose: To develop a new algorithm for a reliable fully-automatic method for the detection and quantification of drusen in color fundus images. Methods: Sixty color fundus images of 15 patients with age-related macular degeneration (AMD) and of 15 control subjects, centered on the macula with a field of view of 60 degrees, were used. Images were acquired with a flood illuminated 3CCD fundus camera (TRX 50DX, Topcon Medical Systems). Two trained graders annotated all visible drusen in the 60 images. One was used as reference standard and the other as second observer. The proposed method uses a two-step classification. In a first step, candidate drusen objects were extracted using a k-nearest neighbor (kNN) classifier and Gaussian filter outputs. In a second step, these were classified as being true drusen or not by a support vector machine (SVM) classifier, using features based on shape, context, intensity and color. Results: The proposed algorithm was evaluated using a patient-based 3-fold cross-validation scheme. The figure of Merit of the FROC was 0.4016 using JAFROC analysis. The second observer has a sensitivity of 0.82 at a false positive rate of 30.12 per image. At the same false positive rate, the system obtained a sensitivity of 0.79 which was comparable to the second observer. Conclusions: A method based on a SVM classifier was presented for drusen detection and quantification on color fundus images. The method is able to detect drusen automatically in the central and peripheral zone. By not only detecting, but also quantifying the drusen, this method opens the way for automatic diagnosis and classification of AMD.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Grin12a, + author = {M. J. J. P. van Grinsven and B. van Ginneken and C. I. S\'{a}nchez}, + title = {Web-based workstation for the analysis of color fundus images}, + booktitle = {ISBI Medical Image Analysis Workshop}, + year = {2012}, + abstract = {We have developed a comprehensive workstation with a user friendly interface for the analysis of color fundus images. This workstation gathers the output of several computer-aided diagnosis modules for fundus image processing. 
These modules include algorithms to automatically segment vessels1, to detect anatomical structures like the optic disk and fovea location2, and to automatically detect different kind of lesions3. The workstation allows the quantitative analysis of retinal lesions and structures which can help in the diagnosis and quantification of major eye diseases including Diabetic Retinopathy (DR), Age-related Macular Degeneration (AMD) and Glaucoma. In addition to overlays highlighting individual lesion, quantitative measurements are collected in structured reports similar to the ones used in clinical practice. In the future, the workstation will be integrated into a publicly available web-based system. This way, research groups will be able to test the system on their own data sets in order to allow a comparison with their own developed software. This approach provides a new methodology for evaluating and comparing computer-aided diagnosis systems without the need to make code or executables publicly available and without being bound by the use of public data sets or by the performance measurements currently proposed in challenges.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Grin13, + author = {van Grinsven, Mark J J P. and Lechanteur, Yara T E. and van de Ven, Johannes P H. and van Ginneken, Bram and Hoyng, Carel B. and Theelen, Thomas and S\'{a}nchez, Clara I.}, + title = {Automatic Drusen Quantification and Risk Assessment of Age-related Macular Degeneration on Color Fundus Images}, + journal = IOVS, + year = {2013}, + volume = {54}, + number = {4}, + pages = {3019-3027}, + doi = {10.1167/iovs.12-11449}, + abstract = {PURPOSE: To evaluate a machine learning algorithm that allows for computer aided diagnosis (CAD) of non-advanced age-related macular degeneration (AMD) by providing an accurate detection and quantification of drusen location, area and size. METHODS: Color fundus photographs of 407 eyes without AMD or with early to moderate AMD were randomly selected from a large European multicenter database. A machine learning system was developed to automatically detect and quantify drusen on each image. Based on detected drusen, the CAD software provided a risk assessment to develop advanced AMD. Evaluation of the CAD system was performed using annotations made by two blinded human graders. RESULTS: Free-response Receiver Operating Characteristics (FROC) analysis showed that the proposed system approaches the performance of human observers in detecting drusen. The estimated drusen area showed excellent agreement with both observers, with mean intra-class correlation coefficients (ICC) larger than 0.85. Maximum druse diameter agreement was lower with a maximum ICC of 0.69 but comparable to the interobserver agreement (ICC=0.79). For automatic AMD risk assessment, the system achieved areas under the Receiver Operating Characteristic (ROC) curve of 0.948 and 0.954, reaching similar performance as human observers. CONCLUSIONS: A machine learning system, capable of separating high risk from low risk patients with non-advanced AMD by providing accurate detection and quantification of drusen, was developed. 
The proposed method allows for quick and reliable diagnosis of AMD, opening the way for large dataset analysis within population studies and genotype-phenotype correlation analysis.}, + file = {Grin13.pdf:pdf\\Grin13.pdf:PDF}, + optnote = {DIAG}, + pmid = {23572106}, + month = {4}, + gsid = {4549341628942225242}, + gscites = {40}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/118034}, + ss_id = {b47eb89360053492a2c57e7474301f12260ac1ea}, + all_ss_ids = {['b47eb89360053492a2c57e7474301f12260ac1ea']}, +} + +@inproceedings{Grin13a, + author = {M. J. J. P. van Grinsven and Y. T. E. Lechanteur and J. P. H. van de Ven and B. van Ginneken and T. Theelen and C. I. S\'{a}nchez}, + title = {Automatic Age-related macular degeneration detection and staging}, + booktitle = MI, + year = {2013}, + volume = {8670}, + series = SPIE, + pages = {86700M}, + doi = {10.1117/12.2007563}, + abstract = {Age-related macular degeneration ({AMD}) is a degenerative disorder of the central parts of the retina, which mainly affects older people and leads to permanent loss of vision in advanced stages of the disease. {AMD} grading of non-advanced {AMD} patients allows risk assessment for the development of advanced {AMD} and enables timely treatment of patients, to prevent vision loss. {AMD} grading is currently performed manually on color fundus images, which is time consuming and expensive. In this paper, we propose a supervised classification method to distinguish high risk {AMD} patients from low risk {AMD} patients and provide an exact {AMD} stage determination. The method is based on the analysis of the number and size of drusen on color fundus images, as drusen are the early characteristics of {AMD}. An automatic drusen detection algorithm is used to detect all drusen. A weighted histogram of the detected drusen is constructed to summarize the drusen extension and size and fed into a random forest classifier in order to separate low risk from high risk patients and to allow exact {AMD} stage determination. Experiments show the proposed method achieved similar performance as human observers in distinguishing low risk from high risk {AMD} patients, obtaining areas under the {R}eceiver {O}perating {C}haracteristic curve of 0.929 and 0.934. A weighted kappa agreement of 0.641 and 0.622 versus two observers were obtained for {AMD} stage evaluation. Our method allows for quick and reliable {AMD} staging at low costs.}, + file = {Grin13a.pdf:pdf\\Grin13a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {17026275504420456787}, + gscites = {5}, + ss_id = {d80fa93bf60f2f3635eec2903b09e9848ecacf03}, + all_ss_ids = {['d80fa93bf60f2f3635eec2903b09e9848ecacf03']}, +} + +@inproceedings{Grin13b, + author = {M. J. J. P. van Grinsven and A. Chakravarty and J. Sivaswamy and T. Theelen and B. van Ginneken and C. I. S\'{a}nchez}, + title = {A bag of words approach for discriminating between retinal images containing exudates or drusen}, + booktitle = ISBI, + year = {2013}, + pages = {1444-1447}, + doi = {10.1109/ISBI.2013.6556806}, + abstract = {Population screening for sight threatening diseases based on fundus imaging is in place or being considered worldwide. Most existing programs are focussed on a specific disease and are based on manual reading of images, though automated image analysis based solutions are being developed. Exudates and drusen are bright lesions which indicate very different diseases, but can appear to be similar. 
Discriminating between them is of interest to increase screening performance. In this paper, we present a Bag of Words approach which can be used to design a system that can play the dual role of content based retrieval (of images with exudates or drusen) system and a decision support system to address the problem of bright lesion discrimination. The approach consists of a novel partitioning of an image into patches from which colour, texture, edge and granulometry based features are extracted to build a dictionary. A bag of words approach is then employed to help retrieve images matching a query image as well as derive a decision on the type of bright lesion in the given (query) image. This approach has been implemented and tested on a combination of public and local dataset of 415 images. The area under the curve for image classification is 0.90 and retrieved precision is 0.76.}, + file = {Grin13b.pdf:pdf\\Grin13b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {17742882494926958127}, + gscites = {23}, + ss_id = {8be6e1c3b1224ea96f6ddbfde8153e32ab9a1f75}, + all_ss_ids = {['8be6e1c3b1224ea96f6ddbfde8153e32ab9a1f75']}, +} + +@conference{Grin14, + author = {M. J. J. P. van Grinsven and G. H. S. Buitendijk and C. Brussee and B. van Ginneken and T. Theelen and C. C. W. Klaver and C. I. S\'{a}nchez}, + title = {Automatic detection of reticular drusen using multimodal retinal image analysis}, + booktitle = ARVO, + year = {2014}, + abstract = {Purpose: Reticular drusen ({RD}) have been shown to be associated with a high risk of progression to neovascular age-related macular degeneration. {RD} identification is challenging due to their subtle characteristics on fundus images, especially using a single imaging method. We report a machine learning system to automatically identify {RD} using multiple retinal imaging modalities. Methods: Color fundus photographs ({CFP}), fundus autofluorescent images ({FAF}) and near-infrared reflectance images ({NIR}) of 175 eyes of 158 patients with presence of either reticular drusen, soft distinct/indistinct drusen, or no signs of drusen were selected from the {R}otterdam {S}tudy, a population-based cohort. A machine learning system was developed to automatically identify eyes with presence of reticular drusen. First, semi-automatic multimodal affine image registration was performed. After this, features based on {G}aussian moments were calculated on the red, green and blue color channels of the {CFP} as well as on the {FAF} and {NIR} images and combined using a random forest classifier to make a classification. Evaluation was performed by comparing the system output with annotations made by an experienced human grader. Results: The human grader identified 44 eyes with reticular drusen, 78 eyes with soft distinct/indistinct drusen and 53 eyes without drusen in the dataset. The system was evaluated using {R}eceiver {O}perating {C}haracteristics ({ROC}) curve analysis in a leave-one-out cross-validation scheme. The proposed system was able to identify images with {RD} with an area under the {ROC} curve ({AUC}) of 0.849 and highest accuracy of 0.857. An {AUC} of 0.887 was obtained for the differentiation between images with {RD} and images without drusen; whereas an {AUC} of 0.834 was achieved if the task was to distinguish images with {RD} from images with soft distinct/indistinct drusen. 
Conclusions: A machine learning system, using information of different retinal imaging modalities, was developed for the identification of patients with reticular drusen. The proposed system achieved good performance and allows for a fast and accurate reticular drusen detection using several imaging modalities in an automated way.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Grin15, + author = {Mark J. J. P. van Grinsven and Gabri\"elle H. S. Buitendijk and Corina Brussee and Bram van Ginneken and Carel B. Hoyng and Thomas Theelen and Caroline C. W. Klaver and Clara I. S\'{a}nchez}, + title = {Automatic identification of reticular pseudodrusen using multimodal retinal image analysis}, + journal = IOVS, + year = {2015}, + volume = {56}, + number = {1}, + pages = {633-639}, + doi = {10.1167/iovs.14-15019}, + abstract = {To examine human performance and agreement on reticular pseudodrusen (RPD) detection and quantification by using single- and multimodality grading protocols and to describe and evaluate a machine learning system for the automatic detection and quantification of reticular pseudodrusen by using single- and multimodality information. Color fundus, fundus autofluorescence, and near-infrared images of 278 eyes from 230 patients with or without presence of RPD were used in this study. All eyes were scored for presence of RPD during single- and multimodality setups by two experienced observers and a developed machine learning system. Furthermore, automatic quantification of RPD area was performed by the proposed system and compared with human delineations. Observers obtained a higher performance and better interobserver agreement for RPD detection with multimodality grading, achieving areas under the receiver operating characteristic (ROC) curve of 0.940 and 0.958, and a kappa agreement of 0.911. The proposed automatic system achieved an area under the ROC of 0.941 with a multimodality setup. Automatic RPD quantification resulted in an intraclass correlation (ICC) value of 0.704, which was comparable with ICC values obtained between single-modality manual delineations. Observer performance and agreement for RPD identification improved significantly by using a multimodality grading approach. The developed automatic system showed similar performance as observers, and automatic RPD area quantification was in concordance with manual delineations. The proposed automatic system allows for a fast and accurate identification and quantification of RPD, opening the way for efficient quantitative imaging biomarkers in large data set analysis.}, + file = {Grin15.pdf:pdf\\Grin15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25574052}, + month = {1}, + gsid = {2566342930661349456}, + gscites = {38}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/154610}, + ss_id = {2da30c95504bfe2cb1e3c1058531db3226ac7780}, + all_ss_ids = {['2da30c95504bfe2cb1e3c1058531db3226ac7780']}, +} + +@conference{Grin15a, + author = {Mark J. J. P. van Grinsven and Freerk G. Venhuizen and Bram van Ginneken and Carel B. Hoyng and Thomas Theelen and Clara I. S\'{a}nchez}, + title = {Automatic detection of eye diseases using automated color fundus image analysis}, + booktitle = ARVO, + year = {2015}, + abstract = {Purpose: {D}iabetic {R}etinopathy ({DRP}) and {A}ge-related {M}acular {D}egeneration are the most common visual threatening eye diseases in industrialized countries. 
Detection of these diseases at an early stage can help to identify patients that would benefit from treatment to slow or prevent their progression into more severe stages with visual loss. We report an automatic software solution for the detection of early stages of {DRP} and {AMD} using color fundus ({CF}) images. Methods: {CF} images from several public datasets ({D}iaret{B}0, {D}iaret{B}1, {S}tare, {M}essidor, {DR}1/{DR}2) and two private data sets were pooled together and used in this study. Only macular centered images with sufficient quality for manual assessment were included. Images in advanced stages of AMD or DRP were excluded. The remaining study set consisted of 2128 CF images. All images were labeled by an expert into one of three classes: early stage DRP, early stage AMD or control. Two existing machine learning systems, one for early stage AMD detection and one for early stage DRP detection, were combined to detect the early stages of DRP and AMD simultaneously. Final classification features of both systems were concatenated and a random forest classifier was trained to make a classification between the target cases (early stage {DRP} or {AMD}) and the control cases. Evaluation was performed by comparing the output of the system with the human expert's labels. Results: The human expert labeled 596 and 196 cases as early stage {DRP} or {AMD}, respectively. The remaining 1336 {CF} images were labeled as controls. The automatic system was evaluated using {R}eceiver {O}perating {C}haracteristics ({ROC}) curve analysis in a 10-fold cross-validation scheme. The combined system for detection of the early stages of {DRP} and {AMD} was able to separate the target cases from the control cases with an area under the {ROC} curve ({AUC}) of 0.948. The system achieved a sensitivity of 0.880 and a specificity of 0.886. Conclusions: A machine learning system was developed for the identification of early stages of {AMD} and {DRP}. The proposed system achieved good performance and allows for a fast and accurate identification of patients that may benefit from treatment, opening the way to a cost-effective mass screening procedure of patients at risk of {AMD} and {DRP}.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Grin16, + author = {Mark J. J. P. van Grinsven and Freerk G. Venhuizen and Bram van Ginneken and Carel B. Hoyng and Thomas Theelen and Clara I. S\'{a}nchez}, + title = {Automatic detection of hemorrhages on color fundus images using deep learning}, + booktitle = ARVO, + year = {2016}, + abstract = {Purpose: The presence of hemorrhages is one of the common signs of diabetic retinopathy ({DRP}), a vision threatening retinal disease affecting patient with diabetes. Automatic detection of hemorrhages is important to facilitate the timely detection of patients that would benefit from treatment to slow down or prevent disease progression to vision-threatening stages of {DRP}. We report an automatic system based on deep learning to automatically detect hemorrhages on color fundus images. Methods: Data was drawn from two public datasets ({K}aggle and {M}essidor) by selecting images with sufficient quality for analysis, including a total of 4624 and 1102 images, respectively. The {K}aggle set was split into a development (4048) and an evaluation set (576). The {M}essidor set was solely used as external evaluation set. A reference observer indicated presence of hemorrhages for all images and also marked their locations in the development set. 
Both evaluation sets were also scored for the presence of hemorrhages by two independent human experts. An automatic system based on deep learning, employing a convolutional neural network ({CNN}), was developed and used to identify images with hemorrhages. The {CNN} consisted of a layered architecture of 5 convolutional layers, a fully connected layer, and a final classification layer. The {CNN} used 41x41 pixel sized color patches as input in the first layer. In each convolutional layer, inputs were convolved with a set of small sized filters and the response maps were used as input in the next layer. In the last layer, an image score indicating the likelihood for the presence of hemorrhages was generated. Evaluation was performed by comparing system results and human expert annotations with the reference. Results: The reference observer marked 99 and 289 images as containing hemorrhages; and 477 and 813 as controls in the two test sets. The automatic system achieved areas ({A}z) under the receiver operating characteristics curve of 0.957 and 0.968 with sensitivity/specificity pairs of 0.889/0.912 and 0.931/0.888, whereas the human experts achieved sensitivity/specificity of 0.919/0.979 and 0.899/0.977 in the {K}aggle test set; and 0.976/0.894 and 0.958/0.872 in the {M}essidor test set. Conclusions: An automatic system was developed for the detection of hemorrhages on color fundus images. The system approaches human expert performance and allows for a fast and reliable identification of patients with moderate to severe {DRP} in a screening setup.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6287248310931733301}, + gscites = {6}, + all_ss_ids = {e4db301a185bccd105017fb66e3f9e2adf876495}, +} + +@article{Grin16a, + author = {van Grinsven, Mark J J P. and Theelen, Thomas and Witkamp, Leonard and van der Heijden, Job and van de Ven, Johannes P H. and Hoyng, Carel B. and van Ginneken, Bram and S\'{a}nchez, Clara I.}, + title = {Automatic differentiation of color fundus images containing drusen or exudates using a contextual spatial pyramid approach}, + journal = BOE, + year = {2016}, + volume = {7}, + number = {3}, + pages = {709-725}, + doi = {10.1364/BOE.7.000709}, + abstract = {We developed an automatic system to identify and differentiate color fundus images containing no lesions, drusen or exudates. Drusen and exudates are lesions with a bright appearance, associated with age-related macular degeneration and diabetic retinopathy, respectively. The system consists of three lesion detectors operating at pixel-level, combining their outputs using spatial pooling and classification with a random forest classifier. System performance was compared with ratings of two independent human observers using human-expert annotations as reference. Kappa agreements of 0.89, 0.97 and 0.92 and accuracies of 0.93, 0.98 and 0.95 were obtained for the system and observers, respectively.}, + file = {Grin16a.pdf:pdf\\Grin16a.pdf:PDF}, + optnote = {DIAG}, + pmid = {27231583}, + month = {2}, + gsid = {12016324152117679054}, + gscites = {10}, + ss_id = {2ebb428ce82dc699122e1a98d518e48d0c068ed0}, + all_ss_ids = {['2ebb428ce82dc699122e1a98d518e48d0c068ed0']}, +} + +@article{Grin16b, + author = {M. J. J. P. van Grinsven and B. van Ginneken and C. B. Hoyng and T. Theelen and C. I. 
S\'{a}nchez.}, + title = {Fast Convolutional Neural Network Training Using Selective Data Sampling: Application to Hemorrhage Detection in Color Fundus Images}, + journal = TMI, + year = {2016}, + volume = {35}, + number = {5}, + pages = {1273-1284}, + doi = {10.1109/TMI.2016.2526689}, + abstract = {Convolutional neural networks ({CNNs}) are deep learning network architectures that have pushed forward the state-of-the-art in a range of computer vision applications and are increasingly popular in medical image analysis. However, training of CNNs is time-consuming and challenging. In medical image analysis tasks, the majority of training examples are easy to classify and therefore contribute little to the CNN learning process. In this paper, we propose a method to improve and speed-up the CNN training for medical image analysis tasks by dynamically selecting misclassified negative samples during training. Training samples are heuristically sampled based on classification by the current status of the CNN. Weights are assigned to the training samples and informative samples are more likely to be included in the next CNN training iteration. We evaluated and compared our proposed method by training a CNN with (SeS) and without (NSeS) the selective sampling method. We focus on the detection of hemorrhages in color fundus images. A decreased training time from 170 epochs to 60 epochs with an increased performance -- on par with two human experts -- was achieved with areas under the receiver operating characteristics curve of 0.894 and 0.972 on two data sets. The SeS CNN statistically outperformed the NSeS CNN on an independent test set.}, + file = {Grin16b.pdf:pdf\\Grin16b.pdf:PDF}, + optnote = {DIAG}, + pmid = {26886969}, + month = {5}, + gsid = {9170568847453666888}, + gscites = {369}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/167797}, + ss_id = {226df2c0315fcd6ac45413e70d3a0bac3e1b1072}, + all_ss_ids = {['226df2c0315fcd6ac45413e70d3a0bac3e1b1072']}, +} + +@phdthesis{Grin16c, + author = {van Grinsven, M.}, + title = {Automated analysis of retinal images for detection of age-related macular degeneration and diabetic retinopathy}, + year = {2016}, + url = {http://hdl.handle.net/2066/159478}, + abstract = {In this thesis, an effort is made to pursue the VISION 2020 goals of eliminating avoidable blindness and visual impairment worldwide. It describes and validates new automatic methods to detect Diabetic Retinopathy (DR) and Age-related Macular Degeneration (AMD), two of the most common retinal diseases worldwide. Achieving automatic detection of these diseases will facilitate and accelerate implementation of screening programs for retinal diseases worldwide. It is estimated that 80% of blindness is preventable if timely awareness of presence of these diseases is achieved.}, + copromotor = {C.I. S\'{a}nchez-Guti\'{e}rrez and T. Theelen}, + file = {Grin16c.pdf:pdf/Grin16c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and C. Hoyng}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@phdthesis{Grob19, + author = {Dagmar Grob}, + title = {Functional CT Imaging of the Lung: Substraction CT as a novel technique}, + url = {https://repository.ubn.ru.nl/handle/2066/212659}, + abstract = {Two important functions of the lungs that are needed for the exchange of oxygen and carbon dioxide are ventilation and perfusion. One or both of these functionalities could be hampered as a result of cardiopulmonary disease. 
Pulmonary embolism is one of those diseases where perfusion in the lung is reduced, because a blood clot is lodged in a pulmonary artery, disturbing blood flow. In the Netherlands, pulmonary embolism occurs in approximately 10,000 to 12,500 patients per year, resulting in significant morbidity and mortality.}, + copromotor = {M. Brink, I. Sechopoulos}, + file = {:pdf/Grob19.pdf:PDF;:png/publications/Grob19 - Functional CT Imaging of the Lung_ Substraction CT As a Novel Technique.png:PNG image}, + optnote = {AXTI, DIAG, RADIOLOGY}, + promotor = {M. Prokop}, + school = {Radboud University, Nijmegen, The Netherlands}, + year = {2019}, + journal = {PhD thesis}, +} + +@article{Grob19a, + author = {Grob, Dagmar and Smit, Ewoud and Prince, Jip and Kist, Jakob and St\"{o}ger, Lauran and Geurts, Bram and Snoeren, Miranda M. and van Dijk, Rogier and Oostveen, Luuk J. and Prokop, Mathias and Schaefer-Prokop, Cornelia M. and Sechopoulos, Ioannis and Brink, Monique}, + title = {Iodine Maps from Subtraction CT or Dual-Energy CT to Detect Pulmonary Emboli with CT Angiography: A Multiple-Observer Study}, + doi = {10.1148/radiol.2019182666}, + year = {2019}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.2019182666}, + file = {Grob19a.pdf:pdf\Grob19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {30}, + automatic = {yes}, + pages = {197-205}, + volume = {292}, +} + +@conference{Grob19b, + author = {Grob, Dagmar and Oostveen, Luuk J. and Jacobs, Colin and Prokop, Mathias and Schaefer-Prokop, Cornelia and Sechopoulos, Ioannis and Brink, Monique}, + title = {Intra-patient comparison of pulmonary nodule enhancement in subtraction CT and dual-energy CT}, + booktitle = ESTI, + year = {2019}, + abstract = {PURPOSE/OBJECTIVES: Subtraction CT (SCT) is a new software-based imaging modality that has the same diagnostic performance as dual-energy CT (DECT) in pulmonary embolism (PE) detection, but its performance for other applications is not investigated yet. In this study we aimed to determine the capability of SCT in depicting benign and malignant pulmonary nodule enhancement by comparing it to that of DECT. METHODS AND MATERIALS: We identified 29 patients with solid pulmonary nodules (>= 4 mm) in a cohort of 295 consecutive patients recruited for a prospective study on PE. Patients were worked up or followed for at least 20 months to obtain a standard of reference. In total, 96 nodules were investigated (72 malignant, 24 benign). Patients in this study had received a pre-contrast CT at 100 kV prior to a dual-energy CT angiography (CTA) at 100 and 140 kV (Siemens Definition Flash, Germany), with 60 ml of iodinated contrast (300 mg I/ml) followed by 40 ml saline flush injected at 5 ml/s. Scan acquisition was 5 s after reaching a relative threshold enhancement (60 HU) in the pulmonary trunk. For SCT, iodine maps were obtained by subtracting the pre contrast from the contrast-enhanced scan after non-rigid registration (Canon Medical Systems, Japan). Nodules were annotated and volumetrically segmented on the CTA on a lung screening workstation (CIRRUS Lung Screening, The Netherlands) and mean enhancement calculated. Student's t-test was used for statistical testing. RESULTS: Mean enhancement of the nodules was significantly higher for SCT than for DECT (34.6+-12.7 vs 25.7+-20.8 HU; p<0.001). 
Mean enhancement in malignant and benign nodules was 35.5+-11.7 HU and 31.9+-15.4 HU on SCT (p=0.24), and 26.6+-20.7 HU and 22.8+-21.4 HU on DECT (p=0.44), respectively. Dose-length-product was slightly lower for SCT than DECT (156 and 163 mGy*cm; p<0.001). CONCLUSION: Subtraction CT shows higher nodule enhancement compared to dual-energy CT. As expected, due to the use of early acquisition, CTA for PE cannot be used to characterize incidental nodules as benign or malignant. However, previous studies have shown the feasibility of nodule characterization with DECT with late acquisition. Therefore, given that it results in better enhancement depiction, it is expected that, SCT with optimized timing could be a promising development for improved nodule characterization at a comparable radiation dose to DECT, without the need for dedicated additional hardware.}, + optnote = {AXTI, DIAG, RADIOLOGY}, +} + +@article{Grob20, + author = {Dagmar Grob and Luuk J. Oostveen and Colin Jacobs and Ernst Scholten and Mathias Prokop and Cornelia M. Schaefer-Prokop and Ioannis Sechopoulos and Monique Brink}, + title = {Pulmonary nodule enhancement in subtraction CT and dual-energy CT: A comparison study}, + journal = EJR, + volume = {134}, + pages = {109443}, + year = {2021}, + doi = {10.1016/j.ejrad.2020.109443}, + pmid = {33310553}, + abstract = {Objective: To compare nodule enhancement on subtraction CT iodine maps to that on dual-energy CT iodine maps using CT datasets acquired simultaneously. Methods: A previously-acquired set of lung subtraction and dual-energy CT maps consisting of thirty patients with 95 solid pulmonary nodules (>=4 mm diameter, 72 malignant) was used. Nodules were annotated and segmented on CT angiography, and mean nodule enhancement in the iodine maps calculated. Three radiologists scored nodule visibility with both techniques on a 4-point scale. Results: Mean nodule enhancement was higher (p < 0.001) at subtraction CT (34.9 +- 12.9HU) than at dual-energy CT (25.4 +- 21.0HU). Nodule enhancement at subtraction CT was judged more often to be "highly visible" for each observers (p < 0.001) with an area under the curve of 0.81. Conclusions: Subtraction CT is able to depict iodine enhancement in pulmonary nodules better than dual-energy CT.}, + file = {Grob20.pdf:pdf\\Grob20.pdf:PDF}, + optnote = {AXTI, DIAG, RADIOLOGY}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/229492}, + ss_id = {1479138d85b4af9d10bd3c2301ea68d2406e8d85}, + all_ss_ids = {['1479138d85b4af9d10bd3c2301ea68d2406e8d85']}, + gscites = {2}, +} + +@conference{Grob20a, + author = {Grob, Dagmar and Schalekamp, Steven and Oostveen, Luuk J. and van der Woude, Willem Jan and Jacobs, Colin and Prokop, Mathias and Sechopoulos, Ioannis and Brink, Monique}, + title = {Pulmonary nodule growth: can follow-up be shortened with a high-end or an ultra-high-resolution CT scanner?}, + booktitle = ECR, + year = {2020}, + abstract = {PURPOSE: To determine the interscan variability of pulmonary nodule volume measurements in CT scans acquired with state-of-the-art wide-area and ultra-high-resolution CT systems. METHODS AND MATERIALS: In this prospective study, patients with at least two non-calcified solid pulmonary nodules suspicious for metastases on previous CT scans were imaged twice with either a high-end 320 detector CT (MDCT, Aquilion ONE Genesis, Canon, slice thickness 0.5 mm, 512x512 matrix) or an ultra-high-resolution CT (UHRCT, Precision, Canon, 0.25 mm, 1024x1024). 
In between scans, an off-and-on table strategy was used to simulate follow-up scans with no nodule growth. Semi-automated volumetric nodule segmentation and volume estimation (max. 4 per patient, effective diameter 4-15 mm) were performed on a lung screening workstation (Veolity). 95%-limits of agreement (LOA) and the time to estimate actual nodule growth rate at a nodule volume doubling time (VDT) of 400 days were calculated. RESULTS: 17 patients (60 nodules, average volume: 218 mm3) were imaged on the MDCT and 27 patients (90 nodules, 177 mm3) on the UHRCT at a similar dose (mean dose-length-product: 126.6 mGycm vs 127.2 mGycm, respectively (p=0.98)). The 95%-LOA was +-7.0% for the MDCT and +-5.9% for the UHRCT (p=0.07). Therefore, the minimum required interscan period to detect a VDT of 400 days is 33-39 days. CONCLUSION: Both scanners result in low interscan variability, especially compared to current clinical standards, which requires a volume change of 25% (the current 95%-LOA) as significant nodule growth. Therefore, the follow-up period to detect pulmonary nodule growth could be dramatically shortened from three to about one month, reducing patient anxiety and the potential for stage shift in lung nodule management. LIMITATIONS: Pulmonary metastases instead of incidental nodules were measured.}, + optnote = {AXTI, DIAG, RADIOLOGY}, +} + +@mastersthesis{Gube10, + author = {Gubern-M\'{e}rida, A.}, + title = {Multi-class probabilistic atlas-based segmentation method in breast {MRI}}, + year = {2010}, + file = {Gube10.pdf:pdf/Gube10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Universitat de Girona / Radboud University Nijmegen}, + journal = {Master thesis}, +} + +@inproceedings{Gube11, + author = {Gubern-M\'{e}rida, A. and Kallenberg, M.G.J. and Mart\'{i}, R. and Karssemeijer, N.}, + title = {Multi-class probabilistic atlas-based segmentation method in breast {MRI}}, + booktitle = {Pattern Recognition and Image Analysis: proceedings of 5th Iberian Conference}, + year = {2011}, + volume = {5}, + abstract = {Organ localization is an important topic in medical imaging in aid of cancer treatment and diagnosis. An example are the pharmacokinetic model calibration methods based on a reference tissue, where a pectoral muscle delineation in breast MRI is needed to detect malignancy signs. Atlas-based segmentation has been proven to be powerful in brain MRI. This is the first attempt to apply an atlas-based approach to segment breast in T1 weighted MR images. The atlas consists of 5 structures (fatty and dense tissues, heart, lungs and pectoral muscle). It has been used in a Bayesian segmentation framework to delineate the mentioned structures. Global and local registration have been compared, where global registration showed the best results in terms of accuracy and speed. Overall, a Dice Similarity Coefficient value of 0.8 has been obtained which shows the validity of our approach to Breast MRI segmentation.}, + file = {Gube11.pdf:pdf/Gube11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6065729845957490171}, + gscites = {18}, + all_ss_ids = {['5d799e097d8f9b91a509bce2ebb98a04006c0c24']}, +} + +@inproceedings{Gube11a, + author = {Gubern-M\'{e}rida, A. and Kallenberg, M.G.J. and Mart\'{i}, R. 
and Karssemeijer, N.}, + title = {Fully automatic fibroglandular tissue segmentation in breast {MRI}: atlas-based approach}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2011}, + abstract = {Breast density measurement in Breast Magnetic Resonance Imaging (MRI) is becoming important due to its relation with the risk factor for developing breast cancer. In this work we developed a fully automatic method for the segmentation of fibroglandular tissue. The method consists of a first step based on a Bayesian framework using atlas information for the separation of the pectoral and the breast, and a second refinement stage by LDA. The method was evaluated over 27 cases comparing the obtained segmentations to the manual ones. A Dice Similarity Coefficient (DSC) of 0.75 was obtained.}, + file = {Gube11a.pdf:pdf/Gube11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {3060254826683519914}, + gscites = {17}, +} + +@inproceedings{Gube12, + author = {Gubern-M\'{e}rida, Albert and Kallenberg, Michiel and Mart\'{i}, Robert and Karssemeijer, Nico}, + title = {Segmentation of the pectoral muscle in breast {MRI} using atlas-based approaches}, + booktitle = MICCAI, + year = {2012}, + volume = {15}, + series = LNCS, + pages = {371--378}, + doi = {10.1007/978-3-642-33418-4_46}, + abstract = {Pectoral muscle segmentation is an important step in automatic breast image analysis methods and crucial for multi-modal image registration. In breast MRI, accurate delineation of the pectoral is important for volumetric breast density estimation and for pharmacokinetic analysis of dynamic contrast enhancement. In this paper we propose and study the performance of atlas-based segmentation methods evaluating two fully automatic breast MRI dedicated strategies on a set of 27 manually segmented MR volumes. One uses a probabilistic model and the other is a multi-atlas registration based approach. The multi-atlas approach performed slightly better, with an average Dice coefficient (DSC) of 0.74, while with the much faster probabilistic method a DSC of 0.72 was obtained.}, + file = {Gube12.pdf:pdf/Gube12.pdf:PDF}, + issue = {Pt 2}, + journal = MICCAI, + optnote = {DIAG, RADIOLOGY}, + number = {Pt 2}, + pmid = {23286070}, + gsid = {2427097091307085452}, + gscites = {45}, + ss_id = {2b40545a6af971b370932b854150e74c623f7357}, + all_ss_ids = {['2b40545a6af971b370932b854150e74c623f7357']}, +} + +@inproceedings{Gube13, + author = {A. Gubern-M\'{e}rida and L. Wang and M. Kallenberg and R. Mart\'{i} and H.K. Hahn and N. Karssemeijer}, + title = {Breast segmentation in {MRI}: quantitative evaluation of three methods}, + booktitle = MI, + year = {2013}, + series = SPIE, + pages = {86693G-86693G-7}, + doi = {10.1117/12.2006541}, + abstract = {A precise segmentation of breast tissue is often required for computer-aided diagnosis ({CAD}) of breast {MRI}. Only a few methods have been proposed to automatically segment breast in {MRI}. Authors reported satisfactory performance, but a fair comparison has not been done yet as all breast segmentation methods were evaluated on their own data sets with different manual annotations. Moreover, breast volume overlap measures, which were commonly used for evaluations, do not seem to be adequate to accurately quantify the segmentation qualities. Breast volume overlap measures are not sensitive to small errors, such as local misalignments, because breast appears to be much larger than other structures. 
In this work, two atlas-based approaches and a breast segmentation method based on {H}essian sheetness filter were exhaustively evaluated and benchmarked on a data set of 52 manually annotated breast {MR} images. Three quantitative measures including percentage of missed dense tissue, percentage of missed pectoral muscle and pectoral surface distance were defined to objectively reflect the practical use of breast segmentation in {CAD} methods. The evaluation measures provided important evidence to conclude that the three evaluated techniques performed accurate breast segmentations. More specifically, the atlas-based methods appeared to be more precise, but required larger computation time than the sheetness-based breast segmentation approach.}, + file = {Gube13.pdf:pdf\\Gube13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {14620347730568680552}, + gscites = {14}, + ss_id = {024137b64819b9f18837d405e4ecff2572468c80}, + all_ss_ids = {['024137b64819b9f18837d405e4ecff2572468c80']}, +} + +@inproceedings{Gube13a, + author = {Gubern-M\'{e}rida, A. and Platel, B. and Mart\'{i}, R. and Karssemeijer, N.}, + title = {Automated localization of malignant lesions in breast {DCE}-{MRI}}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2013}, + abstract = {Dynamic contrast-enhanced magnetic resonance imaging (DCEMRI) is increasingly used for breast cancer assessment. Compared to mammography DCE-MRI provides higher sensitivity, however the specificity of DCE-MRI is variable. Continued efforts are focused on identifying distinguishing characteristics of malignant and benign lesions. DCE-MRI data analysis is time consuming and presents high inter- and intra-observer variability. The aim of this work is to propose an automated breast lesion localization system for DCE-MRI. Such a system can be used to support radiologists during DCE-MRI analysis, to facilitate pre-calculation of very computationally demanding features and to form the basis of a standalone computer aided diagnosis application. The proposed method initially segments the breast and uses a gentle adaboost classifier and features extracted from the relative signal enhancement to detect malignant lesions. Evaluation was performed on a dataset of 212 DCE-MRI studies from 126 patients with no sign of breast cancer and 86 patients with biopsy-proven annotated malignant lesions. The results obtained by our method are promising for clinical applications: 96% of the lesions of our study dataset were correctly detected at 10.4 false positives per patient without cancer.}, + file = {Gube13a.pdf:pdf/Gube13a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Gube14, + author = {A. Gubern-M\'{e}rida and M. Kallenberg and B. Platel and R.M. Mann and R. Marti and N. Karssemeijer}, + title = {Volumetric breast density estimation from Full-Field Digital Mammograms: A validation study}, + journal = PLOSONE, + year = {2014}, + volume = {9}, + issue = {1}, + pages = {e85952}, + doi = {10.1371/journal.pone.0085952}, + url = {http://dx.doi.org/10.1371%2Fjournal.pone.0085952}, + abstract = {Objectives

To objectively evaluate automatic volumetric breast density assessment in Full-Field Digital Mammograms (FFDM) using measurements obtained from breast Magnetic Resonance Imaging (MRI).

Material and Methods

A commercially available method for volumetric breast density estimation on FFDM is evaluated by comparing volume estimates obtained from 186 FFDM exams including mediolateral oblique (MLO) and cranial-caudal (CC) views to objective reference standard measurements obtained from MRI.

Results

Volumetric measurements obtained from FFDM show high correlation with MRI data. Pearson's correlation coefficients of 0.93, 0.97 and 0.85 were obtained for volumetric breast density, breast volume and fibroglandular tissue volume, respectively.

Conclusions

Accurate volumetric breast density assessment is feasible in Full-Field Digital Mammograms and has potential to be used in objective breast cancer risk models and personalized screening.

}, + file = {Gube14.pdf:pdf\\Gube14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {24465808}, + month = {1}, + gsid = {9649554823203874553}, + gscites = {156}, + ss_id = {15e91f8c7fc112ced9946aef9761fa92a1f235c0}, + all_ss_ids = {['15e91f8c7fc112ced9946aef9761fa92a1f235c0']}, +} + +@article{Gube15, + author = {Gubern-M\'{e}rida, Albert and Kallenberg, Michiel and Mann, Ritse M. and Marti, Robert and Karssemeijer, Nico}, + title = {Breast Segmentation and Density Estimation in Breast {MRI}: A Fully Automatic Framework}, + journal = JBHI, + year = {2015}, + volume = {19}, + issue = {1}, + month = {1}, + pages = {349-357}, + doi = {10.1109/JBHI.2014.2311163}, + abstract = {Breast density measurement is an important aspect in breast cancer diagnosis as dense tissue has been related to the risk of breast cancer development. The purpose of this study is to develop a method to automatically compute breast density in breast MRI. The framework is a combination of image processing techniques to segment breast and fibroglandular tissue. Intra- and interpatient signal intensity variability is initially corrected. The breast is segmented by automatically detecting body-breast and air-breast surfaces. Subsequently, fibroglandular tissue is segmented in the breast area using expectation-maximization. A dataset of 50 cases with manual segmentations was used for evaluation. Dice similarity coefficient (DSC), total overlap, false negative fraction (FNF), and false positive fraction (FPF) are used to report similarity between automatic and manual segmentations. For breast segmentation, the proposed approach obtained DSC, total overlap, FNF, and FPF values of 0.94, 0.96, 0.04, and 0.07, respectively. For fibroglandular tissue segmentation, we obtained DSC, total overlap, FNF, and FPF values of 0.80, 0.85, 0.15, and 0.22, respectively. The method is relevant for researchers investigating breast density as a risk factor for breast cancer and all the described steps can be also applied in computer aided diagnosis systems.}, + file = {Gube15.pdf:pdf\\Gube15.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {25561456}, + gsid = {13886477572492923291}, + gscites = {117}, + ss_id = {9356a0862ad0dd269e24f09924d2d1f99ea7fda9}, + all_ss_ids = {['9356a0862ad0dd269e24f09924d2d1f99ea7fda9']}, +} + +@phdthesis{Gube15a, + author = {A. Gubern-M\'{e}rida}, + title = {Automated Analysis of Magnetic Resonance Imaging of the Breast}, + year = {2015}, + url = {http://repository.ubn.ru.nl/handle/2066/140305}, + abstract = {Automated image analysis techniques and Computer-Aided Detection (CAD) systems can be used in order to aid radiologists in reading and interpreting MRI images. This thesis focuses on the investigation of image analysis techniques for the automated interpretation of breast DCE-MRI images.}, + copromotor = {R. Mart\'{i} and B. Platel}, + file = {Gube15a.pdf:pdf/Gube15a.pdf:PDF}, + optnote = {DIAG}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen / Universitat de Girona, Spain}, + journal = {PhD thesis}, +} + +@article{Gube15b, + author = {A. Gubern-M\'{e}rida and R. Marti and J. Melendez and J.L. Hauth and R.M. Mann and N. Karssemeijer and B. 
Platel}, + title = {Automated localization of breast cancer in {DCE}-{MRI}}, + journal = MIA, + year = {2015}, + volume = {20}, + number = {1}, + issue = {1}, + month = {2}, + pages = {265-274}, + doi = {10.1016/j.media.2014.12.001}, + abstract = {Dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) is increasingly being used for the detection and diagnosis of breast cancer. Compared to mammography, DCE-MRI provides higher sensitivity, however its specificity is variable. Moreover, DCE-MRI data analysis is time consuming and depends on reader expertise. The aim of this work is to propose a novel automated breast cancer localization system for DCE-MRI. Such a system can be used to support radiologists in DCE-MRI analysis by marking suspicious areas. The proposed method initially corrects for motion artifacts and segments the breast. Subsequently, blob and relative enhancement voxel features are used to locate lesion candidates. Finally, a malignancy score for each lesion candidate is obtained using region-based morphological and kinetic features computed on the segmented lesion candidate. We performed experiments to compare the use of different classifiers in the region classification stage and to study the effect of motion correction in the presented system. The performance of the algorithm was assessed using free-response operating characteristic (FROC) analysis. For this purpose, a dataset of 209 DCE-MRI studies was collected. It is composed of 95 DCE-MRI studies with 105 breast cancers (55 mass-like and 50 non-mass-like malignant lesions) and 114 DCE-MRI studies from women participating in a screening program which were diagnosed to be normal. At 4 false positives per normal case, 89\% of the breast cancers (91\% and 86\% for mass-like and non-mass-like malignant lesions, respectively) were correctly detected.}, + file = {Gube15b.pdf:pdf\\Gube15b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25532510}, + gsid = {10687699890920421893}, + gscites = {111}, + ss_id = {60149277867d3e86391c0aa4ce5720e1a1e5cb48}, + all_ss_ids = {['60149277867d3e86391c0aa4ce5720e1a1e5cb48']}, +} + +@conference{Gube15c, + author = {A. Gubern-M\'{e}rida and S. Vreemann and R. Marti and J. Melendez and S. Lardenoije and R. M. Mann and B. Platel and N. Karssemeijer}, + title = {Automated detection of breast cancer as an aid in the interpretation of screening {MRI}}, + booktitle = ECR, + year = {2015}, + abstract = {PURPOSE: Computer aided detection (CADe) of suspect abnormalities in MRI may prevent reading errors in breast cancer screening of women at high risk. In this study, we evaluate the performance of a CADe system in detecting breast cancers missed in screening and compare this to the performance obtained on screen-detected cancers. METHODS AND MATERIALS: We collected DCE-MRI studies from 163 women participating in a high risk screening program. These data included 26 scans with screen-detected cancers, and 23 scans with 10 and 13 cancers that were retrospectively visible (BI-RADS 4/5) or minimally visible (BI-RADS 2/3) in prior MRI screening exams, but were reported to be normal. Cancers were detected at the following screening round. Furthermore, 114 normal scans with no sign of breast cancer were included. Lesions were annotated on the first post-contrast subtraction image. A CADe system was developed in-house. The detection performance was evaluated using free-response receiver operating characteristic and bootstrapping. 
A CADe finding was considered true positive when its center was inside a lesion annotation. The false-positive rate (FP/case) was determined on the normal cases. RESULTS: At 4 FP/case, the sensitivity for screen-detected lesions was 0.80 (95% confidence interval 0.62-0.96). For lesions that were visible or minimally visible in prior false-negative studies, the sensitivities were 0.69 (0.33-1.00) and 0.47 (0.18-0.75), respectively. CONCLUSION: The detection performance for missed cancers of a CADe system was almost as high as for screen-detected cancers. The integration of such a system in clinical practice might aid radiologists to avoid screening errors.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Gube15d, + author = {Gubern-M\'{e}rida, A. and Tan, T. and van Zelst, J. and Mann, R. M. and Platel, B. and Karssemeijer, N.}, + title = {Pectoral muscle surface segmentation in automated 3{D} breast ultrasound using cylindrical transform and atlas information}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2015}, + file = {Gube15d.pdf:pdf\\Gube15d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Gube15e, + author = {A. Gubern-M\'{e}rida and S. Vreemann and R. Marti and J. Melendez and R. M. Mann and B. Platel and N. Karssemeijer}, + title = {Automated Detection of Mass-like, Non-mass-like and Focus Breast Cancer Lesions Visible in False-negative Screening DCE-MRI}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE Breast cancer lesions are regularly overlooked or misinterpreted in breast MRI screening due to lesion appearance suggesting benign disease, extensive background enhancement or fatigue and lack of experience analyzing 4D data. In this study, we evaluate the performance of an automated computer-aided detection (CAD) system to detect mass-like, non-mass-like and focus breast cancer lesions that were, in retrospect, visible on earlier screening MRIs but only detected in a subsequent scans. METHOD AND MATERIALS Between 2003 and 2013, we identified 24 prior-negative MRI scans (BI-RADS 1/2) with 24 breast cancers (10 mass-like, 8 non-mass-like and 6 foci) in a MRI screening program. Cancers were detected by radiologists at the following screening round. Additionally, 120 normal scans were collected from the same MRI screening program from different women without history of breast cancer or breast surgery. A previously validated fully automated CAD system was applied to this dataset to detect malignant lesions. The system corrects for motion artifacts and segments the breast. Subsequently, lesion candidates are detected using relative enhancement and texture features to characterize breast cancer lesions. The final classification is performed using region-based morphological and kinetics features computed on segmented lesion candidates. The detection performance was evaluated using free-response receiver operating characteristic analysis and bootstrapping. A CAD finding was considered a true positive when its center was inside a lesion annotation. The false positive rate (FP/case) was determined on the normal cases. RESULTS At 4 FP/case, the sensitivity for detecting mass-like and non-mass-like lesions in prior-negative scans was 0.50 (95% confidence interval 0.17-0.83) and 0.85 (0.50-1.00), respectively. At the same FP/case, the CAD system did not detect focus breast cancer lesions. 
CONCLUSION A CAD system was able to automatically detect 50% and 85% of mass-like and non-mass-like enhancement lesions that were missed in screening with MRI, respectively. Further improvement is required to detect focus lesions. The integration of such a system in clinical practice might aid radiologists to avoid screening errors. CLINICAL RELEVANCE/APPLICATION Automated lesion detection in breast MRI can facilitate breast cancer screening and reduce reading errors.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Gube15f, + author = {A. Gubern-M\'{e}rida and T. Tan and J. van Zelst and R. M. Mann and B. Platel and N. Karssemeijer}, + title = {Evaluation of a Novel Method to Segment the Pectoral Muscle Surface in Automated Whole Breast Ultrasound}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE Segmentation of anatomical structures in automated 3D breast ultrasound (ABUS) is required for development of computer-aided detection (CAD) and other techniques to make clinical workflow more efficient, such as automatic linking of findings between different ABUS views and multimodal registration. We propose a novel method to segment the anterior pectoral surface in ABUS images. METHOD AND MATERIALS We randomly collected 74 ABUS (25 Anterior-Posterior, 15 MEDial, 31 LATeral and 3 SUPerior views) volumes obtained in routine clinical care at two medical centers using the S2000 automated 3D breast ultrasound system (Siemens, Erlangen, Germany). Manual pectoral muscle delineations of the anterior surface were provided by a trained researcher. We developed an algorithm to segment the pectoral muscle surface in ABUS volumes: First, the chest wall is segmented using a previously validated software that models the chest wall as a cylinder. Thereafter, the chest wall surface is used to perform a cylindrical transformation on the ABUS volume. By applying this transformation, the chest wall and the pectoral muscle are straightened and shape variability of the pectoral muscle across volumes can be encoded in a probabilistic atlas. In the last step, gradient and atlas information are used to guide the pectoral muscle surface segmentation in a dynamic programming approach. The algorithm was applied to the 74 ABUS volumes of the study dataset following a leave-one-out strategy. Distance (mean+-stdev) between manual and automated pectoral muscle surfaces was used as evaluation measure. RESULTS The presented approach achieved a mean surface distance error of 3.47+-3.03 mm, compared to the manual annotations. The surface distance error for AP, LAT, MED and SUP view volumes was 2.61+-4.15, 3.78+-4.15, 4.17+-2.37 and 3.78+-1.02 mm, respectively. CONCLUSION Automated pectoral muscle segmentation is challenging due to high variation in pectoral muscle anatomy. The proposed method shows promising results on segmenting the pectoral muscle surface. CLINICAL RELEVANCE/APPLICATION ABUS is a promising modality for screening but reading is time consuming for radiologists. Availability of supporting tools such as computer-aided detection may expedite introduction of ABUS in practice.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Gube16, + author = {A. Gubern-M\'{e}rida and S. Vreemann and R. Marti and J. Melendez and S. Lardenoije and R.M. Mann and N. Karssemeijer and B. 
Platel}, + title = {Automated detection of breast cancer in false-negative screening {MRI} studies from women at increased risk}, + journal = EJR, + year = {2016}, + volume = {85}, + number = {2}, + issue = {2}, + month = {2}, + pages = {472--479}, + doi = {10.1016/j.ejrad.2015.11.031}, + url = {http://dx.doi.org/10.1016/j.ejrad.2015.11.031}, + abstract = {Purpose: To evaluate the performance of an automated computer-aided detection (CAD) system to detect breast cancers that were overlooked or misinterpreted in a breast MRI screening program for women at increased risk. Methods: We identified 40 patients that were diagnosed with breast cancer in MRI and had a prior MRI examination reported as negative available. In these prior examinations, 24 lesions could retrospectively be identified by two breast radiologists in consensus: 11 were scored as visible and 13 as minimally visible. Additionally, 120 normal scans were collected from 120 women without history of breast cancer or breast surgery participating in the same MRI screening program. A fully automated CAD system was applied to this dataset to detect malignant lesions. Results: At 4 false-positives per normal case, the sensitivity for the detection of cancer lesions that were visible or minimally visible in retrospect in prior-negative examinations was 0.71 (95% CI = 0.38-1.00) and 0.31 (0.07-0.59), respectively. Conclusions: A substantial proportion of cancers that were misinterpreted or overlooked in an MRI screening program was detected by a CAD system in prior-negative examinations. It has to be clarified with further studies if such a CAD system has an influence on the number of misinterpreted and overlooked cancers in clinical practice when results are given to a radiologist.}, + file = {Gube16.pdf:pdf/Gube16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26781154}, + gsid = {1691742795221291249}, + gscites = {24}, + ss_id = {7620ef3e0f908161af57191c25b7ed849734f1cf}, + all_ss_ids = {['7620ef3e0f908161af57191c25b7ed849734f1cf']}, +} + +@inproceedings{Gube16a, + author = {A. Gubern-M\'{e}rida and T. Tan and J. van Zelst and R. M. Mann and N. Karssemeijer}, + title = {Automated linking of suspicious findings between automated 3{D} breast ultrasound volumes}, + booktitle = MI, + year = {2016}, + series = SPIE, + doi = {10.1117/12.2214945}, + abstract = {Automated breast ultrasound (ABUS) is a 3D imaging technique which is rapidly emerging as a safe and relatively inexpensive modality for screening of women with dense breasts. However, reading ABUS examinations is very time consuming task since radiologists need to manually identify suspicious findings in all the different ABUS volumes available for each patient. Image analysis techniques to automatically link findings across volumes are required to speed up clinical workflow and make ABUS screening more efficient. In this study, we propose an automated system to, given the location in the ABUS volume being inspected (source), find the corresponding location in a target volume. The target volume can be a different view of the same study or the same view from a prior examination. The algorithm was evaluated using 118 linkages between suspicious abnormalities annotated in a dataset of ABUS images of 27 patients participating in a high risk screening program. The distance between the predicted location and the center of the annotated lesion in the target volume was computed for evaluation.
The mean +- stdev and median distance error achieved by the presented algorithm for linkages between volumes of the same study was 7.75 +- 6.71 mm and 5.16 mm, respectively. The performance was 9.54 +- 7.87 and 8.00 mm (mean +- stdev and median) for linkages between volumes from current and prior examinations. The proposed approach has the potential to minimize user interaction for finding correspondences among ABUS volumes.}, + file = {Gube16a.pdf:pdf/Gube16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {6334850745608942532}, + gscites = {2}, + ss_id = {43e4b8914c087b637ae22b6b4f85f70e31e6e4e8}, + all_ss_ids = {['43e4b8914c087b637ae22b6b4f85f70e31e6e4e8']}, +} + +@mastersthesis{Gucl21, + author = {Ismail Guclu}, + title = {Programmatically generating annotations for de-identification of clinical data}, + abstract = {Clinical records may contain protected health information (PHI) which are privacy sensitive information. It is important to annotate and replace PHI in unstructured medical records, before being able to share the data for other research purposes. Machine learning models are quick to implement and can achieve competitive results (micro-averaged F1-scores Dutch radiology dataset: 0.88 and English i2b2 dataset: 0.87). However, to develop machine learning models, we need training data. In this project, we applied weak supervision to annotate and collect training data for de-identification of medical records. It is essential to automate this process as manual annotation is a laborious and repetitive task. We used the two human annotated datasets, where we 'removed' the gold annotations to weakly tag PHI instances in medical records, where we unified the output labels using two different aggregation models: aggregation at the token level (Snorkel) and sequential labeling (Skweak). The output is then used to train a discriminative end model where we achieve competitive results on the Dutch dataset (micro-averaged F1 score: 0.76) whereas performance on the English dataset is suboptimal (micro-averaged F1-score: 0.49). The results indicate that on structured PHI tags we approach human annotated results, but more complicated entities still need more attention.}, + file = {Gucl21.pdf:pdf/Gucl21.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + year = {2021}, + journal = {Master thesis}, +} + +@conference{Guev23, + author = {Bryan Cardenas Guevara and Niccolo Marini and Stefano Marchesin and Witali Aswolinskiy and Robert-Jan Schlimbach and Damian Podareanu and Francesco Ciompi}, + booktitle = {MIDL}, + title = {Caption generation from histopathology whole-slide images using pre-trained transformers}, + abstract = {The recent advent of foundation models and large language models has enabled scientists to leverage large-scale knowledge of pretrained (vision) transformers and efficiently tailor it to downstream tasks. This technology can potentially automate multiple aspects of cancer diagnosis in digital pathology, from whole-slide image classification to generating pathology reports while training with pairs of images and text from the diagnostic conclusion. In this work, we orchestrate a set of weakly-supervised transformer-based models with a first aim to address both whole-slide image classification and captioning, addressing the automatic generation of the conclusion of pathology reports in the form of image captions. We report our first results on a multicentric multilingual dataset of colon polyps and biopsies. 
We achieve high diagnostic accuracy with no supervision and cheap computational adaptation.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@article{Habi20, + author = {Habib, Shifa Salman and Rafiq, Sana and Zaidi, Syed Mohammad Asad and Ferrand, Rashida Abbas and Creswell, Jacob and Van Ginneken, Bram and Jamal, Wafa Zehra and Azeemi, Kiran Sohail and Khowaja, Saira and Khan, Aamir}, + title = {Evaluation of computer aided detection of tuberculosis on chest radiography among people with diabetes in Karachi Pakistan}, + doi = {10.1038/s41598-020-63084-7}, + issue = {1}, + pages = {6276}, + volume = {10}, + abstract = {Pakistan ranks fifth among high tuberculosis (TB) burden countries and also has seventh highest burden for diabetes mellitus (DM). DM increases the risk of developing TB and contributes to adverse TB treatment outcomes hence screening and integrated management for both diseases in high burden countries is suggested. Computer-Aided Detection for TB (CAD4TB) can potentially be used as triage tool in low resource settings to pre-screen individuals for Xpert MTB/RIF testing. The aim of this study was to evaluate the diagnostic accuracy and performance of CAD4TB software in people with diabetes (PWD) enrolled in a TB screening program in Karachi, Pakistan. A total of 694 individuals with a diagnosis of DM (of whom 31.1% were newly diagnosed) were screened with CAD4TB and simultaneously provided sputum for Xpert MTB/RIF testing. Of the 74 (10.7%) participants who had bacteriologically positive (MTB+) results on Xpert testing, 54 (73%) had a CAD4TB score >70; and 155 (25%) participants who tested MTB-negative had scores >70. The area under the receiver operator curve was 0.78 (95% CI: 0.77-0.80). Our study findings indicate that CAD4TB offers good diagnostic accuracy as a triage test for TB screening among PWD using Xpert MTB/RIF as the reference standard.}, + journal = NATSCIREP, + file = {Habi20.pdf:pdf/Habi20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32286389}, + year = {2020}, + month = {4}, + all_ss_ids = {['13122b7c0ef4a596875eff981651d60140e67417', '142f9f46a9ca5a99de4c7b819d801d245a9334ba']}, + gscites = {13}, +} + +@mastersthesis{Hack21, + author = {Roel HACKING}, + title = {Combining CT scans and clinical features for improved automated COVID-19 detection}, + abstract = {During the first peak of the COVID-19 pandemic, hospitals in hard-hit regions were overflowing with patients at the emergency unit with respiratory complaints. Since the RT-PCR test was in limited supply at the time and test results took a long time to obtain, many hospitals opted to use chest CT scans of COVID-19 suspects. + As a result of this, several studies examined the possibility of automating the detection of COVID-19 in CT scans. One such study, by Lessmann et al., 2020, developed a model to predict COVID-19 severity scores based on these chest CT scans. In this thesis, we extended their model in several ways to take into account additional clinical values (such as blood values, sex, and age) to predict either PCR outcomes or clinical diagnoses. + Based on data from the Canisius-Wilhelmina Ziekenhuis (CWZ) hospital and Radboudumc hospitals, as well as the COVID-19 dataset by Ning et al., 2020, we found that integrating these two modalities can indeed lead to improved performance when both clinical and visual features are of sufficient quality. 
When training on data from the CWZ hospital and evaluating on data from the Radboudumc hospital, models using only clinical features or visual features achieved Area Under the ROC Curve (AUC) values of 0.773 and 0.826, respectively; their combination resulted in an AUC of 0.851. + Similarly, when training on data from the Union hospital in the iCTCF dataset and predicting on data from the Union hospital in that same dataset, we obtained AUCs of 0.687 and 0.812 for clinical and visual features, respectively; their combination resulted in an AUC of 0.862. + However, we also discovered that the patterns of missing data present in these clinical feature datasets can play an essential role in the performance of the models fitted on them. We thus developed additional methods to analyze and mitigate this effect to obtain fairer evaluations and increase model generalizability. Still, the high diagnostic performance of some of our models suggests that they could be adapted into clinical practice, and our methods pertaining to missing data could be used to aid further research using clinical feature datasets.}, + file = {Hack21.pdf:pdf/Hack21.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + year = {2021}, + journal = {Master thesis}, +} + +@conference{Hadd19, + author = {Tariq Haddad and Navid Farahani and John-Melle Bokhorst and Femke Doubrava-Simmer and Francesco Ciompi and Iris Nagtegaal and Jeroen van der Laak}, + title = {A Colorectal Carcinoma in 3D: Merging Knife-Edge Scanning Microscopy and Deep Learning}, + booktitle = {EACR}, + year = {2019}, + abstract = {Background and Objective: + A three-dimensional visualization of a human carcinoma could provide invaluable diagnostic information and redefine how we perceive and analyze cancer invasion. As deep learning begins automating the diagnostic workflow and cutting-edge microcopy provides unprecedented ways of visualizing tissue, combining these methologies could provide novel insight into malignant tumors and other pathologic entities. By combining Knife-Edge Scanning Microscopy with convolutional neural networks, we set out to visualize an entire threedimensional colorectal carcinoma segmented into specific tissue classifications. + + Methods: + A Knife-Edge Scanning Microscope (KESM), developed by Strateos (San Francisco, CA, USA), was used to digitize a whole-mount, H&E stained, formalinfixed paraffin-embedded human tissue specimen obtained from the Radboudumc (Nijmegen, Netherlands). Sparse manual annotations of 5 tissue types (tumor, stroma, muscle, healthy glands, background) were provided using KESM data to train a convolutional neural network developed by the Computational Pathology Group (Radboudumc) for semantic segmentation of the colorectal carcinoma tissue. The three-dimensional visualization was generated using 3Scan's proprietary visualization pipeline. + + Results: The convolutional neural network was used to process roughly 1200 slices of KESM data. The stitched and rendered segmentation maps demonstrate the formalin-fixed paraffin-embedded carcinoma of roughly 5 millimeters in depth. As shown in the figure, the tumor invasive margin can be seen advancing into the surrounding tumor stroma. + + Conclusion: Based on our findings, we were capable of training a segmentation model on the 3D KESM data to create an accurate representation of a formalin-fixed paraffin-embedded colorectal carcinoma tissue block segmented into five tissue classifications. 
Going forward, this can have much broader implications on the research and understanding of invasive tumors.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Hadd20, + author = {Tariq Haddad and John Melle Bokhorst and Luuk van den Dobbelsteen and Femke Simmer and Jeroen van der Laak and Iris D. Nagtegaal}, + title = {Characterisation of the tumour-host interface as a prognostic factor through deep learning systems}, + booktitle = {United European Gastroenterology Journal}, + year = {2020}, + abstract = {}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Hadj22, + author = {Lubomir Hadjiiski and Kenny Cha and Heang-Ping Chan and Karen Drukker and Lia Morra and Janne J. Nappi and Berkman Sahiner and Hiroyuki Yoshida and Quan Chen and Thomas M. Deserno and Hayit Greenspan and Henkjan Huisman and Zhimin Huo and Richard Mazurchuk and Nicholas Petrick and Daniele Regge and Ravi Samala and Ronald M. Summers and Kenji Suzuki and Georgia Tourassi and Daniel Vergara and Samuel G. Armato III}, + title = {AAPM task group report 273: Recommendations on best practices for AI and machine learning for computer-aided diagnosis in medical imaging}, + journal = {Medical Physics}, + year = {2022}, + doi = {https://doi.org/10.1002/mp.16188}, + abstract = {Rapid advances in artificial intelligence (AI) and machine learning, and specifically in deep learning (DL) techniques, have enabled broad application of these methods in health care. The promise of the DL approach has spurred further interest in computer-aided diagnosis (CAD) development and applications using both "traditional" machine learning methods and newer DL-based methods. We use the term CAD-AI to refer to this expanded clinical decision support environment that uses traditional and DL-based AI methods. + + Numerous studies have been published to date on the development of machine learning tools for computer-aided, or AI-assisted, clinical tasks. However, most of these machine learning models are not ready for clinical deployment. It is of paramount importance to ensure that a clinical decision support tool undergoes proper training and rigorous validation of its generalizability and robustness before adoption for patient care in the clinic. + + To address these important issues, the American Association of Physicists in Medicine (AAPM) Computer-Aided Image Analysis Subcommittee (CADSC) is charged, in part, to develop recommendations on practices and standards for the development and performance assessment of computer-aided decision support systems. The committee has previously published two opinion papers on the evaluation of CAD systems and issues associated with user training and quality assurance of these systems in the clinic. With machine learning techniques continuing to evolve and CAD applications expanding to new stages of the patient care process, the current task group report considers the broader issues common to the development of most, if not all, CAD-AI applications and their translation from the bench to the clinic. 
The goal is to bring attention to the proper training and validation of machine learning algorithms that may improve their generalizability and reliability and accelerate the adoption of CAD-AI systems for clinical decision support.}, + file = {Hadj22.pdf:pdf\\Hadj22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {df2cedb6640c9c7e0627fb03cf26b49e82a154b0}, + all_ss_ids = {['df2cedb6640c9c7e0627fb03cf26b49e82a154b0']}, + gscites = {8}, +} + +@inproceedings{Hage21, + title = {Variable Fraunhofer MEVIS RegLib Comprehensively Applied to Learn2Reg Challenge}, + author = {H{\"a}ger, Stephanie and Heldmann, Stefan and Hering, Alessa and Kuckertz, Sven and Lange, Annkristin}, + booktitle = {Segmentation, Classification, and Registration of Multi-modality Medical Imaging Data. MICCAI 2020}, + series = LNCS, + volume = {12587}, + pages = {74--79}, + year = {2021}, + url = {https://doi.org/10.1007/978-3-030-71827-5_9}, + doi = {10.1007/978-3-030-71827-5_9}, + optnote = {DIAG, RADIOLOGY}, + abstract = {In this paper, we present our contribution to the learn2reg challenge. We applied the Fraunhofer MEVIS registration library RegLib comprehensively to all 4 tasks of the challenge. For tasks 1-3, we used a classic iterative registration method with NGF distance measure, second order curvature regularizer, and a multi-level optimization scheme. For task 4, a deep learning approach with a weakly supervised trained U-Net was applied using the same cost function as in the iterative approach.}, + ss_id = {ac718bfeb8188b8540e72b81b81ff22c9f5f7b44}, + all_ss_ids = {['ac718bfeb8188b8540e72b81b81ff22c9f5f7b44']}, + gscites = {4}, +} + +@inproceedings{Hago18, + author = {Yeman Bhrane Hagos and Albert Gubern-M\'{e}rida and Jonas Teuwen}, + title = {Improving Breast Cancer Detection using Symmetry Information with Deep Learning}, + booktitle = {Breast Image Analysis (BIA)}, + year = {2018}, + doi = {10.1007/978-3-030-00946-5_10}, + abstract = {Convolutional Neural Networks (CNN) have had a huge success in many areas of computer vision and medical image analysis. However, there is still an immense potential for performance improvement in mammogram breast cancer detection Computer-Aided Detection (CAD) systems by integrating all the information that radiologist utilizes, such as symmetry and temporal data. In this work, we proposed a patch based multi-input CNN that learns symmetrical difference to detect breast masses. The network was trained on a large-scale dataset of 28294 mammogram images. The performance was compared to a baseline architecture without symmetry context using Area Under the ROC Curve (AUC) and Competition Performance Metric (CPM). At candidate level, AUC value of 0.933 with 95% confidence interval of [0.920, 0.954] was obtained when symmetry information is incorporated in comparison with baseline architecture which yielded AUC value of 0.929 with [0.919, 0.947] confidence interval. By incorporating symmetrical information, although there was no a significant candidate level performance again (p=0.111), we have found a compelling result at exam level with CPM value of 0.733 (p=0.001). 
We believe that including temporal data, and adding benign class to the dataset could improve the detection performance.}, + file = {Hago18.pdf:pdf/Hago18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {d0df46743b04218d7a0800c82aa2ea775973a9ae}, + all_ss_ids = {['d0df46743b04218d7a0800c82aa2ea775973a9ae']}, + gscites = {25}, +} + +@article{Hali19, + author = {Halilovic, Altuna and Verweij, Dagmar I and Simons, Annet and Stevens-Kroef, Marian J P L and Vermeulen, Susan and Elsink, Janet and Tops, Bastiaan B J and Otte-Holler, Irene and van der Laak, Jeroen A W M and van de Water, Carlijn and Boelens, Oliver B A and Schlooz-Vries, Margrethe S and Dijkstra, Jeroen R and Nagtegaal, Iris D and Tol, Jolien and van Cleef, Patricia H J and Span, Paul N and Bult, Peter}, + title = {HER2, chromosome 17 polysomy and DNA ploidy status in breast cancer; a translational study}, + journal = SCIREP, + year = {2019}, + volume = {9}, + issue = {1}, + month = {8}, + pages = {11679}, + doi = {10.1038/s41598-019-48212-2}, + url = {https://www.sciencedirect.com/science/article/pii/S1361841519300799}, + abstract = {Breast cancer treatment depends on human epidermal growth factor receptor-2 (HER2) status, which is often determined using dual probe fluorescence in situ hybridisation (FISH). Hereby, also loss and gain of the centromere of chromosome 17 (CEP17) can be observed (HER2 is located on chromosome 17). CEP17 gain can lead to difficulty in interpretation of HER2 status, since this might represent true polysomy. With this study we investigated whether isolated polysomy is present and how this effects HER2 status in six breast cancer cell lines and 97 breast cancer cases, using HER2 FISH and immunohistochemistry, DNA ploidy assessment and multiplex ligation dependent probe amplification. We observed no isolated polysomy of chromosome 17 in any cell line. However, FISH analysis did show CEP17 gain in five of six cell lines, which reflected gains of the whole chromosome in metaphase spreads and aneuploidy with gain of multiple chromosomes in all these cases. In patients' samples, gain of CEP17 indeed correlated with aneuploidy of the tumour (91.1%; p < 0.001). Our results indicate that CEP17 gain is not due to isolated polysomy, but rather due to widespread aneuploidy with gain of multiple chromosomes. As aneuploidy is associated with poor clinical outcome, irrespective of tumour grade, this could improve future therapeutic decision making.}, + file = {Hali19.pdf:pdf\\Hali19.pdf:PDF}, + optnote = {DIAG}, + pmid = {31406196}, + gsid = {12149180146622984473}, + gscites = {15}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/207039}, + ss_id = {06578dbe1d4b788eb0b6b0c84e3d4fae0ddf3a37}, + all_ss_ids = {['06578dbe1d4b788eb0b6b0c84e3d4fae0ddf3a37']}, +} + +@conference{Hall18, + author = {Hall, M. and Setio, A. A. A. and Sheridan, S. and Sproule, M. and Williams, M. and Scholten, E. T. and Jacobs, C. and Van Ginneken, B. and Roditi, G.}, + title = {Computer aided detection (CAD) and scoring of lung nodules in a Scottish lung cancer screening programme}, + booktitle = ECR, + year = {2018}, + doi = {10.1594/ecr2018/C-1407}, + abstract = {Aims and objectives: Lung cancer is the most common cancer worldwide with 1.6 million deaths worldwide in 20121. Several large American institutions have recommended lung cancer screening with low dose CT chest in specific population groups2,3,4. If lung cancer screening was implemented in Scotland there would be a very large increase in requirement for chest CT scans. 
These would all need reported, increasing radiological workload and cost. We postulated that combining the use of computer aided detection (CAD) and analysis could safely exclude lung cancer in a large number of scans obviating the need for radiological review with associated resource savings. The hypothesis was that CAD would have a 100% negative predictive value for lung cancer screening. Methods and materials: The initial screening Chest CT scans from Glasgow patients enrolled within the prospective ECLS study5 were sent for Lung-RADs analysis by Diagnostic Image Analysis Group Department of Radiology and Nuclear Medicine, Radboud University Medical Center. Computer aided detection was used to identify nodules and then categorise them into one of four groups (CAD 1 - 4) based upon probability of malignancy taking into account lesion morphology, size, location and texture. Independently and blind to the CAD results the scans were reported by two experienced consultant respiratory radiologists and assigned a similar grading category. The patient were then followed up with interval CT scans as per the ECLS study protocol. Results: CAD categorisation had a statistically significant correlation with the radiologist categorisation (p<0.001). None of the 113 (36%) patients within the CAD 1 group (no nodule identified) were found to have lung nodule(s). Conclusion: CAD and risk stratification is useful in excluding lung cancer on thoracic CT within the screening population. CAD followed by radiology review of suspicious cases is the cost effective and would lead to a 30% reduction in screening costs.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Hamb08, + author = {Hambrock, T. and F\"utterer, J. J. and Huisman, H. J. and Hulsbergen-van de Kaa, C. and van Basten, J. and van Oort, I. and Witjes, J. A. and Barentsz, J. O.}, + title = {Thirty-two-channel coil 3{T} magnetic resonance-guided biopsies of prostate tumor suspicious regions identified on multimodality 3{T} magnetic resonance imaging: technique and feasibility}, + journal = IR, + year = {2008}, + volume = {43}, + pages = {686--694}, + doi = {10.1097/RLI.0b013e31817d0506}, + abstract = {{OBJECTIVES}: {T}o test the technique and feasibility of translating tumor suspicious region maps in the prostate, obtained by multimodality, anatomic, and functional 3{T} magnetic resonance imaging ({MRI}) data to 32-channel coil, {T}2-weighted ({T}2-w), 3{T} {MR} images, for directing {MR}-guided biopsies. {F}urthermore, to evaluate the practicability of {MR}-guided biopsy on a 3{T} {MR} scanner using a 32-channel coil and a {MR}-compatible biopsy device. {MATERIALS} {AND} {METHODS}: {T}wenty-one patients with a high prostate-specific antigen (>4.0 ng/m{L}) and at least 2 prior negative transrectal ultrasound-guided biopsies of the prostate underwent an endorectal coil 3{T} {MRI}, which included {T}2-w, diffusion weighted and dynamic contrast enhanced {MRI}. {F}rom these multimodality images, tumor suspicious regions ({TSR}) were determined. {T}he 3{D} localization of these {TSR}s within the prostatic gland was translated to the {T}2-w {MR} images of a subsequent 32-channel coil 3{T} {MRI}. {T}hese were then biopsied under 3{T} {MR} guidance. {RESULTS}: {I}n all patients, {TSR}s could be identified and accurately translated to subsequent 3{T} {MR} images and biopsied under {MR} guidance. {M}edian {MR} biopsy procedure time was 35 minutes. 
{O}f the 21 patients, 8 (38\%) were diagnosed with prostate cancer, 6 (29\%) had evidence of prostatitis, 6 (29\%) had combined inflammatory and atrophic changes, and only 1 (5\%) patient had no identifiable pathology. {CONCLUSIONS}: {M}ultimodality, 3{T} {MRI} determined {TSR}s could effectively be translated to {T}2-weighted images, to be used for {MR} biopsies. 3{T} {MR}-guided biopsy based on these translated {TSR}s was feasible, performed in a clinical useful time, and resulted in a high number of positive results.}, + file = {Hamb08.pdf:pdf\\Hamb08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {18791410}, + month = {10}, + gsid = {13099020070548141810}, + gscites = {126}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/70642}, + ss_id = {f7c501d095bf0a0e58967aa336ab63ff912de195}, + all_ss_ids = {['f7c501d095bf0a0e58967aa336ab63ff912de195']}, +} + +@article{Hamb10, + author = {T. Hambrock and D. M. Somford and C. Hoeks and S. A. W. Bouwense and H. Huisman and D. Yakar and I. M. van Oort and J. A. Witjes and J. J. F\"utterer and J. O. Barentsz}, + title = {Magnetic resonance imaging guided prostate biopsy in men with repeat negative biopsies and increased prostate specific antigen}, + journal = JU, + year = {2010}, + volume = {183}, + pages = {520--527}, + doi = {10.1016/j.juro.2009.10.022}, + abstract = {{PURPOSE}: {U}ndetected cancer in repeat transrectal ultrasound guided prostate biopsies in patients with increased prostate specific antigen greater than 4 ng/ml is a considerable concern. {W}e investigated the tumor detection rate of tumor suspicious regions on multimodal 3 {T}esla magnetic resonance imaging and subsequent magnetic resonance imaging guided biopsy in 68 men with repeat negative transrectal ultrasound guided prostate biopsies. {W}e compared results to those in a matched transrectal ultrasound guided prostate biopsy population. {A}lso, we determined the clinical significance of detected tumors. {MATERIALS} {AND} {METHODS}: {A} total of 71 consecutive patients with prostate specific antigen greater than 4 ng/ml and 2 or greater negative transrectal ultrasound guided prostate biopsy sessions underwent multimodal 3 {T}esla magnetic resonance imaging. {I}n 68 patients this was followed by magnetic resonance imaging guided biopsy directed toward tumor suspicious regions. {A} matched multisession transrectal ultrasound guided prostate biopsy population from our institutional database was used for comparison. {T}he clinical significance of detected tumors was established using accepted criteria, including prostate specific antigen, {G}leason grade, stage and tumor volume. {RESULTS}: {T}he tumor detection rate of multimodal 3 {T}esla magnetic resonance imaging guided biopsy was 59\% (40 of 68 cases) using a median of 4 cores. {T}he tumor detection rate was significantly higher than that of transrectal ultrasound guided prostate biopsy in all patient subgroups (p <0.01) except in those with prostate specific antigen greater than 20 ng/ml, prostate volume greater than 65 cc and prostate specific antigen density greater than 0.5 ng/ml/cc, in which similar rates were achieved. {O}f the 40 patients with identified tumors 37 (93\%) were considered highly likely to harbor clinically significant disease. {CONCLUSIONS}: {M}ultimodal magnetic resonance imaging is an effective technique to localize prostate cancer. 
{M}agnetic resonance imaging guided biopsy of tumor suspicious regions is an accurate method to detect clinically significant prostate cancer in men with repeat negative biopsies and increased prostate specific antigen.}, + file = {Hamb10.pdf:pdf\\Hamb10.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + number = {2}, + pmid = {20006859}, + month = {2}, + gsid = {4191275900561122443}, + gscites = {415}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/87401}, + ss_id = {3f99aa1cc1a5175b1c0bca3b0c4747724cf87757}, + all_ss_ids = {['3f99aa1cc1a5175b1c0bca3b0c4747724cf87757']}, +} + +@article{Hamb11, + author = {Hambrock, T. and Somford, D. M. and Huisman, H. J. and van Oort, I. M. and Witjes, J. A. and Hulsbergen-van de Kaa, C. A. and Scheenen, T. and Barentsz, J. O.}, + title = {Relationship between Apparent Diffusion Coefficients at 3.0-{T} {MR} Imaging and {G}leason Grade in Peripheral Zone Prostate Cancer}, + journal = Radiology, + year = {2011}, + volume = {259}, + pages = {453--461}, + doi = {10.1148/radiol.11091409}, + abstract = {PURPOSE: To retrospectively determine the relationship between apparent diffusion coefficients (ADCs) obtained with 3.0-T diffusion-weighted (DW) magnetic resonance (MR) imaging and Gleason grades in peripheral zone prostate cancer. MATERIALS AND METHODS: The requirement to obtain institutional review board approval was waived. Fifty-one patients with prostate cancer underwent MR imaging before prostatectomy, including DW MR imaging with b values of 0, 50, 500, and 800 sec/mm(2). In prostatectomy specimens, separate slice-by-slice determinations of Gleason grade groups were performed according to primary, secondary, and tertiary Gleason grades. In addition, tumors were classified into qualitative grade groups (low-, intermediate-, or high-grade tumors). ADC maps were aligned to step-sections and regions of interest annotated for each tumor slice. The median ADC of tumors was related to qualitative grade groups with linear mixed-model regression analysis. The accuracy of the median ADC in the most aggressive tumor component in the differentiation of low- from combined intermediate- and high-grade tumors was summarized by using the area under the receiver operating characteristic (ROC) curve (A(z)). RESULTS: In 51 prostatectomy specimens, 62 different tumors and 251 step-section tumor lesions were identified. The median ADC in the tumors showed a negative relationship with Gleason grade group, and differences among the three qualitative grade groups were statistically significant (P < .001). Overall, with an increase of one qualitative grade group, the median ADC (+- standard deviation) decreased 0.18 x 10(-3) mm(2)/sec +- 0.02. Low-, intermediate-, and high-grade tumors had a median ADC of 1.30 x 10(-3) mm(2)/sec +- 0.30, 1.07 x 10(-3) mm(2)/sec +- 0.30, and 0.94 x 10(-3) mm(2)/sec +- 0.30, respectively. ROC analysis showed a discriminatory performance of A(z) = 0.90 in discerning low-grade from combined intermediate- and high-grade lesions. CONCLUSION: ADCs at 3.0 T showed an inverse relationship to Gleason grades in peripheral zone prostate cancer.
A high discriminatory performance was achieved in the differentiation of low-, intermediate-, and high-grade cancer.}, + file = {:pdf/Hamb11.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {2}, + pmid = {21406633}, + month = {5}, + gsid = {2584223731293899692}, + gscites = {577}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/97304}, + ss_id = {0e8824d96ba3b5f58ba4114a22a6274421562f3a}, + all_ss_ids = {['0e8824d96ba3b5f58ba4114a22a6274421562f3a']}, +} + +@article{Hamb12, + author = {Hambrock, T. and Hoeks, C. and Hulsbergen-van de Kaa, C. and Scheenen, T. and F\"utterer, J. and Bouwense, S. and van Oort, I. and Schr\"oder, F. and Huisman, H. and Barentsz, J.}, + title = {Prospective Assessment of Prostate Cancer Aggressiveness Using {3-T} Diffusion-Weighted Magnetic Resonance Imaging-Guided Biopsies Versus a Systematic 10-Core Transrectal Ultrasound Prostate Biopsy Cohort}, + journal = EU, + year = {2012}, + volume = {61}, + pages = {177--184}, + doi = {10.1016/j.eururo.2011.08.042}, + abstract = {BACKGROUND: Accurate pretreatment assessment of prostate cancer (PCa) aggressiveness is important in decision making. Gleason grade is a critical predictor of the aggressiveness of PCa. Transrectal ultrasound-guided biopsies (TRUSBxs) show substantial undergrading of Gleason grades found after radical prostatectomy (RP). Diffusion-weighted magnetic resonance imaging (MRI) has been shown to be a biomarker of tumour aggressiveness. OBJECTIVE: To improve pretreatment assessment of PCa aggressiveness, this study prospectively evaluated MRI-guided prostate biopsies (MR-GBs) of abnormalities determined on diffusion-weighted imaging (DWI) apparent diffusion coefficient (ADC) maps. The results were compared with a 10-core TRUSBx cohort. RP findings served as the gold standard. DESIGN, SETTING, AND PARTICIPANTS: A 10-core TRUSBx (n=64) or MR-GB (n=34) was used for PCa diagnosis before RP in 98 patients. MEASUREMENTS: Using multiparametric 3-T MRI: T2-weighted, dynamic contrast-enhanced imaging, and DWI were performed to identify tumour-suspicious regions in patients with a negative TRUSBx. The regions with the highest restriction on ADC maps within the suspicions regions were used to direct MR-GB. A 10-core TRUSBx was used in a matched cohort. Following RP, the highest Gleason grades (HGGs) in biopsies and RP specimens were identified. Biopsy and RP Gleason grade results were evaluated using chi-square analysis. RESULTS AND LIMITATIONS: No significant differences on RP were observed for proportions of patients having a HGG of 3 (35\% vs 28\%; p=0.50), 4 (32\% vs 41\%; p=0.51), and 5 (32\% vs 31\%; p=0.61) for the MR-GB and TRUSBx cohort, respectively. MR-GB showed an exact performance with RP for overall HGG: 88\% (30 of 34); for TRUS-GB it was 55\% (35 of 64; p=0.001). In the MR-GB cohort, an exact performance with HGG 3 was 100\% (12 of 12); for HGG 4, 91\% (10 of 11); and for HGG 5, 73\% (8 of 11). The corresponding performance rates for TRUSBx were 94\% (17 of 18; p=0.41), 46\% (12 of 26; p=0.02), and 30\% (6 of 20; p=0.01), respectively. 
CONCLUSIONS: This study shows prospectively that DWI-directed MR-GBs significantly improve pretreatment risk stratification by obtaining biopsies that are representative of true Gleason grade.}, + file = {Hamb12.pdf:pdf\\Hamb12.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {1}, + pmid = {21924545}, + month = {1}, + gsid = {18221975544734376170}, + gscites = {324}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/109206}, + ss_id = {17a88c8b4c36924e5d0b7d9f1a6e6b99a327c535}, + all_ss_ids = {['17a88c8b4c36924e5d0b7d9f1a6e6b99a327c535']}, +} + +@article{Hamb12a, + author = {Hambrock, Thomas and Vos, Pieter C. and Hulsbergen-van de Kaa, Christina A. and Barentsz, Jelle O. and Huisman, Henkjan J.}, + title = {Prostate Cancer: Computer-aided Diagnosis with Multiparametric 3-T {MR} Imaging--Effect on Observer Performance}, + journal = Radiology, + year = {2013}, + volume = {266}, + pages = {521-530}, + doi = {10.1148/radiol.12111634}, + abstract = {Purpose:To determine the effect of computer-aided diagnosis (CAD) on less-experienced and experienced observer performance in differentiation of benign from malignant prostate lesions at 3-T multiparametric magnetic resonance (MR) imaging.Materials and Methods:The institutional review board waived the need for informed consent. Retrospectively, 34 patients were included who had prostate cancer and had undergone multiparametric MR imaging, including T2-weighted, diffusion-weighted, and dynamic contrast material-enhanced MR imaging prior to radical prostatectomy. Six radiologists less experienced in prostate imaging and four radiologists experienced in prostate imaging were asked to characterize different regions suspicious for cancer as benign or malignant on multiparametric MR images first without and subsequently with CAD software. The effect of CAD was analyzed by using a multiple-reader, multicase, receiver operating characteristic analysis and a linear mixed-model analysis.Results:In 34 patients, 206 preannotated regions, including 67 malignant and 64 benign regions in the peripheral zone (PZ) and 19 malignant and 56 benign regions in the transition zone (TZ), were evaluated. Stand-alone CAD had an overall area under the receiver operating characteristic curve (AUC) of 0.90. For PZ and TZ lesions, the AUCs were 0.92 and 0.87, respectively. Without CAD, less-experienced observers had an overall AUC of 0.81, which significantly increased to 0.91 (P = .001) with CAD. For experienced observers, the AUC without CAD was 0.88, which increased to 0.91 (P = .17) with CAD. For PZ lesions, less-experienced observers increased their AUC from 0.86 to 0.95 (P < .001) with CAD. Experienced observers showed an increase from 0.91 to 0.93 (P = .13). For TZ lesions, less-experienced observers significantly increased their performance from 0.72 to 0.79 (P = .01) with CAD and experienced observers increased their performance from 0.81 to 0.82 (P = .42).Conclusion:Addition of CAD significantly improved the performance of less-experienced observers in distinguishing benign from malignant lesions; when less-experienced observers used CAD, they reached similar performance as experienced observers. 
The stand-alone performance of CAD was similar to performance of experienced observers. © RSNA, 2012. Supplemental material: http://radiology.rsna.org/lookup/suppl/doi:10.1148/radiol.12111634/-/DC1.}, + file = {Hamb12a.pdf:pdf\\Hamb12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {23204542}, + month = {2}, + gsid = {16782773343024671219}, + gscites = {110}, + ss_id = {be606a6037f048bd51e9dea69e7f324900f3e0f0}, + all_ss_ids = {['be606a6037f048bd51e9dea69e7f324900f3e0f0']}, +} + +@article{Hame13, + author = {Hameeteman, R. and Rozie, S. and Metz, C.T. and Manniesing, R. and van Walsum, T. and van der Lugt, A. and Niessen, W.J. and Klein, S.}, + title = {Automated Carotid Artery Distensibility Measurements from {CTA} using Nonrigid Registration}, + journal = MIA, + year = {2013}, + volume = {17}, + pages = {515-24}, + doi = {10.1016/j.media.2013.02.007}, + url = {http://www.sciencedirect.com/science/article/pii/S1361841513000212}, + abstract = {The distensibility of a blood vessel is a marker of atherosclerotic disease. In this paper we investigate the feasibility of measuring carotid artery distensibility on 4D CTA, both manually and using a new automatic method. On 4D CTA datasets manual (n=38) and automatic (n=76) measurements of the carotid distensibility were performed. A subset (n=10) of the manual annotations were repeated by a second observer. The interobserver variability was assessed using a Bland-Altman analysis and appeared to be too large to reliably measure the distensibility using manual annotation. We compared two versions of the automatic method: one using 3D registration and one using a 4D registration method. The latter resulted in a more smooth deformation over time. The automatic method was evaluated using a synthetic deformation and by investigating whether known relations with cardiovascular risk factors could be reproduced. The relation between distensibility and cardiovascular risk factors was tested with a Mann-Whitney U test. Automatic measurements revealed an association with hypertension whereas the manual measurements did not. This relation has been found by other studies too. We conclude that carotid artery distensibility measurements should be performed automatically and that the method described in this paper is suitable for that. All CTA datasets and related clinical data used in this study can be downloaded from our website (http://ctadist.bigr.nl).}, + file = {Hame13.pdf:pdf\\Hame13.pdf:PDF}, + optnote = {DIAG}, + pmid = {23602917}, + month = {7}, +} + +@conference{Hame17, + author = {van Hamersvelt, R. and Zreik, M. and Lessmann, N. and Wolterink, J. and Voskuil, M. and Viergever, M. A. and Leiner, T. and Isgum, I.}, + title = {Improving Specificity of Coronary {CT} Angiography for the Detection of Functionally Significant Coronary Artery Disease: A Deep Learning Approach}, + booktitle = RSNA, + year = {2017}, + optnote = {DIAG}, +} + +@article{Hame20, + author = {Hamer, O. W. and Rehbock, B. and Schaefer-Prokop, C.}, + title = {Idiopathische pulmonale Fibrose}, + doi = {10.1007/s00117-020-00675-5}, + year = {2020}, + abstract = {Idiopathic pulmonary fibrosis (IPF) is a chronic progressive fibrosing interstitial lung disease with a poor prognosis. High-resolution computed tomography (HRCT) plays a central role in the work-up of patients with suspected IPF. On HRCT, IPF manifests with the pattern of usual interstitial pneumonia (UIP).
For a long time, only supportive or immunosuppressive therapy was possible. Since 2012, antifibrotic agents have been approved, which has moved IPF even more than before into the focus of clinical and scientific interest. Based on the insights gained, the revised version of the international guideline for the diagnosis of IPF was published in 2018. Among other things, the guideline contains specifications for the interpretation of HRCT. This educational article presents the relevant HRCT signs and explains the specifications of the guideline.}, + url = {http://dx.doi.org/10.1007/s00117-020-00675-5}, + file = {Hame20.pdf:pdf\Hame20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Der Radiologe}, + citation-count = {1}, + automatic = {yes}, + pages = {549-562}, + volume = {60}, +} + +@article{Harl22, + author = {Harlianto, Netanja I. and Westerink, Jan and Hol, Marjolein E. and Wittenberg, Rianne and Foppen, Wouter and van der Veen, Pieternella H. and van Ginneken, Bram and Verlaan, Jorrit-Jan and de Jong, Pim A. and Mohamed Hoesein, Firdaus A. A. and {UCC-SMART Study Group }}, + year = {2022}, + journal = RHEUMADV, + title = {Patients with diffuse idiopathic skeletal hyperostosis have an increased burden of thoracic aortic calcifications}, + doi = {10.1093/rap/rkac060}, + number = {2}, + pages = {rkac060}, + volume = {6}, + abstract = {Objectives: DISH has been associated with increased coronary artery calcifications and incident ischaemic stroke. The formation of bone along the spine may share pathways with calcium deposition in the aorta. We hypothesized that patients with DISH have increased vascular calcifications. Therefore we aimed to investigate the presence and extent of DISH in relation to thoracic aortic calcification (TAC) severity. + Methods: This cross-sectional study included 4703 patients from the Second Manifestation of ARTerial disease cohort, consisting of patients with cardiovascular events or risk factors for cardiovascular disease. Chest radiographs were scored for DISH using the Resnick criteria. Different severities of TAC were scored arbitrarily from no TAC to mild, moderate or severe TAC. Using multivariate logistic regression, the associations between DISH and TAC were analysed with adjustments for age, sex, BMI, diabetes, smoking status, non-high-density lipoprotein cholesterol, cholesterol lowering drug usage, renal function and blood pressure. + Results: A total of 442 patients (9.4\%) had evidence of DISH and 1789 (38\%) patients had TAC. The prevalence of DISH increased from 6.6\% in the no TAC group to 10.8\% in the mild, 14.3\% in the moderate and 17.1\% in the severe TAC group. After adjustments, DISH was significantly associated with the presence of TAC [odds ratio (OR) 1.46 (95\% CI 1.17, 1.82)]. In multinomial analyses, DISH was associated with moderate TAC [OR 1.43 (95\% CI 1.06, 1.93)] and severe TAC [OR 1.67 (95\% CI 1.19, 2.36)]. + Conclusions: Subjects with DISH have increased TACs, providing further evidence that patients with DISH have an increased burden of vascular calcifications.}, + file = {PubMed entry:http\://www.ncbi.nlm.nih.gov/pubmed/35993014:text/html}, + pmid = {35993014}, + ss_id = {f8fb57d8601ac0189ea80c6412232ac6771e51f5}, + all_ss_ids = {['f8fb57d8601ac0189ea80c6412232ac6771e51f5']}, + gscites = {2}, +} + +@article{Hart10, + author = {Hartmann, Ieneke J C.
and Wittenberg, Rianne and Schaefer-Prokop, Cornelia}, + title = {Imaging of acute pulmonary embolism using multi-detector {CT} angiography: an update on imaging technique and interpretation}, + journal = EJR, + year = {2010}, + volume = {74}, + pages = {40--49}, + doi = {10.1016/j.ejrad.2010.02.007}, + file = {Hart10.pdf:pdf\\Hart10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {20227213}, + month = {4}, + ss_id = {712af65240c4f97f06da6178849a64f218a7cadc}, + all_ss_ids = {['712af65240c4f97f06da6178849a64f218a7cadc']}, + gscites = {45}, +} + +@article{Hayd15a, + author = {Hayden, Lystra P. and Hobbs, Brian D. and Cohen, Robyn T. and Wise, Robert A. and Checkley, William and Crapo, James D. and Hersh, Craig P. and {COPDGene Investigators}}, + title = {Childhood pneumonia increases risk for chronic obstructive pulmonary disease: the COPDGene study}, + journal = RESPR, + year = {2015}, + volume = {16}, + pages = {115}, + doi = {10.1186/s12931-015-0273-8}, + url = {http://dx.doi.org/10.1186/s12931-015-0273-8}, + abstract = {Background: Development of adult respiratory disease is influenced by events in childhood. The impact of childhood pneumonia on chronic obstructive pulmonary disease (COPD) is not well defined. We hypothesize that childhood pneumonia is a risk factor for reduced lung function and COPD in adult smokers. Methods: COPD cases and control smokers between 45-80 years old from the United States COPD Gene Study were included. Childhood pneumonia was defined by self-report of pneumonia at <16 years. Subjects with lung disease other than COPD or asthma were excluded. Smokers with and without childhood pneumonia were compared on measures of respiratory disease, lung function, and quantitative analysis of chest CT scans. Results: Of 10,192 adult smokers, 854 (8.4 %) reported pneumonia in childhood. Childhood pneumonia was associated with COPD (OR 1.40; 95 % CI 1.17-1.66), chronic bronchitis, increased COPD exacerbations, and lower lung function: post-bronchodilator FEV1(69.1 vs. 77.1 % predicted), FVC (82.7 vs. 87.4 % predicted), FEV1/FVCratio (0.63 vs. 0.67;p< 0.001 for all comparisons). Childhood pneumonia was associated with increased airway wall thickness on CT, without significant difference in emphysema. Having both pneumonia and asthma in childhood further increased the risk of developing COPD (OR 1.85; 95 % CI 1.10-3.18). Conclusions: Children with pneumonia are at increased risk for future smoking-related lung disease including COPD and decreased lung function. This association is supported by airway changes on chest CT scans. Childhood pneumonia may be an important factor in the early origins of COPD, and the combination ofpneumonia and asthma in childhood may pose the greatest risk. 
Clinical trials registration: ClinicalTrials.gov, NCT00608764 (Active since January 28, 2008).}, + file = {Hayd15a.pdf:pdf\\Hayd15a.pdf:PDF}, + optnote = {DIAG}, + pmid = {26392057}, + month = {9}, +} + +@article{Hayd18, + author = {Hayden, Lystra P and Cho, Michael H and Raby, Benjamin A and Beaty, Terri H and Silverman, Edwin K and Hersh, Craig P and COPDGene Investigators}, + title = {Childhood asthma is associated with COPD and known asthma variants in COPDGene: a genome-wide association study}, + journal = RESPR, + year = {2018}, + volume = {19}, + issue = {1}, + month = {10}, + pages = {209}, + doi = {10.1186/s12931-018-0890-0}, + abstract = {Childhood asthma is strongly influenced by genetics and is a risk factor for reduced lung function and chronic obstructive pulmonary disease (COPD) in adults. This study investigates self-reported childhood asthma in adult smokers from the COPDGene Study. We hypothesize that childhood asthma is associated with decreased lung function, increased risk for COPD, and that a genome-wide association study (GWAS) will show association with established asthma variants. We evaluated current and former smokers ages 45-80 of non-Hispanic white (NHW) or African American (AA) race. Childhood asthma was defined by self-report of asthma, diagnosed by a medical professional, with onset at < 16 years or during childhood. Subjects with a history of childhood asthma were compared to those who never had asthma based on lung function, development of COPD, and genetic variation. GWAS was performed in NHW and AA populations, and combined in meta-analysis. Two sets of established asthma SNPs from published literature were examined for association with childhood asthma. Among 10,199 adult smokers, 730 (7%) reported childhood asthma and 7493 (73%) reported no history of asthma. Childhood asthmatics had reduced lung function and increased risk for COPD (OR 3.42, 95% CI 2.81-4.18). Genotype data was assessed for 8031 subjects. Among NHWs, 391(7%) had childhood asthma, and GWAS identified one genome-wide significant association in KIAA1958 (rs59289606, p = 4.82 x 10 ). Among AAs, 339 (12%) had childhood asthma. No SNPs reached genome-wide significance in the AAs or in the meta-analysis combining NHW and AA subjects; however, potential regions of interest were identified. Established asthma SNPs were examined, seven from the NHGRI-EBI database and five with genome-wide significance in the largest pediatric asthma GWAS. Associations were found in the current childhood asthma GWAS with known asthma loci in IL1RL1, IL13, LINC01149, near GSDMB, and in the C11orf30-LRRC32 region (Bonferroni adjusted p < 0.05 for all comparisons). Childhood asthmatics are at increased risk for COPD. Defining asthma by self-report is valid in populations at risk for COPD, identifying subjects with clinical and genetic characteristics known to associate with childhood asthma. This has potential to improve clinical understanding of asthma-COPD overlap (ACO) and enhance future research into ACO-specific treatment regimens. ClinicalTrials.gov, NCT00608764 (Active since January 28, 2008).}, + file = {Hayd18.pdf:pdf\\Hayd18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30373671}, +} + +@article{Heba20, + author = {Hebar, Timea and Snoj, Ziga and Sconfienza, Luca Maria and Vanhoenacker, Filip Maria H.M. 
and Shahabpour, Maryam and Salapura, Vladka and Isaac, Amanda and Drakonaki, Eleni and Vasilev, Yurii and Drape, Jean-Luc and Adriaensen, Miraude and Friedrich, Klaus and Guglielmi, Giuseppe and Vieira, Alberto and Sanal, Hatice Tuba and Kerttula, Liisa and Hellund, Johan Castberg and Nagy, Judit and Heuck, Andreas and Rutten, Matthieu and Tzalonikou, Maria and Hansen, Ulrich and Niemunis-Sawicka, Joanna and Becce, Fabio and Silvestri, Enzo and Juan, Eva Llopis San and W\"{o}rtler, Klaus}, + title = {Present Status of Musculoskeletal Radiology in Europe: International Survey by the European Society of Musculoskeletal Radiology}, + doi = {10.1055/s-0040-1713119}, + year = {2020}, + abstract = {No official data exist on the status of musculoskeletal (MSK) radiology in Europe. The Committee for National Societies conducted an international survey to understand the status of training, subspecialization, and local practice among the European Society of Musculoskeletal Radiology (ESSR) partner societies. This article reports the results of that survey. An online questionnaire was distributed to all 26 European national associations that act as official partner societies of the ESSR. The 24 questions were subdivided into six sections: society structure, relationship with the national radiological society, subspecialization, present radiology practice, MSK interventional procedures, and MSK ultrasound. The findings of our study show a lack of standardized training and/or accreditation methods in the field of MSK radiology at a national level. The European diploma in musculoskeletal radiology is directed to partly overcome this problem; however, this certification is still underrecognized. Using certification methods, a more homogeneous European landscape could be created in the future with a view to subspecialist training. MSK ultrasound and MSK interventional procedures should be performed by a health professional with a solid knowledge of the relevant imaging modalities and sufficient training in MSK radiology. Recognition of MSK radiology as an official subspecialty would make the field more attractive for younger colleagues as well as attracting the brightest and best, an important key to further development of both clinical and academic radiology.}, + url = {http://dx.doi.org/10.1055/s-0040-1713119}, + file = {Heba20.pdf:pdf\\Heba20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Seminars in Musculoskeletal Radiology}, + citation-count = {9}, + automatic = {yes}, + pages = {323-330}, + volume = {24}, +} + +@article{Hees19, + author = {Heesterbeek, Thomas J and de Jong, Eiko K and Acar, Ilhan E and Groenewoud, Joannes M M and Liefers, Bart and S\'{a}nchez, Clara I. and Peto, Tunde and Hoyng, Carel B and Pauleikhoff, Daniel and Hense, Hans W and den Hollander, Anneke I}, + title = {Genetic risk score has added value over initial clinical grading stage in predicting disease progression in age-related macular degeneration}, + journal = NATSCIREP, + year = {2019}, + volume = {9}, + issue = {1}, + month = {4}, + pages = {6611}, + doi = {10.1038/s41598-019-43144-3}, + abstract = {Several prediction models for progression of age-related macular degeneration (AMD) have been developed, but the added value of using genetic information in those models in addition to clinical characteristics is ambiguous.
In this prospective cohort study, we explored the added value of genetics using a genetic risk score (GRS) based on 52 AMD-associated variants, in addition to the clinical severity grading at baseline as quantified by validated drusen detection software, to predict disease progression in 177 AMD patients after 6.5 years follow-up. The GRS was strongly associated with the drusen coverage at baseline (P < 0.001) and both the GRS and drusen coverage were associated with disease progression. When the GRS was added as predictor in addition to the drusen coverage, R increased from 0.46 to 0.56. This improvement by the GRS was predominantly seen in patients with a drusen coverage <15%. In patients with a larger drusen coverage, the GRS had less added value to predict progression. Thus, genetic information has added value over clinical characteristics in predicting disease progression in AMD, but only in patients with a less severe disease stage. Patients with a high GRS should be made aware of their risk and could be selected for clinical trials for arresting progression.}, + file = {Hees19.pdf:pdf\\Hees19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31036867}, + gsid = {5584625454185103300}, + gscites = {20}, + ss_id = {560e2100afbc64953c5155459b1f459f6e55003e}, + all_ss_ids = {['560e2100afbc64953c5155459b1f459f6e55003e']}, +} + +@article{Heet11, + author = {G. J. den Heeten and N. Karssemeijer}, + title = {[Computerised assessment of screening mammograms]}, + journal = NTVG, + year = {2011}, + volume = {155}, + pages = {A3025}, + url = {https://www.ntvg.nl/artikelen/computerbeoordeling-van-screeningsmammogrammen}, + abstract = {More than one million mammograms are performed in the Netherlands each year. Today, these images are all in digital format and processed by various types of software for optimal display during radiological analysis. As a result of continuous quality control on national public health screening for breast cancer, approximately 1000 cases of interval carcinoma and any T2 tumours identified during this screening are re-assessed. The results of these retrospective analyses suggest that there is room for improvement in terms of the reliability of screening mammography without this resulting in an excessive increase of false positive results. Research has shown that the more radiological evaluators are involved, the better the results. Alternatively, a computer can be utilised for detecting abnormalities. Large-scale use of Computer-Assisted Detection (CAD) is already underway in some countries. For this reason, the option of adding CAD to the current practice of double assessment of mammograms could be considered a sensible option for the future.}, + file = {Heet11.pdf:pdf\\Heet11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {18}, + pmid = {21466728}, + gsid = {7415660003160210862}, + gscites = {2}, + ss_id = {8b197ae83250002327590780c4303b5b038b9e2b}, + all_ss_ids = {['8b197ae83250002327590780c4303b5b038b9e2b']}, +} + +@article{Heim07a, + author = {Heijmink, Stijn W T P J. and F\"utterer, Jurgen J. and Hambrock, Thomas and Takahashi, Satoru and Scheenen, Tom W J. and Huisman, Henkjan J. and Hulsbergen-Van de Kaa, Christina A. and Knipscheer, Ben C. and Kiemeney, Lambertus A L M. 
and Witjes, J Alfred and Barentsz, Jelle O.}, + title = {Prostate cancer: body-array versus endorectal coil {MR} imaging at 3 {T}--comparison of image quality, localization, and staging performance}, + journal = Radiology, + year = {2007}, + volume = {244}, + pages = {184--195}, + doi = {10.1148/radiol.2441060425}, + abstract = {{PURPOSE}: {T}o prospectively compare image quality and accuracy of prostate cancer localization and staging with body-array coil ({BAC}) versus endorectal coil ({ERC}) {T}2-weighted magnetic resonance ({MR}) imaging at 3 {T}, with histopathologic findings as the reference standard. {MATERIALS} {AND} {METHODS}: {A}fter institutional review board approval and written informed consent, 46 men underwent 3-{T} {T}2-weighted {MR} imaging with a {BAC} (voxel size, 0.43 x 0.43 x 4.00 mm) and an {ERC} (voxel size, 0.26 x 0.26 x 2.50 mm) before radical prostatectomy. {F}our radiologists independently evaluated data sets obtained with the {BAC} and {ERC} separately. {T}en image quality characteristics related to prostate cancer localization and staging were assigned scores. {P}rostate cancer presence was recorded with a five-point probability scale in each of 14 segments that included the whole prostate. {D}isease stage was classified as organ-confined or locally advanced with a five-point probability scale. {W}hole-mount-section histopathologic examination was the reference standard. {A}reas under the receiver operating characteristic curve ({AUC}s) and diagnostic performance parameters were determined. {A} difference with a {P} value of less than .05 was considered significant. {RESULTS}: {F}orty-six patients (mean age, 61 years) were included for analysis. {S}ignificantly more motion artifacts were present with {ERC} imaging ({P}<.001). {A}ll other image quality characteristics improved significantly ({P}<.001) with {ERC} imaging. {W}ith {ERC} imaging, the {AUC} for localization of prostate cancer was significantly increased from 0.62 to 0.68 ({P}<.001). {ERC} imaging significantly increased the {AUC}s for staging, and sensitivity for detection of locally advanced disease by experienced readers was increased from 7\% (one of 15) to a range of 73\% (11 of 15) to 80\% (12 of 15) ({P}<.05), whereas a high specificity of 97\% (30 of 31) to 100\% (31 of 31) was maintained. {E}xtracapsular extension as small as 0.5 mm at histopathologic examination could be accurately detected only with {ERC} imaging. {CONCLUSION}: {I}mage quality and localization improved significantly with {ERC} imaging compared with {BAC} imaging. {F}or experienced radiologists, the staging performance was significantly better with {ERC} imaging.}, + file = {Heim07a.pdf:pdf\\Heim07a.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {1}, + pmid = {17495178}, + month = {7}, + gsid = {9086501525841300261}, + gscites = {354}, + ss_id = {1f2daa572caaeb54270d83a0f743f3a3f7f2a207}, + all_ss_ids = {['1f2daa572caaeb54270d83a0f743f3a3f7f2a207']}, +} + +@article{Heim09, + author = {T. Heimann and B. van Ginneken and M. Styner and Y. Arzhaeva and V. Aurich and C. Bauer and A. Beck and C. Becker and R. Beichel and G. Bekes and F. Bello and G. Binnig and H. Bischof and A. Bornik and P. Cashman and Y. Chi and A. Cordova and B. Dawant and M. Fidrich and J. Furst and D. Furukawa and L. Grenacher and J. Hornegger and D. Kainmuller and R. Kitney and H. Kobatake and H. Lamecker and T. Lange and J. Lee and B. Lennon and R. Li and S. Li and H-P. Meinzer and G. Nemeth and D. Raicu and A-M. Rau and E. 
van Rikxoort and M. Rousson and L. Rusko and K. Saddi and G. Schmidt and D. Seghers and A. Shimizu and P. Slagmolen and E. Sorantin and G. Soza and R. Susomboon and J. Waite and A. Wimmer and I. Wolf}, + title = {Comparison and {E}valuation of {M}ethods for {L}iver {S}egmentation {F}rom {CT} {D}atasets}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {1251--1265}, + doi = {10.1109/TMI.2009.2013851}, + abstract = {{T}his paper presents a comparison study between 10 automatic and six interactive methods for liver segmentation from contrast-enhanced {CT} images. {I}t is based on results from the {MICCAI} 2007 {G}rand {C}hallenge workshop, where 16 teams evaluated their algorithms on a common database. {A} collection of 20 clinical images with reference segmentations was provided to train and tune algorithms in advance. {P}articipants were also allowed to use additional proprietary training data for that purpose. {A}ll teams then had to apply their methods to 10 test datasets and submit the obtained results. {E}mployed algorithms include statistical shape models, atlas registration, level-sets, graph-cuts and rule-based systems. {A}ll results were compared to reference segmentations using five error measures that highlight different aspects of segmentation accuracy. {A}ll measures were combined according to a specific scoring system relating the obtained values to human expert variability. {I}n general, interactive methods reached higher average scores than automatic approaches and featured a better consistency of segmentation quality. {H}owever, the best automatic methods (mainly based on statistical shape models with some additional free deformation) could compete well on the majority of test images. {T}he study provides an insight in performance of different segmentation approaches under real-world conditions and highlights achievements and limitations of current image analysis techniques.}, + file = {Heim09.pdf:pdf\\Heim09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {8}, + pmid = {19211338}, + month = {8}, + gsid = {6089436202002220059}, + gscites = {940}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/81757}, + ss_id = {75352a06f8f39701d59c3a3a78e5cce6dd469ea9}, + all_ss_ids = {['75352a06f8f39701d59c3a3a78e5cce6dd469ea9']}, +} + +@conference{Hend21, + author = {Hendrix, W. and Hendrix, N. and Prokop, M. and Scholten, E. and van Ginneken, B. and Rutten, M. and Jacobs, C.}, + booktitle = ECR, + title = {Trends in the Incidence of Pulmonary Nodules in Chest Computed Tomography: 10-Year Results from Two Dutch Hospitals}, + doi = {10.26044/ecr2021/C-11494}, + abstract = {Purpose: To study trends in the incidence of reported pulmonary nodules in chest computed tomography (CT) since 2008. Methods: We retrospectively collected the radiology reports from chest CT studies performed between 2008 and 2019 from the hospital information systems of two large Dutch hospitals. Cases were included between 2008 and 2017; two years served as follow-up. A natural language processing algorithm was used to identify studies with any reported pulmonary nodule measuring up to 3 cm in diameter. Results: Between 2008 and 2017, a total of 68,656 patients underwent 142,111 chest CT examinations at both hospitals combined. The number of annual chest CT scans increased from 11,315 in 7,861 patients to 18,511 in 12,565 patients during this period, an increase of 64%.
The percentage of patients, in whom nodules were reported, increased from 33% (2,605 individuals) in 2008 to 50% (6,253 individuals) in 2017. The proportion of these patients who received a follow-up chest CT increased from 911/2,605 (35%) to 2,792/6,253 (45%). The average number of follow-up scans per patient remained similar (1.58 in 2008; 1.64 in 2017). Conclusion: The number of patients who underwent chest CT examinations substantially increased over the past decade. The proportion of patients in whom a pulmonary nodule has been found significantly increased, as did the percentage of those who received follow-up CT. These findings underline that effective nodule management becomes an increasingly important public health issue. Limitations: CT scans that only contain portions of the lungs (e.g. abdominal CT) were not included. Ethics committee approval: This study was approved by the medical ethical review boards of both institutions. Funding for this study: Junior Researcher grant from the Radboud Institute for Health Sciences, Radboudumc, Nijmegen, the Netherlands.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@article{Hend21a, + author = {Hendrix, Nils and Scholten, Ernst and Vernhout, Bastiaan and Bruijnen, Stefan and Maresch, Bas and de Jong, Mathijn and Diepstraten, Suzanne and Bollen, Stijn and Schalekamp, Steven and de Rooij, Maarten and Scholtens, Alexander and Hendrix, Ward and Samson, Tijs and Sharon Ong, Lee-Ling and Postma, Eric and van Ginneken, Bram and Rutten, Matthieu}, + title = {Development and Validation of a Convolutional Neural Network for Automated Detection of Scaphoid Fractures on Conventional Radiographs}, + journal = RAI, + pages = {e200260}, + year = {2021}, + doi = {10.1148/ryai.2021200260}, + algorithm = {https://grand-challenge.org/algorithms/scaphoid-fracture-detection/}, + file = {:pdf/Hend21a.pdf:PDF}, + abstract = {Purpose: To compare the performance of a convolutional neural network (CNN) to 11 radiologists in detecting scaphoid fractures on conventional radiographs of the hand, wrist, and scaphoid. Materials and Methods: At two hospitals (Hospitals A and B), three datasets consisting of conventional hand, wrist, and scaphoid radiographs were retrospectively retrieved: a dataset of 1039 radiographs (775 patients [mean age, 48 +- 23 years; 505 females], period: 2017--2019, Hospitals A and B) for developing a scaphoid segmentation CNN, a dataset of 3000 radiographs (1846 patients [mean age, 42 +- 22 years; 937 females], period: 2003--2019, Hospital B) for developing a scaphoid fracture detection CNN, and a dataset of 190 radiographs (190 patients [mean age, 43 +- 20 years; 77 female] period: 2011--2020, Hospital A) for testing the complete fracture detection system. Both CNNs were applied consecutively: the segmentation CNN localized the scaphoid and then passed the relevant region to the detection CNN for fracture detection. In an observer study, the performance of the system was compared with 11 radiologists. Evaluation metrics included the Dice similarity coefficient (DSC), Hausdorff distance (HD), sensitivity, specificity, positive predictive value (PPV), and area under the receiver operating characteristic curve (AUC). Results: The segmentation CNN achieved a DSC of 97.4% +- 1.4 with an HD of 1.31 mm +- 1.03. The detection CNN had a sensitivity of 78% (95% CI: 70, 86), specificity of 84% (95% CI: 77, 92), PPV of 83% (95% CI: 77, 90), and AUC of 0.87 (95% CI: 0.81, 0.91). 
There was no difference between the AUC of the CNN and the radiologists (0.87 [95% CI: 0.81, 0.91] versus 0.83 [radiologist range: 0.78-0.85]; P = .09). Conclusion: The developed CNN achieved radiologist-level performance in detecting scaphoid fractures on conventional radiographs of the hand, wrist, and scaphoid.}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/238628}, + ss_id = {afbdf450023be86a89bbec9dea91f031e2b76cb2}, + all_ss_ids = {['afbdf450023be86a89bbec9dea91f031e2b76cb2']}, + gscites = {15}, +} + +@article{Hend23, + author = {Hendrix, Nils and Hendrix, Ward and van Dijke, Kees and Maresch, Bas and Maas, Mario and Bollen, Stijn and Scholtens, Alexander and de Jonge, Milko and Ong, Lee-Ling Sharon and van Ginneken, Bram and Rutten, Matthieu}, + title = {Musculoskeletal radiologist-level performance by using deep learning for detection of scaphoid fractures on conventional multi-view radiographs of hand and wrist}, + doi = {https://doi.org/10.1007/s00330-022-09205-4}, + url = {https://link.springer.com/article/10.1007/s00330-022-09205-4}, + abstract = {Objectives: To assess how an artificial intelligence (AI) algorithm performs against five experienced musculoskeletal radiologists in diagnosing scaphoid fractures and whether it aids their diagnosis on conventional multi-view radiographs. + + Methods: Four datasets of conventional hand, wrist, and scaphoid radiographs were retrospectively acquired at two hospitals (hospitals A and B). Dataset 1 (12,990 radiographs from 3353 patients, hospital A) and dataset 2 (1117 radiographs from 394 patients, hospital B) were used for training and testing a scaphoid localization and laterality classification component. Dataset 3 (4316 radiographs from 840 patients, hospital A) and dataset 4 (688 radiographs from 209 patients, hospital B) were used for training and testing the fracture detector. The algorithm was compared with the radiologists in an observer study. Evaluation metrics included sensitivity, specificity, positive predictive value (PPV), area under the characteristic operating curve (AUC), Cohen's kappa coefficient (k), fracture localization precision, and reading time. + + Results: The algorithm detected scaphoid fractures with a sensitivity of 72%, specificity of 93%, PPV of 81%, and AUC of 0.88. The AUC of the algorithm did not differ from each radiologist (0.87 [radiologists' mean], p >=.05). AI assistance improved five out of ten pairs of inter-observer Cohen's k agreements (p <.05) and reduced reading time in four radiologists (p <.001), but did not improve other metrics in the majority of radiologists (p >=.05). + + Conclusions: The AI algorithm detects scaphoid fractures on conventional multi-view radiographs at the level of five experienced musculoskeletal radiologists and could significantly shorten their reading time.}, + file = {Hend23.pdf:pdf\\Hend23.pdf:PDF}, + journal = ER, + volume = {33}, + pages = {1575--1588}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {70686c2db88212d610871b007083bcc58876b49d}, + all_ss_ids = {['70686c2db88212d610871b007083bcc58876b49d']}, + gscites = {6}, +} + +@article{Hend23a, + author = {Hendrix, Ward and Rutten, Matthieu and Hendrix, Nils and van Ginneken, Bram and Schaefer-Prokop, Cornelia and Scholten, Ernst T. 
and Prokop, Mathias and Jacobs, Colin}, + title = {Trends in the incidence of pulmonary nodules in chest computed tomography: 10-year results from two Dutch hospitals}, + doi = {10.1007/s00330-023-09826-3}, + pmid = {37338552}, + url = {https://doi.org/10.1007/s00330-023-09826-3}, + abstract = {Objective + To study trends in the incidence of reported pulmonary nodules and stage I lung cancer in chest CT. + + Methods + We analyzed the trends in the incidence of detected pulmonary nodules and stage I lung cancer in chest CT scans in the period between 2008 and 2019. Imaging metadata and radiology reports from all chest CT studies were collected from two large Dutch hospitals. A natural language processing algorithm was developed to identify studies with any reported pulmonary nodule. + + Results + Between 2008 and 2019, a total of 74,803 patients underwent 166,688 chest CT examinations at both hospitals combined. During this period, the annual number of chest CT scans increased from 9955 scans in 6845 patients in 2008 to 20,476 scans in 13,286 patients in 2019. The proportion of patients in whom nodules (old or new) were reported increased from 38% (2595/6845) in 2008 to 50% (6654/13,286) in 2019. The proportion of patients in whom significant new nodules (>= 5 mm) were reported increased from 9% (608/6954) in 2010 to 17% (1660/9883) in 2017. The number of patients with new nodules and corresponding stage I lung cancer diagnosis tripled and their proportion doubled, from 0.4% (26/6954) in 2010 to 0.8% (78/9883) in 2017. + + Conclusion + The identification of incidental pulmonary nodules in chest CT has steadily increased over the past decade and has been accompanied by more stage I lung cancer diagnoses. + + Clinical relevance statement + These findings stress the importance of identifying and efficiently managing incidental pulmonary nodules in routine clinical practice.}, + file = {Hend23a.pdf:pdf\\Hend23a.pdf:PDF}, + journal = ER, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {9a589d8cc38a4770cf3d5819fc363a814902bb42}, + all_ss_ids = {['9a589d8cc38a4770cf3d5819fc363a814902bb42']}, + gscites = {4}, +} + +@inproceedings{Heri18, + author = {Hering, Alessa and Kuckertz, Sven and Heldmann, Stefan and Heinrich, Mattias}, + booktitle = {Bildverarbeitung f\"ur die Medizin}, + title = {Enhancing Label-Driven Deep Deformable Image Registration with Local Distance Metrics for State-of-the-Art Cardiac Motion Tracking}, + pages = {309--314}, + publisher = {Springer}, + url = {https://arxiv.org/abs/1812.01859}, + abstract = {While deep learning has achieved significant advances in accuracy for medical image segmentation, its benefits for deformable image registration have so far remained limited to reduced computation times. Previous work has either focused on replacing the iterative optimization of distance and smoothness terms with CNN-layers or using supervised approaches driven by labels. Our method is the first to combine the complementary strengths of global semantic information (represented by segmentation labels) and local distance metrics that help align surrounding structures. 
We demonstrate significant higher Dice scores (of 86.5%) for deformable cardiac image registration compared to classic registration (79.0%) as well as label-driven deep learning frameworks (83.4%).}, + file = {Heri18.pdf:pdf\\Heri18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, + ss_id = {1216c70fa9e3eaa3ff5f7755e1d04147caed4818}, + all_ss_ids = {['1216c70fa9e3eaa3ff5f7755e1d04147caed4818']}, + gscites = {40}, +} + +@inproceedings{Heri19, + author = {Hering, Alessa and van Ginneken, Bram and Heldmann, Stefan}, + title = {mlVIRNET: Multilevel Variational Image Registration Network}, + booktitle = MICCAI, + series = LNCS, + pages = {257-265}, + doi = {10.1007/978-3-030-32226-7_29}, + year = 2019, + volume = {11769}, + url = {https://arxiv.org/abs/1909.10084}, + abstract = {We present a novel multilevel approach for deep learning based image registration. Recently published deep learning based registration methods have shown promising results for a wide range of tasks. However, these algorithms are still limited to relatively small deformations. Our method addresses this shortcoming by introducing a multilevel framework, which computes deformation fields on different scales, similar to conventional methods. Thereby, a coarse-level alignment is obtained first, which is subsequently improved on finer levels. We demonstrate our method on the complex task of inhale-to-exhale lung registration. We show that the use of a deep learning multilevel approach leads to significantly better registration results.}, + file = {Heri19.pdf:pdf\\Heri19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {17321305290346990836}, + gscites = {50}, + ss_id = {aefbf6afa8d75939106323e38e06c7b8b85ce954}, + all_ss_ids = {['aefbf6afa8d75939106323e38e06c7b8b85ce954']}, +} + +@article{Heri19a, + author = {Hering, Alessa and Kuckertz, Sven and Heldmann, Stefan and Heinrich, Mattias P}, + title = {Memory-efficient 2.5D convolutional transformer networks for multi-modal deformable registration with weak label supervision applied to whole-heart {CT} and {MRI} scans}, + journal = CARS, + year = {2019}, + doi = {10.1007/s11548-019-02068-z}, + abstract = {PURPOSE: Despite its potential for improvements through supervision, deep learning-based registration approaches are difficult to train for large deformations in 3D scans due to excessive memory requirements. METHODS: We propose a new 2.5D convolutional transformer architecture that enables us to learn a memory-efficient weakly supervised deep learning model for multi-modal image registration. Furthermore, we firstly integrate a volume change control term into the loss function of a deep learning-based registration method to penalize occurring foldings inside the deformation field. RESULTS: Our approach succeeds at learning large deformations across multi-modal images. We evaluate our approach on 100 pair-wise registrations of CT and MRI whole-heart scans and demonstrate considerably higher Dice Scores (of 0.74) compared to a state-of-the-art unsupervised discrete registration framework (deeds with Dice of 0.71). CONCLUSION: Our proposed memory-efficient registration method performs better than state-of-the-art conventional registration methods. 
By using a volume change control term in the loss function, the number of occurring foldings can be considerably reduced on new registration cases.}, + file = {Heri19a.pdf:pdf\\Heri19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31538274}, + month = {9}, + ss_id = {e50f0d6b5c1b710f13e9ebe2b3e2448518c8e26d}, + all_ss_ids = {['e50f0d6b5c1b710f13e9ebe2b3e2448518c8e26d']}, + gscites = {15}, +} + +@inproceedings{Heri19b, + author = {Hering, Alessa and Heldmann, Stefan}, + title = {Unsupervised Learning for Large Motion Thoracic CT Follow-Up Registration}, + booktitle = MI, + year = {2019}, + volume = {10949}, + series = SPIE, + pages = {109491B}, + doi = {10.1117/12.2506962}, + url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10949/109491B/Unsupervised-learning-for-large-motion-thoracic-CT-follow-up-registration/10.1117/12.2506962.short?SSO=1}, + abstract = {Image registration is the process of aligning two or more images to achieve point-wise spatial correspondence. Typically, image registration is phrased as an optimization problem w.r.t. a spatial mapping that minimizes a suitable cost function and common approaches estimate solutions by applying iterative optimization schemes such as gradient descent or Newton-type methods. This optimization is performed independently for each pair of images, which can be time consuming. In this paper we present an unsupervised learning-based approach for deformable image registration of thoracic CT scans. Our experiments show that our method performs comparable to conventional image registration methods and in particular is able to deal with large motions. Registration of a new unseen pair of images only requires a single forward pass through the network yielding the desired deformation field in less than 0.2 seconds. Furthermore, as a novelty in the context of deep-learning-based registration, we use the edge-based normalized gradient fields distance measure together with the curvature regularization as a loss function of the registration network.}, + file = {Heri19b.pdf:pdf\\Heri19b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + ss_id = {8dff3f1ad2cb2f512ea8c8ac7523699b4f9e82c5}, + all_ss_ids = {['8dff3f1ad2cb2f512ea8c8ac7523699b4f9e82c5']}, + gscites = {19}, +} + +@inproceedings{Heri20, + author = {Alessa Hering and Stefan Heldmann}, + booktitle = {Bildverarbeitung f\"ur die Medizin}, + title = {mlVIRNET: Improved Deep Learning Registration Using a Coarse to Fine Approach to Capture all Levels of Motion}, + doi = {10.1007/978-3-658-29267-6_35}, + pages = {175}, + abstract = {While deep learning has become a methodology of choice in many areas, relatively few deep-learning-based image registration algorithms have been proposed. One reason for this is lack of ground-truth and the large variability of plausible deformations that can align corresponding anatomies. Therefore, the problem is much less constrained than for example image classification or segmentation.}, + file = {Heri20.pdf:pdf\\Heri20.pdf:PDF}, + optnote = {DIAG}, + year = {2020}, + ss_id = {1138a76e62c45bfa0a1daddf7ac45f826c2d5222}, + all_ss_ids = {['1138a76e62c45bfa0a1daddf7ac45f826c2d5222']}, + gscites = {0}, +} + +@article{Heri2021d, + author = {{Hering}, A. and {Hansen}, L. and {Mok}, T.~C.~W. and {Chung}, A. and {Siebert}, H. and {H{\"a}ger}, S. and {Lange}, A. and {Kuckertz}, S. and {Heldmann}, S. and {Shao}, W.
and others}, + title = {Learn2Reg: comprehensive multi-task medical image registration challenge, dataset and evaluation in the era of deep learning}, + abstract = {Image registration is a fundamental medical image analysis task, and a wide variety of approaches have been proposed. However, only a few studies have comprehensively compared medical image registration approaches on a wide range of clinically relevant tasks, in part because of the lack of availability of such diverse data. This limits the development of registration methods, the adoption of research advances into practice, and a fair benchmark across competing approaches. The Learn2Reg challenge addresses these limitations by providing a multi-task medical image registration benchmark for comprehensive characterisation of deformable registration algorithms. A continuous evaluation will be possible at \url{https://learn2reg.grand-challenge.org}. + Learn2Reg covers a wide range of anatomies (brain, abdomen, and thorax), modalities (ultrasound, CT, MR), availability of annotations, as well as intra- and inter-patient registration evaluation. We established an easily accessible framework for training and validation of 3D registration methods, which enabled the compilation of results of over 65 individual method submissions from more than 20 unique teams. We used a complementary set of metrics, including robustness, accuracy, plausibility, and runtime, enabling unique insight into the current state-of-the-art of medical image registration. This paper describes datasets, tasks, evaluation methods and results of the challenge, and the results of further analysis of transferability to new datasets, the importance of label supervision, and resulting bias.}, + file = {Heri21d.pdf:pdf\\Heri21d.pdf:PDF}, + journal = {arXiv preprint arXiv:2112.04489}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, + ss_id = {2e09fa7387659a79f41d809ce40d32cc8c847bb7}, + all_ss_ids = {['2e09fa7387659a79f41d809ce40d32cc8c847bb7']}, + gscites = {69}, +} + +@article{Heri21, + author = {Alessa Hering and Stephanie Hager and Jan Moltz and Nikolas Lessmann and Stefan Heldmann and Bram {van Ginneken}}, + title = {{CNN}-based Lung {CT} Registration with Multiple Anatomical Constraints}, + pages = {102139}, + journal = MIA, + year = {2021}, + volume = {72}, + doi = {https://doi.org/10.1016/j.media.2021.102139}, + url = {https://www.sciencedirect.com/science/article/pii/S1361841521001857}, + abstract = {Deep-learning-based registration methods emerged as a fast alternative to conventional registration methods. However, these methods often still cannot achieve the same performance as conventional registration methods because they are either limited to small deformation or they fail to handle a superposition of large and small deformations without producing implausible deformation fields with foldings inside. In this paper, we identify important strategies of conventional registration methods for lung registration and successfully developed the deep-learning counterpart. We employ a Gaussian-pyramid-based multilevel framework that can solve the image registration optimization in a coarse-to-fine fashion. Furthermore, we prevent foldings of the deformation field and restrict the determinant of the Jacobian to physiologically meaningful values by combining a volume change penalty with a curvature regularizer in the loss function. Keypoint correspondences are integrated to focus on the alignment of smaller structures. 
We perform an extensive evaluation to assess the accuracy, the robustness, the plausibility of the estimated deformation fields, and the transferability of our registration approach. We show that it achieves state-of-the-art results on the COPDGene dataset compared to conventional registration method with much shorter execution time. In our experiments on the DIRLab exhale to inhale lung registration, we demonstrate substantial improvements (TRE below 1.2 mm) over other deep learning methods.}, + optnote = {DIAG, RADIOLOGY}, + algorithm = {https://grand-challenge.org/algorithms/deep-learning-based-ct-lung-registration/}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/236792}, + ss_id = {3c649c514443775e25e096cff35d58e393659ac4}, + all_ss_ids = {['ee17bc918583166ad08ff307be5f77cb130486ea', '3c649c514443775e25e096cff35d58e393659ac4']}, + gscites = {37}, +} + +@inproceedings{Heri21a, + title = {Whole-Body Soft-Tissue Lesion Tracking and Segmentation in Longitudinal CT Imaging Studies}, + author = {Alessa Hering and Felix Peisen and Teresa Amaral and Sergios Gatidis and Thomas Eigentler and Ahmed Othman and Jan Hendrik Moltz}, + booktitle = MIDL, + year = {2021}, + url = {https://openreview.net/forum?id=hzbuHGhU02Z}, + optnote = {DIAG, RADIOLOGY}, + abstract = {In follow-up CT examinations of cancer patients, therapy success is evaluated by estimating the change in tumor size. This process is time-consuming and error-prone. We present a pipeline that automates the segmentation and measurement of matching lesions, given a point annotation in the baseline lesion. First, a region around the point annotation is extracted, in which a deep-learning-based segmentation of the lesion is performed. Afterward, a registration algorithm finds the corresponding image region in the follow-up scan and the convolutional neural network segments lesions inside this region. In the final step, the corresponding lesion is selected. We evaluate our pipeline on clinical follow-up data comprising 125 soft-tissue lesions from 43 patients with metastatic melanoma. Our pipeline succeeded for 96% of the baseline and 80% of the follow-up lesions, showing that we have laid the foundation for an efficient quantitative follow-up assessment in clinical routine.}, + ss_id = {3a0446285a7db63672c263d701e7a5c16b5db113}, + all_ss_ids = {['3a0446285a7db63672c263d701e7a5c16b5db113']}, + gscites = {2}, +} + +@phdthesis{Heri22, + author = {Alessa Hering}, + title = {Deep-Learning-Based Image Registration and Tumor Follow-Up Analysis}, + url = {https://repository.ubn.ru.nl/handle/2066/273914}, + abstract = {This thesis is focused on the development of deep-learning based image registration approaches and on efficient tumor follow-up analysis. Chapter 2 describes a method for a memory-efficient weakly-supervised deep-learning model for multi-modal image registration. The method combines three 2D networks into a 2.5D registration network. Chapter 3 presents a multilevel approach for deep learning-based image registration. Chapter 4 describes a method that incorporates multiple anatomical constraints as anatomical priors into the registration network applied to CT lung registration. Chapter 5 presents the results of the Learn2Reg challenge and compares several conventional and deep-learning-based registration methods. Chapter 6 describes a pipeline that automates the segmentation and measurement of matching lesions, given a point annotation in the baseline lesion. 
The pipeline is based on a registration approach to locate corresponding image regions and a convolutional neural network to segment the lesion in the follow-up image. Chapter 7 presents the reader study, which investigates whether the assessment time for follow-up lesion segmentations is reduced by AI-assisted workflow while maintaining the same quality of segmentations.}, + file = {Heri22.pdf:pdf/Heri22.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + copromotor = {N. Lessmann and S. Heldmann}, + promotor = {B. van Ginneken, H.K. Hahn}, + year = {2022}, + journal = {PhD thesis}, +} + +@conference{Herm16, + author = {Hermsen, Meyke and van der Laak, Jeroen}, + title = {Highly multiplexed immunofluorescence using spectral imaging}, + booktitle = {DPA's Pathology Visions Conference 2016, San Diego, CA, US}, + year = {2016}, + abstract = {Research into cancer biomarkers often comprises testing of a number of potentially relevant markers in tissue sections. Use of single antibody immunohistochemistry, however, limits the amount of information available from the analysis. One single component can be visualized at a time, prohibiting the simultaneous assessment of multiple markers. Immunofluorescence is a widely used alternative, enabling two or three simultaneous markers. In, for instance, the study of tumor infiltrating lymphocytes one may wish to use multiplex immunophenotyping to identify the relevant subtypes of T-cells. This may require more than three markers in a single section. Also, because of availability, it is not always possible to compile panels of markers raised in different species to prevent cross-reactivity. + This workshop will focus on highly multiplexed imaging, allowing up to 7 markers in a single section. The method comprises a staining procedure consisting of consecutive steps of IHC staining with tyramid signal amplification and microwave stripping of the antibody. Every antibody is labeled with a different fluorescent dye. Subsequent imaging is performed using a fully automated multispectral imaging setup. This approach enables the use of antibodies raised in the same species (e.g. mouse MAbs) and is capable of eliminating the effect of autofluorescence. The workshop will consist of two parts. First the specific staining techniques will be treated. In the second part we will focus on the imaging and analysis options. There will be ample opportunities for questions and discussion.}, + file = {Herm16.pdf:pdf\\Herm16.pdf:PDF}, + optnote = {DIAG}, +} + +@conference{Herm17, + author = {Hermsen, Meyke and de Bel, Thomas and van de Warenburg, Milly and Knuiman, Jimmy and Steenbergen, Eric and Litjens, Geert and Smeets, Bart and Hilbrands, Luuk and van der Laak, Jeroen}, + title = {Automatic segmentation of histopathological slides from renal allograft biopsies using artificial intelligence}, + booktitle = {Dutch Federation of Nephrology (NfN) Fall Symposium}, + year = {2017}, + abstract = {Objective: Histopathological analysis of renal biopsies depends on the identification and assessment of specific histological structures. Both in research and routine diagnostics, this analysis can be time-consuming and suffer from observer variability. Recently, it has been shown that the combination of high resolution whole slide imaging (WSI) and artificial intelligence yields powerful new avenues for tissue section analysis. 
This study aims to develop an algorithm based on a specific type of artificial intelligence (a convolutional neural network; CNN) to fully automatically segment structures in cortical fragments of renal allograft biopsies. Automated segmentation of renal tissue allows an unbiased, reproducible computation of morphological characteristics of important structures. This in turn can be used as support in diagnostic quantitative measure-based decisions as for instance is employed in the Banff-classification. Methods: The neural network was trained using a set of Periodic acid-Schiff (PAS) stained slides (n=26) of renal allograft biopsies. WSIs were produced using a 3DHISTECH Pannoramic 250 Flash II digital slide scanner with a 20x objective lens. We used a U-net architecture CNN, which has been proven to be specifically useful in biomedical image segmentation tasks. Training was based on exhaustive annotations in one to two randomly selected rectangular areas per WSI (size approximately 3000 x 4000 pixels; comparable to one 200x microscopic field of view). A total of nine classes were annotated: Glomeruli, Sclerotic glomeruli, Proximal tubuli, Distal tubuli, Atrophic tubuli, Undefined tubuli, Arteries, Capsule and Interstitium. All annotations were revised by a pathology resident (JK), under consultation of an experienced nephropathologist (ES). Our CNN was evaluated using the Dice coefficient for each individual class. This coefficient expresses the quality of the segmentation on a scale ranging from 0-1, taking into account both recall and precision. Because of the limited amount of annotations for certain classes, cross-validation was applied. Results: We found the following Dice coefficients for the different histological segments: Glomeruli: 0.89, Sclerotic glomeruli: 0.43, Proximal tubuli: 0.88, Distal tubuli: 0.77, Atrophic tubuli: 0.32, Undefined tubuli: 0.11, Arteries: 0.71, Capsule: 0.47 and Interstitium: 0.85. Conclusion: This study shows that segmentation of WSIs of PAS-stained renal allograft biopsies using a CNN is feasible. Segmentation of several important classes (Glomeruli, Interstitium, Arteries, Proximal-, and Distal tubuli) was highly accurate. CNNs learn from being exposed to many example images. The most probable reason for the lower performance for the other classes are the relatively low number of annotated regions for these classes, combined with a high level of variability inherently present in these tissue structures. To our knowledge, this is the first time artificial intelligence is being deployed in a nine-class segmentation task in the field of kidney transplant histopathology. Results of this study show the promising potential of CNNs in obtaining quantitative, spatial and morphometric information from renal tissue in an objective, reproducible, high-scale fashion supporting diagnostic decisions based on quantitative measures.}, + file = {Herm17.pdf:pdf\\Herm17.pdf:PDF}, + optnote = {DIAG}, +} + +@conference{Herm18, + author = {Hermsen, Meyke and de Bel, Thomas and den Boer, Marjolijn and Steenbergen, Eric and Kers, Jesper and Florquin, Sandrine and Smeets, Bart and Hilbrands, Luuk and van der Laak, Jeroen}, + title = {Glomerular detection, segmentation and counting in PAS-stained histopathological slides using deep learning}, + booktitle = {Dutch Federation of Nephrology (NfN) Fall Symposium}, + year = {2018}, + abstract = {Objective: Glomeruli are among the most extensively studied components in kidney histopathology. 
Researchers and clinicians often depend on quantitative measures for the assessment of glomeruli. Historically, these are obtained through manual counting or classical image processing techniques. These methods possess limited reproducibility, are insufficiently robust to inter-laboratory variations, and are infamous for their tedious nature. As an alternative, we trained a convolutional neural network (CNN) to detect, segment, and count healthy and sclerotic glomeruli in digitized Periodic acid-Schiff (PAS) stained tissue sections. Methods: A CNN was trained using exhaustively annotated structures in rectangular regions in 50 whole-slide images (WSIs) of renal transplant biopsies. This resulted in annotations of 182 healthy and 18 sclerotic glomeruli. 40 WSIs were used for training and validation. Segmentation was assessed by calculating the Dice-coefficient on an unseen test set of 10 WSIs. To assess the network's ability to detect glomeruli in a larger composition of varying structures, we applied the CNN to 15 fully annotated nephrectomy WSIs. We calculated Pearson's correlation coefficients for glomerular counting (healthy and sclerotic glomeruli combined) in 82 renal transplant biopsies manually performed by three renal pathologists and the quantification by the CNN. Results: We found a Dice-coefficient of 0.95 for healthy glomeruli and 0.62 for sclerotic glomeruli in the renal transplant biopsy test set. The CNN detected 93.4% of 1747 annotated healthy glomeruli in the nephrectomy samples, with 8.4% false positives. The CNN detected 76.4% of 72 annotated sclerotic glomeruli, with 45.5% false positives. Pearson's correlation coefficient for glomerular counting on 82 transplant biopsies of the CNN versus the pathologists was 0.924, 0.930 and 0.937 for pathologist 1, 2, and 3, respectively. The CNN counted on average 1.7 glomeruli more than the pathologists. The pathologists differed on average 0.78 glomerulus. Conclusion: The network can accurately detect and segment healthy glomeruli. The CNN performs moderately well on segmenting sclerotic glomeruli, most probably due to the low amount of training data that was available for this class. The CNN's higher glomerular count can partly be explained by possible false positive detections of sclerotic glomeruli. Also, partially sampled glomeruli located at biopsy's edges are not counted by the pathologist, while they are included by the network. More training data for sclerotic glomeruli and additional post-processing techniques are needed to resolve this.}, + file = {Herm18.pdf:pdf\\Herm18.pdf:PDF}, + optnote = {DIAG}, +} + +@article{Herm19, + author = {Hermsen, Meyke and de Bel, Thomas and den Boer, Marjolijn and Steenbergen, Eric J. and Kers, Jesper and Florquin, Sandrine and Roelofs, Joris J. T. H. and Stegall, Mark D. and Alexander, Mariam P. and Smith, Byron H. and Smeets, Bart and Hilbrands, Luuk B. and van der Laak, Jeroen A. W. M.}, + title = {Deep-learning based histopathologic assessment of kidney tissue}, + journal = JASN, + year = {2019}, + volume = {30}, + issue = {10}, + pages = {1968-1979}, + doi = {10.1681/ASN.2019020144}, + url = {https://jasn.asnjournals.org/content/30/10/1968}, + abstract = {Background: The development of deep neural networks is facilitating more advanced digital analysis of histopathologic images. We trained a convolutional neural network for multiclass segmentation of digitized kidney tissue sections stained with periodic acid-Schiff (PAS). 
Methods: We trained the network using multiclass annotations from 40 whole-slide images of stained + kidney transplant biopsies and applied it to four independent data sets. We assessed multiclass segmentation performance by calculating Dice coefficients for ten tissue classes on ten transplant biopsies from the Radboud University Medical Center in Nijmegen, The Netherlands, and on ten transplant biopsies from an external center for validation. We also fully segmented 15 nephrectomy samples and calculated the + network's glomerular detection rates and compared network-based measures with visually scored histologic components (Banff classification) in 82 kidney transplant biopsies. + Results: The weighted mean Dice coefficients of all classes were 0.80 and 0.84 in ten kidney transplant biopsies from the Radboud center and the external center, respectively. The best segmented class was + "glomeruli" in both data sets (Dice coefficients, 0.95 and 0.94, respectively), followed by "tubuli combined" and "interstitium." The network detected 92.7% of all glomeruli in nephrectomy samples, with + 10.4% false positives. In whole transplant biopsies, the mean intraclass correlation coefficient for glomerular counting performed by pathologists versus the network was 0.94. We found significant correlations + between visually scored histologic components and network-based measures. Conclusions: This study presents the first convolutional neural network for multiclass segmentation of PAS-stained + nephrectomy samples and transplant biopsies. Our network may have utility for quantitative studies involving kidney histopathology across centers and provide opportunities for deep learning applications in routine diagnostics.}, + file = {Herm19.pdf:pdf\\Herm19.pdf:PDF}, + optnote = {DIAG}, + pmid = {31488607}, + month = {9}, + gsid = {7739327770842655007}, + gscites = {192}, + ss_id = {fa59a6c087ec8b88c3828a95de5e0662cd2eee7e}, + all_ss_ids = {['fa59a6c087ec8b88c3828a95de5e0662cd2eee7e']}, +} + +@conference{Herm19a, + author = {Hermsen, Meyke AND de Bel, Thomas AND den Boer, Marjolijn AND Steenbergen, Eric J AND Kers, Jesper AND Florquin, Sandrine AND Roelofs, Joris J.T.H. AND Stegall, Mark D. AND Alexander, Mariam P. AND Smith, Byron H. AND Smeets, Bart AND Hilbrands, Luuk B. AND van der Laak, Jeroen A. W.
M.}, + title = {Deep learning-based histopathological assessment of renal tissue}, + booktitle = {American Society of Nephrology Kidney Week 2019}, + year = {2019}, + file = {Herm19a.pdf:pdf\\Herm19a.pdf:PDF}, +} + +@article{Herm20, + author = {Hermsen, Meyke and Smeets, Bart and Hilbrands, Luuk and van der Laak, Jeroen}, + title = {Artificial intelligence; is there a potential role in nephropathology?}, + journal = NDT, + year = {2020}, + doi = {10.1093/ndt/gfaa181}, + file = {Herm20.pdf:pdf\\Herm20.pdf:PDF}, + optnote = {DIAG, INPRESS}, + pmid = {32995871}, + ss_id = {3f9c978ea44205703f337574f5e07eb9d7ecb523}, + all_ss_ids = {['3f9c978ea44205703f337574f5e07eb9d7ecb523']}, + gscites = {5}, +} + +@article{Herm21, + author = {Hermsen, Meyke AND Volk, Valery AND Brasen, Jan Hinrich AND Geijs, Daan J AND Gwinner, Wilfried AND Kers, Jesper AND Linmans, Jasper AND Schaadt, Nadine S AND Schmitz, Jessica AND Steenbergen, Eric J AND Swiderska-Chadaj, Zaneta AND Smeets, Bart AND Hilbrands, Luuk L AND van der Laak, Jeroen A W M}, + title = {Quantitative assessment of inflammatory infiltrates in kidney transplant biopsies using multiplex tyramide signal amplification and deep learning}, + doi = {10.1038/s41374-021-00601-w}, + number = {8}, + pages = {970-982}, + volume = {101}, + abstract = {Delayed graft function (DGF) is a strong risk factor for development of interstitial fibrosis and tubular atrophy (IFTA) in kidney transplants. Quantitative assessment of inflammatory infiltrates in kidney biopsies of DGF patients can reveal predictive markers for IFTA development. In this study, we combined multiplex tyramide signal amplification (mTSA) and convolutional neural networks (CNNs) to assess the inflammatory microenvironment in kidney biopsies of DGF patients (n=22) taken at six weeks post-transplantation. Patients were stratified for IFTA development (<10 % versus >=10%) from six weeks to six months post-transplantation, based on histopathological assessment by three kidney pathologists. One mTSA panel was developed for visualization of capillaries, T- and B-lymphocytes and macrophages and a second mTSA panel for T-helper cell and macrophage subsets. The slides were multi-spectrally imaged and custom-made python scripts enabled conversion to artificial brightfield whole-slide images (WSI). We used an existing CNN for the detection of lymphocytes with cytoplasmatic staining patterns in immunohistochemistry and developed two new CNNs for the detection of macrophages and nuclear-stained lymphocytes. F1-scores were 0.77 (nuclear-stained lymphocytes), 0.81 (cytoplasmatic-stained lymphocytes), and 0.82 (macrophages) on a test set of artificial brightfield WSI. The CNNs were used to detect inflammatory cells, after which we assessed the peritubular capillary extent, cell density, cell ratios and cell distance in the two patient groups. In this cohort, distance of macrophages to other immune cells and peritubular capillary extent did not vary significantly at six weeks post-transplantation between patient groups. CD163+ cell density was higher in patients with >=10% IFTA development six months post-transplantation (p<0.05). CD3+CD8-/CD3+CD8+ ratios were higher in patients with <10% IFTA development (p<0.05). We observed a high correlation between CD163+ and CD4+GATA3+ cell density (R=0.74, p<0.001). 
Our study demonstrates that CNNs can be used to leverage reliable, quantitative results from mTSA-stained, multi-spectrally imaged slides of kidney transplant biopsies.}, + file = {Herm21.pdf:pdf\\Herm21.pdf:PDF}, + journal = LABINV, + month = {3}, + optnote = {DIAG}, + pmid = {34006891}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/238916}, + all_ss_ids = {['3881ae992914cd50a14782104170cc5dd5d9ae7e', 'a4d4e42f1accfd77f378549c776b7c850afbcda5']}, + gscites = {25}, +} + +@article{Herm22, + author = {Meyke Hermsen AND Francesco Ciompi AND Adeyemi Adefidipe AND Aleksandar Denic AND Amelie Dendooven AND Byron H. Smith AND Dominique van Midden AND Jan Hinrich Brasen AND Jesper Kers AND Mark D. Stegall AND P\'{e}ter B\'{a}ndi AND Tri Nguyen AND Zaneta Swiderska-Chadaj AND Bart Smeets AND Luuk B. Hilbrands AND Jeroen A. W. M. van der Laak}, + title = {Convolutional neural networks for the evaluation of chronic and inflammatory lesions in kidney transplant biopsies}, + journal = AJPAT, + year = {2022}, + volume = {192}, + issue = {10}, + pages = {1418-1432}, + doi = {https://doi.org/10.1016/j.ajpath.2022.06.009}, + abstract = {In kidney transplant biopsies, both inflammation and chronic changes are important features that predict long-term graft survival. Quantitative scoring of these features is important for transplant diagnostics and kidney research. However, visual scoring is poorly reproducible and labor-intensive. The goal of this study was to investigate the potential of convolutional neural networks (CNNs) to quantify inflammation and chronic features in kidney transplant biopsies. + A structure segmentation CNN and a lymphocyte detection CNN were applied on 125 whole-slide image pairs of PAS-, and CD3-stained slides. The CNN results were used to quantify healthy and sclerotic glomeruli, interstitial fibrosis, tubular atrophy, and inflammation both within non-atrophic and atrophic tubuli, and in areas of interstitial fibrosis. The computed tissue features showed high correlations with Banff lesion scores of five pathologists. Analyses on a small subset showed a moderate correlation towards higher CD3+ cell density within scarred regions and higher CD3+ cell count inside atrophic tubuli correlated with long-term change of estimated glomerular filtration rate. + The presented CNNs are valid tools to yield objective quantitative information on glomeruli number, fibrotic tissue, and inflammation within scarred and non-scarred kidney parenchyma in a reproducible fashion. CNNs have the potential to improve kidney transplant diagnostics and will benefit the community as a novel method to generate surrogate endpoints for large-scale clinical studies.}, + file = {Herm22.pdf:pdf\\Herm22.pdf:PDF}, + optnote = {DIAG}, + pmid = {35843265}, + ss_id = {1d7cb6f85cf6478da6fef4d5630f02aa3a053f8c}, + all_ss_ids = {['1d7cb6f85cf6478da6fef4d5630f02aa3a053f8c']}, + gscites = {13}, +} + +@inproceedings{Heuv15, + author = {Thomas L. A. van den Heuvel and Mohsen Ghafoorian and Anke W. van der Eerden and Bozena M. Goraj and Teuntje M. J. C. Andriessen and Bart M. ter Haar Romeny and Bram Platel}, + title = {Computer Aided Detection of Brain Micro-Bleeds in Traumatic Brain Injury}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + pages = {94142F}, + doi = {10.1117/12.2075353}, + abstract = {Brain micro-bleeds (BMBs) are used as surrogate markers for detecting diffuse axonal injury in traumatic brain injury (TBI) patients. 
The location and number of BMBs have been shown to influence the long-term outcome of TBI. To further study the importance of BMBs for prognosis, accurate localization and quantification are required. The task of annotating BMBs is laborious, complex and prone to error, resulting in a high inter- and intra-reader variability. In this paper we propose a computer-aided detection (CAD) system to automatically detect BMBs in MRI scans of moderate to severe neuro-trauma patients. Our method consists of four steps. Step one: preprocessing of the data. Both susceptibility (SWI) and T1 weighted MRI scans are used. The images are co-registered, a brain-mask is generated, the bias field is corrected, and the image intensities are normalized. Step two: initial candidates for BMBs are selected as local minima in the processed SWI scans. Step three: feature extraction. BMBs appear as round or ovoid signal hypo-intensities on SWI. Twelve features are computed to capture these properties of a BMB. Step four: Classification. To identify BMBs from the set of local minima using their features, different classifiers are trained on a database of 33 expert annotated scans and 18 healthy subjects with no BMBs. Our system uses a leave-one-out strategy to analyze its performance. With a sensitivity of 90% and 1.3 false positives per BMB, our CAD system shows superior results compared to state-of-the-art BMB detection algorithms (developed for non-trauma patients).}, + file = {Heuv15.pdf:pdf\\Heuv15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {17467758196509053337}, + gscites = {8}, + ss_id = {1fd0e5d0355bb0798803da2ff5e743d1039819bb}, + all_ss_ids = {['1fd0e5d0355bb0798803da2ff5e743d1039819bb']}, +} + +@article{Heuv16, + author = {Thomas L. A. van den Heuvel and Anke W. van der Eerden and Rashindra Manniesing and Mohsen Ghafoorian and Tao Tan and Teuntje M. J. C. Andriessen and Thijs Vande Vyvere and Luc van den Hauwe and Bart M. ter Haar Romeny and Bozena M. Goraj and Bram Platel}, + title = {Automated detection of cerebral microbleeds in patients with Traumatic Brain Injury}, + journal = {NeuroImage: Clinical}, + year = {2016}, + volume = {12}, + pages = {241 - 251}, + doi = {10.1016/j.nicl.2016.07.002}, + abstract = {In this paper a Computer Aided Detection (CAD) system is presented to automatically detect Cerebral Microbleeds (CMBs) in patients with Traumatic Brain Injury (TBI). It is believed that the presence of CMBs has clinical prognostic value in TBI patients. To study the contribution of CMBs in patient outcome, accurate detection of CMBs is required. Manual detection of CMBs in TBI patients is a time consuming task that is prone to errors, because CMBs are easily overlooked and are difficult to distinguish from blood vessels. + + This study included 33 TBI patients. Because of the laborious nature of manually annotating CMBs, only one trained expert manually annotated the CMBs in all 33 patients. A subset of ten TBI patients was annotated by six experts. Our CAD system makes use of both Susceptibility Weighted Imaging (SWI) and T1 weighted magnetic resonance images to detect CMBs. After pre-processing these images, a two-step approach was used for automated detection of CMBs. In the first step, each voxel was characterized by twelve features based on the dark and spherical nature of CMBs and a random forest classifier was used to identify CMB candidate locations. In the second step, segmentations were made from each identified candidate location. 
Subsequently an object-based classifier was used to remove false positive detections of the voxel classifier, by considering seven object-based features that discriminate between spherical objects (CMBs) and elongated objects (blood vessels). A guided user interface was designed for fast evaluation of the CAD system result. During this process, an expert checked each CMB detected by the CAD system. + + A Fleiss' kappa value of only 0.24 showed that the inter-observer variability for the TBI patients in this study was very large. An expert using the guided user interface reached an average sensitivity of 93%, which was significantly higher (p = 0.03) than the average sensitivity of 77% (sd 12.4%) that the six experts manually detected. Furthermore, with the use of this CAD system the reading time was substantially reduced from one hour to 13 minutes per patient, because the CAD system only detects on average 25.9 false positives per TBI patient, resulting in 0.29 false positives per definite CMB finding.}, + file = {Heuv16.pdf:pdf\\Heuv16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27489772}, + publisher = {Elsevier}, + month = {2}, + gsid = {7154632919206928793}, + gscites = {61}, + ss_id = {490b2120bffd758919d924ee1b062789aed16ef3}, + all_ss_ids = {['490b2120bffd758919d924ee1b062789aed16ef3']}, +} + +@inproceedings{Heuv17, + author = {Thomas L. A. van den Heuvel and Hezkiel Petros and Stefano Santini and Chris L. de Korte and Bram van Ginneken}, + title = {A step towards measuring the fetal head circumference with the use of obstetric ultrasound in a low resource setting}, + booktitle = MI, + year = {2017}, + volume = {10139}, + series = SPIE, + pages = {101390V}, + doi = {10.1117/12.2253671}, + abstract = {Worldwide, 99% of all maternal deaths occur in low-resource countries. Ultrasound imaging can be used to detect maternal risk factors, but requires a well-trained sonographer to obtain the biometric parameters of the fetus. One of the most important biometric parameters is the fetal Head Circumference (HC). The HC can be used to estimate the Gestational Age (GA) and assess the growth of the fetus. In this paper we propose a method to estimate the fetal HC with the use of the Obstetric Sweep Protocol (OSP). With the OSP the abdomen of pregnant women is imaged with the use of sweeps. These sweeps can be taught to somebody without any prior knowledge of ultrasound within a day. + Both the OSP and the standard two-dimensional ultrasound image for HC assessment were acquired by an experienced gynecologist from fifty pregnant women in St. Luke's Hospital in Wolisso, Ethiopia. The reference HC from the standard two-dimensional ultrasound image was compared to both the manually measured HC and the automatically measured HC from the OSP data. + The median difference between the estimated GA from the manual measured HC using the OSP and the reference standard was -1.1 days (Median Absolute Deviation (MAD) 7.7 days). The median difference between the estimated GA from the automatically measured HC using the OSP and the reference standard was -6.2 days (MAD 8.6 days). 
+ Therefore, it can be concluded that it is possible to estimate the fetal GA with simple obstetric sweeps with a deviation of only one week.}, + file = {Heuv17.pdf:pdf\\Heuv17.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + month = {3}, + gsid = {11830984244326787685}, + gscites = {5}, + ss_id = {f8d5915101494c63b044909b93b821caa688f220}, + all_ss_ids = {['f8d5915101494c63b044909b93b821caa688f220']}, +} + +@article{Heuv17a, + author = {Thomas L. A. van den Heuvel and David J. Graham and Kristopher J. Smith and Chris L. de Korte and Jeffrey A. Neasham}, + title = {Development of a Low-Cost Medical Ultrasound Scanner Using a Monostatic Synthetic Aperture}, + journal = TBCAS, + year = {2017}, + volume = {11}, + number = {4}, + pages = {849--857}, + doi = {10.1109/TBCAS.2017.2695240}, + abstract = {Objective: In this paper, we present the design of low-cost medical ultrasound scanners aimed at the detection of maternal mortality risk factors in developing countries. Method: Modern ultrasound scanners typically employ a high element count transducer array with multichannel transmit and receive electronics. To minimize hardware costs, we employ a single piezoelectric element, mechanically swept across the target scene, and a highly cost-engineered single channel acquisition circuit. Given this constraint, we compare the achievable image quality of a monostatic fixed focus scanner (MFFS) with a monostatic synthetic aperture scanner (MSAS) using postfocusing. Quantitative analysis of image quality was carried out using simulation and phantom experiments, which were used to compare a proof-of-concept MSAS prototype with an MFFS device currently available on the market. Finally, in vivo experiments were performed to validate the MSAS prototype in obstetric imaging. Results: Simulations show that the achievable lateral resolution of the MSAS approach is superior at all ranges compared to the fixed focus approach. Phantom experiments verify the improved resolution of the MSAS prototype but reveal a lower signal to noise ratio. In vivo experiments show promising results using the MSAS for clinical diagnostics in prenatal care. Conclusion: The proposed MSAS achieves superior resolution but lower SNR compared to an MFFS approach, principally due to lower acoustic energy emitted. Significance: The production costs of the proposed MSAS could be an order of magnitude lower than any other ultrasound system on the market today, bringing affordable obstetric imaging a step closer for developing countries.}, + file = {Heuv17a.pdf:pdf\\Heuv17a.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + pmid = {28715339}, + month = {8}, +} + +@inproceedings{Heuv17b, + author = {Thomas L. A. van den Heuvel and Hezkiel Petros and Stefano Santini and Chris L. de Korte and Bram van Ginneken}, + title = {Combining Automated Image Analysis with Obstetric Sweeps for Prenatal Ultrasound Imaging in Developing Countries}, + booktitle = {{MICCAI} Workshop: Point-of-Care Ultrasound}, + year = {2017}, + volume = {10549}, + series = LNCS, + publisher = {Springer}, + pages = {105--112}, + doi = {10.1007/978-3-319-67552-7_13}, + abstract = {Ultrasound imaging can be used to detect maternal risk factors, but it remains out of reach for most pregnant women in developing countries because there is a severe shortage of well-trained sonographers. In this paper we show the potential of combining the obstetric sweep protocol (OSP) with image analysis to automatically obtain information about the fetus. 
The OSP can be taught to any health care worker without any prior knowledge of ultrasound within a day, obviating the need for a well-trained sonographer to acquire the ultrasound images. The OSP was acquired from 317 pregnant women using a low-cost ultrasound device in St. Luke's Hospital in Wolisso, Ethiopia. A deep learning network was used to automatically detect the fetal head in the OSP data. The fetal head detection was used to detect twins, determine fetal presentation and estimate gestational age without the need of a well-trained sonographer.}, + file = {Heuv17b.pdf:pdf\\Heuv17b.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + gsid = {948935549850201486}, + gscites = {3}, + ss_id = {aa1ddbeb489b3e103f3dd681ae20ac0014af2209}, + all_ss_ids = {['aa1ddbeb489b3e103f3dd681ae20ac0014af2209']}, +} + +@conference{Heuv17d, + author = {van den Heuvel, Thomas L. A. and de Korte, Chris L. and van Ginneken, Bram}, + title = {Automated Measurement of Fetal Head Circumference in Ultrasound Images}, + booktitle = DBME, + year = {2017}, + file = {Heuv17d.pdf:pdf\\Heuv17d.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, +} + +@article{Heuv18, + author = {van den Heuvel, Thomas L. A. and de Bruijn, Dagmar and Moens-van de Moesdijk, D. and Beverdam, Anette and van Ginneken, Bram and de Korte, Chris L.}, + title = {Comparison Study of Low-Cost Ultrasound Devices for Estimation of Gestational Age in Resource-Limited Countries}, + journal = UMB, + year = {2018}, + volume = {44}, + number = {11}, + pages = {2250--2260}, + doi = {10.1016/j.ultrasmedbio.2018.05.023}, + abstract = {We investigated how accurately low-cost ultrasound devices can estimate the gestational age (GA) using both the standard plane and the obstetric sweep protocol (OSP). The OSP can be taught to health care workers without prior knowledge of ultrasound within one day, thus avoiding the need to train dedicated sonographers. Three low-cost ultrasound devices were compared to one high-end ultrasound device. GA was estimated with the head circumference (HC), abdominal circumference (AC) and femur length (FL) using both the standard plane and the OSP. The results revealed that the HC, AC and FL can be used to estimate GA using low-cost ultrasound devices in the standard plane within the inter-observer variability presented in literature. The OSP can be used to estimate the GA by measuring the HC and the AC, but not the FL. This study shows that it is feasible to estimate GA in resource-limited countries with low-cost ultrasound devices using the OSP. This makes it possible to estimate GA and assess fetal growth for pregnant women in rural areas of resource-limited countries.}, + file = {Heuv18.pdf:pdf\\Heuv18.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + pmid = {30093339}, + month = {11}, + gsid = {3596078569620781703}, + gscites = {11}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/196891}, + ss_id = {80d1a72e3c8bf0e3457492b1f27a05ad6f5dd9a8}, + all_ss_ids = {['80d1a72e3c8bf0e3457492b1f27a05ad6f5dd9a8']}, +} + +@article{Heuv18a, + author = {van den Heuvel, Thomas L. A. and de Bruijn, Dagmar and de Korte, Chris L.
and van Ginneken, Bram}, + title = {Automated measurement of fetal head circumference using 2D ultrasound images}, + journal = PLOSONE, + year = {2018}, + volume = {13}, + number = {8}, + month = {8}, + doi = {10.1371/journal.pone.0200412}, + abstract = {In this paper we present a computer aided detection (CAD) system for automated measurement of the fetal head circumference (HC) in 2D ultrasound images for all trimesters of the pregnancy. The HC can be used to estimate the gestational age and monitor growth of the fetus. Automated HC assessment could be valuable in developing countries, where there is a severe shortage of trained sonographers. The CAD system consists of two steps: First, Haar-like features were computed from the ultrasound images to train a random forest classifier to locate the fetal skull. Secondly, the HC was extracted using Hough transform, dynamic programming and an ellipse fit. The CAD system was trained on 999 images and validated on an independent test set of 335 images from all trimesters. The test set was manually annotated by an experienced sonographer and a medical researcher. The reference gestational age (GA) was estimated using the crown-rump length measurement (CRL). The mean difference between the reference GA and the GA estimated by the experienced sonographer was 0.8 +- 2.6, -0.0 +- 4.6 and 1.9 +- 11.0 days for the first, second and third trimester, respectively. The mean difference between the reference GA and the GA estimated by the medical researcher was 1.6 +- 2.7, 2.0 +- 4.8 and 3.9 +- 13.7 days. The mean difference between the reference GA and the GA estimated by the CAD system was 0.6 +- 4.3, 0.4 +- 4.7 and 2.5 +- 12.4 days. The results show that the CAD system performs comparable to an experienced sonographer. The presented system shows similar or superior results compared to systems published in literature. This is the first automated system for HC assessment evaluated on a large test set which contained data of all trimesters of the pregnancy.}, + file = {:Heuv18a - Automated Measurement of Fetal Head Circumference Using 2D Ultrasound Images.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + pmid = {30138319}, + publisher = {Public Library of Science}, + gsid = {11316581470777681571}, + gscites = {137}, + ss_id = {1427f469d6fb172d200d6f25837cbeaddad425e5}, + all_ss_ids = {['1427f469d6fb172d200d6f25837cbeaddad425e5']}, +} + +@phdthesis{Heuv18b, + author = {Thomas L. A. van den Heuvel}, + title = {Automated low-cost ultrasound: improving antenatal care in resource-limited settings}, + year = {2019}, + url = {https://repository.ubn.ru.nl/handle/2066/199000}, + abstract = {Worldwide, 99% of all maternal deaths occur in developing countries. In absolute numbers, this corresponds to approximately 820 deaths per day. Ultrasound imaging can be used to detect maternal risk factors, but too often remains out of reach for pregnant women in developing countries. This is mainly caused by two reasons: ultrasound is too expensive for resource-limited countries and it requires a trained sonographer to acquire and interpret the ultrasound images, while there is a severe shortage of well-trained medical personnel in these countries. In this thesis we aim to solve this problem by combining low-cost ultrasound devices with the obstetric sweep protocol (OSP) and automated image analysis.}, + file = {:pdf/Heuv18b.pdf:PDF;:png/publications/Heuv18b.png:PNG image;:png/publications/thumbs/Heuv18b.png:PNG image}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + promotor = {B. 
van Ginneken and C. de Korte}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@conference{Heuv19, + author = {van den Heuvel, Thomas L. A. and van Ginneken, Bram and de Korte, Chris L.}, + title = {Improving Maternal Care In Resource-Limited Settings Using A Low-Cost Ultrasound Device And Machine Learning}, + booktitle = DBME, + year = {2019}, + abstract = {Worldwide, 99% of all maternal deaths occur in developing countries. Ultrasound can be used + to detect maternal risk factors, but this technique is rarely used in developing countries + because it is too expensive, and it requires a trained sonographer to acquire and interpret the + ultrasound images. In this work we use a low-cost ultrasound device which was combined + with the obstetric sweep protocol (OSP) and deep learning algorithms to automatically detect + maternal risk factors. The OSP can be taught to any health care worker without prior + knowledge of ultrasound within one day, so there is no need for a trained sonographer. + The OSP was acquired from 318 pregnant women using the low-cost MicrUs (Telemed + Ultrasound Medical Systems, Milan, Italy) in Ethiopia. Two deep learning networks and two + random forest classifiers were trained to automatically detect twin pregnancies, estimate + gestational age (GA) and determine fetal presentation. The first deep learning network + performs a frame classification, which was used to automatically separate the six sweeps of + the OSP and automatically detect the fetal head and torso. The second deep learning network + was trained to measure the fetal head circumference (HC) using all frames in which the first + deep learning system detected the fetal head. The HC was used to determine the GA. Two + random forest classifiers were trained to detect twin pregnancies and determine fetal + presentation using the frame classification of the first deep learning network. + The developed algorithm can automatically estimate the GA with an interquartile range of + 15.2 days, correctly detected 61% of all twins with a specificity of 99%, and correctly detect + all 31 breech presentations and 215 of the 216 cephalic presentations. The developed + algorithm can be computed in less than two seconds, making real-time application feasible. + The presented system is able to determine three maternal risk factors using the OSP. The OSP + can be acquired without the need of a trained sonographer, which makes widespread obstetric + ultrasound affordable and fast to implement in resource-limited settings. This makes is + possible to refer pregnant women in time to a hospital to receive treatment when risk factors + are detected.}, + file = {Heuv19.pdf:pdf\\Heuv19.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, +} + +@article{Heuv19a, + author = {van den Heuvel, Thomas L A and Petros, Hezkiel and Santini, Stefano and de Korte, Chris L and van Ginneken, Bram}, + title = {Automated Fetal Head Detection and Circumference Estimation from Free-Hand Ultrasound Sweeps Using Deep Learning in Resource-Limited Countries}, + journal = UMB, + year = {2019}, + volume = {45}, + number = {3}, + pages = {773--785}, + doi = {10.1016/j.ultrasmedbio.2018.09.015}, + abstract = {Ultrasound imaging remains out of reach for most pregnant women in developing countries because it requires a trained sonographer to acquire and interpret the images. 
We address this problem by presenting a system that can automatically estimate the fetal head circumference (HC) from data obtained with use of the obstetric sweep protocol (OSP). The OSP consists of multiple pre-defined sweeps with the ultrasound transducer over the abdomen of the pregnant woman. The OSP can be taught within a day to any health care worker without prior knowledge of ultrasound. An experienced sonographer acquired both the standard plane-to obtain the reference HC-and the OSP from 183 pregnant women in St. Luke's Hospital, Wolisso, Ethiopia. The OSP data, which will most likely not contain the standard plane, was used to automatically estimate HC using two fully convolutional neural networks. First, a VGG-Net-inspired network was trained to automatically detect the frames that contained the fetal head. Second, a U-net-inspired network was trained to automatically measure the HC for all frames in which the first network detected a fetal head. The HC was estimated from these frame measurements, and the curve of Hadlock was used to determine gestational age (GA). The results indicated that most automatically estimated GAs fell within the P2.5-P97.5 interval of the Hadlock curve compared with the GAs obtained from the reference HC, so it is possible to automatically estimate GA from OSP data. Our method therefore has potential application for providing maternal care in resource-constrained countries.}, + file = {Heuv19a.pdf:pdf\\Heuv19a.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + pmid = {30573305}, + month = {3}, + gsid = {7628412435066869852}, + gscites = {66}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/201329}, + ss_id = {f05cc42c38bae2cfbd8a283fc97a8bdede43dabe}, + all_ss_ids = {['f05cc42c38bae2cfbd8a283fc97a8bdede43dabe']}, +} + +@inproceedings{Heuv19b, + author = {van den Heuvel, Thomas L A and de Korte, Chris L and van Ginneken, Bram}, + title = {Automated interpretation of prenatal ultrasound using a predefined acquisition protocol in resource-limited countries}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=H1eUCb6at4}, + abstract = {In this study, we combine a standardized acquisition protocol with image analysis algorithms to investigate if it is possible to automatically detect maternal risk factors without a trained sonographer. The standardized acquisition protocol can be taught to any health care worker within two hours. This protocol was acquired from 280 pregnant women at St. Luke's Catholic Hospital, Wolisso, Ethiopia. A VGG-like network was used to perform a frame classification for each frame within the acquired ultrasound data. This frame classification was used to automatically determine the number of fetuses and the fetal presentation. A U-net was trained to measure the fetal head circumference in all frames in which the VGG-like network detected a fetal head. This head circumference was used to estimate the gestational age. The results show that it possible automatically determine gestational age and determine fetal presentation and the potential to detect twin pregnancies using the standardized acquisition protocol.}, + file = {Heuv19b.pdf:pdf\\Heuv19b.pdf:PDF;:png/publications/Heuv19b.png:PNG}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + ss_id = {631e2d528d8ae1e1817f73503eadfec39b35a428}, + all_ss_ids = {['631e2d528d8ae1e1817f73503eadfec39b35a428']}, + gscites = {4}, +} + +@article{Hoek09, + author = {C. M. A. Hoeks and J. J. F\"utterer and D. M. Somford and I. M. van Oort and H. Huisman and J. O. 
Barentsz}, + title = {Multiparametric {MRI} for prostate cancer screening}, + journal = NTVG, + year = {2009}, + volume = {153}, + pages = {B487}, + abstract = {{T}wo recent studies on prostate cancer screening gave conflicting results about the effects of screening on prostate cancer mortality. {T}he current screening method of {PSA} determination in combination with transrectal ultrasonic biopsy leads to a large number of false positive results and overtreatment. {A} screening test is needed that reduces the number of unnecessary prostate biopsies and which discriminates between more and less aggressive forms of prostate cancer. {M}ultiparametric {MRI} has a high specificity for prostate cancer detection and provides information about prostate cancer aggressiveness. {PSA} in combination with multiparametric {MRI} performed at 1.5 {T}esla appears to be a fairly accurate screening test. {D}ue to its high costs and limited availability, multiparametric {MRI} is not suitable as a primary screening test. {H}owever, it could serve as a subsequent screening test if the {PSA} has increased above threshold values. {U}sing multiparametric {MRI} as a follow-up test during screening would provide more accurate biopsies, prevent unnecessary prostate biopsies and improve the characterization of prostate cancer.}, + file = {Hoek09.pdf:pdf\\Hoek09.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + pmid = {20003566}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/79762}, + ss_id = {dd8e6561ed2730e2c4cc6e6204fde7d60a2930f7}, + all_ss_ids = {['dd8e6561ed2730e2c4cc6e6204fde7d60a2930f7']}, + gscites = {8}, +} + +@article{Hoek11a, + author = {Caroline M A Hoeks and Jelle O Barentsz and Thomas Hambrock and Derya Yakar and Diederik M Somford and Stijn W T P J Heijmink and Tom W J Scheenen and Pieter C Vos and Henkjan Huisman and Inge M van Oort and J. Alfred Witjes and Arend Heerschap and Jurgen J F\"utterer}, + title = {Prostate Cancer: Multiparametric {MR} Imaging for Detection, Localization, and Staging}, + journal = Radiology, + year = {2011}, + volume = {261}, + pages = {46--66}, + doi = {10.1148/radiol.11091822}, + abstract = {This review presents the current state of the art regarding multiparametric magnetic resonance (MR) imaging of prostate cancer. Technical requirements and clinical indications for the use of multiparametric MR imaging in detection, localization, characterization, staging, biopsy guidance, and active surveillance of prostate cancer are discussed. Although reported accuracies of the separate and combined multiparametric MR imaging techniques vary for diverse clinical prostate cancer indications, multiparametric MR imaging of the prostate has shown promising results and may be of additional value in prostate cancer localization and local staging. Consensus on which technical approaches (field strengths, sequences, use of an endorectal coil) and combination of multiparametric MR imaging techniques should be used for specific clinical indications remains a challenge. Because guidelines are currently lacking, suggestions for a general minimal protocol for multiparametric MR imaging of the prostate based on the literature and the authors' experience are presented. Computer programs that allow evaluation of the various components of a multiparametric MR imaging examination in one view should be developed. In this way, an integrated interpretation of anatomic and functional MR imaging techniques in a multiparametric MR imaging examination is possible. 
Education and experience of specialist radiologists are essential for correct interpretation of multiparametric prostate MR imaging findings. Supportive techniques, such as computer-aided diagnosis are needed to obtain a fast, cost-effective, easy, and more reproducible prostate cancer diagnosis out of more and more complex multiparametric MR imaging data. © RSNA, 2011.}, + file = {Hoek11a.pdf:pdf\\Hoek11a.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {1}, + pmid = {21931141}, + month = {10}, + gsid = {8709303086776931557}, + gscites = {633}, +} + +@article{Hoes11, + author = {Mohamed Hoesein, F. A. A. and P. Zanen and B. van Ginneken and R. J. van Klaveren and J-W. J. Lammers}, + title = {Association of the transfer coefficient of the lung for carbon monoxide with emphysema progression in male smokers}, + journal = ERJ, + year = {2011}, + volume = {38}, + pages = {1012--1018}, + doi = {10.1183/09031936.00050711}, + abstract = {A decreased Kco is associated with emphysema. We evaluated whether in heavy smokers, baseline Kco was associated with progression of CT-detected emphysema, and progression of airflow limitation. Heavy smokers, mean (SD) 41.3 (18.7) pack years, participating in a lung cancer screening trial underwent diffusion testing and CT-scanning of the lungs. CT-scanning was repeated after median (25(th)-75(th) percentile) 2.8 (2.7-3.0) years and emphysema was assessed by lung densitometry using the 15(th) percentile (Perc15). The association between Kco at baseline with progression of emphysema and lung function decline was assessed by multiple linear regression, correcting for baseline CT-quantified emphysema severity and FEV1/FVC, age, height, BMI, pack years and smoking status (current/former smoker). 522 participants were included with a mean (SD) age of 60.1 (5.4) years. Mean Perc15 was -938 (19), absolute FEV1/FVC was 71.6\% (9) and Kco was 1.23 (0.25), which is 81.8\% (16.5) of predicted. By interpolation: a one standard deviation (0.25) lower Kco value at baseline, predicted a 1.6 HU lower Perc15 and a 0.78\% lower FEV1/FVC after follow-up (p<0.001). A lower baseline Kco value is independently associated with a more rapid progression of emphysema and airflow limitation in heavy smokers.}, + file = {Hoes11.pdf:pdf\\Hoes11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21565924}, + month = {5}, + gsid = {10827837198402292096}, + gscites = {26}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/97657}, + ss_id = {8cce5c02b19e5f77784efa75cc6aac29c507a602}, + all_ss_ids = {['8cce5c02b19e5f77784efa75cc6aac29c507a602']}, +} + +@article{Hoes12, + author = {Mohamed Hoesein, F. A. A. and van Rikxoort, E. M. and van Ginneken, B. and de Jong, P. A. and Prokop, M. and Lammers, J-W J. and Zanen, P.}, + title = {{CT}-quantified emphysema distribution is associated with lung function decline}, + journal = ERJ, + year = {2012}, + volume = {40}, + pages = {844--850}, + doi = {10.1183/09031936.00186311}, + abstract = {Emphysema distribution is associated with COPD. It is however unknown whether CT-quantified emphysema distribution (upper/lower lobe) is associated with lung function decline in heavy (former) smokers. 587 male participants underwent lung CT-scanning and pulmonary function testing at baseline and after a median (interquartile range) follow-up of 2.9 (2.8-3.0) years. The lungs were automatically segmented based on anatomically defined lung lobes.
Severity of emphysema was automatically quantified per anatomical lung lobe and was expressed as the 15(th) percentile (HU-point below which 15\% of the low attenuation voxels are distributed (Perc15)). The CT-quantified emphysema distribution was based on principal component analysis. Linear mixed models were used to assess the association of emphysema distribution with FEV1/FVC, FEV1 and FVC-decline. Mean (SD) age was 60.2 (5.4) years, mean baseline FEV1/FVC was 71.6 (9.0) \% and overall mean Perc15 was -908.5 (20.9) HU. Participants with upper lobe predominant CT-quantified emphysema had a lower FEV1/FVC, FEV1 and FVC after follow-up compared to participants with lower lobe predominant CT-quantified emphysema (p=0.001), independent of the total extent of CT-quantified emphysema. Heavy (former) smokers with upper lobe predominant CT-quantified emphysema have a more rapid decrease in lung function than those with lower lobe predominant CT-quantified emphysema.}, + file = {Hoes12.pdf:pdf\\Hoes12.pdf:PDF}, + optnote = {DIAG}, + number = {4}, + pmid = {22323577}, + month = {2}, + gsid = {1084747010435036452}, + gscites = {68}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110461}, + ss_id = {f2b3202d9635c6ec6114a21814eb02638ac82a05}, + all_ss_ids = {['f2b3202d9635c6ec6114a21814eb02638ac82a05']}, +} + +@article{Hoes12a, + author = {Mohamed Hoesein, F. A. A. and Zanen, P. and Boezen, H. M. and Groen, H J M. and van Ginneken, Bram and de Jong, Pim A. and Postma, D. S. and Lammers, J-W J.}, + title = {Lung function decline in heavy male smokers relates to baseline airflow obstruction severity}, + doi = {10.1378/chest.11-2837}, + number = {6}, + pages = {1530--1538}, + volume = {142}, + abstract = {RATIONALE Recent evidence indicates that the rate of lung function decline is steepest in mild COPD and slower in moderate to severe COPD. The current study assessed whether lung function decline relates to baseline airflow obstruction severity in male heavy smokers. METHODS In total, 2,003 male smokers with a mean (SD) age of 59.8 (5.3) years underwent pulmonary function testing at baseline and after 3 year follow-up. Participants were classified by entry FEV(1)/FVC as follows: Group 1, >70\%; Group 2, <70\% but >LLN; and Group 3, <LLN. Subjects with FEV1/FVC >70\% but FEV1 <80\% predicted were excluded. Multivariate regression analysis correcting for covariates was used to assess the extent of emphysema, airway wall thickening and gas trapping according to three groups of airflow limitation. Results: Mean (standard deviation) age was 62.5 (5.2) years and packyears smoked was 41.0 (18.0). Group 2 subjects when compared to group 1 had a significantly lower 15th percentile, -920.6 HU versus -912.2 HU; a higher Pi10, 2.87 mm versus 2.57 mm; and a higher E/I-ratio, 88.6% versus 85.6% (all p<0.001). Conclusion: Subjects with an FEV1/FVC<70%, but above the LLN, have a significantly greater degree of structural lung changes on CT compared to subjects without airflow limitation.}, + file = {Hoes13.pdf:pdf/Hoes13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {23785411}, + month = {6}, + ss_id = {c8656b4c7a205284545931f3c11b92f7219c5ffa}, + all_ss_ids = {['c8656b4c7a205284545931f3c11b92f7219c5ffa']}, + gscites = {17}, +} + +@article{Hoff16, + author = {Hoffman, Joshua D. and {van Grinsven}, Mark J. J. P. and Li, Chun and Brantley, Jr, Milam and McGrath, Josephine and Agarwal, Anita and Scott, William K. and Schwartz, Stephen G.
and Kovach, Jaclyn and Pericak-Vance, Margaret and S\'{a}nchez, Clara I. and Haines, Jonathan L.}, + title = {Genetic Association Analysis of Drusen Progression}, + journal = IOVS, + year = {2016}, + volume = {57}, + number = {4}, + month = {4}, + pages = {2225--2231}, + doi = {10.1167/iovs.15-18571}, + url = {http://dx.doi.org/10.1167/iovs.15-18571}, + abstract = {Age-related macular degeneration is a common form of vision loss affecting older adults. The etiology of AMD is multifactorial and is influenced by environmental and genetic risk factors. In this study, we examine how 19 common risk variants contribute to drusen progression, a hallmark of AMD pathogenesis.Exome chip data was made available through the International AMD Genomics Consortium (IAMDGC). Drusen quantification was carried out with color fundus photographs using an automated drusen detection and quantification algorithm. A genetic risk score (GRS) was calculated per subject by summing risk allele counts at 19 common genetic risk variants weighted by their respective effect sizes. Pathway analysis of drusen progression was carried out with the software package Pathway Analysis by Randomization Incorporating Structure.We observed significant correlation with drusen baseline area and the GRS in the age-related eye disease study (AREDS) dataset (? = 0.175, P = 0.006). Measures of association were not statistically significant between drusen progression and the GRS (P = 0.54). Pathway analysis revealed the cell adhesion molecules pathway as the most highly significant pathway associated with drusen progression (corrected P = 0.02).In this study, we explored the potential influence of known common AMD genetic risk factors on drusen progression. Our results from the GRS analysis showed association of increasing genetic burden (from 19 AMD associated loci) to baseline drusen load but not drusen progression in the AREDS dataset while pathway analysis suggests additional genetic contributors to AMD risk.}, + file = {Hoff16.pdf:pdf\\Hoff16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27116550}, + gsid = {18147613682259186463}, + gscites = {15}, + ss_id = {f8e3b007a829e53c9a9f7527d26d495c8463dd7f}, + all_ss_ids = {['37e383517c34818ad049af0aa763ad5906e9f51a', 'f8e3b007a829e53c9a9f7527d26d495c8463dd7f']}, +} + +@inproceedings{Hoge10, + author = {L. Hogeweg and C. Mol and P. A. de Jong and B. van Ginneken}, + title = {Rib suppression in chest radiographs to improve classification of textural abnormalities}, + booktitle = MI, + year = {2010}, + volume = {7624}, + series = SPIE, + pages = {76240Y1-76240Y6}, + doi = {10.1117/12.844409}, + abstract = {{T}he computer aided diagnosis ({CAD}) of abnormalities on chest radiographs is difficult due to the presence of overlapping normal anatomy. {S}uppression of the normal anatomy is expected to improve performance of a {CAD} system, but such a method has not yet been applied to the computer detection of interstitial abnormalities such as occur in tuberculosis ({TB}). {T}he aim of this research is to evaluate the effect of rib suppression on a {CAD} system for {TB}. {P}rofiles of pixel intensities sampled perpendicular to segmented ribs were used to create a local {PCA}-based shape model of the rib. {T}he model was normalized to the local background intensity and corrected for gradients perpendicular to the rib. {S}ubsequently rib suppressed images were created by subtracting the models for each rib from the original image. 
{T}he effect of rib suppression was evaluated using a {CAD} system for {TB} detection. {S}mall square image patches were sampled randomly from 15 normal and 35 {TB}-affected images containing textural abnormalities. {A}bnormalities were outlined by a radiologist and were given a subtlety rating from 1 to 5. {F}eatures based on moments of intensity distributions of {G}aussian derivative filtered images were extracted. {A} supervised learning approach was used to discriminate between normal and diseased image patches. {T}he use of rib suppressed images increased the overall performance of the system, as measured by the area under the receiver operator characteristic ({ROC}) curve, from 0.75 to 0.78. {F}or the more subtly rated patches (rated 1-3) the performance increased from 0.62 to 0.70.}, + file = {Hoge10.pdf:pdf\\Hoge10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + month = {3}, + gsid = {3846607945750329588}, + gscites = {14}, + ss_id = {9b206a6f4f5b4f3c98394e8109e7a04cf730d9d4}, + all_ss_ids = {['9b206a6f4f5b4f3c98394e8109e7a04cf730d9d4']}, +} + +@inproceedings{Hoge10a, + author = {L. Hogeweg and C. Mol and P. A. de Jong and R. Dawson and H. Ayles and B. van Ginneken}, + title = {Fusion of local and global detection systems to detect tuberculosis in chest radiographs}, + booktitle = MICCAI, + year = {2010}, + volume = {6363}, + series = LNCS, + pages = {650-657}, + doi = {10.1007/978-3-642-15711-0_81}, + abstract = {{A}utomatic detection of tuberculosis ({TB}) on chest radiographs is a difficult problem because of the diverse presentation of the disease. {A} combination of detection systems for abnormalities and normal anatomy is used to improve detection performance. {A} textural abnormality detection system operating at the pixel level is combined with a clavicle detection system to suppress false positive responses. {T}he output of a shape abnormality detection system operating at the image level is combined in a next step to further improve performance by reducing false negatives. {S}trategies for combining systems based on serial and parallel configurations were evaluated using the minimum, maximum, product, and mean probability combination rules. {T}he performance of {TB} detection increased, as measured using the area under the {ROC} curve, from 0.67 for the textural abnormality detection system alone to 0.86 when the three systems were combined. {T}he best result was achieved using the sum and product rule in a parallel combination of outputs}, + file = {Hoge10a.pdf:pdf\\Hoge10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + pmid = {20879456}, + gsid = {938566649715148496}, + gscites = {82}, + ss_id = {08e4dedc42f4673f3af944da663d5f50ada17176}, + all_ss_ids = {['08e4dedc42f4673f3af944da663d5f50ada17176']}, +} + +@conference{Hoge10b, + author = {L. Hogeweg and C. Mol and P. A. de Jong and H. Ayles and R. Dawson and B. van Ginneken}, + title = {Evaluation of a computer aided detection system for tuberculosis on chest radiographs in a high-burden setting}, + booktitle = RSNA, + year = {2010}, + abstract = {{PURPOSE} : {I}n high tuberculosis ({TB}) burden regions the use of chest radiographs ({CXR}) to detect {TB} is limited by the low number of skilled readers available. {T}he performance of a computer aided diagnosis ({CAD}x) system to detect {TB} on {CXR} is evaluated. {METHOD} {AND} {MATERIALS}: {T}he {CAD}x system was designed to detect diffuse abnormalities and lung shape distortions. {U}nobscured lung fields were first segmented using pixel classification. 
{F}or diffuse abnormality detection multiple small circular image patches were sampled inside the lung fields. {F}eatures based on moments of {G}aussian derivatives and position in the lung field were calculated to classify the patches as normal/abnormal using a k nearest neighbor classifier. {T}he probabilistic labels of the classified patches were combined using a quantile rule to assign one probability of abnormality to the image. {T}o detect shape distortions, a shape representation was extracted from the lung field segmentation based on the distance of boundary points to the lung field centroid. {A} normal lung field shape model was created from training images using principal component analysis ({PCA}). {A} large {M}ahalanobis distance of an image to the {PCA} model indicates a high probability of being abnormal. {T}he probabilities from the textural system and the shape system were averaged to obtain a final probability of being abnormal. {T}he training set consisted of 216 digital {CXR}s ({O}delca{DR}, {D}elft {D}iagnostic {I}maging, {T}he {N}etherlands, 0.25 mm pixel spacing) from an outpatient clinic in {C}ape {T}own, {S}outh {A}frica in which abnormal regions were manually indicated by a radiologist. {T}he test set consisted of 209 digital {CXR}s from a high {TB} burden outpatient clinic in {K}anyama, {Z}ambia, and had 66 normal and 143 abnormal cases. {T}his reference standard was set by a radiologist. {CAD}x was evaluated with {A}z, the area under the {R}eceiver {O}perator {C}haracteristic curve. {RESULTS}: {A}z of the {CAD}x system was 0.81. {A}z for the textural abnormality detection system and the shape distortion system alone was 0.77, and 0.80, respectively. {CONCLUSION} : {CAD}x is a promising tool for the detection of tuberculosis, and could improve {TB} case detection in high-burden regions. {CLINICAL} {RELEVANCE}/{APPLICATION}: {T}he use of {CAD}x for {CXR} could help to improve {TB} case detection rate in high-burden regions where resources and number of skilled readers are low.}, + optnote = {DIAG, RADIOLOGY, TB}, + gsid = {4501100822019468090}, + gscites = {2}, +} + +@conference{Hoge11, + author = {L. Hogeweg and A. Story and A. Hayward and R. Aldridge and I. Abubakar and P. Maduskar and B. van Ginneken}, + title = {Computer-aided detection of tuberculosis among high risk groups: potential for automated triage}, + booktitle = RSNA, + year = {2011}, + abstract = {PURPOSE Tuberculosis (TB) screening programs are expensive because of, the large numbers of chest radiographs (CXR) that need to be read by human experts. The performance of a computer aided detection (CADx) system to detect TB on CXR is evaluated to determine its potential to triage images within a high throughput digital mobile TB screening program for high risk groups in London, UK. METHOD AND MATERIALS A large image database consisting of 47,510 CXR from 38,717 individuals was collected by the screening program between 2005 and 2010. In that period 120 screened patients were diagnosed with pulmonary TB. A set of 184 digital chest radiographs (DigitalDiagnost Trixel, Philips Healthcare, The Netherlands) from the screening program were selected to evaluate the CAD system on its ability to discriminate between TB proven and non-TB images. For training 89 images were used (69 consecutive non-TB images and all 20 culture proven TB images from 2006). The system was tested on the remaining 95 cases (67 consecutive non-TB images from 2006 and all 28 culture proven TB images from 2009). 
The research prototype CADx system (Diagnostic Image Analysis Group, Nijmegen, The Netherlands, Delft Diagnostic Imaging, Veenendaal, The Netherlands) was originally developed for analysis of CXR from high burden countries in sub Sahara Africa. It was retrained with abnormal regions that were outlined in the proven TB training cases. Diffuse abnormalities are detected by classifying small patches as normal or suggestive of TB inside automatically segmented unobscured lung fields. The probabilistic labels of the classified patches are then combined into one abnormality score for each image. The CADx system was evaluated on these scores using Receiver Operator Characteristic (ROC) analysis and specificity at 95% sensitivity. RESULTS The area under the ROC curve of the CADx system was 0.86. At a sensitivity of 95%, the specificity was 60%. CONCLUSION CADx can identify a large proportion of normal images in a TB screening setting at high sensitivity and has potential to be used for triage. CLINICAL RELEVANCE/APPLICATION Initial results demonstrate that the system could discard approximately 60% of images to potentially reduce the workload and costs of human readers while keeping a sensitivity of 95%.}, + optnote = {DIAG, RADIOLOGY, TB}, +} + +@article{Hoge12, + author = {L. Hogeweg and C. I. S\'{a}nchez and P. A. de Jong and P. Maduskar and B. van Ginneken}, + title = {Clavicle segmentation in chest radiographs}, + journal = MIA, + year = {2012}, + volume = {16}, + pages = {1490 - 1502}, + doi = {10.1016/j.media.2012.06.009}, + abstract = {Automated delineation of anatomical structures in chest radiographs is difficult due to superimposition of multiple structures. In this work an automated technique to segment the clavicles in posterior-anterior chest radiographs is presented in which three methods are combined. Pixel classification is applied in two stages and separately for the interior, the border and the head of the clavicle. This is used as input for active shape model segmentation. Finally dynamic programming is employed with an optimized cost function that combines appearance information of the interior of the clavicle, the border, the head and shape information derived from the active shape model. The method is compared with a number of previously described methods and with independent human observers on a large database. This database contains both normal and abnormal images and will be made publicly available. The mean contour distance of the proposed method on 249 test images is 1.1 +/- 1.6 mm and the intersection over union is 0.86 +/- 0.10.}, + file = {Hoge12.pdf:pdf\\Hoge12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {8}, + pmid = {22998970}, + month = {12}, + gsid = {15700805333399506982}, + gscites = {57}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110554}, + ss_id = {561164998d2d1b53d5709c2ddb84f4c306a82624}, + all_ss_ids = {['561164998d2d1b53d5709c2ddb84f4c306a82624']}, +} + +@article{Hoge13, + author = {Hogeweg, L. and S\'{a}nchez, C. I. and Melendez,J. and Maduskar, P. and Story, A. and Hayward, A. and van Ginneken, B.}, + title = {Foreign object detection and removal to improve automated analysis of chest radiographs}, + journal = MP, + year = {2013}, + volume = {40}, + pages = {071901}, + doi = {10.1118/1.4805104}, + abstract = {Purpose: Chest radiographs commonly contain projections of foreign objects, such as buttons, brassier clips, jewellery, or pacemakers and wires. 
The presence of these structures can substantially affect the output of computer analysis of these images. An automated method is presented to detect, segment, and remove foreign objects from chest radiographs. Methods: Detection is performed using supervised pixel classification with a kNN classifier, resulting in a probability estimate per pixel to belong to a projected foreign object. Segmentation is performed by grouping and post-processing pixels with a probability above a certain threshold. Next, the objects are replaced by texture inpainting. Results: The method is evaluated in experiments on 257 chest radiographs. The detection at pixel level is evaluated with receiver operating characteristic analysis on pixels within the unobscured lung fields and an Az value of 0.949 is achieved. Free response operator characteristic analysis is performed at the object level, and 95.6% of objects are detected with on average 0.25 false positive detections per image. To investigate the effect of removing the detected objects through inpainting, a texture analysis system for tuberculosis detection is applied to images with and without pathology and with and without foreign object removal. Unprocessed, the texture analysis abnormality score of normal images with foreign objects is comparable to those with pathology. After removing foreign objects, the texture score of normal images with and without foreign objects is similar, while abnormal images, whether they contain foreign objects or not, achieve on average higher scores. Conclusions: The authors conclude that removal of foreign objects from chest radiographs is feasible and beneficial for automated image analysis.}, + file = {Hoge13.pdf:pdf\\Hoge13.pdf:PDF}, + optnote = {DIAG}, + number = {7}, + pmid = {23822438}, + month = {6}, + gsid = {1997428185924918867}, + gscites = {8}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/118444}, + ss_id = {a87ed4355c439f449612db464993e47922629779}, + all_ss_ids = {['a87ed4355c439f449612db464993e47922629779']}, +} + +@article{Hoge13a, + author = {Hogeweg, L. and S\'{a}nchez, C. I. and van Ginneken, B.}, + title = {Suppression of translucent elongated structures: applications in chest radiography}, + journal = TMI, + year = {2013}, + volume = {32}, + pages = {2099-2113}, + doi = {10.1109/TMI.2013.2274212}, + abstract = {Projection images, such as those routinely acquired in radiological practice, are difficult to analyze because multiple 3D structures superimpose at a single point in the 2D image. Removal of particular superimposed structures may improve interpretation of these images, both by humans and by computers. This work therefore presents a general method to isolate and suppress structures in 2D projection images. The focus is on elongated structures, which allows an intensity model of a structure of interest to be extracted using local information only. The model is created from profiles sampled perpendicular to the structure. Profiles containing other structures are detected and removed to reduce the influence on the model. Subspace filtering, using blind source separation techniques, is applied to separate the structure to be suppressed from other structures. By subtracting the modeled structure from the original image a structure suppressed image is created. The method is evaluated in four experiments. In the first experiment ribs are suppressed in 20 artificial radiographs simulated from 3D lung computed tomography (CT) images. 
The proposed method with blind source separation and outlier detection shows superior suppression of ribs in simulated radiographs, compared to a simplified approach without these techniques. Additionally, the ability of three observers to discriminate between patches containing ribs and containing no ribs, as measured by the Area under the Receiver Operating Characteristic curve (AUC), reduced from 0.99-1.00 on original images to 0.75-0.84 on suppressed images. In the second experiment clavicles are suppressed in 253 chest radiographs. The effect of suppression on clavicle visibility is evaluated using the clavicle contrast and border response, showing a reduction of 78\% and 34\% respectively. In the third experiment nodules extracted from CT were simulated close to the clavicles in 100 chest radiographs. It was found that after suppression contrast of the nodules was higher than of the clavicles and 2.46 respectively). In the fourth experiment catheters were suppressed in chest radiographs. The ability of three observers to discriminate between patches originating from 36 images with and 21 images without catheters, as measured by the AUC, reduced from 0.98-0.99 on original images to 0.64-0.74 on suppressed images. We conclude that the presented method can markedly reduce the visibility of elongated structures in chest radiographs and shows potential to enhance diagnosis.}, + file = {Hoge13a.pdf:pdf\\Hoge13a.pdf:PDF}, + optnote = {DIAG}, + number = {11}, + pmid = {23880041}, + month = {11}, + gsid = {8722134071273679653}, + gscites = {29}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/126283}, + ss_id = {3740f221395f8283dfc90cf82449321c96034d73}, + all_ss_ids = {['3740f221395f8283dfc90cf82449321c96034d73']}, +} + +@phdthesis{Hoge13b, + author = {Laurens E. Hogeweg}, + title = {Automatic detection of tuberculosis in chest radiographs}, + year = {2013}, + url = {http://repository.ubn.ru.nl/handle/2066/118035}, + abstract = {Tuberculosis is a common disease with high morbidity and mortality rates worldwide. Chest radiography plays an important role in screening algorithms. The introduction of digital radiography has made it easier to develop automated systems that detect abnormalities related to tuberculosis in chest radiographs. This thesis describes the development of such an automated system and its evaluation.}, + copromotor = {C. I. S\'{a}nchez}, + file = {Hoge13b.pdf:pdf/Hoge13b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, + all_ss_ids = {['7a172048b86dc29f63206822728e6dd730086ff3']}, + gscites = {8}, +} + +@article{Hoge15, + author = {Hogeweg, L. and S\'{a}nchez, C. I. and Maduskar, P. and Philipsen, R. and Story, A. and Dawson, R. and Theron, G. and Dheda, K. and Peters-Bax, L. and van Ginneken, B.}, + title = {Automatic detection of tuberculosis in chest radiographs using a combination of textural, focal, and shape abnormality analysis}, + journal = TMI, + year = {2015}, + volume = {34}, + number = {12}, + pages = {2429--2442}, + doi = {10.1109/TMI.2015.2405761}, + abstract = {Tuberculosis (TB) is a common disease with high mortality and morbidity rates worldwide. The chest radiograph (CXR) is frequently used in diagnostic algorithms for pulmonary TB. Automatic systems to detect TB on CXRs can improve the efficiency of such diagnostic algorithms. 
The diverse manifestation of TB on CXRs from different populations requires a system that can be adapted to deal with different types of abnormalities. A computer aided detection (CAD) system was developed which combines the results of supervised subsystems detecting textural, shape, and focal abnormalities into one TB score. The textural abnormality subsystem provided several subscores analyzing different types of textural abnormalities and different regions in the lung. The shape and focal abnormality subsystem each provided one subscore. A general framework was developed to combine an arbitrary number of subscores: subscores were normalized, collected in a feature vector and then combined using a supervised classifier into one combined TB score. Two databases, both consisting of 200 digital CXRs, were used for evaluation, acquired from (A) a Western high-risk group screening and (B) TB suspect screening in Africa. The subscores and combined TB score were compared to two references: an external, non-radiological, reference and a radiological reference determined by a human expert. The area under the Receiver Operator Characteristic (ROC) curve Az was used to measure performance. Additionally, the performance of an independent human observer was compared to the best individual subscore and to the combined TB score. For database A, the best performing subscores achieved Az = 0.827 and 0.821 for the external and radiological reference respectively, whereas in database B Az = 0.759 and 0.866 was achieved. Different subscores performed best in the two databases. The combined TB score performed better than the individual subscores, except for the external reference in database B, giving performances of 0.868 and 0.847 in database A and 0.741 and 0.899 in database B. The performances of the independent observer, 0.910 and 0.942 in database A and 0.755 and 0.939 in database B were slightly higher than the combined TB score. Compared to the external reference, differences in performance between the combined TB score and the independent observer were not significant in both databases. The combined TB score performed better than the individual subscores and approaches performance of human observers with respect to the external and radiological reference. Supervised combination to compute an overall TB score allows for a necessary adaptation of the CAD system to different settings or different operational requirements. I.}, + file = {Hoge15.pdf:pdf\\Hoge15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25706581}, + month = {12}, + gsid = {9749688157397588773}, + gscites = {72}, + ss_id = {bcdc96b82387f57a2cfda77682add942d2fe798c}, + all_ss_ids = {['bcdc96b82387f57a2cfda77682add942d2fe798c']}, +} + +@article{Hoge17, + author = {Hogeweg, Laurens and S\'{a}nchez, Clara I and Maduskar, Pragnya and Philipsen, Rick H H M and van Ginneken, Bram}, + title = {Fast and effective quantification of symmetry in medical images for pathology detection: application to chest radiography}, + journal = MP, + year = {2017}, + volume = {44}, + number = {6}, + month = {6}, + pages = {2242-2256}, + doi = {10.1002/mp.12127}, + abstract = {Symmetry is an important feature of human anatomy and the absence of symmetry in medical images can indicate the presence of pathology. Quantification of image symmetry can then be used to improve the automatic analysis of medical images. A method is presented that computes both local and global symmetry in 2D medical images. 
A symmetry axis is determined to define for each position p in the image a mirrored position p' on the contralateral side of the axis. In the neighborhood of p', an optimally corresponding position ps is determined by minimizing a cost function d that combines intensity differences in a patch around p and the mirrored patch around ps and the spatial distance between p' and ps. The optimal value of d is used as a measure of local symmetry s. The average of all values of s, indicated as S, quantifies global symmetry. Starting from an initial approximation of the symmetry axis, the optimal orientation and position of the axis is determined by greedy minimization of S. The method was evaluated in three experiments concerning abnormality detection in frontal chest radiographs. In the first experiment, global symmetry S was used to discriminate between 174 normal images and 174 images containing diffuse textural abnormalities from the publicly available CRASS database of tuberculosis suspects. Performance, measured as Area under the Receiver Operating Characteristic curve Az was 0.838. The second experiment investigated whether adding the local symmetry s as an additional feature to a set of 106 texture features resulted in improvements in classifying local patches in the same image database. We found that Az increased from 0.878 to 0.891 (p = 0.001). In the third experiment it was shown that the contrast of pulmonary nodules, obtained from the publicly available JSRT database, increased significantly in the local symmetry map compared to the original image. We conclude that the proposed algorithm for symmetry computation provides informative features which can be used to improve abnormality detection in medical images both at a local and a global level. This article is protected by copyright. All rights reserved.}, + file = {Hoge17.pdf:pdf\\Hoge17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28134985}, + gsid = {11690052651496044399}, + gscites = {5}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/175031}, + ss_id = {9ec13cc1fde3f48b96db330a11c523621f2ef6de}, + all_ss_ids = {['9ec13cc1fde3f48b96db330a11c523621f2ef6de']}, +} + +@inproceedings{Holl14, + author = {Holland, K. and Kallenberg, M. and Mann, R. and van Gils, C. and Karssemeijer, N}, + title = {Stability of Volumetric Tissue Composition Measured in Serial Screening Mammograms}, + booktitle = {Breast Imaging - 12th International Workshop, IWDM 2014, Gifu City, Japan, June 29 - July 2, 2014. Proceedings}, + year = {2014}, + volume = {8539}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing}, + doi = {10.1007/978-3-319-07887-8_34}, + file = {Holl14.pdf:pdf/Holl14.pdf:PDF}, + optnote = {DIAG}, + gsid = {5470278989230824793}, + gscites = {7}, + ss_id = {56a1fe85a51f377f8985f38d5798b7037f90febb}, + all_ss_ids = {['56a1fe85a51f377f8985f38d5798b7037f90febb']}, +} + +@conference{Holl15, + author = {Holland, Katharina and Gubern-M\'{e}rida, Albert and Mann, Ritse and Karssemeijer, Nico}, + title = {Improved volumetric breast density assessment in dense breasts}, + booktitle = {7th International Workshop on Breast Densitometry and Cancer Risk Assessment}, + year = {2015}, + abstract = {To personalize screening procedures, volumetric percent density (VPD) may be used to stratify risk groups.
To obtain VPD, the glandular tissue volume (GTV) is estimated in unprocessed mammograms using a physics-based method which relies on an internal reference value (RV) representing the projection of fat only. However, pure fat pixels are rare in dense breasts, causing an underestimation of GTV and VPD. The purpose of this work is to improve the VPD estimate in dense breasts. We collected 43 paired FFDM and MRI examinations. Mammographic VPD was estimated in different ways using three different reference values and compared to estimations based on MRI data. Pearson correlation coefficients were calculated with estimations averaged over both breasts and both mammographic views. The first two RVs are percentiles (0.99) of the pixel value distribution in the breast interior (BI). RV1 was obtained with a small BI. For RV2, a larger BI was used. Especially in dense breasts this may facilitate the identification of a pure fat pixel, that may not be present in the small BI. RV3 was defined by estimating the proportion of dense tissue in the densest location in the larger BI, using the maximum fraction of dense tissue projected on a line crossing the BI. Additionally we investigated a combination of the three estimations, by taking estimations of RV1 for nondense breasts and a combination of the results of RV2 and RV3 for dense breasts, using the estimation with RV1 to determine if the breast is dense. We found correlations of 0.89, 0.87 and 0.76 using RV1, RV2 and RV3 respectively. This improved to 0.91 when combining the three estimations. The reference value determination is crucial for calculation of VPD. The combination of three different methods yields the best result as different breasts density patterns require different approaches.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Holl15a, + author = {Holland, K. and van Gils, C.H. and Wanders, J.O.P. and Mann, R.M. and Karssemeijer, N}, + title = {How can we identify women at risk for a masked cancer, who may benefit from supplemental screening?}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE The sensitivity of mammograms is low for women with dense breasts, since cancers may be masked by dense tissue. In this study, we investigate methods to identify women with density patterns associated with a high masking risk. Three methods based on quantitative volumetric breast density analysis are compared to an area based density measure. METHOD AND MATERIALS We used the last negative screening mammograms of 87 women who subsequently presented an interval cancer (IC) and, as controls, 870 randomly selected normal screening exams from women without cancer. Volumetric breast density maps (VBDMs) were computed using software provided by Matakina (Wellington, New Zealand). These maps provide dense tissue thickness for each pixel location. We used the VBDMs to compute three masking measures: 1) Volume of glandular tissue (VGT), 2) Percent dense volume (PDV), and 3) Percent area where dense tissue thickness exceeds 1cm (PA1). In addition, we determined percentage dense area (PDA) after classifying pixels automatically in dense and nondense classes (random forest classifier). Methods were applied to MLO views and averaged per exam. For each method, we selected cases with the highest masking measure (by thresholding) and computed the fraction of ICs as a function of the fraction of controls selected. We used the Volpara Density Grade (VDG, threshold on PDV) to distinguish women with nondense breasts from dense breasts (VDG3+4). 
In practice women with dense breasts are offered supplemental screening. We determined the fraction of controls corresponding to this categorization, and determined sensitivity of our masking measures to select women with masked cancers. RESULTS Using VDG, 38% of the controls have dense breasts. When offering 38% of the women supplemental screening, 55%, 66%, 71% and 60% of the women with IC would be included using VGT, PDV, PA1 and PDA respectively. The sensitivity of PA1 was significantly higher compared to VGT and PDA (p-value <0.05). CONCLUSION Measures based on volumetric density maps are a promising tool to identify women with a high risk for a masked cancer. Novel masking risk measures have a higher sensitivity than often used measures such as percent dense volume and area. CLINICAL RELEVANCE/APPLICATION When offering supplemental screening to women with a high risk for masked cancer, the response of this group should be as high as possible to make supplemental screening feasible and cost efficient.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Holl15b, + author = {Holland, K. and van Gils, C.H. and Wanders, J.O.P. and Mann, R.M. and Karssemeijer, N}, + title = {Optimisation of the selection of women with an increased risk of a masked tumour for supplementary screening}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE The sensitivity of mammograms is low for women with dense breasts, because cancers may be masked by dense tissue. In this study we investigate methods to identify women with density patterns associated with a high masking risk. Four methods based on quantitative volumetric breast density analysis are compared to an area based density measure. METHOD AND MATERIALS We used the last negative screening mammograms of 87 women who subsequently presented an interval cancer (IC), and 870 randomly selected normal screening exams from women without cancer as controls. Volumetric breast density maps (VBDMs) were computed using software provided by Matakina (Wellington, New Zealand). These maps provide the dense tissue thickness for each pixel location. We used the VBDMs to compute four masking measures: 1) Volume of glandular tissue (VGT), 2) Percent dense volume (PDV), 3) Percent area where dense tissue thickness exceeds 1cm (PA1), and 4) Possibility that there is a tumor, with diameter t, at a location with dense tissue thickness d, normalized to the breast area, taking into account the size distribution of screen detected cancers (PT). In addition we determined percentage dense area (PDA) after classifying pixels automatically in dense and non-dense classes using a random forest classifier. Methods were applied to MLO views and then averaged per exam. For each method, we selected cases with the highest masking measure by thresholding and computed the fraction of ICs as a function of the fraction of controls selected. Furthermore we computed the ratio between IC with supplemental screening offer and the supplemental screening rate. RESULTS The highest ratio between IC and supplemental screening rate has been observed for PA1 at a screening rate of 5% and a sensitivity of almost 20%. For screening rates above 20%, the highest response of IC can be observed when using PA1 and PT for stratification. CONCLUSION We showed that the different breast density measures are suited for stratification. A careful choice of the stratification criteria is necessary depending on the number of women that one is willing to offer supplemental screening. 
CLINICAL RELEVANCE/APPLICATION To make supplemental breast cancer screening feasible and cost efficient, it is necessary to have a high response in the target group while the selected women in the overall screening population remains low.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Holl15c, + author = {Holland, K. and van Gils, C.H. and Wanders, J.O.P. and Mann, R.M. and Karssemeijer, N}, + title = {Consistency of density categories over multiple screening rounds using volumetric breast density}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE As a result of the breast density laws, clinicians in many states are obliged to inform women about their breast density and the increased risk that is associated with dense breasts. An increasing number of women with dense breasts decides to have supplemental screening. Using an automated software system, we investigated the consistency of the classification of serial screening mammograms in non-dense and dense classes over time. METHOD AND MATERIALS Full field digital mammograms from a breast cancer screening program were used in which women are invited every two years, starting at the age of 50. The initial screening exam and three subsequent screening exams were available for 2504 women. The average screening interval was 24.4 months. All images were processed by Volpara 1.5.0 (Matakina, Wellington, New Zealand); volumetric percent breast density (PDV) was calculated and averaged over both MLO images. Using the thresholds of the Volpara Density Grade (VDG), all exams were classified as non-dense (PDV<7.5, VDG1+2) or dense (PDV>7.5, VDG3+4). Additionally, to avoid class switches due to small fluctuations of PDV, we defined a gated threshold as follows: For a change to the dense category a PDV greater than 8.3 was required, for a decrease a threshold of 6.7 was used. The gate width was based on noise measures. RESULTS The majority of women stayed in the same category for the whole period, 38.9% non-dense and 34.5% dense, using the fixed threshold. In 18.1% of the women density decreased and the class changed from dense to non-dense; The deviating patterns were as follows: For 2.4% of the women one intermediate exam was classified as non-dense, while all other exams were dense. Three non-dense and one dense exam were observed in 3.8%. In 2.4% two exams were classified as dense and two as non-dense. Use of the gated threshold reduced the number of women with a deviating pattern. CONCLUSION Classification into dense and non-dense classes gives stable results over time. Only in a small fraction of the population do we need to assume that an exam was not assigned to the proper class. Use of a gated threshold to separate the non-dense from the dense class reduces the percentage of misclassified exams. CLINICAL RELEVANCE/APPLICATION A consistent classification in non-dense and dense classes is important, as women and clinicians might lose confidence in the stratification process when supplemental screening is offered in deviating pattern.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Holl16, + author = {Holland, K. and van Gils, C. H. and Wanders, J. O. P. and Mann, R. M. 
and Karssemeijer, N.}, + title = {Quantification of mammographic masking risk with volumetric breast density maps: How to select women for supplemental screening}, + booktitle = MI, + year = {2016}, + series = SPIE, + doi = {10.1117/12.2216810}, + abstract = {The sensitivity of mammograms is low for women with dense breasts, since cancers may be masked by dense tissue. In this study, we investigated methods to identify women with density patterns associated with a high masking risk. Risk measures are derived from volumetric breast density maps. + We used the last negative screening mammograms of 93 women who subsequently presented with an interval cancer (IC), and, as controls, 930 randomly selected normal screening exams from women without cancer. Volumetric breast density maps were computed from the mammograms, which provide the dense tissue thickness at each location. These were used to compute absolute and percentage glandular tissue volume. + We modeled the masking risk for each pixel location using the absolute and percentage dense tissue thickness and we investigated the effect of taking the cancer location probability distribution (CLPD) into account. + For each method, we selected cases with the highest masking measure (by thresholding) and computed the fraction of ICs as a function of the fraction of controls selected. The latter can be interpreted as the negative supplemental screening rate (NSSR). + Between the models, when incorporating CLPD, no significant differences were found. In general, the methods performed better when CLPD was included. At higher NSSRs some of the investigated masking measures had a significantly higher performance than volumetric breast density. These measures may therefore serve as an alternative to identify women with a high risk for a masked cancer.}, + file = {:pdf/Holl16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {4492352441379685768}, + gscites = {4}, + ss_id = {0dc755ded5e6a44e9ce9ac47bc69aa5573b458f0}, + all_ss_ids = {['0dc755ded5e6a44e9ce9ac47bc69aa5573b458f0']}, +} + +@inproceedings{Holl16a, + author = {Holland, K. and Sechopoulos, I. and den Heeten,G.J. and Mann, R.M. and Karssemeijer, N.}, + title = {Performance of breast cancer screening depends on mammographic compression}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {183-189}, + doi = {10.1007/978-3-319-41546-8_24}, + abstract = {During mammographic acquisition, the breast is compressed between the breast support plate and the compression paddle to improve image quality and reduce dose, among other reasons. The applied force, which is measured by the imaging device, varies substantially, due to local guidelines, positioning, and breast size. Force measurements may not be very relevant though, because the amount of compression will be related to pressure rather than force. With modern image analysis techniques, the contact surface of the breast under compression can be determined and pressure can be computed retrospectively. In this study, we investigate if there is a relation between pressure applied to the breast during compression and screening performance. + In a series of 113,464 screening exams from the Dutch breast cancer screening program we computed the compression pressure applied in the MLO projections of the right and left breasts. 
The exams were binned into five groups of increasing applied pressure, in such a way that each group contains 20% of the exams. Thresholds were 7.68, 9.18, 10.71 and 12.81 kPa. Screening performance measures were determined for each group. Differences across the groups were investigated with a Pearson's Chi Square test. It was found that PPV and the cancer detection rate vary significantly within the five groups (p = 0.001 and p = 0.011 respectively).The PPV was 25.4, 31.2, 32.7, 25.8 and 22.0 for the five groups with increasing pressure. The recall rate, false positive rate and specificity were not statistically significant from the expectation (p-values: 0.858, 0.088 and 0.094 respectively). Even though differences are not significant, there is a trend that the groups with a moderate pressure have a better performance compared to the first and last category. + The results suggest that high pressure reduces detectability of breast cancer. The best screening results were found in the groups with a moderate pressure.}, + file = {:pdf/Holl16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {10535534135064321671}, + gscites = {17}, + ss_id = {0b86365ed2e292ae5da868946983b31f7a9dc391}, + all_ss_ids = {['0b86365ed2e292ae5da868946983b31f7a9dc391']}, +} + +@article{Holl16b, + author = {Holland, Katharina and {van Zelst}, Jan and {den Heeten}, Gerard J. and Imhof-Tas, Mechli and Mann, Ritse M. and {van Gils}, Carla H. and Karssemeijer, Nico}, + title = {Consistency of breast density categories in serial screening mammograms: A comparison between automated and human assessment}, + journal = {Breast}, + year = {2016}, + volume = {29}, + month = {7}, + pages = {49--54}, + doi = {10.1016/j.breast.2016.06.020}, + url = {http://dx.doi.org/10.1016/j.breast.2016.06.020}, + abstract = {Reliable breast density measurement is needed to personalize screening by using density as a risk factor and offering supplemental screening to women with dense breasts. We investigated the categorization of pairs of subsequent screening mammograms into density classes by human readers and by an automated system. With software (VDG) and by four readers, including three specialized breast radiologists, 1000 mammograms belonging to 500 pairs of subsequent screening exams were categorized into either two or four density classes. We calculated percent agreement and the percentage of women that changed from dense to non-dense and vice versa. Inter-exam agreement (IEA) was calculated with kappa statistics. Results were computed for each reader individually and for the case that each mammogram was classified by one of the four readers by random assignment (group reading). Higher percent agreement was found with VDG (90.4\%, CI 87.9-92.9\%) than with readers (86.2-89.2\%), while less plausible changes from non-dense to dense occur less often with VDG (2.8\%, CI 1.4-4.2\%) than with group reading (4.2\%, CI 2.4-6.0\%). We found an IEA of 0.68-0.77 for the readers using two classes and an IEA of 0.76-0.82 using four classes. IEA is significantly higher with VDG compared to group reading. The categorization of serial mammograms in density classes is more consistent with automated software than with a mixed group of human readers. 
When using breast density to personalize screening protocols, assessment with software may be preferred over assessment by radiologists.}, + file = {:pdf/Holl16b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27420382}, + gsid = {8184360643827561041}, + gscites = {20}, + ss_id = {e034856f4f0949b6ebbe7d4161e253719fdcb399}, + all_ss_ids = {['862ef80662ea6cd8e646df642abc3fd343263191', 'e034856f4f0949b6ebbe7d4161e253719fdcb399']}, +} + +@article{Holl17, + author = {Holland, K. and {van Gils}, C. H. and Mann, R. M. and Karssemeijer, N.}, + title = {Quantification of masking risk in screening mammography with volumetric breast density maps}, + journal = BRECRT, + year = {2017}, + volume = {162}, + number = {3}, + month = {2}, + pages = {541--548}, + doi = {10.1007/s10549-017-4137-4}, + abstract = {Purpose: Fibroglandular tissue may mask breast cancers, thereby reducing the sensitivity of mammography. Here we investigate methods for identification of women at high risk of a masked tumor, who could benefit from additional imaging. + Methods: The last negative screening mammograms of 111 women with interval cancer (IC) within 12 months after the examination and 1110 selected normal screening exams from women without cancer were used. From the mammograms volumetric breast density maps were computed, which provide the dense tissue thickness for each pixel location. With these maps, three measurements were derived: 1) Percent dense volume (PDV), 2) Percent area where dense tissue thickness exceeds 1cm (PDA), 3) Dense Tissue Masking Model (DTMM). Breast density was scored by a breast radiologist using BI-RADS. Women with heterogeneously and extremely dense breasts were considered at high masking risk. For each masking measure, mammograms were divided into a high and low risk category, such that the same proportion of the controls is at high masking risk as with BI-RADS. + Results: Of the women with IC, 66.1%, 71.9%, 69.2% and 63.0% were categorized to be at high masking risk with PDV, PDA, DTMM and BI-RADS respectively, against 38.5% of the controls. The proportion of IC at high masking risk is statistically significantly different between BI-RADS and PDA (p-value 0.022). Differences between BI-RADS and PDV, or BI-RADS and DTMM, are not statistically significant. + Conclusion: Measures based on density maps, and in particular PDA, are promising tools to identify women at high risk for a masked cancer.}, + file = {:pdf/Holl17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28161786}, + publisher = {Springer Nature}, + gsid = {14965501142326111206}, + gscites = {31}, + ss_id = {907cc7a4cdfde916a2295b9e459564f6d51e1d24}, + all_ss_ids = {['907cc7a4cdfde916a2295b9e459564f6d51e1d24']}, +} + +@article{Holl17a, + author = {Holland, K. and Gubern-M\'{e}rida, A. and Mann, R. M. and Karssemeijer, N.}, + title = {Optimization of volumetric breast density estimation in digital mammograms}, + journal = PMB, + year = {2017}, + volume = {62}, + number = {9}, + month = {4}, + pages = {3779--3797}, + doi = {10.1088/1361-6560/aa628f}, + abstract = {Fibroglandular tissue volume and percent density can be estimated in unprocessed mammograms using a physics-based method, which relies on an internal reference value representing the projection of fat only. However, pixels representing fat only may not be present in dense breasts, causing an underestimation of density measurements. 
In this work, we investigate alternative approaches for obtaining a tissue reference value to improve density estimations, particularly in dense breasts. + Two of three investigated reference values (F1, F2) are percentiles of the pixel value distribution in the breast interior (the contact area of breast and compression paddle). F1 is determined in a small breast interior, which minimizes the risk that peripheral pixels are included in the measurement at the cost of increasing the chance that no proper reference can be found. F2 is obtained using a larger breast interior. The new approach which is developed for very dense breasts does not require the presence of a fatty tissue region. As reference region we select the densest region in the mammogram and assume that this represents a projection of entirely dense tissue embedded between the subcutaneous fatty tissue layers. By measuring the thickness of the fat layers a reference (F3) can be computed. To obtain accurate breast density estimates irrespective of breast composition we investigated a combination of the results of the three reference values. We collected 202 pairs of MRI's and digital mammograms from 119 women. We compared the percent dense volume estimates based on both modalities and calculated Pearson's correlation coefficients. + With the references F1-F3 we found respectively a correlation of R=0.80, R=0.89 and R=0.74. Best results were obtained with the combination of the density estimations (R=0.90). + Results show that better volumetric density estimates can be obtained with the hybrid method, in particular for dense breasts, when algorithms are combined to obtain a fatty tissue reference value depending on breast composition.}, + file = {:pdf/Holl17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28230532}, + gsid = {849413026229250543}, + gscites = {6}, + all_ss_ids = {e844b6b027c94468de8a607497f95a3771b7d48b}, +} + +@phdthesis{Holl17b, + author = {Katharina Holland}, + title = {Breast density measurement for personalised screening}, + year = {2017}, + url = {http://diagnijmegen.nl/index.php/Publication?bibkey=Holl17b}, + abstract = {It is not new that women with high breast density (the amount of ducts and epithelial tissue in relation to the breast volume) have an increased breast cancer risk compared to women with low breast density. When measuring the influence of breast density on the performance of the Dutch breast cancer screening program, we see that the sensitivity decreases with increasing density. Women with dense breast have not only an increased breast cancer risk but also an increased risk that the tumour is not detected within the screening program. Hence, breast density could be used for personalised screening. Depending on the breast cancer risk and the risk of not detecting the tumour with mammography, additional imaging with ultrasound or MRI could be offered. Next to the influence of breast density on the screening program performance, we investigated the consistency of breast density estimations between a computer program and radiologists, and whether these density measurements are suitable for a stratification into different screening regimes.}, + copromotor = {C. H. van Gils and R. M. Mann}, + file = {:pdf/Holl17b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Holl17c, + author = {Holland, K. and Sechopoulos, I. and Mann, R. M. and {den Heeten}, G. J. and {van Gils}, C. H. 
and Karssemeijer, N.}, + title = {Influence of breast compression pressure on the performance of population-based mammography screening}, + journal = BRECR, + year = {2017}, + volume = {19}, + number = {1}, + pages = {126}, + doi = {10.1186/s13058-017-0917-3}, + url = {https://doi.org/10.1186/s13058-017-0917-3}, + abstract = {Background In mammography, breast compression is applied to reduce the thickness of the breast. While it is widely accepted that firm breast compression is needed to ensure acceptable image quality, guidelines remain vague about how much compression should be applied during mammogram acquisition. A quantitative parameter indicating the desirable amount of compression is not available. Consequently, little is known about the relationship between the amount of breast compression and breast cancer detectability. The purpose of this study is to determine the effect of breast compression pressure in mammography on breast cancer screening outcomes. + Methods We used digital image analysis methods to determine breast volume, percent dense volume, and pressure from 132,776 examinations of 57,179 women participating in the Dutch population-based biennial breast cancer screening program. Pressure was estimated by dividing the compression force by the area of the contact surface between breast and compression paddle. The data was subdivided into quintiles of pressure and the number of screen-detected cancers, interval cancers, false positives, and true negatives were determined for each group. Generalized estimating equations were used to account for correlation between examinations of the same woman and for the effect of breast density and volume when estimating sensitivity, specificity, and other performance measures. Sensitivity was computed using interval cancers occurring between two screening rounds and using interval cancers within 12 months after screening. Pair-wise testing for significant differences was performed. + Results Percent dense volume increased with increasing pressure, while breast volume decreased. Sensitivity in quintiles with increasing pressure was 82.0%, 77.1%, 79.8%, 71.1%, and 70.8%. Sensitivity based on interval cancers within 12 months was significantly lower in the highest pressure quintile compared to the third (84.3% vs 93.9%, p=0.034). Specificity was lower in the lowest pressure quintile (98.0%) compared to the second, third, and fourth group (98.5%, p<0.005). Specificity of the fifth quintile was 98.4%. + Conclusion Results suggest that if too much pressure is applied during mammography this may reduce sensitivity. In contrast, if pressure is low this may decrease specificity.}, + file = {:pdf/Holl17c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29183348}, + month = {11}, + gsid = {15897061577657853702}, + gscites = {36}, + ss_id = {817c6ab7fde2a45210b6b80c4713b9147b3eba19}, + all_ss_ids = {['817c6ab7fde2a45210b6b80c4713b9147b3eba19']}, +} + +@article{Hols15, + author = {{van der Holst}, Helena M. and {van Uden}, Inge W M. and Tuladhar, Anil M. and {de Laat}, Karlijn F. and {van Norden}, Anouk G W. and Norris, David G. and {van Dijk}, Ewoud J. and Esselink, Rianne A J. 
and Platel, Bram and {de Leeuw}, Frank-Erik}, + title = {Cerebral small vessel disease and incident parkinsonism: The RUN DMC study}, + journal = Neurology, + year = {2015}, + volume = {85}, + number = {18}, + month = {11}, + pages = {1569--1577}, + doi = {10.1212/WNL.0000000000002082}, + url = {http://dx.doi.org/10.1212/WNL.0000000000002082}, + abstract = {To investigate the relation between baseline cerebral small vessel disease (SVD) and the risk of incident parkinsonism using different MRI and diffusion tensor imaging (DTI) measures.In the Radboud University Nijmegen Diffusion Tensor and Magnetic Resonance Cohort (RUN DMC) study, a prospective cohort study, 503 elderly participants with SVD and without parkinsonism were included in 2006. During follow-up (2011-2012), parkinsonism was diagnosed according to UK Brain Bank criteria. Cox regression analysis was used to investigate the association between baseline imaging measures and incident all-cause parkinsonism and vascular parkinsonism (VP). Tract-based spatial statistics analysis was used to identify differences in baseline DTI measures of white matter (WM) tracts between participants with VP and without parkinsonism.Follow-up was available from 501 participants (mean age 65.6 years; mean follow-up duration 5.2 years). Parkinsonism developed in 20 participants; 15 were diagnosed with VP. The 5-year risk of (any) parkinsonism was increased for those with a high white matter hyperintensity (WMH) volume (hazard ratio [HR] 1.8 per SD increase, 95\% confidence interval [CI] 1.3-2.4) and a high number of lacunes (HR 1.4 per number increase, 95\% CI 1.1-1.8) at baseline. For VP, this risk was also increased by the presence of microbleeds (HR 5.7, 95\% CI 1.9-16.8) and a low gray matter volume (HR 0.4 per SD increase, 95\% CI 0.2-0.8). Lower fractional anisotropy values in bifrontal WM tracts involved in movement control were observed in participants with VP compared to participants without parkinsonism.SVD at baseline, especially a high WMH volume and a high number of lacunes, is associated with incident parkinsonism. Our findings favor a role of SVD in the etiology of parkinsonism.}, + file = {Hols15.pdf:pdf\\Hols15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26446068}, + gsid = {12137067856305542323}, + gscites = {53}, + ss_id = {5d3d4d3b4197a9f121cd1a287dc9a80c5dfbb1ad}, + all_ss_ids = {['5d3d4d3b4197a9f121cd1a287dc9a80c5dfbb1ad']}, +} + +@article{Hols18, + author = {H.M. van der Holst and A.M. Tuladhar and V. Zerbi and I.W.M. van Uden and K.F. de Laat and E.M.C. van Leijsen and M. Ghafoorian and B. Platel and M.I. Bergkamp and A.G.W. van Norden and D.G. Norris}, + title = {White matter changes and gait decline in cerebral small vessel disease}, + journal = NEUICL, + year = {2018}, + volume = {17}, + pages = {731-738}, + doi = {10.1016/j.nicl.2017.12.007}, + abstract = {The relation between progression of cerebral small vessel disease (SVD) and gait decline is uncertain, and diffusion tensor imaging (DTI) studies on gait decline are lacking. We therefore investigated the longitudinal associations between (micro) structural brain changes and gait decline in SVD using DTI. 275 participants were included from the Radboud University Nijmegen Diffusion tensor and Magnetic resonance imaging Cohort (RUN DMC), a prospective cohort of participants with cerebral small vessel disease aged 50-85 years. Gait (using GAITRite) and magnetic resonance imaging measures were assessed during baseline (2006-2007) and follow-up (2011 - 2012). 
Linear regression analysis was used to investigate the association between changes in conventional magnetic resonance and diffusion tensor imaging measures and gait decline. Tract-based spatial statistics analysis was used to investigate region-specific associations between changes in white matter integrity and gait decline. 56.2% were male, mean age was 62.9 years (SD8.2), mean follow-up duration was 5.4 years (SD0.2) and mean gait speed decline was 0.2 m/s (SD0.2). Stride length decline was associated with white matter atrophy (b = 0.16, p = 0.007), and increase in mean white matter radial diffusivity and mean diffusivity, and decrease in mean fractional anisotropy (respectively, b = - 0.14, p = 0.009; b = - 0.12, p = 0.018; b = 0.10, p = 0.049), independent of age, sex, height, follow-up duration and baseline stride length. Tract-based spatial statistics analysis showed significant associations between stride length decline and fractional anisotropy decrease and mean diffusivity increase (primarily explained by radial diffusivity increase) in multiple white matter tracts, with the strongest associations found in the corpus callosum and corona radiata, independent of traditional small vessel disease markers. White matter atrophy and loss of white matter integrity are associated with gait decline in older adults with small vessel disease after 5 years of follow-up. These findings suggest that progression of SVD might play an important role in gait decline.}, + file = {Hols18.pdf:pdf\\Hols18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29270357}, + gsid = {1964612976773745344}, + gscites = {64}, + ss_id = {4458f33f0650bba1f12dc16956608386c46b76b0}, + all_ss_ids = {['4458f33f0650bba1f12dc16956608386c46b76b0']}, +} + +@conference{Hoop08, + author = {B. de Hoop and H. Gietema and B. van Ginneken and P. Zanen and M. Prokop}, + title = {Variability of semi-automated pulmonary nodule volume measurements: {A} comparison of 6 lung nodule evaluation software packages}, + booktitle = RSNA, + year = {2008}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Hoop08a, + author = {B. de Hoop and B. van Ginneken and C. M. Schaefer-Prokop and P. A. de Jong and H. Gietema and M. Prokop}, + title = {Workup of suspicious lesions on digital chest radiography: estimation of the number of unnecessary follow-up {CT} scans in relation to the threshold of radiological suspicion on chest radiographs}, + booktitle = RSNA, + year = {2008}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Hoop09, + author = {B. de Hoop and H. Gietema and B. van Ginneken and P. Zanen and G. Groenewegen and M. Prokop}, + title = {A comparison of six software packages for evaluation of solid lung nodules using semi-automated volumetry: what is the minimum increase in size to detect growth in repeated {CT} examinations}, + journal = ER, + year = {2009}, + volume = {19}, + pages = {800--808}, + doi = {10.1007/s00330-008-1229-x}, + abstract = {{W}e compared interexamination variability of {CT} lung nodule volumetry with six currently available semi-automated software packages to determine the minimum change needed to detect the growth of solid lung nodules. {W}e had ethics committee approval. {T}o simulate a follow-up examination with zero growth, we performed two low-dose unenhanced {CT} scans in 20 patients referred for pulmonary metastases. {B}etween examinations, patients got off and on the table. {V}olumes of all pulmonary nodules were determined on both examinations using six nodule evaluation software packages. 
{V}ariability (upper limit of the 95\% confidence interval of the {B}land-{A}ltman plot) was calculated for nodules for which segmentation was visually rated as adequate. {W}e evaluated 214 nodules (mean diameter 10.9 mm, range 3.3 mm-30.0 mm). {S}oftware packages provided adequate segmentation in 71\% to 86\% of nodules (p < 0.001). {I}n case of adequate segmentation, variability in volumetry between scans ranged from 16.4\% to 22.3\% for the various software packages. {V}ariability with five to six software packages was significantly less for nodules >or=8 mm in diameter (range 12.9\%-17.1\%) than for nodules <8 mm (range 18.5\%-25.6\%). {S}egmented volumes of each package were compared to each of the other packages. {S}ystematic volume differences were detected in 11/15 comparisons. {T}his hampers comparison of nodule volumes between software packages.}, + file = {Hoop09.pdf:pdf\\Hoop09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {19018537}, + month = {11}, + gsid = {17148475176430192127}, + gscites = {154}, + ss_id = {6845a171a50763bf6409c2729dc4d368860e0baf}, + all_ss_ids = {['6845a171a50763bf6409c2729dc4d368860e0baf']}, +} + +@conference{Hoop09a, + author = {B. de Hoop and P. Zanen and H. Gietema and B. van Ginneken and H. Groen and M. Prokop}, + title = {The {P}redictive {V}alue of {CT} {Q}uantified {P}ulmonary {E}mphysema on the {D}ecline of {L}ung {F}unction in {C}hronic {S}mokers: {R}esults of a {L}ong-term {F}ollow-up {S}tudy}, + booktitle = RSNA, + year = {2009}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Hoop10, + author = {B. de Hoop and H. Gietema and S. van de Vorst and K. Murphy and R. J. van Klaveren and M. Prokop}, + title = {Pulmonary ground-glass nodules: increase in mass as an early indicator of growth}, + journal = Radiology, + year = {2010}, + volume = {255}, + pages = {199--206}, + doi = {10.1148/radiol.09090571}, + abstract = {{PURPOSE}: {T}o compare manual measurements of diameter, volume, and mass of pulmonary ground-glass nodules ({GGN}s) to establish which method is best for identifying malignant {GGN}s by determining change across time. {MATERIALS} {AND} {METHODS}: {I}n this ethics committee-approved retrospective study, baseline and follow-up {CT} examinations of 52 {GGN}s detected in a lung cancer screening trial were included, resulting in 127 {GGN} data sets for evaluation. {T}wo observers measured {GGN} diameter with electronic calipers, manually outlined {GGN}s to obtain volume and mass, and scored whether a solid component was present. {O}bserver 1 repeated all measurements after 2 months. {C}oefficients of variation and limits of agreement were calculated by using {B}land-{A}ltman methods. {I}n a subgroup of {GGN}s containing all resected malignant lesions, the ratio between intraobserver variability and growth (growth-to-variability ratio) was calculated for each measurement technique. {I}n this subgroup, the mean time for growth to exceed the upper limit of agreement of each measurement technique was determined. {RESULTS}: {T}he kappa values for intra- and interobserver agreement for identifying a solid component were 0.55 and 0.38, respectively. {I}ntra- and interobserver coefficients of variation were smallest for {GGN} mass ({P} < .001). {T}hirteen malignant {GGN}s were resected. {M}ean growth-to-variability ratios were 11, 28, and 35 for diameter, volume, and mass, respectively ({P} = .03); mean times required for growth to exceed the upper limit of agreement were 715, 673, and 425 days, respectively ({P} = .02). 
{CONCLUSION}: {M}ass measurements can enable detection of growth of {GGN}s earlier and are subject to less variability than are volume or diameter measurements.}, + file = {Hoop10.pdf:pdf\\Hoop10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {20123896}, + month = {4}, + gsid = {15023631776439794913}, + gscites = {199}, +} + +@article{Hoop10a, + author = {B. de Hoop and C. M. Schaefer-Prokop and H. A. Gietema and P. A. de Jong and B. van Ginneken and R. J. van Klaveren and M. Prokop}, + title = {Screening for lung cancer with digital chest radiography: sensitivity and number of secondary work-up {CT} examinations}, + journal = Radiology, + year = {2010}, + volume = {255}, + pages = {629--637}, + doi = {10.1148/radiol.09091308}, + abstract = {{PURPOSE}: {T}o estimate the performance of digital chest radiography for detection of lung cancer. {MATERIALS} {AND} {METHODS}: {T}he study had ethics committee approval, and a nested case-control design was used and included 55 patients with lung cancer detected at computed tomography ({CT}) and confirmed with histologic examination and a sample of 72 of 4873 control subjects without nodules at {CT}. {A}ll patients underwent direct-detector digital chest radiography in two projections within 2 months of the screening {CT}. {F}our radiologists with varying experience identified and localized potential cancers on chest radiographs by using a confidence scale of level 1 (no lesion) to 5 (definite lesion). {L}ocalization receiver operating characteristic ({ROC}) analysis was performed. {O}n the basis of the assumption that suspicious lesions seen at chest radiography would lead to further work-up with {CT}, the number of work-up {CT} examinations per detected cancer ({CT} examinations per cancer) was calculated at various confidence levels for the screening population (cancer rate in study population, 1.3\%). {RESULTS}: {T}umor size ranged from 6.8 to 50.7 mm (median, 11.8 mm). {A}reas under the localization {ROC} curve ranged from 0.52 to 0.69. {D}etection rates substantially varied with the observers' experience and confidence level: {A}t a confidence level of 5, detection rates ranged from 18\% at one {CT} examination per cancer to 53\% at 13 {CT} examinations per cancer. {A}t a confidence level of 2 or higher, detection rates ranged from 94\% at 62 {CT} examinations per cancer to 78\% at 44 {CT} examinations per cancer. {CONCLUSION}: {A} detection rate of 94\% for lung tumors with a diameter of 6.8-50.7 mm found at {CT} screening was achievable with chest radiography only at the expense of a high false-positive rate and an excessive number of work-up {CT} examinations. {D}etection performance is strongly observer dependent.}, + file = {Hoop10a.pdf:pdf\\Hoop10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {20413773}, + month = {5}, + gsid = {14738301691817204938}, + gscites = {39}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/89333}, +} + +@phdthesis{Hoop10b, + author = {B. de Hoop}, + title = {Lung {C}ancer {S}creening: {R}adiological {A}spects}, + year = {2010}, + url = {http://igitur-archive.library.uu.nl/dissertations/2010-0517-200217/UUindex.html}, + abstract = {{M}ultiple lung cancer screening studies are currently being conducted to study whether lung cancer screening with {C}omputed {T}omography ({CT}) can decrease lung cancer mortality. {T}his thesis addresses radiological methods that can increase efficacy and efficiency of lung cancer screening. {D}ifferent imaging modalities were tested. 
{C}ompared to {CT}, chest radiography ({CXR}) has the advantage of low costs, low radiation and easy accessibility. {D}igitalization of {CXR} has improved visualization of pulmonary structures. {I}n direct comparison however, digital {CXR} proved far less efficient than low-dose {CT} for the purpose of lung cancer screening. {T}he radiologists primarily failed to recognize, rather than overlook, malignancies that were being missed on {CXR}. {L}ess than 2% of nodules that are found during {CT} screening prove malignant. {N}odules can be differentiated based on growth rate or morphology. {G}rowth rates are expressed in volume doubling times. {R}eproducibility of measurements performed by multiple volumetry software packages was tested. {R}eproducibility of volumetry proved far better than measurements of nodule diameter. {T}he measurement of nodule volume is insufficient for growth measurements in ground glass nodules ({GGN}s), as these lesions can also grow by appearance or increase of a solid component. {M}easurement of {GGN} mass is introduced as a new method and proved to have superior reproducibility compared to volumetry in these lesions. {I}t may help to detect growth in {GGN}s earlier. {M}orphology can also be used to differentiate nodules. {W}e found that fissure attachment in nodules with a smooth margin indicates benignancy. {T}wenty percent of all screening-detected nodules showed these characteristics. {N}one of them proved malignant during follow-up. {T}he combination of ground glass appearance and growth proved a strong sign of malignancy, but these {GGN}s tend to grow very slow. {F}ollow-up may be a valid option to monitor changes in such lesions, but resection should be considered in any growing {GGN}. {T}he mortality reduction intended by lung cancer screening could possibly be increased when screening is expanded to other smoking-induced diseases like {COPD}. {T}his thesis addresses the automatic detection and quantification of pulmonary emphysema in smokers. {A} follow-up study was performed. {A} greater degree of emphysema was associated with stronger decline of lung function over time. {T}his suggests that quantification of {CT}-detected emphysema may aid to indentify non-obstructed individuals who will develop {COPD}.}, + copromotor = {H. A. Gietema and B. van Ginneken}, + file = {Hoop10b.pdf:pdf\\Hoop10b.pdf:PDF}, + optnote = {DIAG, LungCancerScreening, RADIOLOGY}, + promotor = {W. M. Prokop and J. W. J. Lammers}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Hoop10c, + author = {B. de Hoop and D. W. de Boo and H. A. Gietema and F. van Hoorn and B. Mearadji and L. Schijf and B. van Ginneken and M. Prokop and C. Schaefer-Prokop}, + title = {Computer-aided Detection of Lung Cancer on Chest Radiographs: Effect on Observer Performance}, + journal = Radiology, + year = {2010}, + volume = {257}, + pages = {532--540}, + doi = {10.1148/radiol.10092437}, + abstract = {Purpose: To assess how computer-aided detection (CAD) affects reader performance in detecting early lung cancer on chest radiographs. Materials and Methods: In this ethics committee-approved study, 46 individuals with 49 computed tomographically (CT)-detected and histologically proved lung cancers and 65 patients without nodules at CT were retrospectively included. All subjects participated in a lung cancer screening trial. Chest radiographs were obtained within 2 months after screening CT. 
Four radiology residents and two experienced radiologists were asked to identify and localize potential cancers on the chest radiographs, first without and subsequently with the use of CAD software. A figure of merit was calculated by using free-response receiver operating characteristic analysis. Results: Tumor diameter ranged from 5.1 to 50.7 mm (median, 11.8 mm). Fifty-one percent (22 of 49) of lesions were subtle and detected by two or fewer readers. Stand-alone CAD sensitivity was 61\%, with an average of 2.4 false-positive annotations per chest radiograph. Average sensitivity was 63\% for radiologists at 0.23 false-positive annotations per chest radiograph and 49\% for residents at 0.45 false-positive annotations per chest radiograph. Figure of merit did not change significantly for any of the observers after using CAD. CAD marked between five and 16 cancers that were initially missed by the readers. These correctly CAD-depicted lesions were rejected by radiologists in 92\% of cases and by residents in 77\% of cases. Conclusion: The sensitivity of CAD in identifying lung cancers depicted with CT screening was similar to that of experienced radiologists. However, CAD did not improve cancer detection because, especially for subtle lesions, observers were unable to sufficiently differentiate true-positive from false-positive annotations.}, + file = {Hoop10c.pdf:pdf\\Hoop10c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {20807851}, + month = {11}, + gsid = {12954940021726127037}, + gscites = {57}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/89546}, +} + +@article{Hoop12, + author = {de Hoop, B. and van Ginneken, B. and Gietema, H. and Prokop, M.}, + title = {Pulmonary Perifissural Nodules on {CT} Scans: Rapid Growth Is Not a Predictor of Malignancy}, + journal = Radiology, + year = {2012}, + volume = {265}, + number = {2}, + pages = {611-616}, + doi = {10.1148/radiol.12112351}, + abstract = {Purpose:To assess the prevalence, natural course, and malignancy rate of perifissural nodules (PFNs) in smokers participating in a lung cancer screening trial.Materials and Methods:As part of the ethics-committee approved Dutch-Belgian Randomised Lung Cancer Multi-Slice Screening Trial (NELSON), computed tomography (CT) was used to screen 2994 current or former heavy smokers, aged 50-74 years, for lung cancer. CT was repeated after 1 and 3 years, with additional follow-up CT scans if necessary. All baseline CT scans were screened for nodules. Nodule volume was determined with automated volumetric analysis. Homogeneous solid nodules, attached to a fissure with a lentiform or triangular shape, were classified as PFNs. Nodules were considered benign if they did not grow during the total follow-up period or were proved to be benign in a follow-up by a pulmonologist. Prevalence, growth, and malignancy rate of PFNs were assessed.Results:At baseline screening, 4026 nodules were detected in 1729 participants, and 19.7\% (794 of 4026) of the nodules were classified as PFNs. The mean size of the PFNs was 4.4 mm (range: 2.8-10.6 mm) and the mean volume was 43 mm(3) (range: 13-405 mm(3)). None of the PFNs were found to be malignant during follow-up. Between baseline and the first follow-up CT scan, 15.5\% (123 of 794) were found to have grown, and 8.3\% (66 of 794) had a volume doubling time of less than 400 days. One PFN was resected and proved to be a lymph node.Conclusion:PFNs are frequently found at CT scans for lung cancer. 
They can show growth rates in the range of malignant nodules, but none of the PFNs in the present study turned out to be malignant. Recognition of PFNs can reduce the number of follow-up examinations required for the workup of suspicious nodules. © RSNA, 2012.}, + file = {Hoop12.pdf:pdf\\Hoop12.pdf:PDF}, + optnote = {DIAG}, + pmid = {22929331}, + month = {11}, + gsid = {18085872374519145921}, + gscites = {148}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/109230}, + ss_id = {846de900573aebb1a0f2b36dfd7d080cad255552}, + all_ss_ids = {['846de900573aebb1a0f2b36dfd7d080cad255552']}, +} + +@article{Hore13a, + author = {Horeweg, Nanda and van der Aalst, Carlijn M. and Vliegenthart, Rozemarijn and Zhao, Yingru and Xie, Xueqian and Scholten, Ernst Th and Mali, Willem and Thunnissen, Erik and Weenink, Carla and Groen, Harry J M. and Lammers, Jan-Willem J. and Nackaerts, Kristiaan and van Rosmalen, Joost and Oudkerk, Matthijs and de Koning, Harry J.}, + title = {Volumetric computed tomography screening for lung cancer: three rounds of the NELSON trial}, + journal = ERJ, + year = {2013}, + volume = {42}, + pages = {1659--1667}, + doi = {10.1183/09031936.00197712}, + abstract = {Several medical associations recommended lung cancer screening by low-dose computed tomography scanning for high-risk groups. Counselling of the candidates on the potential harms and benefits and their lung cancer risk is a prerequisite for screening. In the NELSON trial, screenings are considered positive for (part) solid lung nodules with a volume >500 mm(3) and for (part) solid or nonsolid nodules with a volume-doubling time <400 days. For this study, the performance of the NELSON strategy in three screening rounds was evaluated and risk calculations were made for a follow-up period of 5.5 years. 458 (6\%) of the 7582 participants screened had a positive screen result and 200 (2.6\%) were diagnosed with lung cancer. The positive screenings had a predictive value of 40.6\% and only 1.2\% of all scan results were false-positive. In a period of 5.5 years, the risk of screen-detected lung cancer strongly depends on the result of the first scan: 1.0\% after a negative baseline result, 5.7\% after an indeterminate baseline and 48.3\% after a positive baseline. The screening strategy yielded few positive and false-positive scans with a reasonable positive predictive value. The 5.5-year lung cancer risk calculations aid clinicians in counselling candidates for lung cancer screening with low-dose computed tomography.}, + file = {Hore13a.pdf:pdf\\Hore13a.pdf:PDF}, + optnote = {DIAG}, + number = {6}, + pmid = {23845716}, + month = {7}, +} + +@article{Hore13b, + author = {Horeweg, Nanda and van der Aalst, Carlijn M. and Thunnissen, Erik and Nackaerts, Kristiaan and Weenink, Carla and Groen, Harry J M. and Lammers, Jan-Willem J. and Aerts, Joachim G. and Scholten, Ernst T. and van Rosmalen, Joost and Mali, Willem and Oudkerk, Matthijs and de Koning, Harry J.}, + title = {Characteristics of lung cancers detected by computer tomography screening in the randomized NELSON trial}, + journal = AJRCCM, + year = {2013}, + volume = {187}, + pages = {848--854}, + doi = {10.1164/rccm.201209-1651OC}, + abstract = {The NELSON (Nederlands Leuvens Longkanker Screenings Onderzoek) trial is, with 15,822 participants, the largest European lung cancer computer tomography screening trial. 
A volumetry-based screening strategy, stringent criteria for a positive screening, and an increasing length of screening interval are particular features of the NELSON trial.To determine the effect of stringent referral criteria and increasing screening interval on the characteristics of screen-detected lung cancers, and to compare this across screening rounds, between sexes, and with other screening trials.All NELSON participants with screen-detected lung cancer in the first three rounds were included. Lung cancer stage at diagnosis, histological subtype, and tumor localization were compared between the screening rounds, the sexes, and with other screening trials.In the first three screening rounds, 200 participants were diagnosed with 209 lung cancers. Of these lung cancers, 70.8\% were diagnosed at stage I and 8.1\% at stage IIIB-IV, and 51.2\% were adenocarcinomas. There was no significant difference in cancer stage, histology, or tumor localization across the screening rounds. Women were diagnosed at a significantly more favorable cancer stage than men. Compared with other trials, the screen-detected lung cancers of the NELSON trial were relatively more often diagnosed at stage I and less often at stage IIIB-IV.Despite stringent criteria for a positive screening, an increasing length of screening interval, and few female participants, the screening strategy of the NELSON trial resulted in a favorable cancer stage distribution at diagnosis, which is essential for the effectiveness of our screening strategy. Clinical trial registered with www.trialregister.nl (ISRCTN63545820).}, + file = {Hore13b.pdf:pdf\\Hore13b.pdf:PDF}, + optnote = {DIAG}, + number = {8}, + pmid = {23348977}, + month = {4}, +} + +@article{Hore14, + author = {Horeweg, Nanda and van Rosmalen, Joost and Heuvelmans, Marjolein A and van der Aalst, Carlijn M and Vliegenthart, Rozemarijn and Scholten, Ernst Th and ten Haaf, Kevin and Nackaerts, Kristiaan and Lammers, Jan-Willem J and Weenink, Carla and others}, + title = {Lung cancer probability in patients with CT-detected pulmonary nodules: a prespecified analysis of data from the NELSON trial of low-dose CT screening}, + journal = LANCETO, + year = {2014}, + volume = {15}, + number = {12}, + pages = {1332--1341}, + doi = {10.1016/S1470-2045(14)70389-4}, + file = {Hore14.pdf:pdf\\Hore14.pdf:PDF}, + abstract = {The main challenge in CT screening for lung cancer is the high prevalence of pulmonary nodules and the relatively low incidence of lung cancer. Management protocols use thresholds for nodule size and growth rate to determine which nodules require additional diagnostic procedures, but these should be based on individuals' probabilities of developing lung cancer. In this prespecified analysis, using data from the NELSON CT screening trial, we aimed to quantify how nodule diameter, volume, and volume doubling time affect the probability of developing lung cancer within 2 years of a CT scan, and to propose and evaluate thresholds for management protocols.Eligible participants in the NELSON trial were those aged 50-75 years, who have smoked 15 cigarettes or more per day for more than 25 years, or ten cigarettes or more for more than 30 years and were still smoking, or had stopped smoking less than 10 years ago. Participants were randomly assigned to low-dose CT screening at increasing intervals, or no screening. 
We included all participants assigned to the screening group who had attended at least one round of screening, and whose results were available from the national cancer registry database. We calculated lung cancer probabilities, stratified by nodule diameter, volume, and volume doubling time and did logistic regression analysis using diameter, volume, volume doubling time, and multinodularity as potential predictor variables. We assessed management strategies based on nodule threshold characteristics for specificity and sensitivity, and compared them to the American College of Chest Physicians (ACCP) guidelines. The NELSON trial is registered at www.trialregister.nl, number ISRCTN63545820. Volume, volume doubling time, and volumetry-based diameter of 9681 non-calcified nodules detected by CT screening in 7155 participants in the screening group of NELSON were used to quantify lung cancer probability. Lung cancer probability was low in participants with a nodule volume of 100 mm(3) or smaller (0.6\% [95\% CI 0.4-0.8]) or maximum transverse diameter smaller than 5 mm (0.4\% [0.2-0.7]), and not significantly different from participants without nodules (0.4\% [0.3-0.6], p=0.17 and p=1.00, respectively). Lung cancer probability was intermediate (requiring follow-up CT) if nodules had a volume of 100-300 mm(3) (2.4\% [95\% CI 1.7-3.5]) or a diameter 5-10 mm (1.3\% [1.0-1.8]). Volume doubling time further stratified the probabilities: 0.8\% (95\% CI 0.4-1.7) for volume doubling times 600 days or more, 4.0\% (1.8-8.3) for volume doubling times 400-600 days, and 9.9\% (6.9-14.1) for volume doubling times of 400 days or fewer. Lung cancer probability was high for participants with nodule volumes 300 mm(3) or bigger (16.9\% [95\% CI 14.1-20.0]) or diameters 10 mm or bigger (15.2\% [12.7-18.1]). The simulated ACCP management protocol yielded a sensitivity and specificity of 90.9\% (95\% CI 81.2-96.1), and 87.2\% (86.4-87.9), respectively. A diameter-based protocol with volumetry-based nodule diameter yielded a higher sensitivity (92.4\% [95\% CI 83.1-97.1]), and a higher specificity (90.0\% [89.3-90.7]). A volume-based protocol (with thresholds based on lung cancer probability) yielded the same sensitivity as the ACCP protocol (90.9\% [95\% CI 81.2-96.1]), and a higher specificity (94.9\% [94.4-95.4]). Small nodules (those with a volume <100 mm(3) or diameter <5 mm) are not predictive for lung cancer. Immediate diagnostic evaluation is necessary for large nodules (>=300 mm(3) or >=10 mm). Volume doubling time assessment is advocated only for intermediate-sized nodules (with a volume ranging between 100-300 mm(3) or diameter of 5-10 mm). Nodule management protocols based on these thresholds performed better than the simulated ACCP nodule protocol. Zorgonderzoek Nederland Medische Wetenschappen and Koningin Wilhelmina Fonds.}, + optnote = {DIAG}, + publisher = {Elsevier}, + pmid = {25282285}, + month = {11}, +} + +@article{Hore14a, + author = {Horeweg, Nanda and Scholten, Ernst Th and de Jong, Pim A. and van der Aalst, Carlijn M. and Weenink, Carla and Lammers, Jan-Willem J. and Nackaerts, Kristiaan and Vliegenthart, Rozemarijn and Ten Haaf, Kevin and Yousaf-Khan, Uraujh A. and Heuvelmans, Marjolein A. 
and Thunnissen, Erik and Oudkerk, Matthijs and Mali, Willem and de Koning, Harry J.}, +  title = {Detection of lung cancer through low-dose {CT} screening (NELSON): a prespecified analysis of screening test performance and interval cancers}, +  journal = LANCETO, +  year = {2014}, +  volume = {15}, +  pages = {1342--1350}, +  doi = {10.1016/S1470-2045(14)70387-0}, +  abstract = {Low-dose CT screening is recommended for individuals at high risk of developing lung cancer. However, CT screening does not detect all lung cancers: some might be missed at screening, and others can develop in the interval between screens. The NELSON trial is a randomised trial to assess the effect of screening with increasing screening intervals on lung cancer mortality. In this prespecified analysis, we aimed to assess screening test performance, and the epidemiological, radiological, and clinical characteristics of interval cancers in NELSON trial participants assigned to the screening group. Eligible participants in the NELSON trial were those aged 50-75 years, who had smoked 15 or more cigarettes per day for more than 25 years or ten or more cigarettes for more than 30 years, and were still smoking or had quit less than 10 years ago. We included all participants assigned to the screening group who had attended at least one round of screening. Screening test results were based on volumetry using a two-step approach. Initially, screening test results were classified as negative, indeterminate, or positive based on nodule presence and volume. Subsequently, participants with an initial indeterminate result underwent follow-up screening to classify their final screening test result as negative or positive, based on nodule volume doubling time. We obtained information about all lung cancer diagnoses made during the first three rounds of screening, plus an additional 2 years of follow-up from the national cancer registry. We determined epidemiological, radiological, participant, and tumour characteristics by reassessing medical files, screening CTs, and clinical CTs. The NELSON trial is registered at www.trialregister.nl, number ISRCTN63545820. 15 822 participants were enrolled in the NELSON trial, of whom 7915 were assigned to low-dose CT screening with increasing interval between screens, and 7907 to no screening. We included 7155 participants in our study, with median follow-up of 8.16 years (IQR 7.56-8.56). 187 (3\%) of 7155 screened participants were diagnosed with 196 screen-detected lung cancers, and another 34 (<1\%; 19 [56\%] in the first year after screening, and 15 [44\%] in the second year after screening) were diagnosed with 35 interval cancers. For the three screening rounds combined, with a 2-year follow-up, sensitivity was 84.6\% (95\% CI 79.6-89.2), specificity was 98.6\% (95\% CI 98.5-98.8), positive predictive value was 40.4\% (95\% CI 35.9-44.7), and negative predictive value was 99.8\% (95\% CI 99.8-99.9). Retrospective assessment of the last screening CT and clinical CT in 34 patients with interval cancer showed that interval cancers were not visible in 12 (35\%) cases. In the remaining cases, cancers were visible when retrospectively assessed, but were not diagnosed because of radiological detection and interpretation errors (17 [50\%]), misclassification by the protocol (two [6\%]), participant non-compliance (two [6\%]), and non-adherence to protocol (one [3\%]).
Compared with screen-detected cancers, interval cancers were diagnosed at more advanced stages (29 [83\%] of 35 interval cancers vs 44 [22\%] of 196 screen-detected cancers diagnosed in stage III or IV; p<0.0001), were more often small-cell carcinomas (seven [20\%] vs eight [4\%]; p=0.003) and less often adenocarcinomas (nine [26\%] vs 102 [52\%]; p=0.005). Lung cancer screening in the NELSON trial yielded high specificity and sensitivity, with only a small number of interval cancers. The results of this study could be used to improve screening algorithms, and reduce the number of missed cancers. Zorgonderzoek Nederland Medische Wetenschappen and Koningin Wilhelmina Fonds.}, +  file = {Hore14a.pdf:pdf\\Hore14a.pdf:PDF}, +  optnote = {DIAG}, +  number = {12}, +  pmid = {25282284}, +  month = {11}, +} + +@inproceedings{Horn04, +  author = {R. Hornero and C. I. S\'{a}nchez and M. I. L\'{o}pez}, +  title = {Automated retinal image analysis in a teleophthalmology diabetic retinopathy screening program}, +  booktitle = {Telemedicine Journal and e-Health}, +  year = {2004}, +  optnote = {DIAG, RADIOLOGY}, +  gsid = {11503276467422793917}, +  gscites = {1}, +} + +@inproceedings{Horn04a, +  author = {R. Hornero and D. Ab\'{a}solo and J. Poza and C. I. S\'{a}nchez and P. Espino and R. de la Rosa}, +  title = {Use of wavelets packets to compare electroencephalogram signal ({EEG}) in patients with {A}lzheimer's disease and control subjects}, +  booktitle = {International Workshop of Systems, Signals and Image Processing}, +  year = {2004}, +  pages = {35--38}, +  optnote = {DIAG, RADIOLOGY}, +} + +@article{Horn06, +  author = {R. Hornero and D. Ab\'{a}solo and N. Jimeno and C. I. S\'{a}nchez and J. Poza and M. Aboy}, +  title = {Variability, regularity, and complexity of time series generated by schizophrenic patients and control subjects}, +  journal = TBME, +  year = {2006}, +  volume = {53}, +  pages = {210--218}, +  doi = {10.1109/TBME.2005.862547}, +  abstract = {{W}e analyzed time series generated by 20 schizophrenic patients and 20 sex- and age-matched control subjects using three nonlinear methods of time series analysis as test statistics: central tendency measure ({CTM}) from the scatter plots of first differences of data, approximate entropy ({A}p{E}n), and {L}empel-{Z}iv ({LZ}) complexity. {W}e divided our data into a training set (10 patients and 10 control subjects) and a test set (10 patients and 10 control subjects). {T}he training set was used for algorithm development and optimum threshold selection. {E}ach method was assessed prospectively using the test dataset. {W}e obtained 80\% sensitivity and 90\% specificity with {LZ} complexity, 90\% sensitivity, and 60\% specificity with {A}p{E}n, and 70\% sensitivity and 70\% specificity with {CTM}. {O}ur results indicate that there exist differences in the ability to generate random time series between schizophrenic subjects and controls, as estimated by the {CTM}, {A}p{E}n, and {LZ}.
{T}his finding agrees with most previous results showing that schizophrenic patients are characterized by less complex neurobehavioral and neuropsychologic measurements.}, + file = {Horn06.pdf:pdf\\Horn06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {16485749}, + month = {2}, + gsid = {1572888595060064062}, + gscites = {83}, +} + +@inproceedings{Hoss19, + author = {Matin Hosseinzadeh and Patrick Brand and Henkjan Huisman}, + title = {Effect of Adding Probabilistic Zonal Prior in Deep Learning-based Prostate Cancer Detection}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=SkxAwFtEqV}, + abstract = {We propose and evaluate a novel method for automatically detecting clinically significant prostate cancer (csPCa) in bi-parametric magnetic resonance imaging (bpMRI). Prostate zones play an important role in the assessment of prostate cancer on MRI. We hypothesize that the inclusion of zonal information can improve the performance of a deep learning based csPCa lesion detection model. However, segmentation of prostate zones is challenging and therefore deterministic models are inaccurate. Hence, we investigated probabilistic zonal segmentation. Our baseline detection model is a 2DUNet trained to produce a csPCa heatmap followed by a 3D detector. We experimented with the integration of zonal prior information by fusing the output of an anisotropic 3DUNet trained to produce either a deterministic or probabilistic map for each prostate zone. We also investigate the effect of early or late fusion on csPCa detection. All methods were trained and tested on 848 bpMRI. The results show that fusing zonal prior knowledge improves the baseline detection model with a preference for probabilistic over deterministic zonal segmentation.}, + file = {:pdf/Hoss19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {11874902790936487952}, + gscites = {12}, + ss_id = {d298ada2cac3052301a355fb39cf5fe6ae0fe41f}, + all_ss_ids = {['d298ada2cac3052301a355fb39cf5fe6ae0fe41f']}, +} + +@article{Hoss21, + author = {Hosseinzadeh, Matin and Saha, Anindo and Brand, Patrick and Slootweg, Ilse and de Rooij, Maarten and Huisman, Henkjan}, + title = {Deep learning-assisted prostate cancer detection on bi-parametric MRI: minimum training data size requirements and effect of prior knowledge}, + journal = ER, + year = {2021}, + doi = {10.1007/s00330-021-08320-y}, + url = {https://doi.org/10.1007/s00330-021-08320-y}, + abstract = {Objectives + To assess Prostate Imaging Reporting and Data System (PI-RADS)-trained deep learning (DL) algorithm performance and to investigate the effect of data size and prior knowledge on the detection of clinically significant prostate cancer (csPCa) in biopsy-naive men with a suspicion of PCa. + + Methods + Multi-institution data included 2734 consecutive biopsy-naive men with elevated PSA levels (>= 3 ng/mL) that underwent multi-parametric MRI (mpMRI). mpMRI exams were prospectively reported using PI-RADS v2 by expert radiologists. A DL framework was designed and trained on center 1 data (n = 1952) to predict PI-RADS >= 4 (n = 1092) lesions from bi-parametric MRI (bpMRI). Experiments included varying the number of cases and the use of automatic zonal segmentation as a DL prior. Independent center 2 cases (n = 296) that included pathology outcome (systematic and MRI targeted biopsy) were used to compute performance for radiologists and DL. 
The performance of detecting PI-RADS 4-5 and Gleason > 6 lesions was assessed on 782 unseen cases (486 center 1, 296 center 2) using free-response ROC (FROC) and ROC analysis. + + Results + The DL sensitivity for detecting PI-RADS >= 4 lesions was 87% (193/223, 95% CI: 82-91) at an average of 1 false positive (FP) per patient, and an AUC of 0.88 (95% CI: 0.84-0.91). The DL sensitivity for the detection of Gleason > 6 lesions was 85% (79/93, 95% CI: 77-83) @ 1 FP compared to 91% (85/93, 95% CI: 84-96) @ 0.3 FP for a consensus panel of expert radiologists. Data size and prior zonal knowledge significantly affected performance (4%, p<0.05). + + Conclusion + PI-RADS-trained DL can accurately detect and localize Gleason > 6 lesions. DL could reach expert performance using substantially more than 2000 training cases, and DL zonal segmentation.}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/249485}, + ss_id = {21face92913ea6919840f59cd3cd5e84e70ebc7d}, + all_ss_ids = {['21face92913ea6919840f59cd3cd5e84e70ebc7d']}, + gscites = {42}, +} + +@mastersthesis{Hout20, + author = {Thijs van den Hout}, + title = {Automatic muscle and fat segmentation in 3D abdominal CT images for body composition assessment}, + abstract = {Body composition is an informative biomarker in the treatment of cancer. In particular, low muscle mass has been associated with higher chemotherapy toxicity, shorter time to tumor progression, poorer surgical outcomes, impaired functional status, and shorter survival. However, because CT-based body composition assessment requires outlining the different tissues in the image, which is timeconsuming, its practical value is currently limited. To form an estimate of body composition, different tissues are often segmented manually in a single 2D slice from the abdomen. + For use in both routine care and in research studies, automatic segmentation of the different tissue types in the abdomen is desirable. + This study focuses on the development and testing of an automatic approach to segment muscle and fat tissue in the entire abdomen. The four classes of interest are skeletal muscle (SM), inter-muscular adipose tissue (IMAT), visceral adipose tissue (VAT), and subcutaneous adipose tissue (SAT). A deep neural network is trained on two-dimensional CT slices at the level of the third lumbar vertebra. Three experiments were carried out with the goal of improving the network with information from other, unannotated data sources. Active learning methods were applied to sample additional data to annotate and include in the training of the model. The proposed algorithm combines two models to segment muscle and fat in the entire abdomen and achieves state-of-the-art results. Dice scores of 0.91, 0.84, 0.97, and 0.97 were attained for SM, IMAT, VAT, and SAT, respectively, averaged over five locations throughout the abdomen.}, + file = {Hout20.pdf:pdf/Hout20.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2020}, + journal = {Master thesis}, +} + +@article{Hu10, + author = {Y. Hu and R. van den Boom and T. Carter and Z. Taylor and D. Hawkes and H. U. Ahmed and M. Emberton and C. Allen and D. 
Barratt}, +  title = {A comparison of the accuracy of statistical models of prostate motion trained using data from biomechanical simulations}, +  journal = PBMB, +  year = {2010}, +  volume = {103}, +  pages = {262--272}, +  doi = {10.1016/j.pbiomolbio.2010.09.009}, +  abstract = {Statistical shape models (SSM) are widely used in medical image analysis to represent variability in organ shape. However, representing subject-specific soft-tissue motion using this technique is problematic for applications where imaging organ changes in an individual is not possible or impractical. One solution is to synthesise training data by using biomechanical modelling. However, for many clinical applications, generating a biomechanical model of the organ(s) of interest is a non-trivial task that requires a significant amount of user-interaction to segment an image and create a finite element mesh. In this study, we investigate the impact of reducing the effort required to generate SSMs and the accuracy with which such models can predict tissue displacements within the prostate gland due to transrectal ultrasound probe pressure. In this approach, the finite element mesh is based on a simplified geometric representation of the organs. For example, the pelvic bone is represented by planar surfaces, or the number of distinct tissue compartments is reduced. Such representations are much easier to generate from images than a geometrically accurate mesh. The difference in the median root-mean-square displacement error between different SSMs of prostate was <0.2 mm. We conclude that reducing the geometric complexity of the training model in this way made little difference to the absolute accuracy of SSMs to recover tissue displacements. The implication is that SSMs of organ motion based on simulated training data may be generated using simplified geometric representations, which are much more compatible with the time constraints of clinical workflows.}, +  file = {Hu10.pdf:pdf/Hu10.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY}, +  number = {2-3}, +  pmid = {20869389}, +  month = {12}, +} + +@article{Hu19, +  author = {Hu, Shi and Worrall, Daniel and Knegt, Stefan and Veeling, Bas and Huisman, Henkjan and Welling, Max}, +  title = {Automated deep-learning system in the assessment of MRI-visible prostate cancer: comparison of advanced zoomed diffusion-weighted imaging and conventional technique.}, +  journal = {Cancer imaging : the official publication of the International Cancer Imaging Society}, +  year = {2023}, +  abstract = {Deep-learning-based computer-aided diagnosis (DL-CAD) systems using MRI for prostate cancer (PCa) detection have demonstrated good performance. Nevertheless, DL-CAD systems are vulnerable to high heterogeneities in DWI, which can interfere with DL-CAD assessments and impair performance. This study aims to compare PCa detection of DL-CAD between zoomed-field-of-view echo-planar DWI (z-DWI) and full-field-of-view DWI (f-DWI) and find the risk factors affecting DL-CAD diagnostic efficiency. This retrospective study enrolled 354 consecutive participants who underwent MRI including T2WI, f-DWI, and z-DWI because of clinically suspected PCa. A DL-CAD was used to compare the performance of f-DWI and z-DWI both on a patient level and lesion level. We used the area under the curve (AUC) of receiver operating characteristics analysis and alternative free-response receiver operating characteristics analysis to compare the performances of DL-CAD using f-DWI and z-DWI.
The risk factors affecting the DL-CAD were analyzed using logistic regression analyses. P values less than 0.05 were considered statistically significant. DL-CAD with z-DWI had a significantly better overall accuracy than that with f-DWI both on patient level and lesion level (AUC : 0.89 vs. 0.86; AUC : 0.86 vs. 0.76; P < .001). The contrast-to-noise ratio (CNR) of lesions in DWI was an independent risk factor of false positives (odds ratio [OR] = 1.12; P < .001). Rectal susceptibility artifacts, lesion diameter, and apparent diffusion coefficients (ADC) were independent risk factors of both false positives (OR = 5.46; OR = 1.12; OR = 0.998; all P < .001) and false negatives (OR = 3.31; OR = 0.82; OR = 1.007; all P <= .03) of DL-CAD. Z-DWI has potential to improve the detection performance of a prostate MRI based DL-CAD. ChiCTR, NO. ChiCTR2100041834 . Registered 7 January 2021.}, + optnote = {DIAG, RADIOLOGY}, + month = {7}, + gsid = {4596090620314967344}, + gscites = {12}, + doi = {10.1186/s40644-023-00527-0}, + issue = {1}, + pages = {6}, + volume = {23}, + pmid = {36647150}, + file = {Hu23.pdf:pdf\\Hu23.pdf:PDF}, + ss_id = {b5c3309a499f0e8a3207f6b25b34ead5b0cbae9e}, + all_ss_ids = {['b5c3309a499f0e8a3207f6b25b34ead5b0cbae9e']}, +} + +@article{Hude20, + author = {Hudecek, Jan and Voorwerk, Leonie and van Seijen, Maartje and Nederlof, Iris and de Maaker, Michiel and van den Berg, Jose and van de Vijver, Koen K. and Sikorska, Karolina and Adams, Sylvia and Demaria, Sandra and Viale, Giuseppe and Nielsen, Torsten O. and Badve, Sunil S. and Michiels, Stefan and Symmans, William Fraser and Sotiriou, Christos and Rimm, David L. and Hewitt, Stephen M. and Denkert, Carsten and Loibl, Sibylle and Loi, Sherene and Bartlett, John M. S. and Pruneri, Giancarlo and Dillon, Deborah A. and Cheang, Maggie C. U. and Tutt, Andrew and Hall, Jacqueline A. and Kos, Zuzana and Salgado, Roberto and Kok, Marleen and Horlings, Hugo M. and Group, International Immuno-Oncology Biomarker Working}, + title = {Application of a risk-management framework for integration of stromal tumor-infiltrating lymphocytes in clinical trials.}, + doi = {10.1038/s41523-020-0155-1}, + pages = {15}, + volume = {6}, + abstract = {Stromal tumor-infiltrating lymphocytes (sTILs) are a potential predictive biomarker for immunotherapy response in metastatic triple-negative breast cancer (TNBC). To incorporate sTILs into clinical trials and diagnostics, reliable assessment is essential. In this review, we propose a new concept, namely the implementation of a risk-management framework that enables the use of sTILs as a stratification factor in clinical trials. We present the design of a biomarker risk-mitigation workflow that can be applied to any biomarker incorporation in clinical trials. We demonstrate the implementation of this concept using sTILs as an integral biomarker in a single-center phase II immunotherapy trial for metastatic TNBC (TONIC trial, NCT02499367), using this workflow to mitigate risks of suboptimal inclusion of sTILs in this specific trial. 
In this review, we demonstrate that a web-based scoring platform can mitigate potential risk factors when including sTILs in clinical trials, and we argue that this framework can be applied for any future biomarker-driven clinical trial setting.}, + file = {Hude20.pdf:pdf\\Hude20.pdf:PDF}, + journal = {NPJ breast cancer}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32436923}, + year = {2020}, + all_ss_ids = {['c52daf1cb971120c4083116cfc213acbaac6faaf', 'cde27a74addf80ca2b3385a32966d51d35e0b2ff']}, + gscites = {18}, +} + +@article{Huis01, + author = {H. J. Huisman and M. R. Engelbrecht and J. O. Barentsz}, + title = {Accurate estimation of pharmacokinetic contrast-enhanced dynamic {MRI} parameters of the prostate}, + journal = JMRI, + year = {2001}, + volume = {13}, + pages = {607--614}, + abstract = {{Q}uantitative analysis of contrast-enhanced dynamic {MR} images has potential for diagnosing prostate cancer. {C}ontemporary fast acquisition techniques can give sufficiently high temporal resolution to sample the fast dynamics observed in the prostate. {D}ata reduction for parametric visualization requires automatic curve fitting to a pharmacokinetic model, which to date has been performed using least-squares error minimization methods. {W}e observed that these methods often produce unexpectedly noisy estimates, especially for the typically fast, intermediate parameters time-to-peak and start-of-enhancement, resulting in inaccurate pharmacokinetic parameter estimates. {W}e developed a new curve fit method that focuses on the most probable slope. {A} set of 10 patients annotated using histopathology was used to compare the conventional and new methods. {T}he results show that our new method is significantly more accurate, especially in the relatively less-enhancing peripheral zone. {W}e conclude that estimation accuracy depends on the curve fit method, which is especially important when evaluating the peripheral zone of the prostate.}, + file = {Huis01.pdf:pdf\\Huis01.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {11276106}, + gsid = {17600801553193803424}, + gscites = {131}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122355}, + all_ss_ids = {['4fff96d3fa57f3a194b03f5524b9f45fc67dcb00']}, +} + +@article{Huis05, + author = {H. J. Huisman and J. J. F\"utterer and E. N. J. T. van Lin and A. Welmers and T. W. J. Scheenen and J. A. van Dalen and A. G. Visser and J. A. Witjes and J. O. Barentsz}, + title = {Prostate cancer: precision of integrating functional {MR} imaging with radiation therapy treatment by using fiducial gold markers}, + journal = Radiology, + year = {2005}, + volume = {236}, + pages = {311--317}, + doi = {10.1148/radiol.2361040560}, + abstract = {{T}he use of intensity-modulated radiation therapy for treatment of dominant intraprostatic lesions may require integration of functional magnetic resonance ({MR}) imaging with treatment-planning computed tomography ({CT}). {T}he purpose of this study was to compare prospectively the landmark and iterative closest point methods for registration of {CT} and {MR} images of the prostate gland after placement of fiducial markers. {T}he study was approved by the institutional ethics review board, and informed consent was obtained. {CT} and {MR} images were registered by using fiducial gold markers that were inserted into the prostate. {T}wo image registration methods--a commonly available landmark method and dedicated iterative closest point method--were compared. 
{P}recision was assessed for a data set of 21 patients by using five operators. {P}recision of the iterative closest point method (1.1 mm) was significantly better ({P} < .01) than that of the landmark method (2.0 mm). {F}urthermore, a method is described by which multimodal {MR} imaging data are reduced into a single interpreted volume that, after registration, can be incorporated into treatment planning.}, + file = {Huis05.pdf:pdf\\Huis05.pdf:PDF}, + optnote = {BioMR, DIAG, RADIOLOGY}, + number = {1}, + pmid = {15983070}, + month = {7}, + gsid = {1438085584005109691}, + gscites = {73}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/47372}, + ss_id = {f4695e725707a1c6f725d6243d0f02bce4e5090a}, + all_ss_ids = {['f4695e725707a1c6f725d6243d0f02bce4e5090a']}, +} + +@inproceedings{Huis07, + author = {H. Huisman and N. Karssemeijer}, + title = {Chestwall segmentation in 3{D} breast ultrasound using a deformable volume model}, + booktitle = IPMI, + year = {2007}, + series = LNCS, + pages = {245--256}, + doi = {10.1007/978-3-540-73273-0_21}, + abstract = {{A} deformable volume segmentation method is proposed to detect the breast parenchyma in frontal scanned 3{D} whole breast ultrasound. {D}eformable volumes are a viable alternative to the deformable surface paradigm in noisy images with poorly defined object boundaries. {A} deformable ultrasound volume model was developed containing breast, rib, intercostal space and thoracic shadowing. {U}sing prior knowledge about grey value statistics and shape the parameterized model deforms by optimization to match an ultrasound scan. {A}dditionally a rib shadow enhancement filter was developed based on a {H}essian sheet detector. {A}n {ROC} chestwall detection study on 88 multi-center scans (20 non-visible chestwalls) showed a significant accuracy which improved strongly using the sheet detector. {T}he results show the potential of our methodology to extract breast parenchyma which could help reduce false positives in subsequent computer aided lesion detection.}, + file = {Huis07.pdf:pdf\\Huis07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17633704}, + gsid = {9371668300991606553}, + gscites = {6}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/53276}, + ss_id = {cce569e05666a3817a93c128c705733c5c42df5d}, + all_ss_ids = {['cce569e05666a3817a93c128c705733c5c42df5d']}, +} + +@inproceedings{Huis10, + author = {H. Huisman and P. Vos and G. Litjens and T. Hambrock and J. Barentsz}, + title = {Computer aided detection of prostate cancer using t2w, {DWI} and {DCE-MRI}: methods and clinical applications}, + booktitle = {{MICCAI} {W}orkshop: {P}rostate {C}ancer {I}maging: {C}omputer {A}ided {D}iagnosis, {P}rognosis, and {I}ntervention}, + year = {2010}, + abstract = {One in 10 men will be diagnosed with prostate cancer during their life. PSA screening in combination with MR is likely to save lifes at low biopsy and overtreatment rates. Computer Aided Diagnosis for prostate MR will become mandatory in a high volume screening application. This paper presents an overview including our recent work in this area. It includes screening MR setup, quantitative imaging features, prostate segmentation, and pattern recognition.}, + file = {:pdf/Huis10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {85137dca01c20c973abc9ed419b67ab335822bf1}, + all_ss_ids = {['85137dca01c20c973abc9ed419b67ab335822bf1']}, + gscites = {2}, +} + +@conference{Huis10a, + author = {H. Huisman and J. Veltman and M. Zijp and R. Mann and R. Mus and N. 
Karssemeijer}, + title = {Dual-Time Resolution Characterization of Masses on Breast {DCEMR}}, + booktitle = RSNA, + year = {2010}, + abstract = {PURPOSE To assess the diagnostic accuracy of high resolution and transient fast DCEMR individually and both combined in discriminating benign and malignant masses using automatic computer assisted analysis. METHOD AND MATERIALS A set of 97 detected masses (fibroademas and invasive ductal carcinomas) was selected from a consecutive cohort of breast MR in 2008 and 2009. The MR scanning protocol included high temporal resolution combined with high spatial resolution imaging. The high temporal resolution images were acquired every 4.1s using a 3D TurboFLASH sequence during initial enhancement (Fast). The high spatial resolution images were acquired at a temporal resolution of 86s using a 3D FLASH sequence (HighRes). All the masses were annotated with a 3D spheroid volume covering the lesion extent. Subsequent analysis was fully automatic. The contrast enhanced region in of the annotation was segmented using Otsu thresholding followed by binary morphological operators. Each lesion was characterized by a set of features (2 HighRes + 3 Fast) comprising quartiles of relative enhancement, wash-out, and pharmacokinetic image voxel features within the enhancing lesion area. Image feature maps are automatically computed from the Fast and HighRes sequences using curve fitting and pharmacokinetic modeling. An ensemble Support Vector Machine classifier was trained to discriminate between benign and malignant lesions on each of the three feature sets: HighRes, Fast and HighRes+Fast. Using cross-validation and bootstrapping the area under the curve (AUC) for each set was estimated and compared using paired t-tests. RESULTS The diagnostic accuracy of the combined HighRes and Fast features was significantly (p<0.01) better than the HighRes features alone. The individual AUCs were: HighRes 0.78 (074-0.81); Fast 0.69 (0.66-0.72); HighRes+Fast 0.84 (0.82-0.87). CONCLUSION The increased diagnostic performance found when combining HighRes and Fast DCEMR demonstrates the additional value of our method in further improving the diagnostic performance of computerized analysis of breast MRI. This study shows that transient dynamic features from fast sequences provide additional information over conventional curve shape features in slower breast MR sequences. CLINICAL RELEVANCE/APPLICATION Dual time resolution breast MR can improve diagnostic accuracy over conventional single resolution analysis which may help increase specificity.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Huis19, + author = {Huisman, Henkjan}, + title = {Solid Science of AI Supporting Bladder Cancer CT Reading}, + journal = AR, + year = {2019}, + volume = {26}, + number = {9}, + pages = {1146--1147}, + doi = {10.1016/j.acra.2019.06.014}, + file = {:pdf/Huis19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31324578}, + publisher = {Elsevier}, + month = {9}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/209081}, + ss_id = {37e39166873ad1f981a845eda627f3d161f105e1}, + all_ss_ids = {['37e39166873ad1f981a845eda627f3d161f105e1']}, + gscites = {0}, +} + +@article{Huis21, + author = {Huisman, Henkjan J. and Engelbrecht, Marc R. 
and Barentsz, Jelle O.}, + title = {Accurate estimation of pharmacokinetic contrast-enhanced dynamic MRI parameters of the prostate}, + journal = JMRI, + volume = {13}, + number = {4}, + pages = {607-614}, + doi = {https://doi.org/10.1002/jmri.1085}, + url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/jmri.1085}, + abstract = {Abstract Quantitative analysis of contrast-enhanced dynamic MR images has potential for diagnosing prostate cancer. Contemporary fast acquisition techniques can give sufficiently high temporal resolution to sample the fast dynamics observed in the prostate. Data reduction for parametric visualization requires automatic curve fitting to a pharmacokinetic model, which to date has been performed using least-squares error minimization methods. We observed that these methods often produce unexpectedly noisy estimates, especially for the typically fast, intermediate parameters time-to-peak and start-of-enhancement, resulting in inaccurate pharmacokinetic parameter estimates. We developed a new curve fit method that focuses on the most probable slope. A set of 10 patients annotated using histopathology was used to compare the conventional and new methods. The results show that our new method is significantly more accurate, especially in the relatively less-enhancing periferal zone. We conclude that estimation accuracy depends on the curve fit method, which is especially important when evaluating the periferal zone of the prostate. J. Magn. Reson. Imaging 2001;13:607-614. (c) 2001 Wiley-Liss, Inc.}, + year = {2001}, + file = {:pdf/Huis21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {4fff96d3fa57f3a194b03f5524b9f45fc67dcb00}, + all_ss_ids = {['4fff96d3fa57f3a194b03f5524b9f45fc67dcb00']}, + gscites = {116}, +} + +@article{Huis96, + author = {H. J. Huisman and J. M. Thijssen}, + title = {Precision and accuracy of acoustospectrographic parameters}, + journal = UMB, + year = {1996}, + volume = {22}, + pages = {855--871}, + doi = {10.1016/0301-5629(96)00105-6}, + abstract = {Theoretical estimates of the standard deviation (STD) of four acoustospectrographic parameters (the intercept and slope of attenuation and backscatter coefficient) are derived. This derivation expands and corrects existing derivations, and is confirmed using simulations based on the adopted theoretical model. A robust parameter estimation method is applied to various phantom measurements, and to in vivo liver scans of healthy human subjects. The measured STD is higher than the theoretically predicted value, and we investigated four possible factors which explain this discrepancy. First, it is shown that the STD and bias after spectrogram calculation are rather insensitive to changes in windowing function, type, length and overlap. Second, we observed that a diffraction correction spectrogram calibrated on a medium different from the one being measured insufficiently corrects the depth-dependency of the parameters, which affects both precision as well as accuracy. We therefore propose a method that constructs an organ-specific diffraction correction spectrogram from the averaged spectrogram of a set of normal organs. We show that the organ-specific correction does not affect STD even in case of previously unseen acquisitions. Third, we introduce local inhomogeneity, which predicts excess STD due to local variations of the physical parameters within an organ (i.e., intrasubject), and global inhomogeneity, which predicts variations between organs (i.e., intersubject). 
We conclude that our method of estimating STD predicts normal, in vivo data very well, and propose that the deviation from these estimates is a potential tissue characterization parameter.}, + file = {Huis96.pdf:pdf\\Huis96.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {7}, + pmid = {8923705}, + month = {1}, + gsid = {6709853578080453326}, + gscites = {58}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122252}, + ss_id = {8224f913e000d00d30ddc6e5fcf7486adc9e31c3}, + all_ss_ids = {['8224f913e000d00d30ddc6e5fcf7486adc9e31c3']}, +} + +@article{Huis98, + author = {H. J. Huisman and J. M. Thijssen and D. J. Wagener and G. J. Rosenbusch}, + title = {Quantitative ultrasonic analysis of liver metastases}, + journal = UMB, + year = {1998}, + volume = {24}, + pages = {67--77}, + doi = {10.1016/S0301-5629(97)00211-1}, + abstract = {{T}he performance of five features of ultrasonic tissue characterization ({UTC}) of metastases in vivo in liver was investigated. {W}e acquired serial radiofrequency data sets of 12 patients with metastases in the liver from adenocarcinoma of the colon. {P}arenchyma and metastases {UTC} features were estimated in semiautomatically segmented regions. {O}ver 200 metastases were measured in patients and 43 dummy metastases in healthy volunteers. {T}wo attenuation features could be estimated in only 15\% of the metastases, and these were not different from those in parenchyma. {T}he texture features signal-to-noise ratio ({SNR}) could not discriminate real from dummy metastases. {A}verage backscatter intensity, b0, is an established discriminative echographic image feature. {H}owever, the metastases that were hypoechoic relative to surrounding parenchyma appeared to be isoechoic relative to normal liver parenchyma. {T}hey were visible because of an increased b0 in the surrounding liver parenchyma. {F}inally, we found an increased backscatter coefficient slope vs. frequency in hypoechoic metastases that may predict a deterioration of lesion contrast at higher transducer frequencies. {W}e conclude that the backscatter coefficient slope can improve detection of metastases, and that b0 measured relative to normal liver parenchyma should be used to correctly correlate metastasis echography with histology.}, + file = {Huis98.pdf:pdf\\Huis98.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {1}, + pmid = {9483773}, + month = {1}, + gsid = {973090722216732761}, + gscites = {26}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122231}, + ss_id = {93c05396532b476b41337b34a14b5d044f5057ef}, + all_ss_ids = {['93c05396532b476b41337b34a14b5d044f5057ef']}, +} + +@article{Huis98a, + author = {H. J. Huisman and J. M. Thijssen}, + title = {Adaptive texture feature extraction with application to ultrasonic image analysis}, + journal = UI, + year = {1998}, + volume = {20}, + pages = {132--148}, + doi = {10.1177/016173469802000204}, + abstract = {{C}omputer texture analysis methods use texture features that are traditionally chosen from a large set of fixed features known in literature. {T}hese fixed features are often not specifically designed to the problem at hand, and as a result they may have low discriminative power, and/or may be correlated. {I}ncreasing the number of selected fixed features is statistically not a good solution in limited data environments such as medical imaging. {F}or that reason, we developed an adaptive texture feature extraction method ({ATFE}) that extracts a small number of features that are tuned to the problem at hand. 
{B}y using a feed-forward neutral network, we ensure that even nonlinear relations are captured from the data. {U}sing extensive, repeated synthetic ultrasonic images, we compared the performance of {ATFE} with the optimal feature set. {W}e show that the {ATFE} method is capable of robust operation on small data sets with a performance close to that of the optimal feature set. {A}nother experiment confirms that our {ATFE} is capable of capturing nonlinear relations from the dataset. {W}e conclude that our method can improve performance in practical, limited dataset situations where an optimal fixed feature set can be hard to find.}, + file = {Huis98a.pdf:pdf\\Huis98a.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {2}, + pmid = {9691370}, + month = {4}, + gsid = {10946179538840967233}, + gscites = {26}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122220}, + ss_id = {ead58bdc63387bba332e8d57480558be85f263d7}, + all_ss_ids = {['ead58bdc63387bba332e8d57480558be85f263d7']}, +} + +@article{Huis98b, + author = {H. J. Huisman and J. H. Thijssen}, + title = {An in vivo ultrasonic model of liver parenchyma}, + journal = TUFF, + year = {1998}, + volume = {45}, + pages = {739--750}, + doi = {10.1109/58.677618}, + abstract = {{S}everal ultrasonic tissue characterization features are known to discriminate pathological from normal tissue in vivo. {P}reviously, the authors developed an in vivo attenuation- and backscatter estimation method with each frequency dependent coefficient being reduced to a slope and intercept at central frequency. {T}hey derived expressions predicting the standard deviation ({SD}) of these features, assuming a commonly used ultrasonic model of liver parenchyma. {I}n its application to in vivo data, the {SD} of the intercept features was unexpectedly high. {A}nother feature, signal-to-noise ratio ({SNR}), showed a significant bias related to the window size. {I}n this paper, the model is extended with the notion of inhomogeneous parenchyma background ({IPB}). {IPB} is shown to be present in normal liver parenchyma and is statistically described by a noise term with small amplitude and large correlation cell size. {A} method is presented to estimate the {IPB} characteristics. {T}he expressions predicting {SD} are extended, and an expression is derived predicting the window size bias of the feature {SNR}. {T}he accuracy and precision estimated from a large in vivo data set shows good agreement with the predictions with the extended model. {I}t is concluded that {IPB} is a realistic and relevant phenomenon and should be part of the in vivo ultrasonic model of liver parenchyma.}, + file = {Huis98b.pdf:pdf\\Huis98b.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {3}, + pmid = {18244225}, + month = {5}, + gsid = {5765466617692113319}, + gscites = {13}, + ss_id = {056af49e670a8c5385d7c39977acd8d49de9fff0}, + all_ss_ids = {['056af49e670a8c5385d7c39977acd8d49de9fff0']}, +} + +@phdthesis{Huis98c, + author = {H. Huisman}, + title = {In vivo ultrasonic tissue characterization of liver metastases}, + year = {1998}, + url = {http://repository.ubn.ru.nl/handle/2066/122226}, + abstract = {Ultrasonic imaging is a commonly used, powerful diagnostic technique in medicine. The images allow a physician to visualize anatomical details in the human body with the intention to assess the state of the anatomy. 
Since the introduction of the concept of gray-scale ultrasonic imaging in the early 70's, the number of applications has increased and the image quality is still being improved. More recently, UTC strategies are being developed to quantify information in the available image on the ultrasound machine or to visualize new information in additional images. The quantification of existing visual information may help physicians to enhance the robustness and reproducibility of their assessments. This thesis describes improved and/or new UTC strategies. This research was motivated by developments in related research areas. Continuing fundamental research in medical ultrasound has led to an increase in the understanding of the physical mechanisms that govern the image formation process. The resulting theoretical insights may enhance the amount of information that can be retrieved from ultrasonic images. Recent techniques in signal processing, image processing and pattern recognition facilitate the extraction and quantification of this information. More specific, artificial neural networks have shown promising results in related problems. Finally, actual application of these techniques seems realistic as contemporary computer speed- and memory has reached a sufficient level. This chapter starts with a demarcation of the ultrasonic tissue characterization (UTC) field of research. The subsequent section then briefly summarizes previous research on UTC. Finally, the problem definitions for the subsequent chapters in this thesis are formulated}, + copromotor = {J. M. Thijssen}, + file = {Huis98c.pdf:pdf\\Huis98c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {A. van Oosterom}, + school = {Radboud University, Nijmegen}, + gsid = {7763442202193637713}, + gscites = {7}, + journal = {PhD thesis}, +} + +@inproceedings{Hump17, + author = {Humpire Mamani, Gabriel Efrain and Arnaud Arinda Adiyoso Setio and Bram van Ginneken and Colin Jacobs}, + title = {Organ detection in thorax abdomen CT using multi-label convolutional neural networks}, + booktitle = MI, + year = {2017}, + volume = {10134}, + series = SPIE, + doi = {10.1117/12.2254349}, + abstract = {A convolutional network architecture is presented to determine bounding boxes around six organs in thorax-abdomen CT scans. A single network for each orthogonal view determines the presence of lungs, kidneys, spleen and liver. We show that an architecture that takes additional slices before and after the slice of interest as an additional input outperforms an architecture that processes single slices. From the slice-based analysis, a bounding box around the structures of interest can be computed. The system uses 6 convolutional, 4 pooling and one fully connected layer and uses 333 scans for training and 110 for validation. The test set contains 110 scans. The average Dice score of the proposed method was 0.95 and 0.95 for the lungs, 0.59 and 0.58 for the kidneys, 0.83 for the liver and 0.63 for the spleen. This paper shows that automatic localization of organs using multi-label convolution neural networks is possible. 
This architecture can likely be used to identify other organs of interest as well.}, + file = {Hump17.pdf:pdf\\Hump17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {11043862804825340240}, + gscites = {16}, + ss_id = {b704867c644d95b8ddc980b19b3805181b7a28ab}, + all_ss_ids = {['b704867c644d95b8ddc980b19b3805181b7a28ab']}, +} + +@article{Hump18, + author = {Humpire Mamani, Gabriel Efrain and Setio, Arnaud and van Ginneken, Bram and Jacobs, Colin}, + title = {Efficient organ localization using multi-label convolutional neural networks in thorax-abdomen CT scans}, + journal = PMB, + year = {2018}, + volume = {63}, + number = {8}, + pages = {085003}, + doi = {10.1088/1361-6560/aab4b3}, + url = {http://iopscience.iop.org/article/10.1088/1361-6560/aab4b3}, + abstract = {Automatic localization of organs and other structures in medical images is an important preprocessing step that can improve and speed up other algorithms such as organ segmentation, lesion detection, and registration. This work presents an efficient method for simultaneous localization of multiple structures in 3D thorax-abdomen CT scans. Our approach predicts the location of multiple structures using a single multi-label convolutional neural network for each orthogonal view. Each network takes extra slices around the current slice as input to provide extra context. A sigmoid layer is used to perform multi-label classification. The output of the three networks is subsequently combined to compute a 3D bounding box for each structure. We used our approach to locate 11 structures of interest. The neural network was trained and evaluated on a large set of 1884 thorax-abdomen CT scans from patients undergoing oncological workup. Reference bounding boxes were annotated by human observers. The performance of our method was evaluated by computing the wall distance to the reference bounding boxes. The bounding boxes annotated by the first human observer were used as the reference standard for the test set. Using the best configuration, we obtained an average wall distance of 3.20+-7.33mm in the test set. The second human observer achieved 1.23+-3.39mm. For all structures, the results were better than those reported in previously published studies. In conclusion, we proposed an efficient method for the accurate localization of multiple organs. Our method uses multiple slices as input to provide more context around the slice under analysis, and we have shown that this improves performance. This method can easily be adapted to handle more organs.}, + file = {:pdf/Hump18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29512516}, + month = {4}, + gsid = {16888189080982033735}, + gscites = {29}, + ss_id = {0c7b80f04984d9cea99337c21cee93a69dae27dd}, + all_ss_ids = {['0c7b80f04984d9cea99337c21cee93a69dae27dd']}, +} + +@article{Hump20, + author = {Humpire Mamani, Gabriel Efrain and Bukala, Joris and Scholten, Ernst T and Prokop, Mathias and van Ginneken, Bram and Jacobs, Colin}, + title = {Fully Automatic Volume Measurement of the Spleen at CT Using Deep Learning}, + journal = RAI, + year = {2020}, + volume = {2}, + issue = {4}, + pages = {e190102}, + doi = {10.1148/ryai.2020190102}, + url = {https://pubs.rsna.org/doi/10.1148/ryai.2020190102}, + abstract = {Purpose: To develop a fully automated algorithm for spleen segmentation and to assess the performance of this algorithm in a large dataset. 
Materials and Methods: In this retrospective study, a three-dimensional deep learning network was developed to segment the spleen on thorax-abdomen CT scans. Scans were extracted from patients undergoing oncologic treatment from 2014 to 2017. A total of 1100 scans from 1100 patients were used in this study, and 400 were selected for development of the algorithm. For testing, a dataset of 50 scans was annotated to assess the segmentation accuracy and was compared against the splenic index equation. In a qualitative observer experiment, an enriched set of 100 scan-pairs was used to evaluate whether the algorithm could aid a radiologist in assessing splenic volume change. The reference standard was set by the consensus of two other independent radiologists. A Mann-Whitney U test was conducted to test whether there was a performance difference between the algorithm and the independent observer. Results: The algorithm and the independent observer obtained comparable Dice scores (P = .834) on the test set of 50 scans of 0.962 and 0.964, respectively. The radiologist had an agreement with the reference standard in 81% (81 of 100) of the cases after a visual classification of volume change, which increased to 92% (92 of 100) when aided by the algorithm. Conclusion: A segmentation method based on deep learning can accurately segment the spleen on CT scans and may help radiologists to detect abnormal splenic volumes and splenic volume changes.}, + algorithm = {https://grand-challenge.org/algorithms/spleen-segmentation/}, + file = {:pdf/Hump20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33937830}, + gsid = {16221938647525067795}, + gscites = {21}, + ss_id = {4ad04c0dce1e8a12c49bae26252f476383ec3273}, + all_ss_ids = {['4ad04c0dce1e8a12c49bae26252f476383ec3273']}, +} + +@inproceedings{Huo11, + author = {J. Huo and E. M. van Rikxoort and K. Okada and H. J. Kim and W. Pope and J. G. Goldin and M. S. Brown}, + title = {Confidence-based ensemble for {GBM} brain tumor segmentation}, + booktitle = MI, + year = {2011}, + volume = {7962}, + series = SPIE, + pages = {79622P-1--79622P-6}, + doi = {10.1117/12.877913}, + abstract = {{I}t is a challenging task to automatically segment glioblastoma multiforme {(GBM)} brain tumors on {T}1w post-contrast isotropic {MR} images. {A} semi-automated system using fuzzy connectedness has recently been developed for computing the tumor volume that reduces the cost of manual annotation. In this study, we propose a an ensemble method that combines multiple segmentation results into a final ensemble one. The method is evaluated on a dataset of 20 cases from a multi-center pharmaceutical drug trial and compared to the fuzzy connectedness method. Three individual methods were used in the framework: fuzzy connectedness, GrowCut, and voxel classification. The combination method is a confidence map averaging {(CMA)} method. The {CMA} method shows an improved {ROC} curve compared to the fuzzy connectedness method (p < 0.001). The {CMA} ensemble result is more robust compared to the three individual methods.}, + file = {Huo11.pdf:pdf\\Huo11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {13437519549822545975}, + gscites = {6}, +} + +@article{Huo13, + author = {Huo, Jing and Okada, Kazunori and van Rikxoort, Eva M. and Kim, Hyun J. and Alger, Jeffry R. and Pope, Whitney B. and Goldin, Jonathan G. 
and Brown, Matthew S.}, +  title = {Ensemble segmentation for {GBM} brain tumors on {MR} images using confidence-based averaging}, +  journal = MP, +  year = {2013}, +  volume = {40}, +  pages = {093502}, +  doi = {10.1118/1.4817475}, +  abstract = {Purpose: Ensemble segmentation methods combine the segmentation results of individual methods into a final one, with the goal of achieving greater robustness and accuracy. The goal of this study was to develop an ensemble segmentation framework for glioblastoma multiforme tumors on single-channel T1w postcontrast magnetic resonance images. Methods: Three base methods were evaluated in the framework: fuzzy connectedness, GrowCut, and voxel classification using support vector machine. A confidence map averaging (CMA) method was used as the ensemble rule. Results: The performance is evaluated on a comprehensive dataset of 46 cases including different tumor appearances. The accuracy of the segmentation result was evaluated using the F1-measure between the semiautomated segmentation result and the ground truth. Conclusions: The results showed that the CMA ensemble result statistically approximates the best segmentation result of all the base methods for each case.}, +  file = {Huo13.pdf:Huo13.pdf:PDF}, +  optnote = {DIAG}, +  number = {9}, +  pmid = {24007185}, +  month = {8}, +  gsid = {16764320776556676639}, +  gscites = {13}, +} + +@inproceedings{Hups08, +  author = {Hupse, Rianne and Karssemeijer, Nico}, +  title = {Feature selection for computer-aided detection: comparing different selection criteria}, +  booktitle = MI, +  year = {2008}, +  volume = {6915}, +  series = SPIE, +  pages = {691503}, +  doi = {10.1117/12.771972}, +  abstract = {{I}n this study we investigated different feature selection methods for use in computer-aided mass detection. {T}he data set we used (1357 malignant mass regions and 58444 normal regions) was much larger than used in previous research where feature selection did not directly improve the performance compared to using the entire feature set. {W}e introduced a new performance measure to be used during feature selection, defined as the mean sensitivity in an interval of the free response operating characteristic ({FROC}) curve computed on a logarithmic scale. {T}his measure is similar to the final validation performance measure we were optimizing. {T}herefore it was expected to give better results than more general feature selection criteria. {W}e compared the performance of feature sets selected using the mean sensitivity of the {FROC} curve to sets selected using the {W}ilks' lambda statistic and investigated the effect of reducing the skewness in the distribution of the feature values before performing feature selection. {I}n the case of {W}ilks' lambda, we found that reducing skewness had a clear positive effect, yielding performances similar or exceeding performances obtained when the entire feature set was used. {O}ur results indicate that a general measure like {W}ilks'
lambda selects better performing feature sets than the mean sensitivity of the {FROC} curve.}, + file = {Hups08.pdf:pdf/Hups08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {15388016742450396396}, + gscites = {4}, + ss_id = {9d77d77caa82df8186ac09d9f44131eab2f3b985}, + all_ss_ids = {['9d77d77caa82df8186ac09d9f44131eab2f3b985']}, +} + +@inproceedings{Hups09, + author = {Hupse, Rianne and Karssemeijer, Nico}, + title = {The use of contextual information for computer aided detection of masses in mammograms}, + booktitle = MI, + year = {2009}, + volume = {7260}, + series = SPIE, + pages = {72600Q}, + doi = {10.1117/12.812233}, + abstract = {{I}n breast cancer screening, radiologists not only look at local properties of suspicious regions in the mammogram but take also into account more general contextual information. {I}n this study we investigated the use of similar information for computer aided detection of malignant masses. {W}e developed a new set of features that combine information from the candidate mass region and the whole image or mammogram. {T}he developed context features were constructed to give information about suspiciousness of a region relative to other areas in the mammogram, the location in the image, the location in relation to dense tissue and the overall amount of dense tissue in the mammogram. {W}e used a step-wise floating feature selection algorithm to select subsets from the set of available features. {F}eature selection was performed two times, once using the complete feature set (37 context and 40 local features) and once using only the local features. {I}t was found that in the subsets selected from the complete feature set 30-60% were context features. {A}t most one local feature present in the subset containing context features was not present in the subset without context features. {W}e validated the performance of the selected subsets on a separate data set using cross validation and bootstrapping. {F}or each subset size we compared the performance obtained using the features selected from the complete feature set to the performance obtained using the features selected from the local feature set. {W}e found that subsets containing context features performed significantly better than feature sets containing no context features.}, + file = {Hups09.pdf:pdf\\Hups09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {15617708437218037687}, + gscites = {8}, + ss_id = {33d9ad8ffba587147b72dbf8e166f30d5170ee99}, + all_ss_ids = {['33d9ad8ffba587147b72dbf8e166f30d5170ee99']}, +} + +@article{Hups09a, + author = {R. Hupse and N. Karssemeijer}, + title = {Use of normal tissue context in computer-aided detection of masses in mammograms}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {2033--2041}, + doi = {10.1109/TMI.2009.2028611}, + abstract = {{W}hen reading mammograms, radiologists do not only look at local properties of suspicious regions but also take into account more general contextual information. {T}his suggests that context may be used to improve the performance of computer-aided detection ({CAD}) of malignant masses in mammograms. {I}n this study, we developed a set of context features that represent suspiciousness of normal tissue in the same case. {F}or each candidate mass region, three normal reference areas were defined in the image at hand. {C}orresponding areas were also defined in the contralateral image and in different projections. 
{E}valuation of the context features was done using 10-fold cross validation and case based bootstrapping. {F}ree response receiver operating characteristic ({FROC}) curves were computed for feature sets including context features and a feature set without context. {R}esults show that the mean sensitivity in the interval of 0.05-0.5 false positives/image increased more than 6\% when context features were added. {T}his increase was significant ( p < 0.0001). {C}ontext computed using multiple views yielded a better performance than using a single view (mean sensitivity increase of 2.9\%, p < 0.0001). {B}esides the importance of using multiple views, results show that best {CAD} performance was obtained when multiple context features were combined that are based on different reference areas in the mammogram.}, + file = {Hups09a.pdf:pdf\\Hups09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {19666331}, + month = {12}, + gsid = {17273858064497381433}, + gscites = {49}, +} + +@article{Hups10, + author = {Rianne Hupse and Nico Karssemeijer}, + title = {The effect of feature selection methods on computer-aided detection of masses in mammograms}, + journal = PMB, + year = {2010}, + volume = {55}, + pages = {2893--2904}, + doi = {10.1088/0031-9155/55/10/007}, + abstract = {{I}n computer-aided diagnosis ({CAD}) research, feature selection methods are often used to improve generalization performance of classifiers and shorten computation times. {I}n an application that detects malignant masses in mammograms, we investigated the effect of using a selection criterion that is similar to the final performance measure we are optimizing, namely the mean sensitivity of the system in a predefined range of the free-response receiver operating characteristics ({FROC}). {T}o obtain the generalization performance of the selected feature subsets, a cross validation procedure was performed on a dataset containing 351 abnormal and 7879 normal regions, each region providing a set of 71 mass features. {T}he same number of noise features, not containing any information, were added to investigate the ability of the feature selection algorithms to distinguish between useful and non-useful features. {I}t was found that significantly higher performances were obtained using feature sets selected by the general test statistic {W}ilks' lambda than using feature sets selected by the more specific {FROC} measure. {F}eature selection leads to better performance when compared to a system in which all features were used.}, + file = {Hups10.pdf:pdf\\Hups10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {20427855}, + month = {4}, + gsid = {10192890197026575770}, + gscites = {28}, + ss_id = {907b1d90f89dd803460133cc5f005797514ded42}, + all_ss_ids = {['907b1d90f89dd803460133cc5f005797514ded42']}, +} + +@phdthesis{Hups12a, + author = {Hupse, R.}, + title = {Detection of malignant masses in breast cancer screening by computer assisted decision making}, + year = {2012}, + url = {http://repository.ubn.ru.nl/handle/2066/99158}, + abstract = {To detect breast cancers in an early stage, in most western countries screening programs are organized. A small fraction of the mammograms acquired in screening contain malignancies, which can be very subtle. In order to reduce oversight errors, computer-aided detection (CAD) systems have been developed. The effect of CAD techniques that are currently used in screening programs is not conclusive. The research described in this thesis had two goals. 
The first goal was to improve the performance of current CAD techniques for mass detection. The second goal was to investigate the use of CAD as decision support instead of perception aid.}, + file = {Hups12a.pdf:pdf/Hups12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Hups13, + author = {Hupse, Rianne and Samulski, Maurice and Lobbes, Marc and den Heeten, Ard and Imhof-Tas, Mechli W. and Beijerinck, David and Pijnappel, Ruud and Boetes, Carla and Karssemeijer, Nico}, + title = {Standalone computer-aided detection compared to radiologists' performance for the detection of mammographic masses}, + journal = ER, + year = {2013}, + volume = {23}, + pages = {93-100}, + doi = {10.1007/s00330-012-2562-7}, + abstract = {OBJECTIVES: We developed a computer-aided detection (CAD) system aimed at decision support for detection of malignant masses and architectural distortions in mammograms. The effect of this system on radiologists' performance depends strongly on its standalone performance. The purpose of this study was to compare the standalone performance of this CAD system to that of radiologists. METHODS: In a retrospective study, nine certified screening radiologists and three residents read 200 digital screening mammograms without the use of CAD. Performances of the individual readers and of CAD were computed as the true-positive fraction (TPF) at a false-positive fraction of 0.05 and 0.2. Differences were analysed using an independent one-sample t-test. RESULTS: At a false-positive fraction of 0.05, the performance of CAD (TPF = 0.487) was similar to that of the certified screening radiologists (TPF = 0.518, P = 0.17). At a false-positive fraction of 0.2, CAD performance (TPF = 0.620) was significantly lower than the radiologist performance (TPF = 0.736, P < 0.001). Compared to the residents, CAD performance was similar for all false-positive fractions. CONCLUSIONS: The sensitivity of CAD at a high specificity was comparable to that of human readers. These results show potential for CAD to be used as an independent reader in breast cancer screening. KEY POINTS: • Computer-aided detection (CAD) systems are used to detect malignant masses in mammograms • Current CAD systems operate at low specificity to avoid perceptual oversight • A CAD system has been developed that operates at high specificity • The performance of the CAD system is approaching that of trained radiologists • CAD has the potential to be an independent reader in screening.}, + file = {Hups13.pdf:pdf\\Hups13.pdf:PDF}, + optnote = {DIAG}, + pmid = {22772149}, + month = {7}, + gsid = {12703997379279415258}, + gscites = {20}, + ss_id = {68703321fc18fde3cd4a568b328280f69b594af8}, + all_ss_ids = {['68703321fc18fde3cd4a568b328280f69b594af8']}, +} + +@article{Hups13a, + author = {Hupse, Rianne and Samulski, Maurice and Lobbes, Marc B. and Mann, Ritse M. and Mus, Roel and den Heeten, Gerard J. and Beijerinck, David and Pijnappel, Ruud M.
and Boetes, Carla and Karssemeijer, Nico}, + title = {Computer-aided Detection of Masses at Mammography: Interactive Decision Support versus Prompts}, + journal = Radiology, + year = {2013}, + volume = {266}, + pages = {123--129}, + doi = {10.1148/radiol.12120218}, + abstract = {Purpose: To compare effectiveness of an interactive computer-aided detection (CAD) system, in which CAD marks and their associated suspiciousness scores remain hidden unless their location is queried by the reader, with the effect of traditional CAD prompts used in current clinical practice for the detection of malignant masses on full-field digital mammograms. Materials and Methods: The requirement for institutional review board approval was waived for this retrospective observer study. Nine certified screening radiologists and three residents who were trained in breast imaging read 200 studies (63 studies containing at least one screen-detected mass, 17 false-negative studies, 20 false-positive studies, and 100 normal studies) twice, once with CAD prompts and once with interactive CAD. Localized findings were reported and scored by the readers. In the prompted mode, findings were recorded before and after activation of CAD. The partial area under the location receiver operating characteristic (ROC) curve for an interval of low false-positive fractions typical for screening, from 0 to 0.2, was computed for each reader and each mode. Differences in reader performance were analyzed by using software. Results: The average partial area under the location ROC curve with unaided reading was 0.57, and it increased to 0.62 with interactive CAD, while it remained unaffected by prompts. The difference in reader performance for unaided reading versus interactive CAD was statistically significant (P = .009). Conclusion: When used as decision support, interactive use of CAD for malignant masses on mammograms may be more effective than the current use of CAD, which is aimed at the prevention of perceptual oversights.}, + file = {Hups13a.pdf:pdf\\Hups13a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {23091171}, + month = {1}, + gsid = {8185845335393970951}, + gscites = {54}, + ss_id = {2bde6fd417a50bf9f52eda4fa9a1dc7a3a2a5853}, + all_ss_ids = {['2bde6fd417a50bf9f52eda4fa9a1dc7a3a2a5853']}, +} + +@article{Igle09, + author = {J. E. Iglesias and N. Karssemeijer}, + title = {Robust initial detection of landmarks in film-screen mammograms using multiple {FFDM} atlases}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {1815--1824}, + doi = {10.1109/TMI.2009.2025036}, + abstract = {{A}utomated analysis of mammograms requires robust methods for pectoralis segmentation and nipple detection. {L}ocating the nipple is especially important in multiview computer aided detection systems, in which findings are matched across images using the nipple-to-finding distance. {S}egmenting the pectoralis is a key preprocessing step to avoid false positives when detecting masses due to the similarity of the texture of mammographic parenchyma and the pectoral muscle. {A} multiatlas algorithm capable of providing very robust initial estimates of the nipple position and pectoral region in digitized mammograms is presented here. {T}en full-field digital mammograms, which are easily annotated attributed to their excellent contrast, are robustly registered to the target digitized film-screen mammogram. {T}he annotations are then propagated and fused into a final nipple position and pectoralis segmentation. 
{C}ompared to other nipple detection methods in the literature, the system proposed here has the advantages that it is more robust and can provide a reliable estimate when the nipple is located outside the image. {O}ur results show that the change in the correlation between nipple-to-finding distances in craniocaudal and mediolateral oblique views is not significant when the detected nipple positions replace the manual annotations. {M}oreover, the pectoralis segmentation is acceptable and can be used as initialization for a more complex algorithm to optimize the outline locally. {A} novel aspect of the method is that it is also capable of detecting and segmenting the pectoralis in craniocaudal views.}, + file = {Igle09.pdf:pdf\\Igle09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {19520632}, + month = {11}, + gsid = {14708859348258176979}, + gscites = {33}, + ss_id = {b5efadc7ed85042a5af820dfc430993a08ba00af}, + all_ss_ids = {['b5efadc7ed85042a5af820dfc430993a08ba00af']}, +} + +@article{Isgu03, + author = {I. I{\v{s}}gum and B. van Ginneken}, + title = {{CT} segmentation programs extract calcifications}, + journal = DIE, + year = {2003}, + volume = {19}, + pages = {11-16}, + abstract = {{T}echnique offers a first step toward comprehensive, automatic, quantitative analysis of calcifications from {CT} scans.}, + file = {Isgu03.pdf:pdf\\Isgu03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + gsid = {6957500468995813094}, + gscites = {1}, +} + +@inproceedings{Isgu03a, + author = {I. I{\v{s}}gum and B. van Ginneken and M. A. Viergever}, + title = {Automatic detection of calcifications in the aorta from abdominal {CT} scans}, + booktitle = CARS, + year = {2003}, + pages = {1037-1042}, + doi = {10.1016/S0531-5131(03)00427-8}, + file = {:pdf\\Isgu03a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6017208270322190053}, + gscites = {4}, + ss_id = {e818125c17499f8c23accc7ed09b9c7bed102724}, + all_ss_ids = {['e818125c17499f8c23accc7ed09b9c7bed102724']}, +} + +@article{Isgu04, + author = {I. I{\v{s}}gum AND B. van Ginneken AND M. Olree}, + title = {Automatic detection of calcifications in the aorta from {CT} scans of the abdomen: {3D} computer-aided diagnosis}, + journal = AR, + year = {2004}, + volume = {11}, + pages = {247-257}, + doi = {10.1016/S1076-6332(03)00673-1}, + abstract = {{RATIONALE} {AND} {OBJECTIVES}: {A}utomated detection and quantification of arterial calcifications can facilitate epidemiologic research and, eventually, the use of full-body calcium scoring in clinical practice. {A}n automatic computerized method to detect calcifications in {CT} scans is presented. {MATERIALS} {AND} {METHODS}: {F}orty abdominal {CT} scans have been randomly selected from clinical practice. {T}hey all contained contrast material and belonged to one of four categories: containing "no," "small," "moderate," or "large" amounts of arterial calcification. {T}here were ten scans in each category. {T}he experiments were restricted to the vertical range from the point where the superior mesenteric artery branches off of the descending aorta until the first bifurcation of the iliac arteries. {T}he automatic method starts by extracting all connected objects above 220 {H}ounsfield units ({HU}) from the scan. {T}hese objects include all calcifications, as well as bony structures and contrast material. {T}o distinguish calcifications from non-calcifications, a number of features are calculated for each object. 
{T}hese features are based on the object's size, location, shape characteristics, and surrounding structures. {S}ubsequently a classification of each object is performed in two stages. {F}irst the probability that an object represents a calcification is computed assuming a multivariate {G}aussian distribution for the calcifications. {O}bjects with low probability are discarded. {T}he remaining objects are then classified into calcifications and non-calcifications using a 5-nearest-neighbor classifier and sequential forward feature selection. {B}ased on the total volume of calcifications determined by the system, the scan is assigned to one of the four categories mentioned above. {RESULTS}: {T}he 40 scans contained a total of 249 calcifications as determined by a human observer. {T}he method detected 209 calcifications (sensitivity 83.9\%) at the expense of on average 1.0 false-positive object per scan. {T}he correct category label was assigned to 30 scans and only 2 scans were off by more than one category. {M}ost incorrect classifications can be attributed to the presence of contrast material in the scans. {CONCLUSION}: {I}t is possible to identify the majority of arterial calcifications in abdominal {CT} scans in a completely automatic fashion with few false positive objects, even if the scans contain contrast material.}, + file = {Isgu04.pdf:pdf\\Isgu04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {15035514}, + month = {3}, + gsid = {2474107927891219113}, + gscites = {55}, + ss_id = {a996f17647b43c0af7b6d024b35cb990abdaf10c}, + all_ss_ids = {['edcb88076df807a341f31128b3c94748a4df8003', 'a996f17647b43c0af7b6d024b35cb990abdaf10c']}, +} + +@inproceedings{Isgu04a, + author = {I. I{\v{s}}gum AND B. van Ginneken AND M. Prokop}, + title = {A pattern recognition approach to automated coronary calcium scoring}, + booktitle = ICPR, + year = {2004}, + doi = {10.1109/ICPR.2004.1334636}, + abstract = {{A}n automated method for coronary calcification detection is presented. {F}irst the heart region is extracted, in which objects potentially representing calcifications are obtained by thresholding. {B}esides coronary calcifications, the set of objects includes other heart calcifications, bony structures and noise. {F}or each object, features describing its size, shape, position and appearance are computed. {S}everal classifiers and classification strategies are evaluated. {B}est results are obtained with a specifically designed sequence of k{NN} classifiers that employ sequential forward feature selection. {F}irst obvious non-calcifications are removed, then calcifications are distinguished from non-calcifications and a final classifier discerns coronary calcifications from other cardiac calcifications. {I}n 14 {CT} scans containing 61 coronary calcifications, 46 (75%) are detected at the expense of on average 0.9 false positive objects per scan.}, + file = {Isgu04a.pdf:pdf\\Isgu04a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {10038102016891366329}, + gscites = {10}, + ss_id = {588dd6048ff9804005f9e5f83b587fa3ee011f9f}, + all_ss_ids = {['588dd6048ff9804005f9e5f83b587fa3ee011f9f']}, +} + +@inproceedings{Isgu05, + author = {I. I{\v{s}}gum and B. van Ginneken and A. Rutten and M. 
Prokop}, + title = {Automated coronary calcification detection and scoring}, + booktitle = {4th International Symposium on Image and Signal Processing and Analysis}, + year = {2005}, + pages = {127--132}, + abstract = {{A}n automated method for coronary calcification detection from {ECG}-triggered multi-slice {CT} data is presented. {T}he method first segments the heart region. {I}n the obtained volume candidate objects are extracted by thresholding. {T}hey include coronary calcification, calcium located elsewhere in the heart, for example, in the valves or the myocardium, and other high density structures mostly representing noise and bone. {A} set of 57 features is calculated for each candidate object. {I}n the feature space objects are classified with a k-{NN} classifier and feature selection in three consecutive stages. {T}he method is tested on 51 scans of the heart. {T}hey contain 320 calcification in the coronary arteries, 291 in the aorta and 62 calcifications in the heart. {T}he system correctly detected 177 calcifications in the coronaries at the expense of 56 false positive objects. {O}n average the method makes 3.8 errors per scan.}, + file = {Isgu05.pdf:pdf\\Isgu05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {3137964482447567295}, + gscites = {6}, + ss_id = {bbc76f4964bdb6db523ab9e9b6052755718f2f3e}, + all_ss_ids = {['bbc76f4964bdb6db523ab9e9b6052755718f2f3e']}, +} + +@conference{Isgu05a, + author = {I. I{\v{s}}gum and A. Rutten and M. Prokop and B. van Ginneken}, + title = {Automated {C}alcium {S}coring for {R}isk {A}ssessment of {C}oronary {A}rtery {D}isease}, + booktitle = RSNA, + year = {2005}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Isgu07, + author = {I. I{\v{s}}gum and A. Rutten and M. Prokop and B. van Ginneken}, + title = {Detection of coronary calcifications from computed tomography scans for automated risk assessment of coronary artery disease}, + journal = MP, + year = {2007}, + volume = {34}, + pages = {1450-1461}, + doi = {10.1118/1.2710548}, + abstract = {{A} fully automated method for coronary calcification detection from non-contrast-enhanced, {ECG}-gated multi-slice computed tomography CT data is presented. {C}andidates for coronary calcifications are extracted by thresholding and component labeling. {T}hese candidates include coronary calcifications, calcifications in the aorta and in the heart, and other high-density structures such as noise and bone. {A} dedicated set of 64 features is calculated for each candidate object. {T}hey characterize the object's spatial position relative to the heart and the aorta, for which an automatic segmentation scheme was developed, its size and shape, and its appearance, which is described by a set of approximated {G}aussian derivatives for which an efficient computational scheme is presented. {T}hree classification strategies were designed. {T}he first one tested direct classification without feature selection. {T}he second approach also utilized direct classification, but with feature selection. {F}inally, the third scheme employed two-stage classification. {I}n a computationally inexpensive first stage, the most easily recognizable false positives were discarded. {T}he second stage discriminated between more difficult to separate coronary calcium and other candidates. {P}erformance of linear, quadratic, nearest neighbor, and support vector machine classifiers was compared. {T}he method was tested on 76 scans containing 275 calcifications in the coronary arteries and 335 calcifications in the heart and aorta. 
{T}he best performance was obtained employing a two-stage classification system with a k-nearest neighbor (k-{NN}) classifier and a feature selection scheme. {T}he method detected 73.8% of coronary calcifications at the expense of on average 0.1 false positives per scan. {A} calcium score was computed for each scan and subjects were assigned one of four risk categories based on this score. {T}he method assigned the correct risk category to 93.4% of all scans.}, + file = {Isgu07.pdf:pdf\\Isgu07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {17500476}, + month = {3}, + gsid = {4877154343244870689}, + gscites = {94}, + ss_id = {b717ba083290575d9176a4620c3abe46a949cbeb}, + all_ss_ids = {['b717ba083290575d9176a4620c3abe46a949cbeb']}, +} + +@phdthesis{Isgu07a, + author = {I. I{\v{s}}gum}, + title = {Computer-aided detection and quantification of arterial calcifications with {CT}}, + year = {2007}, + url = {http://igitur-archive.library.uu.nl/dissertations/2007-0515-200312/UUindex.html}, + abstract = {{A}therosclerosis is the leading cause of death and disability in the {W}estern world. {A}rterial calcifications are a marker of the disease and can be detected with computed tomography ({CT}) scans. {I}n this thesis automatic methods for {CT} calcium scoring are presented. {I}n {CT} scans calcifications appear as bright structures, and therefore they were extracted by thresholding and component labeling. {H}owever, other high-density objects, such as noise, bony structures, and metal implants were selected by this process as well. {T}o identify true calcifications among all candidate objects, each object was described by features. {T}hose features were derived from the candidate object's location, its appearance, its shape and its size. {S}ubsequently, a pattern recognition approach was used to identify true calcifications among all extracted candidate objects. {A} first study considered aortic calcifications in {CTA} scans of the abdomen. {A}ll high-density objects in the scan were considered and separated by a pattern recognition system. {I}n terms of the number of calcifications the method resulted in a sensitivity of 83.9% at the expense of on average 1.0 false positive objects per scan. {T}he scan was assigned to one of four categories ("no", "small", "moderate" or "large" amounts of calcification). {T}he correct category label was assigned to 75.0% of scans. {A} similar approach was used for automatic coronary calcium scoring in non-contrast enhanced, {ECG}-gated multi-slice {CT} data. {H}ere, the analysis was performed on the cardiac volume only. {I}n addition to the previously mentioned characteristics, features were also derived from a segmentation of the heart and the aorta, which were extracted automatically using a rule-based scheme. {A}n {A}gatston score was computed for each scan and subjects were assigned a risk category (0-10, 11-100, 101-400, >400). {T}he correct category was assigned to 93.4% of subjects. {A}ccurate segmentation of the heart and the aorta boundary were found to be essential for the performance of the coronary calcium scoring system. {R}obust segmentation of these structures is challenging, and therefore a general multi-atlas-based segmentation method was developed. {T}he method was tested on the segmentation of the heart and the aorta in low-dose, non-gated, non-contrast enhanced {CT} scans of the thorax. {T}he proposed method yielded results very close to those of an independent human observer. 
{M}oreover, atlas selection led to faster segmentation at comparable performance. {F}inally, using the results of the multi-atlas-based segmentation, a system for automatic detection of calcifications in the aorta was developed. {I}n this study the same low-dose, non-gated, non-contrast enhanced {CT} scans of the thorax were used. {S}uch scans are acquired in lung cancer screening trials and it would be worthwhile to perform calcium scoring in these scans, especially if this could be done automatically. {A} pattern recognition system was applied to the segmented aortic volume. {A} correct risk category was assigned to 88.3% of subjects. {I}n conclusion, this thesis presents several systems for computerized detection of arterial calcifications. {I}t is shown that automated calcium scoring is possible and can be used for risk category determination.}, + copromotor = {B. van Ginneken}, + file = {Isgu07a.pdf:pdf\\Isgu07a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever and W. M. Prokop}, + school = {Utrecht University}, + gsid = {17165912596073417610}, + gscites = {3}, + journal = {PhD thesis}, +} + +@conference{Isgu07b, + author = {I. I{\v{s}}gum and A. Rutten and M. Prokop and B. van Ginneken}, + title = {Automated {C}alcium {S}coring in the {A}orta in {L}ow {D}ose {N}on-contrast-enhanced {CT} {S}cans of the {T}horax}, + booktitle = RSNA, + year = {2007}, + pages = {405}, + file = {Isgu07a.pdf:pdf\\Isgu07a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Isgu09, + author = {I. I{\v{s}}gum and M. Staring and A. Rutten and M. Prokop and M. Viergever and B. van Ginneken}, + title = {Multi-{A}tlas-{B}ased {S}egmentation {W}ith {L}ocal {D}ecision {F}usion - {A}pplication to {C}ardiac and {A}ortic {S}egmentation in {CT} {S}cans}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {1000--1010}, + doi = {10.1109/TMI.2008.2011480}, + abstract = {{A} novel atlas-based segmentation approach based on the combination of multiple registrations is presented. {M}ultiple atlases are registered to a target image. {T}o obtain a segmentation of the target, labels of the atlas images are propagated to it. {T}he propagated labels are combined by spatially varying decision fusion weights. {T}hese weights are derived from local assessment of the registration success. {F}urthermore, an atlas selection procedure is proposed that is equivalent to sequential forward selection from statistical pattern recognition theory. {T}he proposed method is compared to three existing atlas-based segmentation approaches, namely 1) single atlas-based segmentation, 2) average-shape atlas-based segmentation, and 3) multi-atlas-based segmentation with averaging as decision fusion. {T}hese methods were tested on the segmentation of the heart and the aorta in computed tomography scans of the thorax. {T}he results show that the proposed method outperforms other methods and yields results very close to those of an independent human observer. {M}oreover, the additional atlas selection step led to a faster segmentation at a comparable performance.}, + file = {Isgu09.pdf:pdf\\Isgu09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19131298}, + month = {7}, + gsid = {13277270236947780741}, + gscites = {407}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/79693}, + ss_id = {af1ac5542db7aab4911c3f70b080564c83d4d0cc}, + all_ss_ids = {['af1ac5542db7aab4911c3f70b080564c83d4d0cc']}, +} + +@conference{Isgu09a, + author = {I. I{\v{s}}gum and B. van Ginneken and P. C. Jacobs and M. J. Gondrie and W. P. 
Th. M. Mali and M. Prokop}, + title = {Automatic determination of cardiovascular risk from thoracic {CT} scans using a coronary calcium atlas}, + booktitle = RSNA, + year = {2009}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Isgu09b, + author = {I. I{\v{s}}gum and P. C. A. Jacobs and M. Gondrie and B. van Ginneken and M. Oudkerk and W. P. Th. M. Mali and Y. van der Graaf and M. Prokop}, + title = {Cardiovascular risk assessment in lung cancer screening scans: do coronary and aortic calcium scores yield comparable risks for individual subjects?}, + booktitle = ECR, + year = {2009}, + abstract = {{P}urpose: {L}ung cancer screening trials may provide information about cardiovascular risk. {I}t is not yet clear whether coronary scores from non-gated scans or aortic scores are more suited for this purpose. {W}e examined whether risk stratification based on aortic calcifications places patients into similar groups as risk based on coronary calcium scores derived from non-gated lung cancer screening studies. {M}ethod and materials: {W}e included 1091 baseline scans from subjects participating in a lung cancer screening trial ({NELSON}). {S}cans had been acquired at two centers using 16 x 0.75 mm collimation and a low-dose, non-contrast, non-{ECG}-gated scanning protocol. {T}o control image noise, 3mm sections were reconstructed every 1.4mm. {A}ortic and coronary calcifications were scored by two observers (130 {HU} threshold). {F}or coronary calcifications four atherosclerotic risk categories were defined based on {A}gatston score (?10, 11-100, 101-400, >400). {S}ince no categorization is available for aortic scores we assigned four categories, each containing the same number of subjects as the respective coronary categories. {T}o assess the agreement, we calculated linearly weighted kappa statistics (categories) and {S}pearman rank correlation coefficients (scores). {R}esults: {T}he number of subjects was 327, 156, 175 and 433 in the four coronary risk categories, respectively. {T}he aortic calcium score assigned 526 (48%) subjects to the same risk category as the coronary score. {L}inearly weighted kappa statistic was 0.37, and {S}pearman rank correlation between scores was 0.51. {C}onclusion: {W}hile yielding similar results, aortic and coronary calcium scores cannot be used interchangeably for assessing individual arteriosclerotic risk from lung cancer screening studies.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Isgu10, + author = {I. I{\v{s}}gum and A. Rutten and M. Prokop and M.Staring and S. Klein and J. P. W. Pluim and M. A. Viergever and B. van Ginneken}, + title = {Automated aortic calcium scoring on low-dose chest computed tomography}, + journal = MP, + year = {2010}, + volume = {37}, + pages = {714--723}, + doi = {10.1118/1.3284211}, + abstract = {{PURPOSE}: {T}horacic computed tomography ({CT}) scans provide information about cardiovascular risk status. {T}hese scans are non-{ECG} synchronized, thus precise quantification of coronary calcifications is difficult. {A}ortic calcium scoring is less sensitive to cardiac motion, so it is an alternative to coronary calcium scoring as an indicator of cardiovascular risk. {T}he authors developed and evaluated a computer-aided system for automatic detection and quantification of aortic calcifications in low-dose noncontrast-enhanced chest {CT}. {METHODS}: {T}he system was trained and tested on scans from participants of a lung cancer screening trial. 
{A} total of 433 low-dose, non-{ECG}-synchronized, noncontrast-enhanced 16 detector row examinations of the chest was randomly divided into 340 training and 93 test data sets. {A} first observer manually identified aortic calcifications on training and test scans. {A} second observer did the same on the test scans only. {F}irst, a multiatlas-based segmentation method was developed to delineate the aorta. {S}egmented volume was thresholded and potential calcifications (candidate objects) were extracted by three-dimensional connected component labeling. {D}ue to image resolution and noise, in rare cases extracted candidate objects were connected to the spine. {T}hey were separated into a part outside and parts inside the aorta, and only the latter was further analyzed. {A}ll candidate objects were represented by 63 features describing their size, position, and texture. {S}ubsequently, a two-stage classification with a selection of features and k-nearest neighbor classifiers was performed. {B}ased on the detected aortic calcifications, total calcium volume score was determined for each subject. {RESULTS}: {T}he computer system correctly detected, on the average, 945 mm3 out of 965 mm3 (97.9\%) calcified plaque volume in the aorta with an average of 64 mm3 of false positive volume per scan. {S}pearman rank correlation coefficient was ρ = 0.960 between the system and the first observer compared to ρ = 0.961 between the two observers. {CONCLUSIONS}: {A}utomatic calcium scoring in the aorta thus appears feasible with good correlation between manual and automatic scoring.}, + file = {Isgu10.pdf:pdf\\Isgu10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {20229881}, + month = {1}, + gsid = {13271497965691699016}, + gscites = {35}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88423}, + ss_id = {1ae278d37a894cb1d50a70c79b2db493f476631e}, + all_ss_ids = {['1ae278d37a894cb1d50a70c79b2db493f476631e']}, +} + +@inproceedings{Isgu10a, + author = {I. I{\v{s}}gum and M. Prokop and P. C. Jacobs and M. J. Gondrie and W. P. Th. M. Mali and M. A. Viergever and B. van Ginneken}, + title = {Automatic coronary calcium scoring in low-dose {non-ECG-synchronized} thoracic {CT} scans}, + booktitle = MI, + year = {2010}, + volume = {7624}, + series = SPIE, + pages = {76240M1--76240M8}, + doi = {10.1117/12.840514}, + abstract = {{T}his work presents a system for automatic coronary calcium scoring and cardiovascular risk stratification in thoracic {CT} scans. {D}ata was collected from a {D}utch-{B}elgian lung cancer screening trial. {I}n 121 low-dose, non-{ECG} synchronized, non-contrast enhanced thoracic {CT} scans an expert scored coronary calcifications manually. {A} key element of the proposed algorithm is that the approximate position of the coronary arteries was inferred with a probabilistic coronary calcium atlas. {T}his atlas was created with atlas-based segmentation from 51 scans and their manually identified calcifications, and was registered to each unseen test scan. {I}n the test scans all objects with density above 130 {HU} were considered candidates that could represent coronary calcifications. {A} statistical pattern recognition system was designed to classify these candidates using features that encode their spatial position relative to the inferred position of the coronaries obtained from the atlas registration. {I}n addition, size and texture features were computed for all candidates. {T}wo consecutive classifiers were used to label each candidate.
{T}he system was trained with 35 and tested with another 35 scans. {T}he detected calcifications were quantified and cardiovascular risk was determined for each subject. {T}he system detected 71% of coronary calcifications with an average of 0.9 false positive objects per scan. {C}ardiovascular risk category was correctly assigned to 29 out of 35 subjects (83%). {F}ive scans (14%) were one category off, and only one scan (3%) was two categories off. {W}e conclude that automatic assessment of the cardiovascular risk from low-dose, non-{ECG} synchronized thoracic {CT} scans appears feasible.}, + file = {Isgu10a.pdf:pdf\\Isgu10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {3227380864420599141}, + gscites = {4}, + ss_id = {b3d2443be3028a33c52bcaeb46d7bef2addf0e5e}, + all_ss_ids = {['b3d2443be3028a33c52bcaeb46d7bef2addf0e5e']}, +} + +@conference{Isgu11, + author = {I. I{\v{s}}gum and P. A. de Jong and W. Mali and B. van Ginneken and M. Prokop and M. A. Viergever}, + title = {Automatic Coronary Calcium Scoring in Low-Dose Non-{ECG}-synchronized Chest Computed Tomography ({CT}) Scans from a Lung Cancer Screening Trial}, + booktitle = RSNA, + year = {2011}, + abstract = {PURPOSE: Coronary calcium scores from lung cancer screening computed tomography (CT) have been shown to be an independent predictor of cardiovascular events. Given the large number of cases and the inherent large variability induced by motion artifacts on these non-gated scans, automatic calcium scoring might be a feasible option. We therefore evaluated the performance of automatic coronary calcium scoring in this setting. METHOD AND MATERIALS: We included 1796 baseline scans from the Nelson trial, a lung cancer screening program with low-dose chest CT (16 x 0.75 mm, 30 mAs, no IV contrast, no ECG synchronization). All scans were reconstructed to 3.1 mm-sections with 1.4 mm increment. Scans with beam hardening artifacts due to metal implants were identified and excluded. Automatic calcium scoring was performed using a method based on multi-atlas registration and pattern recognition. Each scan was inspected by one of four trained observers. When needed, the observers corrected automatically identified calcifications. Because stents could not be securely differentiated, they were scored as calcifications. Agatston and volume scores were computed for the automatic method and after manual correction. Each subject was assigned to a cardiovascular risk category based on the Agatston score. To estimate the interobserver agreement, a subset of 45 scans was manually scored by two observers. Spearman's rank correlation was computed to assess the agreement between the automatically and manually corrected volume scores, and between observers. Linearly weighted kappa statistic was calculated to evaluate the agreement in cardiovascular risk category assignments. RESULTS: The mean volume score was 534 mm3 (range:0-11,778 mm3) based on the manually corrected scores. Fourteen (0.8%) scans with large metal implants had to be excluded. We found a correlation ρ of 0.88 and an agreement κ of 0.79 between the automatic and manually corrected scores. Interobserver agreement resulted in ρ = 0.89, and κ = 0.83. CONCLUSION: Fully automatic coronary calcium scoring in a lung cancer screening program is feasible. Agreement of automatic scores with observer scores is similar to interobserver agreement. 
CLINICAL RELEVANCE/APPLICATION: Automatic estimation of cardiovascular risk in lung cancer screening can expand the scope of screening and help identify high-risk subjects who might benefit from preventive cardiovascular treatment.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Isgu12, + author = {I{\v{s}}gum, I. and Prokop, M. and Niemeijer, M. and Viergever, M. and van Ginneken, B.}, + title = {Automatic coronary calcium scoring in low-dose chest computed tomography}, + journal = TMI, + year = {2012}, + volume = {31}, + pages = {2322 - 2334}, + doi = {10.1109/TMI.2012.2216889}, + abstract = {The calcium burden as estimated from non-ECG-synchronized CT exams acquired in screening of heavy smokers has been shown to be a strong predictor of cardiovascular events. We present a method for automatic coronary calcium scoring with low-dose, non-contrast-enhanced, non-ECG-synchronized chest CT. First, a probabilistic coronary calcium map was created using multi-atlas segmentation. This map assigned an a priori probability for the presence of coronary calcifications at every location in a scan. Subsequently, a statistical pattern recognition system was designed to identify coronary calcifications by texture, size and spatial features; the spatial features were computed using the coronary calcium map. The detected calcifications were quantified in terms of volume and Agatston score. The best results were obtained by merging the results of three different supervised classification systems, namely direct classification with a nearest neighbor classifier, and two-stage classification with nearest neighbor and support vector machine classifiers. We used a total of 231 test scans containing 45,674 mm3 of coronary calcifications. The presented method detected on average 157/198 mm3 (sensitivity 79.2%) of coronary calcium volume with on average 4 mm3 false positive volume. Calcium scoring can be performed automatically in low-dose, non-contrast enhanced, non-ECG-synchronized chest CT in screening of heavy smokers to identify subjects who might benefit from preventive treatment.}, + file = {Isgu12.pdf:pdf\\Isgu12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22961297}, + month = {12}, + gsid = {12501517643677936280}, + gscites = {113}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110344}, + ss_id = {5597d386256263f67338a1b41a90ac204aa5ad38}, + all_ss_ids = {['5597d386256263f67338a1b41a90ac204aa5ad38']}, +} + +@article{Jaco10, + author = {P. C. Jacobs and M. Prokop and Y. van der Graaf and M. J. Gondrie and K. J. Janssen and H. J. de Koning and I. I{\v{s}}gum and R. J. van Klaveren and M. Oudkerk and B. van Ginneken and W. P. Mali}, + title = {Comparing coronary artery calcium and thoracic aorta calcium for prediction of all-cause mortality and cardiovascular events on low-dose non-gated computed tomography in a high-risk population of heavy smokers}, + journal = Atherosclerosis, + year = {2010}, + volume = {209}, + pages = {455--462}, + doi = {10.1016/j.atherosclerosis.2009.09.031}, + abstract = {{BACKGROUND}: {C}oronary artery calcium ({CAC}) and thoracic aorta calcium ({TAC}) can be detected simultaneously on low-dose, non-gated computed tomography ({CT}) scans. {CAC} has been shown to predict cardiovascular ({CVD}) and coronary ({CHD}) events. {A} comparable association between {TAC} and {CVD} events has yet to be established, but {TAC} could be a more reproducible alternative to {CAC} in low-dose, non-gated {CT}. 
{T}his study compared {CAC} and {TAC} as independent predictors of all-cause mortality and cardiovascular events in a population of heavy smokers using low-dose, non-gated {CT}. {METHODS}: {W}ithin the {NELSON} study, a population-based lung cancer screening trial, the {CT} screen group consisted of 7557 heavy smokers aged 50-75 years. {U}sing a case-cohort study design, {CAC} and {TAC} scores were calculated in a total of 958 asymptomatic subjects who were followed up for all-cause death, and {CVD}, {CHD} and non-cardiac events (stroke, aortic aneurysm, peripheral arterial occlusive disease). {W}e used {C}ox proportional-hazard regression to compute hazard ratios ({HR}s) with adjustment for traditional cardiovascular risk factors. {RESULTS}: {A} close association between the prevalence of {TAC} and increasing levels of {CAC} was established (p<0.001). {I}ncreasing {CAC} and {TAC} risk categories were associated with all-cause mortality (p for trend=0.01 and 0.001, respectively) and {CVD} events (p for trend <0.001 and 0.03, respectively). {C}ompared with the lowest quartile (reference category), multivariate-adjusted {HR}s across categories of {CAC} were higher (all-cause mortality, {HR}: 9.13 for highest quartile; {CVD} events, {HR}: 4.46 for highest quartile) than of {TAC} scores ({HR}: 5.45 and {HR}: 2.25, respectively). {H}owever, {TAC} is associated with non-coronary events ({HR}: 4.69 for highest quartile, p for trend=0.01) and {CAC} was not ({HR}: 3.06 for highest quartile, p for trend=0.40). {CONCLUSIONS}: {CAC} was found to be a stronger predictor than {TAC} of all-cause mortality and {CVD} events in a high-risk population of heavy smokers scored on low-dose, non-gated {CT}. {TAC}, however, is stronger associated with non-cardiac events than {CAC} and could prove to be a preferred marker for these events.}, + file = {Jaco10.pdf:pdf\\Jaco10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {19875116}, + month = {4}, + gsid = {10232555956815609662}, + gscites = {123}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/89727}, + ss_id = {e8c5a5edd60ed560edaa87fe335cabf1702e14c4}, + all_ss_ids = {['e8c5a5edd60ed560edaa87fe335cabf1702e14c4']}, +} + +@article{Jaco10a, + author = {P. C. A. Jacobs and I. I{\v{s}}gum and M. J. A. Gondrie and W. P. Th. M. Mali and B. van Ginneken and M. Prokop and Y.van der Graaf}, + title = {Coronary artery calcification scoring in low-dose ungated {CT} screening for lung cancer: interscan agreement}, + journal = AJR, + year = {2010}, + volume = {194}, + pages = {1244--1249}, + doi = {10.2214/AJR.09.3047}, + abstract = {{OBJECTIVE}: {I}n previous studies detection of coronary artery calcification ({CAC}) with low-dose ungated {MDCT} performed for lung cancer screening has been compared with detection with cardiac {CT}. {W}e evaluated the interscan agreement of {CAC} scores from two consecutive low-dose ungated {MDCT} examinations. {SUBJECTS} {AND} {METHODS}: {T}he subjects were 584 participants in the screening segment of a lung cancer screening trial who underwent two low-dose ungated {MDCT} examinations within 4 months (mean, 3.1 +/- 0.6 months) of a baseline {CT} examination. {A}gatston score, volume score, and calcium mass score were measured by two observers. {I}nterscan agreement of stratification of participants into four {A}gatston score risk categories (0, 1-100, 101-400, > 400) was assessed with kappa values. 
{I}nterscan variability and 95\% repeatability limits were calculated for all three calcium measures and compared by repeated measures analysis of variance. {RESULTS}: {A}n {A}gatston score > 0 was detected in 443 baseline {CT} examinations (75.8\%). {I}nterscan agreement of the four risk categories was good (kappa = 0.67). {T}he {A}gatston scores were in the same risk category in both examinations in 440 cases (75.3\%); 578 participants (99.0\%) had scores differing a maximum of one category. {F}urthermore, mean interscan variability ranged from 61\% for calcium volume score to 71\% for {A}gatston score (p < 0.01). {A} limitation of this study was that no comparison of {CAC} scores between low-dose ungated {CT} and the reference standard {ECG}-gated {CT} was performed. {CONCLUSION}: {C}ardiovascular disease risk stratification with low-dose ungated {MDCT} is feasible and has good interscan agreement of stratification of participants into {A}gatston score risk categories. {H}igh mean interscan variability precludes the use of this technique for monitoring {CAC} scores for individual patients.}, + file = {Jaco10a.pdf:pdf\\Jaco10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {20410410}, + month = {5}, + gsid = {7723446783558799227}, + gscites = {52}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/87789}, + ss_id = {0081e840aeaaa405c804371a3768db33d86bab0e}, + all_ss_ids = {['0081e840aeaaa405c804371a3768db33d86bab0e']}, +} + +@inproceedings{Jaco10d, + author = {Anh H. Le and Young W. Park and Kevin Ma and Colin Jacobs and Brent J. Liu}, + title = {Performance evaluation for volumetric segmentation of multiple sclerosis lesions using {MATLAB} and computing engine in the graphical processing unit ({GPU})}, + booktitle = MI, + year = {2010}, + volume = {7628}, + series = SPIE, + pages = {76280W-76280W-6}, + doi = {10.1117/12.844811}, + abstract = {Multiple Sclerosis (MS) is a progressive neurological disease affecting myelin pathways in the brain. Multiple lesions in the white matter can cause paralysis and severe motor disabilities of the affected patient. To solve the issue of inconsistency and user-dependency in manual lesion measurement of MRI, we have proposed a 3-D automated lesion quantification algorithm to enable objective and efficient lesion volume tracking. The computer-aided detection (CAD) of MS, written in MATLAB, utilizes K-Nearest Neighbors (KNN) method to compute the probability of lesions on a per-voxel basis. Despite the highly optimized algorithm of imaging processing that is used in CAD development, MS CAD integration and evaluation in clinical workflow is technically challenging due to the requirement of high computation rates and memory bandwidth in the recursive nature of the algorithm. In this paper, we present the development and evaluation of using a computing engine in the graphical processing unit (GPU) with MATLAB for segmentation of MS lesions. The paper investigates the utilization of a high-end GPU for parallel computing of KNN in the MATLAB environment to improve algorithm performance. The integration is accomplished using NVIDIA's CUDA developmental toolkit for MATLAB. The results of this study will validate the practicality and effectiveness of the prototype MS CAD in a clinical setting. 
The GPU method may allow MS CAD to rapidly integrate in an electronic patient record or any disease-centric health care system.}, + file = {Jaco10d.pdf:pdf/Jaco10d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + ss_id = {28200b417aacd207af7ff52bb7ede1fa7ee8b73a}, + all_ss_ids = {['28200b417aacd207af7ff52bb7ede1fa7ee8b73a']}, + gscites = {2}, +} + +@inproceedings{Jaco10e, + author = {Kevin Ma and Colin Jacobs and James Fernandez and Lilyana Amezcua and Brent Liu}, + title = {The development of a disease oriented eFolder for multiple sclerosis decision support}, + booktitle = MI, + year = {2010}, + volume = {7628}, + series = SPIE, + pages = {76280G-76280G-7}, + doi = {10.1117/12.844690}, + abstract = {Multiple sclerosis (MS) is a demyelinating disease of the central nervous system. The chronic nature of MS necessitates multiple MRI studies to track disease progression. Currently, MRI assessment of multiple sclerosis requires manual lesion measurement and yields an estimate of lesion volume and change that is highly variable and user-dependent. In the setting of a longitudinal study, disease trends and changes become difficult to extrapolate from the lesions. In addition, it is difficult to establish a correlation between these imaged lesions and clinical factors such as treatment course. To address these clinical needs, an MS specific e-Folder for decision support in the evaluation and assessment of MS has been developed. An e-Folder is a disease-centric electronic medical record in contrast to a patient-centric electronic health record. Along with an MS lesion computer aided detection (CAD) package for lesion load, location, and volume, clinical parameters such as patient demographics, disease history, clinical course, and treatment history are incorporated to make the e-Folder comprehensive. With the integration of MRI studies together with related clinical data and informatics tools designed for monitoring multiple sclerosis, it provides a platform to improve the detection of treatment response in patients with MS. The design and deployment of MS e-Folder aims to standardize MS lesion data and disease progression to aid in decision making and MS-related research.}, + file = {Jaco10e.pdf:pdf/Jaco10e.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + ss_id = {b7652a6a140d0de2324064d21ce7165f8ce44271}, + all_ss_ids = {['b7652a6a140d0de2324064d21ce7165f8ce44271']}, + gscites = {1}, +} + +@inproceedings{Jaco10f, + author = {Colin Jacobs and Kevin Ma and Paymann Moin and Brent Liu}, + title = {An automatic quantification system for {MS} lesions with integrated {DICOM} structured reporting ({DICOM-SR}) for implementation within a clinical environment}, + booktitle = MI, + year = {2010}, + volume = {7628}, + series = SPIE, + pages = {76280K-76280K-8}, + doi = {10.1117/12.844072}, + abstract = {Multiple Sclerosis (MS) is a common neurological disease affecting the central nervous system characterized by pathologic changes including demyelination and axonal injury. MR imaging has become the most important tool to evaluate the disease progression of MS which is characterized by the occurrence of white matter lesions. Currently, radiologists evaluate and assess the multiple sclerosis lesions manually by estimating the lesion volume and amount of lesions. This process is extremely time-consuming and sensitive to intra- and inter-observer variability. 
Therefore, there is a need for automatic segmentation of the MS lesions followed by lesion quantification. We have developed a fully automatic segmentation algorithm to identify the MS lesions. The segmentation algorithm is accelerated by parallel computing using Graphics Processing Units (GPU) for practical implementation into a clinical environment. Subsequently, characterized quantification of the lesions is performed. The quantification results, which include lesion volume and amount of lesions, are stored in a structured report together with the lesion location in the brain to establish a standardized representation of the disease progression of the patient. The development of this structured report in collaboration with radiologists aims to facilitate outcome analysis and treatment assessment of the disease and will be standardized based on DICOM-SR. The results can be distributed to other DICOM-compliant clinical systems that support DICOM-SR such as PACS. In addition, the implementation of a fully automatic segmentation and quantification system together with a method for storing, distributing, and visualizing key imaging and informatics data in DICOM-SR for MS lesions improves the clinical workflow of radiologists and visualizations of the lesion segmentations and will provide 3-D insight into the distribution of lesions in the brain.}, + file = {Jaco10f.pdf:pdf/Jaco10f.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + ss_id = {3b553792b01dc3235d0d5d19de5441c2a0570ec5}, + all_ss_ids = {['3b553792b01dc3235d0d5d19de5441c2a0570ec5']}, + gscites = {1}, +} + +@inproceedings{Jaco11, + author = {C. Jacobs and C. I. S\'{a}nchez and S. C. Saur and T. Twellmann and P. A. de Jong and B. van Ginneken}, + title = {Computer-Aided Detection of Ground Glass Nodules in Thoracic {CT} images using Shape, Intensity and Context Features}, + booktitle = MICCAI, + year = {2011}, + volume = {6893}, + series = LNCS, + pages = {207-214}, + doi = {10.1007/978-3-642-23626-6_26}, + abstract = {Ground glass nodules (GGNs) occur less frequent in computed tomography (CT) scans than solid nodules but have a much higher chance of being malignant. Accurate detection of these nodules is therefore highly important. A complete system for computer-aided detection of GGNs is presented consisting of initial segmentation steps, candidate detection, feature extraction and a two-stage classification process. A rich set of intensity, shape and context features is constructed to describe the appearance of GGN candidates. We apply a two-stage classification approach using a linear discriminant classifier and a GentleBoost classifier to efficiently classify candidate regions. The system is trained and independently tested on 140 scans that contained one or more GGNs from around 10,000 scans obtained in a lung cancer screening trial. The system shows a high sensitivity of 73\% at only one false positive per scan.}, + file = {Jaco11.pdf:pdf\\Jaco11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, noduleDetectionCT, subsolidNoduleDetection}, + pmid = {22003701}, + gsid = {8841871000202912452}, + gscites = {40}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/96752}, + ss_id = {2b13cb88382d521bab7fe58fc4220ecec7648815}, + all_ss_ids = {['2b13cb88382d521bab7fe58fc4220ecec7648815']}, +} + +@conference{Jaco11b, + author = {C. Jacobs and E. Th. Scholten and S. C. Saur and T. Twellmann and P. A. de Jong and B. 
van Ginneken}, + title = {Computer-Aided Detection of Ground Glass Nodules in Thoracic {CT} Images}, + booktitle = RSNA, + year = {2011}, + abstract = {{PURPOSE} : Ground glass nodules (GGNs) occur less frequent in computed tomography (CT) scans than solid nodules but have a much higher chance of being malignant. Therefore, accurate detection of these nodules is highly important. A complete system for computer-aided detection (CAD) of GGNs is presented and evaluated on an independent data set to determine its potential to be implemented in a clinical setting. {METHOD} {AND} {MATERIALS}: Data for this study was collected from 10,000 low dose chest CT scans (16x0.75mm, 120-140 kVp, 30 mAs) from a large lung cancer screening trial. All examinations in which at least one GGN was annotated were collected. This resulted in 140 scans. This set is extended with 60 randomly selected scans in which two radiologists confirmed the absence of GGNs. Training and optimization of the CAD system was performed using 67 scans each containing one or more GGNs, 91 in total. The CAD system was independently tested using FROC analysis on the remaining 133 scans, 73 with and 60 without GGNs, containing 85 GGNs in total. The detection pipeline is initiated with a robust lung and airway segmentation. The CAD system extracts ground glass voxels by applying a double-threshold density mask (-750 to -300 HU) to the lung regions. A morphological opening operation, connected component analysis and a size criterion define GGN candidates. Classification of candidates is accomplished using a two-stage classification process. First, a linear discriminant classifier using 2 shape and 2 intensity features is employed. Next, a GentleBoost classifier is applied to the remaining candidates using a rich set of 161 features that describe the intensity, shape and spatial position of candidates relative to the airways and lung boundary. {RESULTS}: Candidate extraction resulted in 560 ± 306 candidate regions per scan. On average, 32% of the candidate regions remained after the first classification stage. The CAD system achieved a sensitivity of 60% and 74% at 0.25 and 1 false positive detections per scan, respectively. Most false negatives were missed in the candidate detection stage. {CONCLUSION}: Computer-aided detection of ground glass nodules is feasible with high sensitivity at a low false positive rate. {CLINICAL} {RELEVANCE}/{APPLICATION}: Ground glass nodules are easily missed in chest CT scans. Computer-aided detection can reach high sensitivity at low false positive rates and is therefore a potentially useful aid for radiologists.}, + optnote = {DIAG, RADIOLOGY, noduleDetectionCT, subsolidNoduleDetection}, +} + +@inproceedings{Jaco11c, + author = {Colin Jacobs and Keelin Murphy and Thorsten Twellmann and Pim A. de Jong and Bram van Ginneken}, + title = {Computer-Aided Detection of Solid and Ground Glass Nodules in Thoracic {CT} images using two independent {CAD} systems}, + booktitle = {The Fourth International Workshop on Pulmonary Image Analysis}, + year = {2011}, + pages = {177--182}, + abstract = {In computed tomographic lung cancer screening, both solid and ground glass nodules are regularly encountered. Ground glass nodules may or may not have a solid component and it has been shown that they have a much higher chance of being malignant in comparison to solid nodules. Computer-aided detection ({CAD}) systems designed for solid nodules perform poorly on detection of ground glass nodules, and vice versa. 
Therefore, a {CAD} system that combines the output of two prototype {CAD} systems aimed at detection of ground glass nodules and solid nodules, respectively, could lead to efficient detection of the entire spectrum of lung nodules in chest {CT} scans. In this study, we combine the output of two prototype {CAD} systems and show results on a data set of 73 {CT} examinations containing both solid and ground glass nodules. The main category of nodules which are found by both {CAD} systems are ground glass nodules with a solid component.}, + file = {Jaco11c.pdf:pdf\\Jaco11c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, noduleDetectionCT, subsolidNoduleDetection}, + gsid = {17175959006383118735}, + gscites = {2}, + ss_id = {66625349b11735b78c23c1253d6e4b21add938ac}, + all_ss_ids = {['66625349b11735b78c23c1253d6e4b21add938ac']}, +} + +@conference{Jaco12, + author = {C. Jacobs and E. Th. Scholten and S. C. Saur and T. Twellmann and P. A. de Jong and B. van Ginneken}, + title = {Computer-Aided Detection of Ground Glass Nodules in Lung Cancer Screening: Retrospective Evaluation of Potential Benefit}, + booktitle = ECR, + year = {2012}, + abstract = {PURPOSE: Ground glass nodules ({GGN}s) have a high probability of being malignant. We retrospectively investigated the usefulness of a dedicated computer-aided detection (CAD) system for GGNs in a clinical screening setting. METHODS AND MATERIALS: A set of 620 low-dose chest {CT} scans (16x0.75mm, 120-140 kVp, 30 mAs) were randomly selected from a lung cancer screening trial. All scans were processed with a research prototype {CAD} system for {GGN}s (Diagnostic Image Analysis Group, Nijmegen, The Netherlands, Fraunhofer {MEVIS}, Bremen, Germany). Two chest radiologists inspected all {CAD} marks and classified them as {GGN}, other lesion, or false positive. Findings were compared with the annotations recorded in the screening trial database, which were obtained without the support of {CAD}. RESULTS: {GGN} {CAD} found 386 marks (0.62 per scan). Radiologist 1 classified 22 marks as {GGN}, 222 as other lesions and 142 as false positives. For radiologist 2 these numbers were 39, 107, and 240, respectively. Fifteen findings were considered to be {GGN}s by both radiologists. Of these 15, only 3 had been annotated in the screening trial. {CAD} marks that were classified by the radiologists as other lesions involved mainly scars and areas of inflammation. A common source of {CAD} false positives were hazy opacities caused by cardiac motion. CONCLUSION: At less than 0.7 false positives per scan, the {CAD} system can substantially increase the number of {GGN}s identified in a lung cancer screening trial at a false positive rate that does not impose a burden on the reading process.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Jaco12a, + author = {Jacobs, P. C. and Gondrie, M. J. A. and van der Graaf, Y. and de Koning, H. J. and I{\v{s}}gum, I. and van Ginneken, B. and Mali, W. P. T. M.}, + title = {Coronary Artery Calcium Can Predict All-Cause Mortality and Cardiovascular Events on Low-Dose {CT} Screening for Lung Cancer}, + journal = AJR, + year = {2012}, + volume = {198}, + pages = {505--511}, + doi = {10.2214/AJR.10.5577}, + abstract = {Performing coronary artery calcium (CAC) screening as part of low-dose CT lung cancer screening has been proposed as an efficient strategy to detect people with high cardiovascular risk and improve outcomes of primary prevention. 
This study aims to investigate whether CAC measured on low-dose CT in a population of former and current heavy smokers is an independent predictor of all-cause mortality and cardiac events.We used a case-cohort study and included 958 subjects 50 years old or older within the screen group of a randomized controlled lung cancer screening trial. We used Cox proportional-hazard models to compute hazard ratios (HRs) adjusted for traditional cardiovascular risk factors to predict all-cause mortality and cardiovascular events.During a median follow-up of 21.5 months, 56 deaths and 127 cardiovascular events occurred. Compared with a CAC score of 0, multivariate-adjusted HRs for all-cause mortality for CAC scores of 1-100, 101-1000, and more than 1000 were 3.00 (95\% CI, 0.61-14.93), 6.13 (95\% CI, 1.35-27.77), and 10.93 (95\% CI, 2.36-50.60), respectively. Multivariate-adjusted HRs for coronary events were 1.38 (95\% CI, 0.39-4.90), 3.04 (95\% CI, 0.95-9.73), and 7.77 (95\% CI, 2.44-24.75), respectively.This study shows that CAC scoring as part of low-dose CT lung cancer screening can be used as an independent predictor of all-cause mortality and cardiovascular events.}, + file = {Jaco12a.pdf:pdf\\Jaco12a.pdf:PDF}, + optnote = {BioMR, DIAG, RADIOLOGY}, + number = {3}, + pmid = {22357989}, + month = {3}, + gsid = {10217888044027227279}, + gscites = {101}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110713}, + ss_id = {0566f2dbb3c0d1e814b2067f6a14664bdcbb9881}, + all_ss_ids = {['0566f2dbb3c0d1e814b2067f6a14664bdcbb9881']}, +} + +@conference{Jaco12b, + author = {C. Jacobs and E. M. van Rikxoort and T. Twellmann and P.A. de Jong and C.M. Schaefer-Prokop and M. Prokop and B. van Ginneken}, + title = {Improved computer-aided detection of pulmonary nodules by combining a solid and subsolid nodule {CAD} system}, + booktitle = RSNA, + year = {2012}, + abstract = {{PURPOSE} : Most nodule CAD systems perform poorly for detection of subsolid pulmonary nodules since they are primarily developed for detection of solid nodules. To overcome this problem, a combination of CAD systems for solid and subsolid nodule detection is proposed and evaluated on a database containing a large amount of subsolid and solid nodules. {METHOD} {AND} {MATERIALS}: Low-dose chest CT scans (16x0.75mm, 120-140 kVp, 30 mAs) were selected from the database of the Dutch-Belgian NELSON lung cancer screening trial in which either a subsolid nodule or a solid nodule with a diameter between 7mm and 30mm was present. In this way, 109 scans with 33 non-solid, 37 part-solid, and 52 large solid nodules meeting the criteria were included. In total, the scans contained 327 solid, 42 part-solid and 33 non-solid nodules. All chest CT scans were processed using a solid nodule CAD system and subsolid CAD system (Diagnostic Image Analysis Group, Nijmegen, The Netherlands, Fraunhofer MEVIS, Bremen, Germany). Both systems were trained on an independent dataset. FROC analysis was performed to evaluate the performance of the solid nodule CAD system, the subsolid CAD system and the combination of the both CAD systems. The CAD systems were combined based on operating points on the FROC curve. Since the prevalence of subsolid nodules in a screening population is approximately four times lower than of solid nodules, for each operating point of the solid nodule CAD system we selected an operating point of the subsolid CAD system with a four times lower false-positive rate. 
Subsequently, findings of both CAD systems are merged and this generates the output of the combined CAD. {RESULTS}: FROC analysis showed that at an average of four false positives per scan, the solid nodule CAD system reached a sensitivity of 59% and the subsolid nodule CAD system reached 27%. At this false positive level the combination of the two systems led to a sensitivity of 66%. At an average of 8 false positives per scan, the sensitivities were 69%, 30% and 76%, respectively. {CONCLUSION}: The combination of a solid nodule CAD system and subsolid CAD system substantially increases the sensitivity for detection of pulmonary nodules. {CLINICAL} {RELEVANCE}/{APPLICATION}: Given the high malignancy rates of subsolid nodules, it is important that computer-aided detection systems detect both solid and subsolid nodules.}, + optnote = {DIAG, RADIOLOGY, noduleDetectionCT, subsolidNoduleDetection}, +} + +@conference{Jaco12c, + author = {C. Jacobs and J.M. Kuhnigk and E. M. van Rikxoort and T. Twellmann and P.A. de Jong and H. Gietema and B. Lassen and C.M. Schaefer-Prokop and M. Prokop and B. van Ginneken}, + title = {Optimized workflow for low dose thoracic {CT} lung cancer screening: automated detection, measurement, temporal matching and volumetry and mass analysis, individualized prediction of cancer risk, structured reporting with follow-up recommendation}, + booktitle = RSNA, + year = {2012}, + abstract = {{BACKGROUND} Today, lung cancer is the most common and most deadly cancer in men and women worldwide. The recent positive results of the National Lung Screening Trial (NLST) [1] have provided clear scientific evidence that screening with low dose chest CT reduces lung cancer mortality. The National Comprehensive Cancer Network has updated their recommendations for screening and now strongly recommend the use of low-dose CT screening for individuals at high risk for lung cancer [2]. At least one health insurance company has started to cover the cost of lung screening. In its current form, however, large scale introduction of CT lung screening would put an enormous burden on radiologists. Building upon our clinical and technical experience in reading, image analysis and data processing for large screening trials in Europe (over 30,000 CT scans from 10,000 participants) and a careful review of the existing commercially available lung workstations, we have developed a new dedicated chest reading workstation with a number of innovations that allows for an optimized high throughput workflow to report on low dose chest CT scans. The application is currently available as a research prototype and is in use at five sites. It is used in clinical research and includes automated detection, linking, volumetry, interval change analysis, and estimation of malignancy for each nodule finding. A structured report for each patient is produced with follow-up recommendations according to several guidelines, including the upcoming revised Fleischner Society guidelines for the management of pulmonary nodules. {METHODOLOGY/APPLICATION} The workstation that will be demonstrated includes a number of innovations and enhancements compared to currently commercially available software. - Each scan is preprocessed and scan quality is automatically assessed. Scans with low quality, artifacts or underlying interstitial lung disease are automatically flagged. - Each scan is elastically registered to all prior scans of the same patient. Findings in prior scans are propagated and linked to findings in the current scan. 
All scans and processing results are preloaded in the background to ensure rapid reading. - Highly effective computerized detection (CAD) of solid nodules [3] and sub-solid nodules [4] is integrated. - High throughput reading with CAD as a first reader is supported. Users can accept/reject at a setting of on average 10 to 15 candidate lesions per scan and thus report much quicker than traditional thin sliding MIP viewing of the entire volume (also supported). - Each nodule is automatically characterized as solid, part-solid, or non-solid and nodules with benign characteristics are automatically flagged. Detected benign characteristics include calcifications and peri-fissural opacities (lymph nodes). - Volumetry, volume growth rate, mass and mass growth rate are automatically computed with advanced segmentation algorithms [5] that have been extended to handle sub-solid lesions and segment the solid core of part-solid nodules. If necessary, the user can interactively adjust segmentations and compare side by side with the corresponding finding in all available prior scans to detect and correct segmentation inconsistencies. - Findings are summarized in a structured report in HTML and PDF format that is stored in the database and can be sent to requesting physicians. Follow-up recommendation according to various screening algorithms and guidelines of leading Societies are included. {DEMONSTRATION STRATEGY} The exhibit will be accompanied by an informational poster that will highlight the key differences between the proposed workflow and current clinical practice. The poster will also explain algorithmic concepts that underlie the automated analysis. Attendees will be able to gain hands-on experience with the workstation and read cases and use nodule detection, automated and interactive volumetry, see the results of classification of different nodule types and produce structured reports with follow-up recommendations, all within + minutes.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Jaco13, + author = {Colin Jacobs and Eva M. van Rikxoort and Jan-Martin Kuhnigk and Ernst Th Scholten and Pim A. de Jong and Cornelia Schaefer-Prokop and Mathias Prokop and Bram van Ginneken}, + title = {Automated characterization of pulmonary nodules in thoracic {CT} images using a segmentation-based classification system}, + booktitle = ECR, + year = {2013}, + abstract = {PURPOSE: Clinical guidelines for follow-up of pulmonary nodules depend on nodule type and therefore accurate characterization of nodules is important. A novel computer-aided diagnosis (CAD) system to distinguish solid, part-solid and non-solid nodules is presented and evaluated on a large data set from a lung cancer screening trial. METHOD AND MATERIALS: The automated characterization system is based on a previously published nodule segmentation algorithm. Four different parameter settings were used to extract the solid part, non-solid part and solid core of the lesion. For each segmentation, volume, mass, average density, 5th percentile and 95th percentile of densities inside the segmentation were used as features. A k-nearest-neighbor classifier was used to classify nodules. The accuracy of the system to differentiate between solid and subsolid nodules, between solid, part-solid and non-solid nodules and between part-solid and non-solid nodules was evaluated. A data set consisting of 137 low-dose chest CT scans (16x0.75mm, 120-140 kVp, 30 mAs) with 52 solid, 50 part-solid and 50 non-solid nodules was collected from a screening trial. 
The nodule type recorded in the screening database was used as the reference standard. Experiments were performed in leave-one-nodule-out cross-validation. RESULTS: The accuracy of CAD to differentiate between solid and subsolid nodules was 0.88. Differentiation into solid, part-solid and non-solid nodules gave an accuracy of 0.72. CAD had an accuracy of 0.71 in differentiating part-solid from non-solid nodules. CONCLUSION: Automated characterization of pulmonary nodules shows good performance. This can aid radiologists to decide on appropriate workup in clinical practice.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Jaco13a, + author = {Colin Jacobs and Eva M. van Rikxoort and Jan-Martin Kuhnigk and Ernst Th Scholten and Pim A. de Jong and Cornelia Schaefer-Prokop and Mathias Prokop and Bram van Ginneken}, + title = {Non-solid, part-solid or solid? Classification of pulmonary nodules in thoracic {CT} by radiologists and a computer-aided diagnosis system}, + booktitle = ECR, + year = {2013}, + abstract = {PURPOSE: Classifying pulmonary nodules into solid, part-solid and non-solid is crucial for patient management. A computer algorithm is compared to a radiologist on a large data set obtained from a multi-center lung cancer screening trial. METHOD AND MATERIALS: Low-dose chest CT scans (16x0.75mm, 120-140 kVp, 30 mAs) with part-solid, non-solid, and solid nodules with a diameter between 7 and 30 mm, were randomly selected from two sites participating in the Dutch-Belgian NELSON lung cancer screening trial. The set contained 137 scans, including 50 part-solid, 50 non-solid and 52 solid nodules. The nodule type recorded in the screening database was used as a reference standard. An automated classification system for characterization of nodules was designed using morphometric features. The accuracy of the computer algorithm was evaluated in three ways: classifying nodules (1) as solid or subsolid, (2) as solid, part-solid or non-solid, and, (3) for the subsolid lesions only, as part-solid or non-solid. An experienced thoracic radiologist independently performed the same classification. RESULTS: The accuracy of the automated system to differentiate between solid and subsolid nodules was 0.88, compared to 0.95 for the radiologist. The computer classified the nodules as solid, part-solid or non-solid with an accuracy of 0.72, versus 0.80 for the radiologist. The software reached an accuracy of 0.71 in differentiating part-solid from non-solid nodules, where the radiologist had an accuracy of 0.77. CONCLUSION: A novel automated characterization tool for pulmonary nodules shows promising performance and could aid radiologists in selecting the appropriate workup for pulmonary nodules.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Jaco13b, + author = {Colin Jacobs and Bram van Ginneken and Stephan Fromme and Mathias Prokop and Eva M van Rikxoort}, + title = {Benchmarking computer-aided detection of pulmonary nodules on the recently completed publicly available LIDC/IDRI database}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE The recently completed {LIDC/IDRI} database provides by far the largest public resource to assess the performance of algorithms for the detection of pulmonary nodules in thoracic {CT} scans. We report the performance of two detection systems, and address the issue of completeness of the reference standard. 
METHOD AND MATERIALS The {LIDC/IDRI} database contains 890 thoracic {CT} scans with section thickness of 2.5mm or lower, one per patient, from 7 centers acquired with 17 different scanner models from 4 manufacturers. Cases have been annotated in an extensive reading process comprising a blinded and an unblinded review by four radiologists who indicated all nodules <3mm and >3mm effective diameter. We define nodules >3mm indicated by all four observers as positive findings. We applied two pulmonary nodule detection systems: {H}erakles, an industry research prototype (MeVis Medical Solutions, Bremen, Germany) and {ISICAD} (Image Sciences Institute, Utrecht, The Netherlands), a system trained with data from the Dutch-Belgian {NELSON} lung cancer screening trial. We report sensitivity at 1, 2, and 4 false positive ({FP}) detections per scan and analyze the {FP}s. RESULTS The 890 scans contained 775 positive findings. At 1, 2, and 4 FP/scan, Herakles had a sensitivity of 69%, 75%, and 79%, respectively. For ISICAD this was 51%, 63%, 72%. We analyzed the {FP}s of {H}erakles at an operating point of 2 {FP}/scan. Of these, 31% were annotated by at least one radiologist as a nodule >3mm. An additional 17% were indicated by at least one radiologist as a nodule <3mm. A human expert visually inspected the remaining {FP}s using multiple slices of all three orthogonal views. A substantial part of these marks (41%) were located on nodular lesions that had not been indicated by any of the four radiologists involved in the annotation of the {LIDC/IDRI} data set . CONCLUSION The {LIDC/IDRI} data set is an excellent benchmarking tool for nodule detection algorithms. Automated detection can identify pulmonary nodules that have not been annotated in an extensive reading process with blinded and unblinded review by four human observers. CLINICAL RELEVANCE/APPLICATION Algorithms for automatic detection of pulmonary nodules can be compared and improved through the availability of a common database for benchmarking.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Jaco14, + author = {C. Jacobs and E.~M. van Rikxoort and T. Twellmann and E.~Th. Scholten and P.~A. de Jong and J.~M. Kuhnigk and M. Oudkerk and H.~J. de Koning and M. Prokop and C. Schaefer-Prokop and B. van Ginneken}, + title = {Automatic Detection of Subsolid Pulmonary Nodules in Thoracic Computed Tomography Images}, + journal = MIA, + year = {2014}, + volume = {18}, + pages = {374--384}, + doi = {10.1016/j.media.2013.12.001}, + abstract = {Subsolid pulmonary nodules occur less often than solid pulmonary nodules, but show a much higher malignancy rate. Therefore, accurate detection of this type of pulmonary nodules is crucial. In this work, a computer-aided detection (CAD) system for subsolid nodules in computed tomography images is presented and evaluated on a large data set from a multi-center lung cancer screening trial. The paper describes the different components of the CAD system and presents experiments to optimize the performance of the proposed CAD system. A rich set of 128 features is defined for subsolid nodule candidates. In addition to previously used intensity, shape and texture features, a novel set of context features is introduced. Experiments show that these features significantly improve the classification performance. Optimization and training of the CAD system is performed on a large training set from one site of a lung cancer screening trial. 
Performance analysis on an independent test from another site of the trial shows that the proposed system reaches a sensitivity of 80% at an average of only 1.0 false positive detections per scan. A retrospective analysis of the output of the CAD system by an experienced thoracic radiologist shows that the CAD system is able to find subsolid nodules which were not contained in the screening database.}, + file = {Jaco14.pdf:pdf\\Jaco14.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + pmid = {24434166}, + month = {2}, + gsid = {13872923651436646410}, + gscites = {233}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/136826}, + ss_id = {704081db7542b73bbdcefe9ae7e7dc19fe4316c0}, + all_ss_ids = {['704081db7542b73bbdcefe9ae7e7dc19fe4316c0']}, +} + +@inproceedings{Jaco14a, + author = {C. Jacobs and S. H. T. Opdam and E. M. van Rikxoort and O. M. Mets and J. Rooyackers and P. A. de Jong and M. Prokop and B. van Ginneken}, + title = {Automated detection and quantification of micronodules in thoracic {CT} scans to identify subjects at risk for silicosis}, + booktitle = MI, + year = {2014}, + volume = {9035}, + series = SPIE, + pages = {90351I}, + doi = {10.1117/12.2043536}, + abstract = {Silica dust-exposed individuals are at high risk of developing silicosis, a fatal and incurable lung disease. The presence of disseminated micronodules on thoracic CT is the radiological hallmark of silicosis but locating micronodules, to identify subjects at risk, is tedious for human observers. We present a computer-aided detection scheme to automatically find micronodules and quantify micronodule load. The system used lung segmentation, template matching, and feature analysis. The system achieved a promising sensitivity of 84% at an average of 8.4 false positive marks per scan. In an independent data set of 54 CT scans in which we defined four risk categories, the CAD system automatically classified 83% of subjects correctly, and obtained a weighted kappa of 0.76.}, + file = {Jaco14a.PDF:pdf\\Jaco14a.PDF:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {2867289361417330505}, + gscites = {2}, + ss_id = {93b9abde7345b389113dd9656f6d7b9a43f261bb}, + all_ss_ids = {['93b9abde7345b389113dd9656f6d7b9a43f261bb']}, +} + +@article{Jaco15, + author = {Jacobs, Colin and van Rikxoort, Eva M. and Scholten, Ernst Th and de Jong, Pim A. and Prokop, Mathias and Schaefer-Prokop, Cornelia and van Ginneken, Bram}, + title = {Solid, Part-Solid, or Non-solid?: Classification of Pulmonary Nodules in Low-Dose Chest Computed Tomography by a Computer-Aided Diagnosis System}, + journal = IR, + year = {2015}, + volume = {50}, + pages = {168-173}, + doi = {10.1097/RLI.0000000000000121}, + abstract = {The purpose of this study was to develop and validate a computer-aided diagnosis (CAD) tool for automatic classification of pulmonary nodules seen on low-dose computed tomography into solid, part-solid, and non-solid. Study lesions were randomly selected from 2 sites participating in the Dutch-Belgian NELSON lung cancer screening trial. On the basis of the annotations made by the screening radiologists, 50 part-solid and 50 non-solid pulmonary nodules with a diameter between 5 and 30 mm were randomly selected from the 2 sites. For each unique nodule, 1 low-dose chest computed tomographic scan was randomly selected, in which the nodule was visible. In addition, 50 solid nodules in the same size range were randomly selected. 
A completely automatic 3-dimensional segmentation-based classification system was developed, which analyzes the pulmonary nodule, extracting intensity-, texture-, and segmentation-based features to perform a statistical classification. In addition to the nodule classification by the screening radiologists, an independent rating of all nodules by 3 experienced thoracic radiologists was performed. Performance of CAD was evaluated by comparing the agreement between CAD and human experts and among human experts using the Cohen kappa statistics. Pairwise agreement for the differentiation between solid, part-solid, and non-solid nodules between CAD and each of the human experts had a kappa range between 0.54 and 0.72. The interobserver agreement among the human experts was in the same range (kappa range, 0.56-0.81). A novel automated classification tool for pulmonary nodules achieved good agreement with the human experts, yielding kappa values in the same range as the interobserver agreement. Computer-aided diagnosis may aid radiologists in selecting the appropriate workup for pulmonary nodules.}, + file = {Jaco15.pdf:pdf\\Jaco15.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + number = {3}, + pmid = {25478740}, + month = {3}, + gsid = {17914761474692895903}, + gscites = {46}, + ss_id = {1727e0a37cb9a85f73d0ca4f64616e9ab5044c99}, + all_ss_ids = {['1727e0a37cb9a85f73d0ca4f64616e9ab5044c99']}, +} + +@article{Jaco15a, + author = {Jacobs, Colin and {van Rikxoort}, Eva M. and Murphy, Keelin and Prokop, Mathias and Schaefer-Prokop, Cornelia M. and {van Ginneken}, Bram}, + title = {Computer-aided detection of pulmonary nodules: a comparative study using the public {LIDC/IDRI} database}, + journal = ER, + year = {2016}, + volume = {26}, + pages = {2139--2147}, + doi = {10.1007/s00330-015-4030-7}, + url = {http://dx.doi.org/10.1007/s00330-015-4030-7}, + abstract = {To benchmark the performance of state-of-the-art computer-aided detection (CAD) of pulmonary nodules using the largest publicly available annotated CT database (LIDC/IDRI), and to show that CAD finds lesions not identified by the LIDC's four-fold double reading process. The LIDC/IDRI database contains 888 thoracic CT scans with a section thickness of 2.5 mm or lower. We report performance of two commercial and one academic CAD system. The influence of presence of contrast, section thickness, and reconstruction kernel on CAD performance was assessed. Four radiologists independently analyzed the false positive CAD marks of the best CAD system. The updated commercial CAD system showed the best performance with a sensitivity of 82\% at an average of 3.1 false positive detections per scan. Forty-five false positive CAD marks were scored as nodules by all four radiologists in our study. On the largest publicly available reference database for lung nodule detection in chest CT, the updated commercial CAD system locates the vast majority of pulmonary nodules at a low false positive rate.
Potential for CAD is substantiated by the fact that it identifies pulmonary nodules that were not marked during the extensive four-fold LIDC annotation process.}, + file = {Jaco15a.pdf:pdf\\Jaco15a.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + pmid = {26443601}, + month = {10}, + gsid = {16131269046526513617}, + gscites = {101}, + ss_id = {67ae0419e41a69c4796b9e18bf6f59814784a933}, + all_ss_ids = {['67ae0419e41a69c4796b9e18bf6f59814784a933']}, +} + +@phdthesis{Jaco15b, + author = {Colin Jacobs}, + title = {Automatic detection and characterization of pulmonary nodules in thoracic {CT} images}, + year = {2015}, + url = {http://repository.ubn.ru.nl/handle/2066/145307}, + abstract = {Automatic analysis of thoracic CT scans has the potential to improve detection rate of pulmonary nodules, reduce interobserver variability and speed up evaluation of screening CT scans. This however strongly depends on the performance of CAD systems. If the performance is good enough, it will probably positively influence the cost-effectiveness of lung cancer CT screening. This thesis presents novel algorithms to find abnormalities in thoracic CT images, which may be of benefit in the interpretation of chest CTs in lung cancer screening or clinical practice in general. The main focus of the thesis is automatic detection of pulmonary nodules in thoracic CT. The outline of this thesis is as follows. Chapter 2 describes a novel computer-aided detection system for subsolid pulmonary nodules. Detection of subsolid nodules has increased due to the use of thin-slice CT and the implementation of lung cancer screening trials and as a consequence, their prevalence and malignancy rate are better understood. Subsolid nodules are less common, but show a higher malignancy rate than solid nodules. We present a novel subsolid CAD system which is trained and validated with data from two sites from the NELSON lung cancer screening trial. In Chapter 3, a nodule detection system is developed for the detection of micronodules in subjects at high-risk for developing silicosis. Chronic silicosis is radiologically characterized by widespread, well-defined solid pulmonary micronodules, measuring 3 mm or less. Early detection is crucial to stop progression but detection and quantification of these small nodules is tedious for human observers. We present an automatic method which finds micronodules and quantifies the micronodule load. Chapter 4 describes a study in which we perform a comparative study of state-of-the-art nodule CAD systems on the largest publicly available reference data set, containing more than 1,000 CT scans. We perform an extensive analysis of the performance of three state-of-the-art CAD systems; two commercial and one academic CAD system. In Chapter 5, an automatic classification system for pulmonary nodules detected on CT is presented. Every nodule is classified as either solid, part-solid, or non-solid. This classification is crucial for management of pulmonary nodules. To put the performance of CAD into context, we also assess the interobserver variability among radiologists in this chapter. In Chapter 6, a system for automatic detection of interval change on consecutive CT scans is proposed. Interval change is of major importance when subjects are screened repeatedly. We propose an automatic detection system based on subtraction images which are acquired by employing an elastic registration method between consecutive CT images. 
Chapter 7 discusses how the presented algorithms could be efficiently integrated into software that could be used in clinical routine or in CT lung screening programs.}, + copromotor = {E. M. van Rikxoort}, + file = {Jaco15b.pdf:pdf/Jaco15b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and C. M. Schaefer-Prokop}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, + all_ss_ids = {1dddfd64c4a40269d63014b21ed3ed436f38b98b}, + gscites = {1}, +} + +@article{Jaco19, + author = {Jacobs, Colin and van Ginneken, Bram}, + title = {Google's lung cancer AI: a promising tool that needs further validation}, + journal = _Nature_Reviews_Clinical_Oncology_, + year = {2019}, + volume = {16}, + issue = {9}, + pages = {532-533}, + doi = {10.1038/s41571-019-0248-7}, + url = {https://doi.org/10.1038/s41571-019-0248-7}, + abstract = {Researchers from Google AI have presented results obtained using a deep learning model for the detection of lung cancer in screening CT images. The authors report a level of performance similar to, or better than, that of radiologists. However, these claims are currently too strong. The model is promising but needs further validation and could only be implemented if screening guidelines were adjusted to accept recommendations from black-box proprietary AI systems.}, + file = {:pdf/Jaco19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31249401}, + month = {6}, + gsid = {317052302478593376}, + gscites = {26}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/209067}, + ss_id = {ec14da74fcbd65e013855cea561c2142c68f72fb}, + all_ss_ids = {['ec14da74fcbd65e013855cea561c2142c68f72fb']}, +} + +@conference{Jaco19a, + author = {Jacobs, Colin and Scholten, Ernst and Schreuder, Anton and Prokop, Mathias and van Ginneken, Bram}, + title = {An observer study comparing radiologists with the prize-winning lung cancer detection algorithms from the 2017 Kaggle Data Science Bowl}, + booktitle = RSNA, + year = {2019}, + abstract = {PURPOSE: The 2017 Kaggle Data Science Bowl challenge awarded 1 million dollars in prize money to develop computer algorithms for predicting, on the basis of a single low-dose screening CT scan, which individuals will be diagnosed with lung cancer within one year of the scan. Participating teams received a training set of around 1500 low-dose CT scans to develop and train their algorithms and final performance was measured on a test set of 500 scans, containing 151 lung cancer cases. Over 2000 teams submitted results. The best 10 algorithms all used deep learning and are freely available as open source code. To gain insight into how the performance of these algorithms compares to radiologists, we conducted an observer study including 11 readers who read 150 cases from the test set. METHOD AND MATERIALS: We randomly extracted 100 benign cases and 50 lung cancer cases from the test set of the challenge. Each algorithm scored each test case with a score between 0 (low) and 1 (high) for harboring a malignancy. We developed a web-accessible workstation in which human experts could review chest CT scans. The web workstation included the common tools found in a professional medical viewing workstation. We invited 11 readers, a mix of radiologists and radiology residents, to read these 150 CT cases and assign a score between 0 (low) and 100 (high) whether the patient will develop a lung cancer within one year of the presented scan. ROC analysis was used to compare the performance of the human readers with the algorithms. 
The primary outcome was area under the ROC curve. 95% confidence intervals were computed by 1000 bootstrap iterations and are reported between brackets. RESULTS: The mean area under the ROC curve for the human readers was 0.90 [0.85-0.94]. The mean area under the ROC curve for the algorithms was 0.86 [0.81-0.91]. The mean human reading time per case varied between 96 and 275 seconds. CONCLUSION: The top 10 algorithms from the Kaggle Data Science Bowl 2017 showed promising performance, but were still inferior to human readers. Future analysis will focus on understanding the strengths and weaknesses of the computer algorithms and the human readers and how these can be optimally combined. CLINICAL RELEVANCE/APPLICATION: Fully automatic algorithms using deep learning developed in a large-scale challenge show promising performance for lung cancer detection in chest CT, but performed inferior to radiologists in this subset of the test set.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Jaco19b, + author = {Jacobs, Colin and van Ginneken, Bram}, + title = {Deep learning for detection and characterization of lung nodules}, + booktitle = ESTI, + year = {2019}, + abstract = {BODY: The success of deep learning, the implementation of lung cancer screening, and recent public challenges have created a revived interest for research of automatic nodule detection and characterization algorithms. In this talk, I will give an overview of deep learning for nodule detection and characterization. Public challenges, such as the LUNA16 challenge and the Kaggle Data Science Bowl 2017 challenge, and publicly available databases have been drivers for development of novel deep learning algorithms and the performance of these novel algorithms is very promising. However, there are remaining challenges that need to be solved. Almost all approaches still rely on only one scan, while a radiologist typically looks at all available scans and the clinical history of the patient. Next to this, integration of these algorithms into clinical practice is still not easy, and validation of algorithms on larger datasets and real-world data are needed to further validate these algorithms. TAKE HOME POINTS: 1) LUNA16 and Kaggle Data Science Bowl 2017 have shown the potential for deep learning for nodule detection and deep learning. 2) Integration of these algorithms into clinical practice is still suboptimal. 3) Validation of these algorithms on larger and modern datasets is needed to further validate these algorithms.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Jaco21, + author = {Jacobs, Colin and Schreuder, Anton and van Riel, Sarah J. and Scholten, Ernst Th. and Wittenberg, Rianne and Winkler Wille, Mathilde M. and de Hoop, Bartjan and Sprengers, Ralf and Mets, Onno M. 
and Geurts, Bram and Prokop, Mathias and Schaefer-Prokop, Cornelia and van Ginneken, Bram}, + title = {Assisted versus Manual Interpretation of Low-Dose {CT} Scans for Lung Cancer Screening: Impact on {Lung-RADS} Agreement}, + journal = _Radiology_Imaging_Cancer_, + year = {2021}, + volume = {3}, + number = {5}, + pages = {e200160}, + doi = {10.1148/rycan.2021200160}, + file = {Jaco21.pdf:pdf\\Jaco21.pdf:PDF}, + abstract = {Purpose To compare the inter- and intraobserver agreement and reading times achieved when assigning Lung Imaging Reporting and Data System (Lung-RADS) categories to baseline and follow-up lung cancer screening studies by using a dedicated CT lung screening viewer with integrated nodule detection and volumetric support with those achieved by using a standard picture archiving and communication system (PACS)-like viewer. Materials and Methods Data were obtained from the National Lung Screening Trial (NLST). By using data recorded by NLST radiologists, scans were assigned to Lung-RADS categories. For each Lung-RADS category (1 or 2, 3, 4A, and 4B), 40 CT scans (20 baseline scans and 20 follow-up scans) were randomly selected for 160 participants (median age, 61 years; interquartile range, 58-66 years; 61 women) in total. Seven blinded observers independently read all CT scans twice in a randomized order with a 2-week washout period: once by using the standard PACS-like viewer and once by using the dedicated viewer. Observers were asked to assign a Lung-RADS category to each scan and indicate the risk-dominant nodule. Inter- and intraobserver agreement was analyzed by using Fleiss k values and Cohen weighted k values, respectively. Reading times were compared by using a Wilcoxon signed rank test. Results The interobserver agreement was moderate for the standard viewer and substantial for the dedicated viewer, with Fleiss k values of 0.58 (95\% CI: 0.55, 0.60) and 0.66 (95\% CI: 0.64, 0.68), respectively. The intraobserver agreement was substantial, with a mean Cohen weighted k value of 0.67. The median reading time was significantly reduced from 160 seconds with the standard viewer to 86 seconds with the dedicated viewer (P < .001). Conclusion Lung-RADS interobserver agreement increased from moderate to substantial when using the dedicated CT lung screening viewer. The median reading time was substantially reduced when scans were read by using the dedicated CT lung screening viewer.}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34559005}, + ss_id = {35af94bb3f5654ef60446d79e88621043fb7b543}, + all_ss_ids = {['35af94bb3f5654ef60446d79e88621043fb7b543']}, + gscites = {7}, +} + +@article{Jaco21a, + author = {Jacobs, Colin and Setio, Arnaud A.A. and Scholten, Ernst T. and Gerke, Paul K. and Bhattacharya, Haimasree and M. Hoesein, Firdaus A. and Brink, Monique and Ranschaert, Erik and de Jong, Pim A. and Silva, Mario and Geurts, Bram and Chung, Kaman and Schalekamp, Steven and Meersschaert, Joke and Devaraj, Anand and Pinsky, Paul F. and Lam, Stephen C. 
and van Ginneken, Bram and Farahani, Keyvan}, + title = {Deep Learning for Lung Cancer Detection in Screening CT Scans: Results of a Large-Scale Public Competition and an Observer Study with 11 Radiologists}, + journal = RAI, + year = {2021}, + volume = {3}, + number = {6}, + pages = {e210027}, + doi = {10.1148/ryai.2021210027}, + file = {Jaco21a.pdf:pdf\\Jaco21a.pdf:PDF}, + abstract = {Purpose To determine whether deep learning algorithms developed in a public competition could identify lung cancer on low-dose CT scans with a performance similar to radiologists. Materials and Methods In this retrospective study, a dataset consisting of 300 patient scans was used for model assessment; 150 patient scans were from the competition set and 150 were from an independent dataset. Both test datasets contained 50 patient scans with cancer and 100 without cancer. The reference standard was set by histopathological examination for cancer positive scans and imaging follow-up for at least 2 years for cancer negative scans. The test datasets were applied to the top three performing algorithms from the Data Science Bowl 2017 public competition (called grt123, Julian de Wit \& Daniel Hammack [JWDH] and Aidence). Model outputs were compared with an observer study of 11 radiologists that assessed the same test datasets. Each scan was scored on a continuous scale by both the deep learning algorithms and the radiologists. Performance was measured using multireader multicase receiver operating characteristic analysis. Results The area under the receiver operating characteristic curve (AUC) was 0.877 (95\% CI: 0.842, 0.910) for grt123, 0.902 (95\% CI: 0.871, 0.932) for JWDH, and 0.900 (95\% CI: 0.870, 0.928) for Aidence. The average AUC of the radiologists was 0.917 (95\% CI: 0.889, 0.945), which was significantly higher than grt123 (P = .02); however, no significant difference between the radiologists and JWDH (P = .29) or Aidence (P = .26) was found. Conclusion Deep learning algorithms developed in a public competition reached performance close to radiologists.}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34870218}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/245169}, + ss_id = {e91661654480905a880033a9f2266343736ea7bd}, + all_ss_ids = {['e91661654480905a880033a9f2266343736ea7bd']}, + gscites = {23}, +} + +@article{Jaco23, + author = {Jacobs, Colin}, + title = {Challenges and outlook in the management of pulmonary nodules detected on CT}, + journal = ER, + year = {2023}, + doi = {10.1007/s00330-023-10065-9}, + file = {Jaco23.pdf:pdf\\Jaco23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {37540316}, + ss_id = {574577389a89314f2159ca9580264d0c7544257c}, + all_ss_ids = {['574577389a89314f2159ca9580264d0c7544257c']}, + gscites = {0}, +} + +@article{Jara15, + author = {Jaramillo, Joshua D. and Wilson, Carla and Stinson, Douglas S. and Stinson, Douglas J. and Lynch, David A. and Bowler, Russell P. and Lutz, Sharon and Bon, Jessica M. and Arnold, Ben and McDonald, Merry-Lynn N. and Washko, George R. and Wan, Emily S. and DeMeo, Dawn L. and Foreman, Marilyn G. and Soler, Xavier and Lindsay, Sarah E. and Lane, Nancy E. and Genant, Harry K. and Silverman, Edwin K. and Hokanson, John E. and Make, Barry J. and Crapo, James D. and Regan, Elizabeth A. and {COPD Gene Investigators}}, + title = {Reduced Bone Density and Vertebral Fractures in Smokers. 
Men and COPD Patients at Increased Risk}, + journal = AATC, + year = {2015}, + volume = {12}, + number = {5}, + month = {5}, + pages = {648--656}, + doi = {10.1513/AnnalsATS.201412-591OC}, + url = {http://dx.doi.org/10.1513/AnnalsATS.201412-591OC}, + abstract = {Former smoking history and chronic obstructive pulmonary disease (COPD) are potential risk factors for osteoporosis and fractures. Under existing guidelines for osteoporosis screening, women are included but men are not, and only current smoking is considered.To demonstrate the impact of COPD and smoking history on the risk of osteoporosis and vertebral fracture in men and women.Characteristics of participants with low volumetric bone mineral density (vBMD) were identified and related to COPD and other risk factors. We tested associations of sex and COPD with both vBMD and fractures adjusting for age, race, body mass index (BMI), smoking, and glucocorticoid use.vBMD by calibrated quantitative computed tomography (QCT), visually scored vertebral fractures, and severity of lung disease were determined from chest CT scans of 3,321 current and ex-smokers in the COPDGene study. Low vBMD as a surrogate for osteoporosis was calculated from young adult normal values. Male smokers had a small but significantly greater risk of low vBMD (2.5 SD below young adult mean by calibrated QCT) and more fractures than female smokers. Low vBMD was present in 58\% of all subjects, was more frequent in those with worse COPD, and rose to 84\% among subjects with very severe COPD. Vertebral fractures were present in 37\% of all subjects and were associated with lower vBMD at each Global Initiative for Chronic Obstructive Lung Disease stage of severity. Vertebral fractures were most common in the midthoracic region. COPD and especially emphysema were associated with both low vBMD and vertebral fractures after adjustment for steroid use, age, pack-years of smoking, current smoking, and exacerbations. Airway disease was associated with higher bone density after adjustment for other variables. Calibrated QCT identified more subjects with abnormal values than the standard dual-energy X-ray absorptiometry in a subset of subjects and correlated well with prevalent fractures.Male smokers, with or without COPD, have a significant risk of low vBMD and vertebral fractures. COPD was associated with low vBMD after adjusting for race, sex, BMI, smoking, steroid use, exacerbations, and age. Screening for low vBMD by using QCT in men and women who are smokers will increase opportunities to identify and treat osteoporosis in this at-risk population.}, + file = {Jara15.pdf:pdf\\Jara15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25719895}, +} + +@article{Jark22, + author = {Jarkman, Sofia and Karlberg, Micael and Poceviciute, Milda and Boden, Anna and Bandi, Peter and Litjens, Geert and Lundstrom, Claes and Treanor, Darren and van der Laak, Jeroen}, + year = {2022}, + month = {11}, + journal = CANCERS, + title = {Generalization of Deep Learning in Digital Pathology: Experience in Breast Cancer Metastasis Detection.}, + doi = {10.3390/cancers14215424}, + issue = {21}, + volume = {14}, + abstract = {Poor generalizability is a major barrier to clinical implementation of artificial intelligence in digital pathology. The aim of this study was to test the generalizability of a pretrained deep learning model to a new diagnostic setting and to a small change in surgical indication. 
A deep learning model for breast cancer metastases detection in sentinel lymph nodes, trained on CAMELYON multicenter data, was used as a base model, and achieved an AUC of 0.969 (95% CI 0.926-0.998) and FROC of 0.838 (95% CI 0.757-0.913) on CAMELYON16 test data. On local sentinel node data, the base model performance dropped to AUC 0.929 (95% CI 0.800-0.998) and FROC 0.744 (95% CI 0.566-0.912). On data with a change in surgical indication (axillary dissections) the base model performance indicated an even larger drop with a FROC of 0.503 (95%CI 0.201-0.911). The model was retrained with addition of local data, resulting in about a 4% increase for both AUC and FROC for sentinel nodes, and an increase of 11% in AUC and 49% in FROC for axillary nodes. Pathologist qualitative evaluation of the retrained model's output showed no missed positive slides. False positives, false negatives and one previously undetected micro-metastasis were observed. The study highlights the generalization challenge even when using a multicenter trained model, and that a small change in indication can considerably impact the model's performance.}, + file = {:pdf/Jark22.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {36358842}, + ss_id = {c90825e6da28cd71598d38dacf09cd1023ad1a74}, + all_ss_ids = {['c90825e6da28cd71598d38dacf09cd1023ad1a74']}, + gscites = {2}, +} + +@conference{Jiao21, + author = {Yiping Jiao and Mart Rijthoven and Junhong Li and Katrien Grunberg and Shumin Fei and Francesco Ciompi}, + booktitle = {European Congress on Digital Pathology (ECDP)}, + year = {2021}, + title = {Automatic Lung Cancer Segmentation in Histopathology Whole-Slide Images with Deep Learning}, +} + +@article{Johk21, + author = {Johkoh, Takeshi and Lee, Kyung Soo and Nishino, Mizuki and Travis, William D. and Ryu, Jay H. and Lee, Ho Yun and Ryerson, Christopher J. and Franquet, Tom\'{a}s and Bankier, Alexander A. and Brown, Kevin K. and Goo, Jin Mo and Kauczor, Hans-Ulrich and Lynch, David A. and Nicholson, Andrew G. and Richeldi, Luca and Schaefer-Prokop, Cornelia M. and Verschakelen, Johny and Raoof, Suhail and Rubin, Geoffrey D. and Powell, Charles and Inoue, Yoshikazu and Hatabu, Hiroto}, + title = {Chest CT Diagnosis and Clinical Management of Drug-Related Pneumonitis in Patients Receiving Molecular Targeting Agents and Immune Checkpoint Inhibitors}, + doi = {10.1016/j.chest.2020.11.027}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.chest.2020.11.027}, + file = {Johk21.pdf:pdf\Johk21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Chest}, + citation-count = {45}, + automatic = {yes}, + pages = {1107-1125}, + volume = {159}, +} + +@article{Jong10, + author = {P. A. de Jong and J. A. Achterberg and O. A. M. Kessels and B. van Ginneken and L. Hogeweg and F. J. Beek and S. W. J. Terheggen-Lagro}, + title = {Modified {C}hrispin-{N}orman chest radiography score for cystic fibrosis: observer agreement and correlation with lung function}, + journal = ER, + year = {2011}, + volume = {21}, + pages = {722--729}, + doi = {10.1007/s00330-010-1972-7}, + abstract = {OBJECTIVE: To test observer agreement and two strategies for possible improvement (consensus meeting and reference images) for the modified Chrispin-Norman score for children with cystic fibrosis (CF). METHODS: Before and after a consensus meeting and after developing reference images three observers scored sets of 25 chest radiographs from children with CF. 
Observer agreement was tested for line, ring, mottled and large soft shadows, for overinflation and for the composite modified Chrispin-Norman score. Correlation with lung function was assessed. RESULTS: Before the consensus meeting agreement between observers 1 and 2 was moderate-good, but with observer 3 agreement was poor-fair. Scores correlated significantly with spirometry for observers 1 and 2 (-0.72$350 ml of target lobe volume reduction after EBV treatment. Using a receiver operating characteristic curve, optimal thresholds predictive of complete fissures (responders) and incomplete fissures (non-responders) were determined. A subgroup of patients with partially complete fissures was identified, where software had lower accuracy. The complementary value of Chartis was investigated in this group. A fissure was defined as complete (FCS >95\%), incomplete (FCS <80\%), or partially complete (80\% < FCS < 95\%). The positive predictive value (PPV) of complete fissures is 88.1\%, and the negative predictive value (NPV) is 92.9\%, with an overall accuracy of 89.2\%. Chartis was utilized in patients with partially complete fissures, with a PPV of 82.3\%, an NPV of 84.6\%, and an accuracy of 83.3\%. Combining diagnostic tools could reduce the burden on patients and the healthcare system while providing clinicians with a better means for patient selection for EBV therapy.}, + file = {Kost16.pdf:pdf\\Kost16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27577190}, + gsid = {10964824180916530041}, + gscites = {55}, +} + +@inproceedings{Kost16a, + author = {Kost, Henning and Homeyer, Andr\'{e} and Bult, Peter and Balkenhol, Maschenka C. A. and van der Laak, Jeroen A. W. M. and Hahn, Horst K.}, + title = {A generic nuclei detection method for histopathological breast images}, + doi = {10.1117/12.2209613}, + year = {2016}, + abstract = {The detection of cell nuclei plays a key role in various histopathological image analysis problems. Considering the high variability of its applications, we propose a novel generic and trainable detection approach. Adaption to specific nuclei detection tasks is done by providing training samples. A trainable deconvolution and classification algorithm is used to generate a probability map indicating the presence of a nucleus. The map is processed by an extended watershed segmentation step to identify the nuclei positions. We have tested our method on data sets with different stains and target nuclear types. We obtained F1-measures between 0.83 and 0.93.}, + url = {http://dx.doi.org/10.1117/12.2209613}, + file = {Kost16a.pdf:pdf\Kost16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {SPIE Proceedings}, + citation-count = {2}, + automatic = {yes}, +} + +@article{Kox11, + author = {Kox, M. and Pompe, J.C. and Peters, E. and Vaneker, M. and van der Laak, J.W. and van der Hoeven, J.G. and Scheffer, G.J. and Hoedemaekers, C.W. and Pickkers, P.}, + title = {α7 Nicotinic acetylcholine receptor agonist GTS-21 attenuates ventilator-induced tumour necrosis factor-α production and lung injury}, + doi = {10.1093/bja/aer202}, + number = {4}, + pages = {559-566}, + volume = {107}, + abstract = {Background. Mechanical ventilation (MV) induces an inflammatory response that can lead to lung injury. The vagus nerve can limit the inflammatory response through the cholinergic anti-inflammatory pathway.
We evaluated the effects of stimulation of the cholinergic anti-inflammatory pathway with the selective partial α7 nicotinic acetylcholine receptor (α7nAChR) agonist GTS-21 on inflammation and lung injury induced by MV using clinically relevant ventilator settings. Furthermore, we investigated whether altering endogenous cholinergic signalling, by administration of the non-specific nAChR antagonist mecamylamine and the peripherally acting acetylcholinesterase inhibitor neostigmine, modulates the MV-induced inflammatory response. Methods. C57BL6 mice were injected i.p. with either the selective α7nAChR agonist GTS-21 (8 mg kg-1), the acetylcholinesterase inhibitor neostigmine (80 mg kg-1), the nAChR antagonist mecamylamine (1 mg kg-1), or a placebo; followed by 4 h of MV (8 ml kg-1, 1.5 cm H2O PEEP). Results. MV resulted in release of cytokines in plasma and lungs compared with unventilated mice. Lung and plasma levels of tumour necrosis factor (TNF)-α, but not of interleukin-10, were lower in GTS-21-treated animals compared with placebo (P<0.05). In addition, GTS-21 lowered the alveolar-arterial gradient, indicating improved lung function (P = 0.04). Neither neostigmine nor mecamylamine had an effect on MV-induced inflammation or lung function. Conclusions. MV with clinically relevant ventilator settings results in pulmonary and systemic inflammation. Stimulation of the cholinergic anti-inflammatory pathway with GTS-21 attenuates MV-induced release of TNF-α, which was associated with reduced lung injury. Modulation of endogenous cholinergic signalling did not affect the MV-induced inflammatory response. Selective stimulation of the cholinergic anti-inflammatory pathway may represent new treatment options for MV-induced lung injury.}, + file = {Kox11:pdf\\kox11:PDF}, + journal = BJA, + month = {4}, + optnote = {DIAG}, + year = {2011}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/98553}, +} + +@mastersthesis{Kraa17, + author = {Kraamwinkel, N.}, + title = {Automatic Liver Lesion Segmentation in Abdominal {CT} Scans: Exploring Cascaded 2D and 2.5D U-Net Approaches}, + year = {2017}, + url = {http://www.scriptiesonline.uba.uva.nl/en/scriptie/639430}, + abstract = {Automatic segmentation of liver lesions could be an important advancement for patients and radiologists to further improve early diagnosis and treatment. To stimulate the development of such an automation, researchers are currently exploring deep learning approaches. In this paper we developed and experimented with two cascaded fully convolutional neural network (FCN) approaches that work in 2D and 2.5D. The first U-Net focused on providing a liver prediction mask which was subsequently utilized as additional input for the second U-Nets. In these U-Nets, one received the liver prediction mask as an additional input channel, and the other utilized the same mask to discard the non-liver background. The networks were trained and tested on the Liver Tumor Segmentation Challenge (LiTS) dataset, consisting of 201 contrast-enhanced abdominal CT studies. Results of the first FCN yielded 95% Dice score for the liver segmentation on the validation set. The U-Net with 3 slice input and masked-out non-liver background was the best performing network, and obtained 0.563 Dice score on the LiTS test set. Overall, both cascaded FCN approaches were found very promising performance-wise in comparison to a single slice input without information from the liver prediction mask.
Further improvements can be made by addressing the qualitatively derived segmentation challenges as well as improving the networks by exploring the implementation of ResNet connections and additional post-processing steps.}, + file = {Kraa17.pdf:pdf/Kraa17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Universiteit van Amsterdam}, + journal = {Master thesis}, +} + +@conference{Kuij10, + author = {van Kuijk, J.P. and R. Manniesing and D. Goei and Y. R. B. M. {van Gestel} and S. E. Hoeks and M. Koek and J. J. Hermans and W.J. Niessen and H.J.M. Verhagen and D. Poldermans}, + title = {Automatic Quantification of Abdominal Visceral Fat Content: a True Marker for Metabolic Syndrome?}, + booktitle = {European Society of Cardiology}, + year = {2010}, + abstract = {Purpose: Metabolic syndrome (MetS) is a widely accepted concept that identifies the centrally obese patient with increased risk for cardiovascular disease. However, most studies indicate that the syndrome does not predict cardiovascular disease any better than the sum of its components. In addition, the single components seem to be interrelated. Therefore, we evaluated the use of automatic quantification of abdominal visceral fat content as a new marker of MetS, the single components and long-term cardiac survival. Methods: Prior to aortic aneurysm repair, 106 patients underwent computed tomography (CT) scanning of the abdominal aorta. Using a fully automated and intrinsically CT-3D image analysis method, visceral fat volume was calculated. MetS was defined according the Adult Treatment Panel III report (ATP III) and central obesity as body-mass-index (BMI) >30 kg/m2. Correlations between visceral fat volumes, MetS, the single components and long-term cardiac survival were investigated. Median follow-up was 2.6 years (IQR 1.2-3.6). Results: Metabolic syndrome was diagnosed in 46 (43%) patients. Increased visceral fat volume was not associated with the presence of MetS (p=0.26). In contrast, visceral fat volumes were significantly correlated with BMI and HDL-cholesterol. (Pearson's coefficient: BMI: r=0.51, HDL: r=-0.35). During follow-up 9 (8%) patients died. Increased visceral fat volume was significantly associated with increased mortality risk (p=0.03) (Table). Conclusion: The automatic quantified abdominal visceral fat content was not correlated with MetS. However, increased visceral fat volume seems to be a true marker of increased BMI, reduced HDL-cholesterol and increased long-term cardiac mortality.}, + file = {Kuij10.pdf:pdf/Kuij10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Laak00, + author = {van der Laak, J. A. W. M. and Pahlplatz, M. M. and Hanselaar, A. G. and de Wilde, P. C.}, + title = {Hue-saturation-density (HSD) model for stain recognition in digital images from transmitted light microscopy}, + issue = {4}, + pages = {275--284}, + volume = {39}, + abstract = {Transmitted light microscopy is used in pathology to examine stained tissues. Digital image analysis is gaining importance as a means to quantify alterations in tissues. A prerequisite for accurate and reproducible quantification is the possibility to recognise stains in a standardised manner, independently of variations in the staining density. The usefulness of three colour models was studied using data from computer simulations and experimental data from an immuno-doublestained tissue section. Direct use of the three intensities obtained by a colour camera results in the red-green-blue (RGB) model.
By decoupling the intensity from the RGB data, the hue-saturation-intensity (HSI) model is obtained. However, the major part of the variation in perceived intensities in transmitted light microscopy is caused by variations in staining density. Therefore, the hue-saturation-density (HSD) transform was defined as the RGB to HSI transform, applied to optical density values rather than intensities for the individual RGB channels. In the RGB model, the mixture of chromatic and intensity information hampers standardisation of stain recognition. In the HSI model, mixtures of stains that could be distinguished from other stains in the RGB model could not be separated. The HSD model enabled all possible distinctions in a two-dimensional, standardised data space. In the RGB model, standardised recognition is only possible by using complex and time-consuming algorithms. The HSI model is not suitable for stain recognition in transmitted light microscopy. The newly derived HSD model was found superior to the existing models for this purpose.}, + file = {Laak00.pdf:pdf\\Laak00.pdf:PDF}, + journal = Cytometry, + month = apr, + optnote = {DIAG}, + pmid = {10738280}, + year = {2000}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/144809}, + ss_id = {57e693a3e04b766b142f72df5c6c6767620fb369}, + all_ss_ids = {['57e693a3e04b766b142f72df5c6c6767620fb369']}, + gscites = {103}, +} + +@article{Laak19, + author = {van der Laak, Jeroen and Ciompi, Francesco and Litjens, Geert}, + title = {No pixel-level annotations needed}, + journal = NBE, + year = {2019}, + volume = {3}, + issue = {11}, + month = {10}, + pages = {855-856}, + doi = {10.1038/s41551-019-0472-6}, + url = {https://doi.org/10.1038/s41551-019-0472-6}, + file = {Laak19.pdf:pdf\\Laak19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31624355}, + gsid = {13408433721865759246}, + gscites = {11}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/215679}, + ss_id = {48547eb6d7527cf31f47cd77cfec4af159a23a51}, + all_ss_ids = {['48547eb6d7527cf31f47cd77cfec4af159a23a51']}, +} + +@article{Laak21, + author = {van der Laak, Jeroen and Litjens, Geert and Ciompi, Francesco}, + title = {Deep learning in histopathology: the path to the clinic.}, + doi = {10.1038/s41591-021-01343-4}, + issue = {5}, + pages = {775--784}, + volume = {27}, + abstract = {Machine learning techniques have great potential to improve medical diagnostics, offering ways to improve accuracy, reproducibility and speed, and to ease workloads for clinicians. In the field of histopathology, deep learning algorithms have been developed that perform similarly to trained pathologists for tasks such as tumor detection and grading. However, despite these promising results, very few algorithms have reached clinical implementation, challenging the balance between hope and hype for these new techniques. This Review provides an overview of the current state of the field, as well as describing the challenges that still need to be addressed before artificial intelligence in histopathology can achieve clinical value.}, + file = {:pdf/Laak21.pdf:PDF}, + journal = NATMED, + month = may, + pmid = {33990804}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235736}, + ss_id = {7d873a9c49d3864709aa762f8740edcdbd7369c5}, + all_ss_ids = {['7d873a9c49d3864709aa762f8740edcdbd7369c5']}, + gscites = {305}, +} + +@article{Laak22, + author = {van der Laak, Jeroen A. and Gr\"{u}nberg, Katrien and Frisk, Anna-Lena A. 
and Moulin, Pierre}, + title = {BUILDING AN E.U.-SCALE DIGITAL PATHOLOGY REPOSITORY: THE BIGPICTURE INITIATIVE}, + doi = {10.1016/j.jpi.2022.100026}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.jpi.2022.100026}, + file = {Laak22.pdf:pdf\Laak22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Pathology Informatics}, + citation-count = {0}, + automatic = {yes}, + pages = {100026}, + volume = {13}, +} + +@article{Laar23, + author = {Laarhuis, Babette I. and Janssen, Marcel J. R. and Simons, Michiel and van Kalmthout, Ludwike W. M. and van der Doelen, Maarten J. and Peters, Steffie M. B. and Westdorp, Harm and van Oort, Inge M. and Litjens, Geert and Gotthardt, Martin and Nagarajah, James and Mehra, Niven and Prive, Bastiaan M.}, + year = {2023}, + month = {4}, + journal = CGC, + title = {Tumoral Ki67 and PSMA Expression in Fresh Pre-PSMA-RLT Biopsies and Its Relation With PSMA-PET Imaging and Outcomes of PSMA-RLT in Patients With mCRPC.}, + doi = {10.1016/j.clgc.2023.04.003}, + abstract = {Prostate specific membrane antigen (PSMA) directed radioligand therapy (RLT) is a novel therapy for metastatic castration-resistant prostate cancer (mCRPC) patients. However, it is still poorly understood why approximately 40% of the patients do not respond to PSMA-RLT. The aims of this study were to evaluate the pretreatment PSMA expression on immunohistochemistry (IHC) and PSMA uptake on PET/CT imaging in mCRPC patients who underwent PSMA-RLT. We correlated these parameters and a cell proliferation marker (Ki67) to the therapeutic efficacy of PSMA-RLT. In this retrospective study, mCRPC patients who underwent PSMA-RLT were analyzed. Patients' biopsies were scored for immunohistochemical Ki67 expression, PSMA staining intensity and percentage of cells with PSMA expression. Moreover, the PSMA tracer uptake of the tumor lesion(s) and healthy organs on PET/CT imaging was assessed. The primary outcome was to evaluate the association between histological PSMA protein expression of tumor in pre-PSMA-RLT biopsies and the PSMA uptake on PSMA PET/CT imaging of the biopsied lesion. Secondary outcomes were to assess the relationship between PSMA expression and Ki67 on IHC and the progression free survival (PFS) and overall survival (OS) following PSMA-RLT. In total, 22 mCRPC patients were included in this study. Nineteen (86%) patients showed a high and homogenous PSMA expression of >80% on IHC. Three (14%) patients had low PSMA expression on IHC. Although there was limited PSMA uptake on PET/CT imaging, these 3 patients had lower PSMA uptake on PET/CT imaging compared to the patients with high PSMA expression on IHC. Yet, no correlation was found between PSMA uptake on PET/CT imaging and PSMA expression on IHC (SUVmax: R = 0.046 and SUVavg: R = 0.036). The 3 patients had a shorter PFS compared to the patients with high PSMA expression on IHC (HR: 4.76, 95% CI: 1.14-19.99; P = .033). Patients with low Ki67 expression had a longer PFS and OS compared to patients with a high Ki67 expression (HR: 0.40, 95% CI: 0.15-1.06; P = .013). CONCLUSION: The PSMA uptake on PSMA-PET/CT generally followed the PSMA expression on IHC. However, heterogeneity may be missed on PSMA-PET/CT. Immunohistochemical PSMA and Ki67 expression in fresh tumor biopsies may contribute to predict treatment efficacy of PSMA-RLT in mCRPC patients. 
This needs to be further explored in prospective cohorts.}, + file = {:pdf/Laar23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {37164814}, + ss_id = {97c5ca5dd59f564c88a0f36e74a4f55c1ae66b79}, + all_ss_ids = {['97c5ca5dd59f564c88a0f36e74a4f55c1ae66b79']}, + gscites = {0}, +} + +@article{Laba17, + author = {Laban, S and Giebel, G and Kl\"umper, N and Schr\"ock, A and Doescher, J and Spagnoli, G and Thierauf, J and Theodoraki, M and Remark, R and Gnjatic, S and Krupar, R and Sikora, A and Litjens, G and Grabe, N and Kristiansen, G and Bootz, F and Schuler, P.J. and Brunner, C and Br\"agelmann, J and Hoffmann, T.K. and Perner, S}, + title = {{MAGE} expression in head and neck squamous cell carcinoma primary tumors, lymph node metastases and respective recurrences: implications for immunotherapy}, + journal = Oncotarget, + year = {2017}, + volume = {8}, + month = {1}, + pages = {14719--14735}, + doi = {10.18632/oncotarget.14830}, + abstract = {Melanoma associated antigens (MAGE) are potential targets for immunotherapy and have been associated with poor overall survival (OS) in head and neck squamous cell carcinoma (HNSCC). However, little is known about MAGE in lymph node metastases (LNM) and recurrent disease (RD) of HNSCC. To assess whether MAGE expression increases with metastasis or recurrence, a tissue microarray (TMA) of 552 primary tumors (PT), 219 LNM and 75 RD was evaluated by immunohistochemistry for MAGE antigens using three monoclonal antibodies to multiple MAGE family members. Mean expression intensity (MEI) was obtained from triplicates of each tumor specimen. The median MEI compared between PT, LNM and RD was significantly higher in LNM and RD. In paired samples, MEI was comparable in PT to respective LNM, but significantly different from RD. Up to 25% of patients were negative for pan-MAGE or MAGE-A3/A4 in PT, but positive in RD. The prognostic impact of MAGE expression was validated in the TMA cohort and also in TCGA data (mRNA). OS was significantly lower for patients expressing pan-MAGE or MAGE-A3/A4 in both independent cohorts. MAGE expression was confirmed as a prognostic marker in HNSCC and may be important for immunotherapeutic strategies as a shared antigen.}, + file = {Laba17.pdf:pdf\\Laba17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28146422}, + gsid = {16955025695820045228}, + gscites = {17}, + ss_id = {510bebf1faa812da5e70ce18eedfa768f0077a38}, + all_ss_ids = {['510bebf1faa812da5e70ce18eedfa768f0077a38']}, +} + +@article{Labus22a, + author = {Sandra Labus and Martin M. Altmann and Henkjan Huisman and Angela Tong and Tobias Penzkofer and Moon Hyung Choi and Ivan Shabunin and David J. Winkel and Pengyi Xinga and Dieter H. Szolar and Steven M. Shea and Robert Grimm and Heinrich von Busch and Ali Kamen and Thomas Herold and Clemens Baumann}, + title = {A concurrent, deep learning-based computer-aided detection system for prostate multiparametric MRI: a performance study involving experienced and less-experienced radiologists}, + journal = ER, + year = {2022}, + doi = {10.1007/s00330-022-08978-y}, + url = {https://link.springer.com/article/10.1007/s00330-022-08978-y}, + abstract = {OBJECTIVES: To evaluate the effect of a deep learning-based computer-aided diagnosis (DL-CAD) system on experienced and less-experienced radiologists in reading prostate mpMRI. METHODS: In this retrospective, multi-reader multi-case study, a consecutive set of 184 patients examined between 01/2018 and 08/2019 were enrolled. 
Ground truth was combined targeted and 12-core systematic transrectal ultrasound-guided biopsy. Four radiologists, two experienced and two less-experienced, evaluated each case twice, once without (DL-CAD-) and once assisted by DL-CAD (DL-CAD+). ROC analysis, sensitivities, specificities, PPV and NPV were calculated to compare the diagnostic accuracy for the diagnosis of prostate cancer (PCa) between the two groups (DL-CAD- vs. DL-CAD+). Spearman's correlation coefficients were evaluated to assess the relationship between PI-RADS category and Gleason score (GS). Also, the median reading times were compared for the two reading groups. RESULTS: In total, 172 patients were included in the final analysis. With DL-CAD assistance, the overall AUC of the less-experienced radiologists increased significantly from 0.66 to 0.80 (p = 0.001; cutoff ISUP GG >= 1) and from 0.68 to 0.80 (p = 0.002; cutoff ISUP GG >= 2). Experienced radiologists showed an AUC increase from 0.81 to 0.86 (p = 0.146; cutoff ISUP GG >= 1) and from 0.81 to 0.84 (p = 0.433; cutoff ISUP GG >= 2). Furthermore, the correlation between PI-RADS category and GS improved significantly in the DL-CAD + group (0.45 vs. 0.57; p = 0.03), while the median reading time was reduced from 157 to 150 s (p = 0.023). CONCLUSIONS: DL-CAD assistance increased the mean detection performance, with the most significant benefit for the less-experienced radiologist; with the help of DL-CAD less-experienced radiologists reached performances comparable to that of experienced radiologists.}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {ce59572dd6919387c5260721bb7b070ca7ebf444}, + all_ss_ids = {['ce59572dd6919387c5260721bb7b070ca7ebf444']}, + gscites = {10}, +} + +@article{Lang18, + author = {de Lange, S V and Bakker, M F and Monninkhof, E M and Peeters, P H M and de Koekkoek-Doll, P K and Mann, R M and Rutten, M J C M and Bisschops, R H C and Veltman, J and Duvivier, K M and Lobbes, M B I and de Koning, H J and Karssemeijer, N and Pijnappel, R M and Veldhuis, W B and van Gils, C H}, + title = {Reasons for (non)participation in supplemental population-based MRI breast screening for women with extremely dense breasts}, + journal = CLINR, + year = {2018}, + volume = {73}, + number = {8}, + month = {8}, + pages = {759e1-759e9}, + doi = {10.1016/j.crad.2018.04.002}, + abstract = {To determine the willingness of women with extremely dense breasts to undergo breast cancer screening with magnetic resonance imaging (MRI) in a research setting, and to examine reasons for women to participate or not. Between 2011 and 2015, 8,061 women (50-75 years) were invited for supplemental MRI as part of the Dense Tissue and Early Breast Neoplasm Screening (DENSE) trial (ClinicalTrials.gov Identifier: NCT01315015), after a negative screening mammography in the national population-based mammography screening programme. Demographics of participants and non-participants were compared. All invitees were asked to report reasons for (non)participation. Ethical approval was obtained. Participants provided written informed consent. Of the 8,061 invitees, 66% answered that they were interested, and 59% eventually participated. Participants were on average 54-years old (interquartile range: 51-59 years), comparable to women with extremely dense breasts in the population-based screening programme (55 years). Women with higher socio-economic status (SES) were more often interested in participation than women with lower SES (68% versus 59%, p<0.001). 
The most frequently stated reasons for non-participation were "MRI-related inconveniences and/or self-reported contraindications to MRI" (27%) and "anxiety regarding the result of supplemental screening" (21%). "Expected personal health benefit" (68%) and "contribution to science" (43%) were the most frequent reasons for participation. Of women invited for MRI because of extremely dense breasts, 59% participated. Common reasons for non-participation were "MRI-related inconveniences" and "anxiety regarding the result of supplemental screening". In case of future implementation, availability of precise evidence on benefits and harms might reduce this anxiety.}, + file = {:pdf/Lang18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29759590}, + gsid = {16489935243904182211}, + gscites = {19}, + ss_id = {db7c0e95188f028e4899642d1510c600e7fb44f2}, + all_ss_ids = {['db7c0e95188f028e4899642d1510c600e7fb44f2']}, +} + +@article{Lark18, + author = {Larkin, James R and Simard, Manon A and Khrapitchev, Alexandre A and Meakin, James A and Okell, Thomas W and Craig, Martin and Ray, Kevin J and Jezzard, Peter and Chappell, Michael A and Sibson, Nicola R}, + title = {Quantitative blood flow measurement in rat brain with multiphase arterial spin labelling magnetic resonance imaging}, + doi = {10.1177/0271678x18756218}, + year = {2018}, + abstract = { Cerebral blood flow is an important parameter in many diseases and functional studies that can be accurately measured in humans using arterial spin labelling (ASL) MRI. However, although rat models are frequently used for preclinical studies of both human disease and brain function, rat CBF measurements show poor consistency between studies. This lack of reproducibility is due, partly, to the smaller size and differing head geometry of rats compared to humans, as well as the differing analysis methodologies employed and higher field strengths used for preclinical MRI. To address these issues, we have implemented, optimised and validated a multiphase pseudo-continuous ASL technique, which overcomes many of the limitations of rat CBF measurement. Three rat strains (Wistar, Sprague Dawley and Berlin Druckrey IX) were used, and CBF values validated against gold-standard autoradiography measurements. Label positioning was found to be optimal at 45deg, while post-label delay was optimised to 0.55 s. Whole brain CBF measures were 109 +- 22, 111 +- 18 and 100 +- 15 mL/100 g/min by multiphase pCASL, and 108 +- 12, 116 +- 14 and 122 +- 16 mL/100 g/min by autoradiography in Wistar, SD and BDIX cohorts, respectively. Tumour model analysis shows that the developed methods also apply in disease states. Thus, optimised multiphase pCASL provides robust, reproducible and non-invasive measurement of CBF in rats. }, + url = {http://dx.doi.org/10.1177/0271678X18756218}, + file = {Lark18.pdf:pdf\Lark18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Cerebral Blood Flow & Metabolism}, + citation-count = {27}, + automatic = {yes}, + pages = {1557-1569}, + volume = {39}, +} + +@inproceedings{Lass11, + author = {B. Lassen and J. M. Kuhnigk and E. M. van Rikxoort and H. O. Peitgen}, + title = {Interactive lung lobe segmentation and correction in tomographic images}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {79631S-1--79631S-6}, + doi = {10.1117/12.877659}, + abstract = {Lobe-based quantification of tomographic images is of increasing interest for diagnosis and monitoring lung pathology. 
With modern tomography scanners providing data sets with hundreds of slices, manual segmentation is time-consuming and not feasible in the clinical routine. Especially for patients with severe lung pathology that are of particular clinical importance, automatic segmentation approaches frequently generate partially inaccurate or even completely unacceptable results. In this work we present a modality-independent, semi-automated method that can be used both for generic correction of any existing lung lobe segmentation and for segmentation from scratch. Intuitive slice-based drawing of fissure parts is used to introduce user knowledge. Internally, the current fissure is represented as sampling points in 3{D} space that are interpolated to a fissure surface. Using morphological processing, a 3{D} impact region is computed for each user-drawn 2{D} curve. Based on the curve and impact region, the updated lobar boundary surface is immediately computed after each interaction step to provide instant user feedback. The method was evaluated on 25 normal-dose {CT} scans with a reference standard provided by a human observer. When segmenting from scratch, the average distance to the reference standard was 1.6mm using an average of five interactions and 50 seconds of interaction time per case. When correcting inadequate automatic segmentations, the initial error was reduced from 13.9 to 1.9mm with comparable efforts. The evaluation shows that both correction of a given segmentation and segmentation from scratch can be successfully performed with little interaction in a short amount of time.}, + file = {Lass11.pdf:pdf\\Lass11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {16741708988934202815}, + gscites = {10}, +} + +@conference{Lass12, + author = {B. Lassen and E. M. van Rikxoort and J Kuhnigk and B. van Ginneken}, + title = {Fast and Intuitive Interactive Lung Lobe Segmentation in Thoracic Computed Tomography Scans}, + booktitle = RSNA, + year = {2012}, + abstract = {{PURPOSE} Segmentation of the pulmonary lobes in {CT} data is useful for diagnosis, monitoring, and quantification of pulmonary diseases. In patients with severe disease, automated lobe segmentation is infeasible and interactive methods are needed. {METHOD AND MATERIALS} An automated lobar segmentation method was developed that performs a watershed transformation with a cost image based on the information from fissures, bronchi, and pulmonary vessels. Lobar markers required by the watershed transformation are calculated by an analysis of the automatically labeled bronchial tree. This segmentation fails on cases with severe disease or largely incomplete fissures. Therefore an interactive method was developed where an observer adjusts the automatic result by drawing parts of the correct lobar boundaries on any plane. Each curve is converted to a set of sampling points and the software immediately adapts the segmentation on the current plane but also extrapolates intelligently to adjacent slices. The immediate feedback allows the user to refine the segmentation iteratively, typically on multiple orthogonal planes. The procedure can also be performed from scratch. A set of 55 publically available chest {CT} scans with submillimeter resolution from a lung and lung lobe segmentation challenge ({LOLA11}) was used for evaluation. This data set contains many difficult cases with gross abnormalities. 
An observer applied the interactive correction using the automatic result as starting point and performed the full lobar segmentation from scratch for each scan. {RESULTS} The overall score based on the average overlap with a {LOLA11} reference standard for the 5 lobes was 0.881 for the automatic method. The interactive corrections improved this to 0.918. The observer needed on average 7 interactions in 1.5 minutes per scan. The segmentation from scratch reached an overall score of 0.923 and required on average 4 minutes of processing time per scan. {CONCLUSION} Fast interactive lobar segmentation in thoracic {CT} is feasible. {CLINICAL RELEVANCE/APPLICATION} Lobar segmentation is a prerequisite for regional quantitative analysis of chest {CT} and other applications such as surgery planning and automated diagnosis.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Lass12a, + author = {B. C. Lassen and M. Schmidt and E. M. van Rikxoort and B. van Ginneken and C. Jacobs and J. Kuhnigk and M. Prokop}, + title = {Automated and interactive image analysis workstation for the extraction of imaging biomarkers related to chronic obstructive pulmonary disease from thoracic computed tomography scans}, + booktitle = RSNA, + year = {2012}, + abstract = {{BACKGROUND} Chronic obstructive pulmonary disease ({COPD}) is a leading and rising cause of morbidity and mortality worldwide, with 12 million people in the United States currently diagnosed. An increasing number of paired inspiratory and expiratory computed tomography ({CT}) scans are taken of patients with {COPD} but clinically available workstations do not provide the tools needed for interpretation of these scans. To gain insight into the underlying pathways of {COPD} and monitor progression, methods to extract imaging biomarkers and visualize {COPD} from chest {CT} scans are of crucial importance. We have developed a workstation that allows visualization of inspiratory and expiratory {CT} simultaneously, extracts quantifiable features from both the inspiration and expiration {CT} related to parenchyma and airway morphology, displays the results in the scans, allows for interactive correction, and produces structured reports. The software is currently available as a research prototype and is used for the analysis of scans made for pharmaceutical trials. It is not yet commercially available and open-source licenses do not apply. {METHODOLOGY/APPLICATION} The workstation has the following features and workflow: When a study is sent to the workstation two preprocessing steps are performed. An elastic registration between the inspiration and expiration scans is performed to allow for linking of spatial locations between the inspiration and expiration scans. The lungs, lobes, airways, fissures, and segments are automatically segmented [1] allowing regional analysis. The preprocessing is performed offline and preloaded in the background such that a user of the workstation has the results readily available when viewing the scan. Once the preprocessing is finished the study is available for analysis in the workstation. On opening a study, the user can scroll through the inspiratory and expiratory {CT} simultaneously in any direction to visually inspect the scans. The results of the automatic segmentations of anatomical structures can be overlaid on the scans. 
In case the user is not satisfied with the segmentation results of the lungs or lung lobes the observer can correct the segmentation in a short time with an intuitive interactive correction method [2]. Once the segmentations have been approved, a range of quantifiable features can be visualized in the workstation: parenchyma features, airway features [3], and fissure completeness [4]. All relevant measurements are reported for both inspiration and expiration for the whole lung as well as per lobe and segment to allow for regional analysis. In addition, the changes between inspiration and expiration are reported. After workup of a study of a {COPD} patient, a structured report is produced that contains screenshots, renderings, and all requested measurements. The reports are created in {HTML} and {PDF} format, allowing them to be viewed in the workstation as well as in other environments. {DEMONSTRATION STRATEGY} For the exhibit an informational poster will be present that highlights the key aspects of the here proposed workstation and workflow. The underlying algorithms will be explained and the use of the analysis showcased with example scans. Attendees will be able to gain hands-on experience with the workstation, inspect the results of automatic processing, interact with the methods, and obtain structured reports of the findings.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Lass12b, + author = {B. Lassen and E. M. van Rikxoort and M. Schmidt and S. Kerkstra and B. van Ginneken and J. Kuhnigk}, + title = {Automatic segmentation of the pulmonary lobes from chest {CT} scans based on fissures, vessels, and bronchi}, + journal = TMI, + year = {2013}, + volume = {32}, + pages = {210--222}, + doi = {10.1109/TMI.2012.2219881}, + abstract = {Segmentation of the pulmonary lobes is relevant in clinical practice and particularly challenging for cases with severe diseases or incomplete fissures. In this work an automated segmentation approach is presented that performs a markerbased watershed transformation on {CT} scans to subdivide the lungs into lobes. A cost image for the watershed transformation is computed by combining information from fissures, bronchi, and pulmonary vessels. The lobar markers are calculated by an analysis of the automatically labeled bronchial tree. By integration of information from several anatomical structures the segmentation is made robust against incomplete fissures. For evaluation the method was compared to a recently published method on 20 {CT} scans with no or mild disease. The average distances to the reference segmentation were 0.69 mm, 0.67 mm, and 1.21 mm for the left major, right major, and right minor fissure, respectively. In addition the results were submitted to {LOLA11}, an international lung lobe segmentation challenge with publically available data including cases with severe diseases. The average distances to the reference for the 55 CT scans provided by {LOLA11} were 0.98 mm, 3.97 mm, and 3.09 mm for the left major, right major, and right minor fissure. Moreover, an analysis of the relation between segmentation quality and fissure completeness showed that the method is robust against incomplete fissures.}, + file = {Lass12b.pdf:pdf/Lass12b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {23014712}, + month = {2}, + gsid = {1728731555240293040}, + gscites = {114}, + ss_id = {eaea24fdc6aa24479ff0cbd3b58dfcb2e8d56868}, + all_ss_ids = {['eaea24fdc6aa24479ff0cbd3b58dfcb2e8d56868']}, +} + +@conference{Lass14, + author = {B. C. 
Lassen and J. Kuhnigk and C. Jacobs and E. M. van Rikxoort and Bram van Ginneken}, + title = {Fully Automatic Volumetric Segmentation of Pulmonary Nodules: Evaluation using the Complete {LIDC/IDRI} Database}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE In the publicly available LIDC/IDRI database, all nodules larger than 3mm have been manually segmented by four expert thoracic radiologists. This provides a unique opportunity for large scale validation. We report the performance of our automatic method to segment pulmonary nodules and compare this to inter-reader variability. METHOD AND MATERIALS We developed an automatic nodule segmentation method which is initialized by region growing from a seed point in the nodule. Thresholds for region growing are automatically determined from histogram analysis. A circumscribing ellipsoid is approximated to separate nodules from the chest wall. Finally, through a combination of connected component analysis and morphological operations vasculature attached to the nodule is removed. To evaluate our automatic method, it was applied four times using a random seed point in each nodule in the LIDC/IDRI database that contains 1,018 chest CT scans from 1,000 patients, acquired at seven different institutions with a wide variety of scanners and imaging protocols. In these scans, 928 nodules were manually segmented independently by four radiologists by drawing contours on each axial section containing the nodule. The DICE overlap between the resulting automatic outline and the three other manual segmentations was computed. Similarly, each manual segmentation was compared to the three other manual outlines. We report statistics of the averaged DICE coefficients. RESULTS We achieved excellent agreement between our automatic and manual segmentation results. Mean DICE was 0.75 +- 0.16 for the automatic method and 0.77 +- 0.09 for the inter-observer agreement. The first quartile, median, and third quartile for the automatic method were 0.71, 0.79, 0.84, respectively. For the manual outlines, these statistics were 0.73, 0.79, 0.83. CONCLUSION Automated nodule segmentation is feasible in CT scans obtained with varying acquisition parameters with a performance close to manual outlining by expert thoracic radiologists. CLINICAL RELEVANCE/APPLICATION Automatic volumetric nodule segmentation is a robust, efficient and highly effective technique for the analysis of pulmonary nodules in CT data.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Lass15, + author = {Lassen, B. C. and Jacobs, C. and Kuhnigk, J-M. and van Ginneken, B. and van Rikxoort, E. M.}, + title = {Robust semi-automatic segmentation of pulmonary subsolid nodules in chest computed tomography scans}, + journal = PMB, + year = {2015}, + volume = {60}, + pages = {1307--1323}, + doi = {10.1088/0031-9155/60/3/1307}, + abstract = {The malignancy of lung nodules is most often detected by analyzing changes of the nodule diameter in follow-up scans. A recent study showed that comparing the volume or the mass of a nodule over time is much more significant than comparing the diameter. Since the survival rate is higher when the disease is still in an early stage it is important to detect the growth rate as soon as possible. However manual segmentation of a volume is time-consuming. Whereas there are several well evaluated methods for the segmentation of solid nodules, less work is done on subsolid nodules which actually show a higher malignancy rate than solid nodules. 
In this work we present a fast, semi-automatic method for segmentation of subsolid nodules. As minimal user interaction the method expects a user-drawn stroke on the largest diameter of the nodule. First, a threshold-based region growing is performed based on intensity analysis of the nodule region and surrounding parenchyma. In the next step the chest wall is removed by a combination of a connected component analysis and convex hull calculation. Finally, attached vessels are detached by morphological operations. The method was evaluated on all nodules of the publicly available LIDC/IDRI database that were manually segmented and rated as non-solid or part-solid by four radiologists (Dataset 1) and three radiologists (Dataset 2). For these 59 nodules the Jaccard index for the agreement of the proposed method with the manual reference segmentations was 0.52/0.50 (Dataset 1/Dataset 2) compared to an inter-observer agreement of the manual segmentations of 0.54/0.58 (Dataset 1/Dataset 2). Furthermore, the inter-observer agreement using the proposed method (i.e. different input strokes) was analyzed and gave a Jaccard index of 0.74/0.74 (Dataset 1/Dataset 2). The presented method provides satisfactory segmentation results with minimal observer effort in minimal time and can reduce the inter-observer variability for segmentation of subsolid nodules in clinical routine.}, + file = {Lass15.pdf:pdf\\Lass15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {25591989}, + month = {1}, + gsid = {9522455507672982699}, + gscites = {75}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/153905}, + ss_id = {95c6d05f35d82b06b155d05f1a8954e940991ab9}, + all_ss_ids = {['95c6d05f35d82b06b155d05f1a8954e940991ab9']}, +} + +@phdthesis{Lass15a, + author = {Bianca Lassen-Schmidt}, + title = {Automatic and Interactive Segmentation of Pulmonary Lobes and Nodules in Chest CT Images}, + year = {2015}, + url = {http://hdl.handle.net/2066/145304}, + abstract = {This thesis proposes automatic and interactive methods for the segmentation of pulmonary lobes and nodules. A combination of these approaches provides a segmentation workflow starting with automatic methods and continuing with interactive correction if required. Chapter 2 presents a watershed-based automatic segmentation of the lung lobes. It includes information from the pulmonary fissures, bronchi, and vessels. The evaluation was performed on 20 CT scans used in a previous study, allowing a direct comparison. Furthermore, we participated in the lungs and lobe segmentation challenge LOLA11 and applied the method to 55 datasets provided by the organizers of the challenge. Chapter 3 describes a complete segmentation workflow for the pulmonary lobes. The first step is the automatic segmentation presented in Chapter 2. The second step is a 3D geometric method that enables fast and intuitive correction of a given lung lobe segmentation. Also a segmentation from scratch only based on a lung mask is allowed. The boundary between the lobes is represented as a mesh that can be modified by drawing the correct boundary on 2D slices in arbitrary orientation. After each drawing, the mesh is immediately adapted in a 3D region around the user interaction. For evaluation we also participated in the LOLA11 challenge with both the correction and the segmentation from scratch. Two observers applied the approach to correct the automatic segmentation results and one observer did a segmentation from scratch for all of the 55 datasets provided by the challenge. 
In Chapter 4 an automatic nodule segmentation approach is introduced. A user provides a stroke on the largest diameter of the nodule to initialize the method. Then, a threshold-based region growing is performed based on an intensity analysis around the stroke and surrounding parenchyma. A combination of a connected component analysis and convex hull calculation separates the nodule from the chest wall. Finally, vessels attached to the nodule are removed by morphological operations. The method was evaluated on 59 subsolid publically available nodules provided by LIDC/IDRI. Chapter 5 presents a complete segmentation workflow for pulmonary nodules. As a first step a similar approach to the one introduced in Chapter 4 but with an initial seedpoint instead of a stroke is applied. For the cases with insufficient results an interactive step follows. Here the user can choose between seven precalculated segmentation results. These are also created with the automatic segmentation method but with varying parameters for the threshold-based region growing. This workflow was evaluated on 907 publically available pulmonary nodules provided by LIDC/IDRI. In addition, reliability of volumetric measurement is compared to 2D metrics. Chapters 6 and 7 give a summary of the thesis and a general discussion.}, + copromotor = {E. M. van Rikxoort and J.-M. Kuhnigk}, + file = {Lass15a.pdf:pdf/Lass15a.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken and H. K. Hahn}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Lass17, + author = {Lassen-Schmidt, Bianca C and Kuhnigk, Jan-Martin and Konrad, Olaf and van Ginneken, Bram and van Rikxoort, Eva}, + title = {Fast interactive segmentation of the pulmonary lobes from thoracic computed tomography data}, + journal = PMB, + year = {2017}, + volume = {62}, + number = {16}, + pages = {6649-6665}, + doi = {10.1088/1361-6560/aa7674}, + abstract = {Automated lung lobe segmentation methods often fail for challenging and clinically relevant cases with incomplete fissures or substantial amounts of pathology. We present a fast and intuitive method to interactively correct a given lung lobe segmentation or to quickly create a lobe segmentation from scratch based on a lung mask. A given lobar boundary is converted into a mesh by principal component analysis of 3D lobar boundary markers to obtain a plane where nodes correspond to the position of the markers. An observer can modify the mesh by drawing on 2D slices in arbitrary orientations. After each drawing, the mesh is immediately adapted in a 3D region around the user interaction. For evaluation we participated in the international lung lobe segmentation challenge LObe and Lung Analysis 2011 (LOLA11). Two observers applied the method to correct a given lung lobe segmentation obtained by a fully automatic method for all 55 CT scans of LOLA11. On average observer 1/2 required 8+-4/25+-12 interactions per case and took 1:30+-0:34/3:19+-1:29 min. The average distances to the reference segmentation were improved from an initial 2.68+-14.71 mm to 0.89+-1.63/0.74+-1.51 mm. In addition, one observer applied the proposed method to create a segmentation from scratch. This took 3:44+-0:58 minutes on average per case, applying an average of 20+-3 interactions to reach an average distance to the reference of 0.77+-1.14 mm. Thus, both the interactive corrections and the creation of a segmentation from scratch were feasible in a short time with excellent results and minimal interaction. 
Since the mesh adaptation is independent of image features, the method can successfully handle patients with severe pathologies, provided that the human operator is capable of correctly indicating the lobar boundaries.}, + file = {Lass17.pdf:pdf\\Lass17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28570264}, + month = {7}, + gsid = {15670809461306486226}, + gscites = {12}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/181908}, + ss_id = {a1aa321aa6892ab2478ec99f422d1110afe1fc7d}, + all_ss_ids = {['a1aa321aa6892ab2478ec99f422d1110afe1fc7d']}, +} + +@inproceedings{Lass20, + title = {Automatic segmentation of the pulmonary lobes with a 3D u-net and optimized loss function}, + author = {Lassen-Schmidt, Bianca and Hering, Alessa and Krass, Stefan and Meine, Hans}, + booktitle = MIDL, + year = {2020}, + url = {https://2020.midl.io/papers/lassen-schmidt20.html}, + optnote = {DIAG, RADIOLOGY}, + abstract = {Fully-automatic lung lobe segmentation is challenging due to anatomical variations, pathologies, and incomplete fissures. We trained a 3D u-net for pulmonary lobe segmentation on 49 mainly publically available datasets and introduced a weighted Dice loss function to emphasize the lobar boundaries. To validate the performance of the proposed method we compared the results to two other methods. The new loss function improved the mean distance to 1.46 mm (compared to 2.08 mm for simple loss function without weighting).}, + ss_id = {971a413a2cb3163d7c912ed8a1fd53ba64bda007}, + all_ss_ids = {['971a413a2cb3163d7c912ed8a1fd53ba64bda007']}, + gscites = {4}, +} + +@inproceedings{Laue13, + author = {Hendrik O. A. Laue and Marcel T. H. Oei and L. Chen and I.N. Kompan and Horst K. Hahn and Mathias Prokop and Rashindra Manniesing}, + title = {Automated Artery and Vein Detection in Dynamic {CT} data with an Unsupervised Classification Algorithm of the Time Intensity Curves}, + booktitle = MI, + year = {2013}, + series = SPIE, + doi = {10.1117/12.2008116}, + abstract = {In this work a fully automated detection method for artery input function (AIF) and venous output function (VOF) in 4D-computer tomography (4D-CT) data is presented based on unsupervised classification of the time intensity curves (TIC) as input data. Bone and air voxels are first masked out using thresholding of the baseline measurement. The TICs for each remaining voxel are converted to time-concentration-curves (TCC) by subtracting the baseline value from the TIC. Then, an unsupervised K-means classifier is applied to each TCC with an area under the curve (AUC) larger than 95% of the maximum AUC of all TCCs. The results are three clusters, which yield average TCCs for vein and artery voxels in the brain, respectively. A third cluster generally represents a vessel outside the brain. The algorithm was applied to five 4D-CT patient data who were scanned on the suspicion of ischemic stroke. For all five patients, the algorithm yields reasonable classification of arteries and veins as well as reasonable and reproducible AIFs and VOF. To our knowledge, this is the first application of an unsupervised classification method to automatically identify arteries and veins in 4D-CT data. 
Preliminary results show the feasibility of using K-means clustering for the purpose of artery-vein detection in 4D-CT patient data.}, + file = {Laue13.pdf:pdf\\Laue13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + ss_id = {ad50e7a84dce1d4c36c5387731d99005711b062d}, + all_ss_ids = {['ad50e7a84dce1d4c36c5387731d99005711b062d']}, + gscites = {4}, +} + +@article{Leac12, + author = {Leach, M. O. and Morgan, B. and Tofts, P. S. and Buckley, D. L. and Huang, W. and Horsfield, M. A. and Chenevert, T. L. and Collins, D. J. and Jackson, A. and Lomas, D. and Whitcher, B. and Clarke, L. and Plummer, R. and Judson, I. and Jones, R. and Alonzi, R. and Brunner, T. and Koh, D. M. and Murphy, P. and Waterton, J. C. and Parker, G. and Graves, M. J. and Scheenen, T W J. and Redpath, T. W. and Orton, M. and Karczmar, G. and Huisman, H. and Barentsz, J. and Padhani, A. and , on behalf of the Experimental Cancer Medicine Centres Imaging Network Steering Committee}, + title = {Imaging vascular function for early stage clinical trials using dynamic contrast-enhanced magnetic resonance imaging}, + number = {7}, + pages = {1451--1464}, + url = {https://link.springer.com/article/10.1007%2Fs00330-012-2446-x}, + volume = {22}, + abstract = {Many therapeutic approaches to cancer affect the tumour vasculature, either indirectly or as a direct target. Dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) has become an important means of investigating this action, both pre-clinically and in early stage clinical trials. For such trials, it is essential that the measurement process (i.e. image acquisition and analysis) can be performed effectively and with consistency among contributing centres. As the technique continues to develop in order to provide potential improvements in sensitivity and physiological relevance, there is considerable scope for between-centre variation in techniques. A workshop was convened by the Imaging Committee of the Experimental Cancer Medicine Centres (ECMC) to review the current status of DCE-MRI and to provide recommendations on how the technique can best be used for early stage trials. This review and the consequent recommendations are summarised here. Key Points: - Tumour vascular function is key to tumour development and treatment - Dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) can assess tumour vascular function - Thus DCE-MRI with pharmacokinetic models can assess novel treatments - Many recent developments are advancing the accuracy of and information from DCE-MRI - Establishing common methodology across multiple centres is challenging and requires accepted guidelines.}, + file = {Leac12.pdf:pdf\\Leac12.pdf:PDF}, + journal = ER, + optnote = {DIAG, RADIOLOGY}, + pmid = {22562143}, + year = {2012}, + gsid = {7944651468101679970}, + gscites = {152}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/108222}, + ss_id = {9b7cbf3e93c3b346e824f26d1faf4194ec10cb9f}, + all_ss_ids = {['9b7cbf3e93c3b346e824f26d1faf4194ec10cb9f']}, +} + +@inproceedings{Leem15, + author = {Sil van de Leemput and Frank Dorssers and Babak {Ehteshami Bejnordi}}, + title = {A novel spherical shell filter for reducing false positives in automatic detection of pulmonary nodules in thoracic {CT} scans}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + pages = {94142P}, + doi = {10.1117/12.2082298}, + abstract = {Early detection of pulmonary nodules is crucial for improving prognosis of patients with lung cancer. 
Computer-aided detection of lung nodules in thoracic computed tomography (CT) scans has a great potential to enhance the performance of the radiologist in detecting nodules. In this paper we present a computer-aided lung nodule detection system for computed tomography (CT) scans that works in three steps. The system first segments the lung using thresholding and hole filling. From this segmentation the system extracts candidate nodules using Laplacian of Gaussian. To reject false positives among the detected candidate nodules, multiple established features are calculated. We propose a novel feature based on a spherical shell filter, which is specifically designed to distinguish between vascular structures and nodular structures. The performance of the proposed CAD system was evaluated by partaking in the ANODE09 challenge, which presents a platform for comparing automatic nodule detection programs. The results from the challenge show that our CAD system ranks third among the submitted works, demonstrating the efficacy of our proposed CAD system. The results also show that our proposed spherical shell filter in combination with conventional features can significantly reduce the number of false positives from the detected candidate nodules.}, + file = {Leem15.pdf:pdf\\Leem15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, +} + +@conference{Leem17, + author = {Sil van de Leemput and Frederick J. A. Meijer and Mathias Prokop and Rashindra Manniesing}, + title = {Cerebral white matter, gray matter and cerebrospinal fluid segmentation in CT using VCAST: a volumetric cluster annotation and segmentation tool}, + booktitle = ECR, + year = {2017}, + abstract = {Purpose: Segmentation of cerebral white matter (WM), gray matter (GM) and cerebrospinal fluid (CSF) in head CT is important for subsequent quantitative analysis and automated detection of cerebral pathology. We introduce VCAST, a new volumetric annotation tool aimed at delineating soft tissue in non-contrast CT (NCCT) and CT perfusion (CTP). + Methods and Materials: VCAST supports traditional 2D visualizations and annotations, and provides functionalities to facilitate 3D segmentations based on pre-calculated grids of volumetric clusters where the clusters are spatially coherently grouped based on HUs. Clicking a cluster in a 2D-plane allows for inclusion of the 3D-cluster in the output segmentation. + Ten patients with suspicion of ischemic stroke were included in this retrospective study, five NCCTs and five whole brain CTPs (320-row detector scanner). Temporal average CTA was reconstructed from CTP and in one slice in arbitrary direction, WM, GM and CSF were annotated two times by one observer using VCAST. In NCCT, a subvolume of approximately 22 mm^3 was randomly selected in which CSF was annotated by one observer, using VCAST either with 2D (slice-based) or 2D and 3D (cluster-based) annotation support. Dice coefficients and annotation times were reported. + Results: Dice coefficients were 0.86+-0.04, 0.91+-0.02, 0.87+-0.02 for CSF, GM and WM respectively. CSF annotation times reduced from 16+-9 to 8+-5 minutes with 3D cluster support (p=0.02). CSF Dice similarity was 0.81+-0.03. + Conclusion: VCAST is a volumetric annotation tool which reduces the time to obtain 3D segmentations in head CT while maintaining good overlap with a slice-based approach.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Leem18a, + author = {van de Leemput, S. C. and Teuwen, J. 
and Manniesing, R.}, + title = {{MemCNN}: a Framework for Developing Memory Efficient Deep Invertible Networks}, + booktitle = {International Conference on Learning Representations}, + year = {2018}, + url = {https://openreview.net/forum?id=r1KzqK1wz}, + abstract = {Reversible operations have recently been successfully applied to classification problems to reduce memory requirements during neural network training. This feature is accomplished by removing the need to store the input activation for computing the gradients at the backward pass and instead reconstruct them on demand. However, current approaches rely on custom implementations of backpropagation, which limits applicability and extendibility. We present MemCNN, a novel PyTorch framework which simplifies the application of reversible functions by removing the need for a customized backpropagation. The framework contains a set of practical generalized tools, which can wrap common operations like convolutions and batch normalization and which take care of the memory management. We validate the presented framework by reproducing state-of-the-art experiments comparing classification accuracy and training time on Cifar-10 and Cifar-100 with the existing state-of-the-art, achieving similar classification accuracy and faster training times.}, + file = {:pdf/Leem18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16960578953340713390}, + gscites = {11}, + ss_id = {f2ab2b234da918817c1721ccea4870c69285ad6a}, + all_ss_ids = {['f2ab2b234da918817c1721ccea4870c69285ad6a']}, +} + +@inproceedings{Leem18b, + author = {van de Leemput, S. C. and Prokop, M. and van Ginneken, B. and Manniesing, R.}, + title = {Stacked Bidirectional Convolutional {LSTM}s for {3D} Non-contrast {CT} Reconstruction from Spatiotemporal {4D CT}}, + booktitle = MIDL, + year = {2018}, + url = {https://openreview.net/forum?id=SkHVVZniz}, + abstract = {The imaging workup in acute stroke can be simplified by reconstructing the non-contrast CT (NCCT) from CT perfusion (CTP) images, resulting in reduced workup time and radiation dose. This work presents a stacked bidirectional convolutional LSTM (C-LSTM) network to predict 3D volumes from 4D spatiotemporal data. Several parameterizations of the C-LSTM network were trained on a set of 17 CTP-NCCT pairs to learn to reconstruct NCCT from CTP and were subsequently quantitatively evaluated on a separate cohort of 16 cases. The results show that C-LSTM network clearly outperforms basic reconstruction methods and provides a promising general deep learning approach for handling high-dimensional spatiotemporal medical data.}, + file = {:pdf/Leem18b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {12856903356329628963}, + gscites = {2}, + all_ss_ids = {['63c432e76128b36d5943eb116a38f98a9bb9ae27']}, +} + +@inproceedings{Leem18c, + author = {van de Leemput, S. C. and Patel, A. and Manniesing, R.}, + title = {Full Volumetric Brain Tissue Segmentation in Non-contrast {CT} using Memory Efficient Convolutional {LSTM}s}, + booktitle = {Medical Imaging meets NeurIPS}, + year = {2018}, + url = {https://openreview.net/pdf?id=rJxlLGBElN}, + abstract = {There is a demand for deep learning approaches able to process high resolution 3D + volumes in an accurate and fast way. However, training of these models is often + limited by the available GPU memory, which often results in reduced model depth, + receptive field, and input size, limiting the expressiveness of the model. 
In this work + we present a memory efficient modified convolutional-LSTM, which integrates + a context-rich 2D U-Net as an input in a slice based manner and subsequently + integrates the acquired slices using LSTM to create the full 3D context. Memory + savings achieved by checkpointing on one or more steps within the LSTM allow + for direct training on a single full non-contrast CT volume of: 512 x 512 x 320 on + a NVIDIA Titan X with 12 GB of VRAM. We demonstrate the effectiveness of our + method by training and segmenting the cranial cavity including soft-brain tissue + and CSF in the non-contrast CT end-to-end on the full image data, without any + stitching, while preserving a large receptive field and high expressiveness.}, + file = {:pdf/Leem18c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2197973819111672877}, + gscites = {1}, +} + +@article{Leem19, + author = {{van de Leemput}, S. C. and {Meijs}, M. and {Patel}, A. and {Meijer}, F.J.A. and {van Ginneken}, B. and Manniesing, R.}, + title = {Multiclass Brain Tissue Segmentation in 4D CT using Convolutional Neural Networks}, + journal = _IEEE_Access_, + year = {2019}, + volume = {7}, + issue = {1}, + pages = {51557-51569}, + doi = {10.1109/ACCESS.2019.2910348}, + url = {https://ieeexplore.ieee.org/document/8686082}, + abstract = {4D CT imaging has great potential for use in stroke workup. A fully convolutional neural network (CNN) for 3D multiclass segmentation in 4D CT is presented, which can be trained end-to-end from sparse 2D annotations. The CNN was trained and validated on 42 4D CT acquisitions of the brain of patients with suspicion of acute ischemic stroke. White matter, gray matter, cerebrospinal fluid, and vessels were annotated by two trained observers. The mean Dice coefficients, contour mean distances, and absolute volume differences were respectively 0.87+-0.04, 0.52+-0.47 mm, and 11.78+-9.55 \% on a separate test set of five patients, which were similar to the average interobserver variability scores of 0.88+-0.03, 0.72+-0.93 mm, and 8.86+-7.65 \% outperforming the current state-of-the-art. The proposed method is therefore a promising deep neural network for multiclass segmentation in 4D spatiotemporal imaging data.}, + file = {:pdf/Leem19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {10211114630238511371}, + gscites = {11}, + ss_id = {e03d4a9c11a70f4ec009ea81d141742b40b20789}, + all_ss_ids = {['e03d4a9c11a70f4ec009ea81d141742b40b20789']}, +} + +@article{Leem19b, + author = {Sil C. van de Leemput and Jonas Teuwen and Bram van Ginneken and Rashindra Manniesing}, + title = {MemCNN: A Python/PyTorch package for creating memory-efficient invertible neural networks}, + journal = {Journal of Open Source Software}, + year = {2019}, + volume = {4}, + number = {39}, + month = {7}, + pages = {1576}, + doi = {10.21105/joss.01576}, + code = {https://github.com/silvandeleemput/memcnn}, + abstract = {Neural networks are computational models that were originally inspired by biological neural networks like animal brains. These networks are composed of many small computational units called neurons that perform elementary calculations. Instead of explicitly programming the behavior of neural networks, these models can be trained to perform tasks, like classifying images, by presenting them examples. 
Sufficiently complex neural networks can automatically extract task-relevant characteristics from the presented examples without having prior knowledge about the task domain, which makes them attractive for many complicated real-world applications. + + Reversible operations have recently been successfully applied to classification problems to reduce memory requirements during neural network training. This feature is accomplished by removing the need to store the input activation for computing the gradients at the backward pass and instead reconstruct them on demand. However, current approaches rely on custom implementations of backpropagation, which limits applicability and extendibility. We present MemCNN, a novel PyTorch framework that simplifies the application of reversible functions by removing the need for a customized backpropagation. The framework contains a set of practical generalized tools, which can wrap common operations like convolutions and batch normalization and which take care of memory management. We validate the presented framework by reproducing state-of-the-art experiments using MemCNN and by comparing classification accuracy and training time on Cifar-10 and Cifar-100. Our MemCNN implementations achieved similar classification accuracy and faster training times while retaining compatibility with the default backpropagation facilities of PyTorch.}, + file = {:pdf/Leem19b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + publisher = {The Open Journal}, + gsid = {12471482698808856137}, + gscites = {12}, + ss_id = {dce049bbedc5134dc6eb61fc7e5ce1d1c414bdd1}, + all_ss_ids = {['dce049bbedc5134dc6eb61fc7e5ce1d1c414bdd1']}, +} + +@article{Leem20, + author = {{van de Leemput}, S. C. and {Prokop}, M. and {van Ginneken}, B. and Manniesing, R.}, + title = {Stacked Bidirectional Convolutional LSTMs for Deriving 3D Non-contrast CT from Spatiotemporal 4D CT}, + journal = TMI, + year = {2020}, + volume = {39}, + issue = {4}, + pages = {985-996}, + doi = {10.1109/TMI.2019.2939044}, + abstract = {The imaging workup in acute stroke can be simplified by deriving non-contrast CT (NCCT) from CT perfusion (CTP) images. This results in reduced workup time and radiation dose. To achieve this, we present a stacked bidirectional convolutional LSTM (C-LSTM) network to predict 3D volumes from 4D spatiotemporal data. Several parameterizations of the C-LSTM network were trained on a set of 17 CTP-NCCT pairs to learn to derive a NCCT from CTP and were subsequently quantitatively evaluated on a separate cohort of 16 cases. The results show that the C-LSTM network clearly outperforms the baseline and competitive convolutional neural network methods. We show good scalability and performance of the method by continued training and testing on an independent dataset which includes pathology of 80 and 83 CTP-NCCT pairs, respectively. 
C-LSTM is, therefore, a promising general deep learning approach to learn from high-dimensional spatiotemporal medical images.}, + file = {Leem20.pdf:pdf\\Leem20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31484111}, + month = {4}, + gsid = {15708646315318329891}, + gscites = {14}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/218599}, + ss_id = {9f7129d07d92e91ecf42dd33ef05d83514303003}, + all_ss_ids = {['9f7129d07d92e91ecf42dd33ef05d83514303003']}, +} + +@article{Leen20, + author = {van Leenders, Geert J L H and van der Kwast, Theodorus H and Grignon, David J and Evans, Andrew J and Kristiansen, Glen and Kweldam, Charlotte F and Litjens, Geert and McKenney, Jesse K and Melamed, Jonathan and Mottet, Nicholas and Paner, Gladell P and Samaratunga, Hemamali and Schoots, Ivo G and Simko, Jeffry P and Tsuzuki, Toyonori and Varma, Murali and Warren, Anne Y and Wheeler, Thomas M and Williamson, Sean R and Iczkowski, Kenneth A and ISUP Grading Workshop Panel Members}, + title = {The 2019 International Society of Urological Pathology (ISUP) Consensus Conference on Grading of Prostatic Carcinoma.}, + volume = {44}, + issue = {8}, + pages = {e87--e99}, + doi = {10.1097/PAS.0000000000001497}, + abstract = {Five years after the last prostatic carcinoma grading consensus conference of the International Society of Urological Pathology (ISUP), accrual of new data and modification of clinical practice require an update of current pathologic grading guidelines. This manuscript summarizes the proceedings of the ISUP consensus meeting for grading of prostatic carcinoma held in September 2019, in Nice, France. Topics brought to consensus included the following: (1) approaches to reporting of Gleason patterns 4 and 5 quantities, and minor/tertiary patterns, (2) an agreement to report the presence of invasive cribriform carcinoma, (3) an agreement to incorporate intraductal carcinoma into grading, and (4) individual versus aggregate grading of systematic and multiparametric magnetic resonance imaging-targeted biopsies. Finally, developments in the field of artificial intelligence in the grading of prostatic carcinoma and future research perspectives were discussed.}, + file = {:pdf/Leen20.pdf:PDF}, + journal = AJSP, + month = aug, + optnote = {DIAG}, + pmid = {32459716}, + year = {2020}, + ss_id = {5a1c2ad4627095f867a2fe62d61177c4631096d9}, + all_ss_ids = {['5a1c2ad4627095f867a2fe62d61177c4631096d9']}, + gscites = {215}, +} + +@conference{Leeu19, + author = {van Leeuwen, Kicky G}, + title = {Opinion of Dutch healthcare professionals on the development and adoption of artificial intelligence in healthcare}, + booktitle = {EuSoMII Annual Meeting}, + year = {2019}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Leeu20, + author = {van Leeuwen, Kicky G. and Schalekamp, Steven and Rutten, Matthieu J.C.M. and van Ginneken, Bram and de Rooij, Maarten}, + title = {Scientific Evidence for 100 Commercially Available Artificial Intelligence Tools for Radiology: A Systematic Review}, + abstract = {Purpose: To survey scientific evidence for all CE marked artificial intelligence (AI) based software products for radiology available as of April, 2020. + Materials and Methods: We created an online overview of CE-certified AI software products for clinical radiology based on vendor-supplied product specifications (www.aiforradiology.com). 
For these products, we conducted a systematic literature study on Pubmed for original, peer-reviewed, English articles published between Jan 1, 2015 and April 14, 2020 on the efficacy of the AI software. Papers were included when the product and/or company name were mentioned, when efficacy level 2 to 6 according to Fryback was reported on an independent dataset, and when the tool was applied on in vivo human data. + Results: Our product overview consisted of 100 CE-certified software products from 51 different vendors. Among the 839 papers screened, 108 met the inclusion criteria. For 70/100 products we did not find papers that met the inclusion criteria. The evidence of the other 30 products was predominantly (84%) focused on diagnostic accuracy (efficacy level 2). Half of the available evidence (49%) was independent and not (co)-funded or (co)-authored by the vendor. In more than half (55%) of the papers the version number of the product used in the study was not mentioned. From all studies, 20 (18%) used validation data from multiple countries, 42 (39%) were multicenter studies, 25 (23%) were performed with acquisition machines from multiple manufacturers. + Conclusion: One hundred CE-certified AI software products for radiology exist today. Yet, for the majority, scientific evidence on the clinical performance and clinical impact is lacking. These insights should raise awareness that certification may not guarantee technical and clinical efficacy of an AI product. + Clinical relevance: Our findings identify the available evidence for commercially available AI software, aiming to contribute to a safe and effective implementation of AI software in radiology departments.}, + booktitle = RSNA, + year = {2020}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Leeu21, + author = {van Leeuwen, Kicky G. and Schalekamp, Steven and Rutten, Matthieu J.C.M. and van Ginneken, Bram and de Rooij, Maarten}, + title = {Artificial intelligence in radiology: 100 commercially available products and their scientific evidence}, + journal = ER, + year = {2021}, + volume = {31}, + pages = {3797-3804}, + doi = {10.1007/s00330-021-07892-z}, + url = {https://doi.org/10.1007/s00330-021-07892-z}, + abstract = {OBJECTIVES: Map the current landscape of commercially available artificial intelligence (AI) software for radiology and review the availability of their scientific evidence. METHODS: We created an online overview of CE-marked AI software products for clinical radiology based on vendor-supplied product specifications (www.aiforradiology.com ). Characteristics such as modality, subspeciality, main task, regulatory information, deployment, and pricing model were retrieved. We conducted an extensive literature search on the available scientific evidence of these products. Articles were classified according to a hierarchical model of efficacy. RESULTS: The overview included 100 CE-marked AI products from 54 different vendors. For 64/100 products, there was no peer-reviewed evidence of its efficacy. We observed a large heterogeneity in deployment methods, pricing models, and regulatory classes. The evidence of the remaining 36/100 products comprised 237 papers that predominantly (65%) focused on diagnostic accuracy (efficacy level 2). From the 100 products, 18 had evidence that regarded level 3 or higher, validating the (potential) impact on diagnostic thinking, patient outcome, or costs. Half of the available evidence (116/237) were independent and not (co-)funded or (co-)authored by the vendor. 
CONCLUSIONS: Even though the commercial supply of AI software in radiology already holds 100 CE-marked products, we conclude that the sector is still in its infancy. For 64/100 products, peer-reviewed evidence on its efficacy is lacking. Only 18/100 AI products have demonstrated (potential) clinical impact. KEY POINTS: * Artificial intelligence in radiology is still in its infancy even though already 100 CE-marked AI products are commercially available. * Only 36 out of 100 products have peer-reviewed evidence of which most studies demonstrate lower levels of efficacy. * There is a wide variety in deployment strategies, pricing models, and CE marking class of AI products for radiology.}, + file = {Leeu21.pdf:pdf\\Leeu21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33856519}, + ss_id = {93e353567bbc7525306a14b6219973d34b084c4e}, + all_ss_ids = {['93e353567bbc7525306a14b6219973d34b084c4e']}, + gscites = {139}, +} + +@article{Leeu21a, + author = {van Leeuwen, Kicky G and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J C M}, + title = {How does artificial intelligence in radiology improve efficiency and health outcomes?}, + journal = PEDRAD, + doi = {10.1007/s00247-021-05114-8}, + url = {https://doi.org/10.1007/s00247-021-05114-8}, + abstract = {Since the introduction of artificial intelligence (AI) in radiology, the promise has been that it will improve health care and reduce costs. Has AI been able to fulfill that promise? We describe six clinical objectives that can be supported by AI: a more efficient workflow, shortened reading time, a reduction of dose and contrast agents, earlier detection of disease, improved diagnostic accuracy and more personalized diagnostics. We provide examples of use cases including the available scientific evidence for its impact based on a hierarchical model of efficacy. We conclude that the market is still maturing and little is known about the contribution of AI to clinical practice. 
More real-world monitoring of AI in clinical practice is expected to aid in determining the value of AI and making informed decisions on development, procurement and reimbursement.}, + file = {Leeu21a.pdf:pdf\\Leeu21a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pages = {2087--2093}, + pmid = {34117522}, + volume = {52}, + year = {2022}, + ss_id = {5f6c21ae64b9db06da177f44b2d59a0dd6f9c583}, + all_ss_ids = {['5f6c21ae64b9db06da177f44b2d59a0dd6f9c583']}, + gscites = {47}, +} + +@article{Leeu21b, + author = {van Leeuwen, Kicky G and Meijer, Frederick J A and Schalekamp, Steven and Rutten, Matthieu J C M and van Dijk, Ewoud J and van Ginneken, Bram and Govers, Tim M and Rooij, Maarten De}, + title = {{Cost - effectiveness of artificial intelligence aided vessel occlusion detection in acute stroke: an early health technology assessment}}, + doi = {10.1186/s13244-021-01077-4}, + pages = {133}, + url = {https://doi.org/10.1186/s13244-021-01077-4}, + volume = {12}, + file = {Leeu21b.pdf:pdf\\Leeu21b.pdf:PDF}, + journal = INSI, + optnote = {DIAG, RADIOLOGY}, + publisher = {Springer International Publishing}, + year = {2021}, + ss_id = {dc68e0f61994ca0e768584d39c108305287f338a}, + all_ss_ids = {['dc68e0f61994ca0e768584d39c108305287f338a']}, + gscites = {13}, +} + +@conference{Leeu21c, + author = {van Leeuwen, Kicky G and Rutten, Matthieu J C M and Schalekamp, Steven and de Rooij, Maarten and van Ginneken, Bram}, + title = {Clinical use of artificial intelligence in radiology departments in the Netherlands: a survey}, + booktitle = ECR, + year = {2021}, + abstract = {Purpose: There are over 150 artificial intelligence (AI) products for radiology offered, but little is known about their current clinical use. We investigated actual clinical use of AI software in radiology departments in the Netherlands. + Materials and Methods: We consulted the radiology department of each hospital organization in the Netherlands (n=70) about their current AI implementations and plans from February-March 2020. A representative of the department was asked to fill in a questionnaire about their knowledge, experience, research and/or clinical use of commercially available CE-certified AI products for radiology (n=93). Responses for these familiarity-levels were analysed to create an overview with quantitative metrics. + Results: The response rate of the consulted hospitals was 43/70: 38/62 for general hospitals, 5/7 for academic medical centers, and 0/1 for children's hospitals. Of the respondents 30 (70%) were radiologists, 5 (12%) application or information managers, and 8 (19%), among others, clinical physicists and managers. A third (14) of the participating organizations had one to three AI applications in clinical use, with a total of 19 implementations. These implementations involved eight different vendors of which four were from the Netherlands. Most commonly used was software for bone age prediction and stroke detection. Respondents were most familiar with products aimed at neurology and cardiology. MR, CT and mammography were the most familiar modalities for AI. Most interest for clinical implementation was shown in software to triage exams. Eleven organizations (26%) had a dedicated budget for AI, either from the hospital or the department. + Conclusion: Even though the supply of AI software is extensive, clinical use remains limited showing that we are still in the initial stages of integrating AI in clinical practice in the Netherlands. 
+ Limitations: Results may be influenced by a nonresponse bias.},
+ optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Leeu21d,
+ author = {van Leeuwen, Kicky G and Meijer, Frederick J A and Schalekamp, Steven and Rutten, Matthieu J C M and van Dijk, Ewoud J and van Ginneken, Bram and Govers, Tim M and de Rooij, Maarten},
+ title = {Artificial Intelligence in Acute Stroke: an Early Health Technology Assessment of Vessel Occlusion Detection on Computed Tomography},
+ abstract = {Purpose or Learning Objective: To demonstrate the cost-effectiveness of artificial intelligence (AI) software to aid in the detection of intracranial vessel occlusions in stroke compared to standard care by performing early health technology assessment.
+ Methods or Background: We used a Markov based model from a societal perspective in a UK setting to demonstrate the potential value of an AI tool reported in expected incremental costs (IC) and effects (IE) in quality adjusted life years (QALYs). Initial population existed of patients suspected of stroke based on symptoms and exclusion of other causes as demonstrated by non-contrast cerebrum CT. Input parameters for the model were predominantly based on stroke registry data from the UK and complemented with pooled outcome data from large randomized trials. Parameters were varied to demonstrate model robustness.
+ Results or Findings: The AI strategy with its base-case parameters (6% missed diagnoses of intra-arterial therapy eligible patients by clinicians, $40 per AI analysis, 50% reduction of missed vessel occlusions by AI) resulted in modest cost-savings and incremental QALYs over the projected lifetime (IC: - $156, -0.23%; IE: +0.01 QALYs, +0.07%) per ischaemic stroke patient. Within a ninety-day window after treatment no financial (IC: +$60) and negligible QALY (IE: +0.0001) gain was observed. For each yearly cohort of patients in the UK this translates to a total cost saving of $11 million.
+ Conclusion: We showed that computer aided thrombus detection in emergency care has the potential to increase health and save costs. Results may contribute to the debate on the investments, financial accountability and reimbursement for the clinical use of AI technology.
+ Limitations: Parameter values of the model were based on results from previous studies.},
+ booktitle = ECR,
+ year = {2021},
+ optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Leeu21e,
+ author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Rutten, Matthieu J.C.M. and van Ginneken, Bram and Schalekamp, Steven},
+ title = {Performance Of A Commercial Software Package For Lung Nodule Detection On Chest Radiographs Compared With 8 Expert Readers},
+ abstract = {Purpose: Multi-center evaluation of the stand-alone performance of commercially available lung nodule detection software (Lunit INSIGHT CXR3).
+ Methods and Materials: A set of 300 posteroanterior (PA) and lateral chest radiographs from four medical centers in the Netherlands was collected. Solitary lung nodules ranging from 5 to 35 mm in size were present in 111 of the cases. All nodules were confirmed by CT within three months of the radiograph acquisition. Control radiographs were determined based on a negative CT within six months. Five radiologists and three radiology residents scored the set to provide context to the algorithm performance. All PA radiographs were processed by Lunit INSIGHT CXR3, a commercial software product that detects ten common abnormalities in chest radiographs.
Area under the receiver operating characteristics curve (AUC) and sensitivity at 90% specificity were used to measure performance. Multi-reader multi-case ROC analysis based on U-statistics (iMRMC-v4 software) was applied to compare CXR3 with the readers. Subanalysis was performed regarding nodule size (small<15mm, large>15mm) and conspicuity levels (well visible, moderately visible, subtle, very subtle).
+ Results: Out of the 300 radiographs, 7 could not be processed by CXR3, resulting in a set of 104 nodule cases and 189 normal cases for evaluation. The CXR3 AUC was 0.93 and significantly higher than the mean reader AUC of 0.82 (p<0.001). CXR3 was also significantly better than the best reader with an AUC of 0.88 (p=0.028). At a specificity level of 90%, sensitivity was 83.2% for CXR3 and 63.3% (std+-7.5%) for the reader average. Regarding conspicuity of the nodules, CXR3 AUCs were 0.99 for well visible, 0.94 for moderately visible, 0.94 for subtle, and 0.78 for very subtle nodules. No significant difference in CXR3 performance was observed between the detection of small (AUC 0.91) and large nodules (AUC 0.93).
+ Conclusions: Lunit INSIGHT CXR3 significantly outperforms the comparison group of eight readers in nodule detection on chest radiographs.
+ Clinical Relevance/Application: Generalizability of artificial intelligence algorithms is not trivial. Performance studies increase confidence in algorithms to the users, especially to those with similar patient populations.},
+ booktitle = RSNA,
+ year = {2021},
+ optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Leeu21f,
+ author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Rutten, Matthieu J.C.M. and Schalekamp, Steven and van Ginneken, Bram},
+ title = {Commercial Artificial Intelligence Solutions For Radiology: A Market Update},
+ abstract = {Purpose: Provide an overview of the current market of regulatory-cleared artificial intelligence (AI) software for radiology.
+ Methods and Materials: An overview of CE marked AI products for clinical radiology is maintained online (https://www.AIforRadiology.com). Vendors were asked to verify and complete the product information. This overview allows for analysis of market trends. Characteristics of the market were based on the state of the database on the 1st of May 2021.
+ Results: In May 2021 there were 161 CE marked AI products on the market, an increase of 36% compared with one year prior. The growth from 2019 to 2020 was 69% (from 70 to 118 products). The number of vendors offering AI products only grew with 13% from 61 in 2020 to 69 in 2021. The average number of products per company therefore increased from 1.9 to 2.3. The time from company founding to the first product on the market is on average 4 years and 1 month. Most prevalent are tools for neuro and chest imaging. With respect to modality, CT and MR covered 62% of all products. Half of the CE marked AI products (51%) have also been cleared by the FDA. To our knowledge, only four products were CE marked under the new Medical Device Regulations. Subscription or licensing are the most popular pricing models. The majority of products are offered with both the option of local and cloud-based installation.
+ Conclusions: The growth of AI products new to the market is slowing down. This effect is even stronger for vendors. Existing vendors have been expanding their portfolios.
+ Clinical Relevance/Application: The market of AI products for radiology is growing.
Our research provides a transparent overview of the available products and their evidence.},
+ booktitle = RSNA,
+ year = {2021},
+ optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Leeu22,
+ author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J.C.M.},
+ title = {The rise of artificial intelligence solutions in radiology departments in the Netherlands},
+ abstract = {Purpose: There are over 180 CE-marked artificial intelligence (AI) products for radiology commercially available in Europe, but little is known about the current clinical use. We investigated the clinical use of commercial AI software in radiology departments in the Netherlands over a two-year period.
+ Methods: We consulted the radiology department of all hospital organizations in the Netherlands (n=69) in February-March 2020 (44 respondents) and February-March 2021 (37 respondents). A representative of the department was asked to fill in a questionnaire about the (planned) clinical use of CE marked AI products for radiology, the available funding for AI, and biggest obstacles for implementation.
+ Results: From 2020 to 2021 the percentage of respondents that desired the adoption of AI tools in radiology increased from 63% to 86%. In 2020, 14 responding organisations used AI in clinical practice, which increased to 23 (33% of all organizations) in 2021. The total number of AI implementations in clinical practice expanded by 157%, from 19 to 49 implementations. Also, the diversity increased from 8 to 32 unique products. In 2021, 35% of respondents had budgets allocated for AI implementations either on the departmental level or on the institutional level, which was 26% in 2020. The major obstacles for AI adoption shifted from difficulties with the technical integration (2020) to the lack of budgets and an unclear business case (2021). Technical integration remained the second most often listed obstacle.
+ Conclusion: AI adoption is gradually increasing in clinical radiology in the Netherlands. The number of radiology departments using AI has increased to at least a third of all organizations. Also, the number and diversity of AI applications per department grew substantially.
+ Limitations: Results may be influenced by a nonresponse bias.},
+ booktitle = ECR,
+ year = {2022},
+ optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Leeu22a,
+ author = {Deden, Laura N. and van Leeuwen, Kicky G. and Becks, M.J. and Bernsen, M.L.E. and de Rooij, Maarten and Martens, J.M. and Meijer, F.J.A.},
+ title = {Gluren bij de buren - Evaluating and sharing real-world experience of an AI stroke tool in two centres},
+ abstract = {Background: Currently, many hospitals are implementing AI software. However, clear clinical implementation procedures are not yet available. In order to exchange experiences, two interventional stroke centres (Radboudumc and Rijnstate) collaborated in the prospective evaluation of an AI tool for stroke diagnostics.
+ Methodology: Primary aim of StrokeViewer (Nicolab) implementation in both centres was diagnostic support in detecting large vessel occlusions (LVO) in anterior cerebral circulation. Additionally, in Rijnstate analysis of cerebral CT perfusion (CTP) was available. In Radboudumc, LVO results were available after log in to the StrokeViewer server. In Rijnstate, results were pushed to PACS as a pdf-report. Trial period in Radboudumc was 12 months, in Rijnstate 7 months.
The performance of proximal LVO detection was compared with radiologists' assessments. Users filled in a questionnaire on user experience at several time points. In Radboudumc, the use was monitored by individual log-in information. + Results: Quantitative evaluation of ICA, M1 and proximal M2 occlusion detection (prevalence 18%) resulted in a case base sensitivity and specificity of 74% and 91% in Rijnstate (n=276) and 77% and 91% in Radboudumc (n=516). The use of the tool decreased over time. Radiologists unfamiliar with LVO assessment tended to value the AI report more than experienced radiologists. The net promoter scores were -56% in Radboudumc and -65% in Rijnstate. The tool was considered user friendly (7.2/10). CTP assessment in Rijnstate was used more frequently than LVO detection. + Conclusion: This evaluation aids to understand some of the challenges involved in clinical implementation and acceptance by users of AI tools. Findings are consistent for both centres. Success is not only dependent on the product and its performance, but also on clinical goal setting, expectations, context and implementation choices. Sharing experience within the NVvR AInetwork can help to gain insights into crucial factors for success ("Gluren-bij-de-buren").}, + booktitle = {Radiologendagen}, + year = {2022}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Leeu22b, + author = {van Leeuwen, Kicky G. and Becks, M.J. and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J.C.M. and de Rooij, Maarten and Meijer, F.J.A.}, + booktitle = ECR, + title = {Real-world evaluation of artificial intelligence software for cerebral large vessel occlusion detection in {CT} angiography}, + abstract = {Purpose: The commercially available AI tool (StrokeViewer v2, Nicolab) supports the diagnostic process of stroke by detecting large vessel occlusions (LVO) on CTA. We prospectively evaluated this tool in our department to monitor safety and impact. + Methods: We implemented the software with the goal to improve the diagnosis of LVO and elevate the diagnostic confidence of the radiologist (resident). We used quantitative measures (data from clinical systems, vendor log files) and qualitative measures (user survey) to analyse diagnostic performance, number of users, login attempts, radiologists' diagnostic confidence, and user experience. + Results: In total, 226 CTAs with a clinical indication of stroke between January-June 2021 were prospectively evaluated. Thirteen cases of posterior circulation and distal vessel occlusions were excluded as they were outside the intended use of the AI tool. The AI tool missed 12 of the 36 occlusions in the middle cerebral or intracranial internal carotid artery (M1=1, M2=10, ICA=1) resulting in an accuracy of 86.4%. Irrespective of location, the sensitivity was 77.8% and specificity 90.4%. The number of monthly unique users varied between 8 and 24 radiologists/residents. Log in attempts dropped after the initial month (which included training) to a monthly average of 44 attempts. The diagnostic confidence did not increase during the use of the tool. The likelihood that users would recommend StrokeViewer to colleagues was rated 4.5/10. + Conclusion: Over six months, the use of StrokeViewer dropped and users did not sense improvement of diagnostic confidence. Measures have been taken to stimulate adoption for the latter six months of the trial period. 
+ Limitation: Because of the prospective character, no comparison could be made between radiologists supported by AI vs radiologists without AI.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@conference{Leeu23, + author = {van Leeuwen, Kicky G. and Hedderich, D.M. and Schalekamp, Steven}, + booktitle = ECR, + title = {Potential risk of off-label use of commercially available AI-based software for radiology}, + abstract = {Purpose or Learning Objective: The aim of this study was to analyse potential discrepancies between the claims and disclaimers of the intended purpose statements of CE-marked AI-based software for radiology. + Methods or Background: In March 2022, we asked all vendors listed on www.AIforRadiology.com (n=87) to verify or submit the intended purpose according to European clearance for their products (n=191). Any new additions were included until September 26th 2022 (n=12)). Claims and disclaimers were extracted from the statements. Potential conflicts of claims and disclaimers were flagged. + Results or Findings: We received the intended purpose statements for 157 of the 203 products. Of those, 36 were excluded as they provided too little information to analyse. The included products were certified under the current medical device regulations (class IIa = 24, class IIb = 9) and former Medical Device Directive (class I = 45, class IIa = 39, class IIb = 3). Of the 121 included statements 56 held disclaimers. For 13 of these products the claims and disclaimers were flagged to contradict each other. Potential discrepant disclaimer statements were e.g. 'act per the standard of care' (n=7) and 'not for diagnostic use' (n=6), while claiming to aid in the diagnosis, triaging or risk scoring of clinical conditions. + Conclusion: Potential discrepancies in claims and disclaimers were found for a substantial number of AI-tools bearing the risk that users of the AI software misunderstand the permitted use-cases which may lead to off-label use. + Limitations: Not all intended purpose statements received were of sufficient quality to use for analysis. The definition of what information the intended purpose should contain is not clearly specified under the MDR making it hard to objectively assess or compare.}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + +@article{Leeu23a, + author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J. C. M.}, + title = {Clinical use of artificial intelligence products for radiology in the Netherlands between 2020 and 2022}, + doi = {10.1007/s00330-023-09991-5}, + url = {http://dx.doi.org/10.1007/s00330-023-09991-5}, + abstract = {Abstract + Objectives + To map the clinical use of CE-marked artificial intelligence (AI)-based software in radiology departments in the Netherlands (n = 69) between 2020 and 2022. + + Materials and methods + Our AI network (one radiologist or AI representative per Dutch hospital organization) received a questionnaire each spring from 2020 to 2022 about AI product usage, financing, and obstacles to adoption. Products that were not listed on www.AIforRadiology.com by July 2022 were excluded from the analysis. + + Results + The number of respondents was 43 in 2020, 36 in 2021, and 33 in 2022. The number of departments using AI has been growing steadily (2020: 14, 2021: 19, 2022: 23). The diversity (2020: 7, 2021: 18, 2022: 34) and the number of total implementations (2020: 19, 2021: 38, 2022: 68) has rapidly increased. 
Seven implementations were discontinued in 2022. Four hospital organizations said to use an AI platform or marketplace for the deployment of AI solutions. AI is mostly used to support chest CT (17), neuro CT (17), and musculoskeletal radiograph (12) analysis. The budget for AI was reserved in 13 of the responding centers in both 2021 and 2022. The most important obstacles to the adoption of AI remained costs and IT integration. Of the respondents, 28% stated that the implemented AI products realized health improvement and 32% assumed both health improvement and cost savings. + + Conclusion + The adoption of AI products in radiology departments in the Netherlands is showing common signs of a developing market. The major obstacles to reaching widespread adoption are a lack of financial resources and IT integration difficulties. + + Clinical relevance statement + The clinical impact of AI starts with its adoption in daily clinical practice. Increased transparency around AI products being adopted, implementation obstacles, and impact may inspire increased collaboration and improved decision-making around the implementation and financing of AI products. + + Key Points + The adoption of artificial intelligence products for radiology has steadily increased since 2020 to at least a third of the centers using AI in clinical practice in the Netherlands in 2022. + The main areas in which artificial intelligence products are used are lung nodule detection on CT, aided stroke diagnosis, and bone age prediction. + The majority of respondents experienced added value (decreased costs and/or improved outcomes) from using artificial intelligence-based software; however, major obstacles to adoption remain the costs and IT-related difficulties.}, + citation-count = {0}, + file = {Leeu23a.pdf:pdf\Leeu23a.pdf:PDF}, + journal = {European Radiology}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {03a68b7a56dda9b3cecacfd01d4f7225998fe4c8}, + all_ss_ids = {['03a68b7a56dda9b3cecacfd01d4f7225998fe4c8']}, + gscites = {1}, +} + +@article{Leeu23b, + author = {K.G.van Leeuwen and M.J. Becks and D. Grob and F. de Lange and J.H.E. Rutten and S. Schalekamp and M.J.C.M. Rutten and B. van Ginneken and M. de Rooij and F.J.A. Meijer}, + title = {AI-support for the detection of intracranial large vessel occlusions: One-year prospective evaluation}, + doi = {10.1016/j.heliyon.2023.e19065}, + abstract = {Purpose + Few studies have evaluated real-world performance of radiological AI-tools in clinical practice. Over one-year, we prospectively evaluated the use of AI software to support the detection of intracranial large vessel occlusions (LVO) on CT angiography (CTA). + Method + Quantitative measures (user log-in attempts, AI standalone performance) and qualitative data (user surveys) were reviewed by a key-user group at three timepoints. A total of 491 CTA studies of 460 patients were included for analysis. + Results + The overall accuracy of the AI-tool for LVO detection and localization was 87.6\%, sensitivity 69.1\% and specificity 91.2\%. Out of 81 LVOs, 31 of 34 (91\%) M1 occlusions were detected correctly, 19 of 38 (50\%) M2 occlusions, and 6 of 9 (67\%) ICA occlusions. The product was considered user-friendly. The diagnostic confidence of the users for LVO detection remained the same over the year. The last measured net promotor score was -56\%. The use of the AI-tool fluctuated over the year with a declining trend. 
+ Conclusions + Our pragmatic approach of evaluating the AI-tool used in clinical practice, helped us to monitor the usage, to estimate the perceived added value by the users of the AI-tool, and to make an informed decision about the continuation of the use of the AI-tool.}, + citation-count = {0}, + file = {Leeu23b.pdf:pdf\Leeu23b.pdf:PDF}, + journal = {Heliyon}, + issue = {8}, + volume = {9}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {c2586bb2f434a12ef173271af12a69ed86b74944}, + all_ss_ids = {['c2586bb2f434a12ef173271af12a69ed86b74944']}, + gscites = {0}, +} + +@phdthesis{Leeu23c, + author = {Kicky G. van Leeuwen}, + title = {Validation and implementation of commercial artificial intelligence software for radiology}, + url = {https://repository.ubn.ru.nl/handle/2066/295128}, + abstract = {The aim of this thesis is to increase transparency of the AI software applications for the radiology market: the medical specialty which currently covers 75% of all approved medical AI software. The focus is on products available for clinical use in Europe, in other words, products that are CE marked. We discuss the potential use cases of AI in radiology, map commercially available AI products, independently assess the performance of products, and measure and model the (potential) added value. With the insights we have gained and publicly shared, we enable more informed decision-making by AI purchasers, users, investors, and creators. Furthermore, it encourages use and development of AI that is safe and of value to society. + The key contributions of this research are: + - Three years of publicly sharing of information to a global audience on commercially available AI products, verified regulatory clearance information, product specifications, and scientific evidence, through www.AIforRadiology.com and associated monthly newsletter. + - Initiating the Dutch Radiology AI-network connecting "AI-champions" among hospitals to share experiences and to enable the yearly inquiry on the clinical use of commercial AI. + - Development of a framework for the independent and objective validation of commercially available AI products and applying this to ten products, for two different use cases, on data from seven medical centers. With this framework, we make validation more efficient and impartial, enabling informed purchasing or reimbursement decisions. + - One of the first demonstrations of how an early health technology assessment can be performed to demonstrate the value of an AI product before implementation.}, + copromotor = {M.J.C.M. Rutten, Dr. M. de Rooij, S. Schalekamp}, + file = {:pdf/Leeu23c.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. 
van Ginneken}, + school = {Radboud University, Nijmegen}, + year = {2023}, +} + +@article{Leij17, + author = {van Leijsen, Esther MC and van Uden, Ingeborg WM and Ghafoorian, Mohsen and Bergkamp, Mayra I and Lohner, Valerie and Kooijmans, Eline CM and van der Holst, Helena M and Tuladhar, Anil M and Norris, David G and van Dijk, Ewoud J and Rutten-Jacobs, Loes CA and Platel, Bram and Klijn, Catharina JM and de Leeuw, Frank-Erik}, + title = {Nonlinear temporal dynamics of cerebral small vessel disease The RUN DMC study}, + journal = Neurology, + year = {2017}, + volume = {89}, + number = {15}, + pages = {1569-1577}, + doi = {10.1212/WNL.0000000000004490}, + abstract = {Objective: To investigate the temporal dynamics of cerebral small vessel disease (SVD) by 3 consecutive assessments over a period of 9 years, distinguishing progression from regression. + + Methods: Changes in SVD markers of 276 participants of the Radboud University Nijmegen Diffusion Tensor and Magnetic Resonance Imaging Cohort (RUN DMC) cohort were assessed at 3 time points over 9 years. We assessed white matter hyperintensities (WMH) volume by semiautomatic segmentation and rated lacunes and microbleeds manually. We categorized baseline WMH severity as mild, moderate, or severe according to the modified Fazekas scale. We performed mixed-effects regression analysis including a quadratic term for increasing age. + + Results: Mean WMH progression over 9 years was 4.7 mL (0.54 mL/y; interquartile range 0.95-5.5 mL), 20.3% of patients had incident lacunes (2.3%/y), and 18.9% had incident microbleeds (2.2%/y). WMH volume declined in 9.4% of the participants during the first follow-up interval, but only for 1 participant (0.4%) throughout the whole follow-up. Lacunes disappeared in 3.6% and microbleeds in 5.7% of the participants. WMH progression accelerated over time: including a quadratic term for increasing age during follow-up significantly improved the model (p < 0.001). SVD progression was predominantly seen in participants with moderate to severe WMH at baseline compared to those with mild WMH (odds ratio [OR] 35.5, 95% confidence interval [CI] 15.8-80.0, p < 0.001 for WMH progression; OR 5.7, 95% CI 2.8-11.2, p < 0.001 for incident lacunes; and OR 2.9, 95% CI 1.4-5.9, p = 0.003 for incident microbleeds). + + Conclusions: SVD progression is nonlinear, accelerating over time, and a highly dynamic process, with progression interrupted by reduction in some, in a population that on average shows progression.}, + file = {Leij17.pdf:pdf\\Leij17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28878046}, + month = {9}, + gsid = {18114267648681255154}, + gscites = {89}, + ss_id = {f8c72c5da56aec5e573cc9e4a56d2a53b151e5a0}, + all_ss_ids = {['f8c72c5da56aec5e573cc9e4a56d2a53b151e5a0']}, +} + +@article{Leij18, + author = {van Leijsen, Esther M. C. and Tay, Jonathan and van Uden, Ingeborg W. M. and Kooijmans, Eline C. M. and Bergkamp, Mayra I. and van der Holst, Helena M. and Ghafoorian, Mohsen and Platel, Bram and Norris, David G. and Kessels, Roy P. C. and Markus, Hugh S. and Tuladhar, Anil M. 
and de Leeuw, Frank-Erik},
+ title = {Memory decline in elderly with cerebral small vessel disease explained by temporal interactions between white matter hyperintensities and hippocampal atrophy},
+ doi = {10.1002/hipo.23039},
+ year = {2018},
+ abstract = {AbstractWhite matter hyperintensities (WMH) constitute the visible spectrum of cerebral small vessel disease (SVD) markers and are associated with cognitive decline, although they do not fully account for memory decline observed in individuals with SVD. We hypothesize that WMH might exert their effect on memory decline indirectly by affecting remote brain structures such as the hippocampus. We investigated the temporal interactions between WMH, hippocampal atrophy and memory decline in older adults with SVD. Five hundred and three participants of the RUNDMC study underwent neuroimaging and cognitive assessments up to 3 times over 8.7 years. We assessed WMH volumes semi-automatically and calculated hippocampal volumes (HV) using FreeSurfer. We used linear mixed effects models and causal mediation analyses to assess both interaction and mediation effects of hippocampal atrophy in the associations between WMH and memory decline, separately for working memory (WM) and episodic memory (EM). Linear mixed effect models revealed that the interaction between WMH and hippocampal volumes explained memory decline (WM: b = .067; 95%CI[.024-0.111]; p < .01; EM: b = .061; 95%CI[.025-.098]; p < .01), with better model fit when the WMH*HV interaction term was added to the model, for both WM (likelihood ratio test, kh2[1] = 9.3, p < .01) and for EM (likelihood ratio test, kh2[1] = 10.7, p < .01). Mediation models showed that both baseline WMH volume (b = -.170; p = .001) and hippocampal atrophy (b = 0.126; p = .009) were independently related to EM decline, but the effect of baseline WMH on EM decline was not mediated by hippocampal atrophy (p value indirect effect: 0.572). Memory decline in elderly with SVD was best explained by the interaction of WMH and hippocampal volumes. The relationship between WMH and memory was not causally mediated by hippocampal atrophy, suggesting that memory decline during aging is a heterogeneous condition in which different pathologies contribute to the memory decline observed in elderly with SVD.},
+ url = {http://dx.doi.org/10.1002/hipo.23039},
+ file = {Leij18.pdf:pdf\Leij18.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Hippocampus},
+ citation-count = {24},
+ automatic = {yes},
+ pages = {500-510},
+ volume = {29},
+}
+
+@article{Leij19,
+ author = {van Leijsen, Esther Mc and Bergkamp, Mayra I and van Uden, Ingeborg Wm and Cooijmans, Sjacky and Ghafoorian, Mohsen and van der Holst, Helena M and Norris, David G and Kessels, Roy Pc and Platel, Bram and Tuladhar, Anil M and de Leeuw, Frank-Erik},
+ title = {Cognitive consequences of regression of cerebral small vessel disease},
+ journal = ESJ,
+ year = {2019},
+ volume = {4},
+ issue = {1},
+ month = {3},
+ pages = {85--89},
+ doi = {10.1177/2396987318820790},
+ abstract = {Recent studies have shown that neuroimaging markers of cerebral small vessel disease can also regress over time. We investigated the cognitive consequences of regression of small vessel disease markers. Two hundred and seventy-six participants of the RUNDMC study underwent neuroimaging and cognitive assessments at three time-points over 8.7 years. We semi-automatically assessed white matter hyperintensities volumes and manually rated lacunes and microbleeds. We analysed differences in cognitive decline and accompanying brain atrophy between participants with regression, progression and stable small vessel disease by analysis of variance. Fifty-six participants (20.3%) showed regression of small vessel disease markers: 31 (11.2%) white matter hyperintensities regression, 10 (3.6%) vanishing lacunes and 27 (9.8%) vanishing microbleeds. Participants with regression showed a decline in overall cognition, memory, psychomotor speed and executive function similar to stable small vessel disease.
Participants with small vessel disease progression showed more cognitive decline compared with stable small vessel disease (p < 0.001 for cognitive index and memory; p < 0.01 for executive function), although significance disappeared after adjusting for age and sex. Loss of total brain, gray matter and white matter volume did not differ between participants with small vessel disease regression and stable small vessel disease, while participants with small vessel disease progression showed more volume loss of total brain and gray matter compared to those with stable small vessel disease (p < 0.05), although significance disappeared after adjustments. Regression of small vessel disease markers was associated with similar cognitive decline compared to stable small vessel disease and did not accompany brain atrophy, suggesting that small vessel disease regression follows a relatively benign clinical course. Future studies are required to validate these findings and to assess the role of vascular risk factor control on small vessel disease regression and possible recovery of clinical symptoms. Our findings of comparable cognitive decline between participants with regression and stable small vessel disease might suggest that small vessel disease regression has a relative benign cognitive outcome.}, + file = {Leij19.pdf:pdf\\Leij19.pdf:PDF}, + optnote = {DIAG}, + pmid = {31165098}, + gsid = {12830074869247778014}, + gscites = {12}, + ss_id = {c321fef685aa56d1c5a49d01e6de616f191011be}, + all_ss_ids = {['c321fef685aa56d1c5a49d01e6de616f191011be']}, +} + +@article{Leli08, + author = {B. P. F. Lelieveldt and N. Karssemeijer}, + title = {Information {P}rocessing {I}n {M}edical {I}maging 2007}, + journal = MIA, + year = {2008}, + volume = {12}, + pages = {729--730}, + doi = {10.1016/j.media.2008.03.005}, + file = {Leli08.pdf:pdf\\Leli08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {18457986}, + month = {12}, + gsid = {538772308921121002}, + gscites = {5}, + ss_id = {1bbe4a605e4889376878540c245a351aa83d9716}, + all_ss_ids = {['1bbe4a605e4889376878540c245a351aa83d9716']}, +} + +@article{Lens16, + author = {Eelco Lens and Alexis NTJ Kotte and Ajay Patel and Hanne D Heerkens and Matthieu Bal and Geertjan van Tienhoven and Arjan Bel and Astrid van der Horst and Gert J Meijer}, + title = {Probabilistic treatment planning for pancreatic cancer treatment: prospective incorporation of respiratory motion shows only limited dosimetric benefit}, + journal = {Acta Oncologica}, + year = {2016}, + pages = {1--7}, + doi = {10.1080/0284186x.2016.1257863}, + file = {:pdf\\Lens16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27885864}, + publisher = {Taylor \& Francis}, + month = {11}, + ss_id = {2d3e4695c85c92419e3b715fa385b33568c06cd8}, + all_ss_ids = {['2d3e4695c85c92419e3b715fa385b33568c06cd8']}, + gscites = {7}, +} + +@conference{Leon23, + author = {Leon-Ferre, Roberto A. and Carter, Jodi M. and Zahrieh, David and Sinnwell, Jason P. and Salgado, Roberto and Suman, Vera and Hillman, David and Boughey, Judy C. and Kalari, Krishna R. and Couch, Fergus J. and Ingle, James N. 
and Balkenkohl, Maschenka and Ciompi, Francesco and van der Laak, Jeroen and Goetz, Matthew P.},
+ title = {Abstract P2-11-34: Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy},
+ doi = {10.1158/1538-7445.sabcs22-p2-11-34},
+ year = {2023},
+ abstract = {Abstract
+ Background: Triple-negative breast cancers (TNBC) exhibit high rates of recurrence and mortality. However, recent studies suggest that a subset of patients (pts) with early-stage TNBC enriched in tumor-infiltrating lymphocytes (TILs) have excellent clinical outcomes even in the absence of systemic therapy. Additional histological biomarkers that could identify pts for future systemic therapy escalation/de-escalation strategies are of great interest. TNBC are frequently highly proliferative with abundant mitoses. However, classic markers of proliferation (manual mitosis counting and Ki-67) appear to offer no prognostic value. Here, we evaluated the prognostic effects of automated mitotic spindle hotspot (AMSH) counting on RFS in independent cohorts of systemically untreated early-stage TNBC.
+ Methods: AMSH counting was conducted with a state-of-the-art deep learning algorithm trained on the detection of mitoses within 2 mm2 areas with the highest mitotic density (i.e. hotspots) in digital H&E images. Details of the development, training and validation of the algorithm were published previously [1] in a cohort of unselected TNBC. We obtained AMSH counts in a centrally confirmed TNBC cohort from Mayo Clinic [2] and focused our analysis on pts who received locoregional therapy but no systemic therapy. Using a fractional polynomial analysis with a multivariable proportional hazards regression model, we confirmed the assumption of linearity in the log hazard for the continuous variable AMSH and evaluated whether AMSH counts were prognostic of RFS. We corroborated our findings in an independent cohort of systemically untreated TNBC pts from the Radboud University Medical Center in the Netherlands (Radboud Cohort). Results are reported at a median follow-up of 8.1 and 6.7 years for the Mayo and Netherlands cohorts, respectively.
+ Results: Among 182 pts with who did not receive systemic therapy in the Mayo Cohort, 140 (77\%) with available AMSH counts were included. The mean age was 61 (range: 31-94), 71\% were postmenopausal, 67\% had tumors <= 2cm, and 83\% were node-negative. As expected, most tumors were Nottingham grade 3 (84\%) and had a high Ki-67 proliferation index (54\% with Ki-67 >30\%). Most tumors (73\%) had stromal TILs <= 30\%. The median AMSH count was 18 (IQR: 8, 42). AMSH counts were linearly associated with grade and tumor size, with the proportion of pts with grade 3 tumors and size > 2 cm increasing as the AMSH counts increased (p=0.007 and p=0.059, respectively). In a multivariate model controlling for nodal status, tumor size, and stromal TILs, AMSH counts were independently associated with RFS (p < 0.0001). For every 10-point increase in the AMSH count, we observed a 17\% increase in the risk of experiencing an RFS event (HR 1.17, 95\% CI 1.08-1.26). We corroborated our findings in the Radboud Cohort (n=126). The mean age was 68 (range: 40-96), and 81\% were node-negative.
While the median AMSH count was 36 (IQR: 16-63), higher than in the Mayo Cohort (p=0.004), the prognostic impact was similar, with a significant association between AMSH count and RFS (p=0.028) in a multivariate model corrected for nodal status, tumor size, and stromal TILs. For every 10-point increase in the AMSH count in the Netherlands cohort, we observed a 9\% increase in the risk of experiencing an RFS event (HR 1.09, 95\% CI 1.01-1.17). RFS rates according to AMSH counts for both cohorts are shown in the Table. + Conclusions: AMSH counting is a new proliferation biomarker that provides prognostic value independent of nodal status, tumor size, and stromal TILs in systemically untreated early-stage TNBC. Plans are underway to evaluate AMSH counts in additional cohorts of systemically untreated TNBC, and in other disease settings such as prior to neoadjuvant systemic therapy. If validated, this biomarker should be prospectively evaluated as a potential selection biomarker in clinical trials of systemic therapy de-escalation. + References: + 1. PMID: 29994086 + 2. PMID: 28913760 + Table RFS according to AMSH counts in the Mayo and Radboud Cohorts + Citation Format: Roberto A. Leon-Ferre, Jodi M. Carter, David Zahrieh, Jason P. Sinnwell, Roberto Salgado, Vera Suman, David Hillman, Judy C. Boughey, Krishna R. Kalari, Fergus J. Couch, James N. Ingle, Maschenka Balkenkohl, Francesco Ciompi, Jeroen van der Laak, Matthew P. Goetz. Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy [abstract]. In: Proceedings of the 2022 San Antonio Breast Cancer Symposium; 2022 Dec 6-10; San Antonio, TX. Philadelphia (PA): AACR; Cancer Res 2023;83(5 Suppl):Abstract nr P2-11-34.}, + url = {http://dx.doi.org/10.1158/1538-7445.SABCS22-P2-11-34}, + file = {Leon23.pdf:pdf\Leon23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancer Research}, + citation-count = {0}, + automatic = {yes}, + pages = {P2-11-34-P2-11-34}, + volume = {83}, + all_ss_ids = {6ab3424f10b236c992d823568dfca2075e2ad46e}, + gscites = {0}, +} + +@article{Lepp95, + author = {Leppert, A. G. and Prokop, M. and Schaefer-Prokop, C. M. and Galanski, M.}, + title = {Detection of simulated chest lesions: comparison of a conventional screen-film combination, an asymmetric screen-film system, and storage phosphor radiography}, + journal = Radiology, + year = {1995}, + volume = {195}, + pages = {259--263}, + abstract = {To compare a high-contrast asymmetric screen-film system with a conventional screen-film combination and storage phosphor radiographs for detection of simulated chest lesions.To test the diagnostic performance of these three systems, the authors used three types of simulated lesions that were superimposed over the chests of 10 patients and an anthropomorphic phantom. In the patient and phantom study, a total of 1,350 observations by each of the eight radiologists were analyzed by means of receiver operating characteristics.In the patient study, mediastinal nodules were better visualized with high-contrast asymmetric screen-film and storage phosphor radiographs compared with the conventional screen-film system. For visualization of pulmonary nodules, the high-contrast asymmetric screen-film system was best. 
Micronodules were poorly visualized on high-contrast asymmetric screen-film and storage phosphor radiographs, but only in the phantom study. The high-contrast asymmetric screen-film system combines the advantages of conventional screen-film radiographs with improved visualization of the mediastinum.},
+ optnote = {DIAG},
+ number = {1},
+ pmid = {7892482},
+ month = {4},
+ gsid = {14644887371056422135},
+ gscites = {21},
+}
+
+@inproceedings{Lesn11,
+ author = {Jan Lesniak and Rianne Hupse and Michiel Kallenberg and Maurice Samulski and R\'{e}mi Blanc and Nico Karssemeijer and G\'{a}bor Sz\'{e}kely},
+ title = {Computer {A}ided {D}etection of {B}reast {M}asses in {M}ammography using {S}upport {V}ector {M}achine {C}lassification},
+ booktitle = MI,
+ year = {2011},
+ volume = {7963},
+ series = SPIE,
+ pages = {79631K},
+ doi = {10.1117/12.878140},
+ abstract = {The reduction of false positive marks in breast mass CAD is an active area of research. Typically, the problem can be approached either by developing more discriminative features or by employing different classifier designs. Usually one intends to find an optimal combination of classifier configuration and small number of features to ensure high classification performance and a robust model with good generalization capabilities. In this paper, we investigate the potential benefit of relying on a support vector machine (SVM) classifier for the detection of masses. The evaluation is based on a 10-fold cross validation over a large database of screenfilm mammograms (10397 images). The purpose of this study is twofold: first, we assess the SVM performance compared to neural networks (NNet), k-nearest neighbor classification (k-NN) and linear discriminant analysis (LDA). Second, we study the classifiers' performances when using a set of 30 and a set of 73 region-based features. The CAD performance is quantified by the mean sensitivity in 0.05 to 1 false positives per exam on the free-response receiver operating characteristic curve. The best mean exam sensitivities found were 0.545, 0.636, 0.648, 0.675 for LDA, k-NN, NNet and SVM. K-NN and NNet proved to be stable against variation of the featuresets. Conversely, LDA and SVM exhibited an increase in performance when adding more features. It is concluded that with an SVM a more pronounced reduction of false positives is possible, given that a large number of cases and features are available.},
+ file = {Lesn11.pdf:pdf\\Lesn11.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ number = {1},
+ month = {3},
+ gsid = {5157723055013579670},
+ gscites = {17},
+ ss_id = {01491b1e830ef88a3e39a06313bc27390167fd86},
+ all_ss_ids = {['01491b1e830ef88a3e39a06313bc27390167fd86']},
+}
+
+@inproceedings{Lesn12,
+ author = {J. M. Lesniak and G. van Schie and C. Tanner and B. Platel and H. Huisman and N. Karssemeijer and G. Szekely},
+ title = {Multimodal Classification of Breast Masses in Mammography and {MRI} using Unimodal Feature Selection and Decision Fusion},
+ booktitle = {IWDM '12: Proceedings of the 11th International Workshop on Breast Imaging},
+ year = {2012},
+ volume = {7361},
+ series = LNCS,
+ pages = {88--95},
+ doi = {10.1007/978-3-642-31271-7_12},
+ abstract = {In this work, a classifier combination approach for computer aided diagnosis (CADx) of breast mass lesions in mammography (MG) and magnetic resonance imaging (MRI) is investigated, using a database with 278 and 243 findings in MG resp. MRI including 98 multimodal (MM) lesion annotations.
For each modality, feature selection was performed separately with linear Support Vector Machines (SVM). Using nonlinear SVMs, calibrated unimodal malignancy estimates were obtained and fused to a multimodal (MM) estimate by averaging. Evaluating the area under the receiver operating characteristic curve (AUC), feature selection raised AUC from 0.68, 0.69 and 0.72 for MG, MRI and MM to 0.76, 0.73 and 0.81 with a significant improvement for MM (P=0.018). Multimodal classification offered increased performance compared to MG and MRI (P=0.181 and P=0.087). In conclusion, unimodal feature selection significantly increased multimodal classification performance and can provide a useful tool for generating joint CADx scores in the multimodal setting.}, + file = {Lesn12.pdf:pdf\\Lesn12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {9813410708328583159}, + gscites = {5}, + ss_id = {a37cda5126f5024777d9440c5f567c9e5baae4ce}, + all_ss_ids = {['a37cda5126f5024777d9440c5f567c9e5baae4ce']}, +} + +@article{Lesn12a, + author = {Lesniak, J. M. and Hupse, R. and Blanc, R. and Karssemeijer, N. and Sz\'{e}kely, G.}, + title = {Comparative evaluation of support vector machine classification for computer aided detection of breast masses in mammography}, + journal = PMB, + year = {2012}, + volume = {57}, + pages = {5295--5307}, + doi = {10.1088/0031-9155/57/16/5295}, + abstract = {False positive (FP) marks represent an obstacle for effective use of computer-aided detection (CADe) of breast masses in mammography. Typically, the problem can be approached either by developing more discriminative features or by employing different classifier designs. In this paper, the usage of support vector machine (SVM) classification for FP reduction in CADe is investigated, presenting a systematic quantitative evaluation against neural networks, k-nearest neighbor classification, linear discriminant analysis and random forests. A large database of 2516 film mammography examinations and 73 input features was used to train the classifiers and evaluate for their performance on correctly diagnosed exams as well as false negatives. Further, classifier robustness was investigated using varying training data and feature sets as input. The evaluation was based on the mean exam sensitivity in 0.05-1 FPs on normals on the free-response receiver operating characteristic curve (FROC), incorporated into a tenfold cross validation framework. It was found that SVM classification using a Gaussian kernel offered significantly increased detection performance (P = 0.0002) compared to the reference methods. Varying training data and input features, SVMs showed improved exploitation of large feature sets. It is concluded that with the SVM-based CADe a significant reduction of FPs is possible outperforming other state-of-the-art approaches for breast mass CADe.}, + file = {Lesn12a.pdf:pdf\\Lesn12a.pdf:PDF}, + optnote = {DIAG}, + number = {16}, + pmid = {22853938}, + month = {8}, + gsid = {683635356274851558}, + gscites = {22}, + ss_id = {1d8b4341ee856c294dc6c3d13e8cb01ab8dc44a4}, + all_ss_ids = {['1d8b4341ee856c294dc6c3d13e8cb01ab8dc44a4']}, +} + +@article{Less13, + author = {Nikolas Lessmann and Daniel Dromann and Alexander Schlaefer}, + title = {Feasibility of respiratory motion-compensated stereoscopic {X-ray} tracking for bronchoscopy}, + journal = IJCARS, + year = {2013}, + volume = {9}, + pages = {199--209}, + doi = {10.1007/s11548-013-0920-9}, + optnote = {DIAG}, +} + +@conference{Less15, + author = {Lessmann, N. and Isgum, I. and Lam, S. 
and Mayo, J. and de Jong, P. A. and Viergever, M. A. and van Ginneken, B.}, + title = {Automatic coronary calcium scoring and cardiovascular risk estimation in the {Pan-Canadian} lung cancer screening trial}, + booktitle = RSNA, + year = {2015}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Less16, + author = {Nikolas Lessmann and Ivana I{\v{s}}gum and Arnaud A.A. Setio and Bob D. de Vos and Francesco Ciompi and Pim A. de Jong and Matthijs Oudkerk and Willem P. Th. M. Mali and Max A. Viergever and Bram van Ginneken}, + title = {Deep convolutional neural networks for automatic coronary calcium scoring in a screening study with low-dose chest {CT}}, + booktitle = MI, + year = {2016}, + volume = {9785}, + series = SPIE, + pages = {978511-1 -- 978511-6}, + doi = {10.1117/12.2216978}, + abstract = {The amount of calcifications in the coronary arteries is a powerful and independent predictor of cardiovascular events and is used to identify subjects at high risk who might benefit from preventive treatment. Routine quantification of coronary calcium scores can complement screening programs using low-dose chest CT, such as lung cancer screening. We present a system for automatic coronary calcium scoring based on deep convolutional neural networks (CNNs). + + The system uses three independently trained CNNs to estimate a bounding box around the heart. In this region of interest, connected components above 130 HU are considered candidates for coronary artery calcifications. To separate them from other high intensity lesions, classification of all extracted voxels is performed by feeding two-dimensional 50 mm x 50 mm patches from three orthogonal planes into three concurrent CNNs. The networks consist of three convolutional layers and one fully-connected layer with 256 neurons. + + In the experiments, 1028 non-contrast-enhanced and non-ECG-triggered low-dose chest CT scans were used. The network was trained on 797 scans. In the remaining 231 test scans, the method detected on average 194.3 mm3 of 199.8 mm3 coronary calcifications per scan (sensitivity 97.2%) with an average false-positive volume of 10.3 mm3. Subjects were assigned to one of five standard cardiovascular risk categories based on the Agatston score. Accuracy of risk category assignment was 84.4% with a linearly weighted kappa of 0.89. + + The proposed system can perform automatic coronary artery calcium scoring to identify subjects undergoing low-dose chest CT screening who are at risk of cardiovascular events with high accuracy.}, + file = {Less16.pdf:pdf\\Less16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {11932712040638875797}, + gscites = {46}, + ss_id = {b542b6df300ef39144d6172e8fe41a0b93fd5f8b}, + all_ss_ids = {['b542b6df300ef39144d6172e8fe41a0b93fd5f8b']}, +} + +@article{Less17, + author = {{Lessmann}, N. and {van Ginneken}, B. and {Zreik}, M. and {de Jong}, P.~A. and {de Vos}, B.~D. and {Viergever}, M.~A. and I{\v{s}}gum, I.}, + title = {Automatic calcium scoring in low-dose chest {CT} using deep neural networks with dilated convolutions}, + journal = TMI, + year = {2018}, + volume = {37}, + number = {2}, + pages = {615-625}, + doi = {10.1109/TMI.2017.2769839}, + url = {https://arxiv.org/abs/1711.00349}, + abstract = {Heavy smokers undergoing screening with low-dose chest CT are affected by cardiovascular disease as much as by lung cancer. 
Low-dose chest CT scans acquired in screening enable quantification of atherosclerotic calcifications and thus enable identification of subjects at increased cardiovascular risk. This paper presents a method for automatic detection of coronary artery, thoracic aorta and cardiac valve calcifications in low-dose chest CT using two consecutive convolutional neural networks. The first network identifies and labels potential calcifications according to their anatomical location and the second network identifies true calcifications among the detected candidates. This method was trained and evaluated on a set of 1744 CT scans from the National Lung Screening Trial. To determine whether any reconstruction or only images reconstructed with soft tissue filters can be used for calcification detection, we evaluated the method on soft and medium/sharp filter reconstructions separately. On soft filter reconstructions, the method achieved F1 scores of 0.89, 0.89, 0.67, and 0.55 for coronary artery, thoracic aorta, aortic valve and mitral valve calcifications, respectively. On sharp filter reconstructions, the F1 scores were 0.84, 0.81, 0.64, and 0.66, respectively. Linearly weighted kappa coefficients for risk category assignment based on per subject coronary artery calcium were 0.91 and 0.90 for soft and sharp filter reconstructions, respectively. These results demonstrate that the presented method enables reliable automatic cardiovascular risk assessment in all low-dose chest CT scans acquired for lung cancer screening.}, + file = {Less17.pdf:pdf\\Less17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29408789}, + month = {2}, + gsid = {16774069466764003485}, + gscites = {165}, + ss_id = {1a3470626b24ccd510047925f80d21affde3c3b8}, + all_ss_ids = {['1a3470626b24ccd510047925f80d21affde3c3b8']}, +} + +@conference{Less17a, + author = {Lessmann, N. and van Ginneken, B. and de Jong, P. A. and Veldhuis, W. B. and Viergever, M. A. and Isgum, I.}, + title = {Deep learning analysis for automatic calcium scoring in routine chest {CT}}, + booktitle = RSNA, + year = {2017}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Less18, + author = {Nikolas Lessmann and Bram van Ginneken and Ivana I{\v{s}}gum}, + title = {Iterative convolutional neural networks for automatic vertebra identification and segmentation in {CT} images}, + booktitle = MI, + year = {2018}, + volume = {10574}, + series = SPIE, + doi = {10.1117/12.2292731}, + abstract = {Segmentation and labeling of the vertebrae in CT images are important steps for automatic analysis of the spine. This paper presents an automatic method based on iterative convolutional neural networks. These utilize the inherent order of the vertebral column to simplify the detection problem, so that the network can be trained with as little as ten manual reference segmentations. Vertebrae are identified and segmented individually in sequential order, first in low-resolution images that enable the analysis of context information, and afterwards in the original high-resolution images to obtain a fine segmentation. The method was trained and evaluated with 15 spine CT scans from the MICCAI CSI 2014 workshop challenge. These scans cover the whole thoracic and lumbar part of the spine of healthy young adults. In contrast to a non-iterative convolutional neural network, the proposed method correctly identified all vertebrae. 
Our method achieved a mean Dice coefficient of 0.948 and a mean surface distance of 0.29 mm and thus outperforms the best method that participated in the original challenge.}, + file = {Less18.pdf:pdf/Less18.pdf:PDF}, + optnote = {DIAG}, + month = {3}, + gsid = {3970246513012434900}, + gscites = {32}, + ss_id = {b41e47d9978abb56a5b0fa6697f5454e31579722}, + all_ss_ids = {['b41e47d9978abb56a5b0fa6697f5454e31579722']}, +} + +@inproceedings{Less18a, + author = {Nikolas Lessmann and Bram van Ginneken and Pim A. de Jong and Ivana I{\v{s}}gum}, + title = {Iterative fully convolutional neural networks for automatic vertebra segmentation}, + booktitle = MIDL, + year = {2018}, + url = {https://openreview.net/forum?id=S1NnlZnjG}, + abstract = {Precise segmentation of the vertebrae is often required for automatic detection of vertebral abnormalities. This especially enables incidental detection of abnormalities such as compression fractures in images that were acquired for other diagnostic purposes. While many CT and MR scans of the chest and abdomen cover a section of the spine, they often do not cover the entire spine. Additionally, the first and last visible vertebrae are likely only partially included in such scans. In this paper, we therefore approach vertebra segmentation as an instance segmentation problem. A fully convolutional neural network is combined with an instance memory that retains information about already segmented vertebrae. This network iteratively analyzes image patches, using the instance memory to search for and segment the first not yet segmented vertebra. At the same time, each vertebra is classified as completely or partially visible, so that partially visible vertebrae can be excluded from further analyses. We evaluated this method on spine CT scans from a vertebra segmentation challenge and on low-dose chest CT scans. The method achieved an average Dice score of 95.8% and 92.1%, respectively, and a mean absolute surface distance of 0.194 mm and 0.344 mm.}, + file = {Less18a.pdf:pdf\\Less18a.pdf:PDF}, + optnote = {DIAG}, + all_ss_ids = {['14fcdfdd2b15f6fec9b9e7b1b4189e43281273d8', '77ca4f86eaef55fb7c853dad2fd3600a3eb5b169']}, + gscites = {171}, +} + +@article{Less19, + author = {Lessmann, Nikolas and de Jong, Pim A and Celeng, Csilla and Takx, Richard A P and Viergever, Max A and van Ginneken, Bram and I{\v{s}}gum, Ivana}, + title = {Sex Differences in Coronary Artery and Thoracic Aorta Calcification and Their Association With Cardiovascular Mortality in Heavy Smokers}, + journal = JACCCI, + year = {2019}, + volume = {12}, + pages = {1808-1817}, + doi = {10.1016/j.jcmg.2018.10.026}, + abstract = {The aim of this study was to investigate sex differences in the prevalence, extent, and association of coronary artery calcium (CAC) and thoracic aorta calcium (TAC) scores with cardiovascular mortality in a population eligible for lung screening. CAC and TAC scores derived from chest computed tomography (CT) might be useful biomarkers for individualized cardiovascular disease prevention and could be especially relevant in high-risk populations such as heavy smokers. Therefore, it is important to know the prevalence of arterial calcifications in male and female heavy smokers, and if there are differences in the predictive value calcifications carry. We performed a nested case-control study with 5,718 participants of the CT arm of the NLST (National Lung Screening Trial). 
Prevalence and extent of CAC and TAC were resampled to the full cohort to provide unbiased estimates of the typical calcium burden of male and female heavy smokers. Weighted Cox proportional hazards regression was used to assess differences in the association of CAC and TAC scores with all-cause and cardiovascular mortality. CAC was substantially more common and more severe in men (prevalence: 81% vs. 60%; median volume: 104 mm3 vs.12 mm3). Women had CAC comparable to that of men who were 10 years younger. TAC was equally common in men and women, with a tendency to be more pronounced in women (prevalence: 92% vs. 93%; median volume: 388 mm3 vs. 404 mm3). Both types of calcification were associated with increased cardiovascular and all-cause mortality. TAC scores improved the prediction of coronary heart disease mortality over CAC in men, but not in women. In both sexes, TAC, but not CAC, was associated with cardiovascular mortality other than coronary heart disease. CAC develops later in women, whereas TAC develops equally in both sexes. CAC is strongly associated with coronary heart disease, whereas TAC is especially associated with extracardiac vascular mortality in either sex.}, + file = {Less19.pdf:pdf\\Less19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30660540}, + month = {9}, + gsid = {13396141710348170095}, + gscites = {16}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/208061}, + ss_id = {31d77e71f4151452d18b69a5649a64df561f6af7}, + all_ss_ids = {['31d77e71f4151452d18b69a5649a64df561f6af7']}, +} + +@article{Less19a, + author = {Lessmann, Nikolas and van Ginneken, Bram and de Jong, Pim A. and I{\v{s}}gum, Ivana}, + title = {Iterative fully convolutional neural networks for automatic vertebra segmentation and identification}, + journal = MIA, + year = {2019}, + volume = {53}, + pages = {142--155}, + doi = {10.1016/j.media.2019.02.005}, + url = {https://arxiv.org/abs/1804.04383}, + abstract = {Precise segmentation and anatomical identification of the vertebrae provides the basis for automatic analysis of the spine, such as detection of vertebral compression fractures or other abnormalities. Most dedicated spine CT and MR scans as well as scans of the chest, abdomen or neck cover only part of the spine. Segmentation and identification should therefore not rely on the visibility of certain vertebrae or a certain number of vertebrae. We propose an iterative instance segmentation approach that uses a fully convolutional neural network to segment and label vertebrae one after the other, independently of the number of visible vertebrae. This instance-by-instance segmentation is enabled by combining the network with a memory component that retains information about already segmented vertebrae. The network iteratively analyzes image patches, using information from both image and memory to search for the next vertebra. To efficiently traverse the image, we include the prior knowledge that the vertebrae are always located next to each other, which is used to follow the vertebral column. The network concurrently performs multiple tasks, which are segmentation of a vertebra, regression of its anatomical label and prediction whether the vertebra is completely visible in the image, which allows to exclude incompletely visible vertebrae from further analyses. The predicted anatomical labels of the individual vertebrae are additionally refined with a maximum likelihood approach, choosing the overall most likely labeling if all detected vertebrae are taken into account. 
This method was evaluated with five diverse datasets, including multiple modalities (CT and MR), various fields of view and coverages of different sections of the spine, and a particularly challenging set of low-dose chest CT scans. For vertebra segmentation, the average Dice score was 94.9 +- 2.1% with an average absolute symmetric surface distance of 0.2 +- 10.1mm. The anatomical identification had an accuracy of 93 %, corresponding to a single case with mislabeled vertebrae. Vertebrae were classified as completely or incompletely visible with an accuracy of 97 %. The proposed iterative segmentation method compares favorably with state-of-the-art methods and is fast, flexible and generalizable.}, + file = {Less19a.pdf:pdf\\Less19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30771712}, + month = {4}, + gsid = {1055664958726725388}, + gscites = {170}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/202066}, + ss_id = {77ca4f86eaef55fb7c853dad2fd3600a3eb5b169}, + all_ss_ids = {['77ca4f86eaef55fb7c853dad2fd3600a3eb5b169']}, +} + +@phdthesis{Less19b, + author = {Nikolas Lessmann}, + title = {Machine Learning based quantification of extrapulmonary diseases in chest CT}, + year = {2019}, + url = {https://dspace.library.uu.nl/handle/1874/380319}, + abstract = {In several countries, including the US and China, and likely soon also in Europe, heavy cigarette smokers are regularly invited for a computed tomography (CT) scan of their lungs (chest CT). This scan can help detect lung cancer in an early stage, when treatment is more effective. Heavy smokers often also have other chronic diseases, of which some can also be detected on these scans, or of which the severity can be measured in these scans. The research in this thesis focuses on automatic analysis of lung cancer screening chest CT scans to measure the severity of cardiovascular disease and osteoporosis. Automatic image analysis makes it possible to include these assessments into the screening workflow without requiring additional reading time of radiologists or image analysts. The image analysis methods that we developed are based on deep convolutional neural networks, a form of machine learning. We developed a method that detects calcifications of the coronary arteries, the aorta and the heart valves. The amount of calcification of these arteries is an indicator of the severity of cardiovascular disease and the risk to have a cardiovascular event, such as heart attack or stroke. We also applied this method to a large dataset from a US screening study to investigate differences between men and women. For osteoporosis analysis, we developed a method that finds the vertebrae in the image and another method that partitions the vertebrae. Together, this allows for automatic measurement of, e.g., the density or size of the vertebral bodies.}, + copromotor = {I. Isgum}, + file = {Less19b.pdf:pdf\\Less19b.pdf:PDF}, + optnote = {DIAG}, + promotor = {M. A. Viergever, B. van Ginneken, P. A. de Jong}, + school = {Utrecht University}, + journal = {PhD thesis}, + all_ss_ids = {42315be6452636824d9004d5d8aa2fe8924494a2}, + gscites = {0}, +} + +@inproceedings{Less19c, + author = {Nikolas Lessmann and Jelmer M. Wolterink and Majd Zreik and Max A. 
Viergever and Bram van Ginneken and Ivana Isgum}, + title = {Vertebra partitioning with thin-plate spline surfaces steered by a convolutional neural network}, + booktitle = MIDL, + year = {2019}, + url = {https://arxiv.org/abs/1907.10978}, + abstract = {Thin-plate splines can be used for interpolation of image values, but can also be used to represent a smooth surface, such as the boundary between two structures. We present a method for partitioning vertebra segmentation masks into two substructures, the vertebral body and the posterior elements, using a convolutional neural network that predicts the boundary between the two structures. This boundary is modeled as a thin-plate spline surface defined by a set of control points predicted by the network. The neural network is trained using the reconstruction error of a convolutional autoencoder to enable the use of unpaired data.}, + file = {Less19c.pdf:pdf\\Less19c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {14928274104806083629}, + gscites = {1}, + ss_id = {47d4ee03c5b2596dbfcfb4373de7e05e1cd831dc}, + all_ss_ids = {['47d4ee03c5b2596dbfcfb4373de7e05e1cd831dc']}, +} + +@article{Less20, + author = {Lessmann, Nikolas and S\'{a}nchez, Clara I. and Beenen, Ludo and Boulogne, Luuk H. and Brink, Monique and Calli, Erdi and Charbonnier, Jean-Paul and Dofferhoff, Ton and van Everdingen, Wouter M. and Gerke, Paul K. and Geurts, Bram and Gietema, Hester A. and Groeneveld, Miriam and van Harten, Louis and Hendrix, Nils and Hendrix, Ward and Huisman, Henkjan J. and Isgum, Ivana and Jacobs, Colin and Kluge, Ruben and Kok, Michel and Krdzalic, Jasenko and Lassen-Schmidt, Bianca and van Leeuwen, Kicky and Meakin, James and Overkamp, Mike and van Rees Vellinga, Tjalco and van Rikxoort, Eva M. and Samperna, Riccardo and Schaefer-Prokop, Cornelia and Schalekamp, Steven and Scholten, Ernst Th. and Sital, Cheryl and St\"{o}ger, Lauran and Teuwen, Jonas and Vaidhya Venkadesh, Kiran and de Vente, Coen and Vermaat, Marieke and Xie, Weiyi and de Wilde, Bram and Prokop, Mathias and van Ginneken, Bram}, + title = {Automated Assessment of {COVID}-19 Reporting and Data System and Chest {CT} Severity Scores in Patients Suspected of Having {COVID}-19 Using Artificial Intelligence}, + journal = Radiology, + year = {2021}, + volume = {298}, + number = {1}, + pages = {E18--E28}, + file = {:pdf/Less20.pdf:PDF}, + doi = {10.1148/radiol.2020202439}, + pmid = {32729810}, + algorithm = {https://grand-challenge.org/algorithms/corads-ai/}, + abstract = {The coronavirus disease 2019 (COVID-19) pandemic has spread across the globe with alarming speed, morbidity, and mortality. Immediate triage of patients with chest infections suspected to be caused by COVID-19 using chest CT may be of assistance when results from definitive viral testing are delayed. Purpose: To develop and validate an artificial intelligence (AI) system to score the likelihood and extent of pulmonary COVID-19 on chest CT scans using the COVID-19 Reporting and Data System (CO-RADS) and CT severity scoring systems. Materials and Methods: The CO-RADS AI system consists of three deep-learning algorithms that automatically segment the five pulmonary lobes, assign a CO-RADS score for the suspicion of COVID-19, and assign a CT severity score for the degree of parenchymal involvement per lobe. This study retrospectively included patients who underwent a nonenhanced chest CT examination because of clinical suspicion of COVID-19 at two medical centers. 
The system was trained, validated, and tested with data from one of the centers. Data from the second center served as an external test set. Diagnostic performance and agreement with scores assigned by eight independent observers were measured using receiver operating characteristic analysis, linearly weighted k values, and classification accuracy. Results: A total of 105 patients (mean age, 62 years +- 16 [standard deviation]; 61 men) and 262 patients (mean age, 64 years +- 16; 154 men) were evaluated in the internal and external test sets, respectively. The system discriminated between patients with COVID-19 and those without COVID-19, with areas under the receiver operating characteristic curve of 0.95 (95% CI: 0.91, 0.98) and 0.88 (95% CI: 0.84, 0.93), for the internal and external test sets, respectively. Agreement with the eight human observers was moderate to substantial, with mean linearly weighted k values of 0.60 +- 0.01 for CO-RADS scores and 0.54 +- 0.01 for CT severity scores. Conclusion: With high diagnostic performance, the CO-RADS AI system correctly identified patients with COVID-19 using chest CT scans and assigned standardized CO-RADS and CT severity scores that demonstrated good agreement with findings from eight independent observers and generalized well to external data.}, + optnote = {DIAG, RADIOLOGY}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/228667}, + ss_id = {4330ce9c73af04a35d1a7dd366df8434c98e30ed}, + all_ss_ids = {['4330ce9c73af04a35d1a7dd366df8434c98e30ed']}, + gscites = {105}, +} + +@article{Less20a, + author = {Nikolas Lessmann and Bram van Ginneken}, + title = {Random smooth gray value transformations for cross modality learning with gray value invariant networks}, + journal = {arXiv:2003.06158}, + year = {2020}, + abstract = {Random transformations are commonly used for augmentation of the training data with the goal of reducing the uniformity of the training samples. These transformations normally aim at variations that can be expected in images from the same modality. Here, we propose a simple method for transforming the gray values of an image with the goal of reducing cross modality differences. This approach enables segmentation of the lumbar vertebral bodies in CT images using a network trained exclusively with MR images. The source code is made available at https://github.com/nlessmann/rsgt}, + file = {:http\://arxiv.org/pdf/2003.06158v1:PDF}, + ss_id = {d9eec89c003fb7af39968663ba21db04098d6019}, + all_ss_ids = {['d9eec89c003fb7af39968663ba21db04098d6019']}, + gscites = {0}, +} + +@article{Less22, + doi = {10.1148/ryai.220008}, + year = {2022}, + volume = {4}, + number = {2}, + pages = {e220008}, + author = {Huisman, Merel and Lessmann, Nikolas}, + title = {Automatic Brand Identification of Orthopedic Implants from Radiographs: Ready for the Next Step?}, + journal = RAI, + optnote = {DIAG, RADIOLOGY}, + ss_id = {91437d205e582d2d1ce2dab8e49687a2a9bb0f66}, + all_ss_ids = {['91437d205e582d2d1ce2dab8e49687a2a9bb0f66']}, + gscites = {0}, +} + +@inproceedings{Leym96, + author = {Leymarie, F. and de la Fortelle, A. and Koenderink, J. J. and Kappers, A. M. L. and Stavridi, M. and van Ginneken, B. and Muller, S. and Krake, S. and Faugeras, O. and Robert, L. and Gauclin, C. and Laveau, S. 
and Zeller, C.}, + title = {{REALISE}: reconstruction of {REAL}ity from {I}mage {SE}quences}, + booktitle = ICIP, + year = {1996}, + pages = {651--654}, + doi = {10.1109/ICIP.1996.560579}, + abstract = {{REALISE} was designed to extract from sequences of images, acquired with a moving camera, the information necessary for determining the 3{D} ({CAD}-like) structure of a real-life scene together with information about the radiometric signatures of surfaces bounding the extracted 3{D} objects (e.g. reflectance behaviour). {T}he retrieved information is then integrated in a virtual reality ({VR}) software environment. {T}he {R}\&{D} work is being performed principally in the following areas of computer vision and computer graphics: structure from motion, recovery of geometries, recovery of photometric and texture information, highly realistic rendering on the basis of empirically-based reflectance models, and the design and development of improved rendering processes together with a new {VR} system. {B}eside this innovative {R}\&{D} work another key aspect of {REALISE} is to have computer vision and computer graphics cooperate to produce realistic 3{D} data efficiently}, + file = {Leym96.pdf:pdf\\Leym96.pdf:PDF}, + gsid = {12954033652655760385}, + optnote = {DIAG, RADIOLOGY}, + gscites = {22}, + ss_id = {7f897bcefc1fcc04cd17ae162d6bdfcff4dd83c0}, + all_ss_ids = {['7f897bcefc1fcc04cd17ae162d6bdfcff4dd83c0']}, +} + +@article{Li23, + author = {Li, Yiwen and Fu, Yunguan and Gayo, Iani J.M.B. and Yang, Qianye and Min, Zhe and Saeed, Shaheer U. and Yan, Wen and Wang, Yipei and Noble, J. Alison and Emberton, Mark and Clarkson, Matthew J. and Huisman, Henkjan and Barratt, Dean C. and Prisacariu, Victor A. and Hu, Yipeng}, + title = {Prototypical few-shot segmentation for cross-institution male pelvic structures with spatial registration}, + doi = {10.1016/j.media.2023.102935}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2023.102935}, + file = {Li23.pdf:pdf\Li23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + citation-count = {0}, + automatic = {yes}, + pages = {102935}, + volume = {90}, +} + +@conference{Lief16, + author = {Bart Liefers and Vivian Schreur and Thomas Theelen and Clara I. S\'{a}nchez}, + title = {Registration and grading of micro-aneurysms in Fluorescein Angiography and OCT Angiography}, + booktitle = {4th International Congress on OCT Angiography and Advances in OCT}, + year = {2016}, + abstract = {Purpose + : + A tool for grading micro-aneurysms and the Foveal Avascular Zone (FAZ) in Fluorescein Angiography (FA) and OCT Angiography (OCTA) has been developed. With this tool the user can compare visibility and grade micro-aneurysms by displaying early FA, late FA and inner, intermediate and outer OCTA images in a synchronized view. + + Methods + : + The user can register the images in two steps by clicking on corresponding landmarks: early and late FA should be registered, as well as early FA to OCTA. A least-squares approximation to the affine transform that best matches the annotated point sets is calculated. Visual feedback is available during this stage by blending the images that need to be registered. + Once the images are registered, a synchronized cursor helps the user in finding and comparing micro-aneurysms in all five images. The FAZ, for which the area is automatically calculated, can be drawn onto each image as well.
+ + Results + : + + Early and late FA and OCTA images, segmented into an inner, intermediate and outer layer, have been acquired for 31 eyes of 24 patients with Diabetic Macular Edema (DME). In every set of images, enough landmarks could be found for successful registration. The affine transform was sufficiently accurate to compare micro-aneurysms in the different images. The tool has been used for grading visibility and leakage of 567 micro-aneurysms. The FAZ could be delineated accurately in each image except the late FA where it was not visible. + + Conclusion + : + We developed a tool that can help researchers in comparing properties of FA and OCTA images, by registration of 5 different images (early and late FA, inner, intermediate and outer OCTA). The tool has been used for grading micro-aneurysms and delineating the FAZ for patients with DME.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Lief17, + author = {Bart Liefers and Freerk G. Venhuizen and Thomas Theelen and Carel Hoyng and Bram van Ginneken and Clara I. S\'{a}nchez}, + title = {Fovea Detection in Optical Coherence Tomography using Convolutional Neural Networks}, + booktitle = MI, + year = {2017}, + volume = {10133}, + series = SPIE, + pages = {1013302}, + doi = {10.1117/12.2254301}, + abstract = {The fovea is an important clinical landmark that is used as a reference for assessing various quantitative measures, such as central retinal thickness or drusen count. In this paper we propose a novel method for automatic detection of the foveal center in Optical Coherence Tomography (OCT) scans. Although the clinician will generally aim to center the OCT scan on the fovea, post-acquisition image processing will give a more accurate estimate of the true location of the foveal center. A Convolutional Neural Network (CNN) was trained on a set of 781 OCT scans that classifies each pixel in the OCT B-scan with a probability of belonging to the fovea. Dilated convolutions were used to obtain a large receptive field, while maintaining pixel-level accuracy. In order to train the network more effectively, negative patches were sampled selectively after each epoch. After CNN classification of the entire OCT volume, the predicted foveal center was chosen as the voxel with maximum output probability, after applying an optimized three-dimensional Gaussian blurring. We evaluate the performance of our method on a data set of 99 OCT scans presenting different stages of Age-related Macular Degeneration (AMD). The fovea was correctly detected in 96.9% of the cases, with a mean distance error of 73 +- 112 micro meter. This result was comparable to the performance of a second human observer who obtained a mean distance error of 69 +- 94 micro meter. Experiments showed that the proposed method is accurate and robust even in retinas heavily affected by pathology.}, + file = {Lief17.pdf:pdf\\Lief17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {3861077382929820883}, + gscites = {4}, + ss_id = {e4010e494f6a7f0ea93b4cdb257aa1e51611609a}, + all_ss_ids = {['e4010e494f6a7f0ea93b4cdb257aa1e51611609a']}, +} + +@conference{Lief17a, + author = {Bart Liefers and Freerk G. Venhuizen and Vivian Schreur and Bram van Ginneken and Carel Hoyng and Thomas Theelen and Clara I. 
S\'{a}nchez}, + booktitle = ARVO, + title = {Automatic detection of the foveal center in optical coherence tomography}, + abstract = {Purpose : To automatically detect the foveal center in optical coherence tomography (OCT) scans in order to obtain an accurate and reliable reference for the assessment of various structural biomarkers, even in the presence of large abnormalities and across different scanning protocols. + + Methods : 1784 OCT scans were used for the development of the proposed automatic method: 1744 scans from the European Genetic Database (EUGENDA) acquired with a Heidelberg Spectralis HRA+OCT 1 scanner and 40 scans from a publicly available dataset [1] acquired with a Bioptigen scanner. Two independent sets, with different levels of age-related macular degeneration (AMD) were drawn from the same databases for evaluation: 100 scans from EUGENDA (Set A, 25 control patients and 25 for each of the AMD severity levels early, intermediate and advanced) and 100 scans from [1] (Set B, 50 control, 50 AMD). + A fully convolutional neural network based on stacked layers of dilated convolutions was trained to classify each pixel in a B-scan by assigning a probability of belonging to the fovea. The network was applied to every B-scan in the OCT volume, and the final foveal center was defined as the pixel with maximum assigned probability. An initial network was trained on the 1744 training scans from EUGENDA and optimized with the 40 training scans acquired with the Bioptigen scanner, to specialize for different levels of noise and contrast. + + For all scans manual annotations were available as reference for evaluation. The foveal center was considered correctly identified if the distance between the prediction and the reference was less than the foveal radius, i.e. 750 um. + + Results : The foveal center was correctly detected in 95 OCT scans in Set A (24 control, 24 early, 25 intermediate, 22 advanced). The mean distance error was 63.7 um with 81 detections inside a radius of 175 um (the foveola) and 70 inside a radius of 75 um (the umbo). In Set B, the foveal center was correctly identified in 96 OCT scans (49 control, 47 AMD). The mean distance error was 88.6 um with 82 detections inside the foveola and 61 inside the umbo. + + Conclusions : The proposed automatic method performed accurately for both healthy retinas and retinas affected by AMD. The method can be applied successfully to scans from different vendors, thus providing a reliable reference location for the assessment of structural biomarkers in OCT.}, + optnote = {DIAG, RADIOLOGY}, + year = {2017}, +} + +@article{Lief17b, + author = {Bart Liefers and Freerk G. Venhuizen and Vivian Schreur and Bram van Ginneken and Carel Hoyng and Sascha Fauser and Thomas Theelen and Clara I. S\'{a}nchez}, + title = {Automatic detection of the foveal center in optical coherence tomography}, + journal = BOE, + year = {2017}, + volume = {8}, + number = {11}, + month = {11}, + pages = {5160--5178}, + doi = {10.1364/BOE.8.005160}, + url = {http://www.osapublishing.org/boe/abstract.cfm?URI=boe-8-11-5160}, + abstract = {We propose a method for automatic detection of the foveal center in optical coherence tomography (OCT). The method is based on a pixel-wise classification of all pixels in an OCT volume using a fully convolutional neural network (CNN) with dilated convolution filters.
The CNN-architecture contains anisotropic dilated filters and a shortcut connection and has been trained using a dynamic training procedure where the network identifies its own relevant training samples. The performance of the proposed method is evaluated on a data set of 400 OCT scans of patients affected by age-related macular degeneration (AMD) at different severity levels. For 391 scans (97.75%) the method identified the foveal center with a distance to a human reference less than 750 um, with a mean (+- SD) distance of 71 um +- 107 um. Two independent observers also annotated the foveal center, with a mean distance to the reference of 57 um +- 84 um and 56 um +- 80 um, respectively. Furthermore, we evaluate variations to the proposed network architecture and training procedure, providing insight in the characteristics that led to the demonstrated performance of the proposed method.}, + file = {Lief17b.pdf:pdf\\Lief17b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29188111}, + publisher = {OSA}, + gsid = {16004483321806977582}, + gscites = {28}, + ss_id = {e5fded74d443402884e88d8f7952d34d5371dedf}, + all_ss_ids = {['e5fded74d443402884e88d8f7952d34d5371dedf']}, +} + +@inproceedings{Lief19, + author = {Liefers, Bart and Gonz\'{a}lez-Gonzalo, Cristina and Klaver, Caroline and {van Ginneken}, Bram and S\'{a}nchez, Clara I.}, + title = {Dense Segmentation in Selected Dimensions: Application to Retinal Optical Coherence Tomography}, + booktitle = MIDL, + year = {2019}, + volume = {102}, + series = {Proceedings of Machine Learning Research}, + publisher = {PMLR}, + month = {7}, + pages = {337--346}, + url = {http://proceedings.mlr.press/v102/liefers19a.html}, + abstract = {We present a novel convolutional neural network architecture designed for dense segmentation in a subset of the dimensions of the input data. The architecture takes an N-dimensional image as input, and produces a label for every pixel in M output dimensions, where 0< M < N. Large context is incorporated by an encoder-decoder structure, while funneling shortcut subnetworks provide precise localization. We demonstrate applicability of the architecture on two problems in retinal optical coherence tomography: segmentation of geographic atrophy and segmentation of retinal layers. Performance is compared against two baseline methods, that leave out either the encoder-decoder structure or the shortcut subnetworks. For segmentation of geographic atrophy, an average Dice score of 0.49+-0.21 was obtained, compared to 0.46+-0.22 and 0.28+-0.19 for the baseline methods, respectively. For the layer-segmentation task, the proposed architecture achieved a mean absolute error of 1.305+-0.547 pixels compared to 1.967+-0.841 and 2.166+-0.886 for the baseline methods.}, + file = {Lief19.pdf:pdf\\Lief19.pdf:PDF}, + optnote = {DIAG}, + gsid = {2472537240832249962}, + gscites = {11}, + ss_id = {40bf7af2b9d79960dc127d03c2a83ce480d4d3ad}, + all_ss_ids = {['40bf7af2b9d79960dc127d03c2a83ce480d4d3ad']}, +} + +@conference{Lief19a, + author = {Bart Liefers and Johanna Colijn and Cristina Gonz\'{a}lez-Gonzalo and Akshayaa Vaidyanathan and Harm van Zeeland and Paul Mitchell and Caroline Klaver and S\'{a}nchez, Clara I.}, + booktitle = ARVO, + title = {Prediction of areas at risk of developing geographic atrophy in color fundus images using deep learning}, + abstract = {Purpose: + Exact quantification of areas of geographic atrophy (GA) can provide an important anatomical endpoint for treatment trials. 
The prediction of areas where GA may develop can provide valuable personalized prognosis and help in the development of targeted treatments to prevent progression and further vision loss. In this work, we present a model based on a deep convolutional neural network (CNN) that predicts the areas of GA within 5 years from baseline using color fundus (CF) images. + + Methods: + Areas of GA were delineated by 4 to 5 experienced graders in consensus in 377 CF images (252 eyes) collected from the Rotterdam Study and the Blue Mountains Eye Study. Graders made use of multimodal and follow up images when available, using our EyeNED annotation workstation. We identified 84 pairs of images (baseline and follow-up) of the same eye that were acquired with an interval of approximately 5 years. Image registration was performed by identifying corresponding landmarks between the images, allowing to project the delineated GA of the follow-up image onto the baseline image. + Next, a fully automatic segmentation model, based on a deep CNN, was developed. The CNN was trained to simultaneously segment the current GA area and the area at risk of developing GA, using only the baseline image as input. A five-fold cross-validation was performed to validate the prediction performance. + + Results: + The model achieved an average dice coefficient of 0.63 for segmentation of areas at risk of developing GA in the 84 images. The intraclass correlation coefficient between the GA area defined by the consensus grading of the follow-up image and the automatically predicted area based on the baseline image was 0.54. + + Conclusions: + We present a model based on a deep CNN that is capable of identifying areas where GA may develop from CF images. The proposed approach constitutes a step towards personalized prognosis and possible treatment decisions. Furthermore, the model may be used for automatic discovery of new predictive biomarkers for development and growth rate of GA, and may help to automatically identify individuals at risk of developing GA.}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, + all_ss_ids = {6d1ea27b41023f9add67e2c8c4dcbc7866ae640b}, + gscites = {0}, +} + +@article{Lief19b, + author = {Liefers, Bart and Colijn, Johanna M and Gonz\'{a}lez-Gonzalo, Cristina and Verzijden, Timo and Mitchell, Paul and Hoyng, Carel B and van Ginneken, Bram and Klaver, Caroline CW and S\'{a}nchez, Clara I}, + title = {A deep learning model for segmentation of geographic atrophy to study its long-term natural history}, + journal = {arXiv:1908.05621}, + year = {2019}, + url = {https://arxiv.org/abs/1908.05621}, + abstract = {Purpose: To develop and validate a deep learning model for automatic segmentation of geographic atrophy (GA) in color fundus images (CFIs) and its application to study growth rate of GA. Participants: 409 CFIs of 238 eyes with GA from the Rotterdam Study (RS) and the Blue Mountain Eye Study (BMES) for model development, and 5,379 CFIs of 625 eyes from the Age-Related Eye Disease Study (AREDS) for analysis of GA growth rate. Methods: A deep learning model based on an ensemble of encoder-decoder architectures was implemented and optimized for the segmentation of GA in CFIs. Four experienced graders delineated GA in CFIs from RS and BMES. These manual delineations were used to evaluate the segmentation model using 5-fold cross-validation. The model was further applied to CFIs from the AREDS to study the growth rate of GA. 
Linear regression analysis was used to study associations between structural biomarkers at baseline and GA growth rate. A general estimate of the progression of GA area over time was made by combining growth rates of all eyes with GA from the AREDS set. Results: The model obtained an average Dice coefficient of 0.72 +- 0.26 on the BMES and RS. An intraclass correlation coefficient of 0.83 was reached between the automatically estimated GA area and the graders' consensus measures. Eight automatically calculated structural biomarkers (area, filled area, convex area, convex solidity, eccentricity, roundness, foveal involvement and perimeter) were significantly associated with growth rate. Combining all growth rates indicated that GA area grows quadratically up to an area of around 12 mm2, after which growth rate stabilizes or decreases. Conclusion: The presented deep learning model allowed for fully automatic and robust segmentation of GA in CFIs. These segmentations can be used to extract structural characteristics of GA that predict its growth rate.}, + optnote = {DIAG}, + month = {8}, + all_ss_ids = {['4d54536ebf1b355d02cdfe742553032666101dbd']}, + gscites = {36}, +} + +@article{Lief20, + author = {Liefers, Bart and Colijn, Johanna M and Gonz\'{a}lez-Gonzalo, Cristina and Verzijden, Timo and Wang, Jie Jin and Joachim, Nichole and Mitchell, Paul and Hoyng, Carel B and van Ginneken, Bram and Klaver, Caroline CW and S\'{a}nchez, Clara I}, + title = {A deep learning model for segmentation of geographic atrophy to study its long-term natural history}, + doi = {10.1016/j.ophtha.2020.02.009}, + url = {https://arxiv.org/abs/1908.05621}, + abstract = {PURPOSE: + To develop and validate a deep learning model for the automatic segmentation of geographic atrophy (GA) using color fundus images (CFIs) and its application to study the growth rate of GA. + + DESIGN: + Prospective, multicenter, natural history study with up to 15 years of follow-up. + + PARTICIPANTS: + Four hundred nine CFIs of 238 eyes with GA from the Rotterdam Study (RS) and Blue Mountain Eye Study (BMES) for model development, and 3589 CFIs of 376 eyes from the Age-Related Eye Disease Study (AREDS) for analysis of GA growth rate. + + METHODS: + Deep learning model based on an ensemble of encoder-decoder architectures was implemented and optimized for the segmentation of GA in CFIs. Four experienced graders delineated, in consensus, GA in CFIs from the RS and BMES. These manual delineations were used to evaluate the segmentation model using 5-fold cross-validation. The model was applied further to CFIs from the AREDS to study the growth rate of GA. Linear regression analysis was used to study associations between structural biomarkers at baseline and the GA growth rate. A general estimate of the progression of GA area over time was made by combining growth rates of all eyes with GA from the AREDS set. + + MAIN OUTCOME MEASURES: + Automatically segmented GA and GA growth rate. + + RESULTS: + The model obtained an average Dice coefficient of 0.72+-0.26 on the BMES and RS set while comparing the automatically segmented GA area with the graders' manual delineations. An intraclass correlation coefficient of 0.83 was reached between the automatically estimated GA area and the graders' consensus measures. Nine automatically calculated structural biomarkers (area, filled area, convex area, convex solidity, eccentricity, roundness, foveal involvement, perimeter, and circularity) were significantly associated with growth rate. 
Combining all growth rates indicated that GA area grows quadratically up to an area of approximately 12 mm2, after which growth rate stabilizes or decreases. + + CONCLUSIONS: + The deep learning model allowed for fully automatic and robust segmentation of GA on CFIs. These segmentations can be used to extract structural characteristics of GA that predict its growth rate.}, + file = {Lief20.pdf:pdf\\Lief20.pdf:PDF}, + journal = Ophthalmology, + volume = {127}, + number = {8}, + pages = {1086--1096}, + optnote = {DIAG}, + pmid = {32197912}, + year = {2020}, + month = {2}, + gsid = {10165923379562366066}, + gscites = {36}, + ss_id = {4d54536ebf1b355d02cdfe742553032666101dbd}, + all_ss_ids = {['f643f4a927cf65f1ec66231ae76d3bc1736a67d3', '4d54536ebf1b355d02cdfe742553032666101dbd']}, +} + +@conference{Lief20a, + author = {Liefers, Bart and Taylor, Paul and Gonz\'{a}lez-Gonzalo, Cristina and Tufail, Adnan and S\'{a}nchez, Clara I.}, + booktitle = EURETINA, + url = {https://www.euretina.org/congress/amsterdam-2020/virtual-2020-freepapers/}, + title = {Achieving expert level performance in quantifying 13 distinctive features of neovascular age-related macular degeneration on optical coherence tomography}, + abstract = {Purpose: + To develop and validate an automatic model for volumetric quantification of the 13 most common abnormalities associated with neovascular age-related macular degeneration (nAMD) on optical coherence tomography (OCT). + Setting: + Clinical data and associated imaging were collected from five UK secondary care providers between February 2002 and September 2017. We identified 680 treatment-naive patients with no recent cataract surgery, at least one anti-VEGF injection, a diagnosis of nAMD, and associated OCT imaging (Topcon, Tokyo, Japan). + Methods: + A deep convolutional neural network (CNN) was used to produce a volumetric segmentation of 13 retinal abnormalities. The CNN architecture was based on a deep encoder-decoder structure that combines information from adjacent B-scans. The model was trained on 2,712 B-scans from 307 OCT volumes, with manual labels provided at a voxel-level for all abnormalities by eight graders. Abnormalities that were found in over 80 B-scans were modelled. The performance of the model and graders was assessed on an independent set of 112 B-scans from 112 OCT-volumes of nAMD cases, for which four graders independently provided annotations. To create a reference standard, the outputs of three graders were combined and defined as voxels where at least two out of three agreed. The graders' accuracy was calculated using each grader, in turn, as an observer. The Dice similarity metric was used to compare overlap, calculated per A-scan or per voxel where appropriate. Free-response receiver operator characteristic (FROC) analysis was used for the detection of small abnormalities. The intraclass correlation coefficient (ICC) was used to measure agreement on area or volume measures, with the reference area or volume defined as the average of the three graders. + Results: + Included abnormalities were: intraretinal fluid (IRF), subretinal fluid (SRF), pigment epithelial detachment (PED), subretinal hyperreflective material (SHRM), fibrosis, drusen and drusenoid PED, epiretinal membrane (ERM), outer plexiform layer (OPL) descent, ellipsoid loss, retinal pigment epithelium (RPE) loss or attenuation, hyper-transmission, hyperreflective dots and subretinal drusenoid deposits - reticular pseudodrusen (SDD - RPD). 
For OPL-descent and fibrosis there were insufficient examples in the test set for a reliable performance estimate.For the other features, the model obtained an average Dice score of 0.63 +- 0.15 (median 0.64), compared to 0.61 +- 0.17 (median 0.60) for the observers. The average ICC for the model was 0.66 +- 0.22 (median 0.69), compared to 0.62 +- 0.21 (median 0.55) for the observers. For individual features, differences between model and observer Dice score were within a 95% confidence interval for all features except ellipsoid loss, where model performance was slightly better (p=0.03). Regarding ICC, model performance was slightly better for IRF (p=0.04) and ellipsoid loss (p=0.006), slightly worse for drusen and drusenoid PED (p=0.03), and within the 95% confidence interval for other features. For hyperreflective dots and SDD-RPD, FROC analysis revealed that the model performed at similar sensitivity per false positives as the observers. + Conclusions: + We present a deep-learning based model that provides accurate volumetric quantification of a comprehensive set of relevant pathological components of nAMD. There was relatively large variability in grader agreement between abnormalities. Nevertheless, model performance was comparable to, and in many cases exceeded, human performance, both in terms of overlap and quantification. The model generates a precise, quantitative morphological signature of the retinal pathology that can facilitate the development of prediction models for treatment response and planning of personalized treatment intervals, as well as further research into structure/function correlation. In clinical care it can facilitate structured reporting, reducing subjectivity in clinicians' assessments and enabling implementation of refined treatment guidelines.The presented model accelerates interpretation of OCT volumes and surpasses manual reading, both in terms of attainable level of extracted information and consistency. This can potentially lead to a reduction of costs in interpretation of clinical trials and improve personalized clinical care.}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, + month = {9}, +} + +@article{Lief21, + title = {Quantification of key retinal features in early and late age-related macular degeneration using deep learning}, + author = {Liefers, Bart and Taylor, Paul and Alsaedi, Abdulrahman and Bailey, Clare and Balaskas, Konstantinos and Dhingra, Narendra and Egan, Catherine A and Rodrigues, Filipa Gomes and Gonz\'{a}lez-Gonzalo, Cristina and Heeren, Tjebo F.C. and Lotery, Andrew and Muller, Philipp L. and Olvera-Barrios, Abraham and Paul, Bobby and Schwartz, Roy and Thomas, Darren S. and Warwick, Alasdair N. and Tufail, Adnan and S\'{a}nchez, Clara I.}, + abstract = {Purpose: + To develop and validate a deep learning model for segmentation of 13 features associated with neovascular and atrophic age-related macular degeneration (AMD). + + Design: + Development and validation of a deep-learning model for feature segmentation. + + Methods: + Data for model development were obtained from 307 optical coherence tomography volumes. Eight experienced graders manually delineated all abnormalities in 2,712 B-scans. A deep neural network was trained with this data to perform voxel-level segmentation of the 13 most common abnormalities (features). For evaluation, 112 B-scans from 112 patients with a diagnosis of neovascular AMD were annotated by four independent observers. 
Main outcome measures were Dice score, intra-class correlation coefficient (ICC), and free-response receiver operating characteristic (FROC) curve. + + Results: + On 11 of the 13 features, the model obtained a mean Dice score of 0.63 +- 0.15, compared to 0.61 +- 0.17 for the observers. The mean ICC for the model was 0.66 +- 0.22, compared to 0.62 +- 0.21 for the observers. Two features were not evaluated quantitatively due to lack of data. FROC analysis demonstrated that the model scored similar or higher sensitivity per false positives compared to the observers. + + Conclusions: + The quality of the automatic segmentation matches that of experienced graders for most features, exceeding human performance for some features. The quantified parameters provided by the model can be used in the current clinical routine and open possibilities for further research into treatment response outside clinical trials.}, + journal = AJO, + doi = {https://doi.org/10.1016/j.ajo.2020.12.034}, + url = {https://www.sciencedirect.com/science/article/abs/pii/S0002939421000088}, + volume = {226}, + pages = {1--12}, + year = {2021}, + pmid = {33422464}, + publisher = {Elsevier}, + optnote = {DIAG}, + ss_id = {67af3a5566aceaf87e2caf82eb587c91ebd41900}, + all_ss_ids = {['67af3a5566aceaf87e2caf82eb587c91ebd41900']}, + gscites = {22}, +} + +@phdthesis{Lief22, + author = {Bart Liefers}, + title = {Deep Learning Algorithms for Age-Related Macular Degeneration}, + url = {https://repository.ubn.ru.nl/handle/2066/252875}, + abstract = {This thesis is devoted to the applications of deep learning algorithms for automated analysis of retinal images. + In contains chapters on: + + 1. Automatic detection of the foveal center in OCT scans (Chapter 2); + 2. Segmentation of retinal layers and geographic atrophy (Chapter 3); + 3. Segmentation of geographic atrophy on color fundus (Chapter 4); + 4. Quantification of key retinal features in early and late AMD. (Chapter 5).}, + copromotor = {T. Theelen}, + file = {Lief22.pdf:pdf/Lief22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and C.I. Sanchez Gutierrez and C.B. Hoyng}, + school = {Radboud University, Nijmegen}, + year = {2022}, + journal = {PhD thesis}, +} + +@article{LinE05, + author = {E. N. J. Th. van Lin and L. P. van der Vight and J. A. Witjes and H. J. Huisman and J. W. Leer and A. G. Visser}, + title = {The effect of an endorectal balloon and off-line correction on the interfraction systematic and random prostate position variations: a comparative study}, + journal = IJROBP, + year = {2005}, + volume = {61}, + pages = {278--288}, + doi = {10.1016/j.ijrobp.2004.09.042}, + abstract = {{PURPOSE}: {T}o investigate the effect of an endorectal balloon ({ERB}) and an off-line correction protocol on the day-to-day, interfraction prostate gland motion, in patients receiving external beam radiotherapy for prostate cancer. {METHODS} {AND} {MATERIALS}: {I}n 22 patients, irradiated with an {ERB} in situ ({ERB} group) and in 30 patients without an {ERB} ({N}o-{ERB} group), prostate displacements were measured daily in three orthogonal directions with portal images. {I}mplanted gold markers and an off-line electronic portal imaging correction protocol were used for prostate position verification and correction. {M}ovie loops were analyzed to evaluate prostate motion and rectal filling variations. {RESULTS}: {T}he off-line correction protocol reduced the systematic prostate displacements, equally for the {ERB} and {N}o-{ERB} group, to 1.3-1.8 mm (1 {SD}). 
{T}he mean 3{D} displacement was reduced to 2.8 mm and 2.4 mm for the {ERB} and {N}o-{ERB} group, respectively. {T}he random interfraction displacements, relative to the treatment isocenter, were not reduced by the {ERB} and remained nearly unchanged in all three directions: 3.1 mm (1 {SD}) left-right, 2.6 mm (1 {SD}) superior-inferior, and 4.7 mm (1 {SD}) for the anterior-posterior direction. {T}hese day-to-day prostate position variations can be explained by the presence of gas and stool beside the {ERB}. {CONCLUSIONS}: {T}he off-line corrections on the fiducial markers are effective in reducing the systematic prostate displacements. {T}he investigated {ERB} does not reduce the interfraction prostate motion. {A}lthough the overall mean displacement is low, the day-to-day interfraction motion, especially in anterior-posterior direction, remains high compared with the systematic displacements.}, + file = {LinE05.pdf:pdf\\LinE05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {15629621}, + month = {1}, + gsid = {13303135162649110518}, + gscites = {108}, + ss_id = {d4dabcc6ef6712de68132daadabb0660bc567a9e}, + all_ss_ids = {['d4dabcc6ef6712de68132daadabb0660bc567a9e']}, +} + +@article{LinE06, + author = {E. N. J. T. van Lin and J. J. F\"utterer and S. W. T. P. J. Heijmink and L. P. van der Vight and A. L. Hoffmann and P. van Kollenburg and H. J. Huisman and T. W. J. Scheenen and J. A. Witjes and J. W. Leer and J. O. Barentsz and A. G. Visser}, + title = {{IMRT} boost dose planning on dominant intraprostatic lesions: gold marker-based three-dimensional fusion of {CT} with dynamic contrast-enhanced and 1{H}-spectroscopic {MRI}}, + journal = IJROBP, + year = {2006}, + volume = {65}, + pages = {291--303}, + doi = {10.1016/j.ijrobp.2005.12.046}, + abstract = {{PURPOSE}: {T}o demonstrate the theoretical feasibility of integrating two functional prostate magnetic resonance imaging ({MRI}) techniques (dynamic contrast-enhanced {MRI} [{DCE}-{MRI}] and 1{H}-spectroscopic {MRI} [{MRSI}]) into inverse treatment planning for definition and potential irradiation of a dominant intraprostatic lesion ({DIL}) as a biologic target volume for high-dose intraprostatic boosting with intensity-modulated radiotherapy ({IMRT}). {METHODS} {AND} {MATERIALS}: {I}n 5 patients, four gold markers were implanted. {A}n endorectal balloon was inserted for both {CT} and {MRI}. {A} {DIL} volume was defined by {DCE}-{MRI} and {MRSI} using different prostate cancer-specific physiologic ({DCE}-{MRI}) and metabolic ({MRSI}) parameters. {CT}-{MRI} registration was performed automatically by matching three-dimensional gold marker surface models with the iterative closest point method. {DIL}-{IMRT} plans, consisting of whole prostate irradiation to 70 {G}y and a {DIL} boost to 90 {G}y, and standard {IMRT} plans, in which the whole prostate was irradiated to 78 {G}y were generated. {T}he tumor control probability and rectal wall normal tissue complication probability were calculated and compared between the two {IMRT} approaches. {RESULTS}: {C}ombined {DCE}-{MRI} and {MRSI} yielded a clearly defined single {DIL} volume (range, 1.1-6.5 cm3) in all patients. {I}n this small, selected patient population, no differences in tumor control probability were found. {A} decrease in the rectal wall normal tissue complication probability was observed in favor of the {DIL}-{IMRT} plan versus the plan with {IMRT} to 78 {G}y. 
{CONCLUSION}: {C}ombined {DCE}-{MRI} and {MRSI} functional image-guided high-dose intraprostatic {DIL}-{IMRT} planned as a boost to 90 {G}y is theoretically feasible. {T}he preliminary results have indicated that {DIL}-{IMRT} may improve the therapeutic ratio by decreasing the normal tissue complication probability with an unchanged tumor control probability. {A} larger patient population, with more variations in the number, size, and localization of the {DIL}, and a feasible mechanism for treatment implementation has to be studied to extend these preliminary tumor control and toxicity estimates.}, + file = {LinE06.pdf:pdf\\LinE06.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {1}, + pmid = {16618584}, + month = {5}, + gsid = {12299719120846507003}, + gscites = {195}, + ss_id = {d214f7c0440c815029df15d85ac33c9fabb24d29}, + all_ss_ids = {['d214f7c0440c815029df15d85ac33c9fabb24d29']}, +} + +@inproceedings{Linm20, + author = {Linmans, Jasper and van der Laak, Jeroen and Litjens, Geert}, + title = {Efficient Out-of-Distribution Detection in Digital Pathology Using Multi-Head Convolutional Neural Networks}, + booktitle = MIDL, + pages = {465--478}, + url = {https://openreview.net/forum?id=hRwB2BTRNu}, + abstract = {Successful clinical implementation of deep learning in medical imaging depends, in part, on the reliability of the predictions. Specifically, the system should be accurate for classes seen during training while providing calibrated estimates of uncertainty for abnormalities and unseen classes. To efficiently estimate predictive uncertainty, we propose the use of multi-head CNNs (M-heads). We compare its performance to related and more prevalent approaches, such as deep ensembles, on the task of out-of-distribution (OOD) detection. To this end, we evaluate models trained to discriminate normal lymph node tissue from breast cancer metastases, on lymph nodes containing lymphoma. We show the ability to discriminate between in-distribution lymph node tissue and lymphoma by evaluating the AUROC based on the uncertainty signal. Here, the best performing multi-head CNN (91.7) outperforms both Monte Carlo dropout (88.3) and deep ensembles (86.8). Furthermore, we show that the meta-loss function of M-heads improves OOD detection in terms of AUROC.}, + file = {:pdf/Linm20.pdf:PDF}, + optnote = {DIAG}, + year = {2020}, + ss_id = {89997fe09abc2c7d8bd73ef0f902e38fc2ae8baa}, + all_ss_ids = {['89997fe09abc2c7d8bd73ef0f902e38fc2ae8baa']}, + gscites = {31}, +} + +@article{Linm23, + author = {Linmans, Jasper and Elfwing, Stefan and van der Laak, Jeroen and Litjens, Geert}, + year = {2023}, + month = {1}, + journal = MIA, + title = {Predictive uncertainty estimation for out-of-distribution detection in digital pathology.}, + doi = {10.1016/j.media.2022.102655}, + pages = {102655}, + volume = {83}, + abstract = {Machine learning model deployment in clinical practice demands real-time risk assessment to identify situations in which the model is uncertain. Once deployed, models should be accurate for classes seen during training while providing informative estimates of uncertainty to flag abnormalities and unseen classes for further analysis. Although recent developments in uncertainty estimation have resulted in an increasing number of methods, a rigorous empirical evaluation of their performance on large-scale digital pathology datasets is lacking. 
This work provides a benchmark for evaluating prevalent methods on multiple datasets by comparing the uncertainty estimates on both in-distribution and realistic near and far out-of-distribution (OOD) data on a whole-slide level. To this end, we aggregate uncertainty values from patch-based classifiers to whole-slide level uncertainty scores. We show that results found in classical computer vision benchmarks do not always translate to the medical imaging setting. Specifically, we demonstrate that deep ensembles perform best at detecting far-OOD data but can be outperformed on a more challenging near-OOD detection task by multi-head ensembles trained for optimal ensemble diversity. Furthermore, we demonstrate the harmful impact OOD data can have on the performance of deployed machine learning models. Overall, we show that uncertainty estimates can be used to discriminate in-distribution from OOD data with high AUC scores. Still, model deployment might require careful tuning based on prior knowledge of prospective OOD data.}, + file = {:pdf/Linm23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {36306568}, + ss_id = {697be6a6398755ff665b968936fef868ec8974bf}, + all_ss_ids = {['697be6a6398755ff665b968936fef868ec8974bf']}, + gscites = {16}, +} + +@conference{Litj09, + author = {G. Litjens and M. Heisen and J. Buurman and A.M. Wood and M. Medved and G.S. Karczmar and B.M. Ter Haar-Romeny}, + title = {T1 Quantification: Variable Flip Angle Method vs Use of Reference Phantom}, + booktitle = RSNA, + year = {2009}, + abstract = {{PURPOSE} For standardized interpretation of DCEMRI curves, calculation of contrast agent (CA) concentration from signal intensity over time is desired. Accurate measurement of tissue T1 before and after CA administration is thus necessary. Current T1 measurement methods are time-consuming. We propose the use of the "reference tissue" method for fast T1 measurements concurrent with DCEMRI data acquisition, but with use of a reference phantom. {METHOD AND MATERIALS} The "reference tissue" method is based on the approximation that in T1-weighted gradient echo images, signal intensity is proportional to 1/T1 - thus signal intensity can be referenced to a tissue or a phantom with a known T1. We compared this method to the "variable flip angle" method, most commonly used in clinical practice. We compared the "reference tissue" method (TR/TE = 25/1.1 ms, a=40°) to the "variable flip angle" method (TR/TE = 25/1.1 ms, a=3/5/10/15/20/25/30/35/40°), using the Eurospin T05 phantom, in which 10 out of 18 vials containing agar with varying concentrations of Gd-DTPA (T1 range: 281 - 1384 ms) were used. {RESULTS} With the "reference tissue" method, using 9 tubes successively as a reference for the remaining tube, the average error in the estimation of T1 was 8.5%, with a standard deviation of 6.1%, and was random. Using the "variable flip angle" method, the average error was 5.7 % with a standard deviation of 3.8%. Using a two-sided Student's t-test we found no statistically significant differences in the performance of the two methods (p-value = 0.52). {CONCLUSION} As DCEMRI imaging is done in a heavily T1-weighted regime, the "reference tissue" method can be used to concurrently measure T1, and quantify CA concentration. Use of an agar phantom will require a correction for lower proton density in biological tissue. Proton density can be measured prior to CA administration, or tabulated values can be used.
We demonstrated the feasibility of fast T1 measurements using a reference phantom, providing T1 maps without additional scanning time. This will allow quantification of the CA concentration throughout the DCEMRI scan, which cannot be achieved using the current clinical method. {CLINICAL RELEVANCE/APPLICATION} The use of a reference phantom for determining T1 can lead to a drastic reduction in scanning time and thus patient discomfort when compared to a regular clinical "variable flip angle" scan.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Litj10, + author = {G. Litjens and M. Heisen and J. Buurman and B.M. ter Haar Romeny}, + title = {Pharmacokinetic models in clinical practice: what model to use for {DCE-MRI} of the breast?}, + booktitle = ISBI, + pages = {185--188}, + doi = {10.1109/ISBI.2010.5490382}, + abstract = {{P}harmacokinetic modeling is increasingly used in {DCE}-{MRI} high risk breast cancer screening. {S}everal models are available. {T}he most common models are the standard and extended {T}ofts, the shutterspeed, and the {B}rix model. {E}ach model and the meaning of its parameters is explained. {I}t was investigated which models can be used in a clinical setting by simulating a range of sampling rates and noise levels representing different {MRI} acquisition schemes. {I}n addition, an investigation was performed on the errors introduced in the estimates of the pharmacokinetic parameters when using a physiologically less complex model, i.e. the standard {T}ofts model, to fit curves generated with more complex models. {I}t was found that the standard {T}ofts model is the only model that performs within an error margin of 20% on parameter estimates over a range of sampling rates and noise levels. {T}his still holds when small complex physiological effects are present.}, + file = {Litj10.pdf:pdf\\Litj10.pdf:PDF}, + gscites = {11}, + gsid = {16055378021937184817}, + optnote = {DIAG, RADIOLOGY}, + year = {2010}, +} + +@inproceedings{Litj10a, + author = {G. Litjens and L. Hogeweg and A.M.R. Schilham and P.A. de Jong and M.A. Viergever and B. van Ginneken}, + title = {Simulation of nodules and diffuse infiltrates in chest radiographs using {CT} templates}, + booktitle = MICCAI, + year = {2010}, + volume = {6362}, + series = LNCS, + pages = {396--403}, + doi = {10.1007/978-3-642-15745-5_49}, + abstract = {A method is proposed to simulate nodules and diffuse infiltrates in chest radiographs. This allows creation of large annotated databases for training of both radiologists and computer aided diagnosis systems. Realistic nodules and diffuse infiltrates were generated from three-dimensional templates segmented from CT data. These templates are rescaled, rotated, projected and superimposed on a radiograph. This method was compared, in an observer study, to a previously published method that simulates pulmonary nodules as perfectly spherical objects. Results show that it is hard for human observers to distinguish real and simulated nodules when using templates (AUC-values do not significantly differ from .5, p > .05 for all observers). The method that produced spherical nodules performed slightly worse (AUC of one observer differs significantly from .5, p = .011).
Simulation of diffuse infiltrates is challenging but also feasible (AUC = 0.67 for one observer).}, + file = {Litj10a.pdf:pdf/Litj10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {20879340}, + gsid = {3783126959662430798}, + gscites = {4}, + ss_id = {0f0313aea2ba0198c5da4b5eff724caecf885e7e}, + all_ss_ids = {['0f0313aea2ba0198c5da4b5eff724caecf885e7e']}, +} + +@mastersthesis{Litj10b, + author = {Geert Litjens}, + title = {Pharmacokinetic modeling in breast cancer MRI}, + year = {2009}, + abstract = {Breast cancer is a disease which impacts the lives of thousands of people. In the entire world over half a million people die due to breast cancer every year, mostly women. However, when breast cancer is detected in the early stages of disease five-year-survival rate approaches 100%. So, it is very important to detect breast cancer as early as possible. That is why in most of modern Western society screening programs for breast cancer have been developed. Most of these screening programs focus on x-ray mammography. However, women who have an increased risk to get breast cancer these screening programs are not adequate. These women usually develop breast cancer at a younger age and x-ray mammography for those women has a low sensitivity. For these cases, and inconclusive findings in x-ray mammography in other women, dynamic contrast enhanced (DCE) MRI is used. In DCE MRI a contrast agent is used which takes advantage of the fact that tumor vasculature is leaky and sloppy, thus the contrast agent tends to accumulate in the tumor, leading to increased signal intensity in T1-weighted images. Due to the fact that we have a time range of images it is possible to look at kinetic behavior. However, as kinetic curves have a large inter and intra patient variability and variability depending on the imaging site the analysis of these curves is not straightforward. Pharmacokinetic modeling could be an answer to those problems as it can be used to obtain lesion-specific physiological parameters. To use pharmacokinetic modeling however we need high temporal resolution data, which is not readily available. The University of Chicago Medical Center obtained several high temporal resolution data sets for the initial part of the kinetic enhancement curve in addition to the regular low temporal clinical scans. These data sets were the basis for this research. When analyzing such data several factors play an important role. The first of these being the extraction of the signal-vs.-time curves from the data sets. In this report we used a small graphical user interface to extract the data. The low and high temporal resolution images were obtained in different orientations, which was a problem we also needed to solve. The second step after extraction of the signal-vs.-time-curves was the conversion of the signal intensity to contrast agent concentration. In literature there were several methods that were used to accomplish this, but all were based on the use of a gradient recalled echo signal model. We first investigated the assumption that we can neglect T2* effects, which we concluded was allowed. To estimate concentration the tissue T1 at time 0 has to be known. As we had no additional T1 measurements, we used a reference tissue approach to estimate T1. We investigated if the simplifications often used in this method were allowed and we concluded that it was better to use the full model.
The last part of the conversion to concentration is the estimation of uncertainty in the concentration curves, which in itself contains several uncertainties. We derived an algebraic expression for these uncertainties using a Taylor expansion of concentration uncertainty. On average the uncertainty levels are around 10% of the concentration. The third step was choosing a pharmacokinetic model, we inspected a total of four models, the standard and extended Tofts models, the shutter speed model and the Brix model. We first assessed the ability of each model to find correct minima using a forward-backward simulation approach. We then simulated data that has the same temporal and uncertainty characteristics as real clinical data and used the same forward-backward approach to estimate model performance. We concluded that for data with those specific characteristics only the standard Tofts model performed adequately. After that we started investigating the data requirements for all models and we could see that for all models except the standard Tofts model data requirements on especially temporal resolution are high. Lastly we did an investigation in the errors introduced by assuming that the underlying physiological processes are more simplistic, which is what we do when we use the standard Tofts model. The fourth step was finding the arterial input function, which is used as an input for the pharmacokinetic model. In literature there are several methods, we discussed three: the use of a standardized input function (population averaged or mathematical), the use of a single reference tissue approach and the use of a multiple reference tissue approach. We found that errors caused by using a population averaged input function can be quite large as deviations from the true local input functions are seen directly in the pharmacokinetic parameters. The single reference tissue approach is another way to estimate the input function. We found that when we know the exact pharmacokinetic parameters of the reference tissue the errors are considerably lower than in the use of a standardized AIF. When pharmacokinetic parameters of the reference tissue are wrong however we can still induce large errors in parameter estimates. The third option was the use of a multiple reference tissue approach, which gave the best results. If multiple reference tissues are available within the data set this option should be used. The last step is to put together the pieces from the previous steps and use that to analyze the clinical data. We were able to use 14 patient data sets. Although a small number, we were able to see that there seems to be a relation between malignancy and Ktrans values. Benign tissues seemed to have lower Ktrans values when compared to malignant tissues. Another question was if we were able to cluster different cancer types according to pharmacokinetic parameters, but we have too little data to support that claim.}, + file = {:pdf/Litj10b.pdf:PDF}, + optnote = {DIAG}, + school = {Eindhoven University of Technology}, + journal = {Master thesis}, +} + +@inproceedings{Litj11, + author = {G. Litjens and P.C. Vos and J.O. Barentsz and N. Karssemeijer and H.J. Huisman}, + title = {Automatic Computer Aided Detection of Abnormalities in Multi-Parametric Prostate {MRI}}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + doi = {10.1117/12.877844}, + abstract = {Development of CAD systems for detection of prostate cancer has been a recent topic of research.
A multi-stage computer aided detection scheme is proposed to help reduce perception and oversight errors in multi-parametric prostate cancer screening MRI. In addition, important features for development of computer aided detection systems for prostate cancer screening MRI are identified. A fast, robust prostate segmentation routine is used to segment the prostate, based on coupled appearance and anatomy models. Subsequently a voxel classification is performed using a support vector machine to compute an abnormality likelihood map of the prostate. This classification step is based on quantitative voxel features like the apparent diffusion coefficient (ADC) and pharmacokinetic parameters. Local maxima in the likelihood map are found using a local maxima detector, after which regions around the local maxima are segmented. Region features are computed to represent statistical properties of the voxel features within the regions. Region classification is performed using these features, which results in a likelihood of abnormality per region. Performance was validated using a 188 patient dataset in a leave-one-patient-out manner. Ground truth was annotated by two expert radiologists. The results were evaluated using FROC analysis. The FROC curves show that inclusion of ADC and pharmacokinetic parameter features increases the performance of an automatic detection system. In addition it shows the potential of such an automated system in aiding radiologists diagnosing prostate MR, obtaining a sensitivity of respectively 74.7\% and 83.4\% at 7 and 9 false positives per patient.}, + file = {Litj11.pdf:pdf/Litj11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + gsid = {11297243638232679534}, + gscites = {42}, + ss_id = {91a237f32276f0b6c334058b32e1c6dd25ffd8db}, + all_ss_ids = {['91a237f32276f0b6c334058b32e1c6dd25ffd8db']}, +} + +@conference{Litj11b, + author = {G. Litjens and J. O. Barentsz and N. Karssemeijer and H.J. Huisman}, + title = {Zone-specific Automatic Computer-aided Detection of Prostate Cancer in {MRI}}, + booktitle = RSNA, + year = {2011}, + abstract = {{PURPOSE} Interpretation of multi-parametric MRI findings in the peripheral zone (PZ) or the transition zone (TZ) of the prostate is different. Therefore, this study investigates the performance of zone-specific computer-aided detection (CAD) as opposed to whole-prostate CAD. {METHOD AND MATERIALS} 117 consecutive prostate MRIs from 2009 were extracted from our database. 71/117 MRIs showed no malignant findings, 26/117 patients had a PZ tumor, 20/117 a TZ tumor. The MRIs were acquired on a 3T MR scanner (Siemens Trio Tim, Erlangen, Germany) and included T2-weighted images (T2WI), dynamic contrast enhanced MRI (DCE-MRI), and diffusion-weighted images (DWI). From DCE-MRI and DWI pharmacokinetic parameters (PK) and ADC maps were calculated respectively. Lesion locations were indicated by an expert radiologist. Histology was obtained using MR-guided biopsy or prostatectomy. A two-stage classification strategy was used. The prostate was segmented using an atlas based method including PZ and TZ. First stage voxel classification resulted in a likelihood map, in which local maxima were detected. Then, a region was segmented for each local maximum. Second stage classification resulted in a malignancy likelihood per region. Voxel features used were the T2WI intensities, PK and ADC values and blob detection values for T2WI, ADC and PK images.
For the second stage 25th- and 75th-percentiles within the segmented regions were calculated for all voxel features including the initial likelihood map. Classification in both stages was performed using a whole-prostate classifier or two separate zone-specific classifiers. The first stage used linear discriminant classifiers, the second stage support vector machine classifiers. Validation was performed in a leave-one-patient-out manner. FROC calculation and statistical analysis were performed using the JAFROC software package. The figure-of-merit (FOM) used is the area under the alternative FROC (AFROC) curve. {RESULTS} Zone-specific CAD was significantly better than whole-prostate CAD (FOM 0.63 vs. 0.48, p < 0.05). At 0.1, 1.0 and 3.0 false positives per patient the sensitivity of the zone-specific system was 0.23, 0.5 and 0.87 compared to 0.05, 0.22 and 0.47. {CONCLUSION} A zone-specific CAD system has significantly higher performance than a whole-prostate CAD system. {CLINICAL RELEVANCE/APPLICATION} CAD can help the radiologist read prostate MRI and might reduce oversight and perception errors in both PZ and TZ.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Litj12, + author = {G. Litjens and J.O. Barentsz and N. Karssemeijer and H.J. Huisman}, + title = {Automated computer-aided detection of prostate cancer in {MR} images: from a whole-organ to a zone-based approach}, + booktitle = MI, + year = {2012}, + volume = {8315}, + series = SPIE, + pages = {83150G-83150G-6}, + doi = {10.1117/12.911061}, + abstract = {MRI has shown to have great potential in prostate cancer localization and grading, but interpreting those exams requires expertise that is not widely available. Therefore, CAD applications are being developed to aid radiologists in detecting prostate cancer. Existing CAD applications focus on the prostate as a whole. However, in clinical practice transition zone cancer and peripheral zone cancer are considered to have different appearances. In this paper we present zone-specific CAD, in addition to an atlas based segmentation technique which includes zonal segmentation. Our CAD system consists of a detection and a classification stage. Prior to the detection stage the prostate is segmented into two zones. After segmentation features are extracted. Subsequently a likelihood map is generated on which local maxima detection is performed. For each local maximum a region is segmented. In the classification stage additional shape features are calculated, after which the regions are classified. Validation was performed on 288 data sets with MR-guided biopsy results as ground truth. Free-response Receiver Operating Characteristic (FROC) analysis was used for statistical evaluation. The difference between whole-prostate and zone-specific CAD was assessed using the difference between the FROCs. Our results show that evaluating the two zones separately results in an increase in performance compared to whole-prostate CAD. The FROC curves at .1, 1 and 3 false positives have a sensitivity of 0.0, 0.55 and 0.72 for whole-prostate and 0.08, 0.57 and 0.80 for zone-specific CAD. 
The FROC curve of the zone-specific CAD also showed significantly better performance overall (p < 0.05).}, + file = {Litj12.pdf:pdf/Litj12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {2}, + gsid = {1559733626797444041}, + gscites = {24}, + ss_id = {14db29c5408cb2e1d9e66cf4b09034699a024ca6}, + all_ss_ids = {['14db29c5408cb2e1d9e66cf4b09034699a024ca6']}, +} + +@inproceedings{Litj12a, + author = {Litjens, G. and Debats, O. A. and van de Ven, W. J. M. and Karssemeijer, N. and Huisman, H. J.}, + title = {A pattern recognition approach to zonal segmentation of the prostate on {MRI}}, + booktitle = MICCAI, + year = {2012}, + volume = {7511}, + series = LNCS, + pages = {413-420}, + doi = {10.1007/978-3-642-33418-4_51}, + abstract = {Zonal segmentation of the prostate into the central gland and peripheral zone is a useful tool in computer-aided detection of prostate cancer, because occurrence and characteristics of cancer in both zones differ substantially. In this paper we present a pattern recognition approach to segment the prostate zones. It incorporates three types of features that can differentiate between the two zones: anatomical, intensity and texture. It is evaluated against a multi-parametric multi-atlas based method using 48 multi-parametric MRI studies. Three observers are used to assess inter-observer variability and we compare our results against the state of the art from literature. Results show a mean Dice coefficient of 0.89 +/- 0.03 for the central gland and 0.75 +/- 0.07 for the peripheral zone, compared to 0.87 +/- 0.04 and 0.76 +/- 0.06 in literature. Summarizing, a pattern recognition approach incorporating anatomy, intensity and texture has been shown to give good results in zonal segmentation of the prostate.}, + file = {Litj12a.pdf:pdf\\Litj12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {15659902223096625028}, + gscites = {73}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/111051}, + ss_id = {bcff51ffb3e864ae4f8a4b6de12c2bf019999c7b}, + all_ss_ids = {['bcff51ffb3e864ae4f8a4b6de12c2bf019999c7b']}, +} + +@article{Litj12b, + author = {Litjens, G. and Hambrock, Thomas and Hulsbergen-van de Kaa, Christina and Barentsz, Jelle and Huisman, Henkjan}, + title = {Interpatient Variation in Normal Peripheral Zone Apparent Diffusion Coefficient: Effect on the Prediction of Prostate Cancer Aggressiveness}, + journal = Radiology, + year = {2012}, + volume = {265}, + pages = {260-266}, + doi = {10.1148/radiol.12112374}, + abstract = {Purpose: To determine the interpatient variability of prostate peripheral zone (PZ) apparent diffusion coefficient (ADC) and its effect on the assessment of prostate cancer aggressiveness. Materials and Methods: The requirement for institutional review board approval was waived. Intra- and interpatient variation of PZ ADCs was determined by means of repeated measurements of normal ADCs at three magnetic resonance (MR) examinations in a retrospective cohort of 10 consecutive patients who had high prostate-specific antigen levels and negative findings at transrectal ultrasonographically-guided biopsy. In these patients, no signs of PZ cancer were found at all three MR imaging sessions. The effect of interpatient variation on the assessment of prostate cancer aggressiveness was examined in a second retrospective cohort of 51 patients with PZ prostate cancer. Whole-mount step-section pathologic evaluation served as reference standard for placement of regions of interest on tumors and normal PZ.
Repeated-measures analysis of variance was used to determine the significance of the interpatient variations in ADCs. Linear logistic regression was used to assess whether incorporating normal PZ ADCs improves the prediction of cancer aggressiveness. Results: Analysis of variance revealed that interpatient variability (1.2-2.0 x 10(-3) mm(2)/sec) was significantly larger than measurement variability (0.068 x 10(-3) mm(2)/sec +/- 0.027 [standard deviation]) (P = .0058). Stand-alone tumor ADCs showed an area under the receiver operating characteristic curve (AUC) of 0.91 for discriminating low-grade versus high-grade tumors. Incorporating normal PZ ADC significantly improved the AUC to 0.96 (P = .0401). Conclusion: PZ ADCs show significant interpatient variation, which has a substantial effect on the prediction of prostate cancer aggressiveness. Correcting this effect results in a significant increase in diagnostic accuracy. (c) RSNA, 2012.}, + file = {Litj12b.pdf:pdf\\Litj12b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {22923722}, + month = {10}, + gsid = {3462472109870161281}, + gscites = {60}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/108408}, +} + +@conference{Litj12c, + author = {G. Litjens and J. O. Barentsz and N. Karssemeijer and H.J. Huisman}, + title = {Computerized characterization of central gland lesions using texture and relaxation features from {T2}-weighted prostate {MRI}}, + booktitle = RSNA, + year = {2012}, + abstract = {Purpose: The recent PI-RADS standard considers T2-weighted (T2W) MR the best imaging modality to characterize central gland (CG) lesions. In this study we assessed whether computer-aided diagnosis using T2 texture and relaxation features can separate benign and malignant CG lesions. Materials and Methods: MR scans of 101 patients were included in this study. The reference standard was MR-guided MR biopsy. Of these patients 36 had benign disease (e.g. benign prostatic hyperplasia) and 65 had prostate cancer. Lesions were annotated on the T2W sequence using a contouring tool. A quantitative T2 relaxation map was computed using an estimator that combines the T2W and proton density images with a turbo-spin-echo signal model and a gain factor. The latter was estimated using an automatically selected muscle reference region. Several texture voxel features were computed on the resulting T2-map: co-occurrence matrix based homogeneity, neighboring gray-level dependence matrix based texture strength, and multi-scale Gaussian derivative features. For the latter 5 scales between 2 and 12 mm and derivatives up to the second order were calculated. For the matrix based features we calculated several histogram bin sizes (8, 16 and 32) and kernel sizes (4, 8 and 12 mm). The total number of texture features was 42. A linear discriminant classifier with feature selection was trained to compute the cancer likelihood for each voxel in the lesion. A feature selection was performed in a nested cross-validation loop using 10 folds. Cross-validation was performed in a leave-one-patient-out manner. For each annotated region a summary lesion likelihood was computed using the 75th percentile of the voxel likelihoods. The diagnostic accuracy of the lesion cancer likelihood was evaluated using receiver-operating characteristic (ROC) analysis and bootstrapping.
Results: An area under the ROC curve of 0.76 (95% bootstrap confidence interval 0.64 - 0.87) was obtained for determining cancer likelihood using texture features, which is similar to radiologist performance reported in the literature when they only have T2W images available, like in this study. Conclusion: A novel method for characterizing lesions in T2-weighted MRI using texture descriptors was developed. The performance is in the range of values reported in the literature for radiologists. Clinical relevance: A CAD system for classification of CG lesions could improve the characterization of these lesions, which might result in better treatment planning.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Litj12d, + author = {G. Litjens and N. Karssemeijer and H. J. Huisman}, + title = {A multi-atlas approach for prostate segmentation in {MRI}}, + booktitle = {{MICCAI} {W}orkshop: {P}rostate {C}ancer {I}maging: The {PROMISE12} Prostate Segmentation Challenge}, + year = {2012}, + file = {Litj12d.pdf:pdf\\Litj12d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {72368b70dd95e303cc4cc1bdaa8f5047225aa773}, + all_ss_ids = {['72368b70dd95e303cc4cc1bdaa8f5047225aa773']}, + gscites = {18}, +} + +@conference{Litj13, + author = {G. Litjens and J. O. Barentsz and N. Karssemeijer and H.J. Huisman}, + title = {Initial prospective evaluation of the prostate imaging reporting and data standard ({PI-RADS}): Can it reduce unnecessary {MR} guided biopsies?}, + booktitle = RSNA, + year = {2013}, + abstract = {Purpose: To evaluate the performance of the prostate imaging reporting and data standard (PI-RADS) and the effect of reader experience. Materials and Methods: A consecutive patient cohort of 254 patients who underwent both a detection MRI between January 1st, 2012 and December 31st, 2012 and a subsequent MR guided biopsy were included in this study. All patients were prospectively reported by one radiologist according to the PI-RADS guidelines. In total 10 different radiologists reported patients in this cohort. Of these 10 radiologists, 2 radiologists can be considered experts (19 and 12 years of experience with prostate MRI) and 8 can be considered inexperienced (3 years or less experience with prostate MRI). Together, the 2 experienced readers reported 108 patients and 146 were reported by the 8 inexperienced readers. The radiologists reported 436 lesions in these patients of which 339 were biopsied. Of these 339 biopsied lesions 190 were prostate cancer. Of these 190 prostate cancers 127 lesions had a Gleason 4 or higher component and were considered high-grade prostate cancer, all others were considered low grade. The distribution of non-cancers, low-grade cancers and high-grade cancer was similar between the inexperienced and experienced observers (44%/19%/36% vs. 46%/16%/38%). Each lesion received, according to the PI-RADS guidelines, a score between 1 and 5. The sensitivity, specificity, positive predictive value and negative predictive value were calculated at each of the PI-RADS scores relative to the biopsy results. High-grade cancers with a PI-RADS score above or equal to the threshold are true positives. Non-cancers below the threshold were considered true negatives. This was done for both the inexperienced and experienced radiologists. Results: In total 19 PI-RADS 2, 67 PI-RADS 3, 112 PI-RADS 4 and 141 PI-RADS 5 lesions were biopsied. No PI-RADS 1 lesions were biopsied. The inexperienced reader sensitivities for PIRADS 2, 3, 4 and 5 are: 1, 1, 0.96 and 0.69 respectively.
The experienced readers obtained 1, 1, 0.98 and 0.71. The corresponding specificities were 0, 0.16, 0.48 and 0.71 for the inexperienced and 0, 0.07, 0.36 and 0.76 for the experienced readers. The positive and negative predictive values were 0.46, 0.50, 0.61, 0.71 and 1, 1, 0.93, 0.74 for the inexperienced readers. For the experienced readers we obtained 0.46, 0.48, 0.57, 0.84 and 1, 1, 0.96, 0.78 respectively. Conclusion: In this population we can see that especially PI-RADS 4 and 5 classifications have excellent sensitivity, specificity, PPV and NPV characteristics. From this data we conclude that only PI-RADS 4 and 5 lesions require biopsy; inexperienced and experienced readers have sensitivities of 0.96 and 0.98 at this threshold. Experience matters: the number of unnecessary biopsies in PI-RADS 5 lesions reduces from 29/100 to 16/100 between inexperienced and experienced readers. Clinical relevance: PI-RADS reported lesions may help reduce the number of unnecessary biopsies. The strong effect of experience emphasizes the need for adequately trained radiologists for reporting prostate MR.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Litj14, + author = {Litjens, G. and Toth, Robert and van de Ven, Wendy and Hoeks, Caroline and Kerkstra, Sjoerd and van Ginneken, Bram and Vincent, Graham and Guillard, Gwenael and Birbeck, Neil and Zhang, Jindang and Strand, Robin and Malmberg, Filip and Ou, Yangming and Davatzikos, Christos and Kirschner, Matthias and Jung, Florian and Yuan, Jing and Qiu, Wu and Gao, Qinquan and Edwards, Philip Eddie and Maan, Bianca and van der Heijden, Ferdinand and Ghose, Soumya and Mitra, Jhimli and Dowling, Jason and Barratt, Dean and Huisman, Henkjan and Madabhushi, Anant}, + title = {Evaluation of prostate segmentation algorithms for {MRI}: The {PROMISE12} challenge}, + journal = MIA, + year = {2014}, + volume = {18}, + pages = {359-373}, + doi = {10.1016/j.media.2013.12.002}, + abstract = {Prostate MRI image segmentation has been an area of intense research due to the increased use of MRI as a modality for the clinical workup of prostate cancer. Segmentation is useful for various tasks, e.g. to accurately localize prostate boundaries for radiotherapy or to initialize multi-modal registration algorithms. In the past, it has been difficult for research groups to evaluate prostate segmentation algorithms on multi-center, multi-vendor and multi-protocol data. Especially because we are dealing with MR images, image appearance, resolution and the presence of artifacts are affected by differences in scanners and/or protocols, which in turn can have a large influence on algorithm accuracy. The Prostate MR Image Segmentation (PROMISE12) challenge was set up to allow a fair and meaningful comparison of segmentation methods on the basis of performance and robustness. In this work we will discuss the initial results of the online PROMISE12 challenge, and the results obtained in the live challenge workshop hosted by the MICCAI2012 conference. In the challenge, 100 prostate MR cases from 4 different centers were included, with differences in scanner manufacturer, field strength and protocol. A total of 11 teams from academic research groups and industry participated. Algorithms showed a wide variety in methods and implementation, including active appearance models, atlas registration and level sets. Evaluation was performed using boundary and volume based metrics which were combined into a single score relating the metrics to human expert performance.
The winners of the challenge were the algorithms by teams Imorphics and ScrAutoProstate, with scores of 85.72 and 84.29 overall. Both algorithms were significantly better than all other algorithms in the challenge (p<0.05) and had an efficient implementation with a run time of 8min and 3s per case respectively. Overall, active appearance model based approaches seemed to outperform other approaches like multi-atlas registration, both on accuracy and computation time. Although average algorithm performance was good to excellent and the Imorphics algorithm outperformed the second observer on average, we showed that algorithm combination might lead to further improvement, indicating that optimal performance for prostate segmentation is not yet obtained. All results are available online at http://promise12.grand-challenge.org/.}, + file = {Litj14.pdf:pdf\\Litj14.pdf:PDF}, + optnote = {DIAG, Prostate, RADIOLOGY}, + number = {2}, + pmid = {24418598}, + month = {2}, + gsid = {12911375061118389379}, + gscites = {548}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/137969}, + ss_id = {cc83eabf4f833b8c92dcf6012dcce348591d060f}, + all_ss_ids = {['cc83eabf4f833b8c92dcf6012dcce348591d060f']}, +} + +@inproceedings{Litj14a, + author = {Litjens, G. and Elliott, R. and Shih, N. and Feldman, M. and Barentsz, J. and Hulsbergen - van de Kaa, C. and Kovacs, I. and Huisman, H. and Madabhushi, A.}, + title = {Distinguishing prostate cancer from benign confounders via a cascaded classifier on multi-parametric {MRI}}, + booktitle = MI, + year = {2014}, + volume = {9035}, + series = SPIE, + pages = {903512}, + doi = {10.1117/12.2043751}, + abstract = {Learning how to separate benign confounders from prostate cancer is important because the imaging characteristics of these confounders are poorly understood. Furthermore, the typical representations of the MRI parameters might not be enough to allow discrimination. The diagnostic uncertainty this causes leads to a lower diagnostic accuracy. In this paper a new cascaded classifier is introduced to separate prostate cancer and benign confounders on MRI in conjunction with specific computer-extracted features to distinguish each of the benign classes (benign prostatic hyperplasia (BPH), inflammation, atrophy or prostatic intra-epithelial neoplasia (PIN)). In this study we tried to (1) calculate different mathematical representations of the MRI parameters which more clearly express subtle differences between different classes, (2) learn which of the MRI image features will allow to distinguish specific benign confounders from prostate cancer, and (3) find the combination of computer-extracted MRI features to best discriminate cancer from the confounding classes using a cascaded classifier. One of the most important requirements for identifying MRI signatures for adenocarcinoma, BPH, atrophy, inflammation, and PIN is accurate mapping of the location and spatial extent of the confounder and cancer categories from ex vivo histopathology to MRI. Towards this end we employed an annotated prostatectomy data set of 31 patients, all of whom underwent a multi-parametric 3 Tesla MRI prior to radical prostatectomy. The prostatectomy slides were carefully co-registered to the corresponding MRI slices using an elastic registration technique.
We extracted texture features from the T2-weighted imaging, pharmacokinetic features from the dynamic contrast enhanced imaging and diffusion features from the diffusion-weighted imaging for each of the confounder classes and prostate cancer. These features were selected because they form the mainstay of clinical diagnosis. Relevant features for each of the classes were selected using maximum relevance minimum redundancy feature selection, allowing us to perform classifier independent feature selection. The selected features were then incorporated in a cascading classifier, which can focus on easier sub-tasks at each stage, leaving the more difficult classification tasks for later stages. Results show that distinct features are relevant for each of the benign classes, for example the fraction of extra-vascular, extra-cellular space in a voxel is a clear discriminator for inflammation. Furthermore, the cascaded classifier outperforms both multi-class and one-shot classifiers in overall accuracy for discriminating confounders from cancer: 0.76 versus 0.71 and 0.62.}, + file = {Litj14a.pdf:pdf\\Litj14a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {16281847160609020035}, + gscites = {18}, + ss_id = {2f39694d368806dbf90c898dcadb23fa4d2a0ba2}, + all_ss_ids = {['2f39694d368806dbf90c898dcadb23fa4d2a0ba2']}, +} + +@inproceedings{Litj14b, + author = {Litjens, G. and Huisman, H. and Elliott, R. and Shih, N. and Feldman, M. and F\"utterer and Bomers, J. and Madabhushi, A.}, + title = {Distinguishing benign confounding treatment changes from residual prostate cancer on {MRI} following laser ablation}, + booktitle = MI, + year = {2014}, + volume = {9036}, + series = SPIE, + pages = {90361D}, + doi = {10.1117/12.2043819}, + abstract = {Laser interstitial thermotherapy (LITT) is a relatively new focal therapy technique for the ablation of localized prostate cancer. However, very little is known about the specific effects of LITT within the ablation zone and the surrounding normal tissue regions. For instance, it is important to be able to assess the extent of residual cancer within the prostate following LITT, which may be masked by thermally induced benign necrotic changes. Fortunately LITT is MRI compatible and hence this allows for quantitatively assessing LITT induced changes via multi-parametric MRI. Of course definite validation of any LITT induced changes on MRI requires confirmation via histopathology. The aim of this study was to quantitatively assess and distinguish the imaging characteristics of prostate cancer and benign confounding treatment changes following LITT on 3 Tesla multi-parametric MRI by carefully mapping the treatment related changes from the ex vivo surgically resected histopathologic specimens onto the pre-operative in vivo imaging. A better understanding of the imaging characteristics of residual disease and successfully ablated tissue might lead to improved treatment monitoring and as such patient prognosis. A unique clinical trial at the Radboud University Medical Center, in which 3 patients underwent a prostatectomy after LITT treatment, yielded ex-vivo histopathologic specimens along with pre- and post-LITT MRI. Using this data we (1) identified the computer extracted MRI signatures associated with treatment effects including benign necrotic changes and residual disease and (2) subsequently evaluated the computer extracted MRI features previously identified in distinguishing LITT induced changes in the ablated area relative to the residual disease.
Towards this end first a pathologist annotated the ablated area and the residual disease on the ex-vivo histology and then we transferred the annotations to the post-LITT MRI using semi-automatic elastic registration. The pre- and post-LITT MRI were subsequently registered and computer-derived multi-parametric MRI features extracted to determine differences in feature values between residual disease and successfully ablated tissue to assess treatment response. A scoring metric allowed us to identify those specific computer-extracted MRI features that maximally and differentially expressed between the ablated regions and the residual cancer, on a voxel-by-voxel basis. Finally, we used a Fuzzy C-Means algorithm to assess the discriminatory power of these selected features. Our results show that specific computer-extracted features from multi-parametric MRI differentially express within the ablated and residual cancer regions, as evidenced by our ability to, on a voxel-by-voxel basis, classify tissue as residual disease. Additionally, we show that change of feature values between pre- and post-LITT MRI may be useful as a quantitative marker for treatment response (T2-weighted texture and DCE MRI features showed largest differences between residual disease and successfully ablated tissue). Finally, a clustering approach to separate treatment effects and residual disease incorporating both (1) and (2) yielded a maximum area under the ROC curve of 0.97 on a voxel basis across 3 studies.}, + file = {Litj14b.pdf:pdf\\Litj14b.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + month = {3}, + gsid = {6518656036620498343}, + gscites = {4}, + ss_id = {639ee4bb8d748364fa12c44a29cb38e925488ada}, + all_ss_ids = {['639ee4bb8d748364fa12c44a29cb38e925488ada']}, +} + +@article{Litj14c, + author = {Litjens, G. and Debats, O. and Barentsz, J. and Karssemeijer, N. and Huisman, H.}, + title = {Computer-aided detection of prostate cancer in {MRI}}, + journal = TMI, + year = {2014}, + volume = {33}, + pages = {1083--1092}, + doi = {10.1109/TMI.2014.2303821}, + abstract = {Prostate cancer is one of the major causes of cancer death for men in the western world. Magnetic resonance imaging (MRI) is being increasingly used as a modality to detect prostate cancer. Therefore, computer-aided detection of prostate cancer in MRI images has become an active area of research. In this paper we investigate a fully automated computer-aided detection system which consists of two stages. In the first stage, we detect initial candidates using multi-atlas-based prostate segmentation, voxel feature extraction, classification and local maxima detection. The second stage segments the candidate regions and using classification we obtain cancer likelihoods for each candidate. Features represent pharmacokinetic behavior, symmetry and appearance, among others. The system is evaluated on a large consecutive cohort of 347 patients with MR-guided biopsy as the reference standard. This set contained 165 patients with cancer and 182 patients without prostate cancer. Performance evaluation is based on lesion-based free-response receiver operating characteristic curve and patient-based receiver operating characteristic analysis. The system is also compared to the prospective clinical performance of radiologists. Results show a sensitivity of 0.42, 0.75, and 0.89 at 0.1, 1, and 10 false positives per normal case. In clinical workflow the system could potentially be used to improve the sensitivity of the radiologist.
At the high specificity reading setting, which is typical in screening situations, the system does not perform significantly different from the radiologist and could be used as an independent second reader instead of a second radiologist. Furthermore, the system has potential in a first-reader setting.}, + file = {Litj14c.pdf:pdf\\Litj14c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {24770913}, + publisher = {Institute of Electrical \& Electronics Engineers (IEEE)}, + month = {5}, + gsid = {2908069285137157626}, + gscites = {375}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/247043}, + ss_id = {1b02bd4ef5cbf6d6fe99c1e52814ae27c17cf96b}, + all_ss_ids = {['1b02bd4ef5cbf6d6fe99c1e52814ae27c17cf96b']}, +} + +@article{Litj14d, + author = {Litjens, Geert JS and Huisman, Henkjan J and Elliott, Robin M and Shih, Natalie Nc and Feldman, Michael D and Viswanath, Satish and F\"utterer, Jurgen J and Bomers, Joyce GR and Madabhushi, Anant}, + title = {Quantitative identification of magnetic resonance imaging features of prostate cancer response following laser ablation and radical prostatectomy}, + journal = JMI, + year = {2014}, + volume = {1}, + pmid = {26158070}, + pages = {035001--035001}, + doi = {10.1117/1.JMI.1.3.035001}, + url = {http://medicalimaging.spiedigitallibrary.org/article.aspx?articleid=1921073&journalid=165}, + file = {Litj14d.pdf:pdf\\Litj14d.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + number = {3}, + publisher = {International Society for Optics and Photonics}, + month = {10}, + gsid = {1987898104259315064}, + gscites = {16}, + ss_id = {1b62c9397a2b1e488ae68530275cd815204022e1}, + all_ss_ids = {['1b62c9397a2b1e488ae68530275cd815204022e1']}, +} + +@conference{Litj14e, + author = {G. Litjens and N. Karssemeijer and J. O. Barentsz and H.J. Huisman}, + title = {Computer-aided {D}etection of {P}rostate {C}ancer in {M}ulti-parametric {M}agnetic {R}esonance {I}maging}, + booktitle = RSNA, + year = {2014}, + abstract = {{PURPOSE} Accurate reporting of multi-parametric prostate magnetic resonance imaging (mpMRI) is difficult and requires substantial experience. We investigate the effect of computer-aided diagnosis (CAD) on the diagnostic accuracy of prostate MRI reporting. {METHOD AND MATERIALS} Two consecutive cohorts of patients were used. One for training/development of the CAD system (347 patients) and one for the prospective evaluation (130 patients). Both cohorts comprise mpMRI and subsequent MR-guided biopsy and pathology. The mpMRIs were ESUR guideline compliant and performed on a Siemens 3T MRI without the use of an endo-rectal coil. Both cohorts were prospectively reported by one of ten radiologists according to the PI-RADS guidelines. Experience of the radiologists ranged from inexperienced to very experienced (1-20 years). The computer-aided diagnosis (CAD) system comprised of a voxel classification stage and a subsequent candidate segmentation and classification stage. Features include quantified T2, ADC, pharmacokinetics, texture and anatomical characteristics. ROC and FROC analysis was used to evaluate performance. For the prospective validation the CAD system assigned a score to each radiologist-identified lesion. Logistic regression combining the radiologist and CAD scores was used to emulate independent, prospective CAD reading. 
Subsequently, the diagnostic performance in detecting intermediate-to-high-grade cancer of the CAD system alone, the radiologist alone and the radiologist CAD-system combination was evaluated using sensitivity and specificity for the different PI-RADS thresholds. Bootstrapping was used to assess significance. {RESULTS} FROC analyses showed that the CAD system could detect 82% of all intermediate-to-high-grade lesions at 1 false positive per case. Combined CAD and radiologist score significantly improved the sensitivity at a PI-RADS 4 threshold over the radiologist alone (0.98 for the combination, 0.93 for the radiologist alone, p = 0.029). A significantly improved specificity was found at a PI-RADS threshold of 3 (0.25 versus 0.09, p = 0.013). {CONCLUSION} CAD can achieve excellent performance. As a second observer to characterize prostate lesions it can improve sensitivity and specificity in discriminating intermediate-to-high-grade cancer. {CLINICAL RELEVANCE/APPLICATION} Improving the performance of mpMRI in the detection of prostate cancer by CAD can prevent unnecessary biopsies.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Litj15, + author = {Litjens, G. and Bejnordi, B. Ehteshami and Timofeeva, N. and Swadi, G. and Kovacs, I. and Hulsbergen-van de Kaa, C. A. and van der Laak, J. A. W. M.}, + title = {Automated detection of prostate cancer in digitized whole-slide images of {H\&E}-stained biopsy specimens}, + booktitle = MI, + year = {2015}, + volume = {9420}, + series = SPIE, + pages = {94200B}, + doi = {10.1117/12.2081366}, + abstract = {Automated detection of prostate cancer in digitized H and E whole-slide images is an important first step for computer-driven grading. Most automated grading algorithms work on preselected image patches as they are too computationally expensive to calculate on the multi-gigapixel whole-slide images. An automated multi-resolution cancer detection system could reduce the computational workload for subsequent grading and quantification in two ways: by excluding areas of definitely normal tissue within a single specimen or by excluding entire specimens which do not contain any cancer. In this work we present a multi-resolution cancer detection algorithm geared towards the latter. The algorithm methodology is as follows: at a coarse resolution the system uses superpixels, color histograms and local binary patterns in combination with a random forest classifier to assess the likelihood of cancer. The five most suspicious superpixels are identified and at a higher resolution more computationally expensive graph and gland features are added to refine classification for these superpixels. Our methods were evaluated in a data set of 204 digitized whole-slide H and E stained images of MR-guided biopsy specimens from 163 patients. A pathologist exhaustively annotated the specimens for areas containing cancer. The performance of our system was evaluated using ten-fold cross-validation, stratified according to patient. Image-based receiver operating characteristic (ROC) analysis was subsequently performed where a specimen containing cancer was considered positive and specimens without cancer negative. 
We obtained an area under the ROC curve of 0.96 and a 0.4 specificity at a 1.0 sensitivity.}, + file = {Litj15.pdf:pdf\\Litj15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + ss_id = {e14a159a1d16c2acfe48b60c81807300c67ee228}, + all_ss_ids = {['e14a159a1d16c2acfe48b60c81807300c67ee228']}, + gscites = {15}, +} + +@phdthesis{Litj15a, + author = {Litjens, Geert}, + title = {Computerized detection of cancer in multi-parametric prostate {MRI}}, + year = {2015}, + url = {http://repository.ubn.ru.nl/handle/2066/134514}, + abstract = {Prostate MRI is becoming an increasingly popular imaging technique for the detection of prostate cancer. However, it requires a substantial amount of expertise and time from radiologists to accurately report on prostate MRI. Furthermore, quantitative analysis is needed for accurate assessment of cancer aggressiveness in vivo. Computer-aided detection and diagnosis (CAD) systems are excellent tools to tackle these challenges. In this thesis the design of such a system is discussed. CAD systems are typically a connected pipeline of differing algorithms performing consecutive tasks, for example segmentation, feature extraction and classification. We followed a similar outline in this thesis. In Chapter 2 we discuss the segmentation of the prostate capsule in the setting of a 'grand challenge'. Further division of the prostate capsule in distinct anatomical zones is the topic of Chapter 3. After segmentation of the capsule and the prostate zones, features discriminative for cancer and cancer aggression are discussed in Chapter 4 and 5. The interconnection of the components into one unified CAD system is subsequently discussed in Chapter 6 and the evaluation of the system in a clinical setting in Chapter 7. In this last Chapter we not only show that designing a fully automated CAD system is feasible, but that it can be used to the benefit of radiologists.}, + copromotor = {H. J. Huisman}, + file = {Litj15a.pdf:pdf/Litj15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer and J. O. Barentsz}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Litj15b, + author = {Litjens, Geert J S. and Barentsz, Jelle O. and Karssemeijer, Nico and Huisman, Henkjan J.}, + title = {Clinical evaluation of a computer-aided diagnosis system for determining cancer aggressiveness in prostate {MRI}}, + journal = ER, + year = {2015}, + volume = {25}, + number = {11}, + pages = {3187--3199}, + doi = {10.1007/s00330-015-3743-y}, + abstract = {To investigate the added value of computer-aided diagnosis (CAD) on the diagnostic accuracy of PIRADS reporting and the assessment of cancer aggressiveness. Multi-parametric MRI and histopathological outcome of MR-guided biopsies of a consecutive set of 130 patients were included. All cases were prospectively PIRADS reported and the reported lesions underwent CAD analysis. Logistic regression combined the CAD prediction and radiologist PIRADS score into a combination score. Receiver-operating characteristic (ROC) analysis and Spearman's correlation coefficient were used to assess the diagnostic accuracy and correlation to cancer grade. Evaluation was performed for discriminating benign lesions from cancer and for discriminating indolent from aggressive lesions. In total 141 lesions (107 patients) were included for final analysis. The area-under-the-ROC-curve of the combination score was higher than for the PIRADS score of the radiologist (benign vs. cancer, 0.88 vs.
0.81, p = 0.013 and indolent vs. aggressive, 0.88 vs. 0.78, p [...] 80 papers, covering modalities ranging from cardiac magnetic resonance, computed tomography, and single-photon emission computed tomography, to intravascular optical coherence tomography and echocardiography. Many different machine learning algorithms were used throughout these papers, with the most common being convolutional neural networks. Recent algorithms such as generative adversarial models were also used. The potential implications of deep learning algorithms on clinical practice, now and in the near future, are discussed.}, + file = {:pdf/Litj19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31395244}, + gsid = {7949427670884659630}, + gscites = {222}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/208418}, + ss_id = {1e70a09aedc8d91538e269f568b212ca839012b1}, + all_ss_ids = {['1e70a09aedc8d91538e269f568b212ca839012b1']}, +} + +@article{Litj22, + author = {Litjens, Geert and Ciompi, Francesco and van der Laak, Jeroen}, + title = {A Decade of GigaScience: The Challenges of Gigapixel Pathology Images.}, + doi = {10.1093/gigascience/giac056}, + volume = {11}, + abstract = {In the last decade, the field of computational pathology has advanced at a rapid pace because of the availability of deep neural networks, which achieved their first successes in computer vision tasks in 2012. An important driver for the progress of the field were public competitions, so called 'Grand Challenges', in which increasingly large data sets were offered to the public to solve clinically relevant tasks. Going from the first Pathology challenges, which had data obtained from 23 patients, to current challenges sharing data of thousands of patients, performance of developed deep learning solutions has reached (and sometimes surpassed) the level of experienced pathologists for specific tasks. We expect future challenges to broaden the horizon, for instance by combining data from radiology, pathology and tumor genetics, and to extract prognostic and predictive information independent of currently used grading schemes.}, + file = {:pdf/Litj22.pdf:PDF}, + journal = GigaScience, + month = {6}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35701372}, + year = {2022}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/252047}, + ss_id = {42c19709354c631efc35d2bda8f0d33cf1d79831}, + all_ss_ids = {['42c19709354c631efc35d2bda8f0d33cf1d79831']}, + gscites = {3}, +} + +@article{Liu14, + author = {Haixia Liu and Tao Tan and Jan van Zelst and Ritse Mann and Nico Karssemeijer and Bram Platel}, + title = {Incorporating texture features in a computer-aided breast lesion diagnosis system for automated three-dimensional breast ultrasound}, + journal = JMI, + year = {2014}, + volume = {1}, + pages = {024501-024501}, + doi = {10.1117/1.JMI.1.2.024501}, + abstract = {We investigated the benefits of incorporating texture features into an existing computer-aided diagnosis (CAD) system for classifying benign and malignant lesions in automated three-dimensional breast ultrasound images. The existing system takes into account 11 different features, describing different lesion properties; however, it does not include texture features. In this work, we expand the system by including texture features based on local binary patterns, gray level co-occurrence matrices, and Gabor filters computed from each lesion to be diagnosed.
To deal with the resulting large number of features, we proposed a combination of feature-oriented classifiers combining each group of texture features into a single likelihood, resulting in three additional features used for the final classification. The classification was performed using support vector machine classifiers, and the evaluation was done with 10-fold cross validation on a dataset containing 424 lesions (239 benign and 185 malignant lesions). We compared the classification performance of the CAD system with and without texture features. The area under the receiver operating characteristic curve increased from 0.90 to 0.91 after adding texture features (p<0.001).}, + file = {Liu14.pdf:pdf\\Liu14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + month = {7}, + gsid = {10049699584799847554}, + gscites = {22}, + ss_id = {a4278e9daff8dd2d492c51d6c629a76d552a9c45}, + all_ss_ids = {['a4278e9daff8dd2d492c51d6c629a76d552a9c45']}, +} + +@inproceedings{Lo09, + author = {P. Lo and B. van Ginneken and J. M. Reinhardt and M. de Bruijne}, + title = {Extraction of Airways from {CT} ({EXACT09})}, + booktitle = {The Second International Workshop On Pulmonary Image Analysis}, + year = {2009}, + pages = {175--189}, + abstract = {This paper describes a framework for evaluating airway extraction algorithms in a standardized manner and establishing reference segmentations that can be used for future algorithm development. Because of the sheer difficulty of constructing a complete reference standard manually, we propose to construct a reference using results from the algorithms being compared, by splitting each airway tree segmentation result into individual branch segments that are subsequently visually inspected by trained observers. Using the so constructed reference, a total of seven performance measures covering different aspects of segmentation quality are computed. We evaluated 15 airway tree extraction algorithms from different research groups on a diverse set of 20 chest CT scans from subjects ranging from healthy volunteers to patients with severe lung disease, who were scanned at different sites, with several different CT scanner models, and using a variety of scanning protocols and reconstruction parameters.}, + file = {Lo09.pdf:pdf\\Lo09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['d66ed19788611086397ccada6e74c7ba8d759cdf']}, + gscites = {2}, +} + +@inproceedings{Lo10, + author = {P. Lo and B. van Ginneken and M. de Bruijne}, + title = {Vessel tree extraction using locally optimal paths}, + booktitle = ISBI, + year = {2010}, + pages = {680--683}, + doi = {10.1109/ISBI.2010.5490083}, + abstract = {{T}his paper proposes a method to extract vessel trees by continually extending detected branches with locally optimal paths. {O}ur approach uses a cost function from a multiscale vessel enhancement filter. {O}ptimal paths are selected based on rules that take into account the geometric characteristics of the vessel tree. {E}xperiments were performed on 10 low dose chest {CT} scans for which the pulmonary vessel trees were extracted. 
{T}he proposed method is shown to extract a better connected vessel tree and extract more of the small peripheral vessels in comparison to applying a threshold on the output of the vessel enhancement filter.}, + file = {Lo10.pdf:pdf\\Lo10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {3547575388613335105}, + gscites = {29}, + ss_id = {56f99e7b81eac197131323001445ecb2eb0569b1}, + all_ss_ids = {['56f99e7b81eac197131323001445ecb2eb0569b1']}, +} + +@inproceedings{Lo11, + author = {P. Lo and E. M. van Rikxoort and J. G. Goldin and F. Abtin and M. de Bruijne and M. S. Brown}, + title = {A bottom-up approach for labeling of human airway trees}, + booktitle = {The Fourth International Workshop on Pulmonary Image Analysis}, + year = {2011}, + pages = {23--34}, + abstract = {In this paper, an airway labeling algorithm that allows for gaps between the labeled branches is introduced. A bottom-up approach for arriving to an optimal set of branches and their associated labels is used in the proposed method. A K nearest neighbor based appearance model is used to differentiate the different anatomical branches. The proposed method was applied on 33 computed tomography scans of different subjects, where an average of 24 anatomical branches were correctly detected out of a total of 29 anatomical branches. Additionally, the proposed method was also evaluated on trees with simulated errors, such as missing branches and having falsely detected branches, where we showed that such errors have little or no effect on the proposed method.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {11866150524425499008}, + gscites = {7}, +} + +@article{Lo12, + author = {P. Lo and B. van Ginneken and J. M. Reinhardt and Y. Tarunashree and P. A. de Jong and B. Irving and C. Fetita and M. Ortner and R. Pinho and J. Sijbers and M. Feuerstein and A. Fabijanska and C. Bauer and R. Beichel and C. S. Mendoza and R. Wiemker and J. Lee and A. P. Reeves and S. Born and O. Weinheimer and E. M. van Rikxoort and J. Tschirren and K. Mori and B. Odry and D. P. Naidich and I. J. Hartmann and E. A. Hoffman and M. Prokop and J. H. Pedersen and M. de Bruijne}, + title = {Extraction of Airways from {CT} ({EXACT'09})}, + journal = TMI, + year = {2012}, + volume = {31}, + pages = {2093--2107}, + doi = {10.1109/TMI.2012.2209674}, + abstract = {This paper describes a framework for establishing a reference airway tree segmentation, which was used to quantitatively evaluate fifteen different airway tree extraction algorithms in a standardized manner. Because of the sheer difficulty involved in manually constructing a complete reference standard from scratch, we propose to construct the reference using results from all algorithms that are to be evaluated. We start by subdividing each segmented airway tree into its individual branch segments. Each branch segment is then visually scored by trained observers to determine whether or not it is a correctly segmented part of the airway tree. Finally, the reference airway trees are constructed by taking the union of all correctly extracted branch segments. Fifteen airway tree extraction algorithms from different research groups are evaluated on a diverse set of twenty chest computed tomography ({CT}) scans of subjects ranging from healthy volunteers to patients with severe pathologies, scanned at different sites, with different {CT} scanner brands, models, and scanning protocols. Three performance measures covering different aspects of segmentation quality were computed for all participating algorithms. 
Results from the evaluation showed that no single algorithm could extract more than an average of 74\% of the total length of all branches in the reference standard, indicating substantial differences between the algorithms. A fusion scheme that obtained superior results is presented, demonstrating that there is complementary information provided by the different algorithms and there is still room for further improvements in airway segmentation algorithms.}, + file = {Lo12.pdf:pdf\\Lo12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {22855226}, + month = {11}, + gsid = {7968104102758490533}, + gscites = {231}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/107854}, + ss_id = {cfcae9fa6bd0b4cad15e40bb0dc4e8b367826acc}, + all_ss_ids = {['cfcae9fa6bd0b4cad15e40bb0dc4e8b367826acc']}, +} + +@inproceedings{Lo13, + author = {P. Lo and E. M. van Rikxoort and F. Abtin and S. Ahmad and A. Ordookhani and J. G. Goldin and M. S. Brown}, + title = {Automated segmentation of pulmonary lobes in chest {CT} scans using evolving surfaces}, + booktitle = MI, + year = {2013}, + volume = {8869}, + series = SPIE, + pages = {86693R}, + doi = {10.1117/12.2006982}, + abstract = {Segmentation of the pulmonary lobes from chest {CT} scans is a challenging problem, especially with the presence of incomplete pulmonary fissures. We present an iterative approach for the segmentation of pulmonary lobes via a surface that evolves based on a voxel based fissure confidence function and a smooth prior. The surface is constructed such that it separates the whole lung at all times, and is represented as a height map above a {2D} reference plane. A surface evolution process is used to fit the surface to a pulmonary fissure in a scan. At each iteration, the height of all points in the map is adjusted such that the overall confidence is maximized, followed by {L}aplacian smoothing to enforce a smooth prior on the surface. The proposed method was trained and tuned on 18 {CT} scans from a clinical trial, and tested on 41 scans of different patients with severe emphysema from another clinical trial. Average overlap ratio of the segmented upper and lower lobes of the left and right lungs are 0.96 and 0.91 respectively. Average overlap ratio for the right middle lobes is 0.86, where minor manual intervention was needed for six cases, and with seven cases excluded because the minor fissure was almost entirely not visible in the {CT} scan.}, + file = {Lo13.pdf:pdf\\Lo13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {1168389910277398226}, + gscites = {4}, +} + +@inproceedings{Lo13a, + author = {Pechin Lo and Eva M. van Rikxoort and Jonathan Goldin and Matthew S. Brown}, + title = {Semi-automated segmentation of pulmonary lobes in chest {CT} scans using evolving surfaces}, + booktitle = {The Fifth International Workshop on Pulmonary Image Analysis}, + year = {2013}, + abstract = {Automated segmentation of the pulmonary lobes from chest {CT} scans is a challenging problem that is yet to be solved reliably. Therefore there is a need for a semi-automated solution in the case where the automated solution fails. We present an approach that can be used for correcting an existing lobe segmentation or segmenting the lobes from scratch in a semi-automatic manner. The method is based on an iterative approach that evolves a surface based on a voxel based fissure confidence function, smooth prior and user input points. 
An advantage of the proposed method is that it takes into account both inputs from user and the appearance of fissures in the image, which in turn reduces the number of user interactions required. The proposed method was trained and tuned on 18 {CT} scans, and tested on 22 {CT} scans from different subjects with either idiopathic pulmonary fibrosis or severe emphysema. On average, the proposed method requires 37 user drawn line segments, which are mostly short, to segment all lobes accurately. We did not notice a large difference in the number of required line segments between starting from scratch or correcting lobe segmented from an automated method, as it usually requires only two lines in two different view plane from the user to obtain a relatively accurate fissure from scratch.}, + file = {Lo13a.pdf:Lo13a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Lohn15, + author = {V. Lohner and I.W.M. van Uden and H.M. van der Holst and A.M. Tuladhar and A.G.W. van Norden and M.I. Bergkamp and K.F. de Laat and R. Arntz and M. Ghafoorian and B. Platel and E.J. van Dijk and F.E. de Leeuw}, + title = {Run away from small vessel disease: physical activity reduces progression of white matter hyperintensities: the RUN DMC study}, + booktitle = {European Stroke Organization}, + year = {2015}, + optnote = {DIAG}, +} + +@conference{Lohuizen23a, + author = {Q. Y. van Lohuizen and C. Roest and F. F. J. Simonis and S. J. Fransen and T. C. Kwee and D. Yakar and H. Huisman}, + booktitle = RSNA, + title = {Diagnostic AI to speed up MRI protocols by identifying redundant sequences: are all diffusion-weighted prostate MRI sequences necessary?}, + abstract = {PURPOSE: Studies showed that AI reconstruction of accelerated MRI improves visual quality, but it is unclear whether this improves diagnostic value. We investigated a novel framework for accelerated MRI by assessing reconstruction quality (naive- vs AI-reconstructed) by comparing diagnostic performance and visual similarity as an outcome for prostate cancer detection. + METHODS: A retrospective multi-site study was performed on a cohort of 1535 patients who underwent bi-parametric prostate MRI between 2016-2020. An expert radiologist delineated all clinically significant prostate cancer (csPCa) lesions (PI-RADS >= 4). T2W scans were retrospectively undersampled in k-space, simulating four (R4) and eight (R8) times acceleration. A 3D U-Net was used to reconstruct undersampled images. The resulting images were fed to an existing state-of-the-art csPCa detection AI to evaluate the effect of AI reconstruction on diagnosis. Visual image quality (SSIM) was compared with a Wilcoxon test. Lesion level diagnostics were evaluated by comparing the partial area-under-the-FROC-curve over the false positive interval 0.1-2.5 (pAUC) using permutation tests. + RESULTS: AI-based reconstruction significantly improved visual quality compared to naive (IFFT) reconstruction MRI at R4 (SSIM 0.78+-0.02 vs 0.68+-0.03, p<0.001) and R8 (SSIM 0.67+-0.03 vs 0.51+-0.03, p<0.001), however, no significant improvements in diagnostic performance were observed for R4 (pAUC FROC 1.33 [CI 1.28-1.39] vs 1.29 [CI 1.23-1.35], p=0.37), nor R8 (pAUC FROC 1.12 [CI 1.07-1.17] vs 0.95 [CI 1.89-1.01], p=0.067). AI-based reconstruction resulted in 0.1 or more decrease in sensitivity compared to unaccelerated MRI. 
+ CONCLUSION: Recovery of visual quality in reconstructions does not correlate with recovering diagnostic quality, emphasizing the importance of measuring diagnostic value rather than visual similarity. AI reconstruction tools should be approached with care because they might have been optimized to reconstruct visually appealing images instead of diagnostic images.}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + +@article{Loka10, + author = {Mariette A J M Lokate and Michiel G J Kallenberg and Nico Karssemeijer and Maurice A A J van den Bosch and Petra H M Peeters and Carla H van Gils}, + title = {Volumetric breast density from full-field digital mammograms and its association with breast cancer risk factors: a comparison with a threshold method}, + journal = CANEBP, + year = {2010}, + volume = {19}, + pages = {3096--3105}, + doi = {10.1158/1055-9965.EPI-10-0703}, + abstract = {BACKGROUND: Breast density, a strong breast cancer risk factor, is usually measured on the projected breast area from film screen mammograms. This is far from ideal, as breast thickness and technical characteristics are not taken into account. We investigated whether volumetric density measurements on full-field digital mammography (FFDM) are more strongly related to breast cancer risk factors than measurements with a computer-assisted threshold method.METHODS: Breast density was measured on FFDMs from 370 breast cancer screening participants, using a computer-assisted threshold method and a volumetric method. The distribution of breast cancer risk factors among quintiles of density was compared between both methods. We adjusted for age and body mass index (BMI) with linear regression analysis.RESULTS: High percent density was strongly related to younger age, lower BMI, nulliparity, late age at first delivery and pre/perimenopausal status, to the same extent with both methods (all p<0.05). Similarly strong relationships were seen for the absolute dense area, but to a lesser extent for absolute dense volume. A larger dense volume was only significantly associated with late age at menopause, use of menopausal hormone therapy, and, in contrast to the other methods, high BMI.CONCLUSIONS: Both methods related equally well to known breast cancer risk factors. Impact:Despite its alleged higher precision, the volumetric method was not more strongly related to breast cancer risk factors. This is in agreement with other studies. The definitive relationship with breast cancer risk still needs to be investigated.}, + file = {Loka10.pdf:pdf/Loka10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {20921336}, + month = {10}, + gsid = {7815225352498783242}, + gscites = {70}, + ss_id = {a717f22666cfef9a62f3e18ac8ed5d81abac272e}, + all_ss_ids = {['a717f22666cfef9a62f3e18ac8ed5d81abac272e']}, +} + +@conference{Loma23, + author = {Robin Lomans and Rachel van der Post and Francesco Ciompi}, + booktitle = {MIDL}, + title = {Interactive Cell Detection in H&E-stained slides of Diffuse Gastric Cancer}, + abstract = {We present an interactive detection model to improve the cell annotation workflow of diffuse gastric cancer. + The model relates image and user inputs and is trained to detect three types of cells in diffuse gastric cancer histology. + We measure model multi-class cell detection performance as per-class F1 score and we show that it increases with the number of user input clicks. 
+ Moreover, we show that the proposed interactive annotation approach substantially reduces the number of required user actions needed for complete image annotation, achieving a 17\% reduction for the multi-class case. + Future work will implement an iterative approach to filter out recurring false positives for further performance improvement.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@conference{Loma23a, + author = {Robin Lomans and Jeroen van der Laak and Iris Nagtegaal and Francesco Ciompi and Rachel van der Post}, + booktitle = {European Congress of Pathology}, + title = {Deep learning for multi-class cell detection in H&E-stained slides of diffuse gastric cancer}, + abstract = {Background & objective + Diffuse gastric cancer (DGC) is characterized by poorly cohesive cells which are difficult to detect. We propose the first deep learning model to detect classical signet ring cells (SRCs), atypical SRCs, and poorly differentiated cells in H&E-stained slides of DGC. + + Methods + We collected slides from 9 patients with hereditary DGC, resulting in 105 and 3 whole-slide images (WSIs) of gastric resections and biopsies, respectively. The three target cell types were annotated, resulting in 24,695 cell-level annotations. We trained a deep learning model with the Faster-RCNN architecture using 99 WSIs in the development set. + + Results + The algorithm was tested on 9 WSIs in the independent validation set. Model predictions were counted as correct if they were within a 15-micron radius from the expert reference annotations. For evaluation, we split the detection task into two components: class-independent cell localization (recognition of any tumor cell type) and cell-type classification (categorizing localized cells as the correct types). We found (average) F1 scores of 0.69 and 0.93 for the localization and classification tasks, respectively. Thus, we observe that the algorithm does not generally misclassify cells, but rather, the errors mainly arise from missing cells or false positive predictions of cells that do not belong to the three target classes. + + Conclusion + Future work will focus on improving the cell localization performance of the algorithm. Cell localization of the three target classes will be an important task in a clinical application of our model, in which it could be used to improve the detection of DGC lesions among large sets of slides. Moreover, the algorithm will allow for quantitative assessment of DGC patterns, potentially giving new insights in specific morphological features of DGC such as patterns of spatial cell distributions.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@inproceedings{Loog02a, + author = {M. Loog and B. van Ginneken}, + title = {Supervised segmentation by {I}terated {C}ontextual {P}ixel {C}lassification}, + booktitle = ICPR, + year = {2002}, + pages = {925-928}, + doi = {10.1109/ICPR.2002.1048456}, + abstract = {{W}e propose a general iterative contextual pixel classifier for supervised image segmentation. {T}he iterative procedure is statistically well-founded and can be considered a variation on the iterated conditional modes ({ICM}) of {B}esag (1983). {H}aving an initial segmentation, the algorithm iteratively updates it by reclassifying every pixel, based on the original features and, additionally, contextual information. {T}his contextual information consists of the class labels of pixels in the neighborhood of the pixel to be reclassified. 
{T}hree essential differences with the original {ICM} are: (1) our update step is merely based on a classification result, hence avoiding the explicit calculation of conditional probabilities; (2) the clique formalism of the {M}arkov random field framework is not required; (3) no assumption is made w.r.t. the conditional independence of the observed pixel values given the segmented image. {T}he important consequence of properties 1 and 2 is that one can easily incorporate common pattern recognition tools in our segmentation algorithm. {E}xamples are different classifiers-e.g. {F}isher linear discriminant, nearest-neighbor classifier, or support vector machines-and dimension reduction techniques like {LDA}, or {PCA}. {W}e experimentally compare a specific instance of our general method to pixel classification, using simulated data and chest radiographs, and show that the former outperforms the latter.}, + file = {Loog02a.pdf:pdf\\Loog02a.pdf:PDF}, + gsid = {17126051828720866801}, + optnote = {DIAG, RADIOLOGY}, + gscites = {33}, + ss_id = {284bac5873dc9ae6cd676d5e77f51a7347db061e}, + all_ss_ids = {['284bac5873dc9ae6cd676d5e77f51a7347db061e']}, +} + +@inproceedings{Loog03, + author = {M. Loog and B. van Ginneken and M. A. Viergever}, + title = {Segmenting the posterior ribs in chest radiographs by iterated contextual pixel classification}, + booktitle = MI, + year = {2003}, + volume = {5032}, + series = SPIE, + pages = {609-618}, + doi = {10.1117/12.480862}, + abstract = {{T}he task of segmenting the posterior ribs within the lung fields is of great practical importance. {F}or example, delineation of the ribs may lead to a decreased number of false positives in computerized detection of abnormalities, and hence analysis of radiographs for computer-aided diagnosis purposes will benefit from this. {W}e use an iterative, pixel-based, statistical classification method - iterated contextual pixel classification ({ICPC}). {I}t is suited for a complex segmentation task in which a global shape description is hard to provide. {T}he method combines local gray level and contextual information to come to an overall image segmentation. {B}ecause of its generality, it is also useful for other segmentation tasks. {I}n our case, the variable number of visible ribs in the lung fields complicates the use of a global model. {A}dditional difficulties arise from the poor visibility of the lower and medial ribs. {U}sing cross validation, the method is evaluated on 35 radiographs in which all posterior ribs were traced manually. {ICPC} obtains an accuracy of 83%, a sensitivity of 79%, and a specificity of 86% for segmenting the costal space. {F}urther evaluation is done using five manual segmentations from a second observer, whose performance is compared with the five corresponding images from the first manual segmentation, yielding 83% accuracy, 84% sensitivity, and 83% specificity. {O}n these five images, {ICPC} attains 82%, 78%, and 86% respectively.}, + file = {Loog03.pdf:pdf\\Loog03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {9395252940507654239}, + gscites = {8}, + ss_id = {e783c9b2be90dd57814d8064375e42bef8cd7154}, + all_ss_ids = {['e783c9b2be90dd57814d8064375e42bef8cd7154']}, +} + +@phdthesis{Loog04c, + author = {M.
Loog}, + title = {Supervised {D}imensionality {R}eduction and {C}ontextual {P}attern {R}ecognition in {M}edical {I}mage {P}rocessing}, + year = {2004}, + url = {http://igitur-archive.library.uu.nl/dissertations/2005-0915-200014/UUindex.html}, + abstract = {{T}he past few years have witnessed a significant increase in the number of supervised methods employed in diverse image processing tasks. {E}specially in medical image analysis the use of, for example, supervised shape and appearance modelling has increased considerably and has proven to be successful. {T}his thesis focuses on applying supervised pattern recognition methods in medical image processing. {W}e consider a local, pixel-based approach in which image segmentation, regression, and filtering tasks are solved using descriptors of the local image content (features) based on which decisions are made that provide a class label (in case of image segmentation) or a gray value (in case of filtering or regression) for every pixel. {T}he basic probabilistic decision problem, underlying---implicitly or explicitly---all the methods presented in this thesis, can be stated in terms of a conditional probability optimization problem u = argmax_y {P}(y|x) in which x is a d-dimensional vector of measurements, i.e., a feature vector, describing the local image content and y is a quantity that takes values from a set {Y}. {T}ypically, in a classification task, {Y} is a discrete set of labels and in case of regression, {Y} equals {R}. {B}ased on the maximization in the previous equation, to every vector x (which is associated to a pixel in an image), a particular u from {Y} is associated. {T}his approach is - because of its local nature - quite different from the shape and appearance methods mentioned in the beginning of this chapter which try to solve image processing tasks in a more global way. {A} recent comparative study [{B}. van {G}inneken, {M}. {B}. {S}tegmann, and {M}. {L}oog. {S}egmentation of anatomical structures in chest radiographs using supervised methods: a comparative study on a public database, {M}edical {I}mage {A}nalysis, 2006] shows that in image segmentation, pixel-based approaches can compete with shape and appearance models, providing an interesting alternative to the latter. {T}he principal methodological part of the thesis consists of three dimensionality reduction methods that can aid the extraction of relevant features to be used for performing image segmentation or regression. {F}urthermore, an iterative segmentation scheme is developed which draws from classical pattern recognition and machine learning methods. {F}inally, two applications of these techniques in two problems related to computer-aided diagnosis ({CAD}) in chest radiography are presented. {F}irstly, the task of segmenting the posterior ribs is considered. {S}econdly, a regression framework is presented, which aims at suppressing bony structures in chest radiographs.}, + copromotor = {B. van Ginneken and R. P. W. Duin}, + file = {Loog04c.pdf:pdf\\Loog04c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@inproceedings{Loog04d, + author = {M. Loog and B. 
van Ginneken}, + title = {Static posterior probability fusion for signal detection: applications in the detection of interstitial diseases in chest radiographs}, + booktitle = ICPR, + year = {2004}, + pages = {644--647}, + doi = {10.1109/ICPR.2004.1334244}, + abstract = {{T}his work presents general signal detection schemes based on the static fusion of posterior probabilities. {S}tarting with the assumption that for every pixel in an image there is a posterior probability-indicating the probability of the presence or the absence of the signal to be detected, some well-known probability fusion schemes and generalizations thereof are proposed to come to an overall decision regarding the presence or absence of the signal. {I}n addition to these well-known static fusion schemes-i.e., voting, averaging, maximum rule, etcetera, a quantile-based combination rule is presented as well. {T}he performance of the several rules is evaluated on two real-world, medical image analysis task. {B}oth tasks consider the computer-aided diagnosis ({CAD}) of standard posteroanterior chest radiographs. {M}ore specifically, in the first task the general detection of interstitial diseases is studied, while in the second task the focus is on the detection of tuberculosis.}, + file = {Loog04d.pdf:pdf\\Loog04d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + gsid = {4373819761535258462}, + gscites = {36}, + ss_id = {90f7f1eb9a2d0ff1db35a909062577efca9e32be}, + all_ss_ids = {['90f7f1eb9a2d0ff1db35a909062577efca9e32be']}, +} + +@inproceedings{Loog04e, + author = {M. Loog and B. van Ginneken and R. P. W. Duin}, + title = {Dimensionality reduction by canonical contextual correlation projections}, + booktitle = ECCV, + year = {2004}, + volume = {3021}, + series = LNCS, + pages = {562--573}, + doi = {10.1007/978-3-540-24670-1_43}, + abstract = {{A} linear, discriminative, supervised technique for reducing feature vectors extracted from image data to a lower-dimensional representation is proposed. {I}t is derived from classical {F}isher linear discriminant analysis ({LDA}) and useful, for example, in supervised segmentation tasks in which high-dimensional feature vector describes the local structure of the image. {I}n general, the main idea of the technique is applicable in discriminative and statistical modelling that involves contextual data. {LDA} is a basic, well-known and useful technique in many applications. {O}ur contribution is that we extend the use of {LDA} to cases where there is dependency between the output variables, i.e., the class labels, and not only between the input variables. {T}he latter can be dealt with in standard {LDA}. {T}he principal idea is that where standard {LDA} merely takes into account a single class label for every feature vector, the new technique incorporates class labels of its neighborhood in its analysis as well. {I}n this way, the spatial class label configuration in the vicinity of every feature vector is accounted for, resulting in a technique suitable for e.g. image data. {T}his spatial {LDA} is derived from a formulation of standard {LDA} in terms of canonical correlation analysis. {T}he linearly dimension reduction transformation thus obtained is called the canonical contextual correlation projection. {A}n additional drawback of {LDA} is that it cannot extract more features than the number of classes minus one. {I}n the two-class case this means that only a reduction to one dimension is possible. 
{O}ur contextual {LDA} approach can avoid such extreme deterioration of the classification space and retain more than one dimension. {T}he technique is exemplified on a pixel-based segmentation problem. {A}n illustrative experiment on a medical image segmentation task shows the performance improvements possible employing the canonical contextual correlation projection.}, + file = {Loog04e.pdf:pdf\\Loog04e.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13168417199997445959}, + gscites = {20}, + ss_id = {07456424645481c6fcb32de1373978bbfc6e6e2c}, + all_ss_ids = {['07456424645481c6fcb32de1373978bbfc6e6e2c']}, +} + +@inproceedings{Loog04f, + author = {M. Loog and B. van Ginneken and M. Nielsen}, + title = {Detection of interstitial lung disease in {PA} chest radiographs}, + booktitle = MI, + year = {2004}, + volume = {5368}, + series = SPIE, + pages = {848-855}, + doi = {10.1117/12.535307}, + abstract = {{A} computer-aided diagnosis scheme for the detection of interstitial disease in standard digital posteroanterior ({PA}) chest radiographs is presented. {T}he detection technique is supervised - manually labelled data should be provided for training the algorithm - and fully automatic, and can be used as part of a computerized analysis scheme for {X}-ray lung images. {P}rior to the detection, a segmentation should be performed which delineates the lung field boundaries. {S}ubsequently, a quadratic decision rule is employed for every pixel within the lung fields to associate with each pixel a probabilistic measure indicating interstitial disease. {T}he locally obtained per-pixel probabilities are fused to a single global probability indicating to what extent there is interstitial disease present in the image. {F}inally, a threshold on this quantity classifies the image as containing interstitial disease or not. {T}he probability combination scheme presented utilizes the quantiles of the local posterior probabilities to fuse the local probability into a global one. {U}sing this nonparametric technique, reasonable results are obtained on the interstitial disease detection task. {T}he area under the receiver operating characteristic equals 0.92 for the optimal setting.}, + file = {Loog04f.pdf:pdf\\Loog04f.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {14687688016231883918}, + gscites = {10}, + ss_id = {9d694a982ab413c311ead393c98a844394752044}, + all_ss_ids = {['9d694a982ab413c311ead393c98a844394752044']}, +} + +@article{Loog05, + author = {M. Loog and B. van Ginneken and R. P. W. Duin}, + title = {Dimensionality reduction of image features using the canonical contextual correlation projection}, + journal = PR, + year = {2005}, + volume = {38}, + pages = {2409--2418}, + doi = {10.1016/j.patcog.2005.04.011}, + abstract = {{A} linear, discriminative, supervised technique for reducing feature vectors extracted from image data to a lower-dimensional representation is proposed. {I}t is derived from classical linear discriminant analysis ({LDA}), extending this technique to cases where there is dependency between the output variables, i.e., the class labels, and not only between the input variables. ({T}he latter can readily be dealt with in standard {LDA}.) {T}he novel method is useful, for example, in supervised segmentation tasks in which high-dimensional feature vectors describe the local structure of the image.
{T}he principal idea is that where standard {LDA} merely takes into account a single class label for every feature vector, the new technique incorporates class labels of its neighborhood in the analysis as well. {I}n this way, the spatial class label configuration in the vicinity of every feature vector is accounted for, resulting in a technique suitable for, e.g. image data. {T}his extended {LDA}, that takes spatial label context into account, is derived from a formulation of standard {LDA} in terms of canonical correlation analysis. {T}he novel technique is called the canonical contextual correlation projection ({CCCP}). {A}n additional drawback of {LDA} is that it cannot extract more features than the number of classes minus one. {I}n the two-class case this means that only a reduction to one dimension is possible. {O}ur contextual {LDA} approach can avoid such extreme deterioration of the classification space and retain more than one dimension. {T}he technique is exemplified on a pixel-based medical image segmentation problem in which it is shown that it may give significant improvement in segmentation accuracy.}, + file = {Loog05.pdf:pdf\\Loog05.pdf:PDF}, + gsid = {6999602497087293130}, + optnote = {DIAG, RADIOLOGY}, + month = {12}, + gscites = {23}, + ss_id = {644fcd2ee9894d41c6e886c8067645af514b925f}, + all_ss_ids = {['644fcd2ee9894d41c6e886c8067645af514b925f']}, +} + +@inproceedings{Loog06, + author = {M. Loog and B. van Ginneken}, + title = {Bony {S}tructure {S}uppression in {C}hest {R}adiographs}, + booktitle = {Computer Vision Approaches to Medical Image Analysis}, + year = {2006}, + volume = {4241}, + series = LNCS, + pages = {166--177}, + doi = {10.1007/11889762_15}, + abstract = {{M}any computer aided diagnosis schemes in chest radiography start with preprocessing steps that try to remove or suppress normal anatomical structures from the image. {E}xamples of normal structures in posteroanterior chest radiographs are bony structures. {R}emoving these kinds of structures can be done quite effectively if the right dual energy images - two radiographic images from the same patient taken with different energies - are available. {S}ubtracting these two radiographs gives a soft-tissue image with most of the rib and other bony structures removed. {I}n general, however, dual energy images are not readily available. {W}e propose a supervised learning technique for inferring a soft-tissue image from a standard radiograph without explicitly determining the additional dual energy image. {T}he procedure, called dual energy faking, is based on k-nearest neighbor regression, and incorporates knowledge obtained from a training set of dual energy radiographs with their corresponding subtraction images for the construction of a soft-tissue image from a previously unseen single standard chest image.}, + file = {Loog06.pdf:pdf\\Loog06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {5428199913642700502}, + gscites = {19}, + ss_id = {5260cd32e11481ba468d6c8a0464d66627e27f8e}, + all_ss_ids = {['5260cd32e11481ba468d6c8a0464d66627e27f8e']}, +} + +@article{Loog06a, + author = {M. Loog and B. van Ginneken}, + title = {Segmentation of the posterior ribs in chest radiographs using iterated contextual pixel classification}, + journal = TMI, + year = {2006}, + volume = {25}, + pages = {602--11}, + doi = {10.1109/TMI.2006.872747}, + abstract = {{T}he task of segmenting the posterior ribs within the lung fields of standard posteroanterior chest radiographs is considered.
{T}o this end, an iterative, pixel-based, supervised, statistical classification method is used, which is called iterated contextual pixel classification ({ICPC}). {S}tarting from an initial rib segmentation obtained from pixel classification, {ICPC} updates it by reclassifying every pixel, based on the original features and, additionally, class label information of pixels in the neighborhood of the pixel to be reclassified. {T}he method is evaluated on 30 radiographs taken from the {JSRT} ({J}apanese {S}ociety of {R}adiological {T}echnology) database. {A}ll posterior ribs within the lung fields in these images have been traced manually by two observers. {T}he first observer's segmentations are set as the gold standard; {ICPC} is trained using these segmentations. {I}n a sixfold cross-validation experiment, {ICPC} achieves a classification accuracy of 0.86 +/- 0.06, as compared to 0.94 +/- 0.02 for the second human observer.}, + file = {Loog06a.pdf:pdf\\Loog06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {16689264}, + month = {5}, + gsid = {8560942913013408020}, + gscites = {103}, + ss_id = {d5f1619d8d7b6a1f5377a19f4daba1dcef9413aa}, + all_ss_ids = {['d5f1619d8d7b6a1f5377a19f4daba1dcef9413aa']}, +} + +@article{Loog06b, + author = {M. Loog and B. van Ginneken and Schilham, A. M. R.}, + title = {Filter learning: application to suppression of bony structures from chest radiographs}, + journal = MIA, + year = {2006}, + volume = {10}, + pages = {826--840}, + doi = {10.1016/j.media.2006.06.002}, + abstract = {{A} novel framework for image filtering based on regression is presented. {R}egression is a supervised technique from pattern recognition theory in which a mapping from a number of input variables (features) to a continuous output variable is learned from a set of examples from which both input and output are known. {W}e apply regression on a pixel level. {A} new, substantially different, image is estimated from an input image by computing a number of filtered input images (feature images) and mapping these to the desired output for every pixel in the image. {T}he essential difference between conventional image filters and the proposed regression filter is that the latter filter is learned from training data. {T}he total scheme consists of preprocessing, feature computation, feature extraction by a novel dimensionality reduction scheme designed specifically for regression, regression by k-nearest neighbor averaging, and (optionally) iterative application of the algorithm. {T}he framework is applied to estimate the bone and soft-tissue components from standard frontal chest radiographs. {A}s training material, radiographs with known soft-tissue and bone components, obtained by dual energy imaging, are used. {T}he results show that good correlation with the true soft-tissue images can be obtained and that the scheme can be applied to images from a different source with good results. {W}e show that bone structures are effectively enhanced and suppressed and that in most soft-tissue images local contrast of ribs decreases more than contrast between pulmonary nodules and their surrounding, making them relatively more pronounced.}, + file = {Loog06b.pdf:pdf\\Loog06b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {16859953}, + month = {12}, + gsid = {7955130862773952899}, + gscites = {62}, + ss_id = {c238b1c9bdec805216b5c3ee9b650edaf35947d3}, + all_ss_ids = {['c238b1c9bdec805216b5c3ee9b650edaf35947d3']}, +} + +@inproceedings{Lope03, + author = {M. I. L\'{o}pez and C. I. S\'{a}nchez and R. 
Hornero}, + title = {Retinal image analysis to detect and quantify lesions associated with diabetic retinopathy}, + booktitle = ARVO, + year = {2003}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13560728391275032738}, + gscites = {4}, +} + +@article{Louz14, + author = {Louzao Martinez, Laura and Friedlander, Elza and van der Laak, Jeroen A. W. M. and Hebeda, Konnie M.}, + title = {Abundance of IgG4+ Plasma Cells in Isolated Reactive Lymphadenopathy Is No Indication of IgG4-Related Disease}, + journal = AJCP, + year = {2014}, + volume = {142}, + issue = {4}, + pages = {459-466}, + doi = {10.1309/AJCPX6VF6BGZVJGE}, + abstract = {Objectives: IgG4-related disease is a recently recognized condition that can be associated with lymphadenopathy, with several histologic patterns and increased absolute number and ratio of immunoglobulin G4 (IgG4)-positive plasma cells. However, these findings are considered to be not exclusively specific for IgG4-related disease. Methods: The occurrence of the histologic patterns reported in patients with isolated lymphadenopathy was studied and correlated with the clinical presentation to determine their predictive value for IgG4-related lymphadenopathy. Results: We found cases meeting all histologic criteria for IgG4-related lymphadenopathy, without clinical signs of IgG4-related disease. The only pattern that was not seen in this series was an inflammatory pseudotumor-like picture. Conclusion: Without a clinical suspicion of IgG4-related disease, these morphologic patterns and high numbers of IgG4-positive plasma cells should be interpreted with care to avoid an erroneous diagnosis of IgG4-related disease.}, + file = {Louz14.pdf:pdf\\Louz14.pdf:PDF}, + optnote = {DIAG}, + month = {10}, + gsid = {13309261770194300071}, + gscites = {28}, + ss_id = {81460e5c3d1d916f328caad6b1775bd8ce7fa418}, + all_ss_ids = {['81460e5c3d1d916f328caad6b1775bd8ce7fa418']}, +} + +@article{Luit19, + author = {Luiten, Jacky D. and Korte, Bram and Voogd, Adri C. and Vreuls, Willem and Luiten, Ernest J.T. and Strobbe, Luc J. and Rutten, Matthieu J.C.M. and Plaisier, Menno L. and Lohle, Paul N. and Hooijen, Marianne J.H. and Tjan-Heijnen, Vivianne C.G. and Duijm, Lucien E.M.}, + title = {Trends in frequency and outcome of high-risk breast lesions at core needle biopsy in women recalled at biennial screening mammography, a multiinstitutional study}, + doi = {10.1002/ijc.32353}, + year = {2019}, + abstract = {Between January 1, 2011, and December 31, 2016, we studied the incidence, management and outcome of high-risk breast lesions in a consecutive series of 376,519 screens of women who received biennial screening mammography. During the 6-year period covered by the study, the proportion of women who underwent core needle biopsy (CNB) after recall remained fairly stable, ranging from 39.2% to 48.1% (mean: 44.2%, 5,212/11,783), whereas the proportion of high-risk lesions at CNB (i.e., flat epithelial atypia, atypical ductal hyperplasia, lobular carcinoma in situ and papillary lesions) gradually increased from 3.2% (25/775) in 2011 to 9.5% (86/901) in 2016 (p < 0.001). The mean proportion of high-risk lesions at CNB that were subsequently treated with diagnostic surgical excision was 51.4% (169/329) and varied between 41.0% and 64.3% through the years, but the excision rate for high-risk lesions per 1,000 screens and per 100 recalls increased from 0.25 (2011) to 0.70 (2016; p < 0.001) and from 0.81 (2011) to 2.50 (2016; p < 0.001), respectively. 
The proportion of all diagnostic surgical excisions showing in situ or invasive breast cancer was 29.0% (49/169) and varied from 22.2% (8/36) in 2014 to 38.5% (5/13) in 2011. In conclusion, the proportion of high-risk lesions at CNB tripled in a 6-year period, with a concomitant increased excision rate for these lesions. As the proportion of surgical excisions showing in situ or invasive breast cancer did not increase, a rising number of screened women underwent invasive surgical excision with benign outcome.}, + url = {http://dx.doi.org/10.1002/ijc.32353}, + file = {Luit19.pdf:pdf\Luit19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {International Journal of Cancer}, + citation-count = {10}, + automatic = {yes}, + pages = {2720-2727}, + volume = {145}, +} + +@mastersthesis{Lux20, + author = {Klaus-Michael Lux}, + title = {Neuromuscular disease screening with deep learning}, + abstract = {Ultrasound images have already been successfully leveraged in the early screening for neuromuscular disease (NMD). However, the current rule-based screening method requires the manual annotation of regions of interest (ROI) and can only be extended to new ultrasound machines after the costly collection of a reference sample. This work proposes to solve both these problems with end-to-end training of a deep-learning classifier on multiple images per patient. We demonstrate the suitability of the method for screening, with no need for ROI annotation. We also investigate various methods for domain adaptation. Simple adaptation methods are shown to already work well and could allow easy transfer of the screening method between different ultrasound devices.}, + file = {Lux20.pdf:pdf/Lux20.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Nijmegen}, + year = {2020}, + journal = {Master thesis}, +} + +@conference{Maas13b, + author = {Marnix C. Maas and Mari\"et J. Koopman and Geert J.S. Litjens and Alan J. Wright and Kirsten M. Selnas and Ingrid S. Gribbestad and Masoom A. Haider and Katarzyna J. Macura and Daniel J.A. Margolis and Berthold Kiefer and Jurgen J. F\"utterer and Tom W.J. Scheenen}, + title = {Prostate {C}ancer localization with a {M}ultiparametric {MR} Approach ({PCaMAP}): initial results of a multi-center study}, + booktitle = ISMRM, + year = {2013}, + file = {Maas13b.pdf:pdf\\Maas13b.pdf:PDF}, + optnote = {BioMR, DIAG, RADIOLOGY}, + gsid = {15616006571239793730}, + gscites = {2}, +} + +@article{Maas19, + author = {Maas, Marnix C and Litjens, Geert J S and Wright, Alan J and Attenberger, Ulrike I and Haider, Masoom A and Helbich, Thomas H and Kiefer, Berthold and Macura, Katarzyna J and Margolis, Daniel J A and Padhani, Anwar R and Selnaes, Kirsten M and Villeirs, Geert M and Futterer, Jurgen J and Scheenen, Tom W J}, + title = {A Single-Arm, Multicenter Validation Study of Prostate Cancer Localization and Aggressiveness With a Quantitative Multiparametric Magnetic Resonance Imaging Approach}, + journal = IR, + year = {2019}, + doi = {10.1097/RLI.0000000000000558}, + abstract = {The aims of this study were to assess the discriminative performance of quantitative multiparametric magnetic resonance imaging (mpMRI) between prostate cancer and noncancer tissues and between tumor grade groups (GGs) in a multicenter, single-vendor study, and to investigate to what extent site-specific differences affect variations in mpMRI parameters. Fifty patients with biopsy-proven prostate cancer from 5 institutions underwent a standardized preoperative mpMRI protocol.
Based on the evaluation of whole-mount histopathology sections, regions of interest were placed on axial T2-weighed MRI scans in cancer and noncancer peripheral zone (PZ) and transition zone (TZ) tissue. Regions of interest were transferred to functional parameter maps, and quantitative parameters were extracted. Across-center variations in noncancer tissues, differences between tissues, and the relation to cancer grade groups were assessed using linear mixed-effects models and receiver operating characteristic analyses. Variations in quantitative parameters were low across institutes (mean [maximum] proportion of total variance in PZ and TZ, 4% [14%] and 8% [46%], respectively). Cancer and noncancer tissues were best separated using the diffusion-weighted imaging-derived apparent diffusion coefficient, both in PZ and TZ (mean [95% confidence interval] areas under the receiver operating characteristic curve [AUCs]; 0.93 [0.89-0.96] and 0.86 [0.75-0.94]), followed by MR spectroscopic imaging and dynamic contrast-enhanced-derived parameters. Parameters from all imaging methods correlated significantly with tumor grade group in PZ tumors. In discriminating GG1 PZ tumors from higher GGs, the highest AUC was obtained with apparent diffusion coefficient (0.74 [0.57-0.90], P < 0.001). The best separation of GG1-2 from GG3-5 PZ tumors was with a logistic regression model of a combination of functional parameters (mean AUC, 0.89 [0.78-0.98]). Standardized data acquisition and postprocessing protocols in prostate mpMRI at 3 T produce equivalent quantitative results across patients from multiple institutions and achieve similar discrimination between cancer and noncancer tissues and cancer grade groups as in previously reported single-center studies.This is an open-access article distributed under the terms of the Creative Commons Attribution-Non Commercial-No Derivatives License 4.0 (CCBY-NC-ND), where it is permissible to download and share the work provided it is properly cited. The work cannot be changed in any way or used commercially without permission from the journal.}, + file = {Maas19.pdf:pdf\\Maas19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30946180}, + month = {7}, + gsid = {14299452348209473588}, + gscites = {23}, + ss_id = {e8b28dbb1bbb2944e3ec69a7cb9a9c33deeead88}, + all_ss_ids = {['e8b28dbb1bbb2944e3ec69a7cb9a9c33deeead88']}, +} + +@inproceedings{MacL08, + author = {MacLeod, R.S. and Kholmovski, E. and DiBella, E.V.R. and Oakes, R.S. and Blauer, J.E. and Fish, E. and Vijayakumar, S. and Daccarett, M. and Segerson, N.M. and Marrouche, N.F}, + title = {Integration of {MRI} in evaluation and ablation of atrial fibrillation}, + booktitle = CC, + year = {2008}, + pages = {77--80}, + doi = {10.1109/CIC.2008.4748981}, + abstract = {Magnetic resonance imaging (MRI) based approaches are supporting rapid advances in all phases of the management of atrial fibrillation (AF) patients, especially with the use of contrast agents and novel MRI acquisition techniques. In this report, we summarize briefly some recent advances in our use of MRI for AF management with special focus on the impact of these findings on the modeling and simulation of AF. We summarize results from two clinical studies, one of patients before radio frequency ablation of atrial fibrillation and one after ablation. In pre-ablation patients, significant extent of enhancements in delayed enhancement MRI of the left atrium is predictive of worsened outcome from ablation. 
The presumed mechanism is the presence of fibrosis in the posterior wall of the left atrium and supports the known finding that patients in chronic atrial fibrillation develop elevated levels of fibrosis. The implications of this finding on modeling of atrial electrical activity are that any such models must include both structural and functional fibrosis if they are to reflect realistic conditions.}, + optnote = {DIAG, RADIOLOGY}, + month = {9}, +} + +@book{Mada11, + author = {A. Madabhushi AND J. Dowling AND H. Huisman AND D. Barratt}, + title = {Prostate Cancer Imaging. Image Analysis and Image-Guided Interventions}, + year = {2011}, + volume = {6963}, + series = LNCS, + publisher = {Springer}, + doi = {10.1007/978-3-642-23944-1}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Madu09, + author = {Pragnya Maduskar and Mausumi Acharyya}, + title = {Automatic identification of intracranial hemorrhage in non-contrast {CT} with large slice thickness for trauma cases}, + booktitle = MI, + year = {2009}, + volume = {7260}, + series = SPIE, + pages = {726011.1-726011.8}, + doi = {10.1117/12.812276}, + abstract = {In this paper we propose a technique for automatic detection of intracranial hemorrhage (ICH) and acute intracranial hemorrhage (AIH) in brain Computed Tomography (CT) for trauma cases where no contrast can be applied and the CT has large slice thickness. ICH or AIH comprise of internal bleeding (intra-axial) or external (extra-axial) to the brain substance. Large bleeds like in intra-axial region are easy to diagnose whereas it can be challenging if small bleed occurs in extra-axial region particularly in the absence of contrast. Bleed region needs to be distinguished from bleed-look-alike brain regions which are abnormally bright falx and fresh flowing blood. We propose an algorithm for detection of brain bleed in various anatomical locations. A preprocessing step is performed to segment intracranial contents and enhancement of region of interests(ROIs). A number of bleed and bleed- look- alike candidates are identified from a set of 11 available cases. For each candidate texture based features are extracted from non-separable quincunx wavelet transform along with some other descriptive features. The candidates are randomly divided into a training and test set consisting of both bleed and bleed-look-alike. A supervised classifier is designed based on the training sample features. A performance accuracy of 96% is attained for the independent test candidates.}, + file = {Madu09.pdf:pdf\\Madu09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, +} + +@inproceedings{Madu11, + author = {Pragnya Maduskar AND Pandu R. Devarakota AND Siddharth Vikal}, + title = {Automatic detection of lung vessel bifurcation in thoracic {CT} Images}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {796334}, + doi = {10.1117/12.878395}, + abstract = {Computer-aided diagnosis (CAD) systems for detection of lung nodules have been an active topic of research for last few years. It is desirable that a CAD system should generate very low false positives (FPs) while maintaining high sensitivity. This work aims to reduce the number of false positives occurring at vessel bifurcation point. 
FPs occur quite frequently on vessel branching point due to its shape which can appear locally spherical due to the intrinsic geometry of intersecting tubular vessel structures combined with partial volume effects and soft tissue attenuation appearance surrounded by parenchyma.}, +  file = {Madu11.pdf:pdf\\Madu11.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY}, +  month = {3}, +} + +@inproceedings{Madu11a, +  author = {Pragnya Maduskar and Laurens Hogeweg and Helen Ayles and Rodney Dawson and Pim A. de Jong and Nico Karssemeijer and Bram van Ginneken}, +  title = {Cavity segmentation in chest radiographs}, +  booktitle = {The Fourth International Workshop on Pulmonary Image Analysis}, +  year = {2011}, +  abstract = {Cavities are air-filled spaces within a pulmonary consolidation and can be indicative of various diseases like primary bronchogenic carcinoma, mycobacterium tuberculosis, cancer and infections. Segmentation of cavities is a challenging task in chest radiographs due to the presence of superimposed structures. It is important to accurately measure the extent of cavitation to measure temporal changes and response to therapy. In this paper, we propose a semi-automatic technique for cavity border segmentation based on dynamic programming. A pixel classifier is trained using cavity border pixels based on Gaussian, location and Hessian features to construct a cavity wall likelihood map. A polar transformation of this likelihood map around the seed point is then used as a cost function to find an optimal border using dynamic programming. We have validated our technique on 50 chest radiographs (2048 x 2048 resolution, pixel size 0.25 mm, Delft Imaging Systems, The Netherlands) containing in total 50 cavities. These cavities have been manually outlined by three human experts, one chest radiologist and two readers certified to read chest radiographs according to a tuberculosis scoring system. The automatic border segmentations are compared with manual segmentations provided by the readers using Jaccard overlapping measure. The agreement between the automatically determined outlines is comparable to the inter-observer agreement.}, +  file = {Madu11a.pdf:pdf\\Madu11a.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY, TB}, +  all_ss_ids = {['1bae2990c101fa15e5f9b20919ab1d58da72e6f3']}, +  gscites = {0}, +} + +@conference{Madu11b, +  author = {P. Maduskar and L. Hogeweg and H. Ayles and R. Dawson and P. A. de Jong and B. van Ginneken}, +  title = {Automatic Size Measurement of Cavities on Chest Radiographs Using Supervised Learning and Dynamic Programming}, +  booktitle = RSNA, +  year = {2011}, +  abstract = {PURPOSE Accurate measurement of the size of cavities on chest radiographs (CXRs) is important for tuberculosis (TB) treatment monitoring and to make the decision to start TB treatment in the case of smear-negative TB suspects. An automatic technique for cavity segmentation is presented and compared to inter-reader agreement of human experts. METHOD AND MATERIALS A data set of 105 digital CXRs (2048 x 2048 resolution, pixel size 0.25 mm, Delft Imaging Systems, The Netherlands) was collected at Kanyama Health Clinic, Lusaka, Zambia and University of Cape Town Lung Institute, Cape Town, South Africa. For training and system development, 20 CXRs with one cavity and 30 normal cases were used. For evaluation, 55 CXRs with one cavity were used.
These cavities were manually outlined by three human experts, one chest radiologist and two readers certified to read CXRs according to the Chest Radiograph Reading and Recording System. Cavities are automatically segmented after the user clicks near the cavity center. A pixel classifier was trained to construct a cavity wall likelihood map. The classifier uses Gaussian, location and Hessian features and was trained with pixels within 1 mm of a cavity border as positive examples and random pixels from normal CXRs as negative examples. A polar transformation of the likelihood map around the center point was used as a cost function to search for an optimal border using dynamic programming. Cavity segmentations are compared using Jaccard overlap measure. Cases where the average overlap between pairs of manual expert segmentations is above/below 0.80 are defined as obvious/challenging cases. RESULTS The evaluation data contained 37 obvious and 18 challenging cases. The average overlap between manual expert segmentations was 0.87 +/- 0.02 and 0.67 +/- 0.05 for the obvious and challenging cases, respectively. For automatic versus manual segmentation the average overlap was 0.77 +/- 0.01 and 0.66 +/- 0.01, respectively. CONCLUSION Cavity segmentation is a challenging task with considerable disagreement between human expert readers. Our automated algorithm shows results comparable to experts and can be considered as an effectual and consistent method for cavity size measurements in CXR. CLINICAL RELEVANCE/APPLICATION Automatic cavity segmentation on chest radiographs can facilitate TB treatment follow-up and help to make a clinical decision regarding the start of TB treatment.}, +  optnote = {DIAG, RADIOLOGY, TB}, +} + +@inproceedings{Madu13, +  author = {P. Maduskar and L. Hogeweg and R. Philipsen and S. Schalekamp and B. van Ginneken}, +  title = {Improved texture analysis for automatic detection of Tuberculosis ({TB}) on Chest Radiographs with Bone Suppression images}, +  booktitle = MI, +  year = {2013}, +  volume = {8670}, +  series = SPIE, +  pages = {86700H}, +  doi = {10.1117/12.2008083}, +  abstract = {Computer aided detection (CAD) of tuberculosis (TB) on chest radiographs (CXR) is challenging due to overlapping structures. Suppression of normal structures can reduce overprojection effects and can enhance the appearance of diffuse parenchymal abnormalities. In this work, we compare two CAD systems to detect textural abnormalities in chest radiographs of TB suspects. One CAD system was trained and tested on the original CXR and the other CAD system was trained and tested on bone suppression images (BSI). BSI were created using a commercially available software (ClearRead 2.4, Riverain Medical). The CAD system is trained with 431 normal and 434 abnormal images with manually outlined abnormal regions. Subtlety rating (1-3) is assigned to each abnormal region, where 3 refers to obvious and 1 refers to subtle abnormalities. Performance is evaluated on normal and abnormal regions from an independent dataset of 900 images. These contain in total 454 normal and 1127 abnormal regions, which are divided into 3 subtlety categories containing 280, 527 and 320 abnormal regions respectively. For normal regions, original/BSI CAD has an average abnormality score of 0.094+/-0.027/0.085+/-0.032 (p < 0.001).
For abnormal regions, subtlety 1, 2, 3 categories have average abnormality scores for original/BSI of 0.155+/-0.073/0.156+/-0.089 (p = 0.73), 0.194+/-0.086/0.207+/-0.101 (p < 0.001), 0.225+/-0.119/0.247+/-0.117 (p < 0.001) respectively. CAD prototype is benefited by BSI in terms of increased accuracy of abnormality probabilistic maps. We therefore conclude that the use of bone suppression results in slightly but significantly improved automated detection of textural abnormalities in chest radiographs.}, +  file = {Madu13.pdf:pdf\\Madu13.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY, TB}, +  number = {16}, +  month = {3}, +  gsid = {5079156226762569028}, +  gscites = {25}, +  ss_id = {7738d6a2cfdba31d771d4a8ab6649f2718ded3d5}, +  all_ss_ids = {['7738d6a2cfdba31d771d4a8ab6649f2718ded3d5']}, +} + +@inproceedings{Madu13a, +  author = {P. Maduskar and L. Hogeweg and R. Philipsen and B. van Ginneken}, +  title = {Automated localization of costophrenic recesses and costophrenic angle measurement on frontal chest radiographs}, +  booktitle = MI, +  year = {2013}, +  volume = {8670}, +  series = SPIE, +  pages = {867038}, +  doi = {10.1117/12.2008239}, +  abstract = {Computer aided detection (CAD) of tuberculosis (TB) on chest radiographs (CXR) is difficult because the disease has varied manifestations, like opacification, hilar elevation, and pleural effusions. We have developed a CAD research prototype for TB (CAD4TB v1.08, Diagnostic Image Analysis Group, Nijmegen, The Netherlands) which is trained to detect textural abnormalities inside unobscured lung fields. If the only abnormality visible on a CXR would be a blunt costophrenic angle, caused by pleural fluid in the costophrenic recess, this is likely to be missed by texture analysis in the lung fields. The goal of this work is therefore to detect the presence of blunt costophrenic (CP) angles caused by pleural effusion on chest radiographs. The CP angle is the angle formed by the hemidiaphragm and the chest wall. We define the intersection point of both as the CP angle point. We first detect the CP angle point automatically from a lung field segmentation by finding the foreground pixel of each lung with maximum y location. Patches are extracted around the CP angle point and boundary tracing is performed to detect 10 consecutive pixels along the hemidiaphragm and the chest wall and derive the CP angle from these. We evaluate the method on a data set of 250 normal CXRs, 200 CXRs with only one or two blunt CP angles and 200 CXRs with one or two blunt CP angles but also other abnormalities. For these three groups, the CP angle location and angle measurements were correct in 91%, 88%, and 92% of all the cases, respectively. The average CP angles for the three groups are indeed different with 71.6 degrees +/- 22.9, 87.5 degrees +/- 25.7, and 87.7 degrees +/- 25.3, respectively.}, +  file = {Madu13a.pdf:pdf\\Madu13a.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY, TB}, +  number = {118}, +  month = {3}, +  gsid = {540514792832839267}, +  gscites = {12}, +  ss_id = {a016412367ed40d9c5b76383109dcc4f833ff12f}, +  all_ss_ids = {['a016412367ed40d9c5b76383109dcc4f833ff12f']}, +} + +@conference{Madu13b, +  author = {P. Maduskar and L. Hogeweg and H. Ayles and B.
van Ginneken}, +  title = {Performance evaluation of automatic chest radiograph reading for detection of tuberculosis {(TB)}: a comparative study with clinical officers and certified readers on {TB} suspects in sub-Saharan {Africa}}, +  booktitle = ECR, +  year = {2013}, +  abstract = {Purpose: Digital chest radiography (CXR) is used in high burden countries for suspect screening, active case finding and in prevalence surveys for TB diagnosis. An observer study was conducted to compare performance of automatic software with that of clinical officers and certified expert readers. Methods and Materials: A dataset of 166 digital CXRs (2048 x 2048, 0.25 mm, Delft Imaging Systems, The Netherlands) was collected at Kanyama Clinic, Lusaka, Zambia. Sputum culture was used as reference. An observer study was conducted with four clinical officers who read x-rays in Kanyama Clinic, and with two readers certified to read CXRs according to CRRS standard (University of Cape Town, South Africa). A software system for detection of TB (CAD4TB-1.08, Diagnostic Image Analysis Group, The Netherlands) analyzed all the cases. Human readers and software scored all the images between 0-100. We report area under the Receiver Operating Characteristics curve (Az) with 95% confidence intervals and pairwise comparisons from bootstrap estimates. p<0.05 was considered significant. Results: The dataset contained 99 positive and 67 negative cases. Az for the software was 0.72(0.63-0.80). Az values of CRRS certified readers were 0.71(0.63-0.79) and 0.72(0.64-0.78). Mean Az for clinical officers was 0.70(0.65-0.76). There was no significant difference between any reader and the software, except for one clinical officer who performed significantly worse than automatic reading. Conclusion: Automatic computer reading has similar performance as clinical officers and certified readers. The software has potential of being used as a point-of-care decision tool, to diagnose TB or select subjects that should undergo further tests.}, +  optnote = {DIAG, RADIOLOGY, TB}, +} + +@article{Madu13c, +  author = {Pragnya Maduskar and Monde Muyoyeta and Helen Ayles and Laurens Hogeweg and Liesbeth Peters-Bax and Bram van Ginneken}, +  title = {Detection of tuberculosis with digital chest radiography: automatic reading versus interpretation by clinical officers}, +  journal = IJTLD, +  year = {2013}, +  volume = {17}, +  pages = {1613-1620}, +  doi = {10.5588/ijtld.13.0325}, +  abstract = {SETTING: A busy urban health centre in Lusaka, Zambia. OBJECTIVE: To compare the accuracy of automated reading (CAD4TB) with the interpretation of digital chest radiograph (CXR) by clinical officers for the detection of tuberculosis (TB). DESIGN: A retrospective analysis was performed on 161 subjects enrolled in a TB specimen bank study. CXRs were analysed using CAD4TB, which computed an image abnormality score (0-100). Four clinical officers scored the CXRs for abnormalities consistent with TB. We compared the automated readings and the readings by clinical officers against the bacteriological and radiological results used as reference. We report here the area under the receiver operating characteristic curve (AUC) and kappa statistics. RESULTS: Of 161 enrolled subjects, 97 had bacteriologically confirmed TB and 120 had abnormal CXR. The AUCs for CAD4TB and the clinical officers were respectively 0.73 and 0.65-0.75 in comparison with the bacteriological reference, and 0.91 and 0.89-0.94 in comparison with the radiological reference.
P values indicated no significant differences, except for one clinical officer who performed significantly worse than CAD4TB (P < 0.05) using the bacteriological reference. Kappa values for CAD4TB and clinical officers with the radiological reference were respectively 0.61 and 0.49-0.67. CONCLUSION: CXR assessment using CAD4TB and by clinical officers is comparable. CAD4TB has potential as a point-of-care test and for the automated identification of subjects who require further examinations.}, +  file = {Madu13c.pdf:pdf\\Madu13c.pdf:PDF}, +  optnote = {DIAG}, +  number = {12}, +  pmid = {24200278}, +  month = {12}, +  taverne_url = {https://repository.ubn.ru.nl/handle/2066/125314}, +  ss_id = {a53cbf0c57a5e06312397f04c0629c31731e26da}, +  all_ss_ids = {['a53cbf0c57a5e06312397f04c0629c31731e26da']}, +  gscites = {65}, +} + +@article{Madu14, +  author = {Maduskar, Pragnya and Hogeweg, Laurens and de Jong, Pim A. and Peters-Bax, Liesbeth and Dawson, Rodney and Ayles, Helen and S\'{a}nchez, Clara I. and van Ginneken, Bram}, +  title = {Cavity contour segmentation in chest radiographs using supervised learning and dynamic programming}, +  journal = MP, +  year = {2014}, +  volume = {41}, +  pages = {071912-1 - 071912-15}, +  doi = {10.1118/1.4881096}, +  abstract = {Efficacy of tuberculosis (TB) treatment is often monitored using chest radiography. Monitoring size of cavities in pulmonary tuberculosis is important as the size predicts severity of the disease and its persistence under therapy predicts relapse. The authors present a method for automatic cavity segmentation in chest radiographs. A two stage method is proposed to segment the cavity borders, given a user defined seed point close to the center of the cavity. First, a supervised learning approach is employed to train a pixel classifier using texture and radial features to identify the border pixels of the cavity. A likelihood value of belonging to the cavity border is assigned to each pixel by the classifier. The authors experimented with four different classifiers: k-nearest neighbor (kNN), linear discriminant analysis (LDA), GentleBoost (GB), and random forest (RF). Next, the constructed likelihood map was used as an input cost image in the polar transformed image space for dynamic programming to trace the optimal maximum cost path. This constructed path corresponds to the segmented cavity contour in image space. The method was evaluated on 100 chest radiographs (CXRs) containing 126 cavities. The reference segmentation was manually delineated by an experienced chest radiologist. An independent observer (a chest radiologist) also delineated all cavities to estimate interobserver variability. Jaccard overlap measure was computed between the reference segmentation and the automatic segmentation; and between the reference segmentation and the independent observer's segmentation for all cavities. A median overlap of 0.81 (0.76 +/- 0.16), and 0.85 (0.82 +/- 0.11) was achieved between the reference segmentation and the automatic segmentation, and between the segmentations by the two radiologists, respectively. The best reported mean contour distance and Hausdorff distance between the reference and the automatic segmentation were, respectively, 2.48 +/- 2.19 and 8.32 +/- 5.66 mm, whereas these distances were 1.66 +/- 1.29 and 5.75 +/- 4.88 mm between the segmentations by the reference reader and the independent observer, respectively.
The automatic segmentations were also visually assessed by two trained CXR readers as "excellent," "adequate," or "insufficient." The readers had good agreement in assessing the cavity outlines and 84\% of the segmentations were rated as "excellent" or "adequate" by both readers.The proposed cavity segmentation technique produced results with a good degree of overlap with manual expert segmentations. The evaluation measures demonstrated that the results approached the results of the experienced chest radiologists, in terms of overlap measure and contour distance measures. Automatic cavity segmentation can be employed in TB clinics for treatment monitoring, especially in resource limited settings where radiologists are not available.}, + file = {Madu14.pdf:pdf\\Madu14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {24989390}, + month = {6}, + gsid = {5011186305563263998}, + gscites = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/137611}, + ss_id = {d1db95a442a2c17c4e60e4fee01372fa400ff636}, + all_ss_ids = {['d1db95a442a2c17c4e60e4fee01372fa400ff636']}, +} + +@conference{Madu15, + author = {P. Maduskar and I. Adetifa and J. van den Hombergh and E. Leroy-Terquem and A. Fasan-Odunsi and Clara I. S\'{a}nchez and U d'Alessandro and B. van Ginneken}, + title = {Computerized Reading of Chest Radiographs in The Gambia National Tuberculosis Prevalence Survey: Retrospective Comparison with Human Experts}, + booktitle = {Union World Conference on Lung Health}, + year = {2015}, + abstract = {Rationale: Tuberculosis (TB) prevalence surveys require recruitment of trained personnel for reading chest radiographs (CXRs), which is scarce in high TB burden countries. Computerized CXR reading could replace human readers in such a scenario and we retrospectively investigate this possibility on data from the 2011-2013 TB prevalence survey in The Gambia. Methods: Computerized readings were compared with field and central readings on 4,552 CXRs. The survey participants were screened based on symptoms and CXR findings. Field readers judged CXRs on radiological findings as: Normal, Abnormal, suggestive of TB? or Other abnormalities; the latter two were considered abnormal. In case of symptoms and/or abnormal CXR, sputum samples were collected and bacteriological tests (fluorescence microscopy and BACTEC MGIT culture) were performed for confirmatory TB diagnosis. Following the analysis, 73 subjects with proven TB were considered Abnormal and the remainder as Normal. The CXRs were centrally audited by an expert reader and one of the following categories was assigned to each CXR: Normal, Active TB, Abnormal, healed TB, or Other pathology. The software (CAD4TB, Radboud University Medical Center, Nijmegen, The Netherlands) computed a TB score between 0-100 (0-Normal, 100-Abnormal) for each CXR. The area under receiver operating characteristic curve (AUC) was calculated for CAD4TB and various cut-off points were chosen on the TB score to compare specificities at the sensitivities of the field and central readings. Results: The field reading achieved a sensitivity of 86.3% at 72.8% specificity. The central reading had a sensitivity of 67.1% at 93.3% specificity, and when the healed TB category was considered abnormal, sensitivity increased to 87.7% with a decrease in specificity to 65.2%. 
CAD4TB attained an AUC of 0.90 and at all three sensitivity levels of human readers, specificity was nearly identical and not significantly different (p-value>0.05): at field sensitivity, CAD4TB had a specificity of 72.2%, and for central sensitivity levels, specificities of 93.7% and 66.0% (healed TB as abnormal) were obtained. Conclusions: When selecting subjects in a prevalence survey on the basis of chest radiography to undergo confirmatory testing for TB, the performance of computerized reading is not significantly different from field and central readings by human experts. The software has potential to improve the efficiency of TB prevalence surveys.}, + optnote = {DIAG}, +} + +@phdthesis{Madu15a, + author = {Pragnya Maduskar}, + title = {Automated analysis of tuberculosis in chest radiographs}, + year = {2015}, + url = {http://hdl.handle.net/2066/144035}, + abstract = {This thesis describes automatic methods to detect various manifestations of TB and validates the software for automated analysis on TB populations from two countries. The work presented in this thesis is part of a larger project - Computer-Aided Detection for Tuberculosis (CAD4TB).}, + copromotor = {C. I. S\'{a}nchez}, + file = {Madu15a.pdf:pdf/Madu15a.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Madu16, + author = {Maduskar, Pragnya and Philipsen, Rick H M M. and Melendez, Jaime and Scholten, Ernst and Chanda, Duncan and Ayles, Helen and S\'{a}nchez, Clara I. and {van Ginneken}, Bram}, + title = {Automatic detection of pleural effusion in chest radiographs}, + journal = MIA, + year = {2016}, + volume = {28}, + pages = {22--32}, + doi = {10.1016/j.media.2015.09.004}, + abstract = {Automated detection of Tuberculosis (TB) using chest radiographs (CXRs) is gaining popularity due to the lack of trained human readers in resource limited countries with a high TB burden. The majority of the computer-aided detection (CAD) systems for TB focus on detection of parenchymal abnormalities and ignore other important manifestations such as pleural effusion (PE). The costophrenic angle is a commonly used measure for detecting PE, but has limitations. In this work, an automatic method to detect PE in the left and right hemithoraces is proposed and evaluated on a database of 638 CXRs. We introduce a robust way to localize the costophrenic region using the chest wall contour as a landmark structure, in addition to the lung segmentation. Region descriptors are proposed based on intensity and morphology information in the region around the costophrenic recess. Random forest classifiers are trained to classify left and right hemithoraces. Performance of the PE detection system is evaluated in terms of recess localization accuracy and area under the receiver operating characteristic curve (AUC). 
The proposed method shows significant improvement in the AUC values as compared to systems which use lung segmentation and the costophrenic angle measurement alone.}, + file = {Madu16.pdf:pdf\\Madu16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26688067}, + month = {2}, + gsid = {8541446038177947163}, + gscites = {39}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/168319}, + ss_id = {7e2f4b83c10f388e8ceab04bf559f30f299f4111}, + all_ss_ids = {['7e2f4b83c10f388e8ceab04bf559f30f299f4111']}, +} + +@inproceedings{Mahm14, + author = {Qaiser Mahmood and Artur Chodorowski and Babak {Ehteshami Bejnordi} and Mikael Persson}, + title = {A fully automatic unsupervised segmentation framework for the brain tissues in {MR} images}, + booktitle = MI, + year = {2014}, + series = SPIE, + doi = {10.1117/12.2043646}, + abstract = {This paper presents a novel fully automatic unsupervised framework for the segmentation of brain tissues in magnetic resonance (MR) images. The framework is a combination of our proposed Bayesian-based adaptive mean shift (BAMS), a priori spatial tissue probability maps and fuzzy c-means. BAMS is applied to cluster the tissues in the joint spatialintensity feature space and then a fuzzy c-means algorithm is employed with initialization by a priori spatial tissue probability maps to assign the clusters into three tissue types; white matter (WM), gray matter (GM) and cerebrospinal fluid (CSF). The proposed framework is validated on multimodal synthetic as well as on real T1-weighted MR data with varying noise characteristics and spatial intensity inhomogeneity. The performance of the proposed framework is evaluated relative to our previous method BAMS and other existing adaptive mean shift framework. Both of these are based on the mode pruning and voxel weighted k-means algorithm for classifying the clusters into WM, GM and CSF tissue. The experimental results demonstrate the robustness of the proposed framework to noise and spatial intensity inhomogeneity, and that it exhibits a higher degree of segmentation accuracy in segmenting both synthetic and real MR data compared to competing methods.}, + file = {Mahm14.pdf:pdf\\Mahm14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, +} + +@article{Maho20, + author = {Mahomed, Nasreen and van Ginneken, Bram and Philipsen, Rick H H M and Melendez, Jaime and Moore, David P and Moodley, Halvani and Sewchuran, Tanusha and Mathew, Denny and Madhi, Shabir A}, + title = {Computer-aided diagnosis for World Health Organization-defined chest radiograph primary-endpoint pneumonia in children}, + journal = PEDRAD, + year = {2020}, + volume = {50}, + issue = {4}, + month = {1}, + pages = {482-491}, + doi = {10.1007/s00247-019-04593-0}, + url = {https://link.springer.com/article/10.1007%2Fs00247-019-04593-0#citeas}, + abstract = {The chest radiograph is the most common imaging modality to assess childhood pneumonia. It has been used in epidemiological and vaccine efficacy/effectiveness studies on childhood pneumonia. To develop computer-aided diagnosis (CAD4Kids) for chest radiography in children and to evaluate its accuracy in identifying World Health Organization (WHO)-defined chest radiograph primary-endpoint pneumonia compared to a consensus interpretation. Chest radiographs were independently evaluated by three radiologists based on WHO criteria. Automatic lung field segmentation was followed by manual inspection and correction, training, feature extraction and classification. 
Radiographs were filtered with Gaussian derivatives on multiple scales, extracting texture features to classify each pixel in the lung region. To obtain an image score, the 95 percentile score of the pixels was used. Training and testing were done in 10-fold cross validation. The radiologist majority consensus reading of 858 interpretable chest radiographs included 333 (39%) categorised as primary-endpoint pneumonia, 208 (24%) as other infiltrate only and 317 (37%) as no primary-endpoint pneumonia or other infiltrate. Compared to the reference radiologist consensus reading, CAD4Kids had an area under the receiver operator characteristic (ROC) curve of 0.850 (95% confidence interval [CI] 0.823-0.876), with a sensitivity of 76% and specificity of 80% for identifying primary-endpoint pneumonia on chest radiograph. Furthermore, the ROC curve was 0.810 (95% CI 0.772-0.846) for CAD4Kids identifying primary-endpoint pneumonia compared to other infiltrate only. Further development of the CAD4Kids software and validation in multicentre studies are important for future research on computer-aided diagnosis and artificial intelligence in paediatric radiology.}, + file = {Maho20.pdf:pdf\\Maho20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31930429}, + gsid = {17440625968335195014}, + gscites = {40}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/217776}, + ss_id = {64a8a40675ff707a6a3de3af238dc03e30c7186e}, + all_ss_ids = {['64a8a40675ff707a6a3de3af238dc03e30c7186e']}, +} + +@article{Maie18a, + author = {Maier-Hein, Lena and Eisenmann, Matthias and Reinke, Annika and Onogur, Sinan and Stankovic, Marko and Scholz, Patrick and Arbel, Tal and Bogunovic, Hrvoje and Bradley, Andrew P and Carass, Aaron and Feldmann, Carolin and Frangi, Alejandro F and Full, Peter M and van Ginneken, Bram and Hanbury, Allan and Honauer, Katrin and Kozubek, Michal and Landman, Bennett A and Marz, Keno and Maier, Oskar and Maier-Hein, Klaus and Menze, Bjoern H and Muller, Henning and Neher, Peter F and Niessen, Wiro and Rajpoot, Nasir and Sharp, Gregory C and Sirinukunwattana, Korsuk and Speidel, Stefanie and Stock, Christian and Stoyanov, Danail and Taha, Abdel Aziz and van der Sommen, Fons and Wang, Ching-Wei and Weber, Marc-Andre and Zheng, Guoyan and Jannin, Pierre and Kopp-Schneider, Annette}, + title = {Why rankings of biomedical image analysis competitions should be interpreted with care}, + journal = NATCOM, + year = {2018}, + volume = {9}, + issue = {1}, + month = {12}, + pages = {5217}, + doi = {10.1038/s41467-018-07619-7}, + abstract = {International challenges have become the standard for validation of biomedical image analysis methods. Given their scientific impact, it is surprising that a critical analysis of common practices related to the organization of challenges has not yet been performed. In this paper, we present a comprehensive analysis of biomedical image analysis challenges conducted up to now. We demonstrate the importance of challenges and show that the lack of quality control has critical consequences. First, reproducibility and interpretation of the results is often hampered as only a fraction of relevant information is typically provided. Second, the rank of an algorithm is generally not robust to a number of variables such as the test data used for validation, the ranking scheme applied and the observers that make the reference annotations. 
To overcome these problems, we recommend best practice guidelines and define open research questions to be addressed in the future.}, +  file = {:pdf/Maie18a.pdf:PDF}, +  optnote = {DIAG}, +  pmid = {30523263}, +  gsid = {3078574965052198238,9198386791607085917}, +  gscites = {242}, +  all_ss_ids = {['91d23b702b9a59bf75c5162c3250017b526c0e69', 'bed661aebb0ede3c407e044637ce60a6d7ccfe5f']}, +} + +@article{Maie19, +  author = {Maier-Hein, Lena and Reinke, Annika and Kozubek, Michal and L. Martel, Anne and Arbel, Tal and Eisenmann, Matthias and Hanbury, Allan and Jannin, Pierre and Muller, Henning and Onogur, Sinan and Saez-Rodriguez, Julio and van Ginneken, Bram and Kopp-Schneider, Annette and Landman, Bennett}, +  title = {BIAS: Transparent reporting of biomedical image analysis challenges}, +  journal = {arXiv:1910.04071}, +  year = {2019}, +  url = {https://arxiv.org/abs/1910.04071}, +  abstract = {The number of biomedical image analysis challenges organized per year is steadily increasing. These international competitions have the purpose of benchmarking algorithms on common data sets, typically to identify the best method for a given problem. Recent research, however, revealed that common practice related to challenge reporting does not allow for adequate interpretation and reproducibility of results. To address the discrepancy between the impact of challenges and the quality (control), the Biomedical Image Analysis ChallengeS (BIAS) initiative developed a set of recommendations for the reporting of challenges. The BIAS statement aims to improve the transparency of the reporting of a biomedical image analysis challenge regardless of field of application, image modality or task category assessed. This article describes how the BIAS statement was developed and presents a checklist which authors of biomedical image analysis challenges are encouraged to include in their submission when giving a paper on a challenge into review. The purpose of the checklist is to standardize and facilitate the review process and raise interpretability and reproducibility of challenge results by making relevant information explicit.}, +  file = {Maie19.pdf:pdf\\Maie19.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY}, +  all_ss_ids = {['2968b96fae50ce329147b26f9c29d49439b74985']}, +  gscites = {72}, +} + +@article{Maie20, +  author = {Maier-Hein, Lena and Reinke, Annika and Kozubek, Michal and L. Martel, Anne and Arbel, Tal and Eisenmann, Matthias and Hanbury, Allan and Jannin, Pierre and Muller, Henning and Onogur, Sinan and Saez-Rodriguez, Julio and van Ginneken, Bram and Kopp-Schneider, Annette and Landman, Bennett}, +  title = {BIAS: Transparent reporting of biomedical image analysis challenges}, +  journal = MIA, +  year = {2020}, +  doi = {10.1016/j.media.2020.101796}, +  url = {https://arxiv.org/abs/1910.04071}, +  abstract = {The number of biomedical image analysis challenges organized per year is steadily increasing. These international competitions have the purpose of benchmarking algorithms on common data sets, typically to identify the best method for a given problem. Recent research, however, revealed that common practice related to challenge reporting does not allow for adequate interpretation and reproducibility of results. To address the discrepancy between the impact of challenges and the quality (control), the Biomedical Image Analysis ChallengeS (BIAS) initiative developed a set of recommendations for the reporting of challenges.
The BIAS statement aims to improve the transparency of the reporting of a biomedical image analysis challenge regardless of field of application, image modality or task category assessed. This article describes how the BIAS statement was developed and presents a checklist which authors of biomedical image analysis challenges are encouraged to include in their submission when giving a paper on a challenge into review. The purpose of the checklist is to standardize and facilitate the review process and raise interpretability and reproducibility of challenge results by making relevant information explicit.}, + file = {Maie20.pdf:pdf\\Maie20.pdf:PDF}, + optnote = {DIAG}, + volume = 66, + pages = {101796}, + pmid = {32911207}, + gsid = {17435494069051207769}, + gscites = {72}, + ss_id = {2968b96fae50ce329147b26f9c29d49439b74985}, + all_ss_ids = {['2968b96fae50ce329147b26f9c29d49439b74985']}, +} + +@inproceedings{Maka10, + author = {A. Makarau and H. Huisman and R. Mus and M. Zijp and N. Karssemeijer}, + title = {Breast {MRI} intensity non-uniformity correction using mean-shift}, + booktitle = MI, + year = {2010}, + volume = {7624}, + series = SPIE, + pages = {76242D}, + doi = {10.1117/12.845612}, + abstract = {{I}n breast {MRI}, intensity inhomogeneity due to coil profile hampers development of robust segmentation and automated processing methods. {T}he purpose of this paper is to evaluate the performance in breast {MRI} of a number of existing non-uniformity correction methods, mostly developed for brain imaging, and a novel correction method first presented here. {T}en breast {MRI} exams, which were manually segmented into background and five tissue classes, were used for performance assessment. {R}esults show that the relatively simple and fast bias field correction method presented in this paper outperforms the other methods in a number of aspects.}, + file = {Maka10.pdf:pdf/Maka10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {16713159183791215628}, + gscites = {6}, + ss_id = {bb1e8f7832cf85a1d49ee002dbb6e72d0c8251d6}, + all_ss_ids = {['bb1e8f7832cf85a1d49ee002dbb6e72d0c8251d6']}, +} + +@inproceedings{Mann00, + author = {R. Manniesing and I. Karkowski and H. Corporaal}, + title = {Automatic {SIMD} Parallelization of Embedded Applications Based on Pattern Recognition}, + booktitle = {Sixth International Euro-Par Conference on Parallel Processing}, + year = {2000}, + series = LNCS, + publisher = {Springer}, + pages = {349-356}, + doi = {10.1007/3-540-44520-X_46}, + abstract = {This paper investigates the potential for automatic mapping of typical embedded applications to architectures with multimedia instruction set extensions. For this purpose a (pattern matching based) code transformation engine is used, which involves a three-step process of matching, condition checking and replacing of the source code. Experiments with DSP and the MPEG2 encoder benchmarks, show that about 85% of the loops which are suitable for Single Instruction Multiple Data (SIMD) parallelization can be automatically recognized and mapped.}, + file = {Mann00.pdf:pdf\\Mann00.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13189106045004110415}, + gscites = {23}, +} + +@article{Mann04, + author = {R. Manniesing AND R. Kleihorst AND A. van der Avoird AND E. 
Hendriks}, +  title = {Power analysis of a general convolution algorithm mapped on a linear processor array}, +  journal = {Journal of {VLSI} Signal Processing Systems}, +  year = {2004}, +  volume = {37}, +  pages = {5-19}, +  doi = {10.1023/B:VLSI.0000017000.91377.a7}, +  url = {http://www.springerlink.com/content/v647p7654q106184/}, +  abstract = {We explore the energy dissipation of the Linear Processor Array (LPA) as a function of the number of available resources (Processor Units P) within the array. This number P is an important parameter, as it reflects performance, relates parallel processing to energy dissipation, and influences the scaling of the various parts of the LPA architecture (memory, address generator, communication network). To make a comparison of the different design variants for a fixed datawidth possible, we propose a high-level energy dissipation model of the processor, which is based on a detailed analysis of a general convolution algorithm. It is shown that the energy dissipation of the LPA can roughly be described by the relationship Etotal proportional to N/P with N presenting the datawidth in pixels. This relationship is derived from two observations: first, the largest contribution to Etotal is formed by the energy dissipated by the memories, and second, in our model of the LPA, the datawidth of the memories corresponds with the number of pixels N to be processed, which results in an increase of the access rate when P decreases. Furthermore, we have shown that the energy dissipation caused by communication within the LPA, increases with increasing number of resources: the trade-off between communication versus computation in parallel computing. This turns out to be negligible in the total energy dissipation, and we therefore conclude, that the optimum solution is found, when a full number of resources is applied within the LPA.}, +  file = {Mann04.pdf:pdf\\Mann04.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY}, +  number = {1}, +  month = {5}, +  gsid = {10855059478762708879}, +  gscites = {7}, +} + +@inproceedings{Mann04a, +  author = {R. Manniesing AND W. J. Niessen}, +  title = {Local Speed Functions in Level Set Based Vessel Segmentation}, +  booktitle = MICCAI, +  year = {2004}, +  volume = {3216}, +  series = LNCS, +  pages = {475-482}, +  doi = {10.1007/978-3-540-30135-6_58}, +  abstract = {A new segmentation scheme is proposed for 3D vascular tree delineation in CTA data sets, which has two essential features. First, the segmentation is carried out locally in a small volume of interest (VOI), second, a global topology estimation is made to initialize a new VOI. The use of local VOI allows that parameter settings for the level set speed function can be optimally set depending on the local image content, which is advantageous especially in vascular tree segmentation where contrast may change significantly, especially in the distal part of the vascular. Moreover, a local approach is significantly faster. A comparison study on five CTA data sets showed that our method has the potential to segment larger part of the vessel tree compared to a similar global level set based segmentation, and in substantially less computation time.}, +  file = {Mann04a.pdf:pdf\\Mann04a.pdf:PDF}, +  optnote = {DIAG, RADIOLOGY}, +  gsid = {7566048489987331517}, +  gscites = {41}, +} + +@inproceedings{Mann04b, +  author = {R. Manniesing and B. K. Velthuis and M. van Leeuwen and W. J.
Niessen}, + title = {Skeletonization for re-initialization in level set-based vascular tree segmentation}, + booktitle = MI, + year = {2004}, + volume = {5370}, + series = SPIE, + pages = {506-514}, + doi = {10.1117/12.533045}, + abstract = {An extension to level set based segmentation is proposed for vascular tree delineation. The method starts with topology extraction, by a shape constrained level set evolution steered by a strictly positive, image base speed function to ensure some oversegmentation. Next, the skeleton of the resulting oversegmentation is determined, which then is used to initialise another level set steered by a speed function with both negative and positive speed forces based on image features, to obtain a most accurate segmentation. The novelty of our approach lies in the shape constraint that is imposed implicitly on the first level set evolution. We apply repeatedly re-initializations of this evolution with a topology preserving skeleton of the current zero level set. We compare this method with a plain level set evolution steered by the same full range speed function. Both are initialised by placing a single seed point at the root of the vessel tree. Pilot experiments on twelve multislice CT data sets of the Circle of Willis show that our method is capable of segmenting the smaller branches at the distal part of the vessel tree structures and has the potential to segment vessels which are distal to a severe stenosis or occlusion.}, + file = {Mann04b.pdf:pdf\\Mann04b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {7644005351394539594}, + gscites = {6}, +} + +@inproceedings{Mann05a, + author = {R Manniesing and W.J. Niessen}, + title = {Multiscale vessel enhancing diffusion in {CT} angiography noise filtering}, + booktitle = IPMI, + year = {2005}, + volume = {3565}, + series = LNCS, + pages = {138--149}, + doi = {10.1007/11505730_12}, + url = {http://www.ncbi.nlm.nih.gov/pubmed/17354691}, + abstract = {Filtering of vessel structures in medical images by analyzing the second order information or the Hessian of the image, is a well known technique. In this work we incorporate Frangi's multiscale vessel filter, which is based on a geometrical analysis of the Hessian' eigenvectors, into a nonlinear, anisotropic diffusion scheme, such that diffusion mainly takes place along the vessel axis while diffusion perpendicular to this axis is inhibited. The multiscale character of the vesselness filter ensures an equally good response for varying vessel radii. The first, theoretical contribution of this paper is the modification of the original formulation of this vessel filter, such that it becomes a smooth function on its domain which is a necessary condition imposed by the diffusion process to ensure well-posedness. The second contribution concerns the application of noise filtering of 3D synthetic, phantom computed tomography (CT) and patient CT data. It is shown that the method is very effective in noise filtering, illustrating its potential as a preprocessing step in the analysis of low dose CT angiography.}, + file = {Mann05a.pdf:pdf\\Mann05a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17354691}, + gsid = {3693686481660026501}, + gscites = {46}, +} + +@inproceedings{Mann06, + author = {R. Manniesing and W. J. 
Niessen}, + title = {Shape Constrained Vessel Centerline Extraction by Integrating Surface Evolution and Topology Analysis}, + booktitle = ISBI, + year = {2006}, + pages = {165-168}, + doi = {10.1109/ISBI.2006.1624878}, + abstract = {A novel approach for vessel axis tracking is presented based on surface evolution in 3D. The main idea is to guide the evolution by analyzing the topology of intermediate segmentation results, and in particular, to impose shape constraints on the topology. For example, the topology can be constrained to represent a bifurcation, which can be imposed by extracting three different connected paths with maximum length from the skeleton of intermediate segmentation results. The evolving surface is then re-initialized with the newly found topology. Re-initialization is a crucial step since it creates probing behavior of the evolving front and prevents the surface from leaking into the background. The method was evaluated in two CTA applications (i) extracting the internal carotid arteries including the region in which they traverse through the skull base, which is challenging due to the close proximity of bone structures and overlap in intensity values, and (ii) extracting the carotid bifurcation, some of them severely stenosed and most of them containing calcifications. Using only the image gradient as the image force in the surface evolution and a single seed point as initialization, the method was successful in 80% of ten internal carotids in five patients, and 80% of ten carotid bifurcations in five patients, respectively}, + file = {Mann06.pdf:pdf\\Mann06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2365811462521934118}, + gscites = {4}, +} + +@article{Mann06a, + author = {R. Manniesing and B. K. Velthuis and M. S. van Leeuwen and I. C. van der Schaaf and P. J. van Laar and W. J. Niessen}, + title = {Level set based cerebral vasculature segmentation and diameter quantification in {CT} angiography}, + journal = MIA, + year = {2006}, + volume = {10}, + pages = {200--214}, + doi = {10.1016/j.media.2005.09.001}, + url = {http://www.sciencedirect.com/science?_ob=ArticleURL&_udi=B6W6Y-4HG6CBB-1&_user=499884&_coverDate=04%2F30%2F2006&_rdoc=1&_fmt=&_orig=search&_sort=d&view=c&_acct=C000024499&_version=1&_urlVersion=0&_userid=499884&md5=49d96b0e385788104d5c16c05d9ad4df}, + abstract = {A level set based method is presented for cerebral vascular tree segmentation from computed tomography angiography (CTA) data. The method starts with bone masking by registering a contrast enhanced scan with a low-dose mask scan in which the bone has been segmented. Then an estimate of the background and vessel intensity distributions is made based on the intensity histogram which is used to steer the level set to capture the vessel boundaries. The relevant parameters of the level set evolution are optimized using a training set. The method is validated by a diameter quantification study which is carried out on phantom data, representing ground truth, and 10 patient data sets. The results are compared to manually obtained measurements by two expert observers. In the phantom study, the method achieves similar accuracy as the observers, but is unbiased whereas the observers are biased, i.e., the results are 0.00+/-0.23 vs. -0.32+/-0.23 mm. Also, the method's reproducibility is slightly better than the inter-and intra-observer variability. 
In the patient study, the method is in agreement with the observers and also, the method's reproducibility -0.04+/-0.17 mm is similar to the inter-observer variability 0.06+/-0.17 mm. Since the method achieves comparable accuracy and reproducibility as the observers, and since the method achieves better performance than the observers with respect to ground truth, we conclude that the level set based vessel segmentation is a promising method for automated and accurate CTA diameter quantification}, + file = {Mann06a.pdf:pdf\\Mann06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {16263325}, + month = {4}, + gsid = {15962511962349506772}, + gscites = {152}, +} + +@article{Mann06b, + author = {Rashindra Manniesing and Max A Viergever and Wiro J Niessen}, + title = {Vessel enhancing diffusion: a scale space representation of vessel structures}, + journal = MIA, + year = {2006}, + volume = {10}, + pages = {815--825}, + doi = {10.1016/j.media.2006.06.003}, + abstract = {A method is proposed to enhance vascular structures within the framework of scale space theory. We combine a smooth vessel filter which is based on a geometrical analysis of the Hessian's eigensystem, with a non-linear anisotropic diffusion scheme. The amount and orientation of diffusion depend on the local vessel likeliness. Vessel enhancing diffusion (VED) is applied to patient and phantom data and compared to linear, regularized Perona-Malik, edge and coherence enhancing diffusion. The method performs better than most of the existing techniques in visualizing vessels with varying radii and in enhancing vessel appearance. A diameter study on phantom data shows that VED least affects the accuracy of diameter measurements. It is shown that using VED as a preprocessing step improves level set based segmentation of the cerebral vasculature, in particular segmentation of the smaller vessels of the vasculature.}, + file = {Mann06b.pdf:pdf\\Mann06b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {16876462}, + month = {12}, + gsid = {15693491580876283283}, + gscites = {285}, +} + +@conference{Mann06c, + author = {R. Manniesing and W. J. Niessen}, + title = {Automated Detection of the Internal Carotid Arteries in Multislice Cerebral {CT} Angiography}, + booktitle = RSNA, + year = {2006}, + abstract = {PURPOSE: Automated Internal Carotid Artery (ICA) detection, as preprocessing step for fully automatic vascular segmentation, diameter quantification or aneurysms detection in cerebral Computed Tomography Angiography (CTA). METHOD AND MATERIAL: A new three step method to automatically detect seed points in the Internal Carotid Artery in CTA is developed and evaluated. First, the image entropy per slice is computed, from which a cross sectional 2D plane which contains the carotids can automatically be defined by selecting the plane with maximum information. Subsequently, an automatic segmentation of high intensity objects in this region is performed. Finally, a Hough transform is applied to detect two structures which have the most circular shape. The method has been evaluated on 30 randomly selected patients who were examined for acute cerebrovascular events or for screening purposes. The data, acquired on a 16-slice CT scanner (Philips Mx8000), consisted of approximately 300 slices (512x512) with an in plane resolution of 0.3125x0.3125 mm and slice spacing of 0.5 mm. Visual inspection was used to determine whether the carotid arteries were correctly detected. 
RESULTS: In 60 ICAs of the 30 patients, a success rate of 91.6% (55/60) was achieved. CONCLUSION: Automated seed point placement in the ICA is feasible, which facilitates automatic initialization of e.g. vessel segmentation and quantification. CLINICAL RELEVANCE/APPLICATION: Automated initialization enables complete automation of the processing of large CTA data sets, reduces operator time and is not subject to observer variability.}, + file = {Mann06c.pdf:pdf\\Mann06c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Mann06e, + author = {Rashindra Manniesing}, + title = {Image Analysis in {CT} Angiography}, + year = {2006}, + abstract = {In this thesis we develop and validate novel image processing techniques for the analysis of vascular structures in medical images. First a new type of filter is proposed which is capable of enhancing vascular structures while suppressing noise in the remainder of the image. This filter is based on a controlled smoothing process and results in a so-called scale space representation of the vascular structures. This representation can be used as preprocessing step for other vascular image processing techniques, such as vessel axis tracking and vessel segmentation. Second, a new method is proposed to track the central vessel axis in vessels with known topology. The method utilizes surface evolution which is typically applied to object segmentation. In our approach surface evolution is extended by repeatedly estimating and constraining the current vessel axis. This way, vessel information is gathered while the vessel axis tracking method proceeds along the vessel. Finally, an automated method is proposed for segmentation of the complete intracranial arterial vasculature. The methods are applied to, and evaluated on 3D head and neck Computed Tomography Angiography (CTA) data sets.}, + file = {Mann06e.pdf:pdf\\Mann06e.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {W.J. Niessen and M.A. Viergever}, + school = {Utrecht University}, + gsid = {5021212018097989389}, + gscites = {1}, + journal = {PhD thesis}, +} + +@article{Mann07, + author = {Rashindra Manniesing and Max A Viergever and Wiro J Niessen}, + title = {Vessel axis tracking using topology constrained surface evolution}, + journal = TMI, + year = {2007}, + volume = {26}, + pages = {309--316}, + doi = {10.1109/TMI.2006.891503}, + url = {17354637}, + abstract = {An approach to three-dimensional vessel axis tracking based on surface evolution is presented. The main idea is to guide the evolution of the surface by analyzing its skeleton topology during evolution, and imposing shape constraints on the topology. For example, the intermediate topology can be processed such that it represents a single vessel segment, a bifurcation, or a more complex vascular topology. The evolving surface is then re-initialized with the newly found topology. Re-initialization is a crucial step since it creates probing behavior of the evolving front, encourages the segmentation process to extract the vascular structure of interest and reduces the risk on leaking of the curve into the background. The method was evaluated in two computed tomography angiography applications: (i) extracting the internal carotid arteries including the region in which they traverse through the skull base, which is challenging due to the proximity of bone structures and overlap in intensity values, and (ii) extracting the carotid bifurcations including many cases in which they are severely stenosed and contain calcifications. 
The vessel axis was found in 90% (18/20 internal carotids in ten patients) and 70% (14/20 carotid bifurcations in a different set of ten patients) of the cases.}, + file = {Mann07.pdf:pdf\\Mann07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {17354637}, + month = {3}, + gsid = {13078016742785481388}, + gscites = {101}, +} + +@inproceedings{Mann07a, + author = {R. Manniesing and W.J. Niessen}, + title = {Automatic Segmentation of the Internal Carotid Arteries through the Skull Base}, + booktitle = MI, + year = {2007}, + volume = {6512}, + series = SPIE, + pages = {65120I-1 --65120I-5}, + doi = {10.1117/12.705201}, + abstract = {An automatic method is presented to segment the internal carotid arteries through the difficult part of the skull base in CT angiography. The method uses the entropy per slice to select a cross sectional plane below the skull base. In this plane 2D circular structures are detected by the Hough transform. The center points are used to initialize a level set which evolves with a prior shape constraint on its topology. In contrast with some related vessel segmentation methods, our approach does not require the acquisition of an additional CT scan for bone masking. Experiments on twenty internal carotids in ten patients show that 19 seed points are correctly identified (95%) and 18 carotids (90%) are successfully segmented without any human interaction.}, + file = {Mann07a.pdf:pdf\\Mann07a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {8091387818955463937}, + gscites = {2}, +} + +@article{Mann08a, + author = {Rashindra Manniesing and Max A Viergever and Aad van der Lugt and Wiro J Niessen}, + title = {Cerebral arteries: fully automated segmentation from {CT} angiography--a feasibility study}, + journal = Radiology, + year = {2008}, + volume = {247}, + pages = {841--846}, + doi = {10.1148/radiol.2473070436}, + url = {http://radiology.rsnajnls.org/cgi/content/abstract/247/3/841}, + abstract = {The purpose of this study was to retrospectively assess the feasibility of a fully automated image postprocessing tool for the segmentation of the arterial cerebrovasculature from computed tomographic (CT) angiography in 27 patients (nine men, 18 women; mean age, 55 years; age range, 33-76 years) with subarachnoid hemorrhage. The institutional review board approved this study, and informed consent was waived. The proposed method, which does not require the acquisition of an additional CT scan for bone suppression, consists of the following: (a) automatic detection of the main arteries for initialization, (b) segmentation of these arteries through the skull base, and (c) suppression of the large veins near the skull. The parameters of this method were optimized on the training subset of nine patients, and the method was successful at segmentation of the arteries in 15 (83%) of the 18 remaining patients. The difference between automatic and manual diameter measurements was 0.0 mm +/- 0.4 (standard deviation). The study results showed that fully automated segmentation of the cerebral arteries is feasible.}, + file = {Mann08a.pdf:pdf\\Mann08a.pdf:PDF;Mann08a.png:png\\Mann08a.png:PNG image}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {18487538}, + month = {6}, + gsid = {12767658535289311636}, + gscites = {18}, +} + +@misc{Mann09, + author = {R. Manniesing and M.A. Viergever and W.J. 
Niessen}, + title = {Vessel Enhancing Diffusion}, + year = {2009}, + url = {http://www.insight-journal.org/browse/publication/314}, + abstract = {Recently, an implementation of the Vessel Enhancing Diffusion (VED) algorithm [3] using the Insight Toolkit (ITK) framework [2] has been proposed by Enquobahrie et al.[1]. In this paper we present an alternative implementation, for two reasons. First, in this implementation all the main functionality of the algorithm, including eigensystem, vesselness, tensor calculation and PDE discretization using a forward Euler scheme are now grouped together in one single class. Although this may come at the cost of code-reusability, it improves readibility and enables application specific code optimization. The second reason is the criterion of reproducibility. Source code, test environment and example data of the paper [3] are provided.}, + file = {Mann09.pdf:pdf\\Mann09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {18252001084369446470}, + gscites = {3}, +} + +@misc{Mann09a, + author = {R. Manniesing}, + title = {Read and Write Support for MevisLab Dicom/Tiff Format}, + year = {2009}, + url = {http://www.insight-journal.org/browse/publication/311}, + abstract = {MevisLab [2] is a development environment for medical image processing and visualization, which supports the reading and writing of combined dicom/tiff images. In this document we provide the source code (ImageIO factory) and testing data for the Insight Toolkit (ITK) framework [4].}, + file = {Mann09a.pdf:pdf\\Mann09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@misc{Mann09b, + author = {R. Manniesing}, + title = {DicomImport}, + year = {2009}, + url = {http://www.insight-journal.org/browse/publication/700}, + abstract = {We describe a new class itk::DicomDirectoryToImage which, given a directory consisting of raw dicom files, converts all found series to volumetric data with consistent filenaming based on the dicom headers. In addition various internal checks on the dicom files are made and reported, and the user has the possibility to add simple criteria to match with the series description. The main advantage of the class is the ease of processing and converting very large dicom databases in a consistent way.}, + file = {Mann09b.pdf:pdf\\Mann09b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Mann09c, + author = {R. Manniesing and D. Vukadinovic and S. Rozie and M. Schaap and A. {van der Lugt} and W.J. Niessen}, + title = {Automated {CTA} Lumen Segmentation of the Atherosclerotic Carotid Artery Bifurcation}, + booktitle = RSNA, + year = {2009}, + abstract = {PURPOSE: To develop an image processing tool that requires minimal user interaction and automatically extracts the atherosclerotic bifurcation in CTA METHOD AND MATERIALS: We propose a two step approach: First, the central vessel axis is obtained based on path tracking between three user defined points. Second, starting from this path, the final segmentation is automatically obtained using a level set which is steered by a novel, slice feature-based speed function. We evaluate the method on a large data set of 234 carotid bifurcations of 129 ischemic stroke patients with atherosclerotic disease. The results are compared to manually obtained lumen segmentations. Parameter optimization is carried out on a subset of 30 representative carotid bifurcations. 
RESULTS: With the optimized parameter settings the method successfully tracked the central vessel paths in 201 of the remaining 204 bifurcations (99%) which were not part of the training set. Comparison with manually drawn segmentations shows an average overlap of 94% which is slightly less than the overlap between observers (96%). CONCLUSION: To our knowledge is this the first method which has been tested on such a large number of patient data. The results show that robust and accurate segmentation of the atherosclerotic bifurcation in CT angiography is feasible. CLINICAL RELEVANCE/APPLICATION: This work is highly relevant for clinical research/practice: lumen segmentation of the diseased bifurcation is the first step for stenosis grading, plaque characterization and hemodynamic modeling.}, + file = {Mann09c.pdf:pdf\\Mann09c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Mann10, + author = {R. Manniesing and M. Schaap and S. Rozie and R. Hameeteman and D. Vukadinovic and A. {van der Lugt} and W.J. Niessen}, + title = {Robust {CTA} lumen segmentation of the atherosclerotic carotid artery bifurcation in a large patient population}, + journal = MIA, + year = {2010}, + volume = {14}, + pages = {759--769}, + doi = {10.1016/j.media.2010.05.001}, + abstract = {We propose and validate a semi-automatic method for lumen segmentation of the carotid bifurcation in computed tomography angiography (CTA). First, the central vessel axis is obtained using path tracking between three user-defined points. Second, starting from this path, the segmentation is automatically obtained using a level set. The cost and speed functions for path tracking and segmentation make use of intensity and homogeneity slice-based image features. The method is validated on a large data set of 234 carotid bifurcations of 129 ischemic stroke patients with atherosclerotic disease. The results are compared to manually obtained lumen segmentations. Parameter optimization is carried out on a subset of 30 representative carotid bifurcations. With the optimized parameter settings the method successfully tracked the central vessel paths in 201 of the remaining 204 bifurcations (99\%) which were not part of the training set. Comparison with manually drawn segmentations shows that the average overlap between the method and observers is similar (for the inter-observer set the results were 92\% vs. 87\% and for the intra-observer set 94\% vs. 94\%). Therefore the method has potential to replace the manual procedure of lumen segmentation of the atherosclerotic bifurcation in CTA.}, + file = {:pdf\\Mann10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {20605737}, + month = {12}, + gsid = {9948311098295922507}, + gscites = {45}, + ss_id = {75cd956b09c2eda5131b334555c749a6c314e486}, + all_ss_ids = {['75cd956b09c2eda5131b334555c749a6c314e486']}, +} + +@conference{Mann10a, + author = {R. Manniesing and M. Koek and D. Goei and J.J. Hermans and W.J. Niessen and D. Poldermans}, + title = {Fully Automated Abdominal Fat Quantification using {3D} Computed Tomography Angiography}, + booktitle = ECR, + year = {2010}, + abstract = {Purpose: Abdominal tissue type differentation and quantification is increasingly becoming important in the pathophysiology of e.g. cardiovacular disease [1] and metabolic syndrome [2,3]. However 3D image processing methods for the automated analysis of computed tomography (CT) data providing objective measurements are currently lacking. 
Current methods are limited to 2D [4-6], or have been developed for magnetic resonance imaging [7,8]. The purpose of this work is to develop a fully automated and intrinsically 3D image analysis method which is capable of labeling and quantifying the subcutaneous and visceral fat of the abdomen in CT. Methods and Materials: A three step approach is adopted: First, the patient body is separated from the background and table by applying morphological operators. Then a 3D mesh is deformed which is steered by image intensity and gradient information to delineate the abdominal region. This region is defined by the pelvis, diaphragm and the muscle layer of the abdominal wall. Finally, the subcutaneous and visceral fat are separated by thresholding using the Hounsfield units inside and outside the abdominal region. The method is applied to CTA scans of 14 patients which were scheduled for noncardiac vascular surgery. Weight, BMI index and the level of triglyceride in blood of each patient were acquired. Fat volumes were measured using the proposed method. Results: Correlations between total, visceral, subcutaneous fat volumes (TV, VV, SV) and weight, BMI and triglyceride were investigated. Significant correlations were found for (Pearson's coefficient, only the strongest correlations are reported, significance p<0.01 for all): Weight-TV r=0.821, BMI-TV r=0.868, triglyceride-VV r=0.684. Conclusion: To our knowledge we have presented the first intrinsically 3D method which is capable of fully automatically segmenting the subcutaneous and visceral fat of the abdomen in CTA. Preliminary results suggest a correlation between triglyceride and visceral fat volume.}, + file = {Mann10a.pdf:pdf\\Mann10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Mann11, + author = {Mann, Ritse M. and Veltman, Jeroen and Huisman, Henkjan and Boetes, Carla}, + title = {Comparison of enhancement characteristics between invasive lobular carcinoma and invasive ductal carcinoma}, + journal = JMRI, + year = {2011}, + volume = {34}, + pages = {293--300}, + doi = {10.1002/jmri.22632}, + abstract = {To compare enhancement characteristics between invasive lobular carcinoma (ILC) and invasive ductal carcinoma (IDC) on contrast enhanced MRI of the breast and to observe the magnitude of eventual differences as these may impair the diagnostic value of breast MRI in ILC. We performed an analysis of enhancement characteristics on biphasic breast MRI in a series of 136 patients (103 IDC, 33 ILC) using an in-house developed application for pharmacokinetic modeling of contrast enhancement and a commercially available CAD application that evaluated the contrast-enhancement versus time curve. Pharmacokinetic analysis showed that the most enhancing voxels in IDC had significantly higher K(trans) -values than in ILC (P < 0.01). No difference in v(e) -values was noted between groups. Visual assessment of contrast-enhancement versus time curves revealed wash-out curves to be less common in ILC (48\% versus 84\%). However, when using the CAD-application to assess the most malignant looking curve, the difference was blotted out (76\% versus 86\%). ILC enhances slower than IDC but peak enhancement is not significantly less. 
The use of a CAD-application may help to determine the most malignant looking contrast-enhancement versus time curve, and hence facilitates lesion classification.}, + file = {Mann11.pdf:pdf\\Mann11.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {21780225}, + month = {7}, + gsid = {4842131897026662112}, + gscites = {22}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/97748}, + ss_id = {50d7d177be4b1e7fa5f9c7371604718f128d6528}, + all_ss_ids = {['50d7d177be4b1e7fa5f9c7371604718f128d6528']}, +} + +@conference{Mann12a, + author = {R. Mann and R. Mus and C. Geppert and C. Frentz and N. Karssemeijer and HJ. Huisman and B. Platel}, + title = {Initial maximum slope of the contrast enhancement versus time curve for dynamic evaluation of breast lesions on ultrafast breast MRIs}, + booktitle = ECR, + year = {2012}, + url = {http://ipp.myesr.org/esr/ecr2012/index.php?v=sessiond&seid=247&ippwwwsid=5jnho9nq3agreqirle8hkru2l2}, + abstract = {Purpose: Modern viewsharing sequences allow dynamic whole breast MRI at diagnostic spatial resolution within 5 seconds, showing the inflow of contrast in breast abnormalities. We evaluate the maximum slope of initial contrast enhancement as a heuristic parameter for differentiating benign from malignant disease and compare its value to classic 3 timepoint analysis. Methods and Materials: We employed a bi-temporal breast MRI protocol on a 3 T MRI using a 16 channel bilateral breast coil. High-resolution VIBE acquisitions (0.8*0.8*1.0 mm, temporal resolution 94s) were interleaved with a series of ultrafast TWIST acquisitions (1*1*2.5 mm, temporal resolution 4.2s) during the inflow of the contrast agent. Forty-two consecutive patients with 43 enhancing abnormalities (21 benign and 22 malignant) presenting between November 2010 and July 2011 were included. We measured the maximum slope of the relative enhancement versus time curve on the TWIST acquisitions and recorded curve type of the lesions on the VIBE acquisitions. ROC analysis was performed to compare diagnostic performance. Results: The maximum slope of the relative enhancement versus time curve was significantly better in discriminating between benign and malignant disease than the curve type (Az 0.865 vs. Az 0.723, p = 0.036). Cutoff values of 15%/sec and 3.15%/sec can be used to differentiate high-risk lesions (>85% malignancies) from intermediate and low-risk lesions (<10% malignancies). This simplification yields an Az of 0.808. Conclusion: The initial maximum slope provides more diagnostic information than the curve types and can therefore be used to assess lesion dynamics. This can substantially shorten current scan protocols.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Mann12b, + author = {R. Mann and R. Mus and C. Geppert and C. Frentz and N. Karssemeijer and HJ. Huisman and B. Platel}, + title = {Dynamic analysis of breast lesions: Can we use the wash-in phase instead of the wash-out phase?}, + booktitle = ISMRM, + year = {2012}, + file = {:pdf/Mann12b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {873517841017097603}, + gscites = {2}, +} + +@article{Mann14, + author = {Mann, Ritse M. and Mus, Roel D. 
and van Zelst, Jan and Geppert, Christian and Karssemeijer, Nico and Platel, Bram}, + title = {A Novel Approach to Contrast-Enhanced Breast Magnetic Resonance Imaging for Screening: High-Resolution Ultrafast Dynamic Imaging}, + journal = IR, + year = {2014}, + volume = {49}, + pages = {579-585}, + doi = {10.1097/RLI.0000000000000057}, + abstract = {The use of breast magnetic resonance imaging (MRI) as screening tool has been stalled by high examination costs. Scan protocols have lengthened to optimize specificity. Modern view-sharing sequences now enable ultrafast dynamic whole-breast MRI, allowing much shorter and more cost-effective procedures. This study evaluates whether dynamic information from ultrafast breast MRI can be used to replace standard dynamic information to preserve accuracy. We interleaved 20 ultrafast time-resolved angiography with stochastic trajectory (TWIST) acquisitions (0.9 x 1 x 2.5 mm, temporal resolution, 4.3 seconds) during contrast inflow in a regular high-resolution dynamic MRI protocol. A total of 160 consecutive patients with 199 enhancing abnormalities (95 benign and 104 malignant) were included. The maximum slope of the relative enhancement versus time curve (MS) obtained from the TWIST and curve type obtained from the regular dynamic sequence as defined in the breast imaging reporting and data system (BIRADS) lexicon were recorded. Diagnostic performance was compared using receiver operating characteristic analysis. All lesions were visible on both the TWIST and standard series. Maximum slope allows discrimination between benign and malignant disease with high accuracy (area under the curve, 0.829). Types of MS were defined in analogy to BIRADS curve types: MS type 3 implies a high risk of malignancy (MS >13.3\%/s; specificity, 85\%), MS type 2 yields intermediate risk (MS <13.3\%/s and >6.4\%/s), and MS type 1 implies a low risk (MS <6.4\%/s; sensitivity, 90\%). This simplification provides a much higher accuracy than the much lengthier BIRADS curve type analysis does (area under the curve, 0.812 vs 0.692; P = 0.0061). Ultrafast dynamic breast MRI allows detection of breast lesions and classification with high accuracy using MS. This allows substantial shortening of scan protocols and hence reduces imaging costs, which is beneficial especially for screening.}, + file = {Mann14.pdf:pdf/Mann14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {24691143}, + month = {9}, + gsid = {10399546970199943856}, + gscites = {148}, + ss_id = {afb28ac7f8988cf0da21a8b18a2cd37ac5350917}, + all_ss_ids = {['afb28ac7f8988cf0da21a8b18a2cd37ac5350917']}, +} + +@article{Mann16, + author = {Rashindra Manniesing and Marcel T. H. Oei and Bram {van Ginneken} and Mathias Prokop}, + title = {Quantitative Dose Dependency Analysis of Whole-Brain {CT} Perfusion Imaging}, + journal = Radiology, + year = {2016}, + volume = {278}, + number = {1}, + month = {1}, + pages = {190-197}, + doi = {10.1148/radiol.2015142230}, + abstract = {Purpose: To quantitatively assess whether decreasing total radiation dose of the image acquisition protocol has an effect on cerebral CT perfusion values in patients with acute stroke. Materials and Methods: This retrospective study was approved by the institutional ethics committee, and informed consent was waived. Twenty consecutive patients with ischemic stroke who underwent CT perfusion imaging with a 320-detector row CT scanner were included. 
A standard acquisition protocol was used, which was started 5 seconds after injection of a contrast agent, with a scan at 200 mAs, followed after 4 seconds by 13 scans, one every 2 seconds, at 100 mAs, and then five scans, one every 5 seconds, at 75 mAs. The total examination had an average effective dose of 5.0 mSv. For each patient, a patient-specific digital perfusion phantom was constructed to simulate the same protocol at a lower total dose (0.5-5.0 mSv, with stepped doses of 0.5 mSv). The lowest setting for which the maximum mean difference remained within 5% of the reference standard (at 5.0 mSv) was marked as the optimal setting. At the optimal setting, Pearson correlation coefficients were calculated to assess correlations with the reference values, and paired t tests were performed to compare the means. Results: At 2.5 mSv, the maximum mean differences in values from those of the reference standard were 4.5%, 5.0%, and 1.9%, for cerebral blood flow, cerebral blood volume, and mean transit time, respectively. Pearson correlation coefficients of perfusion values for white matter and gray matter were 0.864-0.917, and all differences were significant (P < .0001). Paired t tests showed no significant differences between the reference standard and optimal settings (P = .089-.923). Conclusion: The total dose of a clinical cerebral CT perfusion protocol can be lowered to 2.5 mSv, with only minor quantitative effects on perfusion values. Dose reduction beyond this point resulted in overestimation of perfusion values.}, + file = {Mann16.pdf:pdf\\Mann16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26114226}, + gsid = {681530597847692433}, + gscites = {27}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/171318}, + ss_id = {0023c0af3833ba26698ff22e236f3625f1533e86}, + all_ss_ids = {['0023c0af3833ba26698ff22e236f3625f1533e86']}, +} + +@article{Mann16a, + author = {Rashindra Manniesing and Christoph Brune and Bram van Ginneken and Mathias Prokop}, + title = {A {4D} {CT} Digital Phantom of an Individual Human Brain for Perfusion Analysis}, + journal = PRJ, + year = {2016}, + volume = {4}, + month = {11}, + pages = {e2683}, + doi = {10.7717/peerj.2683}, + url = {https://doi.org/10.7717/peerj.2683}, + abstract = {Brain perfusion is of key importance to assess brain function. Modern CT scanners can acquire perfusion maps of the cerebral parenchyma in vivo at submillimeter resolution. These perfusion maps give insights into the hemodynamics of the cerebral parenchyma and are critical for example for treatment decisions in acute stroke. However, the relations between acquisition parameters, tissue attenuation curves, and perfusion values are still poorly understood and cannot be unraveled by studies involving humans because of ethical concerns. We present a 4D CT digital phantom specific for an individual human brain to analyze these relations in a bottom-up fashion. Validation of the signal and noise components was based on 1000 phantom simulations of 20 patient imaging data. This framework was applied to quantitatively assess the relation between radiation dose and perfusion values, and to quantify the signal-to-noise ratios of penumbra regions with decreasing sizes in white and gray matter. 
This is the first 4D CT digital phantom that enables to address clinical questions without having to expose the patient to additional radiation dose.}, + file = {Mann16a.pdf:pdf\\Mann16a.pdf:PDF}, + optnote = {DIAG}, + pmid = {27917312}, + gsid = {8010355516467544387}, + gscites = {3}, + ss_id = {198800090856d9799b21c965032eab972e1ee578}, + all_ss_ids = {['198800090856d9799b21c965032eab972e1ee578']}, +} + +@conference{Mann16c, + author = {Rashindra Manniesing and Sil {van de Leemput} and Mathias Prokop and Bram {van Ginneken}}, + title = {White Matter and Gray Matter Segmentation in {4D CT} Images of Acute Ischemic Stroke Patients: a Feasibility Study}, + booktitle = RSNA, + year = {2016}, + abstract = {PURPOSE: White matter (WM) and gray matter (GM) respond differently to ischemia and thrombolytic treatment. Being able to differentiate WM/GM in CT enables tissue dependent perfusion analysis and automated detection of stroke related pathology. In this work we show the feasibility of segmenting WM/GM in 4DCT images of acute ischemic stroke patients. + + METHOD AND MATERIALS: In total 18 stroke patients who received both a 4DCT and followup MR scan were included in this retrospective study. CT imaging was done on a 320 row scanner with 19 or 24 volumetric acquisitions after contrast injection resulting in 512x512x320 isotropic voxels of 0.5 mm. T1w imaging was done on a 1.5T MR scanner resulting in approximately 384x318x26 voxels of 0.6x0.6x5.5 mm. The MR image was segmented with FSL tools and served as reference standard to train and evaluate the method. The method starts with brain segmentation by atlas registration followed by a refinement using a geodesic active contour with dominating advection term steered by a gradient based speed function. Within the segmented brain, three groups of features are then extracted: intensity, contextual and temporal, including a multiscale representation of the temporal average image weighted according to the exposures of the individual time points to maximize the signaltonoise ratios. In total 120 features were then fed into a nonlinear support vector machine with Gaussian radial basis kernel. A leaveonepatient out cross validation was carried out. Segmentation results were visually inspected for overall quality. Dice coefficient (DC) and 95th percentile Hausdorff distance (HD) were reported. + + RESULTS: The segmentations were evaluated as good with the separation of WM/GM at the cortex good to excellent. GM segmentation at the cortex had generally less thickness variations compared to the reference standard. DC were 0.79+-0.06 and 0.77+-0.06, 95% HD were 8.71+-3.22 and 7.11+-3.93 mm, for WM and GM, respectively. + + CONCLUSION: WM and GM segmentation in 4DCT is feasible. + + + CLINICAL RELEVANCE/APPLICATION: WM and GM segmentation in 4DCT enables tissue dependent perfusion analysis and may increase sensitivity of detecting core and penumbra. Volume measurements of WM and GM normalized with the contralateral side may yield an important diagnostic parameter in the acute phase of ischemia.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Mann17, + author = {Rashindra Manniesing and Marcel T.H. Oei and Luuk J Oostveen and Jaime Melendez and Ewoud J. Smit and Bram Platel and Clara I S\'{a}nchez and Frederick J.A. 
Meijer and Mathias Prokop and Bram van Ginneken}, + title = {White Matter and Gray Matter Segmentation in {4D} Computed Tomography}, + journal = NATSCIREP, + year = {2017}, + volume = {7}, + number = {119}, + doi = {10.1038/s41598-017-00239-z}, + abstract = {Modern Computed Tomography (CT) scanners are capable of acquiring contrast dynamics of the whole brain, adding functional to anatomical information. Soft tissue segmentation is important for subsequent applications such as tissue dependent perfusion analysis and automated detection and quantification of cerebral pathology. In this work a method is presented to automatically segment white matter (WM) and gray matter (GM) in contrast- enhanced 4D CT images of the brain. The method starts with intracranial segmentation via atlas registration, followed by a refinement using a geodesic active contour with dominating advection term steered by image gradient information, from a 3D temporal average image optimally weighted according to the exposures of the individual time points of the 4D CT acquisition. Next, three groups of voxel features are extracted: intensity, contextual, and temporal. These are used to segment WM and GM with a support vector machine. Performance was assessed using cross validation in a leave-one-patient-out manner on 22 patients. Dice coefficients were 0.81$\pm$0.04 and 0.79$\pm$0.05, 95% Hausdorff distances were 3.86$\pm$1.43 and 3.07$\pm$1.72 mm, for WM and GM, respectively. Thus, WM and GM segmentation is feasible in 4D CT with good accuracy.}, + file = {Mann17.pdf:pdf\\Mann17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28273920}, + month = {3}, + gsid = {7982996011980143322}, + gscites = {23}, + all_ss_ids = {['682e8febc5d053766fefe1ee2de5ca2ff39762f2', '7f1eabb6591699816ccd89e7765a66dc54a7175c', '86bab119f07a492350c6cc7bd06ed91c73ee807a', 'e7f15929ff71a5a33af66ece92d707a7be7aebc8']}, +} + +@inproceedings{Mann98, + author = {R. Manniesing and R.P. Kleihorst and R. {van der Vleuten} and E.A. Hendriks}, + title = {Implementation of Lossless Coding for Embedded Compression}, + booktitle = {{IEEE} Program for Research on Integrated Systems and Circuits/Workshop on Circuits, Systems and Signal Processing}, + year = {1998}, + abstract = {Hybrid video compression schemes need to store the local decoded image for predictive coding. Traditionally, this image is stored in the spatial domain needing almost 5 Mbit of memory for main-level image format. The amount of storage space can be significantly reduced if the data is stored in the compressed domain itself, using embedded compression techniques. Embedded compression typically has a lossless part. This function is subject to various constraints imposed by the remainder of the system. In this paper we arrive at three candidate algorithms for the lossless compression function. These are: implementing a zoning approach, implementing a variant of vector-coding using a codebook and implementing position-coding respectively. The three algorithms are analyzed for performance and hardware costs.}, + file = {Mann98.pdf:pdf\\Mann98.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16505865606804775897}, + gscites = {8}, +} + +@inproceedings{Mann99, + author = {R. Manniesing and I. 
Karkowski and H. Corporaal}, + title = {Evaluation of a Potential for Automatic {SIMD} Parallelization of Embedded Applications}, + booktitle = {Fifth Annual Conference of the Advanced School for Computing and Imaging}, + year = {1999}, + pages = {103--110}, + abstract = {This paper investigates the potential for automatic mapping of typical embedded applications to architectures with multimedia instruction set extensions. For this purpose a (pattern matching based) code transformation engine is used. The experiments show that about 85% of the loops which are suitable for Single Instruction Multiple Data (SIMD) parallelization can be automatically recognized and mapped}, + file = {Mann99.pdf:pdf\\Mann99.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {151484521720707710}, + gscites = {3}, +} + +@mastersthesis{Mann99a, + author = {R. Manniesing}, + title = {Power Analysis of a General Convolution Algorithm mapped on a Linear Processor Array}, + abstract = {We explore the energy dissipation of the Linear Processor Array (LPA) as a function of the number of available resources (Processor Units P) within the array. This number P is an important parameter, as it reflects performance, relates parallel processing to energy dissipation, and influences the scaling of the various parts of the LPA architecture (memory, address generator, communication network). To make a comparison of the different design variants for a fixed datawidth possible, we propose a high-level energy dissipation model of the processor, which is based on a detailed analysis of a general convolution algorithm. It is shown that the energy dissipation of the LPA can roughly be described by the relationship Etotal ~ N/P with N presenting the datawidth in pixels. This relationship is derived from two observations: first, the largest contribution to Etotal is formed by the energy dissipated by the memories, and second, in our model of the LPA, the datawidth of the memories corresponds with the number of pixels N to be processed, which results in an increase of the access rate when P decreases. Furthermore, we have shown that the energy dissipation caused by communication within the LPA, increases with increasing number of resources: the trade-off between communication versus computation in parallel computing. 
This turns out to be negligible in the total energy dissipation, and we therefore conclude, that the optimum solution is found, when a full number of resources is applied within the LPA.}, + file = {:pdf/Mann04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Delft University of Technology, Electrical Engineering}, + year = {1999}, + journal = {Master thesis}, +} + +@inproceedings{Marc17, + author = {Marchesi, Agnese and Bria, Alessandro and Marrocco, Claudio and Molinara, Mario and Mordang, Jan-Jurre and Tortorella, Francesco and Karssemeijer, Nico}, + title = {The Effect of Mammogram Preprocessing on Microcalcification Detection with Convolutional Neural Networks}, + doi = {10.1109/cbms.2017.29}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/CBMS.2017.29}, + file = {Marc17.pdf:pdf\Marc17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)}, + citation-count = {7}, + automatic = {yes}, +} + +@article{Marc22, + author = {Marchesin, Stefano and Giachelle, Fabio and Marini, Niccolo and Atzori, Manfredo and Boytcheva, Svetla and Buttafuoco, Genziana and Ciompi, Francesco and Di Nunzio, Giorgio Maria and Fraggetta, Filippo and Irrera, Ornella and Muller, Henning and Primov, Todor and Vatrano, Simona and Silvello, Gianmaria}, + title = {Empowering digital pathology applications through explainable knowledge extraction tools.}, + doi = {10.1016/j.jpi.2022.100139}, + pages = {100139}, + volume = {13}, + abstract = {Exa-scale volumes of medical data have been produced for decades. In most cases, the diagnosis is reported in free text, encoding medical knowledge that is still largely unexploited. In order to allow decoding medical knowledge included in reports, we propose an unsupervised knowledge extraction system combining a rule-based expert system with pre-trained Machine Learning (ML) models, namely the Semantic Knowledge Extractor Tool (SKET). Combining rule-based techniques and pre-trained ML models provides high accuracy results for knowledge extraction. This work demonstrates the viability of unsupervised Natural Language Processing (NLP) techniques to extract critical information from cancer reports, opening opportunities such as data mining for knowledge extraction purposes, precision medicine applications, structured report creation, and multimodal learning. SKET is a practical and unsupervised approach to extracting knowledge from pathology reports, which opens up unprecedented opportunities to exploit textual and multimodal medical information in clinical practice. We also propose SKET eXplained (SKET X), a web-based system providing visual explanations about the algorithmic decisions taken by SKET. 
SKET X is designed/developed to support pathologists and domain experts in understanding SKET predictions, possibly driving further improvements to the system.}, + file = {Marc22.pdf:pdf\\Marc22.pdf:PDF}, + journal = {Journal of pathology informatics}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36268087}, + year = {2022}, + ss_id = {57c9e588f21034990f834f5f0610959332d3ed70}, + all_ss_ids = {['57c9e588f21034990f834f5f0610959332d3ed70']}, + gscites = {10}, +} + +@article{Mari21, + author = {Niccol{\`{o}} Marini and Sebastian Ot{\'{a}}lora and Damian Podareanu and Mart van Rijthoven and Jeroen van der Laak and Francesco Ciompi and Henning Muller and Manfredo Atzori}, + title = {Multi{\_}Scale{\_}Tools: A Python Library to Exploit Multi-Scale Whole Slide Images}, + doi = {10.3389/fcomp.2021.684521}, + volume = {3}, + abstract = {Algorithms proposed in computational pathology can allow to automatically analyze digitized tissue samples of histopathological images to help diagnosing diseases. Tissue samples are scanned at a high-resolution and usually saved as images with several magnification levels, namely whole slide images (WSIs). Convolutional neural networks (CNNs) represent the state-of-the-art computer vision methods targeting the analysis of histopathology images, aiming for detection, classification and segmentation. However, the development of CNNs that work with multi-scale images such as WSIs is still an open challenge. The image characteristics and the CNN properties impose architecture designs that are not trivial. Therefore, single scale CNN architectures are still often used. This paper presents Multi_Scale_Tools, a library aiming to facilitate exploiting the multiscale structure of WSIs. Multi_Scale_Tools currently include four components: a preprocessing component, a scale detector, a multi-scale CNN for classification and a multiscale CNN for segmentation of the images. The pre-processing component includes methods to extract patches at several magnification levels. The scale detector allows to identify the magnification level of images that do not contain this information, such as images from the scientific literature. The multi-scale CNNs are trained combining features and predictions that originate from different magnification levels. The components are developed using private datasets, including colon and breast cancer tissue samples. They are tested on private and public external data sources, such as The Cancer Genome Atlas (TCGA). The results of the library demonstrate its effectiveness and applicability. The scale detector accurately predicts multiple levels of image magnification and generalizes well to independent external data. The multi-scale CNNs outperform the single-magnification CNN for both classification and segmentation tasks. The code is developed in Python and it will be made publicly available upon publication. 
It aims to be easy to use and easy to be improved with additional functions.}, + file = {Mari21.pdf:pdf\\Mari21.pdf:PDF}, + journal = {Frontiers in Computer Science}, + optnote = {DIAG}, + publisher = {Frontiers Media {SA}}, + year = {2021}, + ss_id = {51c291c635a61682b1abcfc99c6e21079ba39b67}, + all_ss_ids = {['51c291c635a61682b1abcfc99c6e21079ba39b67']}, + gscites = {14}, +} + +@inproceedings{Mari21a, + author = {Marini, Niccolo and Otalora, Sebastian and Ciompi, Francesco and Silvello, Gianmaria and Marchesin, Stefano and Vatrano, Simona and Buttafuoco, Genziana and Atzori, Manfredo and Muller, Henning and Burlutskiy, N and Li, Z and Minhas, F and Peng, T and Rajpoot, N and Torbennielsen, B and Van Der Laak, J and Veta, M and Yuan, Y and Zlobec, I}, + title = {Multi-Scale Task Multiple Instance Learning for the Classification of Digital Pathology Images with Global Annotations}, + abstract = {Whole slide images (WSIs) are high-resolution digitized images of tissue samples, stored including different magnification levels. WSIs datasets often include only global annotations, available thanks to pathology reports. Global annotations refer to global findings in the high-resolution image and do not include information about the location of the regions of interest or the magnification levels used to identify a finding. This fact can limit the training of machine learning models, as WSIs are usually very large and each magnification level includes different information about the tissue. This paper presents a Multi-Scale Task Multiple Instance Learning (MuSTMIL) method, allowing to better exploit data paired with global labels and to combine contextual and detailed information identified at several magnification levels. The method is based on a multiple instance learning framework}, + file = {Mari21a.pdf:pdf\\Mari21a.pdf:PDF}, + optnote = {DIAG}, + year = {2021}, + ss_id = {695568836a9c87507435fa5cbae0f014666043b8}, + all_ss_ids = {['695568836a9c87507435fa5cbae0f014666043b8']}, + gscites = {11}, +} + +@article{Mari22, + author = {Marini, Niccolo and Marchesin, Stefano and Otalora, Sebastian and Wodzinski, Marek and Caputo, Alessandro and van Rijthoven, Mart and Aswolinskiy, Witali and Bokhorst, John-Melle and Podareanu, Damian and Petters, Edyta and Boytcheva, Svetla and Buttafuoco, Genziana and Vatrano, Simona and Fraggetta, Filippo and van der Laak, Jeroen and Agosti, Maristella and Ciompi, Francesco and Silvello, Gianmaria and Muller, Henning and Atzori, Manfredo}, + title = {Unleashing the potential of digital pathology data by training computer-aided diagnosis models without human annotations.}, + doi = {10.1038/s41746-022-00635-4}, + issue = {1}, + pages = {102}, + volume = {5}, + abstract = {The digitalization of clinical workflows and the increasing performance of deep learning algorithms are paving the way towards new methods for tackling cancer diagnosis. However, the availability of medical specialists to annotate digitized images and free-text diagnostic reports does not scale with the need for large datasets required to train robust computer-aided diagnosis methods that can target the high variability of clinical cases and data produced. This work proposes and evaluates an approach to eliminate the need for manual annotations to train computer-aided diagnosis tools in digital pathology. 
The approach includes two components, to automatically extract semantically meaningful concepts from diagnostic reports and use them as weak labels to train convolutional neural networks (CNNs) for histopathology diagnosis. The approach is trained (through 10-fold cross-validation) on 3'769 clinical images and reports, provided by two hospitals and tested on over 11'000 images from private and publicly available datasets. The CNN, trained with automatically generated labels, is compared with the same architecture trained with manual labels. Results show that combining text analysis and end-to-end deep neural networks allows building computer-aided diagnosis tools that reach solid performance (micro-accuracy = 0.908 at image-level) based only on existing clinical data without the need for manual annotations.}, + file = {Mari22.pdf:pdf\\Mari22.pdf:PDF}, + journal = {NPJ digital medicine}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35869179}, + year = {2022}, + ss_id = {79c9209f3892a5a704abb6be161ca7b41d02775d}, + all_ss_ids = {['79c9209f3892a5a704abb6be161ca7b41d02775d']}, + gscites = {28}, +} + +@inproceedings{Marr18, + author = {Marrocco, Claudio and Bria, Alessandro and Di Sano, Valerio and Borges, Lucas R. and Savelli, Benedetta and Molinara, Mario and Mordang, Jan-Jurre and Karssemeijer, Nico and Tortorella, Francesco}, + title = {Mammogram denoising to improve the calcification detection performance of convolutional nets}, + doi = {10.1117/12.2318069}, + year = {2018}, + abstract = {Recently, Convolutional Neural Networks (CNNs) have been successfully used to detect microcalcifications in mammograms. An important step in CNN-based detection is image preprocessing that, in raw mammograms, is usually employed to equalize or remove the intensity-dependent quantum noise. In this work, we show how removing the noise can significantly improve the microcalcification detection performance of a CNN. To this end, we describe the quantum noise with a uniform square-root model. Under this assumption, the generalized Anscombe transformation is applied to the raw mammograms by estimating the noise characteristics from the image at hand. In the Anscombe domain, noise is filtered through an adaptive Wiener filter. The denoised images are recovered with an appropriate inverse transformation and are then used to train the CNN-based detector. Experiments were performed on 1,066 mammograms acquired with GE Senographe systems. MC detection performance of a CNN on noise-free mammograms was statistically significantly higher than on unprocessed mammograms. Results were also superior in comparison with a nonparametric noise-equalizing transformation previously proposed for digital mammograms.}, + url = {http://dx.doi.org/10.1117/12.2318069}, + file = {Marr18.pdf:pdf\Marr18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {14th International Workshop on Breast Imaging (IWBI 2018)}, + citation-count = {5}, + automatic = {yes}, +} + +@mastersthesis{Mart21, + author = {Evgenia Martynova}, + title = {Artificial intelligence-assisted detection of adhesions on cine-MRI}, + abstract = {Adhesive disease, which commonly occurs as a postoperative complication, is a major cause of morbidity and places a substantial burden on healthcare worldwide. Currently, laparoscopy is the only accurate diagnostic technique for abdominal adhesions, which intrinsically involves health risks including the formation of new adhesions. Non-invasive diagnostic methods with similar reliability are lacking. 
In recent years using cine-MRI scans of the abdomen captured during respiration has demonstrated promising performance in the diagnosis of adhesions. However, correct interpretation of cine-MRI scans requires considerable radiological expertise and this technique has not been widely adopted in clinical practice yet. In this master's thesis, the first fully-automated multistage computer-aided diagnosis (CAD) method for adhesion detection is proposed. The method exploits the phenomenon of visceral slide, a pattern of abdominal motion observed during respiration in healthy subjects. Local reduction of visceral slide is a diagnostic criterion of adhesions. Visceral slide that occurs on a cine-MRI slice is quantified using a segmentation mask generated by a deep learning model and a deformation field between cine-MRI frames obtained with an image registration algorithm. Bounding boxes of adhesions are predicted with a region growing method based on the visceral slide values. Additionally, false positives reduction driven by domain knowledge is performed. The impact of using all cine-MRI time points and different normalisation options are investigated and the hyper-parameters of the method are determined with 5-fold cross-validation. When evaluated with cross-validation, the best method configuration yields detection sensitivity of 0.61 and 0.73 at 1 and 2 false positives per slice along with 0.53 AUC in slice-level diagnosis. On the held-out test set, a slightly different configuration is top-performing and achieves detection sensitivity of 0.7 and 0.91 at 1 and 1.89 false positives per slice along with 0.78 slice-level AUC, which indicates the promising potential of the core idea of the method.}, + file = {Mart21.pdf:pdf\\Mart21.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2021}, + journal = {Master thesis}, +} + +@article{Maus18, + author = {Mauschitz, Matthias M. and Bonnemaijer, Pieter W.M. and Diers, Kersten and Rauscher, Franziska G. and Elze, Tobias and Engel, Christoph and Loeffler, Markus and Colijn, Johanna Maria and Ikram, M. Arfan and Vingerling, Johannes R. and Williams, Katie M. and Hammond, Christopher J. and Creuzot-Garcher, Catherine and Bron, Alain M. and Silva, Rufino and Nunes, Sandrina and Delcourt, C\'{e}cile and Cougnard-Gr\'{e}goire, Audrey and Holz, Frank G. and Klaver, Caroline C.W. and Breteler, Monique M.B. and Finger, Robert P. 
and Acar, Niyazi and Anastosopoulos, Eleftherios and Azuara-Blanco, Augusto and Berendschot, Tos and Berendschot, Tos and Bergen, Arthur and Bertelsen, Geir and Binquet, Christine and Bird, Alan and Bobak, Martin and Larsen, Morten B\ogelund and Boon, Camiel and Bourne, Rupert and Br\'{e}tillon, Lionel and Broe, Rebecca and Bron, Alain and Buitendijk, Gabrielle and Cachulo, Maria Luz and Capuano, Vittorio and Carri\`{e}re, Isabelle and Chakravarthy, Usha and Chan, Michelle and Chang, Petrus and Colijn, Johanna and Cougnard-Gr\'{e}goire, Audrey and Cree, Angela and Creuzot-Garcher, Catherine and Cumberland, Phillippa and Cunha-Vaz, Jos\'{e} and Daien, Vincent and De Jong, Eiko and Deak, Gabor and Delcourt, C\'{e}cile and Delyfer, Marie-No\"{e}lle and Hollander, Anneke den and Dietzel, Martha and Erke, Maja Gran and Faria, Pedro and Farinha, Claudia and Fauser, Sascha and Finger, Robert and Fletcher, Astrid and Foster, Paul and Founti, Panayiota and Gorgels, Theo and Grauslund, Jakob and Grus, Franz and Hammond, Christopher and Hense, Hans-Werner and Hermann, Manuel and Hoehn, Ren\'{e} and Hogg, Ruth and Holz, Frank and Hoyng, Carel and Jansonius, Nomdo and Janssen, Sarah and Kersten, Eveline and Khawaja, Anthony and Klaver, Caroline and Korobelnik, Jean-Fran\c{c}ois and Lamparter, Julia and Le Goff, M\'{e}lanie and Lechanteur, Yara and Lehtim\"{a}ki, Terho and Leung, Irene and Lotery, Andrew and Mauschitz, Matthias and Meester, Magda and Merle, B\'{e}n\'{e}dicte and Meyer zu Westrup, Verena and Midena, Edoardo and Miotto, Stefania and Mirshahi, Alireza and Mohan-Sa\"{i}d, Sadek and Mueller, Michael and Muldrew, Alyson and Murta, Joaquim and Nickels, Stefan and Nunes, Sandrina and Owen, Christopher and Peto, Tunde and Pfeiffer, Norbert and Piermarocchi, Stefano and Prokofyeva, Elena and Rahi, Jugnoo and Raitakari, Olli and Rauscher, Franziska and Ribeiro, Luisa and Rougier, Marie-B\'{e}n\'{e}dicte and Rudnicka, Alicja and Sahel, Jos\'{e} and Salonikiou, Aggeliki and Sanchez, Clarisa and Schmitz-Valckenberg, Steffen and Schuster, Alexander and Schweitzer, C\'{e}dric and Segato, Tatiana and Shehata, Jasmin and Silva, Rufino and Silvestri, Giuliana and Simader, Christian and Souied, Eric and Speckauskas, Martynas and Springelkamp, Henriet and Tapp, Robyn and Topouzis, Fotis and van Leeuwen, Elisa and Verhoeven, Virginie and Verzijden, Timo and Von Hanno, Therese and Wiedemann, Peter and Williams, Katie and Wolfram, Christian and Yip, Jennifer and Zerbib, Jennyfer}, + title = {Systemic and Ocular Determinants of Peripapillary Retinal Nerve Fiber Layer Thickness Measurements in the European Eye Epidemiology (E3) Population}, + doi = {10.1016/j.ophtha.2018.03.026}, + year = {2018}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ophtha.2018.03.026}, + file = {Maus18.pdf:pdf\Maus18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ophthalmology}, + citation-count = {56}, + automatic = {yes}, + pages = {1526-1536}, + volume = {125}, +} + +@article{McLo04, + author = {Kristin J McLoughlin and Philip J Bones and Nico Karssemeijer}, + title = {Noise equalization for detection of microcalcification clusters in direct digital mammogram images}, + journal = TMI, + year = {2004}, + volume = {23}, + pages = {313--320}, + doi = {10.1109/TMI.2004.824240}, + file = {McLo04.pdf:pdf/McLo04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {15027524}, + month = {3}, + gsid = {8745936118971532376}, + gscites = {106}, + ss_id = 
{81cfc7e735194a4493f16e501f985fa009bfdff7}, + all_ss_ids = {['81cfc7e735194a4493f16e501f985fa009bfdff7']}, +} + +@inproceedings{Mehr17, + author = {Mehrtash, Alireza and Sedghi, Alireza and Ghafoorian, Mohsen and Taghipour, Mehdi and Tempany, Clare M. and Wells, William M. and Kapur, Tina and Mousavi, Parvin and Abolmaesumi, Purang and Fedorov, Andriy}, + booktitle = MI, + title = {Classification of clinical significance of MRI prostate findings using 3D convolutional neural networks}, + doi = {10.1117/12.2277123}, + pages = {101342A-101342A-4}, + series = SPIE, + url = {http://dx.doi.org/10.1117/12.2277123}, + volume = {10134}, + abstract = {Prostate cancer (PCa) remains a leading cause of cancer mortality among American men. Multi-parametric magnetic resonance imaging (mpMRI) is widely used to assist with detection of PCa and characterization of its aggressiveness. Computer-aided diagnosis (CADx) of PCa in MRI can be used as clinical decision support system to aid radiologists in interpretation and reporting of mpMRI. We report on the development of a convolution neural network (CNN) model to support CADx in PCa based on the appearance of prostate tissue in mpMRI, conducted as part of the SPIE-AAPM-NCI PROSTATEx challenge. The performance of different combinations of mpMRI inputs to CNN was assessed and the best result was achieved using DWI and DCE-MRI modalities together with the zonal information of the finding. On the test set, the model achieved an area under the receiver operating characteristic curve of 0.80.}, + file = {:pdf/Mehr17.pdf:PDF}, + optnote = {DIAG}, + year = {2017}, + month = {3}, +} + +@article{Meij12, + author = {Meijer, Hanneke J M and Debats, Oscar A and Kunze-Busch, Martina and van Kollenburg, Peter and Leer, Jan Willem and Witjes, J. Alfred and Kaanders, Johannes H A M and Barentsz, Jelle O and van Lin, Emile N J Th}, + title = {Magnetic Resonance Lymphography-Guided Selective High-Dose Lymph Node Irradiation in Prostate Cancer}, + journal = IJROBP, + year = {2012}, + volume = {82}, + pages = {175--183}, + doi = {10.1016/j.ijrobp.2010.09.023}, + abstract = {PURPOSE: To demonstrate the feasibility of magnetic resonance lymphography (MRL) -guided delineation of a boost volume and an elective target volume for pelvic lymph node irradiation in patients with prostate cancer. The feasibility of irradiating these volumes with a high-dose boost to the MRL-positive lymph nodes in conjunction with irradiation of the prostate using intensity-modulated radiotherapy (IMRT) was also investigated. METHODS AND MATERIALS: In 4 prostate cancer patients with a high risk of lymph node involvement but no enlarged lymph nodes on CT and/or MRI, MRL detected pathological lymph nodes in the pelvis. These lymph nodes were identified and delineated on a radiotherapy planning CT to create a boost volume. Based on the location of the MRL-positive lymph nodes, the standard elective pelvic target volume was individualized. An IMRT plan with a simultaneous integrated boost (SIB) was created with dose prescriptions of 42 Gy to the pelvic target volume, a boost to 60 Gy to the MRL-positive lymph nodes, and 72 Gy to the prostate. RESULTS: All MRL-positive lymph nodes could be identified on the planning CT. This information could be used to delineate a boost volume and to individualize the pelvic target volume for elective irradiation. IMRT planning delivered highly acceptable radiotherapy plans with regard to the prescribed dose levels and the dose to the organs at risk (OARs). 
CONCLUSION: MRL can be used to select patients with limited lymph node involvement for pelvic radiotherapy. MRL-guided delineation of a boost volume and an elective pelvic target volume for selective high-dose lymph node irradiation with IMRT is feasible. Whether this approach will result in improved outcome for these patients needs to be investigated in further clinical studies.}, + file = {Meij12.pdf:pdf\\Meij12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {21075555}, + month = {1}, +} + +@article{Meij12a, + author = {Meijer, Hanneke J M. and Debats, Oscar A. and Roach, 3rd, Mack and Span, Paul N. and Witjes, J Alfred and Kaanders, Johannes H A M. and van Lin, Emile N J Th and Barentsz, Jelle O.}, + title = {Magnetic Resonance Lymphography Findings in Patients With Biochemical Recurrence After Prostatectomy and the Relation With the Stephenson Nomogram}, + journal = IJROBP, + year = {2012}, + volume = {84}, + pages = {1186--1191}, + doi = {10.1016/j.ijrobp.2012.02.039}, + abstract = {PURPOSE: To estimate the occurrence of positive lymph nodes on magnetic resonance lymphography (MRL) in patients with a prostate-specific antigen (PSA) recurrence after prostatectomy and to investigate the relation between score on the Stephenson nomogram and lymph node involvement on MRL. METHODS AND MATERIALS: Sixty-five candidates for salvage radiation therapy were referred for an MRL to determine their lymph node status. Clinical and histopathologic features were recorded. For 49 patients, data were complete to calculate the Stephenson nomogram score. Receiver operating characteristic (ROC) analysis was performed to determine how well this nomogram related to the MRL result. Analysis was done for the whole group and separately for patients with a PSA <1.0 ng/mL to determine the situation in candidates for early salvage radiation therapy, and for patients without pathologic lymph nodes at initial lymph node dissection. RESULTS: MRL detected positive lymph nodes in 47 patients. ROC analysis for the Stephenson nomogram yielded an area under the curve (AUC) of 0.78 (95\% confidence interval, 0.61-0.93). Of 29 patients with a PSA <1.0 ng/mL, 18 had a positive MRL. Of 37 patients without lymph node involvement at initial lymph node dissection, 25 had a positive MRL. ROC analysis for the Stephenson nomogram showed AUCs of 0.84 and 0.74, respectively, for these latter groups. CONCLUSION: MRL detected positive lymph nodes in 72\% of candidates for salvage radiation therapy, in 62\% of candidates for early salvage radiation therapy, and in 68\% of initially node-negative patients. The Stephenson nomogram showed a good correlation with the MRL result and may thus be useful for identifying patients with a PSA recurrence who are at high risk for lymph node involvement.}, + file = {Meij12a.pdf:pdf\\Meij12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {22520482}, + month = {12}, +} + +@article{Meij12c, + author = {Meijer, Hanneke J M and van Lin, Emile N and Debats, Oscar A and Witjes, J. 
Alfred and Span, Paul N and Kaanders, Johannes H A M and Barentsz, Jelle O}, + title = {High Occurrence of Aberrant Lymph Node Spread on Magnetic Resonance Lymphography in Prostate Cancer Patients with a Biochemical Recurrence After Radical Prostatectomy}, + journal = IJROBP, + year = {2012}, + volume = {82 (4)}, + pages = {1405-1410}, + doi = {10.1016/j.ijrobp.2011.04.054}, + abstract = {PURPOSE: To investigate the pattern of lymph node spread in prostate cancer patients with a biochemical recurrence after radical prostatectomy, eligible for salvage radiotherapy; and to determine whether the clinical target volume (CTV) for elective pelvic irradiation in the primary setting can be applied in the salvage setting for patients with (a high risk of) lymph node metastases. METHODS AND MATERIALS: The charts of 47 prostate cancer patients with PSA recurrence after prostatectomy who had positive lymph nodes on magnetic resonance lymphography (MRL) were reviewed. Positive lymph nodes were assigned to a lymph node region according to the guidelines of the Radiation Therapy Oncology Group (RTOG) for delineation of the CTV for pelvic irradiation (RTOG-CTV). We defined four lymph node regions for positive nodes outside this RTOG-CTV: the para-aortal, proximal common iliac, pararectal, and paravesical regions. They were referred to as aberrant lymph node regions. For each patient, clinical and pathologic features were recorded, and their association with aberrant lymph drainage was investigated. The distribution of positive lymph nodes was analyzed separately for patients with a prostate-specific antigen (PSA) <1.0 ng/mL. RESULTS: MRL detected positive aberrant lymph nodes in 37 patients (79\%). In 20 patients (43\%) a positive lymph node was found in the pararectal region. Higher PSA at the time of MRL was associated with the presence of positive lymph nodes in the para-aortic region (2.49 vs. 0.82 ng/mL; p=0.007) and in the proximal common iliac region (1.95 vs. 0.59 ng/mL; p=0.009). There were 18 patients with a PSA <1.0 ng/mL. Ten of these patients (61\%) had at least one aberrant positive lymph node. CONCLUSION: Seventy-nine percent of the PSA-recurrent patients had at least one aberrant positive lymph node. Application of the standard RTOG-CTV for pelvic irradiation in the salvage setting therefore seems to be inappropriate.}, + file = {Meij12c.pdf:pdf\\Meij12c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21640507}, + month = {3}, +} + +@inproceedings{Meij15a, + author = {Midas Meijs and Oscar A. Debats and Henkjan J. Huisman}, + title = {The evaluation of multi-structure, multi-atlas pelvic anatomy features in a prostate {MR} Lymphography {CAD} system}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + pages = {94140T}, + doi = {10.1117/12.2082708}, + abstract = {In prostate cancer, the detection of metastatic lymph nodes indicates progression from localized disease to metastasized cancer. The detection of positive lymph nodes is, however, a complex and time consuming task for experienced radiologists. Assistance of a two-stage Computer-Aided Detection (CAD) system in MR Lymphography (MRL) is not yet feasible due to the large number of false positives in the first stage of the system. By introducing a multi-structure, multi-atlas segmentation, using an affine transformation followed by a B-spline transformation for registration, the organ location is given by a mean density probability map. 
The atlas segmentation is semi-automatically drawn with ITK-SNAP, using Active Contour Segmentation. Each anatomic structure is identified by a label number. Registration is performed using Elastix, using Mutual Information and an Adaptive Stochastic Gradient optimization. The dataset consists of the MRL scans of ten patients, with lymph nodes manually annotated in consensus by two expert readers. The feature map of the CAD system consists of the Multi-Atlas and various other features (e.g. Normalized Intensity and multi-scale Blobness). The voxel-based Gentleboost classifier is evaluated using ROC analysis with cross validation. We show in a set of 10 studies that adding multi-structure, multi-atlas anatomical structure likelihood features improves the quality of the lymph node voxel likelihood map. Multiple structure anatomy maps may thus make MRL CAD more feasible.}, + file = {Meij15a.pdf:pdf\\Meij15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {72943470504272677}, + gscites = {2}, + ss_id = {b7911fd32d678c756874904e35b13dd3fa225a56}, + all_ss_ids = {['b7911fd32d678c756874904e35b13dd3fa225a56']}, +} + +@article{Meij15c, + author = {Meijs, Midas and Christensen, Soren and Lansberg, Maarten G and Albers, Gregory W and Calamante, Fernando}, + title = {Analysis of perfusion {MRI} in stroke: To deconvolve, or not to deconvolve}, + journal = MRM, + year = {2016}, + volume = {76}, + pages = {1282-1290}, + doi = {10.1002/mrm.26024}, + abstract = {Purpose + There is currently controversy regarding the benefits of deconvolution-based parameters in stroke imaging, with studies suggesting a similar infarct prediction using summary parameters. We investigate here the performance of deconvolution-based parameters and summary parameters for dynamic-susceptibility contrast (DSC) MRI analysis, with particular emphasis on precision. + + Methods + Numerical simulations were used to assess the contribution of noise and arterial input function (AIF) variability to measurement precision. A realistic AIF range was defined based on in vivo data from an acute stroke clinical study. The simulated tissue curves were analyzed using two popular singular value decomposition (SVD) based algorithms, as well as using summary parameters. + + Results + SVD-based deconvolution methods were found to considerably reduce the AIF-dependency, but a residual AIF bias remained on the calculated parameters. Summary parameters, in turn, show a lower sensitivity to noise. The residual AIF-dependency for deconvolution methods and the large AIF-sensitivity of summary parameters was greatly reduced when normalizing them relative to normal tissue. + + Conclusion + Consistent with recent studies suggesting high performance of summary parameters in infarct prediction, our results suggest that DSC-MRI analysis using properly normalized summary parameters may have advantages in terms of lower noise and AIF-sensitivity as compared to commonly used deconvolution methods.}, + file = {Meij15c.pdf:pdf\\Meij15c.pdf:PDF}, + optnote = {DIAG}, + pmid = {26519871}, + publisher = {Wiley Online Library}, + month = {10}, +} + +@conference{Meij16, + author = {Midas Meijs and Mathias Prokop and Frederick J.A. 
Meijer and Bram van Ginneken and Rashindra Manniesing}, + title = {Segmentation of small cerebral vessels on {CT} perfusion images of patients with suspected acute stroke}, + booktitle = RSNA, + year = {2016}, + abstract = {PURPOSE: Due to partial volume effects, accurate segmentation of small cerebral vessels on {CT} is a challenge. We present a novel technique that incorporates local intensity histogram information to segment the cerebral vasculature on {CT} perfusion ({CTP}) scans for suspected ischemic stroke. + + METHOD AND MATERIALS: A pattern recognition approach based on global and local image features followed by a random forest classifier is proposed. The features consist of an automatically computed brain mask denoting intracranial tissue, the first volume of the {CTP} scan, the {CTP} scan temporal average weighted according to the individual exposures to maximize signal-to-noise ratio, the weighted temporal variance ({WTV}), and local histogram features of the {WTV} calculated in a neighborhood of 9x9x9 voxels around a centered voxel. The mean, standard deviation, entropy and mode of the histogram are extracted as local feature values. In total 26 patients that underwent {CTP} for suspicion of stroke were included in this study. The {CTP} was acquired on a 320-detector row scanner. Image size was 512x512x320 voxels by 19 time points with voxel sizes of approximately 0.5 mm. Training was done on 8 patients with manually annotated data. The remaining 18 patients were used as testing set. Segmentations were visually inspected for completeness and overall quality. 3D-patches including the {M2}/{M3} segments of the middle cerebral artery were manually annotated for quantitative evaluation. The modified Hausdorff distance ({MHD}) (maximum of the median {HD}s) and the accuracy (true positive + true negative voxels divided by amount of voxels in a patch) of the segmentation were reported for the annotated patches. + + RESULTS: Overall the method was capable of segmenting the complete cerebral vasculature with inclusion of very small distal vessels. Parts of one internal carotid were missed in one patient because of clipping artefacts. In 3 patients false positive voxels were observed in the skull base region near the internal carotid artery and cavernous sinus. The {MHD} was 0.51 +- 0.28 mm, which is similar to the voxel spacing, and the accuracy was 0.97 +- 0.01. + + CONCLUSION: Our approach provides high-quality segmentation of small cerebral vessels from {CTP} data. + + CLINICAL RELEVANCE/APPLICATION: The high quality segmentation provided by our approach is an important step towards the automated localization and evaluation of vascular pathology in acute stroke patients.}, + file = {Meij16.pdf:pdf\\Meij16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Meij17, + author = {Midas Meijs and Sjoert Pegge and Mathias Prokop and Bram van Ginneken and Frederick J. A. Meijer and Rashindra Manniesing}, + title = {Detection of vessel occlusion in acute stroke is facilitated by color-coded {4D-CTA}}, + booktitle = ECR, + year = {2017}, + abstract = {PURPOSE + To perform a pilot study to explore the effect of a new post-processing technique for 4D-CTA on speed and accuracy of the detection of intracranial vessel occlusions in acute stroke. This technique color-codes the contrast arrival time in the cerebral vasculature in 4D-CTA so that abnormally delayed vascular territories are easily detected. 
+ METHOD AND MATERIALS + We selected 10 patients without and 10 patients with a confirmed single vessel occlusion on CTA from our database of acute ischemic stroke patients, so that occlusions of the ICA, MCA, ACA and PCA of varying subtlety were included. Whole-brain CT perfusion was performed on a 320 detector-row scanner. Color-coded 4D-CTA images were obtained by centering the color scale of vessel time-to-peak (TTP) on the modus of the TTP histogram. Temporal MIP of 4D-CTA with and without color-coding were evaluated in random order for the presence of vessel occlusion by two neuroradiologists. Time-to-detection and accuracy of detection of vessel occlusions were evaluated. + RESULTS + One false-positive vessel occlusion was rated on color-mapping by both observers. Overall, the average time-to-detection decreased from 37.0s to 19.4s (p<0.03) and the average accuracy of vessel occlusion detection increased from 0.825 to 0.85 with color-mapping. + CONCLUSION + Color-mapping of cerebral vasculature in 4D-CTA improves the speed and may improve the accuracy of the detection of vessel occlusions in acute stroke patients.}, + file = {Meij17.pdf:pdf\\Meij17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Meij17a, + author = {Midas Meijs and Ajay Patel and Sil van de Leemput and Mathias Prokop and Ewoud J van Dijk and Frank-Erik de Leeuw and Frederick J. A. Meijer and Bram van Ginneken and Rashindra Manniesing}, + title = {Robust Segmentation of the Full Cerebral Vasculature in {4D CT} Images of Suspected Stroke Patients}, + journal = NATSCIREP, + year = {2017}, + volume = {7}, + abstract = {A robust method is presented for the segmentation of the full cerebral vasculature in 4-dimensional (4D) computed tomography (CT). The method consists of candidate vessel selection, feature extraction, random forest classification and postprocessing. Image features include among others the weighted temporal variance image and parameters, including entropy, of an intensity histogram in a local region at different scales. These histogram parameters revealed to be a strong feature in the detection of vessels regardless of shape and size. The method was trained and tested on a large database of 264 patients with suspicion of acute ischemic stroke who underwent 4D CT in our hospital in the period January 2014 to December 2015. In this database there is a large variety of patients observed in every day clinical practice. The method was trained on 19 4D CT images of patients with manual annotations by two trained medical assistants. Five subvolumes representing different regions of the cerebral vasculature were annotated in each image in the training set. The evaluation of the method was done on 242 patients. One out of five subvolumes was randomly annotated in 159 patients and was used for quantitative evaluation. Segmentations were inspected visually for the entire study cohort to assess failures. A total of 16 (<8%) patients showed severe under- or over-segmentation and were reported as failures. 
Quantitative evaluation in comparison to the reference annotation showed a Dice coefficient of 0.91 +- 0.07 and a modified Hausdorff distance of 0.23 +- 0.22 mm, which is smaller than voxel spacing.}, + doi = {10.1038/s41598-017-15617-w}, + file = {Meij17a.pdf:pdf\\Meij17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29142240}, + url = {https://www.nature.com/articles/s41598-017-15617-w}, + month = {11}, + all_ss_ids = {['6119971f513cfa3a9dce2cff139f041f11bcb404', 'fb5c4bf41fd879c9c64028a817db7f7ab6aba429']}, + gscites = {37}, +} + +@inproceedings{Meij18, + author = {Midas Meijs and Rashindra Manniesing}, + title = {Artery and Vein Segmentation of the Cerebral Vasculature in {4D CT} using a {3D} Fully Convolutional Neural Network}, + booktitle = MI, + year = {2018}, + volume = {10575}, + series = SPIE, + pages = {105751Q}, + doi = {10.1117/12.2292974}, + abstract = {Segmentation of the arteries and veins of the cerebral vasculature is important for improved visualization and for the detection of vascular related pathologies including arterio-venous malformations. We propose a three dimensional fully convolutional neural network (CNN), with Time-to-Signal images as input, extended with the distance to the center of gravity of the brain as spatial feature integrated at the abstract level of the CNN. The method is trained and validated on 6 and tested on 4 4D CT patient imaging data. The reference standard was acquired by manual annotations by an experienced observer. Quantitative evaluation shows a mean Dice similarity coefficient of 0.936 +- 0.027 and 0.973 +- 0.012, a mean absolute volume difference of 4.36 +- 5.47 % and 1.79 +- 2.26 % for artery and vein respectively and an overall accuracy of 0.962 +- 0.017. Average calculation time per volume on the test set was approximately one minute. Our method shows promising results and enables fast and accurate segmentation of arteries and veins in full 4D CT imaging data.}, + file = {Meij18.pdf:pdf\\Meij18.pdf:PDF}, + optnote = {DIAG, Radiology}, + month = {2}, + gsid = {11326400040196917956}, + gscites = {13}, + ss_id = {d5822d3e621307d4928b2bd20643640676b09495}, + all_ss_ids = {['d5822d3e621307d4928b2bd20643640676b09495']}, +} + +@article{Meij18a, + author = {Midas Meijs and Frank-Erik de Leeuw and Hieronymus D. Boogaarts and Rashindra Manniesing and Frederick J. A. Meijer}, + title = {Circle of Willis collateral flow in carotid artery occlusion is depicted by {4D}-{CTA}}, + journal = WONEU, + year = {2018}, + volume = {114}, + pages = {421-426}, + doi = {10.1016/j.wneu.2018.02.189}, + abstract = {Background + In case of carotid artery occlusion, the risk and extent of ischemic cerebral damage is highly dependent on the pathways of collateral flow, including the anatomy of the circle of Willis. + In this report, cases are presented to illustrate that {4D-CTA} can be considered as a noninvasive alternative to DSA for the evaluation of circle of Willis collateral flow. + Case Description + Five patients with unilateral internal carotid artery ({ICA}) occlusion underwent {4D-CTA} for the evaluation of intracranial hemodynamics. Next to a visual evaluation of {4D-CTA}, temporal information was visualized using a normalized color scale on the cerebral vasculature, which enabled quantification of the contrast bolus arrival time. In these patients, {4D-CTA} demonstrated dominant {MCA} blood supply on the side of {ICA} occlusion originating either from the contralateral {ICA} or from the posterior circulation via the communicating arteries. 
+ Conclusions + Temporal dynamics of collateral flow in the circle of Willis can be depicted with {4D-CTA} in patients with a unilateral carotid artery occlusion.}, + file = {Meij18a.pdf:pdf\\Meij18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29530689}, + month = {6}, + gsid = {7527352445172307553}, + gscites = {5}, + ss_id = {b8f20f8de8a22406d7308e6a7e2bc9508bf78210}, + all_ss_ids = {['b8f20f8de8a22406d7308e6a7e2bc9508bf78210']}, +} + +@conference{Meij18b, + author = {Midas Meijs and Ajay Patel and Sil C. van de Leemput and Bram van Ginneken and Mathias Prokop and Rashindra Manniesing}, + title = {Fast, Robust and Accurate Segmentation of the Complete Cerebral Vasculature in 4{D-CTA} using Deep Learning}, + booktitle = RSNA, + year = {2018}, + abstract = {PURPOSE: Segmentation of the complete cerebral vasculature in {4D-CTA} is important for improved visualization, automated pathology detection and assessment of the collateral flow. We present a deep learning approach to segment the complete cerebral vasculature in {4D-CTA} of patients with suspected stroke. + MATERIALS AND METHODS: In total 162 patients that underwent {4D-CTA} for suspicion of stroke were included in this study. The scans were acquired on a 320-detector row scanner (Canon Medical Systems Corporation, Japan). Image size was 512x512x320 voxels by 19 time points with isotropic voxel sizes of approximately 0.5 mm. A 3D fully convolutional neural network ({CNN}), U-Net, was proposed with integration of a spatial feature in the final convolutional layer of the network. The weighted temporal average and variance were derived from the 4D-CTA and used as input for the network. As spatial feature the Euclidean distance from the center of the brain to the skull was used. Training was done on 19 patients with manually annotated data. The remaining 143 patients were used as testing set. Segmentations were visually inspected for completeness and overall quality. Two observers manually annotated three dimensional sub-volumes throughout the brain to include different sized vessels for quantitative evaluation. The Dice similarity coefficient ({DSC}) and Mean Contour Distance ({MCD}) of the segmentations were reported. + RESULTS + Overall the method was capable of segmenting the complete cerebral vasculature. Smaller distal vessels (e.g. M3) showed similar segmentation results as the larger vessels (e.g. internal carotid artery). The {DSC} was 0.91+-0.08 and the {MCD} was 0.26+-0.24 mm which is below voxel spacing. Computation time was less than 90 seconds for processing a full {4D-CTA} data set. + CONCLUSION: + A 3D U-Net with spatial features provides fast, robust and accurate segmentations of the full cerebral vasculature in {4D-CTA}. + Clinical Relevance + The high quality segmentation provided by our method is an important step towards the automated localization and evaluation of vascular pathology in acute stroke patients.}, + file = {Meij18b.pdf:pdf\\Meij18b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Meij18c, + author = {Frederick J.A. Meijer and Peter W.A. Willems and Midas Meijs and Rashindra Manniesing}, + title = {Color-mapping visualization of {4D-CTA} in neurovascular disease}, + booktitle = ESNR, + year = {2018}, + abstract = {{PURPOSE} + Nowadays {4D-CTA} is available as a non-invasive alternative to conventional angiography, with reported high diagnostic accuracy in the evaluation of different neurovascular disorders, including arteriovenous shunts and collateral flow pathways. 
Optimized processing of {4D-CTA} is crucial, considering the large amount of data generated. Enhanced visualization of {4D-CTA} can be achieved by applying color-mapping of temporal information in the cerebral vasculature. + {METHOD AND MATERIALS} + Color-mapping processing of {4D-CTA} is achieved in two steps. First, the full cerebral vasculature is segmented by feature extraction and random forest classification. Second, the color-scale is adjusted using the histogram of the arrival times of the segmented vessels. Early contrast bolus arrival (e.g. healthy internal carotid artery) is labeled red, intermediate arrival yellow, and delayed contrast arrival is labeled blue. + Color-mapping of {4D-CTA} was applied in patients suspected of cranial arteriovenous shunts, and in patients with unilateral carotid artery occlusion for the evaluation of circle of Willis collateral flow. The patients were scanned on a wide-row 320 slice detector {CT} (Toshiba Aquilion {ONE}), enabling whole-head coverage at high temporal resolution. + {RESULTS} + Arterialization of venous vascular structures is the hallmark of arterio-venous shunts, which is easily and accurately identified on color-mapping of {4D-CTA}. Temporal dynamics of collateral flow in the circle of Willis is adequately depicted with {4D-CTA} in patients with unilateral carotid artery occlusion. + {CONCLUSION} + Color-mapping of {4D-CTA} accurately displays temporal information of the cerebral vasculature, which can facilitate the detection of arterio-venous shunts and the evaluation of collateral flow in intracranial steno-occlusive disease.}, + file = {Meij18c.pdf:pdf\\Meij18c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Meij19a, + author = {Midas Meijs and Sjoert A.H. Pegge and Kazuhiro Murayama and Hieronymus D. Boogaarts and Mathias Prokop and Peter W.A. Willems and Rashindra Manniesing and Frederick J.A. Meijer}, + title = {Color mapping of {4D-CTA} for the detection of cranial arteriovenous shunts}, + doi = {10.3174/ajnr.A6156}, + number = {9}, + pages = {1498--1504}, + volume = {40}, + abstract = {Four-dimensional CT angiography ({4D-CTA}) is increasingly used in clinical practice for the assessment of different neurovascular disorders. Optimized processing of {4D-CTA} is crucial for diagnostic interpretation because of the large amount of data that is generated. A color-mapping method of {4D-CTA} is presented for improved and enhanced visualization of the cerebral vasculature hemodynamics. This method was applied to detect cranial AVFs. All patients who underwent both {4D-CTA} and {DSA} in our hospital in the period 2011-2018 for the clinical suspicion of a cranial {AVF} or caroticocavernous fistula ({CCF}) were retrospectively collected. Temporal information in the cerebral vasculature was visualized using a patient-specific color-scale. All color-maps were evaluated by three observers for the presence or absence of an {AVF} or {CCF}. The presence or absence of cortical venous reflux was evaluated as a secondary outcome measure. In total 31 patients were included, 21 patients with and 10 without an {AVF}. Arterialization of venous structures in {AVF}s was accurately visualized using color-mapping. There was high sensitivity (86-100%) and moderate-to-high specificity (70-100%) for the detection of AVFs on color-mapping {4D-CTA}, even without the availability of dynamic subtraction rendering. 
The diagnostic performance of the three observers in the detection of cortical venous reflux was variable (sensitivity 43-88%, specificity 60-80%). Arterialization of venous structures can be visualized using color mapping of {4D-CTA} and proves to be accurate for the detection of cranial {AVF}s. This makes color-mapping a promising visualization technique for assessing temporal hemodynamics in {4D-CTA}.}, + file = {Meij19a.pdf:pdf\\Meij19a.pdf:PDF}, + journal = AJNR, + optnote = {DIAG}, + pmid = {31395664}, + publisher = {Am Soc Neuroradiology}, + year = {2019}, + month = {8}, + ss_id = {42f7d5d9b1df931226dcdbf354a07e357cdbc186}, + all_ss_ids = {['42f7d5d9b1df931226dcdbf354a07e357cdbc186']}, + gscites = {4}, +} + +@phdthesis{Meij20, + author = {Midas Meijs}, + title = {Automated Image Analysis and Machine Learning to Detect Cerebral Vascular Pathology in 4D-CTA}, + url = {https://repository.ubn.ru.nl/handle/2066/216524}, + abstract = {Scanners used in the hospital are becoming more advanced and in a matter of seconds complete 4-dimensional detailed images can be acquired from the brain and its vessels. In serious disorders, like an ischemic stroke, the vessels in the brain are blocked or affected. A radiologist examines these complex scans looking for the disorder of a blocked vessel. The goal of my research is to facilitate the assessment of these images and help doctors read these scans. Using image analysis methods, I have developed a technique that visualizes the flow in the blood vessels using a color-map. Through this, the abnormalities or blocked blood vessels become better visible. On top of this, I have developed a method using artificial intelligence that can automatically detect patients with an ischemic stroke, based on the 4-dimensional images. In the future, these and other algorithms can aid, and possibly take over the job of the radiologist, of examining these images.}, + copromotor = {R. Manniesing and F.J.A. Meijer}, + file = {Meij20.pdf:pdf\\Meij20.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken and M. Prokop}, + school = {Radboud University, Nijmegen}, + year = {2020}, + journal = {PhD thesis}, +} + +@article{Meij20a, + author = {Midas Meijs and Meijer, F. J. A. and Mathias Prokop and van Ginneken, B. and Rashindra Manniesing}, + title = {Image-level detection of arterial occlusions in {4D-CTA} of acute stroke patients using deep learning}, + abstract = {The triage of acute stroke patients is increasingly dependent on four-dimensional CTA (4D-CTA) imaging. In this work, we present a convolutional neural network (CNN) for image-level detection of intracranial anterior circulation artery occlusions in 4D-CTA. The method uses a normalized 3D time-to-signal (TTS) representation of the input image, which is sensitive to differences in the global arrival times caused by the potential presence of vascular pathologies. The TTS map presents the time within the cranial cavity at which the signal reaches a percentage of the maximum signal intensity, corrected for the baseline intensity. The method was trained and validated on (n=214) patient images and tested on an independent set of (n=279) patient images. This test set included all consecutive suspected-stroke patients admitted to our hospital in 2018. The accuracy, sensitivity, and specificity were 92\%, 95\%, and 92\%. The area under the receiver operating characteristics curve was 0.98 (95% CI: 0.95-0.99). 
These results show the feasibility of automated stroke triage in 4D-CTA.}, + file = {Meij20a.pdf:pdf\\Meij20a.pdf:PDF}, + doi = {10.1016/j.media.2020.101810}, + pmid = {32920477}, + journal = MIA, + optnote = {DIAG}, + volume = {66}, + pages = {101810}, + year = {2020}, + ss_id = {3db60e34dad56e3949508d8dc1324d5331f9a2c6}, + all_ss_ids = {['3db60e34dad56e3949508d8dc1324d5331f9a2c6']}, + gscites = {14}, +} + +@article{Meij20b, + author = {Midas Meijs and Sjoert A. H. Pegge and Maria H. E. Vos and Ajay Patel and Sil C. van de Leemput and Kevin Koschmieder and Mathias Prokop and Frederick J. A. Meijer and Rashindra Manniesing}, + title = {Cerebral Artery and Vein Segmentation in Four-dimensional CT Angiography Using Convolutional Neural Networks}, + doi = {10.1148/ryai.2020190178}, + abstract = {Purpose: To implement and test a deep learning approach for the segmentation of the arterial and venous cerebral vasculature with four-dimensional (4D) CT angiography. Materials and Methods: Patients who had undergone 4D CT angiography for the suspicion of acute ischemic stroke were retrospectively identified. A total of 390 patients evaluated in 2014 (n = 113) or 2018 (n = 277) were included in this study, with each patient having undergone one 4D CT angiographic scan. One hundred patients from 2014 were randomly selected, and the arteries and veins on their CT scans were manually annotated by five experienced observers. The weighted temporal average and weighted temporal variance from 4D CT angiography were used as input for a three-dimensional Dense-U-Net. The network was trained with the fully annotated cerebral vessel artery-vein maps from 60 patients. Forty patients were used for quantitative evaluation. The relative absolute volume difference and the Dice similarity coefficient are reported. The neural network segmentations from 277 patients who underwent scanning in 2018 were qualitatively evaluated by an experienced neuroradiologist using a five-point scale. Results: The average time for processing arterial and venous cerebral vasculature with the network was less than 90 seconds. The mean Dice similarity coefficient in the test set was 0.80 +- 0.04 (standard deviation) for the arteries and 0.88 +- 0.03 for the veins. The mean relative absolute volume difference was 7.3% +- 5.7 for the arteries and 8.5% +- 4.8 for the veins. Most of the segmentations (n = 273, 99.3%) were rated as very good to perfect. Conclusion: The proposed convolutional neural network enables accurate artery and vein segmentation with 4D CT angiography with a processing time of less than 90 seconds.}, + file = {Meij20b.pdf:pdf\\Meij20b.pdf:PDF}, + journal = RAI, + optnote = {DIAG}, + volume = {2}, + number = {4}, + pages = {e190178}, + year = {2020}, + ss_id = {ffa6b97fd94b3641ddf22af707719a42ee4db9ac}, + all_ss_ids = {['ffa6b97fd94b3641ddf22af707719a42ee4db9ac']}, + gscites = {7}, +} + +@inproceedings{Mein19, + author = {Meine, Hans and Hering, Alessa}, + title = {Efficient prealignment of CT scans for registration through a bodypart regressor}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=r1xYAvZXqN}, + abstract = {Convolutional neural networks have not only been applied for classification of voxels, objects, or images, for instance, but have also been proposed as a bodypart regressor. 
We pick up this underexplored idea and evaluate its value for registration: A CNN is trained to output the relative height within the human body in axial CT scans, and the resulting scores are used for quick alignment between different timepoints. Preliminary results confirm that this allows both fast and robust prealignment compared with iterative approaches.}, + file = {Mein19.pdf:pdf\\Mein19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {f5d48773035e68452f06310f5db78cb11299dc0a}, + all_ss_ids = {['f5d48773035e68452f06310f5db78cb11299dc0a']}, + gscites = {2}, +} + +@article{Melb11, + author = {Melbourne, A. and Hipwell, J. and Modat, M. and Mertzanidou, T. and Huisman, H. and Ourselin, S. and Hawkes, D. J.}, + title = {The effect of motion correction on pharmacokinetic parameter estimation in dynamic-contrast-enhanced {MRI}}, + journal = PMB, + year = {2011}, + volume = {56}, + pages = {7693--7708}, + doi = {10.1088/0031-9155/56/24/001}, + abstract = {A dynamic-contrast-enhanced magnetic resonance imaging (DCE-MRI) dataset consists of many imaging frames, often acquired both before and after contrast injection. Due to the length of time spent acquiring images, patient motion is likely and image re-alignment or registration is required before further analysis such as pharmacokinetic model fitting. Non-rigid image registration procedures may be used to correct motion artefacts; however, a careful choice of registration strategy is required to reduce misregistration artefacts associated with enhancing features. This work investigates the effect of registration on the results of model-fitting algorithms for 52 DCE-MR mammography cases for 14 patients. Results are divided into two sections: a comparison of registration strategies in which a DCE-MRI-specific algorithm is preferred in 50\% of cases, followed by an investigation of parameter changes with known applied deformations, inspecting the effect of magnitude and timing of motion artefacts. Increased motion magnitude correlates with increased model-fit residual and is seen to have a strong influence on the visibility of strongly enhancing features. Motion artefacts in images close to the contrast agent arrival have a disproportionate effect on discrepancies in parameter estimation. The choice of algorithm, magnitude of motion and timing of the motion are each shown to influence estimated pharmacokinetic parameters even when motion magnitude is small.}, + file = {Melb11:pdf/Melb11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {24}, + pmid = {22086390}, + month = {11}, + gsid = {7633438660557008463}, + gscites = {35}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/96425}, +} + +@inproceedings{Mele12, + author = {J. Melendez and C. I. S\'{a}nchez and R. Hupse and B. van Ginneken and N. Karssemeijer}, + title = {Potential of a Standalone Computer-Aided Detection System for Breast Cancer Detection in Screening Mammography}, + booktitle = {IWDM '12: Proceedings of the 11th International Workshop on Breast Imaging}, + year = {2012}, + volume = {7361}, + series = LNCS, + pages = {682--689}, + doi = {10.1007/978-3-642-31271-7_88}, + abstract = {Current computer-aided detection (CAD) systems for mammography screening work as prompting devices that aim at drawing radiologists' attention to suspicious regions. 
In this paper, we investigate utilizing a CAD system based on a support vector machine classifier as a standalone tool for recalling additional abnormal cases missed at screening, while keeping the associated recall rate at low levels. We tested the system on a large database of 5800 cases containing abnormal instances (1%) corresponding to prior examinations missed at screening. The results showed that 26% of the missed cases could be detected with a low additional recall rate of 2%. Moreover, after extrapolating this result to a screening program, we determined that, with our system, 0.73 additional cancers per 20 additional recalls could be potentially detected. We also compared the proposed system with a regular CAD system intended for non-standalone operation. The performance of the proposed system was significantly better.}, + file = {Mele12.pdf:pdf\\Mele12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {11683397277520872641}, + gscites = {1}, + ss_id = {ed8e8be2e3c9e0520090e9b2065377d9623f01f2}, + all_ss_ids = {['ed8e8be2e3c9e0520090e9b2065377d9623f01f2']}, +} + +@conference{Mele12a, + author = {J. Melendez and C. I. S\'{a}nchez and B. van Ginneken and N. Karssemeijer}, + title = {Detection of breast carcinomas potentially missed during screening by means of a standalone {CAD} system}, + booktitle = RSNA, + year = {2012}, + abstract = {PURPOSE: To develop and evaluate a standalone computer-aided detection (CAD) system for the identification of suspicious non-referred cases. These cases could be submitted to a third reader for reconsideration. METHOD AND MATERIALS: We investigated a CAD system for mass detection consisting of three stages: pre-processing, initial detection and a final classification. In the pre-processing stage, the input image is segmented and enhanced. Next, suspicious regions are detected by an ensemble of five neural networks provided with five features based on spiculation and gradient measures. In the final stage, these candidate regions are assigned a malignancy score based on a richer set of 73 features that, in addition, measure contrast, location, linear texture, etc. For this final stage three different classifiers were evaluated: linear discriminant analysis (LDA), gentleboost with regression stumps and support vector machines (SVMs). The classifier parameters were optimized at a very low recall rate (2%), in contrast to previously proposed CAD systems, because we are interested in such a high specificity in order to minimize the reviewing effort for a possible third reader. The data for this study consisted of a set of 28,811 scanned film mammograms collected from different sites in the Netherlands. Of these, 740 images correspond to 266 malignant cases with visible masses and architectural distortions that were not detected until the next screening round (two years later). The remaining images correspond to 9,127 normal cases with no sign of pathology. A five fold cross-validation scheme was used to evaluate the system. Performance was assessed by mean case sensitivity obtained at 2% recall rate. Comparisons were made using bootstrap analysis. RESULTS: At a 2% additional recall rate, mean sensitivities achieved by LDA, gentleboost and SVM were 4.0%, 9.8%, and 10.5%, respectively. SVM and gentleboost performed significantly better than LDA. Using the best classifier, the CAD system was able to detect 28 (10.5%) previously missed cases for reconsideration. 
CONCLUSION: Automatic selection of non-referred suspicious cases for reconsideration using a standalone CAD system is feasible at low additional recall rates. CLINICAL RELEVANCE/APPLICATION: Using a CAD system that autonomously detects potentially missed cancers could substantially improve screening efficacy with only a small additional reading effort.}, + optnote = {DIAG}, +} + +@inproceedings{Mele14, + author = {J. Melendez and C. I. S\'{a}nchez and R. H. H. M. Philipsen and P. Maduskar and B. van Ginneken}, + title = {Multiple-instance learning for computer-aided detection of tuberculosis}, + booktitle = MI, + year = {2014}, + volume = {9035}, + series = SPIE, + pages = {90351J}, + doi = {10.1117/12.2043018}, + abstract = {Detection of tuberculosis (TB) on chest radiographs (CXRs) is a hard problem. Therefore, to help radiologists or even take their place when they are not available, computer-aided detection (CAD) systems are being developed. In order to reach a performance comparable to that of human experts, the pattern recognition algorithms of these systems are typically trained on large CXR databases that have been manually annotated to indicate the abnormal lung regions. However, manually outlining those regions constitutes a time-consuming process that, besides, is prone to inconsistencies and errors introduced by interobserver variability and the absence of an external reference standard. In this paper, we investigate an alternative pattern classification method, namely multiple-instance learning (MIL), that does not require such detailed information for a CAD system to be trained. We have applied this alternative approach to a CAD system aimed at detecting textural lesions associated with TB. Only the case (or image) condition (normal or abnormal) was provided in the training stage. We compared the resulting performance with those achieved by several variations of a conventional system trained with detailed annotations. A database of 917 CXRs was constructed for experimentation. It was divided into two roughly equal parts that were used as training and test sets. The area under the receiver operating characteristic curve was utilized as a performance measure. Our experiments show that, by applying the investigated MIL approach, comparable results as with the aforementioned conventional systems are obtained in most cases, without requiring condition information at the lesion level.}, + file = {Mele14.pdf:pdf\\Mele14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {7361253230908315066}, + gscites = {13}, + ss_id = {1e742e102e77881837a2c8b8ada295217a7ea4ef}, + all_ss_ids = {['1e742e102e77881837a2c8b8ada295217a7ea4ef']}, +} + +@article{Mele14a, + author = {Melendez, Jaime and van Ginneken, Bram and Maduskar, Pragnya and Philipsen, Rick H H M. and Reither, Klaus and Breuninger, Marianne and Adetifa, Ifedayo M O. and Maane, Ramatoulie and Ayles, Helen and S\'{a}nchez, Clara I.}, + title = {A Novel Multiple-Instance Learning-Based Approach to Computer-Aided Detection of Tuberculosis on Chest X-Rays}, + journal = TMI, + year = {2015}, + volume = {34}, + pages = {179-192}, + doi = {10.1109/TMI.2014.2350539}, + abstract = {In order to reach performance levels comparable to those of human experts, computer-aided detection (CAD) systems are typically optimized by means of a supervised learning approach that relies on large training databases comprising manually annotated lesions. 
However, manually outlining those lesions constitutes a difficult and time-consuming process that renders detailedly annotated data often difficult to obtain. In this paper, we investigate an alternative pattern classification approach, namely multiple-instance learning (MIL), that does not require such detailed information for a CAD system to be optimized. We have applied MIL to a CAD system aimed at detecting textural lesions associated with tuberculosis. Only the case (or image) condition (normal or abnormal), which was determined by radiological means, was required during training. Based upon the well-known miSVM technique, we propose a novel algorithm, specifically designed for our CAD application, that overcomes serious drawbacks of the former related to underestimation of the positive instances and costly iteration. The key of the proposed method is to use probability estimates instead of decision values to guide the MIL procedure. In addition, we include countermeasures that deal with the uncertainty resulting from instance relabeling. To show the advantages of our MIL-based approach as compared with a traditional supervised one, experiments with three different image databases were conducted. The area under the receiver operating characteristic curve was utilized as a performance measure. With the first database, for which training lesion annotations were available, the supervised system was not much better than our MIL-based method (0.88 vs. 0.86). Thus, the proposed approach achieved highly competitive results without resorting to lesion-level information and the associated annotation process. When evaluating the remaining databases, given their large difference with respect to the previous image set, the most appealing strategy to maintain good performance was to retrain the CAD systems considering the new data. However, since only the image condition was available in this case, only the MIL-based system could be retrained. This scenario, which is common in real-world applications, clearly demonstrates the better adaptation capabilities of the proposed approach. After retraining, our MIL-based system significantly outperformed the supervised one (0.86 vs. 0.79 and 0.91 vs. 0.85, p < 0.0001 and p = 0.0002, respectively).}, + file = {Mele14a.pdf:pdf\\Mele14a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {25163057}, + month = {1}, + gsid = {4430711056723080400}, + gscites = {102}, + ss_id = {32eab666e2a48a743f3d278ed9974582e513553d}, + all_ss_ids = {['32eab666e2a48a743f3d278ed9974582e513553d']}, +} + +@article{Mele14b, + author = {Melendez, Jaime and S\'{a}nchez, Clara I. and van Ginneken, Bram and Karssemeijer, Nico}, + title = {Improving mass candidate detection in mammograms via feature maxima propagation and local feature selection}, + journal = MP, + year = {2014}, + volume = {41}, + pages = {081904}, + doi = {10.1118/1.4885995}, + abstract = {Mass candidate detection is a crucial component of multistep computer-aided detection (CAD) systems. It is usually performed by combining several local features by means of a classifier. When these features are processed on a per-image-location basis (e.g., for each pixel), mismatching problems may arise while constructing feature vectors for classification, which is especially true when the behavior expected from the evaluated features is a peaked response due to the presence of a mass. 
In this study, two of these problems, consisting of maxima misalignment and differences of maxima spread, are identified and two solutions are proposed. The first proposed method, feature maxima propagation, reproduces feature maxima through their neighboring locations. The second method, local feature selection, combines different subsets of features for different feature vectors associated with image locations. Both methods are applied independently and together. The proposed methods are included in a mammogram-based CAD system intended for mass detection in screening. Experiments are carried out with a database of 382 digital cases. Sensitivity is assessed at two sets of operating points. The first one is the interval of 3.5-15 false positives per image (FPs/image), which is typical for mass candidate detection. The second one is 1 FP/image, which allows to estimate the quality of the mass candidate detector's output for use in subsequent steps of the CAD system. The best results are obtained when the proposed methods are applied together. In that case, the mean sensitivity in the interval of 3.5-15 FPs/image significantly increases from 0.926 to 0.958 (p < 0.0002). At the lower rate of 1 FP/image, the mean sensitivity improves from 0.628 to 0.734 (p < 0.0002). Given the improved detection performance, the authors believe that the strategies proposed in this paper can render mass candidate detection approaches based on image location classification more robust to feature discrepancies and prove advantageous not only at the candidate detection level, but also at subsequent steps of a CAD system.}, + file = {Mele14b.pdf:pdf\\Mele14b.pdf:PDF}, + optnote = {DIAG}, + number = {8}, + pmid = {25086535}, + month = {7}, + gsid = {9401041546322028279}, + gscites = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/132831}, + ss_id = {98b95b0af8ba3c25a87ad5755519164fcfeee2d0}, + all_ss_ids = {['98b95b0af8ba3c25a87ad5755519164fcfeee2d0']}, +} + +@phdthesis{Mele15, + author = {Jaime Melendez}, + title = {Improving computer-aided detection systems through advanced pattern recognition techniques}, + year = {2015}, + url = {http://hdl.handle.net/2066/142761}, + abstract = {In this thesis, several pattern recognition methods aimed at further developing the capabilities of CAD are investigated. To evaluate these methods, CAD systems applied to breast cancer screening and tuberculosis detection are explored.}, + copromotor = {C. I. S\'{a}nchez}, + file = {Mele15.pdf:pdf/Mele15.pdf:PDF}, + optnote = {DIAG}, + promotor = {N. Karssemeijer and B. van Ginneken}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Mele16, + author = {Melendez, Jaime and {van Ginneken}, Bram and Maduskar, Pragnya and Philipsen, Rick and Ayles, Helen and S\'{a}nchez, Clara I.}, + title = {On Combining Multiple-Instance Learning and Active Learning for Computer-Aided Detection of Tuberculosis}, + journal = TMI, + year = {2016}, + volume = {35}, + number = {4}, + month = {4}, + pages = {1013--1024}, + doi = {10.1109/TMI.2015.2505672}, + url = {http://dx.doi.org/10.1109/TMI.2015.2505672}, + abstract = {The major advantage of multiple-instance learning (MIL) applied to a computer-aided detection (CAD) system is that it allows optimizing the latter with case-level labels instead of accurate lesion outlines as traditionally required for a supervised approach. 
As shown in previous work, a MIL-based CAD system can perform comparably to its supervised counterpart considering complex tasks such as chest radiograph scoring in tuberculosis (TB) detection. However, despite this remarkable achievement, the uncertainty inherent to MIL can lead to a less satisfactory outcome if analysis at lower levels (e.g., regions or pixels) is needed. This issue may seriously compromise the applicability of MIL to tasks related to quantification or grading, or detection of highly localized lesions. In this paper, we propose to reduce uncertainty by embedding a MIL classifier within an active learning (AL) framework. To minimize the labeling effort, we develop a novel instance selection mechanism that exploits the MIL problem definition through one-class classification. We adapt this mechanism to provide meaningful regions instead of individual instances for expert labeling, which is a more appropriate strategy given the application domain. In addition, and contrary to usual AL methods, a single iteration is performed. To show the effectiveness of our approach, we compare the output of a MIL-based CAD system trained with and without the proposed AL framework. The task is to detect textural abnormalities related to TB. Both quantitative and qualitative evaluations at the pixel level are carried out. In the former case, receiver operating characteristic and precision-recall analyses are used. For completeness, we also compare with a CAD system following a supervised approach and related baselines. The quantitative results show that the proposed method significantly improves the MIL-based classification and narrows the gap with the supervised approach in 50\% and 75\%, depending on if 100 or 300 labeled regions are processed, but with only 5\% and 16\% of the labeling effort. In qualitative terms, a substantial improvement in lesion localization and false-positive detections is observed. The proposed method also outperforms the aforementioned baselines.}, + file = {Mele16.pdf:pdf\\Mele16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26660889}, + gsid = {14270097264018368898}, + gscites = {46}, + ss_id = {b0a13599be9ea703f27a90bc544511ed8c93b2b5}, + all_ss_ids = {['b0a13599be9ea703f27a90bc544511ed8c93b2b5']}, +} + +@article{Mele16a, + author = {Melendez, Jaime and S\'{a}nchez, Clara I. and Philipsen, Rick H. H. M. and Maduskar, Pragnya and Dawson, Rodney and Theron, Grant and Dheda, Keertan and {van Ginneken}, Bram}, + title = {An automated tuberculosis screening strategy combining X-ray-based computer-aided detection and clinical information}, + journal = NATSCIREP, + year = {2016}, + volume = {6}, + pages = {25265}, + doi = {10.1038/srep25265}, + url = {http://dx.doi.org/10.1038/srep25265}, + abstract = {Lack of human resources and radiological interpretation expertise impair tuberculosis (TB) screening programmes in TB-endemic countries. Computer-aided detection (CAD) constitutes a viable alternative for chest radiograph (CXR) reading. However, no automated techniques that exploit the additional clinical information typically available during screening exist. To address this issue and optimally exploit this information, a machine learning-based combination framework is introduced. We have evaluated this framework on a database containing 392 patient records from suspected TB subjects prospectively recruited in Cape Town, South Africa. Each record comprised a CAD score, automatically computed from a CXR, and 12 clinical features. 
Comparisons with strategies relying on either CAD scores or clinical information alone were performed. Our results indicate that the combination framework outperforms the individual strategies in terms of the area under the receiver operating characteristic curve (0.84 versus 0.78 and 0.72), specificity at 95\% sensitivity (49\% versus 24\% and 31\%) and negative predictive value (98\% versus 95\% and 96\%). Thus, it is believed that combining CAD and clinical information to estimate the risk of active disease is a promising tool for TB screening.}, + file = {Mele16a.pdf:pdf\\Mele16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27126741}, + month = {4}, + gsid = {1152274775860878952}, + gscites = {103}, + ss_id = {3b198ccfa068ba133c3c1eae1c8beac1d2ce88e8}, + all_ss_ids = {['3b198ccfa068ba133c3c1eae1c8beac1d2ce88e8']}, +} + +@article{Mele17, + author = {Melendez, J and Philipsen, R H H M and Chanda-Kapata, P and Sunkutu, V and Kapata, N and van Ginneken, B}, + title = {Automatic versus human reading of chest X-rays in the Zambia National Tuberculosis Prevalence Survey}, + journal = IJTLD, + year = {2017}, + volume = {21}, + issue = {8}, + month = {8}, + pages = {880--886}, + doi = {10.5588/ijtld.16.0851}, + abstract = {Tuberculosis (TB) prevalence survey in Zambia between 2013 and 2014. To compare the performance of automatic software (CAD4TB 5) in chest X-ray (CXR) reading with that of field (general practitioners) and central (radiologists) readers. A retrospective study comparing the performance of human and automatic reading was conducted. Two scenarios for central reading were evaluated: abnormalities not consistent with TB were considered to be 'normal' or 'abnormal'. Sputum culture was defined as the reference standard. Measures derived from receiver operating characteristic analysis were used to assess readers' performances. Of 46 099 participants, 23 838 cases included all survey information; of these, 106 cases were culture-confirmed TB-positive. The performance of CAD4TB 5 was similar to that of field and central readers. Although there were significant differences in specificity when compared with field readings (P = 0.002) and central readings considering the first scenario (P < 0.001), these differences were not substantial (93.2% vs. 92.6% and 98.4% vs. 99.6%, respectively). CONCLUSION: The performance of automatic CXR readings is comparable with that of human experts in a TB prevalence survey setting using culture as reference.}, + file = {Mele17.pdf:pdf\\Mele17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28786796}, + gsid = {14209453799880160478}, + gscites = {20}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/181866}, + ss_id = {8125ba843b6e5c20524c9b549398a368e0613d88}, + all_ss_ids = {['8125ba843b6e5c20524c9b549398a368e0613d88']}, +} + +@article{Mele18, + author = {Melendez, J. and Hogeweg, L. and S\'{a}nchez, C. I. and Philipsen, R. H. H. M. and Aldridge, R. W. and Hayward, A.C. and Abubakar, I. and van Ginneken, B. and Story, A.}, + title = {Accuracy of an automated system for tuberculosis detection on chest radiographs in high-risk screening}, + journal = IJTLD, + year = {2018}, + volume = {22}, + number = {5}, + pages = {567-571}, + doi = {10.5588/ijtld.17.0492}, + abstract = {With 10.4 million new cases and 1.8 million deaths in 2015, tuberculosis (TB) remains a major health concern. 
Prevalence is highest in Africa and overall incidence in Asia. Although TB incidence in the West has decreased, increases in TB rates have been reported in high-risk populations, especially in urban settings. Despite efforts to develop new TB diagnostics, screening is still commonly performed using chest radiography, followed by sputum culture, Xpert (Cepheid, Sunnyvale, CA, USA) testing or smear microscopy. Early studies reported limited specificity and variable levels of inter- and intra-reader agreement in interpreting chest radiographs (CXRs) for TB detection. However, modern digital radiography provides a quick and reliable technique with low marginal and operational costs, and its use, together with standardised scoring, may improve performance and reader agreement.}, + file = {Mele18.pdf:pdf\\Mele18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29663963}, + month = {5}, + gsid = {3793586673643236456}, + gscites = {21}, + ss_id = {0831d694fa14d96916f62226c6c74d862f90772c}, + all_ss_ids = {['0831d694fa14d96916f62226c6c74d862f90772c']}, +} + +@article{Mema05, + author = {Memarsadeghi, Mazda and Heinz-Peer, Gertraud and Helbich, Thomas H. and Schaefer-Prokop, Cornelia and Kramer, Gero and Scharitzer, Martina and Prokop, Mathias}, + title = {Unenhanced multi-detector row {CT} in patients suspected of having urinary stone disease: effect of section width on diagnosis}, + journal = Radiology, + year = {2005}, + volume = {235}, + pages = {530--536}, + doi = {10.1148/radiol.2352040448}, + abstract = {To assess prospectively the effect of section width in multi-detector row computed tomographic (CT) evaluation of patients with acute flank pain who are suspected of having or known to have urinary stone disease. This study was approved by the ethics committee of the authors' university, and written informed consent was obtained from all patients. One hundred forty-seven patients (age range, 11-101 years; mean, 51.4 years +/- 18.7 [standard deviation]) suspected of having urinary stone disease underwent unenhanced multi-detector row CT. CT was performed with four detector rows, a section thickness of 1.0 mm, an effective tube current-time product of 100 mAs, and a tube voltage of 120 kVp (CT dose index, 11.4 mGy). From these data, three sets of transverse images were reconstructed with section widths of 1.5, 3.0, and 5.0 mm and approximately 50\% of overlap each. Scans were evaluated in varying random orders by two radiologists for the number, size, and location of uroliths and nephroliths and for the presence of phleboliths, renal cysts, and secondary signs of obstruction. The significance of differences between the numbers of detected calcifications and the numbers of associated abnormalities on the scans obtained with varying section widths was tested with the McNemar test at a P level of less than .05. Spearman rho rank correlation coefficients were calculated to assess the correlation between the presence of uroliths and the presence of secondary signs. Uroliths were found in 72 of 147 (49.0\%) patients, and nephroliths were found in 16 patients (10.9\%). There was no significant difference between section widths of 1.5 and 3.0 mm with regard to the number of detected stones (264 uroliths and 61 nephroliths for both protocols). Transverse sections 5.0-mm wide revealed significantly fewer uroliths (n = 231; P < .001) and nephroliths (n = 47; P < .016). The final diagnosis was changed in four of 72 patients. 
All missed renal and ureteral stones measured less than 3 mm in diameter.Overlapping 3-mm sections are sufficient for the detection of urinary stone disease. Small calculi (<3 mm) may be missed on 5.0-mm-thick sections.}, + file = {Mema05.pdf:pdf\\Mema05.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {15758192}, + month = {5}, + gsid = {2394778339010199585}, + gscites = {92}, +} + +@inproceedings{Mend06, + author = {A. M. Mendrik and E. P. A. Vonken and A. M. R. Schilham and M. A. Viergever and B. van Ginneken}, + title = {Hybrid diffusion compared with existing diffusion schemes on simulated low dose {CT} scans}, + booktitle = ISBI, + year = {2006}, + pages = {1008-1011}, + doi = {10.1109/ISBI.2006.1625091}, + abstract = {{B}ecause of the growing interest in low dose computed tomography ({CT}) scanning, noise has become a major issue in {CT} imaging. {D}iffusion filtering is a well-known technique for filtering noise from images. {W}e propose a hybrid diffusion model, which combines edge-enhancing diffusion ({EED}) and coherence-enhancing diffusion ({CED}) in a continuous manner. {T}his diffusion model is compared with five existing diffusion schemes. {Q}uantitative experimental results are presented on clinical {CT} chest scans, using high dose and simulated low dose scans.}, + file = {Mend06.pdf:pdf\\Mend06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {7760805046827918819}, + gscites = {4}, + ss_id = {c0dcc63c3114c66cadb229b2f66e7a0b7ee3fbb5}, + all_ss_ids = {['c0dcc63c3114c66cadb229b2f66e7a0b7ee3fbb5']}, +} + +@conference{Mend07a, + author = {A. M. Mendrik and E. P. A Vonken and B. van Ginneken and M. Prokop}, + title = {Automatic segmentation of arteries and veins in 4{D} cerebral {CT} perfusion scans}, + booktitle = RSNA, + year = {2007}, + pages = {644}, + optnote = {4DCT, DIAG, RADIOLOGY}, +} + +@inproceedings{Mend09, + author = {A. Mendrik and E. P. A. Vonken and A. Waaijer and E. Smit and M. Prokop and B. van Ginneken}, + title = {Segmentation of arteries and veins on {4D CT} perfusion scans for constructing arteriograms and venograms}, + booktitle = MI, + year = {2009}, + volume = {7259}, + series = SPIE, + pages = {72590Z1--72590Z7}, + doi = {10.1117/12.812135}, + abstract = {3{D} {CT} {A}ngiography ({CTA}) scans are currently used to assess the cerebral arteries. {A}n additional 4{D} {CT} {P}erfusion ({CTP}) scan is often acquired to determine perfusion parameters in the cerebral parenchyma. {W}e propose a method to extract a three dimensional volume showing either the arteries (arteriogram) or the veins (venogram) from the 4{D} {CTP} scan. {T}his would allow cerebrovascular assessment using the {CTP} scan and obviate the need for acquiring an additional {CTA} scan. {P}reprocessing steps consist of registration of the time volumes of the {CTP} scan using rigid registration and masking out extracranial structures, bone and air. {N}ext a 3{D} volume is extracted containing the vessels (vascular volume) by using the absolute area under the first derivative curve in time. {T}o segment the arteries and veins we use the time to peak of the contrast enhancement curve combined with region growing within a rough vessel segmentation. {F}inally the artery/vein segmentation is used to suppress either the veins or the arteries in the vascular volume to construct the arteriogram and venogram. 
{T}o evaluate the method, 11 arteriograms and venograms were visually inspected by an expert observer, with special attention to the important cerebral arteries ({C}ircle of {W}illis) and veins (straight and transverse sinus). {R}esults show that the proposed method is effective in extracting the major cerebral arteries and veins from {CTP} scans.}, + file = {Mend09.pdf:pdf\\Mend09.pdf:PDF}, + optnote = {4DCT, DIAG, RADIOLOGY}, + month = {2}, + gsid = {3518168224524955974}, + gscites = {1}, + ss_id = {fc218a97ffea02d88a808f2706c966874c998ce5}, + all_ss_ids = {['fc218a97ffea02d88a808f2706c966874c998ce5']}, +} + +@article{Mend09a, + author = {A. M. Mendrik and E.-J. Vonken and A. Rutten and M. A. Viergever and B. van Ginneken}, + title = {Noise reduction in computed tomography scans using 3-d anisotropic hybrid diffusion with continuous switch}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {1585--1594}, + doi = {10.1109/TMI.2009.2022368}, + abstract = {{N}oise filtering techniques that maintain image contrast while decreasing image noise have the potential to optimize the quality of computed tomography ({CT}) images acquired at reduced radiation dose. {I}n this paper, a hybrid diffusion filter with continuous switch ({HDCS}) is introduced, which exploits the benefits of three-dimensional edge-enhancing diffusion ({EED}) and coherence-enhancing diffusion ({CED}). {N}oise is filtered, while edges, tubular structures, and small spherical structures are preserved. {F}rom ten high dose thorax {CT} scans, acquired at clinical doses, ultra low dose ( 15 m{A}s ) scans were simulated and used to evaluate and compare {HDCS} to other diffusion filters, such as regularized {P}erona-{M}alik diffusion and {EED}. {Q}uantitative results show that the {HDCS} filter outperforms the other filters in restoring the high dose {CT} scan from the corresponding simulated low dose scan. {A} qualitative evaluation was performed on filtered real low dose {CT} thorax scans. {A}n expert observer scored artifacts as well as fine structures and was asked to choose one of three scans (two filtered (blinded), one unfiltered) for three different settings (trachea, lung, and mediastinal). {O}verall, the {HDCS} filtered scan was chosen most often.}, + file = {Mend09a.pdf:pdf\\Mend09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {19783496}, + month = {10}, + gsid = {8629946616599488980}, + gscites = {104}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/81291}, + ss_id = {5fd0e531592071d01d4286f08566a86362e26df9}, + all_ss_ids = {['5fd0e531592071d01d4286f08566a86362e26df9']}, +} + +@conference{Mend09b, + author = {A. M. Mendrik and E. P. A. Vonken and E. J. Smit and B. van Ginneken and M. Prokop}, + title = {Reduction of {V}enous {O}verprojection {U}sing {C}erebral {A}rteriograms from {CT} {P}erfusion {S}cans of {S}ubarachnoid {H}emorrhage {P}atients}, + booktitle = RSNA, + year = {2009}, + optnote = {4DCT, DIAG, RADIOLOGY}, +} + +@inproceedings{Mend10, + author = {A. Mendrik and E. P. A. Vonken and J. W. Dankbaar and M. Prokop and B. 
van Ginneken}, + title = {Noise filtering in thin-slice {4D} cerebral {CT} perfusion scans}, + booktitle = MI, + year = {2010}, + volume = {7623}, + series = SPIE, + pages = {76230N1-76230N8}, + doi = {10.1117/12.843813}, + abstract = {{P}atients suffering from cerebral ischemia or subarachnoid hemorrhage, undergo a 4{D} (3{D}+time) {CT} {P}erfusion ({CTP}) scan to assess the cerebral perfusion and a {CT} {A}ngiography ({CTA}) scan to assess the vasculature. {T}he aim of our research is to extract the vascular information from the {CTP} scan. {T}his requires thin-slice {CTP} scans that suffer from a substantial amount of noise. {T}herefore noise reduction is an important prerequisite for further analysis. {S}o far, the few noise filtering methods for 4{D} datasets proposed in literature deal with the temporal dimension as a 4th dimension similar to the 3 spatial dimensions, mixing temporal and spatial intensity information. {W}e propose a bilateral noise reduction method based on time-intensity profile similarity ({TIPS}), which reduces noise while preserving temporal intensity information. {TIPS} was compared to 4{D} bilateral filtering on 10 patient {CTP} scans and, even though {TIPS} bilateral filtering is much faster, it results in better vessel visibility and higher image quality ranking (observer study) than 4{D} bilateral filtering.}, + file = {Mend10.pdf:pdf\\Mend10.pdf:PDF}, + optnote = {4DCT, DIAG, RADIOLOGY}, + month = {3}, + gsid = {1760708134553287553}, + gscites = {6}, + ss_id = {824ab3050caa860dc50cbe8859a5b2249588a5e2}, + all_ss_ids = {['824ab3050caa860dc50cbe8859a5b2249588a5e2']}, +} + +@article{Mend10a, + author = {A. Mendrik and E. Vonken and B. van Ginneken and E. J. Smit and A. Waaijer and G. Bertolini and M. A. Viergever and M. Prokop}, + title = {Automatic segmentation of intracranial arteries and veins in four-dimensional cerebral {CT} perfusion scans}, + journal = MP, + year = {2010}, + volume = {37}, + pages = {2956--2966}, + doi = {10.1118/1.3397813}, + abstract = {PURPOSE: CT angiography (CTA) scans are the current standard for vascular analysis of patients with cerebrovascular diseases, such as acute stroke and subarachnoid hemorrhage. An additional CT perfusion (CTP) scan is acquired of these patients to assess the perfusion of the cerebral tissue. The aim of this study is to extend the diagnostic yield of the CTP scans to also include vascular information. METHODS: CTP scans are acquired by injecting contrast material and repeatedly scanning the head over time. Therefore, time-intensity profiles are available for each voxel in the scanned volume, resulting in a 4D dataset. These profiles can be utilized to differentiate not only between vessels and background but also between arteries and veins. In this article, a fully automatic method is proposed for the segmentation of the intracranial arteries and veins from 4D cerebral CTP scans. Furthermore, a vessel enhanced volume is presented, in which the vasculature is highlighted and background structures are suppressed. Combining this volume with the artery/vein segmentation results in an arteriogram and a venogram, which could serve as additional means for vascular analysis in patients with cerebrovascular diseases. The artery/vein segmentation is quantitatively evaluated by comparing the results to manual segmentations by two expert observers. 
RESULTS: Results (paired two-tailed t-test) show that the accuracies of the proposed artery/vein labeling are not significantly different from the accuracies of the expert observer manual labeling (ground truth). Moreover, sensitivity and specificity of the proposed artery/vein labeling, relative to both expert observer ground truths, were similar to the sensitivity and specificity of the expert observer labeling compared to each other. CONCLUSIONS: The proposed method for artery/vein segmentation is shown to be very accurate for arteries and veins with normal perfusion. Combining the artery/vein segmentation with the vessel enhanced volume produces an arteriogram and a venogram, which have the potential to extend the diagnostic yield of CTP scans and replace the additional CTA scan, but could also be helpful to radiologists in addition to the CTA scan.}, + file = {Mend10a.pdf:pdf\\Mend10a.pdf:PDF}, + optnote = {4DCT, DIAG, RADIOLOGY}, + number = {6}, + pmid = {20632608}, + month = {5}, + gsid = {17637183029799425094}, + gscites = {33}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88438}, + ss_id = {375ac3921f2fe9516b01a35a37aedd05f2093836}, + all_ss_ids = {['375ac3921f2fe9516b01a35a37aedd05f2093836', '5aa81fedd2e09d24fd1ba706782a6ce24cde5483']}, +} + +@phdthesis{Mend10b, + author = {A. M. Mendrik}, + title = {Noise Reduction and Vascular Enhancement in {4D CT} Perfusion Scans}, + year = {2010}, + url = {http://igitur-archive.library.uu.nl/dissertations/2009-0923-200149/UUindex.html}, + abstract = {Computed tomography (CT) uses X-ray radiation to construct images. Applying X-ray radiation to the human body may damage the tissue and increases the risk of inducing cancer. Therefore, the radiation dose should be kept as low as reasonably achievable (ALARA). This is especially true for 4D CT perfusion (CTP) scans, since these scans consist of multiple sequential 3D CT scans over time. Therefore, not much radiation dose can be used per sequential scan. However, lowering the radiation dose increases the amount of noise in CT scans and therefore decreases image quality. This thesis describes two ways of dealing with radiation dose reduction in terms of image processing: 1) Image processing techniques can be utilized to extract as much information as possible from one CT scan, obviating the need for additional CT scans, therefore reducing radiation dose. Chapter 2 describes a method to automatically derive vascular information from 4D CTP scans and separate the arteries from the veins. The resulting CTP-derived arteriogram and venogram have the potential to replace the CT angiography (CTA) scan that is routinely acquired in addition to the CTP scan to assess the arteries. In Chapter 3 the quality of the arteriograms derived from the CTP data is evaluated by comparing them with standard CTA scans. The CTP-derived arteriograms provide improved visualization of the Circle of Willis arteries compared to standard CTA, due to effective suppression of superimposing structures such as veins, bone and subarachnoid blood. 2) Image processing techniques can be utilized to reduce the noise that is introduced by minimizing the radiation dose. In Chapter 4 an anisotropic diffusion method is described, entitled hybrid diffusion with continuous switch (HDCS), for reducing noise in 3D CT scans while preserving edges, tubular structures, and small spherical structures. 
Chapter 5 describes a bilateral noise reduction method, entitled time-intensity profile similarity (TIPS) bilateral filtering, for filtering noise from 4D CTP scans, while preserving the time-intensity profiles that are important for perfusion analysis. This filter improves the quality of cerebral perfusion maps derived from the CTP data, which are used to detect areas of abnormal perfusion in the brain. Chapter 6 describes an anisotropic diffusion method to reduce noise in 4D CTP scans that combines the HDCS filter proposed in Chapter 4 with the TIPS measure proposed in Chapter 5, using the 4th dimension to distinguish between structures. This filter enhances vessels, while reducing noise in 4D CTP scans and was evaluated by deriving arteriograms and venograms from the filtered CTP data. These arteriograms and venograms showed reduced background noise and improved visualization of small arteries and veins. The last chapter of this thesis describes how the methods proposed in this thesis can be used in future applications.}, + copromotor = {B. van Ginneken and E. P. A. Vonken}, + file = {Mend10b.pdf:pdf\\Mend10b.pdf:PDF}, + optnote = {4DCT, DIAG, RADIOLOGY}, + promotor = {M. A. Viergever and W. M. Prokop}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Mend11, + author = {A. M. Mendrik and E. Vonken and B. van Ginneken and H. W. de Jong and A. Riordan and T. van Seeters and E. J. Smit and M. A. Viergever and M. Prokop}, + title = {{TIPS} bilateral noise reduction in {4D CT} perfusion scans produces high-quality cerebral blood flow maps}, + journal = PMB, + year = {2011}, + volume = {56}, + pages = {3857--3872}, + doi = {10.1088/0031-9155/56/13/008}, + abstract = {Cerebral computed tomography perfusion (CTP) scans are acquired to detect areas of abnormal perfusion in patients with cerebrovascular diseases. These 4D CTP scans consist ofmultiple sequential 3D CT scans over time. Therefore, to reduce radiation exposure to the patient, the amount of x-ray radiation that can be used per sequential scan is limited, which results in a high level of noise. To detect areas of abnormal perfusion, perfusion parameters are derived from the CTP data, such as the cerebral blood flow (CBF). Algorithms to determine perfusion parameters, especially singular value decomposition, are very sensitive to noise. Therefore, noise reduction is an important preprocessing step for CTP analysis. In this paper, we propose a time-intensity profile similarity (TIPS) bilateral filter to reduce noise in 4D CTP scans, while preserving the time-intensity profiles (fourth dimension) that are essential for determining the perfusion parameters. The proposed TIPS bilateral filter is compared to standard Gaussian filtering, and 4D and 3D (applied separately to each sequential scan) bilateral filtering on both phantom and patient data. Results on the phantom data show that the TIPS bilateral filter is best able to approach the ground truth (noise-free phantom), compared to the other filtering methods (lowest root mean square error). An observer study is performed using CBF maps derived from fifteen CTP scans of acute stroke patients filtered with standard Gaussian, 3D, 4D and TIPS bilateral filtering. These CBF maps were blindly presented to two observers that indicated which map they preferred for (1) gray/white matter differentiation, (2) detectability of infarcted area and (3) overall image quality. 
Based on these results, the TIPS bilateral filter ranked best and its CBF maps were scored to have the best overall image quality in 100% of the cases by both observers. Furthermore, quantitative CBF and cerebral blood volume values in both the phantom and the patient data showed that the TIPS bilateral filter resulted in realistic mean values with a smaller standard deviation than the other evaluated filters and higher contrast-to-noise ratios. Therefore, applying the proposed TIPS bilateral filtering method to 4D CTP data produces higher quality CBF maps than applying the standard Gaussian, 3D bilateral or 4D bilateral filter. Furthermore, the TIPS bilateral filter is computationally faster than both the 3D and 4D bilateral filters.}, + file = {Mend11.pdf:pdf\\Mend11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21654042}, + month = {6}, + gsid = {12609398248666249823}, + gscites = {84}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/98391}, + ss_id = {12e543454c9b14e372718e9618fa40685db83ad5}, + all_ss_ids = {['12e543454c9b14e372718e9618fa40685db83ad5']}, +} + +@article{Mend12, + author = {Mendrik, A. M. and Vonken, E P A. and de Kort, G A P. and van Ginneken, B. and Smit, E. J. and Viergever, M. A. and Prokop, M.}, + title = {Improved Arterial Visualization in Cerebral {CT} Perfusion-Derived Arteriograms Compared with Standard {CT} Angiography: A Visual Assessment Study}, + journal = AJNR, + year = {2012}, + volume = {33}, + pages = {2171-2177}, + doi = {10.3174/ajnr.A3118}, + abstract = {BACKGROUND AND PURPOSE:Invasive cerebral DSA has largely been replaced by CTA, which is noninvasive but has a compromised arterial view due to superimposed bone and veins. The purpose of this study was to evaluate whether arterial visualization in CTPa is superior to standard CTA, which would eliminate the need for an additional CTA scan to assess arterial diseases and therefore reduce radiation dose.MATERIALS AND METHODS:In this study, we included 24 patients with subarachnoid hemorrhage for whom CTA and CTP were available. Arterial quality and presence of superimposed veins and bone in CTPa were compared with CTA and scored by 2 radiologists by using a VAS (0\%-100\%). Average VAS scores were determined and VAS scores per patient were converted to a 10-point NRS. Arterial visualization was considered to be improved when the highest rate (NRS 10, VAS > 90\%) was scored for arterial quality, and the lowest rate (NRS 1, VAS < 10\%), for the presence of superimposed veins and bone. A sign test with continuity correction was used to test whether the number of cases with these rates was significant.RESULTS:Average VAS scores in the proximal area were 94\% (arterial quality), 4\% (presence of bone), and 7\% (presence of veins). 
In this area, the sign test showed that a significant number of cases scored NRS 10 for arterial quality (P < .02) and NRS 1 for the presence of superimposed veins and bone (P < .01).CONCLUSIONS:Cerebral CTPa shows improved arterial visualization in the proximal area compared with CTA, with similar arterial quality but no superimposed bone and veins.}, + file = {Mend12.pdf:pdf\\Mend12.pdf:PDF}, + optnote = {DIAG}, + number = {11}, + pmid = {22627803}, + month = {5}, + gsid = {12508173963296523395}, + gscites = {11}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/108266}, + ss_id = {b10df775262da5a98ec6cf85403a7bbfd26c1aeb}, + all_ss_ids = {['b10df775262da5a98ec6cf85403a7bbfd26c1aeb']}, +} + +@inproceedings{Mend15a, + author = {Mendrik, Adri{\"e}nne M and Vonken, Evert-jan and Witkamp, Theo and Prokop, Mathias and van Ginneken, Bram and Viergever, Max A}, + title = {Using the Fourth Dimension to Distinguish Between Structures for Anisotropic Diffusion Filtering in {4D CT} Perfusion Scans}, + booktitle = {Spatio-temporal Image Analysis for Longitudinal and Time-Series Image Data}, + year = {2015}, + volume = {8682}, + series = LNCS, + doi = {10.1007/978-3-319-14905-9_7}, + publisher = {Springer International Publishing}, + pages = {79--87}, + optnote = {DIAG, RADIOLOGY}, + gsid = {4332605455572363117}, + gscites = {3}, + ss_id = {5e8d92d23959661851fa64fee44597df24b828b2}, + all_ss_ids = {['5e8d92d23959661851fa64fee44597df24b828b2']}, +} + +@article{Meno23, + author = {Menotti, Laura and Silvello, Gianmaria and Atzori, Manfredo and Boytcheva, Svetla and Ciompi, Francesco and Di Nunzio, Giorgio Maria and Fraggetta, Filippo and Giachelle, Fabio and Irrera, Ornella and Marchesin, Stefano and Marini, Niccol\`{o} and M\"{u}ller, Henning and Primov, Todor}, + title = {Modelling digital health data: The ExaMode ontology for computational pathology}, + doi = {10.1016/j.jpi.2023.100332}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.jpi.2023.100332}, + file = {Meno23.pdf:pdf\Meno23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Pathology Informatics}, + citation-count = {0}, + automatic = {yes}, + pages = {100332}, + volume = {14}, + ss_id = {2ca6685fcb499bee0f054df1fdc4b3ae298be614}, + all_ss_ids = {['2ca6685fcb499bee0f054df1fdc4b3ae298be614']}, + gscites = {0}, +} + +@inproceedings{Merc19, + author = {Mercan, Caner and Balkenhol, Maschenka and van der Laak, Jeroen and Ciompi, Francesco}, + title = {From Point Annotations to Epithelial Cell Detection in Breast Cancer Histopathology using RetinaNet}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=BJlvCtU45E}, + abstract = {Detection of epithelial cells has powerful implications such as being an integral part of nuclear pleomorphism scoring for breast cancer grading. We exploit the point annotations inside nuclei boundaries to estimate their bounding boxes using empirical analysis on the cell bodies and the coarse instance segmentation masks obtained from an image segmentation algorithm. 
Our experiments show that training a state-of-the-art object detection network with a recently proposed optimizer on simple bounding box estimations performs promising epithelial cell detection, achieving a mean average precision (mAP) score of 71.36\% on tumor and 59.65\% on benign cells in the test set.}, + file = {Merc19.pdf:pdf\\Merc19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {c3533243da19c79f7cf819315b7b9ed2e603a6c2}, + all_ss_ids = {['c3533243da19c79f7cf819315b7b9ed2e603a6c2']}, + gscites = {5}, +} + +@inproceedings{Merc20, + author = {Caner Mercan and Germonda Reijnen-Mooij and David Tellez Martin and Johannes Lotz and Nick Weiss and Marcel van Gerven and Francesco Ciompi}, + title = {Virtual staining for mitosis detection in Breast Histopathology}, + booktitle = ISBI, + year = {2020}, + pages = {1770-1774}, + doi = {10.1109/ISBI45749.2020.9098409}, + abstract = {We propose a virtual staining methodology based on Generative Adversarial Networks to map histopathology images of breast cancer tissue from H&E stain to PHH3 and vice versa. We use the resulting synthetic images to build Convolutional Neural Networks (CNN) for automatic detection of mitotic figures, a strong prognostic biomarker used in routine breast cancer diagnosis and grading. We propose several scenarios, in which CNN trained with synthetically generated histopathology images perform on par with or even better than the same baseline model trained with real images. We discuss the potential of this application to scale the number of training samples without the need for manual annotations.}, + file = {:pdf/Merc20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + ss_id = {e8abcf480f30bf5c079023c0a5bf80308dcd78ab}, + all_ss_ids = {['e8abcf480f30bf5c079023c0a5bf80308dcd78ab']}, + gscites = {22}, +} + +@conference{Merc20a, + author = {Mercan, Caner and Balkenhol, Maschenka and Laak, Jeroen van der and Ciompi, Francesco}, + title = {Grading nuclear pleomorphism in breast cancer using deep learning}, + booktitle = ECP, + year = {2020}, + abstract = {Nuclear pleomorphism is defined as the variability in size and shape of tumor cells as compared to normal epithelial cells. Objective of this study is to train a deep neural network that can achieve pathologist-level pleomorphism grading performance. We collected 29 whole slide images (WSI) of breast cancer resections in which we manually selected 90 regions of interest, ensuring grade homogeneity of tumour cells within a region. Subsequently, we cropped regions of ~0.38 mm2 at 40X magnification (0.25 um/px) and asked six pathologists to grade each region independently. We used an epithelial cell detector network to detect the epithelial cells in each region and extracted fixed-size patches from these regions with high tumor density. For the task of pleomorphism grading, we trained a densenet model on those patches with the majority voting of the grades of the pathologists (majority grades). The variation of kappa scores of the pathologists with the majority grade was very high, ranging between 0.37 and 0.69 on the standalone test set consisting of 18 regions from 7 WSI. On the same test set, our densenet model had a kappa score of 0.47 with the majority grades. We demonstrated that our network trained only on tumor cells achieved performance on the task of pleomorphism grading of breast cancer around the low mid-range of the inter-pathologist variability where the inter-observer variability of pathologists was very high. 
Future research will include scores of a larger panel of pathologists, and study alternative deep learning strategies to improve the performance.}, + optnote = {DIAG}, +} + +@article{Merc22, + author = {Mercan, Caner and Balkenhol, Maschenka and Salgado, Roberto and Sherman, Mark and Vielh, Philippe and Vreuls, Willem and Polonia, Antonio and Horlings, Hugo M. and Weichert, Wilko and Carter, Jodi M. and Bult, Peter and Christgen, Matthias and Denkert, Carsten and van de Vijver, Koen and Bokhorst, John-Melle and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Deep learning for fully-automated nuclear pleomorphism scoring in breast cancer.}, + doi = {10.1038/s41523-022-00488-w}, + issue = {1}, + pages = {120}, + volume = {8}, + abstract = {To guide the choice of treatment, every new breast cancer is assessed for aggressiveness (i.e., graded) by an experienced histopathologist. Typically, this tumor grade consists of three components, one of which is the nuclear pleomorphism score (the extent of abnormalities in the overall appearance of tumor nuclei). The degree of nuclear pleomorphism is subjectively classified from 1 to 3, where a score of 1 most closely resembles epithelial cells of normal breast epithelium and 3 shows the greatest abnormalities. Establishing numerical criteria for grading nuclear pleomorphism is challenging, and inter-observer agreement is poor. Therefore, we studied the use of deep learning to develop fully automated nuclear pleomorphism scoring in breast cancer. The reference standard used for training the algorithm consisted of the collective knowledge of an international panel of 10 pathologists on a curated set of regions of interest covering the entire spectrum of tumor morphology in breast cancer. To fully exploit the information provided by the pathologists, a first-of-its-kind deep regression model was trained to yield a continuous scoring rather than limiting the pleomorphism scoring to the standard three-tiered system. Our approach preserves the continuum of nuclear pleomorphism without necessitating a large data set with explicit annotations of tumor nuclei. Once translated to the traditional system, our approach achieves top pathologist-level performance in multiple experiments on regions of interest and whole-slide images, compared to a panel of 10 and 4 pathologists, respectively.}, + file = {Merc22.pdf:pdf\\Merc22.pdf:PDF}, + journal = {NPJ breast cancer}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36347887}, + year = {2022}, + ss_id = {76f8e809b6ffac6eac0806d590c0c2519d75dac2}, + all_ss_ids = {['76f8e809b6ffac6eac0806d590c0c2519d75dac2']}, + gscites = {5}, +} + +@article{Mert12, + author = {Mertzanidou, Thomy and Hipwell, John and Cardoso, M Jorge and Zhang, Xiying and Tanner, Christine and Ourselin, Sebastien and Bick, Ulrich and Huisman, Henkjan and Karssemeijer, Nico and Hawkes, David}, + title = {{MRI} to X-ray mammography registration using a volume-preserving affine transformation}, + journal = MIA, + year = {2012}, + volume = {16}, + pages = {966--975}, + doi = {10.1016/j.media.2012.03.001}, + abstract = {X-ray mammography is routinely used in national screening programmes and as a clinical diagnostic tool. Magnetic Resonance Imaging (MRI) is commonly used as a complementary modality, providing functional information about the breast and a 3D image that can overcome ambiguities caused by the superimposition of fibro-glandular structures associated with X-ray imaging. 
Relating findings between these modalities is a challenging task however, due to the different imaging processes involved and the large deformation that the breast undergoes. In this work we present a registration method to determine spatial correspondence between pairs of MR and X-ray images of the breast, that is targeted for clinical use. We propose a generic registration framework which incorporates a volume-preserving affine transformation model and validate its performance using routinely acquired clinical data. Experiments on simulated mammograms from 8 volunteers produced a mean registration error of 3.8 +/- 1.6mm for a mean of 12 manually identified landmarks per volume. When validated using 57 lesions identified on routine clinical CC and MLO mammograms (n=113 registration tasks) from 49 subjects the median registration error was 13.1mm. When applied to the registration of an MR image to CC and MLO mammograms of a patient with a localisation clip, the mean error was 8.9mm. The results indicate that an intensity based registration algorithm, using a relatively simple transformation model, can provide radiologists with a clinically useful tool for breast cancer diagnosis.}, + file = {Mert12.pdf:pdf\\Mert12.pdf:PDF}, + optnote = {DIAG}, + number = {5}, + pmid = {22513136}, + month = {7}, + gsid = {877286232400307497}, + gscites = {28}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/108597}, + ss_id = {a04e943e648173c7944c554bc587960765feb114}, + all_ss_ids = {['a04e943e648173c7944c554bc587960765feb114']}, +} + +@inproceedings{Mert16, + author = {Mertzanidou, T. and Hipwell, J.H. and Reis, S. and Bejnordi, B. Ehteshami and Hermsen, M. and Dalmis, M. and Vreemann, S. and Platel, B. and van der Laak, J. and Karssemeijer, N. and Mann, R. and Bult, P. and Hawkes, D.J.}, + title = {Whole Mastectomy Volume Reconstruction from 2D Radiographs and Its Mapping to Histology}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {367-374}, + doi = {10.1007/978-3-319-41546-8_46}, + abstract = {Women that are diagnosed with breast cancer often undergo surgery to remove either the tumour and some of the surrounding tissue (lumpectomy) or the whole breast (mastectomy). After surgery, the excised tissue is sliced at the pathology department, where specimen radiographs of the slices are typically acquired. Representative parts of the tissue are then sampled for further processing, staining and examination under the microscope. The results of histopathological imaging are used for tumour characterisation. As the 3D structure of the specimen is inevitably lost during specimen handling, reconstructing a volume from individual specimen slices could facilitate the correlation of histology to radiological imaging. This work proposes a novel method for a whole specimen volume reconstruction and is validated on six mastectomy cases. We also demonstrate how these volumes can be used as a means to map multiple histology slides to a whole mastectomy image (MRI or CT).}, + file = {:pdf/Mert16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2930968711439371239}, + gscites = {4}, + ss_id = {3487257137f253ce44733bc433e14249a87c07ad}, + all_ss_ids = {['3487257137f253ce44733bc433e14249a87c07ad']}, +} + +@article{Mert17, + author = {Mertzanidou, Thomy and Hipwell, John H. and Reis, Sara and Hawkes, David J. 
and Bejnordi, Babak Ehteshami and Dalmis, Mehmet and Vreemann, Suzan and Platel, Bram and van der Laak, Jeroen and Karssemeijer, Nico and Hermsen, Meyke and Bult, Peter and Mann, Ritse}, + title = {3D volume reconstruction from serial breast specimen radiographs for mapping between histology and {3D} whole specimen imaging}, + journal = MP, + year = {2017}, + volume = {44}, + issue = {3}, + pages = {935-948}, + doi = {10.1002/mp.12077}, + abstract = {PURPOSE: + In breast imaging, radiological in vivo images, such as x-ray mammography and magnetic resonance imaging (MRI), are used for tumor detection, diagnosis, and size determination. After excision, the specimen is typically sliced into slabs and a small subset is sampled. Histopathological imaging of the stained samples is used as the gold standard for characterization of the tumor microenvironment. A 3D volume reconstruction of the whole specimen from the 2D slabs could facilitate bridging the gap between histology and in vivo radiological imaging. This task is challenging, however, due to the large deformation that the breast tissue undergoes after surgery and the significant undersampling of the specimen obtained in histology. In this work, we present a method to reconstruct a coherent 3D volume from 2D digital radiographs of the specimen slabs. + + METHODS: + To reconstruct a 3D breast specimen volume, we propose the use of multiple target neighboring slices, when deforming each 2D slab radiograph in the volume, rather than performing pairwise registrations. The algorithm combines neighborhood slice information with free-form deformations, which enables a flexible, nonlinear deformation to be computed subject to the constraint that a coherent 3D volume is obtained. The neighborhood information provides adequate constraints, without the need for any additional regularization terms. + + RESULTS: + The volume reconstruction algorithm is validated on clinical mastectomy samples using a quantitative assessment of the volume reconstruction smoothness and a comparison with a whole specimen 3D image acquired for validation before slicing. Additionally, a target registration error of 5 mm (comparable to the specimen slab thickness of 4 mm) was obtained for five cases. The error was computed using manual annotations from four observers as gold standard, with interobserver variability of 3.4 mm. Finally, we illustrate how the reconstructed volumes can be used to map histology images to a 3D specimen image of the whole sample (either MRI or CT). + + CONCLUSIONS: + Qualitative and quantitative assessment has illustrated the benefit of using our proposed methodology to reconstruct a coherent specimen volume from serial slab radiographs. To our knowledge, this is the first method that has been applied to clinical breast cases, with the goal of reconstructing a whole specimen sample. The algorithm can be used as part of the pipeline of mapping histology images to ex vivo and ultimately in vivo radiological images of the breast.}, + file = {Mert17.pdf:pdf\\Mert17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28064435}, + month = {3}, + gsid = {2215474408889452803}, + gscites = {15}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/170535}, + ss_id = {5eaff5ff94bfb7d25ef87679e9fcf243346b30e5}, + all_ss_ids = {['5eaff5ff94bfb7d25ef87679e9fcf243346b30e5']}, +} + +@article{Mesk17, + author = {Mesker, W. and van Pelt, G. and Huijbers, A. and van der Laak, J. and Dequeker, E. and Fl\'{e}jou, J-F. and Al Dieri, R. and Kerr, D. and Van Krieken, J. 
and Tollenaar, R.}, + title = {Improving treatment decisions in colon cancer: The tumor-stroma ratio (TSR) additional to the TNM classification}, + doi = {10.1093/annonc/mdx393.087}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1093/annonc/mdx393.087}, + file = {Mesk17.pdf:pdf\Mesk17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Annals of Oncology}, + citation-count = {0}, + automatic = {yes}, + pages = {v190-v191}, + volume = {28}, +} + +@article{Mets11a, + author = {O. M. Mets and C. F. M. Buckens and P. Zanen and I. I{\v{s}}gum and B. van Ginneken and M. Prokop and H. A. Gietema and J. W. J. Lammers and R. Vliegenthart and M. Oudkerk and R. J. van Klaveren and H. J. de Koning and W. P. Th. M. Mali and P. A. de Jong}, + title = {Identification of Chronic Obstructive Pulmonary Disease in Lung Cancer Screening Computed Tomographic Scans}, + journal = JAMA, + year = {2011}, + volume = {306}, + pages = {1775-1781}, + doi = {10.1001/jama.2011.1531}, + abstract = {Context: Smoking is a major risk factor for both cancer and chronic obstructive pulmonary disease (COPD). Computed tomography (CT)-based lung cancer screening may provide an opportunity to detect additional individuals with COPD at an early stage. Objective: To determine whether low-dose lung cancer screening CT scans can be used to identify participants with COPD. Design, Setting, and Patients: Single-center prospective cross-sectional study within an ongoing lung cancer screening trial. Prebronchodilator pulmonary function testing with inspiratory and expiratory CT on the same day was obtained from 1140 male participants between July 2007 and September 2008. Computed tomographic emphysema was defined as percentage of voxels less than -950 Hounsfield units (HU), and CT air trapping was defined as the expiratory:inspiratory ratio of mean lung density. Chronic obstructive pulmonary disease was defined as the ratio of forced expiratory volume in the first second to forced vital capacity (FEV1/FVC) of less than 70%. Logistic regression was used to develop a diagnostic prediction model for airflow limitation. Main Outcome Measures: Diagnostic accuracy of COPD diagnosis using pulmonary function tests as the reference standard. Results: Four hundred thirty-seven participants (38%) had COPD according to lung function testing. A diagnostic model with CT emphysema, CT air trapping, body mass index, pack-years, and smoking status corrected for overoptimism (internal validation) yielded an area under the receiver operating characteristic curve of 0.83 (95% CI, 0.81-0.86). Using the point of optimal accuracy, the model identified 274 participants with COPD with 85 false-positives, a sensitivity of 63% (95% CI, 58%-67%), specificity of 88% (95% CI, 85%-90%), positive predictive value of 76% (95% CI, 72%-81%); and negative predictive value of 79% (95% CI, 76%-82%). The diagnostic model showed an area under the receiver operating characteristic curve of 0.87 (95% CI, 0.86-0.88) for participants with symptoms and 0.78 (95% CI, 0.76-0.80) for those without symptoms. 
Conclusion: Among men who are current and former heavy smokers, low-dose inspiratory and expiratory CT scans obtained for lung cancer screening can identify participants with COPD, with a sensitivity of 63% and a specificity of 88%}, + file = {Mets11a.pdf:pdf\\Mets11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {16}, + pmid = {22028353}, + month = {10}, + gsid = {9535176740666077921}, + gscites = {124}, + ss_id = {ccd229b5d8ea29c484d4fa8fa9823899afb2a061}, + all_ss_ids = {['ccd229b5d8ea29c484d4fa8fa9823899afb2a061']}, +} + +@article{Mets12, + author = {O. M. Mets and K. Murphy and P. Zanen and H. A. Gietema and J. W. Lammers and B. van Ginneken and M. Prokop and P. A. de Jong}, + title = {The relationship between lung function impairment and quantitative computed tomography in chronic obstructive pulmonary disease}, + journal = ER, + year = {2012}, + volume = {22}, + pages = {120--128}, + doi = {10.1007/s00330-011-2237-9}, + abstract = {OBJECTIVES: To determine the relationship between lung function impairment and quantitative computed tomography (CT) measurements of air trapping and emphysema in a population of current and former heavy smokers with and without airflow limitation. METHODS: In 248 subjects (50 normal smokers; 50 mild obstruction; 50 moderate obstruction; 50 severe obstruction; 48 very severe obstruction) CT emphysema and CT air trapping were quantified on paired inspiratory and end-expiratory CT examinations using several available quantification methods. CT measurements were related to lung function (FEV(1), FEV(1)/FVC, RV/TLC, Kco) by univariate and multivariate linear regression analysis. RESULTS: Quantitative CT measurements of emphysema and air trapping were strongly correlated to airflow limitation (univariate r-squared up to 0.72, p<0.001). In multivariate analysis, the combination of CT emphysema and CT air trapping explained 68-83\% of the variability in airflow limitation in subjects covering the total range of airflow limitation (p<0.001). CONCLUSIONS: The combination of quantitative CT air trapping and emphysema measurements is strongly associated with lung function impairment in current and former heavy smokers with a wide range of airflow limitation. KEY POINTS: - CT helps to automatically assess lung disease in heavy smokers - CT quantitatively measures emphysema and small airways disease in heavy smokers - CT air trapping and CT emphysema are associated with lung function impairment.}, + file = {Mets12.pdf:pdf\\Mets12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {21837396}, + month = {8}, + gsid = {3220250186597997230}, + gscites = {70}, + ss_id = {edc0d900d33d2ee0013dbb130d4f7956d77ebba9}, + all_ss_ids = {['edc0d900d33d2ee0013dbb130d4f7956d77ebba9']}, +} + +@article{Mets12a, + author = {Mets, O. M. and de Jong, P. A. and van Ginneken, B. and Gietema, H. A. and Lammers, J. W. J.}, + title = {Quantitative Computed Tomography in {COPD}: Possibilities and Limitations}, + journal = Lung, + year = {2012}, + volume = {190}, + pages = {133--145}, + doi = {10.1007/s00408-011-9353-9}, + abstract = {Chronic obstructive pulmonary disease (COPD) is a heterogeneous disease that is characterized by chronic airflow limitation. Unraveling of this heterogeneity is challenging but important, because it might enable more accurate diagnosis and treatment. 
Because spirometry cannot distinguish between the different contributing pathways of airflow limitation, and visual scoring is time-consuming and prone to observer variability, other techniques are sought to start this phenotyping process. Quantitative computed tomography (CT) is a promising technique, because current CT technology is able to quantify emphysema, air trapping, and large airway wall dimensions. This review focuses on CT quantification techniques of COPD disease components and their current status and role in phenotyping COPD.}, + file = {Mets12a.pdf:pdf\\Mets12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22179694}, + month = {12}, + gsid = {12983185550555310641}, + gscites = {114}, + ss_id = {5b54d066b85be837e4896989f53d10e0b13dbecb}, + all_ss_ids = {['5b54d066b85be837e4896989f53d10e0b13dbecb']}, +} + +@article{Mets12e, + author = {Mets, Onno M. and van Hulst, Robert A. and Jacobs, Colin and van Ginneken, Bram and de Jong, Pim A.}, + title = {Normal Range of Emphysema and Air Trapping on {CT} in Young Men}, + journal = AJR, + year = {2012}, + volume = {199}, + pages = {336--340}, + doi = {10.2214/AJR.11.7808}, + abstract = {The purpose of our study was to assess the normal range of CT measures of emphysema and air trapping in young men with normal lung function. A cohort of 70 young men with high-normal spirometry and body plethysmography underwent paired inspiratory and expiratory CT. Visual and quantitative scores of emphysema and air trapping were obtained. On CT, emphysema was defined as the 15th percentile of the attenuation curve (Perc(15)), and as the percentage of inspiratory voxels below -950 (IN(-950)) and below -960 (IN(-960)) HU. On CT, air trapping was defined as the expiratory-to-inspiratory ratio of mean lung density (EI-ratio(MLD)), and the percentage of voxels below -856 HU in expiration (EXP(-856)). Means, medians, and upper limits of normal (ULN) are presented for the total population and for smokers and nonsmokers separately. The mean age (+/- SD) of the subjects was 36.1 +/- 9.3 years. Smoking history was limited (range, 0-11 pack-years). Spirometry was high normal, ranging from 113\% to 160\% of predicted for vital capacity (VC), and from 104\% to 140\% of predicted for forced expiratory volume in 1 second (FEV(1)). The ULN was 2.73\% for IN(-950), 0.87\% for IN(-960), -936 HU for Perc(15), 89.0\% for EI-ratio(MLD), and 17.2\% for EXP(-856). Visual CT scores showed minimal emphysema in eight (11\%), > 5 lobules of air trapping in five (7\%), and segmental air trapping in three (4\%) subjects. CT measures were similar for never- and ever-smokers. We report the normal range of CT values for young male subjects with normal lung function, which is important to define pulmonary disease.}, + file = {Mets12e.pdf:pdf\\Mets12e.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {22826394}, + month = {8}, + gsid = {9417645695025612100}, + gscites = {51}, + ss_id = {177331ab3937bb17a77cf453ba6684d3ac028b22}, + all_ss_ids = {['177331ab3937bb17a77cf453ba6684d3ac028b22']}, +} + +@article{Mets12h, + author = {Mets, O. M. and Zanen, P. and Lammers, J.-W. J. and I{\v{s}}gum, I. and Gietema, H. A. and van Ginneken, B. and Prokop, M. and de Jong, P. 
A.}, + title = {Early Identification of Small Airways Disease on Lung Cancer Screening {CT}: Comparison of Current Air Trapping Measures}, + journal = Lung, + year = {2012}, + volume = {190}, + pages = {629-633}, + doi = {10.1007/s00408-012-9422-8}, + abstract = {BACKGROUND : Lung cancer screening CT scans might provide valuable information about air trapping as an early indicator of smoking-related lung disease. We studied which of the currently suggested measures is most suitable for detecting functionally relevant air trapping on low-dose computed tomography (CT) in a population of subjects with early-stage disease. METHODS : This study was ethically approved and informed consent was obtained. Three quantitative CT air trapping measures were compared against a functional reference standard in 427 male lung cancer screening participants. This reference standard for air trapping was derived from the residual volume over total lung capacity ratio (RV/TLC) beyond the 95th percentile of predicted. The following CT air trapping measures were compared: expiratory to inspiratory relative volume change of voxels with attenuation values between -860 and -950 Hounsfield Units (RVC(-860 to -950)), expiratory to inspiratory ratio of mean lung density (E/I-ratio(MLD)) and percentage of voxels below -856 HU in expiration (EXP(-856)). Receiver operating characteristic (ROC) analysis was performed and area under the ROC curve compared. RESULTS: Functionally relevant air trapping was present in 38 (8.9 \%) participants. E/I-ratio(MLD) showed the largest area under the curve (0.85, 95 \% CI 0.813-0.883), which was significantly larger than RVC(-860 to -950) (0.703, 0.657-0.746; p < 0.001) and EXP(-856) (0.798, 0.757-0.835; p = 0.002). At the optimum for sensitivity and specificity, E/I-ratio(MLD) yielded an accuracy of 81.5 \%. CONCLUSIONS: The expiratory to inspiratory ratio of mean lung density (E/I-ratio(MLD)) is most suitable for detecting air trapping on low-dose screening CT and performs significantly better than other suggested quantitative measures.}, + file = {Mets12h.pdf:pdf\\Mets12h.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {23064488}, + month = {10}, + gsid = {8262566518944908606}, + gscites = {53}, + ss_id = {2012c7e5e9a96cafa9128f057db62ba372e3c702}, + all_ss_ids = {['2012c7e5e9a96cafa9128f057db62ba372e3c702']}, +} + +@article{Mets13a, + author = {Mets, Onno M. and de Jong, Pim A. and van Ginneken, Bram and Kruitwagen, Cas L J J. and Prokop, Mathias and Oudkerk, Matthijs and Lammers, Jan-Willem J. and Zanen, Pieter}, + title = {{CT} Air Trapping Is Independently Associated with Lung Function Reduction over Time}, + journal = PLOSONE, + year = {2013}, + volume = {8}, + pages = {e61783}, + doi = {10.1371/journal.pone.0061783}, + abstract = {We aimed to study the association between lung function decline and quantitative computed tomography (CT) air trapping.Current and former heavy smokers in a lung cancer screening trial underwent volumetric low-dose CT in inspiration and expiration. Spirometry was obtained at baseline and after 3 years. The expiratory to inspiratory ratio of mean lung density (E/I-ratioMLD) was used to quantify air trapping. CT emphysema was defined as voxels in inspiratory CT below -950 Hounsfield Unit. Linear mixed modeling was used to determine the association between CT air trapping and lung function.We included 985 subjects with a mean age of 61.3 years. 
Independent of CT emphysema, CT air trapping was significantly associated with a reduction in forced expiratory volume in one second (FEV1) and the ratio of FEV1 over the forced vital capacity (FEV1/FVC); FEV1 declines with 33 mL per percent increase in CT air trapping, while FEV1/FVC declines 0.58\% per percent increase (both p<0.001). CT air trapping further elicits accelerated loss of FEV1/FVC (additional 0.24\% reduction per percent increase; p = 0.014). In a lung cancer screening cohort, quantitatively assessed air trapping on low-dose CT is independently associated with reduced lung function and accelerated decline of FEV1/FVC.}, + file = {Mets13a.pdf:pdf\\Mets13a.pdf:PDF}, + optnote = {DIAG}, + number = {4}, + pmid = {23613934}, + month = {4}, + gsid = {8130074603070439298}, + gscites = {13}, + ss_id = {4e79aacf36a21b972e387d566141177e549469a0}, + all_ss_ids = {['4e79aacf36a21b972e387d566141177e549469a0']}, +} + +@article{Mets13b, + author = {Mets, O. M. and Schmidt, M. and Buckens, C. F. and Gondrie, M. J. and I{\v{s}}gum, I. and Oudkerk, M. and Vliegenthart, R. and de Koning, H. J. and van der Aalst, C. M. and Prokop, M. and Lammers, J. J. and Zanen, P. and Mohamed Hoesein, F. A. A. and Mali, W. P. and van Ginneken, B. and van Rikxoort, E. M. and de Jong, P. A.}, + title = {Diagnosis of chronic obstructive pulmonary disease in lung cancer screening Computed Tomography scans: independent contribution of emphysema, air trapping and bronchial wall thickening}, + journal = RESPR, + year = {2013}, + volume = {14}, + pages = {59}, + doi = {10.1186/1465-9921-14-59}, + abstract = {BACKGROUND: Beyond lung cancer, screening CT contains additional information on other smoking related diseases (e.g. chronic obstructive pulmonary disease, COPD). Since pulmonary function testing is not regularly incorporated in lung cancer screening, imaging biomarkers for COPD are likely to provide important surrogate measures for disease evaluation. Therefore, this study aims to determine the independent diagnostic value of CT emphysema, CT air trapping and CT bronchial wall thickness for COPD in low-dose screening CT scans. METHODS: Prebronchodilator spirometry and volumetric inspiratory and expiratory chest CT were obtained on the same day in 1140 male lung cancer screening participants. Emphysema, air trapping and bronchial wall thickness were automatically quantified in the CT scans. Logistic regression analysis was performed to derivate a model to diagnose COPD. The model was internally validated using bootstrapping techniques. RESULTS: Each of the three CT biomarkers independently contributed diagnostic value for COPD, additional to age, body mass index, smoking history and smoking status. The diagnostic model that included all three CT biomarkers had a sensitivity and specificity of 73.2\% and 88.\%, respectively. The positive and negative predictive value were 80.2\% and 84.2\%, respectively. Of all participants, 82.8\% was assigned the correct status. The C-statistic was 0.87, and the Net Reclassification Index compared to a model without any CT biomarkers was 44.4\%. However, the added value of the expiratory CT data was limited, with an increase in Net Reclassification Index of 4.5\% compared to a model with only inspiratory CT data. 
CONCLUSION: Quantitatively assessed CT emphysema, air trapping and bronchial wall thickness each contain independent diagnostic information for COPD, and these imaging biomarkers might prove useful in the absence of lung function testing and may influence lung cancer screening strategy. Inspiratory CT biomarkers alone may be sufficient to identify patients with COPD in lung cancer screening setting.}, + file = {Mets13b.pdf:pdf\\Mets13b.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {23711184}, + gsid = {14303730188902322930}, + gscites = {68}, + ss_id = {1e9cef47e4a4180ed74b89301ef2dbec30b66990}, + all_ss_ids = {['1e9cef47e4a4180ed74b89301ef2dbec30b66990']}, +} + +@article{Mets13c, + author = {Mets, Onno M. and Vliegenthart, Rozemarijn and Gondrie, Martijn J. and Viergever, Max A. and Oudkerk, Matthijs and de Koning, Harry J. and Mali, Willem P Th M. and Prokop, Mathias and van Klaveren, Rob J. and van der Graaf, Yolanda and Buckens, Constantinus F M. and Zanen, Pieter and Lammers, Jan-Willem J. and Groen, Harry J M. and I{\v{s}}gum, Ivana and de Jong, Pim A.}, + title = {Lung Cancer Screening {CT}-Based Prediction of Cardiovascular Events}, + journal = JACCCI, + year = {2013}, + volume = {6}, + pages = {899-907}, + doi = {10.1016/j.jcmg.2013.02.008}, + abstract = {OBJECTIVES: The aim of this study was to derivate and validate a prediction model for cardiovascular events based on quantification of coronary and aortic calcium volume in lung cancer screening chest computed tomography (CT). BACKGROUND: CT-based lung cancer screening in heavy smokers is a very timely topic. Given that the heavily smoking screening population is also at risk for cardiovascular disease, CT-based screening may provide the opportunity to additionally identify participants at high cardiovascular risk. METHODS: Inspiratory screening CT of the chest was obtained in 3,648 screening participants. Next, smoking characteristics, patient demographics, and physician-diagnosed cardiovascular events were collected from 10 years before the screening CT (i.e., cardiovascular history) until 3 years after the screening CT (i.e., follow-up time). Cox proportional hazards analysis was used to derivate and validate a prediction model for cardiovascular risk. Age, smoking status, smoking history, and cardiovascular history, together with automatically quantified coronary and aortic calcium volume from the screening CT, were included as independent predictors. The primary outcome measure was the discriminatory value of the model. RESULTS: Incident cardiovascular events occurred in 145 of 1,834 males (derivation cohort) and 118 of 1,725 males and 2 of 89 females (validation cohort). The model showed good discrimination in the validation cohort with a C-statistic of 0.71 (95\% confidence interval: 0.67 to 0.76). When high risk was defined as a 3-year risk of 6\% and higher, 589 of 1,725 males were regarded as high risk and 72 of 118 of all events were correctly predicted by the model. CONCLUSIONS: Quantification of coronary and aortic calcium volumes in lung cancer screening CT images-information that is readily available-can be used to predict cardiovascular risk. 
Such an approach might prove useful in the reduction of cardiovascular morbidity and mortality and may enhance the cost-effectiveness of CT-based screening in heavy smokers.}, + file = {Mets13c.pdf:pdf\\Mets13c.pdf:PDF}, + optnote = {DIAG}, + pmid = {23769488}, + month = {8}, + gsid = {3110821982906556219}, + gscites = {69}, +} + +@article{Mets16, + author = {Mets, Onno M. and {de Jong}, Pim A. and Chung, Kaman and Lammers, Jan-Willem J. and {van Ginneken}, Bram and Schaefer-Prokop, Cornelia M.}, + title = {Fleischner recommendations for the management of subsolid pulmonary nodules: high awareness but limited conformance - a survey study}, + journal = ER, + year = {2016}, + volume = {26}, + pages = {3840-3849}, + doi = {10.1007/s00330-016-4249-y}, + url = {http://dx.doi.org/10.1007/s00330-016-4249-y}, + abstract = {The aim of this study was to assess awareness and conformance to the Fleischner society recommendations for the management of subsolid pulmonary nodules (SSN) in clinical practice. An online questionnaire with four imaging cases was sent to 1579 associates from the European Respiratory Society and 757 from the European Society of Thoracic Imaging. Each respondent was asked to choose from several options which one they thought was the indicated management for the nodule presented. Awareness and conformance to the Fleischner recommendations (FR) were assessed and correlated to respondents characteristics. In total, 119 radiologists (response rate 16.0%) and 243 pulmonologists (response rate 16.5%) were included. Awareness of the FR was higher in radiologists than in pulmonologists (93% vs. 70%, p<0.001), as was implementation in daily practice (66% vs. 47%, p<0.001). Radiologists conformed to FR in rates of 31, 69, 68, and 82%, and pulmonologists in 12, 43, 70, and 75% for cases 1 to 4, respectively. Overmanagement was common. Conformance in SSN management was associated with awareness, working in an academic practice, larger practice size, teaching residents, and higher SSN exposure. Although awareness of the Fleischner recommendations for SSN management is widespread, management choices in clinical practice show large heterogeneity.Guideline awareness among clinicians is widespread, but conformance shows large heterogeneity. Awareness and conformance is significantly higher among radiologists than pulmonologists. Overmanagement is common, which may lead to avoidable financial and physical burden.}, + file = {Mets16.pdf:pdf\\Mets16.pdf:PDF}, + optnote = {DIAG}, + pmid = {26945759}, + month = {3}, + gsid = {16090325245936356049}, + gscites = {24}, + ss_id = {e571e6bec0974bb3d8b508183075258b783c9ec7}, + all_ss_ids = {['e571e6bec0974bb3d8b508183075258b783c9ec7']}, +} + +@article{Mets17, + author = {Mets, Onno M. and {de Jong}, Pim A. and Scholten, Ernst Th and Chung, Kaman and {van Ginneken}, Bram and Schaefer-Prokop, Cornelia M.}, + title = {Subsolid pulmonary nodule morphology and associated patient characteristics in a routine clinical population}, + journal = ER, + year = {2017}, + volume = {27}, + number = {2}, + month = {2}, + pages = {689-696}, + doi = {10.1007/s00330-016-4429-9}, + url = {http://dx.doi.org/10.1007/s00330-016-4429-9}, + abstract = {To determine the presence and morphology of subsolid pulmonary nodules (SSNs) in a non-screening setting and relate them to clinical and patient characteristics.A total of 16,890 reports of clinically obtained chest CT (06/2011 to 11/2014, single-centre) were searched describing an SSN. 
Subjects with a visually confirmed SSN and at least two thin-slice CTs were included. Nodule volumes were measured. Progression was defined as volume increase exceeding the software interscan variation. Nodule morphology, location, and patient characteristics were evaluated.Fifteen transient and 74 persistent SSNs were included (median follow-up 19.6 [8.3-36.8] months). Subjects with an SSN were slightly older than those without (62 vs. 58 years; p = 0.01), but no gender predilection was found. SSNs were mostly located in the upper lobes. Women showed significantly more often persistent lesions than men (94 \% vs. 69 \%; p = 0.002). Part-solid lesions were larger (1638 vs. 383 mm(3); p?= 40 years of age who underwent routine chest CT (2004-2012), 186 eligible subjects with incident lung cancer and 511 controls without were investigated. All non-calcified nodules >= 4 mm were semi-automatically annotated. Lung cancer location and subject characteristics were recorded. Cases (56 % male) had a median age of 64 years (IQR 59-70). Controls (60 % male) were slightly younger (p<0.01), median age of 61 years (IQR 51-70). A total of 262/1,278 (21 %) unique non-calcified nodules represented a PFN. None of these were traced to a lung malignancy over a median follow-up of around 4.5 years. PFNs were most often located in the lower lung zones (72 %, p<0.001). Median diameter was 4.6 mm (range: 4.0-8.1), volume 51 mm3 (range: 32-278). Some showed growth rates < 400 days. Our data show that incidental PFNs do not represent lung cancer in a routine care, heterogeneous population. This confirms prior screening-based results. * One-fifth of non-calcified nodules represented a perifissural nodule in our non-screening population. * PFNs fairly often show larger size, and can show interval growth. * When morphologically resembling a PFN, nodules are nearly certainly not a malignancy. * The assumed benign aetiology of PFNs seems valid outside the screening setting.}, + file = {:pdf/Mets18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28986629}, + gsid = {8930722888609062115}, + gscites = {26}, + ss_id = {eb383f9d03fc189c9b3c7bb2e7132d5fdb07f067}, + all_ss_ids = {['eb383f9d03fc189c9b3c7bb2e7132d5fdb07f067']}, +} + +@article{Mets18a, + author = {Mets, O. M. and Chung, K. and Zanen, P. and Scholten, E. T. and Veldhuis, W. B. and van Ginneken, B. and Prokop, M. and Schaefer-Prokop, C. M. and de Jong, P. A.}, + title = {In vivo growth of 60 non-screening detected lung cancers: a computed tomography study}, + doi = {10.1183/13993003.02183-2017}, + pages = {1702183}, + volume = {51}, + abstract = {Current pulmonary nodule management guidelines are based on nodule volume doubling time, which assumes exponential growth behaviour. However, this is a theory that has never been validated in vivo in the routine-care target population. This study evaluates growth patterns of untreated solid and subsolid lung cancers of various histologies in a non-screening setting.Growth behaviour of pathology-proven lung cancers from two academic centres that were imaged at least three times before diagnosis (n=60) was analysed using dedicated software. Random-intercept random-slope mixed-models analysis was applied to test which growth pattern most accurately described lung cancer growth. Individual growth curves were plotted per pathology subgroup and nodule type.We confirmed that growth in both subsolid and solid lung cancers is best explained by an exponential model.
However, subsolid lesions generally progress slower than solid ones. Baseline lesion volume was not related to growth, indicating that smaller lesions do not grow slower compared to larger ones.By showing that lung cancer conforms to exponential growth we provide the first experimental basis in the routine-care setting for the assumption made in volume doubling time analysis.}, + file = {:pdf/Mets18a.pdf:PDF}, + journal = ERJ, + optnote = {DIAG}, + pmid = {29650547}, + year = {2018}, + month = {4}, + gsid = {13243596039614426394}, + gscites = {11}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/191051}, + ss_id = {6a340e97fd3c7c5f9f2f5401d1a2fb07c39fb8ed}, + all_ss_ids = {['8c9301f67b46bbac884c588232ebca05dd3ba8ae', '6a340e97fd3c7c5f9f2f5401d1a2fb07c39fb8ed']}, +} + +@article{Mets18b, + author = {Mets, Onno M. and Schaefer-Prokop, Cornelia M. and de Jong, Pim A.}, + title = {Cyst-related primary lung malignancies: an important and relatively unknown imaging appearance of (early) lung cancer}, + doi = {10.1183/16000617.0079-2018}, + year = {2018}, + abstract = {It is well known that lung cancer can manifest itself in imaging as solid and subsolid nodules or masses. However, in this era of increased computed tomography use another morphological computed tomography appearance of lung cancer is increasingly being recognised, presenting as a malignancy in relation to cystic airspaces. Despite the fact that it seems to be a relatively common finding in daily practice, literature on this entity is scarce and presumably the overall awareness is limited. This can lead to misinterpretation and delay in diagnosis and, therefore, increased awareness is urgently needed. This review aims to illustrate the imaging appearances of cyst-related primary lung malignancies, demonstrate its mimickers and potential pitfalls, and discuss the clinical implications based on the available literature and our own experience in four different hospitals.}, + url = {http://dx.doi.org/10.1183/16000617.0079-2018}, + file = {Mets18b.pdf:pdf\Mets18b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Respiratory Review}, + citation-count = {12}, + automatic = {yes}, + pages = {180079}, + volume = {27}, +} + +@article{Mets18b, + author = {Mets, Onno M. and Schaefer-Prokop, Cornelia M. and de Jong, Pim A.}, + title = {Cyst-related primary lung malignancies: an important and relatively unknown imaging appearance of (early) lung cancer}, + doi = {10.1183/16000617.0079-2018}, + year = {2018}, + abstract = {It is well known that lung cancer can manifest itself in imaging as solid and subsolid nodules or masses. However, in this era of increased computed tomography use another morphological computed tomography appearance of lung cancer is increasingly being recognised, presenting as a malignancy in relation to cystic airspaces. Despite the fact that it seems to be a relatively common finding in daily practice, literature on this entity is scarce and presumably the overall awareness is limited. This can lead to misinterpretation and delay in diagnosis and, therefore, increased awareness is urgently needed. 
This review aims to illustrate the imaging appearances of cyst-related primary lung malignancies, demonstrate its mimickers and potential pitfalls, and discuss the clinical implications based on the available literature and our own experience in four different hospitals.}, + url = {http://dx.doi.org/10.1183/16000617.0079-2018}, + file = {Mets18b.pdf:pdf\Mets18b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Respiratory Review}, + citation-count = {12}, + automatic = {yes}, + pages = {180079}, + volume = {27}, +} +
+@article{Meye20, + author = {Anneke Meyer and Grzegorz Chlebus and Marko Rak and Daniel Schindele and Martin Schostak and Bram van Ginneken and Andrea Schenk and Hans Meine and Horst K. Hahn and Andreas Schreiber and Christian Hansen}, + title = {Anisotropic {3D} Multi-Stream {CNN} for Accurate Prostate Segmentation from Multi-Planar {MRI}}, + journal = {arXiv:2009.11120}, + year = {2020}, + abstract = {Background and Objective: Accurate and reliable segmentation of the prostate gland in MR images can support the clinical assessment of prostate cancer, as well as the planning and monitoring of focal and loco-regional therapeutic interventions. Despite the availability of multi-planar MR scans due to standardized protocols, the majority of segmentation approaches presented in the literature consider the axial scans only. Methods: We propose an anisotropic 3D multi-stream CNN architecture, which processes additional scan directions to produce a higher-resolution isotropic prostate segmentation. We investigate two variants of our architecture, which work on two (dual-plane) and three (triple-plane) image orientations, respectively. We compare them with the standard baseline (single-plane) used in literature, i.e., plain axial segmentation. To realize a fair comparison, we employ a hyperparameter optimization strategy to select optimal configurations for the individual approaches. Results: Training and evaluation on two datasets spanning multiple sites obtain statistical significant improvement over the plain axial segmentation ($p<0.05$ on the Dice similarity coefficient). The improvement can be observed especially at the base ($0.898$ single-plane vs. $0.906$ triple-plane) and apex ($0.888$ single-plane vs. $0.901$ dual-plane). Conclusion: This study indicates that models employing two or three scan directions are superior to plain axial segmentation. The knowledge of precise boundaries of the prostate is crucial for the conservation of risk structures. Thus, the proposed models have the potential to improve the outcome of prostate cancer diagnosis and therapies.}, + file = {:http\://arxiv.org/pdf/2009.11120v1:PDF}, + all_ss_ids = {['4607ec477007deebd18b51e8ba787beae8647303']}, + gscites = {24}, +} + +@article{Meye20a, + author = {Anneke Meyer and Grzegorz Chlebus and Marko Rak and Daniel Schindele and Martin Schostak and Bram van Ginneken and Andrea Schenk and Hans Meine and Horst K. Hahn and Andreas Schreiber and Christian Hansen}, + title = {Anisotropic {3D} Multi-Stream {CNN} for Accurate Prostate Segmentation from Multi-Planar {MRI}}, + journal = CMPB, + pages = {105821}, + year = {2020}, + pmid = {33218704}, + doi = {10.1016/j.cmpb.2020.105821}, + abstract = {Background and Objective: Accurate and reliable segmentation of the prostate gland in MR images can support the clinical assessment of prostate cancer, as well as the planning and monitoring of focal and loco-regional therapeutic interventions.
This study introduces the concept of fractal analysis for characterizing prostate cancer perfusion and reports about its usefulness for non-invasive prediction of tumor grade.}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/252017}, + ss_id = {32fb40cb7d2abc53a1f56c94fe592b0c39870901}, + all_ss_ids = {['32fb40cb7d2abc53a1f56c94fe592b0c39870901']}, + gscites = {10}, +} + +@article{Mich21b, + author = {Florian Michallek and Henkjan Huisman and Bernd Hamm and Sefer Elezkurtaj and Andreas Maxeiner and Marc Dewey}, + title = {Accuracy of fractal analysis and PI-RADS assessment of prostate magnetic resonance imaging for prediction of cancer grade groups: a clinical validation study}, + journal = ER, + year = {2021}, + doi = {10.1007/s00330-021-08358-y}, + url = {https://doi.org/10.1007/s00330-021-08358-y}, + abstract = {Multiparametric MRI with Prostate Imaging Reporting and Data System (PI-RADS) assessment is sensitive but not specific for detecting clinically significant prostate cancer. This study validates the diagnostic accuracy of the recently suggested fractal dimension (FD) of perfusion for detecting clinically significant cancer.}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/249481}, + ss_id = {926e78dce5e42624d849dde4be1e8cdab215c42a}, + all_ss_ids = {['926e78dce5e42624d849dde4be1e8cdab215c42a']}, + gscites = {2}, +} + +@article{Mies22, + author = {Miesen, Laura and B\'{a}ndi, P\'{e}ter and Willemsen, Brigith and Mooren, Fieke and Strieder, Thiago and Boldrini, Eva and Drenic, Vedran and Eymael, Jennifer and Wetzels, Roy and Lotz, Johannes and Weiss, Nick and Steenbergen, Eric and van Kuppevelt, Toin H. and van Erp, Merijn and van der Laak, Jeroen and Endlich, Nicole and Moeller, Marcus J. and Wetzels, Jack F. M. and Jansen, Jitske and Smeets, Bart}, + title = {Parietal epithelial cells maintain the epithelial cell continuum forming Bowman's space in focal segmental glomerulosclerosis}, + doi = {10.1242/dmm.046342}, + year = {2022}, + abstract = {ABSTRACT + In the glomerulus, Bowman's space is formed by a continuum of glomerular epithelial cells. In focal segmental glomerulosclerosis (FSGS), glomeruli show segmental scarring, a result of activated parietal epithelial cells (PECs) invading the glomerular tuft. The segmental scars interrupt the epithelial continuum. However, non-sclerotic segments seem to be preserved even in glomeruli with advanced lesions. We studied the histology of the segmental pattern in Munich Wistar Fromter rats, a model for secondary FSGS. Our results showed that matrix layers lined with PECs cover the sclerotic lesions. These PECs formed contacts with podocytes of the uninvolved tuft segments, restoring the epithelial continuum. Formed Bowman's spaces were still connected to the tubular system. In biopsies of patients with secondary FSGS, we also detected matrix layers formed by PECs, separating the uninvolved from the sclerotic glomerular segments. PECs have a major role in the formation of glomerulosclerosis; we show here that in FSGS they also restore the glomerular epithelial cell continuum that surrounds Bowman's space. 
This process may be beneficial and indispensable for glomerular filtration in the uninvolved segments of sclerotic glomeruli.}, + url = {http://dx.doi.org/10.1242/dmm.046342}, + file = {Mies22.pdf:pdf\Mies22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Disease Models & Mechanisms}, + citation-count = {3}, + automatic = {yes}, + volume = {15}, +} + +@article{Mile17, + author = {Milenkovi\'{c}, Jana and Dalm\i\c{s}, Mehmet Ufuk and Zgajnar, Janez and Platel, Bram}, + title = {Textural analysis of early-phase spatiotemporal changes in contrast enhancement of breast lesions imaged with an ultrafast DCE-MRI protocol}, + doi = {10.1002/mp.12408}, + year = {2017}, + abstract = {PurposeNew ultrafast view-sharing sequences have enabled breast dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) to be performed at high spatial and temporal resolution. The aim of this study is to evaluate the diagnostic potential of textural features that quantify the spatiotemporal changes of the contrast-agent uptake in computer-aided diagnosis of malignant and benign breast lesions imaged with high spatial and temporal resolution DCE-MRI.MethodThe proposed approach is based on the textural analysis quantifying the spatial variation of six dynamic features of the early-phase contrast-agent uptake of a lesion's largest cross-sectional area. The textural analysis is performed by means of the second-order gray-level co-occurrence matrix, gray-level run-length matrix and gray-level difference matrix. This yields 35 textural features to quantify the spatial variation of each of the six dynamic features, providing a feature set of 210 features in total. The proposed feature set is evaluated based on receiver operating characteristic (ROC) curve analysis in a cross-validation scheme for random forests (RF) and two support vector machine classifiers, with linear and radial basis function (RBF) kernel. Evaluation is done on a dataset with 154 breast lesions (83 malignant and 71 benign) and compared to a previous approach based on 3D morphological features and the average and standard deviation of the same dynamic features over the entire lesion volume as well as their average for the smaller region of the strongest uptake rate.ResultThe area under the ROC curve (AUC) obtained by the proposed approach with the RF classifier was 0.8997, which was significantly higher (P = 0.0198) than the performance achieved by the previous approach (AUC = 0.8704) on the same dataset. Similarly, the proposed approach obtained a significantly higher result for both SVM classifiers with RBF (P = 0.0096) and linear kernel (P = 0.0417) obtaining AUC of 0.8876 and 0.8548, respectively, compared to AUC values of previous approach of 0.8562 and 0.8311, respectively.ConclusionThe proposed approach based on 2D textural features quantifying spatiotemporal changes of the contrast-agent uptake significantly outperforms the previous approach based on 3D morphology and dynamic analysis in differentiating the malignant and benign breast lesions, showing its potential to aid clinical decision making.}, + url = {http://dx.doi.org/10.1002/mp.12408}, + file = {Mile17.pdf:pdf\Mile17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Physics}, + citation-count = {14}, + automatic = {yes}, + pages = {4652-4664}, + volume = {44}, +} + +@article{Moha11, + author = {Mohamed Hoesein, F. A. A. and B. de Hoop and P. Zanen and H. Gietema and C. L. J. J. Kruitwagen and B. van Ginneken and I. I{\v{s}}gum and C. Mol and R. J. 
van Klaveren and A. E. Dijkstra and H. J. M. Groen and H. M. Boezen and D. S. Postma and M. Prokop and J.-W. J. Lammers}, + title = {{CT}-quantified emphysema in male heavy smokers: association with lung function decline}, + journal = Thorax, + year = {2011}, + volume = {66}, + pages = {782--787}, + doi = {10.1136/thx.2010.145995}, + abstract = {Background Emphysema and small airway disease both contribute to chronic obstructive pulmonary disease (COPD), a disease characterised by accelerated decline in lung function. The association between the extent of emphysema in male current and former smokers and lung function decline was investigated. Methods Current and former heavy smokers participating in a lung cancer screening trial were recruited to the study and all underwent CT. Spirometry was performed at baseline and at 3-year follow-up. The 15th percentile (Perc15) was used to assess the severity of emphysema. Results 2085 men of mean age 59.8 years participated in the study. Mean (SD) baseline Perc15 was -934.9 (19.5) HU. A lower Perc15 value correlated with a lower forced expiratory volume in 1 s (FEV(1)) at baseline (r=0.12, p<0.001). Linear mixed model analysis showed that a lower Perc15 was significantly related to a greater decline in FEV(1) after follow-up (p<0.001). Participants without baseline airway obstruction who developed it after follow-up had significantly lower mean (SD) Perc15 values at baseline than those who did not develop obstruction (-934.2 (17.1) HU vs -930.2 (19.7) HU, p<0.001). Conclusion Greater baseline severity of CT-detected emphysema is related to lower baseline lung function and greater rates of lung function decline, even in those without airway obstruction. CT-detected emphysema aids in identifying non-obstructed male smokers who will develop airflow obstruction.}, + file = {Moha11.pdf:pdf\\Moha11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21474499}, + month = {4}, + gsid = {2503222556262111335,16225817165193692000}, + gscites = {157}, + ss_id = {11f3274c97ef62ae0fc888928ce0c939d03c79f7}, + all_ss_ids = {['11f3274c97ef62ae0fc888928ce0c939d03c79f7']}, +} + +@article{Moha13, + author = {Mohamed Hoesein, F. A. A. and Zanen, Pieter and de Jong, Pim A. and van Ginneken, Bram and Boezen, H Marike and Groen, Harry Jm and Oudkerk, Mathijs and de Koning, Harry J. and Postma, Dirkje S. and Lammers, Jan-Willem J.}, + title = {Rate of progression of {CT}-quantified emphysema in male current and ex-smokers: a follow-up study}, + journal = RESPR, + year = {2013}, + volume = {14}, + pages = {55}, + doi = {10.1186/1465-9921-14-55}, + abstract = {BACKGROUND: Little is known about the factors associated with CT-quantified emphysema progression in heavy smokers. The objective of this study was to investigate the effect of length of smoking cessation and clinical / demographical factors on the rate of emphysema progression and FEV1-decline in male heavy smokers. METHODS: 3,670 male smokers with mean (SD) 40.8 (17.9) packyears underwent chest CT scans and pulmonary function tests at baseline and after 1 and 3 years follow-up. Smoking status (quitted >=5, >=1-<5, <1 years or current smoker) was noted. Rate of progression of emphysema and FEV1-decline after follow-up were assessed by analysis of variance adjusting for age, height, baseline pulmonary function and emphysema severity, packyears, years in study and respiratory symptoms. The quitted >=5 group was used as reference. 
RESULTS: Median (Q1-Q3) emphysema severity,<-950 HU, was 8.8 (5.1 -- 14.1) and mean (SD) FEV1 was 3.4 (0.73) L or 98.5 (18.5) \% of predicted. The group quitted '>5 years' showed significantly lower rates of progression of emphysema compared to current smokers, 1.07\% and 1.12\% per year, respectively (p<0.001). Current smokers had a yearly FEV1-decline of 69 ml, while subjects quit smoking >5 years had a yearly decline of 57.5 ml (p<0.001). CONCLUSION: Quit smoking >5 years significantly slows the rate of emphysema progression and lung function decline.Trial registration: Registered at www.trialregister.nl with trial number ISRCTN63545820.}, + file = {Moha13.pdf:pdf\\Moha13.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {23688060}, + gsid = {283001529912021713}, + gscites = {27}, + ss_id = {e0a4f8f3d89cf5b3bbf5db1be0ab370896e68498}, + all_ss_ids = {['e0a4f8f3d89cf5b3bbf5db1be0ab370896e68498']}, +} + +@article{Moha13a, + author = {Mohamed Hoesein, Firdaus A A. and Schmidt, Michael and Mets, Onno M. and Gietema, Hester A. and Lammers, Jan-Willem J. and Zanen, Pieter and de Koning, Harry J. and van der Aalst, Carlijn and Oudkerk, Matthijs and Vliegenthart, Rozemarijn and I{\v{s}}gum, Ivana and Prokop, Mathias and van Ginneken, Bram and van Rikxoort, Eva M. and de Jong, Pim A.}, + title = {Discriminating dominant computed tomography phenotypes in smokers without or with mild COPD}, + journal = RESPM, + year = {2014}, + volume = {108}, + pages = {136-143}, + doi = {10.1016/j.rmed.2013.08.014}, + abstract = {Finding phenotypes within COPD patients may prove imperative for optimizing treatment and prognosis. We hypothesized that it would be possible to discriminate emphysematous, large airway wall thickening and small airways disease dominant phenotypes.Inspiratory and expiratory CTs were performed in 1140 male smokers without or with mild COPD to quantify emphysema, airway wall thickness and air trapping. Spirometry, residual volume to total lung capacity (RV/TLC) and diffusion capacity (Kco) were measured. Dominant phenotype (emphysema, airway wall thickening or air trapping dominant) was defined as one of the respective CT measure in the upper quartile, with the other measures not in the upper quartile.573 subjects had any of the three CT measures in the upper quartile. Of these, 367 (64\%) were in a single dominant group and 206 (36\%) were in a mixed group. Airway wall thickening dominance was associated with younger age (p < 0.001), higher body mass index (p < 0.001), more wheezing (p < 0.05) and lower FEV1 \%predicted (p < 0.001). Emphysema dominant subjects had lower FEV1/FVC (p < 0.05) and Kco \%predicted (p < 0.05). There was no significant difference in respiratory related hospitalizations (p = 0.09).CT measures can discriminate three different CT dominant groups of disease in male smokers without or with mild COPD. ISRCTN63545820, registered at www.trialregister.nl.}, + file = {Moha13a.pdf:pdf\\Moha13a.pdf:PDF}, + optnote = {DIAG}, + pmid = {24035313}, + month = {1}, + gsid = {5267033159797573513}, + gscites = {31}, + ss_id = {955dc85fbe185045d897252e2c8cc45e851d95ae}, + all_ss_ids = {['955dc85fbe185045d897252e2c8cc45e851d95ae']}, +} + +@article{Moha14, + author = {Mohamed Hoesein, F. A. A. and de Jong, P. A. and Lammers, J.-W. J. and Mali, W. P. T. M. and Mets, O. M. and Schmidt, M. and de Koning, H. J. and van der Aalst, C. and Oudkerk, M. and Vliegenthart, R. and van Ginneken, B. and van Rikxoort, E. M. 
and Zanen, P.}, + title = {Contribution of {CT} Quantified Emphysema, Air Trapping and Airway Wall Thickness on Pulmonary Function in Male Smokers With and Without {COPD}}, + doi = {10.3109/15412555.2014.933952}, + number = {5}, + pages = {503-509}, + volume = {11}, + abstract = {Abstract Emphysema, airway wall thickening and air trapping are associated with chronic obstructive pulmonary disease (COPD). All three can be quantified by computed tomography (CT) of the chest. The goal of the current study is to determine the relative contribution of CT derived parameters on spirometry, lung volume and lung diffusion testing. Emphysema, airway wall thickening and air trapping were quantified automatically on CT in 1,138 male smokers with and without COPD. Emphysema was quantified by the percentage of voxels below -950 Hounsfield Units (HU), airway wall thickness by the square root of wall area for a theoretical airway with 10 mm lumen perimeter (Pi10) and air trapping by the ratio of mean lung density at expiration and inspiration (E/I-ratio). Spirometry, residual volume to total lung capacity (RV/TLC) and diffusion capacity (Kco) were obtained. Standardized regression coefficients (β) were used to analyze the relative contribution of CT changes to pulmonary function measures. The independent contribution of the three CT measures differed per lung function parameter. For the FEV1 airway wall thickness was the most contributing structural lung change (β = -0.46), while for the FEV1/FVC this was emphysema (β = -0.55). For the residual volume (RV) air trapping was most contributing (β = -0.35). Lung diffusion capacity was most influenced by emphysema (β = -0.42). In a cohort of smokers with and without COPD the effect of different CT changes varies per lung function measure and therefore emphysema, airway wall thickness and air trapping need to be taken in account.}, + file = {Moha14.pdf:pdf\\Moha14.pdf:PDF}, + journal = COPD, + optnote = {DIAG}, + pmid = {25093696}, + year = {2014}, + month = {8}, + gsid = {12560823050881457785}, + gscites = {46}, + ss_id = {37152c02e2e92a6c796c062ccb749974197d4f0c}, + all_ss_ids = {['37152c02e2e92a6c796c062ccb749974197d4f0c']}, +} + +@article{Moha15, + author = {Mohamed Hoesein, Firdaus A A. and de Jong, Pim A. and Lammers, Jan-Willem J. and Mali, Willem P. ThM. and Schmidt, Michael and de Koning, Harry J. and van der Aalst, Carlijn and Oudkerk, Matthijs and Vliegenthart, Rozemarijn and Groen, Harry J M. and van Ginneken, Bram and van Rikxoort, Eva M. and Zanen, Pieter}, + title = {Airway wall thickness associated with forced expiratory volume in 1 second decline and development of airflow limitation}, + journal = ERJ, + year = {2015}, + volume = {45}, + pages = {644-651}, + doi = {10.1183/09031936.00020714}, + abstract = {Airway wall thickness and emphysema contribute to airflow limitation. We examined their association with lung function decline and development of airflow limitation in 2021 male smokers with and without airflow limitation. Airway wall thickness and emphysema were quantified on chest computed tomography and expressed as the square root of wall area of a 10-mm lumen perimeter ({Pi10}) and the 15th percentile method ({Perc15}), respectively. Baseline and follow-up (median (interquartile range) 3 (2.9-3.1) years) spirometry was available. {Pi10} and {Perc15} correlated with baseline forced expiratory volume in 1 s ({FEV1}) (r = -0.49 and 0.11, respectively (p<0.001)).
Multiple linear regression showed that {Pi10} and {Perc15} at baseline were associated with a lower {FEV1} after follow-up (p<0.05). For each sd increase in {Pi10} and decrease in {Perc15} the {FEV1} decreased by 20 mL and 30.2 mL, respectively. The odds ratio for developing airflow limitation after 3 years was 2.45 for a 1-mm higher {Pi10} and 1.46 for a 10-{HU} lower {Perc15} (p<0.001). A greater degree of airway wall thickness and emphysema was associated with a higher {FEV1} decline and development of airflow limitation after 3 years of follow-up.}, + file = {Moha15.pdf:pdf/Moha15.pdf:PDF}, + optnote = {DIAG}, + number = {3}, + pmid = {25614166}, + month = {1}, + gsid = {13584450798353585582}, + gscites = {47}, + ss_id = {5d64335056fe6f14983a92010a3b6ae8c92d4b73}, + all_ss_ids = {['5d64335056fe6f14983a92010a3b6ae8c92d4b73']}, +} + +@conference{Moha16, + author = {Mohamed Hoesein, F. A. A. and Pompe, E. and Lynch, D. A. and Lessmann, N. and Lammers, J. W. J. and I{\v{s}}gum, I. and de Jong, P. A.}, + title = {Computed tomographic findings are associated with respiratory mortality in the {National Lung Screening Trial}}, + booktitle = RSNA, + year = {2016}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Mol10, + author = {C. Mol and B. van Ginneken and M. de Bruijne and P. A. de Jong and M. Oudkerk and A. Dirksen and P. Zanen}, + title = {Correction of Quantitative Emphysema Measures with Density Calibration Based on Measurements in the Trachea}, + booktitle = RSNA, + year = {2010}, + abstract = {Purpose: Emphysema quantification with computed tomography is known to be affected by variations in scanner type, scan protocol and proprietary reconstruction algorithms from manufacturers. We investigated if automatic calibration of density values based on per scan measurements in air outside the body or in the trachea reduced variability between emphysema scores from similar populations examined in a multi-center study. Method and materials: Three cohorts of heavy smokers, 500 subjects each, were randomly taken from lung cancer screening trials in two cities in The Netherlands and one in Denmark. Inclusion criteria, age, smoking history and lung function were comparable. Cohort A was scanned at full inspiration with 16 x 0.75 collimation with a Philips Brilliance scanner, low dose acquisition, reconstructed with a Philips B kernel to 1.0 mm section with 0.7 mm increment. Settings for the other cohorts were similar except that Cohort B was scanned with 1.0 mm increment and a Philips D kernel, and Cohort C was scanned with Siemens Sensation 16 and 64 scanners with a B30 kernel. All scans were processed with in-house developed software that segmented the lungs, excluded the airways, and computed the 15th percentile (PD15) emphysema measurement. The software also segmented the trachea and the region of air outside the body. Measurements were computed on the original data and on calibrated data. For calibration a Gaussian distribution was fitted to density values obtained from either air outside the body or from the trachea. HU values were shifted so that the peak of this Gaussian distribution coincided with -1000 HU. Results: On the original data, the mean(sd) of PD15 for the three cohorts was: -917(20), -942(21), -930(23) HU. Calibration using air outside the body was not effective: -908(20), -932(21), -921(23).
Calibration using air in the trachea, however, almost completely removed the difference between the distributions of PD15: -938(18), -934(21), -932(22). Conclusion: The use of different scanners and reconstruction algorithms results in density variations in low attenuation areas within the body. A calibration procedure based on air measurements in the trachea can largely compensate for these effects. Clinical relevance/application: To obtain comparable emphysema measurements in multi-center studies, air density calibration based on measurements in air containing structures inside the body is essential.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6186758846186660553}, + gscites = {1}, +} + +@article{Mooi18, + author = {Germonda Mooij and Ines Bagulho and Henkjan Huisman}, + title = {Automatic segmentation of prostate zones}, + journal = {arXiv:1806.07146}, + year = {2018}, + abstract = {Convolutional networks have become state-of-the-art techniques for automatic medical image analysis, with the U-net architecture being the most popular at this moment. In this article we report the application of a 3D version of U-net to the automatic segmentation of prostate peripheral and transition zones in 3D MRI images. Our results are slightly better than recent studies that used 2D U-net and handcrafted feature approaches. In addition, we test ideas for improving the 3D U-net setup, by 1) letting the network segment surrounding tissues, making use of the fixed anatomy, and 2) adjusting the network architecture to reflect the anisotropy in the dimensions of the MRI image volumes. While the latter adjustment gave a marginal improvement, the former adjustment showed a significant deterioration of the network performance. We were able to explain this deterioration by inspecting feature map activations in all layers of the network. We show that to segment more tissues the network replaces feature maps that were dedicated to detecting prostate peripheral zones, by feature maps detecting the surrounding tissues.}, + optnote = {DIAG}, + month = {6}, + gsid = {11990056638194684452}, + gscites = {14}, + ss_id = {484e166d13638c7a458fc95ce81026d30bedec10}, + all_ss_ids = {['484e166d13638c7a458fc95ce81026d30bedec10']}, +} + +@mastersthesis{Mooi19, + author = {Germonda Mooij}, + title = {Using GANs to synthetically stain histopathological images to generate training data for automatic mitosis detection in breast tissue}, + abstract = {Generative adversarial networks (GANs) have been proven effective at mapping medical images from one domain to another (e.g. from CT to MRI). + In this study we investigate the effectiveness of GANs at mapping images of breast tissue between histopathological stains. + Breast cancer is the most common cancer in women worldwide. Counting mitotic figures in histological images of breast cancer tissue has been shown to be a reliable and independent prognostic marker. Most successful methods for automatic counting involve training deep neural networks on H&E stained slides. This training requires extensive manual annotations of mitotic figures in H&E stained slides, which suffers from a low inter-observer agreement. Manual counting in PHH3 stained slides has a much higher inter-observer agreement. + In this project we aimed to train GANs to map PHH3 slides to synthetic H&E slides and vice versa. 
A mitosis classifier is used to quantify the quality of the synthetic images, by comparing its performance after training on synthetic images with training on real images.}, + file = {Mooi19.pdf:pdf/Mooi19.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2019}, + journal = {Master thesis}, +} + +@article{Mook09, + author = {D. O. Mook-Kanamori and S. Holzhauer and L. M. Hollestein and B. Durmus and R. Manniesing and M. Koek and G. Boehm and E. M. van der Beek and A. Hofman and J. C M Witteman and M. H. Lequin and V. W V Jaddoe}, + title = {Abdominal fat in children measured by ultrasound and computed tomography}, + journal = UMB, + year = {2009}, + volume = {35}, + pages = {1938--1946}, + doi = {10.1016/j.ultrasmedbio.2009.07.002}, + abstract = {The prevalence of childhood obesity is increasing rapidly. Visceral fat plays an important role in the pathogenesis of metabolic and cardiovascular diseases. Currently, computed tomography (CT) is broadly seen as the most accurate method of determining the amount of visceral fat. The main objective was to examine whether measures of abdominal visceral fat can be determined by ultrasound in children and whether CT can be replaced by ultrasound for this purpose. To assess whether preperitoneal fat thickness and area are good approximations of visceral fat at the umbilical level, we first retrospectively examined 47 CT scans of nonobese children (body mass index <30kg/m(2); median age 7.9 y [95\% range 1.2 to 16.2]). Correlation coefficients between visceral and preperitoneal fat thickness and area were 0.58 (p<0.001) and 0.76 (p<0.001), respectively. Then, to assess how preperitoneal and subcutaneous fat thicknesses and areas measured by ultrasound compare with these parameters in CT, we examined 34 nonobese children (median age 9.5 [95\% range 0.3 to 17.0]) by ultrasound and CT. Ultrasound measurements of preperitoneal and subcutaneous fat were correlated with CT measurements, with correlation coefficients ranging from 0.75-0.97 (all p<0.001). Systematic differences of up to 24.0cm(2) for preperitoneal fat area (95\% confidence interval -29.9 to 77.9cm(2)) were observed when analyzing the results described by the Bland-Altman method. Our findings suggest that preperitoneal fat can be used as an approximation for visceral fat in children and that measuring abdominal fat with ultrasound in children is a valid method for epidemiological and clinical studies. However, the exact agreement between the ultrasound and CT scan was limited, which indicates that ultrasound should be used carefully for obtaining exact fat distribution measurements in individual children.}, + file = {Mook09.pdf:pdf\\Mook09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {19800165}, + month = {12}, + gsid = {17973696798080974141}, + gscites = {64}, +} + +@inproceedings{Moor18a, + author = {Timothy de Moor and Alejandro Rodriguez-Ruiz and Ritse Mann and Jonas Teuwen}, + title = {Automated soft tissue lesion detection and segmentation in digital mammography using a u-net deep learning network}, + booktitle = {International Workshop on Breast Imaging}, + year = {2018}, + url = {https://arxiv.org/abs/1802.06865}, + abstract = {Computer-aided detection or decision support systems aim to improve breast cancer screening programs by helping radiologists to evaluate digital mammography (DM) exams. 
Commonly such methods proceed in two steps: selection of candidate regions for malignancy, and later classification as either malignant or not. In this study, we present a candidate detection method based on deep learning to automatically detect and additionally segment soft tissue lesions in DM. A database of DM exams (mostly bilateral and two views) was collected from our institutional archive.}, + file = {:pdf/Moor18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['9fca3cab7f8cf1071094591bc13ca53de528dac6']}, + gscites = {33}, +} + +@article{Moor2018, + author = {de Moor, Timothy and Rodriguez-Ruiz, Alejandro and Mann, Ritse and Teuwen, Jonas}, + title = {Automated soft tissue lesion detection and segmentation in digital mammography using a u-net deep learning network}, + abstract = {Computer-aided detection or decision support systems aim to improve breast cancer screening programs by helping radiologists to evaluate digital mammography (DM) exams. Commonly such methods proceed in two steps: selection of candidate regions for malignancy, and later classification as either malignant or not. In this study, we present a candidate detection method based on deep learning to automatically detect and additionally segment soft tissue lesions in DM. A database of DM exams (mostly bilateral and two views) was collected from our institutional archive.}, + journal = {arXiv:1802.06865}, + optnote = {DIAG}, + year = {2018}, + month = {2}, +} + +@inproceedings{Mor18, + author = {Moriakov, Nikita and Michielsen, Koen and Mann, Ritse and Adler, Jonas and Sechopolous, Ioannis and Teuwen, Jonas}, + title = {Deep learning framework for digital breast tomosynthesis reconstruction}, + booktitle = MI, + year = {2019}, + series = SPIE, + doi = {10.1117/12.2512912}, + url = {https://arxiv.org/abs/1808.04640}, + abstract = {Digital breast tomosynthesis is rapidly replacing digital mammography as the basic x-ray technique for evaluation of the breasts. However, the sparse sampling and limited angular range gives rise to dierent artifacts, which manufacturers try to solve in several ways. In this study we propose an extension of the Learned Primal-Dual algorithm for digital breast tomosynthesis. The Learned Primal-Dual algorithm is a deep neural network consisting of several `reconstruction blocks', which take in raw sinogram data as the initial input, perform a forward and a backward pass by taking projections and back-projections, and use a convolutional neural network to produce an intermediate reconstruction result which is then improved further by the successive reconstruction block. We extend the architecture by providing breast thickness measurements as a mask to the neural network and allow it to learn how to use this thickness mask. We have trained the algorithm on digital phantoms and the corresponding noise-free/noisy projections, and then tested the algorithm on digital phantoms for varying level of noise. Reconstruction performance of the algorithms was compared visually, using MSE loss and Structural Similarity Index. 
Results indicate that the proposed algorithm outperforms the baseline iterative reconstruction algorithm in terms of reconstruction quality for both breast edges and internal structures and is robust to noise.}, + file = {:pdf/Mori19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + ss_id = {aa0eeb67cdccfa3ea49185dc9d7711c21ab29dde}, + all_ss_ids = {['aa0eeb67cdccfa3ea49185dc9d7711c21ab29dde']}, + gscites = {10}, +} + +@conference{Mord12, + author = {Jan Jurre Mordang and Marcel T. H. Oei and Rieneke {van den Boom} and Hendrik O. A. Laue and Luuk J. Oostveen and Mathias Prokop and Bram van Ginneken and Rashindra Manniesing}, + title = {Effect of Dose Reduction in {CT} Perfusion Scans on Cerebral Blood Flow and Volume Computed with Three Perfusion Software Packages: Analysis with a Digital Phantom}, + booktitle = RSNA, + year = {2012}, + abstract = {Purpose: The calculation of perfusion maps from dynamic CT sequences may depend on the software package used and the dose that was used during acquisition. The purpose of this work was to study this using a hybrid phantom consisting of intensity curves from patient data combined with realistic noise. Methods and Materials: We included CTP data from 10 patients with acute ischemic stroke (320 multi-detector row, 80 kV, 24 volumes, total scanning time of 202 s, cumulative dose of 2250 mAs). In each scan regions of interest were indicated in white matter (WM) and gray matter (GM). Tissue curves were averaged over these regions and used in a digital phantom. Noise patterns from an anthropomorphic head phantom scanned in the same scanner were added to the tissue curves to simulate an acquisition at 80, 70, 60 and 50% of the original dose. Also noise was added to the tissue curves to simulate 100% dose, these were used as a reference standard. The constructed phantom data set was analyzed with three perfusion software packages developed by Vital Images (VI), TeraRecon (TR) and Fraunhofer MEVIS (FM). With these packages the cerebral blood flow (CBF) and cerebral blood volume (CBV) were calculated. For each software package and each tissue type, ratios were calculated between the perfusion values at the simulated dose level and the reference standard. Wilcoxon ranked sum tests were used to determine if the ratios deviate from the reference. Results: CBF and CBV values of GM and WM increased significantly for dose reduction for al perfusion software packages (p<0.05).The CBF values in GM and WM increased significantly with dose reduction in al perfusion software packages (p<0.05). The mean patient CBF ratios calculated with the software packages ranged from 1.14 at 80% dose to 1.47 at 50% dose in GM. In WM, mean CBF values ranged from 1.14 up to 1.51 at 80 and 50% dose, respectively. The CBV values showed similar results. Here, mean ratios ranged from 1.18 to 1.49 in GM and 1.15 to 1.50 in WM, again at 80 and 50% dose. Conclusion: Cerebral blood flow and cerebral blood volume are strongly dependent on radiation dose in CT perfusion imaging. Dose reduction may lead to errors in perfusion values calculated with most software packages. Clinical relevance/application: Radiologists have to be aware of this effect and have to interpret absolute CT perfusion values with care if dose reduction is performed}, + optnote = {DIAG}, +} + +@inproceedings{Mord13, + author = {J. J. Mordang and M. T. H. Oei and R. van den Boom and E. J. Smit and M. Prokop and B. van Ginneken and R. 
Manniesing}, + title = {A Pattern Recognition Framework for Vessel Segmentation in {4D CT} of the Brain}, + booktitle = MI, + year = {2013}, + series = SPIE, + pages = {866919}, + doi = {10.1117/12.2006824}, + abstract = {In this study, a pattern recognition-based framework is presented to automatically segment the complete cerebral vasculature from 4D Computed Tomography (CT) patient data. Ten consecutive patients whom were admitted to our hospital on a suspicion of ischemic stroke were included in this study. A background mask and bone mask were calculated based on intensity thresholding and morphological operations, and the following six image features were proposed: 1) a subtraction image of a subtraction image consisting of timing-invariant CTA and non-constrast CT, 2) the area under the curve of a gamma variate function fitted to the tissue curves, 3-5) three optimized parameter values of this gamma variate function, and 6) a vessel likeliness function. After masking bone and background, these features were used to train a linear discriminant voxel classifier (LDC) on regions of interest (ROIs), which were annotated in soft tissue (white matter and gray matter) and vessels by an expert observer. The LDC was trained in a leave-one-out manner in which 9 patients tissue ROIs were used for training and the remaining patient tissue ROIs were used for testing the classifier. To evaluate the frame work, for each training cycle the accuracy was calculated by dividing the true positives and negatives by the true positives and negatives and false positives and negatives. The resulting averaged accuracy was 0.985 ± 0.014 with a range of 0.957 to 0.999.}, + file = {:pdf/Mord13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {6413494452242276462}, + gscites = {1}, + ss_id = {2c75e1d3844da3a4152d44ed23d2281a84051083}, + all_ss_ids = {['2c75e1d3844da3a4152d44ed23d2281a84051083']}, +} + +@inproceedings{Mord14, + author = {Mordang, Jan-Jurre and Hauth, Jakob and den Heeten, GerardJ. and Karssemeijer, Nico}, + title = {Automated Labeling of Screening Mammograms with Arterial Calcifications}, + booktitle = {Breast Imaging}, + year = {2014}, + volume = {8539}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing}, + doi = {10.1007/978-3-319-07887-8_82}, + file = {Mord14.pdf:home/JanJurre/SVNDIAG/literature/pdf/Mord14.pdf:PDF}, + optnote = {DIAG}, + gsid = {13385385247096114761}, + gscites = {2}, + ss_id = {933b9110259e74592a7208444887f79a625210f3}, + all_ss_ids = {['933b9110259e74592a7208444887f79a625210f3']}, +} + +@inproceedings{Mord15, + author = {Jan-Jurre Mordang and Nico Karssemeijer}, + title = {Vessel segmentation in screening mammograms}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + pages = {94140J}, + doi = {10.1117/12.2081804}, + abstract = {Blood vessels are a major cause of false positives in computer aided detection systems for the detection of breast cancer. Therefore, the purpose of this study is to construct a framework for the segmentation of blood vessels in screening mammograms. The proposed framework is based on supervised learning using a cascade classifier. This cascade classifier consists of several stages where in each stage a GentleBoost classifier is trained on Haar-like features. A total of 30 cases were included in this study.
In each image, vessel pixels were annotated by selecting pixels on the centerline of the vessel, control samples were taken by annotating a region without any visible vascular structures. This resulted in a total of 31,000 pixels marked as vascular and over 4 million control pixels. After training, the classifier assigns a vesselness likelihood to the pixels. The proposed framework was compared to three other vessel enhancing methods, i) a vesselness filter, ii) a gaussian derivative filter, and iii) a tubeness filter. The methods were compared in terms of area under the receiver operating characteristics curves, the Az values. The Az value of the cascade approach is 0.85. This is superior to the vesselness, Gaussian, and tubeness methods, with Az values of 0.77, 0.81, and 0.78, respectively. From these results, it can be concluded that our proposed framework is a promising method for the detection of vessels in screening mammograms.}, + file = {Mord15.pdf:pdf\\Mord15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {16648788988505657805}, + gscites = {3}, + ss_id = {8c1ea2ea519e1512cb46b28cd7814c8af900dc06}, + all_ss_ids = {['8c1ea2ea519e1512cb46b28cd7814c8af900dc06']}, +} + +@article{Mord16, + author = {Jan-Jurre Mordang and Albert Gubern-M\'{e}rida and Gerard den Heeten and Nico Karssemeijer}, + title = {Reducing false positives of microcalcification detection systems by removal of breast arterial calcifications}, + journal = MP, + year = {2016}, + volume = {43}, + number = {4}, + month = {3}, + pages = {1676--1687}, + doi = {10.1118/1.4943376}, + url = {http://dx.doi.org/10.1118/1.4943376}, + abstract = {PURPOSE: + In the past decades, computer-aided detection (CADe) systems have been developed to aid screening radiologists in the detection of malignant microcalcifications. These systems are useful to avoid perceptual oversights and can increase the radiologists' detection rate. However, due to the high number of false positives marked by these CADe systems, they are not yet suitable as an independent reader. Breast arterial calcifications (BACs) are one of the most frequent false positives marked by CADe systems. In this study, a method is proposed for the elimination of BACs as positive findings. Removal of these false positives will increase the performance of the CADe system in finding malignant microcalcifications. + METHODS: + A multistage method is proposed for the removal of BAC findings. The first stage consists of a microcalcification candidate selection, segmentation and grouping of the microcalcifications, and classification to remove obvious false positives. In the second stage, a case-based selection is applied where cases are selected which contain BACs. In the final stage, BACs are removed from the selected cases. The BACs removal stage consists of a GentleBoost classifier trained on microcalcification features describing their shape, topology, and texture. Additionally, novel features are introduced to discriminate BACs from other positive findings. + RESULTS: + The CADe system was evaluated with and without BACs removal. Here, both systems were applied on a validation set containing 1088 cases of which 95 cases contained malignant microcalcifications. After bootstrapping, free-response receiver operating characteristics and receiver operating characteristics analyses were carried out. Performance between the two systems was compared at 0.98 and 0.95 specificity.
At a specificity of 0.98, the sensitivity increased from 37% to 52% and the sensitivity increased from 62% up to 76% at a specificity of 0.95. Partial areas under the curve in the specificity range of 0.8-1.0 were significantly different between the system without BACs removal and the system with BACs removal, 0.129 ± 0.009 versus 0.144 ± 0.008 (p<0.05), respectively. Additionally, the sensitivity at one false positive per 50 cases and one false positive per 25 cases increased as well, 37% versus 51% (p<0.05) and 58% versus 67% (p<0.05) sensitivity, respectively. Additionally, the CADe system with BACs removal reduces the number of false positives per case by 29% on average. The same sensitivity at one false positive per 50 cases in the CADe system without BACs removal can be achieved at one false positive per 80 cases in the CADe system with BACs removal. + CONCLUSIONS: + By using dedicated algorithms to detect and remove breast arterial calcifications, the performance of CADe systems can be improved, in particular, at false positive rates representative for operating points used in screening.}, + file = {:pdf/Mord16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27036566}, + publisher = {American Association of Physicists in Medicine ({AAPM})}, + gsid = {13574103016601897640}, + gscites = {12}, + ss_id = {f96c79f90cb53fb753033a0602753c114ba0e9fb}, + all_ss_ids = {['f96c79f90cb53fb753033a0602753c114ba0e9fb']}, +} + +@inproceedings{Mord16b, + author = {Mordang, J. J. and Janssen, T. and Bria, A. and Kooi, T. and Gubern-M\'{e}rida, A. and Karssemeijer, N.}, + title = {Automatic Microcalcification Detection in Multi-vendor Mammography Using Convolutional Neural Networks}, + booktitle = {Breast Imaging}, + year = {2016}, + volume = {9699}, + series = {Lecture Notes in Computer Science}, + publisher = {Springer International Publishing Switzerland}, + pages = {35-42}, + doi = {10.1007/978-3-319-41546-8_5}, + abstract = {Abstract. Convolutional neural networks (CNNs) have shown to be powerful for classification of image data and are increasingly used in medical image analysis. Therefore, CNNs might be very suitable to detect microcalcifications in mammograms. In this study, we have configured a deep learning approach to fulfill this task. To overcome the large class imbalance between pixels belonging to microcalcifications and other breast tissue, we applied a hard negative mining strategy where two CNNs are used. The deep learning approach was compared to a current state-of-the-art method for the detection of microcalcifications: the cascade classifier. Both methods were trained on a large training set including 11,711 positive and 27 million negative samples. For testing, an independent test set was configured containing 5,298 positive and 18 million negative samples. The mammograms included in this study were acquired on mammography systems from three manufactures: Hologic, GE, and Siemens. Receiver operating characteristics analysis was carried out. Over the whole specificity range, the CNN approach yielded a higher sensitivity compared to the cascade classifier. Significantly higher mean sensitivities were obtained with the CNN on the mammograms of each individual manufacturer compared to the cascade classifier in the specificity range of 0 to 0.1.
To our knowledge, this was the first study to use a deep learning strategy for the detection of microcalcifications in mammograms.}, + file = {:pdf/Mord16b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16371627561967415273}, + gscites = {69}, + ss_id = {2c922a216c9684d539ffaf858cace091b2a2f7c7}, + all_ss_ids = {['2c922a216c9684d539ffaf858cace091b2a2f7c7']}, +} + +@article{Mord17, + author = {Mordang, Jan-Jurre and Gubern-Merida, Albert and Bria, Alessandro and Tortorella, Francesco and den Heeten, Gerard and Karssemeijer, Nico}, + title = {Improving computer-aided detection assistance in breast cancer screening by removal of obviously false-positive findings}, + journal = MP, + year = {2017}, + volume = {44}, + number = {4}, + pages = {1390-1401}, + doi = {10.1002/mp.12152}, + abstract = {Computer-aided detection (CADe) systems for mammography screening still mark many false positives. This can cause radiologists to lose confidence in CADe, especially when many false positives are obviously not suspicious to them. In this study, we focus on obvious false positives generated by microcalcification detection algorithms. We aim at reducing the number of obvious false-positive findings by adding an additional step in the detection method. In this step, a multiclass machine learning method is implemented in which dedicated classifiers learn to recognize the patterns of obvious false-positive subtypes that occur most frequently. The method is compared to a conventional two-class approach, where all false-positive subtypes are grouped together in one class, and to the baseline CADe system without the new false-positive removal step. The methods are evaluated on an independent dataset containing 1,542 screening examinations of which 80 examinations contain malignant microcalcifications. Analysis showed that the multiclass approach yielded a significantly higher sensitivity compared to the other two methods (P < 0.0002). At one obvious false positive per 100 images, the baseline CADe system detected 61% of the malignant examinations, while the systems with the two-class and multiclass false-positive reduction step detected 73% and 83%, respectively. Our study showed that by adding the proposed method to a CADe system, the number of obvious false positives can decrease significantly (P < 0.0002).}, + file = {Mord17.pdf:pdf\\Mord17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28182277}, + month = {3}, + gsid = {4803214541066334157}, + gscites = {16}, + ss_id = {c8a213ffbfae47c7471de82e7a6c03b5e48006f6}, + all_ss_ids = {['c8a213ffbfae47c7471de82e7a6c03b5e48006f6']}, +} + +@phdthesis{Mord18, + author = {Jan-Jurre Mordang}, + title = {Towards an independent observer of screening mammograms: detection of calcifications}, + year = {2018}, + url = {https://repository.ubn.ru.nl/handle/2066/200483}, + abstract = {Breast cancer is one of the most deadly types of cancer in the female population in the Netherlands: 1 in every 7 women develops breast cancer during her lifetime, and early detection of this type of cancer can reduce breast cancer related mortality. Breast cancer screening programs are implemented in most developed countries, and millions of mammograms are acquired each year leading to a substantial workload for radiologists, especially in screening programs with double reading. To reduce reading time and improve detection, computer-aided detection (CAD) systems have been developed.}, + copromotor = {M. 
Broeders}, + file = {:pdf/Mord18.pdf:PDF;:png/publications/Mord18.png:PNG image;:png/publications/thumbs/Mord18.png:PNG image}, + optnote = {DIAG}, + promotor = {N. Karssemeijer and G. J. den Heeten}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Mord18a, + author = {Mordang, J J and Gubern-Merida, A and Bria, A and Tortorella, F and Mann, R M and Broeders, M J M and den Heeten, G J and Karssemeijer, N}, + title = {The importance of early detection of calcifications associated with breast cancer in screening}, + journal = BCRT, + year = {2018}, + volume = {167}, + issue = {2}, + month = {1}, + pages = {451--458}, + doi = {10.1007/s10549-017-4527-7}, + abstract = {The aim of this study was to assess how often women with undetected calcifications in prior screening mammograms are subsequently diagnosed with invasive cancer. From a screening cohort of 63,895 women, exams were collected from 59,690 women without any abnormalities, 744 women with a screen-detected cancer and a prior negative exam, 781 women with a false positive exam based on calcifications, and 413 women with an interval cancer. A radiologist identified cancer-related calcifications, selected by a computer-aided detection system, on mammograms taken prior to screen-detected or interval cancer diagnoses. Using this ground truth and the pathology reports, the sensitivity for calcification detection and the proportion of lesions with visible calcifications that developed into invasive cancer were determined. The screening sensitivity for calcifications was 45.5%, at a specificity of 99.5%. A total of 68.4% (n = 177) of cancer-related calcifications that could have been detected earlier were associated with invasive cancer when diagnosed. Screening sensitivity for detection of malignant calcifications is low. Improving the detection of these early signs of cancer is important, because the majority of lesions with detectable calcifications that are not recalled immediately but detected as interval cancer or in the next screening round are invasive at the time of diagnosis.}, + file = {Mord18a.pdf:pdf/Mord18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29043464}, + gsid = {16257625997408044224}, + gscites = {12}, + ss_id = {d6e204d39bc7fb333edc2336187986f5e665a775}, + all_ss_ids = {['d6e204d39bc7fb333edc2336187986f5e665a775']}, +} + +@article{Mori17, + author = {Moriakov, Nikita}, + title = {On Effective Birkhoff's Ergodic Theorem for Computable Actions of Amenable Groups}, + journal = TCS, + year = {2017}, + month = {11}, + doi = {10.1007/s00224-017-9822-5}, + url = {https://doi.org/10.1007/s00224-017-9822-5}, + file = {:pdf/Mori17.pdf:PDF}, + optnote = {DIAG}, +} + +@inproceedings{Mori20, + author = {Moriakov, N. and Adler, J. and Teuwen, J.}, + title = {Kernel of CycleGAN as a principal homogeneous space}, + booktitle = {International Conference on Learning Representations}, + year = {2020}, + url = {https://openreview.net/forum?id=B1eWOJHKvB}, + abstract = {Unpaired image-to-image translation has attracted significant interest due to the invention of CycleGAN, a method which utilizes a combination of adversarial and cycle consistency losses to avoid the need for paired data. It is known that the CycleGAN problem might admit multiple solutions, and our goal in this paper is to analyze the space of exact solutions and to give perturbation bounds for approximate solutions. 
We show theoretically that the exact solution space is invariant with respect to automorphisms of the underlying probability spaces, and, furthermore, that the group of automorphisms acts freely and transitively on the space of exact solutions. We examine the case of zero pure CycleGAN loss first in its generality, and, subsequently, expand our analysis to approximate solutions for extended CycleGAN loss where identity loss term is included. In order to demonstrate that these results are applicable, we show that under mild conditions nontrivial smooth automorphisms exist. Furthermore, we provide empirical evidence that neural networks can learn these automorphisms with unexpected and unwanted results. We conclude that finding optimal solutions to the CycleGAN loss does not necessarily lead to the envisioned result in image-to-image translation tasks and that underlying hidden symmetries can render the result useless.}, + file = {Mori20.pdf:pdf\\Mori20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['6d37b7ceb99c6cc36be602ae9ab2a26232d11bb5', '8f938d6b54c38e469997969380383de635e654aa']}, + gscites = {9}, +} + +@inproceedings{Mosh12, + author = {Ramin Moshavegh and Babak {Ehteshami Bejnordi} and Andrew Mehnert and K Sujathan and Patrik Malm and Ewert Bengtsson}, + title = {Automated segmentation of free-lying cell nuclei in Pap smears for malignancy-associated change analysis}, + booktitle = {Engineering in Medicine and Biology Society (EMBC)}, + year = {2012}, + doi = {10.1109/EMBC.2012.6347208}, + abstract = {This paper presents an automated algorithm for robustly detecting and segmenting free-lying cell nuclei in bright-field microscope images of Pap smears. This is an essential initial step in the development of an automated screening system for cervical cancer based on malignancy associated change (MAC) analysis. The proposed segmentation algorithm makes use of gray-scale annular closings to identify free-lying nuclei-like objects together with marker-based watershed segmentation to accurately delineate the nuclear boundaries. The algorithm also employs artifact rejection based on size, shape, and granularity to ensure only the nuclei of intermediate squamous epithelial cells are retained. An evaluation of the performance of the algorithm relative to expert manual segmentation of 33 fields-of-view from 11 Pap smear slides is also presented. The results show that the sensitivity and specificity of nucleus detection is 94.71% and 85.30% respectively, and that the accuracy of segmentation, measured using the Dice coefficient, of the detected nuclei is 97.30+/-1.3%.}, + file = {:D\:\\University Docs\\PRISM\\Papers\\SPIE 2013\\Mosh12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {8}, +} + +@article{Mour23, + author = {van Mourik, Tim and Koopmans, Peter J. and Bains, Lauren J. and Norris, David G. and Jehee, Janneke F.M.}, + title = {Investigation of layer-specific BOLD signal in the human visual cortex during visual attention}, + doi = {10.52294/001c.87638}, + year = {2023}, + abstract = {Directing spatial attention towards a particular stimulus location enhances cortical responses at corresponding regions in cortex. How attention modulates the laminar response profile within the attended region remains unclear, however. In this paper, we use high-field (7T) functional magnetic resonance imaging to investigate the effects of attention on laminar activity profiles in areas V1-V3 both when a stimulus was presented to the observer and in the absence of visual stimulation. 
Replicating previous findings, we find robust increases in the overall BOLD response for attended regions in cortex, both with and without visual stimulation. When analysing the BOLD response across the individual layers in visual cortex, we observed no evidence for laminar-specific differentiation with attention. We offer several potential explanations for these results, including theoretical, methodological and technical reasons. Additionally, we provide all data and pipelines openly in order to promote analytic consistency across layer-specific studies and improve reproducibility.}, + url = {http://dx.doi.org/10.52294/001c.87638}, + file = {Mour23.pdf:pdf\Mour23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Aperture Neuro}, + citation-count = {0}, + automatic = {yes}, + volume = {3}, +} + +@inproceedings{Muen09, + author = {S. E. A. Muenzing and K. Murphy and B. van Ginneken and J. P. W. Pluim}, + title = {Automatic detection of registration errors for quality assessment in medical image registration}, + booktitle = MI, + year = {2009}, + volume = {7259}, + series = SPIE, + pages = {72590K1--72590K9}, + doi = {10.1117/12.812659}, + abstract = {{A} novel method for quality assessment in medical image registration is presented. {I}t is evaluated on 24 follow-up {CT} scan pairs of the lung. {B}ased on a reference standard of manually matched landmarks we established a pattern recognition approach for detection of local registration errors. {T}o capture characteristics of these misalignments a set of intensity, entropy and deformation related features was employed. {F}eature selection was conducted and a k{NN} classifier was trained and evaluated on a subset of landmarks. {R}egistration errors larger than 2 mm were classified with a sensitivity of 88% and specificity of 94%.}, + file = {Muen09.pdf:pdf\\Muen09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {479701880220897797}, + gscites = {10}, + ss_id = {5710ee8046be85e4702f546f7e18f4c90ac48d87}, + all_ss_ids = {['5710ee8046be85e4702f546f7e18f4c90ac48d87']}, +} + +@inproceedings{Muen10, + author = {S. E. A. Muenzing and B. van Ginneken and J. P. W. Pluim}, + title = {Knowledge Driven Regularization of the Deformation Field for PDE Based Non-Rigid Registration Algorithms}, + booktitle = {Medical Image Analysis for the Clinic - A Grand Challenge}, + year = {2010}, + pages = {127--136}, + abstract = {We propose a novel method to model local regularization of medical image registration. The regularization model incorporates information from two different knowledge sources: 1. statistical aspect, considering regularization as a machine learning problem and 2. anatomical aspect, extracting predominant anatomical structures and modeling the ROI as composition of anatomical objects. Finally a link function is proposed to combine information from above stated knowledge sources. The method was trained and evaluated on a set of five CT lung scan pairs and on the EMPIRE10 dataset.}, + file = {Muen10.pdf:pdf\\Muen10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13737076938483379783}, + gscites = {13}, + ss_id = {123ca7207e628ef99ea5ea90da2e726797c2ed02}, + all_ss_ids = {['123ca7207e628ef99ea5ea90da2e726797c2ed02']}, +} + +@inproceedings{Muen12, + author = {S. E. A. Muenzing and B. van Ginneken and J. P. W. 
Pluim}, + title = {On Combining Algorithms for Deformable Image Registration}, + booktitle = {Biomedical Image Registration}, + year = {2012}, + series = LNCS, + pages = {256--265}, + doi = {10.1007/978-3-642-31340-0_27}, + abstract = {We propose a meta-algorithm for registration improvement by combining deformable image registrations (MetaReg). It is inspired by a well-established method from machine learning, the combination of classifiers. MetaReg consists of two main components: (1) A strategy for composing an improved registration by combining deformation fields from different registration algorithms. (2) A method for regularization of deformation fields post registration (UnfoldReg). In order to compare and combine different registrations, MetaReg utilizes a landmark-based classifier for assessment of local registration quality. We present preliminary results of MetaReg, evaluated on five CT pulmonary breathhold inspiration and expiration scan pairs, employing a set of three registration algorithms (NiftyReg, Demons, Elastix). MetaReg generated for each scan pair a registration that is better than any registration obtained by each registration algorithm separately. On average, 10% improvement is achieved, with a reduction of 30% of regions with misalignments larger than 5mm, compared to the best single registration algorithm.}, + file = {Muen12.pdf:pdf\\Muen12.pdf:PDF}, + optnote = {DIAG}, + gsid = {16342785207326025803}, + gscites = {9}, + ss_id = {2afc979ce2117c36a6bc93edc575b3dbd60c7170}, + all_ss_ids = {['2afc979ce2117c36a6bc93edc575b3dbd60c7170']}, +} + +@article{Muen12a, + author = {S. E. A. Muenzing and B. van Ginneken and K. Murphy and J. P. W. Pluim}, + title = {Supervised Quality Assessment Of Medical Image Registration: Application to intra-patient {CT} lung registration}, + journal = MIA, + year = {2012}, + volume = {16}, + pages = {1521-1531}, + doi = {10.1016/j.media.2012.06.010}, + abstract = {A novel method for automatic quality assessment of medical image registration is presented. The method is based on supervised learning of local alignment patterns, which are captured by statistical image features at distinctive landmark points. A two-stage classifier cascade, employing an optimal multi-feature model, classifies local alignments into three quality categories: correct, poor or wrong alignment. We establish a reference registration error set as basis for training and testing of the method. It consists of image registrations obtained from different nonrigid registration algorithms and manually established point correspondences of automatically determined landmarks. We employ a set of different classifiers and evaluate the performance of the proposed image features based on the classification performance of corresponding single-feature classifiers. Feature selection is conducted to find an optimal subset of image features and the resulting multi-feature model is validated against the set of single-feature classifiers. We consider the setup generic, however, its application is demonstrated on 51 CT follow-up scan pairs of the lung. 
On this data, the proposed method performs with an overall classification accuracy of 90%.}, + file = {Muen12a.pdf:pdf\\Muen12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22981428}, + month = {12}, + gsid = {18008631097448211397}, + gscites = {51}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/109635}, + ss_id = {93f83b026d86c5c2217e36b6b91383d1b8afe3c4}, + all_ss_ids = {['93f83b026d86c5c2217e36b6b91383d1b8afe3c4']}, +} + +@inproceedings{Muen12b, + author = {S.E.A. Muenzing and B. van Ginneken and J. P. W. Pluim}, + title = {DIRBoost: An algorithm for boosting deformable image registration}, + booktitle = ISBI, + year = {2012}, + pages = {1339--1342}, + doi = {10.1109/ISBI.2012.6235813}, + abstract = {We introduce a novel boosting algorithm to boost - i.e. improve on - existing methods for deformable image registration. The proposed DIRBoost algorithm is inspired by the theory on hypothesis boosting, well-known in the field of machine learning. DIRBoost involves a classifier for landmark-based Registration Error Detection (RED). Based on these RED predictions a Voronoi tessellation is generated to obtain a dense estimate of local image registration quality. All areas predicted as erroneous registration are subjected to boosting, i.e. undergo iterative registrations by employing boosting masks on both the fixed and moving image. We evaluated the DIRBoost algorithm on five CT pulmonary breathhold inspiration and expiration scan pairs, employing the NiftyReg registration algorithm. DIRBoost could boost about 50% of the wrongly registered areas which in turn also improved the average landmark registration error by 24%.}, + file = {Muen12b.pdf:pdf\\Muen12b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {380465826604345313}, + gscites = {9}, + ss_id = {8872a136642b7c45de906e5f3350d08da8915e31}, + all_ss_ids = {['8872a136642b7c45de906e5f3350d08da8915e31']}, +} + +@article{Muen14, + author = {Muenzing, Sascha E A. and van Ginneken, Bram and Viergever, Max A. and Pluim, Josien P W.}, + title = {DIRBoost-An algorithm for boosting deformable image registration: Application to lung {CT} intra-subject registration}, + journal = MIA, + year = {2014}, + volume = {18}, + pages = {449--459}, + doi = {10.1016/j.media.2013.12.006}, + abstract = {We introduce a boosting algorithm to improve on existing methods for deformable image registration (DIR). The proposed DIRBoost algorithm is inspired by the theory on hypothesis boosting, well known in the field of machine learning. DIRBoost utilizes a method for automatic registration error detection to obtain estimates of local registration quality. All areas detected as erroneously registered are subjected to boosting, i.e. undergo iterative registrations by employing boosting masks on both the fixed and moving image. We validated the DIRBoost algorithm on three different DIR methods (ANTS gSyn, NiftyReg, and DROP) on three independent reference datasets of pulmonary image scan pairs. 
DIRBoost reduced registration errors significantly and consistently on all reference datasets for each DIR algorithm, yielding an improvement of the registration accuracy by 5-34\% depending on the dataset and the registration algorithm employed.}, + file = {Muen14.pdf:pdf\\Muen14.pdf:PDF}, + optnote = {DIAG}, + number = {3}, + pmid = {24556079}, + month = {4}, + gsid = {17113415496324263537}, + gscites = {28}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/137731}, + ss_id = {c2114441f5e7ab560314ec222a9ab9a38cb9a62b}, + all_ss_ids = {['c2114441f5e7ab560314ec222a9ab9a38cb9a62b']}, +} + +@phdthesis{Muen14a, + author = {S. E. A. Muenzing}, + title = {Learning-based approaches to deformable image registration}, + year = {2014}, + url = {https://dspace.library.uu.nl/handle/1874/363668}, + abstract = {Accurate registration of images is an important and often crucial step in many areas of image processing and analysis, yet it is only used in a small percentage of possible applications. Automated registration methods are not considered to be sufficiently robust to handle complex deformations and locally deviating intensities. The motive of this research has therefore been the development of methodology that learns to cope with such situations, from example registrations defined by experts. Image processing by learning has been successfully applied for image segmentation, but the concept is new to image registration. Thus, the research question of this thesis is in general:-How can machine learning be employed to improve deformable image registration?The developed methods were evaluated on registration of lung CT scans, in which two separate registration tasks were examined: change detection in follow-up scans and registration of inspiration and expiration images. The results of this research are presented in the following chapters. Chapter 2 presents an automatic method for quality assessment of medical image registration. It can be seen as a CAD system for detection and classification of registration errors. In Chapter 3, a boosting method (machine learning) is described that can improve state-of-the art image registration methods. Chapter 4 describes a meta-registration algorithm (machine learning) that automatically combines registrations obtained by different methods in order to arrive at an improved image registration result. Chapter 5 presents a knowledge model (machine learning) for local regularization of nonparametric registration methods. The thesis ends with a summary and general discussion in Chapter 6.}, + copromotor = {J. P. W. Pluim}, + file = {Muen14a.pdf:pdf\\Muen14a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever and B. 
van Ginneken}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Mull19, + author = {Mullooly, Maeve and Ehteshami Bejnordi, Babak and Pfeiffer, Ruth M and Fan, Shaoqi and Palakal, Maya and Hada, Manila and Vacek, Pamela M and Weaver, Donald L and Shepherd, John A and Fan, Bo and Mahmoudzadeh, Amir Pasha and Wang, Jeff and Malkov, Serghei and Johnson, Jason M and Herschorn, Sally D and Sprague, Brian L and Hewitt, Stephen and Brinton, Louise A and Karssemeijer, Nico and van der Laak, Jeroen and Beck, Andrew and Sherman, Mark E and Gierach, Gretchen L}, + title = {Application of convolutional neural networks to breast biopsies to delineate tissue correlates of mammographic breast density}, + journal = NPJBC, + year = {2019}, + volume = {5}, + pages = {43}, + doi = {10.1038/s41523-019-0134-6}, + abstract = {Breast density, a breast cancer risk factor, is a radiologic feature that reflects fibroglandular tissue content relative to breast area or volume. Its histology is incompletely characterized. Here we use deep learning approaches to identify histologic correlates in radiologically-guided biopsies that may underlie breast density and distinguish cancer among women with elevated and low density. We evaluated hematoxylin and eosin (H&E)-stained digitized images from image-guided breast biopsies ( = 852 patients). Breast density was assessed as global and localized fibroglandular volume (%). A convolutional neural network characterized H&E composition. In total 37 features were extracted from the network output, describing tissue quantities and morphological structure. A random forest regression model was trained to identify correlates most predictive of fibroglandular volume ( = 588). Correlations between predicted and radiologically quantified fibroglandular volume were assessed in 264 independent patients. A second random forest classifier was trained to predict diagnosis (invasive vs. benign); performance was assessed using area under receiver-operating characteristics curves (AUC). Using extracted features, regression models predicted global ( = 0.94) and localized ( = 0.93) fibroglandular volume, with fat and non-fatty stromal content representing the strongest correlates, followed by epithelial organization rather than quantity. For predicting cancer among high and low fibroglandular volume, the classifier achieved AUCs of 0.92 and 0.84, respectively, with epithelial organizational features ranking most important. These results suggest non-fatty stroma, fat tissue quantities and epithelial region organization predict fibroglandular volume. The model holds promise for identifying histological correlates of cancer risk in patients with high and low density and warrants further evaluation.}, + file = {Mull19.pdf:pdf\\Mull19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31754628}, + month = {11}, + gsid = {7420919522018227404}, + gscites = {13}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/212635}, + all_ss_ids = {['18d0a58647fbdaaf7cd278cb66ae548a1e5289e5', '981fb8239e2e32cd742059aa270015871aa64b61']}, +} + +@article{Mulle20, + author = {Muller, Philipp L. 
and Liefers, Bart and Treis, Tim and Gomes Rodrigues, Filipa and Olvera-Barrios, Abraham and Paul, Bobby and Dhingra, Narendra and Lotery, Andrew and Bailey, Clare and Taylor, Paul and S\'{a}nchez, Clara I and Tufail, Adnan}, + title = {Reliability of retinal pathology quantification in age-related macular degeneration: Implications for clinical trials and machine learning applications}, + url = {https://www.medrxiv.org/content/10.1101/2020.10.09.20210120v1}, + doi = {https://doi.org/10.1101/2020.10.09.20210120}, + journal = {medrxiv}, + year = {2020}, + abstract = {Purpose: To investigate the inter-reader agreement for grading of retinal alterations in age-related macular degeneration (AMD) using a reading center setting. Methods: In this cross-sectional case series, spectral domain optical coherence tomography (OCT, Topcon 3D OCT, Tokyo, Japan) scans of 112 eyes of 112 patients with neovascular AMD (56 treatment-naive, 56 after three anti-vascular endothelial growth factor injections) were analyzed by four independent readers. Imaging features specific for AMD were annotated using a novel custom-built annotation platform. Dice score, Bland-Altman plots, coefficients of repeatability (CR), coefficients of variation (CV), and intraclass correlation coefficients (ICC) were assessed. Results: Loss of ellipsoid zone, pigment epithelium detachment, subretinal fluid, and Drusen were the most abundant features in our cohort. The features subretinal fluid, intraretinal fluid, hypertransmission, descent of the outer plexiform layer, and pigment epithelium detachment showed highest inter-reader agreement, while detection and measures of loss of ellipsoid zone and retinal pigment epithelium were more variable. The agreement on the size and location of the respective annotation was more consistent throughout all features. Conclusions: The inter-reader agreement depended on the respective OCT-based feature. A selection of reliable features might provide suitable surrogate markers for disease progression and possible treatment effects focusing on different disease stages. This might give opportunities to a more time- and cost-effective patient assessment and improved decision-making as well as have implications for clinical trials and training machine learning algorithms.}, + optnote = {DIAG}, + month = {10}, + ss_id = {67e43291243b436029c050ac11f62541bc3e2909}, + all_ss_ids = {['67e43291243b436029c050ac11f62541bc3e2909']}, + gscites = {11}, +} + +@article{Muna21, + author = {Munari, Enrico and Marconi, Marcella and Querzoli, Giulia and Lunardi, Gianluigi and Bertoglio, Pietro and Ciompi, Francesco and Tosadori, Alice and Eccher, Albino and Tumino, Nicola and Quatrini, Linda and Vacca, Paola and Rossi, Giulio and Cavazza, Alberto and Martignoni, Guido and Brunelli, Matteo and Netto, George J. and Moretta, Lorenzo and Zamboni, Giuseppe and Bogina, Giuseppe}, + title = {Impact of PD-L1 and PD-1 Expression on the Prognostic Significance of CD8+, Tumor-Infiltrating Lymphocytes in Non-Small Cell Lung Cancer.}, + doi = {10.3389/fimmu.2021.680973}, + pages = {680973}, + volume = {12}, + abstract = {The immune infiltrate within tumors has proved to be very powerful in the prognostic stratification of patients and much attention is also being paid towards its predictive value. 
In this work we therefore aimed at clarifying the significance and impact of PD-L1 and PD-1 expression on the prognostic value of CD8 tumor infiltrating lymphocytes (TILs) in a cohort of consecutive patients with primary resected non-small cell lung cancer (NSCLC). Tissue microarrays (TMA) were built using one representative formalin fixed paraffin embedded block for every case, with 5 cores for each block. TMA sections were stained with PD-L1 (clone SP263), PD-1 (clone NAT105) and CD8 (clone SP57). Number of CD8 cells per mm were automatically counted; median, 25 and 75 percentiles of CD8 cells were used as threshold for statistical clinical outcome analysis and evaluated in patients subgroups defined by expression of PD-L1 and PD-1 within tumors. We found an overall strong prognostic value of CD8 cells in our cohort of 314 resected NSCLC, especially in PD-L1 negative tumors lacking PD-1 TILs, and demonstrated that in PD-L1 positive tumors a higher density of CD8 lymphocytes is necessary to improve the prognosis. Our data strengthen the concept of the importance of the assessment and quantification of the immune contexture in cancer and, similarly to what has been carried on in colorectal cancer, promote the efforts for the establishment of an Immunoscore for NSCLC for prognostic and possibly predictive purposes.}, + file = {Muna21.pdf:pdf\\Muna21.pdf:PDF}, + journal = {Frontiers in immunology}, + optnote = {DIAG}, + pmid = {34122444}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/234187}, + ss_id = {0759829c3d1484b5ba08e60ffb8282207ab68a8b}, + all_ss_ids = {['0759829c3d1484b5ba08e60ffb8282207ab68a8b']}, + gscites = {15}, +} + +@article{Muna21a, + author = {Munari, Enrico and Mariotti, Francesca R. and Quatrini, Linda and Bertoglio, Pietro and Tumino, Nicola and Vacca, Paola and Eccher, Albino and Ciompi, Francesco and Brunelli, Matteo and Martignoni, Guido and Bogina, Giuseppe and Moretta, Lorenzo}, + title = {PD-1/PD-L1 in Cancer: Pathophysiological, Diagnostic and Therapeutic Aspects.}, + doi = {10.3390/ijms22105123}, + issue = {10}, + volume = {22}, + abstract = {Immune evasion is a key strategy adopted by tumor cells to escape the immune system while promoting their survival and metastatic spreading. Indeed, several mechanisms have been developed by tumors to inhibit immune responses. PD-1 is a cell surface inhibitory receptor, which plays a major physiological role in the maintenance of peripheral tolerance. In pathological conditions, activation of the PD-1/PD-Ls signaling pathway may block immune cell activation, a mechanism exploited by tumor cells to evade the antitumor immune control. Targeting the PD-1/PD-L1 axis has represented a major breakthrough in cancer treatment. Indeed, the success of PD-1 blockade immunotherapies represents an unprecedented success in the treatment of different cancer types. To improve the therapeutic efficacy, a deeper understanding of the mechanisms regulating PD-1 expression and signaling in the tumor context is required. We provide an overview of the current knowledge of PD-1 expression on both tumor-infiltrating T and NK cells, summarizing the recent evidence on the stimuli regulating its expression. 
We also highlight perspectives and limitations of the role of PD-L1 expression as a predictive marker, discuss well-established and novel potential approaches to improve patient selection and clinical outcome and summarize current indications for anti-PD1/PD-L1 immunotherapy.}, + file = {Muna21a.pdf:pdf\\Muna21a.pdf:PDF}, + journal = {International journal of molecular sciences}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34066087}, + year = {2021}, + ss_id = {7e43e8a75e61c350cc95862bddae395e662a76cd}, + all_ss_ids = {['7e43e8a75e61c350cc95862bddae395e662a76cd']}, + gscites = {57}, +} + +@article{Muna22, + author = {Munari, Enrico and Querzoli, Giulia and Brunelli, Matteo and Marconi, Marcella and Sommaggio, Marco and Cocchi, Marco A. and Martignoni, Guido and Netto, George J. and Calio, Anna and Quatrini, Linda and Mariotti, Francesca R. and Luchini, Claudio and Girolami, Ilaria and Eccher, Albino and Segala, Diego and Ciompi, Francesco and Zamboni, Giuseppe and Moretta, Lorenzo and Bogina, Giuseppe}, + title = {Comparison of three validated PD-L1 immunohistochemical assays in urothelial carcinoma of the bladder: interchangeability and issues related to patient selection.}, + doi = {10.3389/fimmu.2022.954910}, + pages = {954910}, + volume = {13}, + abstract = {Different programmed cell death-ligand 1 (PD-L1) assays and scoring algorithms are being used in the evaluation of PD-L1 expression for the selection of patients for immunotherapy in specific settings of advanced urothelial carcinoma (UC). In this paper, we sought to investigate three approved assays (Ventana SP142 and SP263, and Dako 22C3) in UC with emphasis on implications for patient selection for atezolizumab/pembrolizumab as the first line of treatment. Tumors from 124 patients with invasive UC of the bladder were analyzed using tissue microarrays (TMA). Serial sections were stained with SP263 and SP142 on Ventana Benchmark Ultra and with 22C3 on Dako Autostainer Link 48. Stains were evaluated independently by two observers and scored using the combined positive score (CPS) and tumor infiltrating immune cells (IC) algorithms. Differences in proportions (DP), overall percent agreement (OPA), positive percent agreement (PPA), negative percent agreement (NPA), and Cohen k were calculated for all comparable cases. Good overall concordance in analytic performance was observed for 22C3 and SP263 with both scoring algorithms; specifically, the highest OPA was observed between 22C3 and SP263 (89.6%) when using CPS. On the other hand, SP142 consistently showed lower positivity rates with high differences in proportions (DP) compared with 22C3 and SP263 with both CPS and IC, and with a low PPA, especially when using the CPS algorithm. 
In conclusion, 22C3 and SP263 assays show comparable analytical performance while SP142 shows divergent staining results, with important implications for the selection of patients for both pembrolizumab and atezolizumab.}, + file = {Muna22.pdf:pdf\\Muna22.pdf:PDF}, + journal = {Frontiers in immunology}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35967344}, + year = {2022}, + ss_id = {e7158d01e00c44011785e31e48d9082c3b3e08ce}, + all_ss_ids = {['e7158d01e00c44011785e31e48d9082c3b3e08ce']}, + gscites = {3}, +} + +@article{Muns19a, + author = {Munsterman, Isabelle D and Van Erp, Merijn and Weijers, Gert and Bronkhorst, Carolien and de Korte, Chris L and Drenth, Joost PH and van der Laak, Jeroen AWM and Tjwa, Eric TTL}, + title = {A Novel Automatic Digital Algorithm that Accurately Quantifies Steatosis in NAFLD on Histopathological Whole-Slide Images}, + journal = CytometryB, + year = {2019}, + doi = {10.1002/cyto.b.21790}, + abstract = {BACKGROUND: Accurate assessment of hepatic steatosis is a key to grade disease severity in non-alcoholic fatty liver disease (NAFLD). METHODS: We developed a digital automated quantification of steatosis on whole-slide images (WSIs) of liver tissue and performed a validation study. Hematoxylin-eosin stained liver tissue slides were digitally scanned, and steatotic areas were manually annotated. We identified thresholds for size and roundness parameters by logistic regression to discriminate steatosis from surrounding liver tissue. The resulting algorithm produces a steatosis proportionate area (SPA; ratio of steatotic area to total tissue area described as percentage). The software can be implemented as a Java plug-in in FIJI, in which digital WSI can be processed automatically using the Pathomation extension. RESULTS: We obtained liver tissue specimens from 61 NAFLD patients and 18 controls. The area under the curve of correctly classified steatosis by the algorithm was 0.970 (95% CI 0.968-0.973), P < 0.001. Accuracy of the algorithm was 91.9%, with a classification error of 8.1%. SPA correlated significantly with steatosis grade (Rs = 0.845, CI: 0.749-0.902, P < 0.001) and increased significantly with each individual steatosis grade, except between Grade 2 and 3. CONCLUSIONS: We have developed a novel digital analysis algorithm that accurately quantifies steatosis on WSIs of liver tissue. This algorithm can be incorporated when quantification of steatosis is warranted, such as in clinical trials studying efficacy of new therapeutic interventions in NAFLD.}, + file = {Muns19a.pdf:pdf\\Muns19a.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + pmid = {31173462}, + publisher = {Wiley Online Library}, + month = {6}, + gsid = {9651421720990871193}, + gscites = {26}, + ss_id = {aa4959c07dab73932a118ba1d75fdde23e55ffc6}, + all_ss_ids = {['aa4959c07dab73932a118ba1d75fdde23e55ffc6']}, +} + +@inproceedings{Murp07, + author = {Murphy, K. and Schilham, A. M. R. and Gietema, H. and Prokop, M. and van Ginneken, B.}, + title = {Automated detection of pulmonary nodules from low-dose computed tomography scans using a two-stage classification system based on local image features}, + booktitle = MI, + year = {2007}, + volume = {6514}, + series = SPIE, + pages = {651410-1--651410-12}, + doi = {10.1117/12.713370}, + abstract = {{T}he automated detection of lung nodules in {CT} scans is an important problem in computer-aided diagnosis. {I}n this paper an approach to nodule candidate detection is presented which utilises the local image features of shape index and curvedness. 
{F}alse-positive candidates are removed by means of a two-step approach using k{NN} classification. {T}he k{NN} classifiers are trained using features of the image intensity gradients and grey-values in addition to further measures of shape index and curvedness profiles in the candidate regions. {T}he training set consisted of data from 698 scans while the independent test set comprised a further 142 images. {A}t 84% sensitivity an average of 8.2 false-positive detections per scan were observed.}, + file = {Murp07.pdf:pdf\\Murp07.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + month = {3}, + gsid = {11443400652096701431}, + gscites = {22}, + ss_id = {46ad5997668ef588b9eae850299e642b90262060}, + all_ss_ids = {['46ad5997668ef588b9eae850299e642b90262060']}, +} + +@inproceedings{Murp08, + author = {Murphy, K. and van Ginneken, B. and Klein, S. and Staring, M. and Pluim, J. P. W.}, + title = {Semi-{A}utomatic {R}eference {S}tandard {C}onstruction for {Q}uantitative {E}valuation of {L}ung {CT} {R}egistration}, + booktitle = MICCAI, + year = {2008}, + volume = {5242}, + series = LNCS, + doi = {10.1007/978-3-540-85990-1_121}, + abstract = {{A}n algorithm is presented for the efficient semi-automatic construction of a detailed reference standard for registration in thoracic {CT}. {A} well-distributed set of 100 landmarks is detected fully automatically in one scan of a pair to be registered. {U}sing a custom-designed interface, observers locate corresponding anatomic locations in the second scan. {T}he manual annotations are used to learn the relationship between the scans and after approximately twenty manual marks the remaining points are matched automatically. {I}nter-observer differences demonstrate the accuracy of the matching and the applicability of the reference standard is demonstrated on two different sets of registration results over 19 {CT} scan pairs.}, + file = {Murp08.pdf:pdf\\Murp08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {18982703}, + gsid = {12888240555944431806}, + gscites = {86}, + ss_id = {42051c5c9cfd535a32221211dd2fec5ab3d2a12f}, + all_ss_ids = {['42051c5c9cfd535a32221211dd2fec5ab3d2a12f']}, +} + +@inproceedings{Murp08a, + author = {K. Murphy and B. van Ginneken and J.P.W. Pluim and S. Klein and M. Staring}, + title = {Quantitative {A}ssessment of {R}egistration in {T}horacic {CT}}, + booktitle = {The First International Workshop on Pulmonary Image Analysis}, + year = {2008}, + publisher = {Lulu.com}, + pages = {203-211}, + abstract = {{A} novel method for quantitative evaluation of registration systems in thoracic {CT} is utilised to examine the effects of varying system parameters on registration error. {R}egional analysis is implemented to determine whether registration error is more prevalent in particular areas of the lungs. {E}xperiments on twenty-four {CT} scan-pairs prove that in many cases significant reductions in processing time can be achieved without much loss of registration accuracy. {M}ore difficult cases require additional steps in order to achieve maximum precision. {L}arger errors appear more frequently in the lower regions of the lungs close to the diaphragm.}, + file = {Murp08a.pdf:pdf\\Murp08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {8663339826075135727}, + gscites = {3}, +} + +@conference{Murp08b, + author = {K. Murphy and M. Prokop and C. M. Schaefer-Prokop and H. Gietema and G. D. Nossent and B. van Ginneken and J. P. W. Pluim and Y. 
Arzhaeva}, + title = {Improved {E}fficiency of {A}ssessment of {I}nterstitial {L}ung {D}isease {P}rogression in {CT} of the {C}hest by {V}isualisation of {A}utomatically-registered {I}mage {P}airs}, + booktitle = RSNA, + year = {2008}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Murp08c, + author = {K. Murphy and B. van Ginneken and E. M. van Rikxoort and B. de Hoop and J. P. W. Pluim and M. Prokop}, + title = {Prediction of {O}bstructive {P}ulmonary {F}unction by {Q}uantitative {L}obar {E}valuation of {D}ensity {C}hange between {I}nspiration and {E}xpiration on {T}horacic {CT}}, + booktitle = RSNA, + year = {2008}, + abstract = {PURPOSE To investigate density change between inspiration and expiration in thoracic CT at a lobar level and establish its relationship with pulmonary function. METHOD AND MATERIALS Sixty-seven male former or current heavy smokers underwent low-dose CT (16x0.75mm, 30 mAs, 120-140 kVp) at full inspiration, ultra low-dose CT (20 mAs, 90 kVp) at full expiration, and pulmonary function testing on the same day as part of a lung cancer screening trial. With in-house software, automatic non-rigid registration was performed to align the expiration scans precisely with their inspiration counterparts and the lobes were automatically segmented in the inspiration scans. Both scans were then blurred slightly to reduce the effect of noise and vessels were excluded by means of thresholding. The ratio of the Hounsfield values at expiration (H_exp) and at inspiration (H_insp) was calculated for each voxel in the registered lung volumes. The median, H, of the H_exp/H_insp ratios was calculated per lobe and for the entire lung volume and these medians were plotted against the FEV1/FVC ratios. Correlation coefficients, r, were measured to determine the presence of linear relationships between H and FEV1/FVC. Lines were fitted to the data and the line slopes, m, calculated in order to determine the predictive power of H in determining pulmonary function impairment. RESULTS The FEV1/FVC ratios varied from 0.48 to 0.92 with a mean value of 0.71. A stronger correlation between H and FEV1/FVC was revealed in the lower lobes (r from -0.66 to -0.61) than in the upper and middle lobes (r from -0.47 to -0.38). Additionally the H ratios were more strongly predictive of FEV1/FVC for the lower lobes (m from -0.38 to -0.30) than for the upper and middle lobes (m from -0.23 to -0.16). For the entire lung volume the correlation and slope values were r=-0.58, m=-0.27. CONCLUSION Automatic regional density analysis on inspiration and expiration CT allows for the calculation of the ratio H which correlates well with measures of obstructive pulmonary impairment. The lower lobes appear to contribute more to functional impairment than the upper or middle lobes. CLINICAL RELEVANCE/APPLICATION Quantitative analysis of regional density changes in the lung may allow for new insights into the mechanisms of COPD and the determinants of obstructive lung function in smokers.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16482852489757716222}, + gscites = {1}, +} + +@article{Murp09, + author = {K. Murphy and B. van Ginneken and A. M. R. Schilham and B. J. de Hoop and H. A. Gietema and M. 
Prokop}, + title = {A Large Scale Evaluation of Automatic Pulmonary Nodule Detection in Chest {CT} using Local Image Features and k-Nearest-Neighbour Classification}, + journal = MIA, + year = {2009}, + volume = {13}, + number = {5}, + pages = {757-770}, + doi = {10.1016/j.media.2009.07.001}, + abstract = {{A} scheme for the automatic detection of nodules in thoracic computed tomography scans is presented and extensively evaluated. {T}he algorithm uses the local image features of shape index and curvedness in order to detect candidate structures in the lung volume and applies two successive k-nearest-neighbour classifiers in the reduction of false-positives. {T}he nodule detection system is trained and tested on three databases extracted from a large-scale experimental screening study. {T}he databases are constructed in order to evaluate the algorithm on both randomly chosen screening data as well as data containing higher proportions of nodules requiring follow-up. {T}he system results are extensively evaluated including performance measurements on specific nodule types and sizes within the databases and on lesions which later proved to be malignant. {I}n a random selection of 813 scans from the screening study a sensitivity of 80% with an average 4.2 false-positives per scan is achieved. {T}he detection results presented are a realistic measure of a {CAD} system performance in a low-dose screening study which includes a diverse array of nodules of many varying sizes, types and textures.}, + file = {Murp09.pdf:pdf\\Murp09.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + pmid = {19646913}, + month = {10}, + gsid = {13680973010063298300}, + gscites = {319}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/81262}, + ss_id = {1b28c67c8c23bb85dd86a56e661ece107dd4c98f}, + all_ss_ids = {['1b28c67c8c23bb85dd86a56e661ece107dd4c98f']}, +} + +@conference{Murp09a, + author = {K. Murphy and B. van Ginneken and J. P. W. Pluim and T. A. Altes and M. Prokop and E. E. de Lange}, + title = {Comparison of airflow defects in asthma patients assessed by breath-hold inspiration and expiration {CT} and by hyperpolarized helium-3 magnetic resonance imaging}, + booktitle = RSNA, + year = {2009}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Murp10, + author = {K. Murphy and B. van Ginneken and S. Klein and M. Staring and B. J. de Hoop and M. A. Viergever and J. P. W. Pluim}, + title = {Semi-automatic construction of reference standards for evaluation of image registration}, + journal = MIA, + year = {2011}, + volume = {15}, + pages = {71-84}, + doi = {10.1016/j.media.2010.07.005}, + abstract = {Quantitative evaluation of image registration algorithms is a difficult and under-addressed issue due to the lack of a reference standard in most registration problems. In this work a method is presented whereby detailed reference standard data may be constructed in an efficient semi-automatic fashion. A well-distributed set of n landmarks is detected fully automatically in one scan of a pair to be registered. Using a custom-designed interface, observers define corresponding anatomic locations in the second scan for a specified subset of s of these landmarks. The remaining n - s landmarks are matched fully automatically by a thin-plate-spline based system using the s manual landmark correspondences to model the relationship between the scans. The method is applied to 47 pairs of temporal thoracic CT scans, three pairs of brain MR scans and five thoracic CT datasets with synthetic deformations. 
Interobserver differences are used to demonstrate the accuracy of the matched points. The utility of the reference standard data as a tool in evaluating registration is shown by the comparison of six sets of registration results on the 47 pairs of thoracic CT data.}, + file = {Murp10.pdf:pdf\\Murp10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {20709592}, + month = {2}, + gsid = {15970567742966563807}, + gscites = {135}, + ss_id = {11270fdc633003eec7df21d1d9a8d807d8cf1d0c}, + all_ss_ids = {['11270fdc633003eec7df21d1d9a8d807d8cf1d0c']}, +} + +@inproceedings{Murp10a, + author = {K. Murphy and B. van Ginneken and J. M. Reinhardt and S. Kabus and K. Ding and X. Deng and J. P. W. Pluim}, + title = {Evaluation of Methods for Pulmonary Image Registration: The EMPIRE10 Study}, + booktitle = {Medical Image Analysis for the Clinic - A Grand Challenge}, + year = {2010}, + pages = {11--22}, + abstract = {EMPIRE10 is a public platform for fair and meaningful comparison of registration algorithms which are applied to a database of intra-patient thoracic CT image pairs. Participants download 20 datasets from the internet, register them, and return the results for independent evaluation. Evaluation is carried out in four separate categories and participants are ranked according to their performance. All results are published on the EMPIRE10 website [1]. The Grand Challenge workshop at MICCAI 2010 [2] brings participants together to register a further 10 scan pairs live on site and for discussion and collaboration opportunities.}, + file = {Murp10a.pdf:pdf\\Murp10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13824588902399310721}, + gscites = {45}, + ss_id = {ce5780f841ac6b5576199b0c22d2e0ba374c30b4}, + all_ss_ids = {['ce5780f841ac6b5576199b0c22d2e0ba374c30b4']}, +} + +@phdthesis{Murp11, + author = {K. Murphy}, + title = {Development and evaluation of automated image analysis techniques in thoracic {CT}}, + url = {http://dspace.library.uu.nl/handle/1874/204800}, + abstract = {This work concerns the development and analysis of two automatic image processing techniques in thoracic CT: Nodule detection and Intra-patient non-rigid registration. A method of nodule detection is described and thoroughly evaluated, and the organisation of a public challenge in nodule detection (ANODE09) is described with detailed discussion of the challenge results. Non-rigid registration is applied to a database of COPD patients in order to demonstrate a method of measuring pulmonary function via CT. A method to evaluate non-rigid registration via semi-automatically determined point pairs is described and demonstrated. Finally a public challenge (EMPIRE10) in non-rigid registration of thoracic CT is described and results for 20 algorithms from different institutes are detailed.}, + copromotor = {B. van Ginneken and J. P. W. Pluim}, + file = {Murp11.pdf:pdf\\Murp11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever}, + school = {Utrecht University}, + year = {2011}, + journal = {PhD thesis}, +} + +@article{Murp11a, + author = {K. Murphy and B. van Ginneken and J. M. Reinhardt and S. Kabus and K. Ding and X. Deng and K. Cao and K. Du and G. E. Christensen and V. Garcia and T. Vercauteren and N. Ayache and O. Commowick and G. Malandain and B. Glocker and N. Paragios and N. Navab and V. Gorbunova and J. Sporring and M. de Bruijne and X. Han and M. P. Heinrich and J. A. Schnabel and M. Jenkinson and C. Lorenz and M. Modat and J. R. McClelland and S. Ourselin and S. E. A. Muenzing and M. 
A. Viergever and D. De Nigris and D. L. Collins and T. Arbel and M. Peroni and R. Li and G. C. Sharp and A. Schmidt-Richberg and J. Ehrhardt and R. Werner and D. Smeets and D. Loeckx and G. Song and N. Tustison and B. Avants and J. C. Gee and M. Staring and S. Klein and B. C. Stoel and M. Urschler and M. Werlberger and J. Vandemeulebroucke and S. Rit and D. Sarrut and J. P. W. Pluim}, + title = {Evaluation of Registration Methods on Thoracic {CT}: The {EMPIRE10} Challenge}, + journal = TMI, + year = {2011}, + volume = {31}, + pages = {1901--1920}, + doi = {10.1109/TMI.2011.2158349}, + abstract = {EMPIRE10 (Evaluation of Methods for Pulmonary Image REgistration 2010) is a public platform for fair and meaningful comparison of registration algorithms which are applied to a database of intra-patient thoracic CT image pairs. Evaluation of non-rigid registration techniques is a non trivial task. This is compounded by the fact that researchers typically test only on their own data, which varies widely. For this reason, reliable assessment and comparison of different registration algorithms has been virtually impossible in the past. In this work we present the results of the launch phase of EMPIRE10, which comprised the comprehensive evaluation and comparison of 20 individual algorithms from leading academic and industrial research groups. All algorithms are applied to the same set of 30 thoracic CT pairs. Algorithm settings and parameters are chosen by researchers expert in the configuration of their own method and the evaluation is independent, using the same criteria for all participants. All results are published on the EMPIRE10 website (http://empire10.isi.uu.nl). The challenge remains ongoing and open to new participants. Full results from 24 algorithms have been published at the time of writing. This article details the organisation of the challenge, the data and evaluation methods and the outcome of the initial launch with 20 algorithms. The gain in knowledge and future work are discussed.}, + file = {Murp11a.pdf:pdf\\Murp11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {21632295}, + month = {11}, + gsid = {11653185328679959383}, + gscites = {438}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/96888}, + ss_id = {31f676889affcf561b077e661380e5fd945aabba}, + all_ss_ids = {['31f676889affcf561b077e661380e5fd945aabba']}, +} + +@article{Murp12, + author = {Murphy, K. and Pluim, J. P. W. and van Rikxoort, E. M. and de Jong, P. A. and de Hoop, B. and Gietema, H. A. and Mets, O. and de Bruijne, M. and Lo, P. and Prokop, M. and van Ginneken, B.}, + title = {Toward automatic regional analysis of pulmonary function using inspiration and expiration thoracic {CT}}, + journal = MP, + year = {2012}, + volume = {39}, + pages = {1650--1662}, + doi = {10.1118/1.3687891}, + abstract = {Purpose: To analyze pulmonary function using a fully automatic technique which processes pairs of thoracic {CT} scans acquired at breath-hold inspiration and expiration, respectively. The following research objectives are identified to: (a) describe and systematically analyze the processing pipeline and its results; (b) verify that the quantitative, regional ventilation measurements acquired through CT are meaningful for pulmonary function analysis; (c) identify the most effective of the calculated measurements in predicting pulmonary function; and (d) demonstrate the potential of the system to deliver clinically important information not available through conventional spirometry. 
Methods: A pipeline of automatic segmentation and registration techniques is presented and demonstrated on a database of 216 subjects well distributed over the various stages of {COPD} (chronic obstructive pulmonary disorder). Lungs, fissures, airways, lobes, and vessels are automatically segmented in both scans and the expiration scan is registered with the inspiration scan using a fully automatic nonrigid registration algorithm. Segmentations and registrations are examined and scored by expert observers to analyze the accuracy of the automatic methods. Quantitative measures representing ventilation are computed at every image voxel and analyzed to provide information about pulmonary function, both globally and on a regional basis. These {CT} derived measurements are correlated with results from spirometry tests and used as features in a kNN classifier to assign {COPD} global initiative for obstructive lung disease ({GOLD}) stage. Results: The steps of anatomical segmentation (of lungs, lobes, and vessels) and registration in the workflow were shown to perform very well on an individual basis. All CT-derived measures were found to have good correlation with spirometry results, with several having correlation coefficients, r, in the range of 0.85-0.90. The best performing kNN classifier succeeded in classifying 67\% of subjects into the correct {COPD} {GOLD} stage, with a further 29\% assigned to a class neighboring the correct one. Conclusions: Pulmonary function information can be obtained from thoracic CT scans using the automatic pipeline described in this work. This preliminary demonstration of the system already highlights a number of points of clinical importance such as the fact that an inspiration scan alone is not optimal for predicting pulmonary function. It also permits measurement of ventilation on a per lobe basis which reveals, for example, that the condition of the lower lobes contributes most to the pulmonary function of the subject. It is expected that this type of regional analysis will be instrumental in advancing the understanding of multiple pulmonary diseases in the future.}, + file = {Murp12.pdf:pdf/Murp12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {22380397}, + month = {3}, + gsid = {4159560996296390373}, + gscites = {49}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110012}, + ss_id = {9ca3badc4daf9fd3da93b868e84d06476393fc78}, + all_ss_ids = {['9ca3badc4daf9fd3da93b868e84d06476393fc78']}, +} + +@article{Murp19, + author = {Murphy, Keelin and Habib, Shifa Salman and Zaidi, Syed Mohammad Asad and Khowaja, Saira and Khan, Aamir and Melendez, Jaime and Scholten, Ernst T. and Amad, Farhan and Schalekamp, Steven and Verhagen, Maurits and Philipsen, Rick H. H. M. and Meijers, Annet and van Ginneken, Bram}, + title = {Computer aided detection of tuberculosis on chest radiographs: An evaluation of the CAD4TB v6 system}, + journal = {arXiv:1903.03349}, + year = {2019}, + url = {https://arxiv.org/abs/1903.03349}, + abstract = {There is a growing interest in the automated analysis of chest X-Ray (CXR) as a sensitive and inexpensive means of screening susceptible populations for pulmonary tuberculosis. In this work we evaluate the latest version of CAD4TB, a software platform designed for this purpose. Version 6 of CAD4TB was released in 2018 and is here tested on an independent dataset of 5565 CXR images with GeneXpert (Xpert) sputum test results available (854 Xpert positive subjects). 
A subset of 500 subjects (50% Xpert positive) was reviewed and annotated by 5 expert observers independently to obtain a radiological reference standard. The latest version of CAD4TB is found to outperform all previous versions in terms of area under receiver operating curve (ROC) with respect to both Xpert and radiological reference standards. Improvements with respect to Xpert are most apparent at high sensitivity levels with a specificity of 76% obtained at 90% sensitivity. When compared with the radiological reference standard, CAD4TB v6 also outperformed previous versions by a considerable margin and achieved 98% specificity at 90% sensitivity. No substantial difference was found between the performance of CAD4TB v6 and any of the various expert observers against the Xpert reference standard. A cost and efficiency analysis on this dataset demonstrates that in a standard clinical situation, operating at 90% sensitivity, users of CAD4TB v6 can process 132 subjects per day at an average cost per screen of \$5.95 per subject, while users of version 3 process only 85 subjects per day at a cost of \$8.41 per subject. At all tested operating points version 6 is shown to be more efficient and cost effective than any other version.}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + all_ss_ids = {['61d1507dec6ef557cba474c4f67b1d0ad3631fb0']}, + gscites = {103}, +} + +@article{Murp20, + author = {Murphy, Keelin and Habib, Shifa Salman and Zaidi, Syed Mohammad Asad and Khowaja, Saira and Khan, Aamir and Melendez, Jaime and Scholten, Ernst T. and Amad, Farhan and Schalekamp, Steven and Verhagen, Maurits and Philipsen, Rick H. H. M. and Meijers, Annet and van Ginneken, Bram}, + title = {Computer aided detection of tuberculosis on chest radiographs: An evaluation of the CAD4TB v6 system}, + doi = {10.1038/s41598-020-62148-y}, + pages = {5492}, + url = {https://arxiv.org/abs/1903.03349}, + volume = {10}, + abstract = {There is a growing interest in the automated analysis of chest X-Ray (CXR) as a sensitive and inexpensive means of screening susceptible populations for pulmonary tuberculosis. In this work we evaluate the latest version of CAD4TB, a software platform designed for this purpose. Version 6 of CAD4TB was released in 2018 and is here tested on an independent dataset of 5565 CXR images with GeneXpert (Xpert) sputum test results available (854 Xpert positive subjects). A subset of 500 subjects (50% Xpert positive) was reviewed and annotated by 5 expert observers independently to obtain a radiological reference standard. The latest version of CAD4TB is found to outperform all previous versions in terms of area under receiver operating curve (ROC) with respect to both Xpert and radiological reference standards. Improvements with respect to Xpert are most apparent at high sensitivity levels with a specificity of 76% obtained at 90% sensitivity. When compared with the radiological reference standard, CAD4TB v6 also outperformed previous versions by a considerable margin and achieved 98% specificity at 90% sensitivity. No substantial difference was found between the performance of CAD4TB v6 and any of the various expert observers against the Xpert reference standard. A cost and efficiency analysis on this dataset demonstrates that in a standard clinical situation, operating at 90% sensitivity, users of CAD4TB v6 can process 132 subjects per day at an average cost per screen of \$5.95 per subject, while users of version 3 process only 85 subjects per day at a cost of \$8.41 per subject.
At all tested operating points version 6 is shown to be more efficient and cost effective than any other version.}, + file = {Murp20.pdf:pdf\\Murp20.pdf:PDF}, + journal = NATSCIREP, + optnote = {DIAG}, + pmid = {32218458}, + year = {2020}, + month = {3}, + gsid = {5738262576738254867}, + gscites = {103}, + ss_id = {61d1507dec6ef557cba474c4f67b1d0ad3631fb0}, + all_ss_ids = {['61d1507dec6ef557cba474c4f67b1d0ad3631fb0']}, +} + +@article{Murp20a, + author = {Murphy, Keelin and Smits, Henk and Knoops, Arnoud J. G. and Korst, Mike B. J. M. and Samson, Tijs and Scholten, Ernst T. and Schalekamp, Steven and Schaefer-Prokop, Cornelia M. and Philipsen, Rick H. H. M. and Meijers, Annet and Melendez, Jaime and van Ginneken, Bram and Rutten, Matthieu}, + title = {{COVID-19} on the Chest Radiograph: A Multi-Reader Evaluation of an {AI} System}, + journal = Radiology, + year = {2020}, + pages = {E166-E172}, + volume = 296, + doi = {10.1148/radiol.2020201874}, + abstract = {Background Chest radiography (CXR) may play an important role in triage for COVID-19, particularly in low-resource settings. Purpose To evaluate the performance of an artificial intelligence (AI) system for detection of COVID-19 pneumonia on chest radiographs. Methods An AI system (CAD4COVID-Xray) was trained on 24,678 CXR images including 1,540 used only for validation while training. The test set consisted of a set of continuously acquired CXR images (n=454) obtained in patients suspected for COVID-19 pneumonia between March 4th and April 6th 2020 in a single center (223 RT-PCR positive subjects, 231 RT-PCR negative subjects). The radiographs were independently analyzed by six readers and by the AI system. Diagnostic performance was performed by receiver operating characteristic curve analysis. Results For the test set, the mean age of the patients was 67.3 (+/-14.4) years (56% male). Using RT-PCR test results as the reference standard, the AI system correctly classified CXR images as COVID-19 pneumonia with an AUC of 0.81. The system significantly outperforms each reader (p < 0.001 using McNemar test) at their highest possible sensitivities. At their lowest sensitivities, only one reader can significantly outperform the AI system (p=0.04). Conclusions An AI system for detection of COVID-19 on chest radiographs was comparable to six independent readers.}, + file = {Murp20a.pdf:pdf\\Murp20a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32384019}, + month = {5}, + gsid = {15067361865056080572}, + gscites = {146}, + ss_id = {1f77425465d8a5dd20e949f9e80aa4da590efbb5}, + all_ss_ids = {['1f77425465d8a5dd20e949f9e80aa4da590efbb5']}, +} + +@conference{Mus12, + author = {Roel Mus and Ritse Mann and Andre Moyakine and Christian Geppert and Bram Platel and Nico Karssemeijer and Jelle Barentsz}, + title = {{MRI} Screening of the Breast in Less than 2 Minutes: A Prelude to Extend {MR} Breast Screening Possibilities}, + booktitle = RSNA, + year = {2012}, + abstract = {PURPOSE To evaluate time to enhancement (TTE) as a parameter to differentiate benign from malignant lesions in MR Mammography using an ultrafast DCE protocol. METHOD AND MATERIALS 42 patients with 43 lesions (20 malignant, 23 benign, size range 3mm - 7 cm) were scanned at 3.0T (Siemens, Magneton Trio and Skyra) using a 16 channel bilateral breast coil. 
An interleaved protocol was used, in which a TWIST (Time-resolved angiography With Stochastic Trajectories) sequence was performed during and immediately after IV administration of 30 ml Gd-DOTA (20 time points, spatial resolution 1*0.9*2.5 mm, temporal resolution 4.32 seconds), preceded 1x and followed 5x by high resolution VIBE acquisitions (spatial resolution: 0.9*0.9*1 mm) lasting 107 seconds each. Consequently, the TWIST acquisitions could be used to assess TTE, whereas relative-enhancement versus time curves could be derived from the VIBE acquisitions. All evaluations were performed on a DynaCad workstation (InVivo), using primarily the maximum intensity projections (MIPs) of the subtracted series. TTE was defined as "the timepoint where the lesion started to enhance" - "the timepoint where the aorta started to enhance". Lesions with TTE $\leq$ 12 seconds were considered malignant, lesions with TTE $>$ 12 were considered benign. Similarly, based upon the curves we considered lesions with wash-out malignant and lesions with continuous enhancement benign. RESULTS Due to its speed the TWIST sequence showed less movement artifacts than the VIBE and -later- enhancing glandular tissue did not interfere with -earlier- enhancing lesions, making detection of the lesions on the MIPs easier. 19/23 malignancies had TTE $\leq$ 12 seconds, 17/20 benign lesions showed TTE $>$ 12 seconds. Using the curves, 19/23 malignancies showed wash-out and 13/20 benign lesions had continuous enhancement. Corresponding accuracy, sensitivity, specificity, PPV and NPV for TTE were 84, 83, 85, 86 and 81, compared to 74, 83, 65, 74 and 75 for curve analysis. CONCLUSION TTE is a strong discriminator between benign and malignant breast disease and appears to improve especially the specificity of the exam when compared to curve analysis. CLINICAL RELEVANCE/APPLICATION Ultrafast MRI, allowing evaluation of both inflow of contrast and morphology, can be used to substantially shorten current MRI protocols. This may allow MRI screening at substantially lower costs.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Mus17, + author = {Mus, Roel D. and Borelli, Cristina and Bult, Peter and Weiland, Elisabeth and Karssemeijer, Nico and Barentsz, Jelle O. and Gubern-M\'{e}rida, Albert and Platel, Bram and Mann, Ritse M.}, + title = {Time to enhancement derived from ultrafast breast MRI as a novel parameter to discriminate benign from malignant breast lesions}, + doi = {10.1016/j.ejrad.2017.01.020}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2017.01.020}, + file = {Mus17.pdf:pdf\Mus17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + citation-count = {60}, + automatic = {yes}, + pages = {90-96}, + volume = {89}, +} + +@article{Muyo14, + author = {Muyoyeta, M. and Maduskar, P. and Moyo, M. and Kasese, N. and Milimo, D. and Spooner, R. and Kapata, N. and Hogeweg, L. and van Ginneken, B.
and Ayles, H.}, + title = {The Sensitivity and Specificity of Using a Computer Aided Diagnosis Program for Automatically Scoring Chest {X}-Rays of Presumptive {TB} Patients Compared with {X}pert {MTB/RIF} in {L}usaka {Z}ambia}, + journal = PLOSONE, + year = {2014}, + volume = {9}, + number = {4}, + pages = {e93757}, + doi = {10.1371/journal.pone.0093757}, + abstract = {To determine the sensitivity and specificity of a Computer Aided Diagnosis (CAD) program for scoring chest x-rays (CXRs) of presumptive tuberculosis (TB) patients compared to Xpert MTB/RIF (Xpert). Consecutive presumptive TB patients with a cough of any duration were offered digital CXR, and opt out HIV testing. CXRs were electronically scored as normal (CAD score $\leq$60) or abnormal (CAD score>60) using a CAD program. All patients regardless of CAD score were requested to submit a spot sputum sample for testing with Xpert and a spot and morning sample for testing with LED Fluorescence Microscopy-(FM). Of 350 patients with evaluable data, 291 (83.1\%) had an abnormal CXR score by CAD. The sensitivity, specificity, positive predictive value (PPV) and negative predictive value (NPV) of CXR compared to Xpert were 100\% (95\%CI 96.2-100), 23.2\% (95\%CI 18.2-28.9), 33.0\% (95\%CI 27.6-38.7) and 100\% (95\% 93.9-100), respectively. The area under the receiver operator curve (AUC) for CAD was 0.71 (95\%CI 0.66-0.77). CXR abnormality correlated with smear grade (r = 0.30, p<0.0001) and with Xpert CT (r = 0.37, p<0.0001). To our knowledge this is the first time that a CAD program for TB has been successfully tested in a real world setting. The study shows that the CAD program had high sensitivity but low specificity and PPV. The use of CAD with digital CXR has the potential to increase the use and availability of chest radiography in screening for TB where trained human resources are scarce.}, + file = {Muyo14.pdf:pdf\\Muyo14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {24705629}, + month = {4}, + gsid = {4954392116893371357}, + gscites = {79}, + ss_id = {ba7ca751b20644a47eceb276948d676bf42609fa}, + all_ss_ids = {['ba7ca751b20644a47eceb276948d676bf42609fa']}, +} + +@article{Nab90, + author = {H. W. Nab and N. Karssemeijer and L. J. van Erning and A. L. Verbeek and J. H. Hendriks}, + title = {{D}igital mammography is very useful in mass screening of breast cancer}, + journal = NTVG, + year = {1990}, + volume = {134}, + pages = {2383--2387}, + abstract = {Mammograms made between 1981 and 1989 in the Nijmegen screening programme for breast cancer were retrospectively reviewed. Those made before detection of breast cancer showed signs of tumour growth in the place of the subsequently detected malignancy in 22% of the cases. A work station was set up for image digitization and image processing. Display with optimal contrast and image processing of mammograms is possible. Diagnoses based on digitized mammograms displayed on a monitor were as good as those based on the conventional images on film. Automatic detection of image features has been investigated. A procedure for automatic detection of microcalcifications was developed.
This research is important for optimalization of diagnosis in screening and because of the expected introduction of direct digital imaging techniques.}, + file = {Nab90.PDF:pdf\\Nab90.PDF:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {49}, + pmid = {2263263}, + gsid = {8785684717863376362}, + gscites = {3}, + ss_id = {fd4a0f80717b0dc6a8157fd6148aec44788aa903}, + all_ss_ids = {['fd4a0f80717b0dc6a8157fd6148aec44788aa903']}, +} + +@article{Nab92, + author = {H. W. Nab and N. Karssemeijer and L. J. Van Erning and J. H. Hendriks}, + title = {Comparison of digital and conventional mammography: a {ROC} study of 270 mammograms}, + journal = MINF, + year = {1992}, + volume = {17}, + pages = {125--131}, + file = {Nab92.pdf:pdf/Nab92.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {1405832}, + month = {1}, + gsid = {14419021407581029067}, + gscites = {45}, + ss_id = {5aca0ca5805abfa265b540ba865c83a0cc96d23a}, + all_ss_ids = {['5aca0ca5805abfa265b540ba865c83a0cc96d23a']}, +} + +@article{Nage13, + author = {Nagel, Klaas N A. and Schouten, Martijn G. and Hambrock, Thomas and Litjens, G. and Hoeks, Caroline M A. and Haken, Bennie Ten and Barentsz, Jelle O. and F\"utterer, Jurgen J.}, + title = {Differentiation of Prostatitis and Prostate Cancer by Using Diffusion-weighted {MR} Imaging and {MR}-guided Biopsy at 3 T}, + volume = {267}, + pages = {164-172}, + doi = {10.1148/radiol.12111683}, + abstract = {Purpose: To determine if prostatitis and prostate cancer (PCa) can be distinguished by using apparent diffusion coefficients (ADCs) on magnetic resonance (MR) images, with specimens obtained at MR-guided biopsy as the standard of reference. Materials and Methods: The need for institutional review board approval and informed consent was waived. MR-guided biopsies were performed in 130 consecutive patients with cancer-suspicious regions (CSRs) on multiparametric MR images obtained at 3 T. In this retrospective study, 88 patients met the inclusion criteria. During the biopsy procedure, an axial diffusion-weighted sequence was performed and ADC maps were generated (repetition time msec/echo time msec, 2000/67; section thickness, 4 mm; in-plane resolution, 1.8 x 1.8 mm; and b values of 0, 100, 500, and 800 sec/mm(2)). Subsequently, a confirmation image with the needle left in situ was acquired and projected on the ADC map. The corresponding ADCs at the biopsy location were compared with the histopathologic outcomes of the biopsy specimens. Linear mixed-model regression analyses were used to test for ADC differences between the histopathologic groups. Results: The study included 116 biopsy specimens. Median ADCs of normal prostate tissue, prostatitis, low-grade PCa (Gleason grade components 2 or 3), and high-grade PCa (Gleason grade components 4 or 5) were 1.22 x 10(-3) mm(2)/sec (standard deviation, +/- 0.21), 1.08 x 10(-3) mm(2)/sec (+/- 0.18), 0.88 x 10(-3) mm(2)/sec (+/- 0.15), and 0.88 x 10(-3) mm(2)/sec (+/- 0.13), respectively.
Although the median ADCs of biopsy specimens with prostatitis were significantly higher compared with low- and high-grade PCa (P < .001), there is a considerable overlap between the tissue types. Conclusion: Diffusion-weighted imaging is a noninvasive technique that shows differences between prostatitis and PCa in both the peripheral zone and central gland, although its usability in clinical practice is limited as a result of significant overlap in ADCs. (c) RSNA, 2013.}, + file = {Nage13.pdf:pdf\\Nage13.pdf:PDF}, + journal = RADIOLOGY, + month = {Jan}, + optnote = {ProstateCancer, RADIOLOGY}, + pmid = {23329653}, + year = {2013}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/118297}, +} + +@article{Naid13, + author = {Naidich, David P. and Bankier, Alexander A. and MacMahon, Heber and Schaefer-Prokop, Cornelia M. and Pistolesi, Massimo and Goo, Jin Mo and Macchiarini, Paolo and Crapo, James D. and Herold, Christian J. and Austin, John H. and Travis, William D.}, + title = {Recommendations for the management of subsolid pulmonary nodules detected at {CT}: a statement from the Fleischner Society}, + journal = Radiology, + year = {2013}, + volume = {266}, + pages = {304--317}, + doi = {10.1148/radiol.12120628}, + abstract = {This report is to complement the original Fleischner Society recommendations for incidentally detected solid nodules by proposing a set of recommendations specifically aimed at subsolid nodules. The development of a standardized approach to the interpretation and management of subsolid nodules remains critically important given that peripheral adenocarcinomas represent the most common type of lung cancer, with evidence of increasing frequency. Following an initial consideration of appropriate terminology to describe subsolid nodules and a brief review of the new classification system for peripheral lung adenocarcinomas sponsored by the International Association for the Study of Lung Cancer (IASLC), American Thoracic Society (ATS), and European Respiratory Society (ERS), six specific recommendations were made, three with regard to solitary subsolid nodules and three with regard to multiple subsolid nodules. Each recommendation is followed first by the rationales underlying the recommendation and then by specific pertinent remarks. Finally, issues for which future research is needed are discussed. The recommendations are the result of careful review of the literature now available regarding subsolid nodules. Given the complexity of these lesions, the current recommendations are more varied than the original Fleischner Society guidelines for solid nodules. It cannot be overemphasized that these guidelines must be interpreted in light of an individual's clinical history. Given the frequency with which subsolid nodules are encountered in daily clinical practice, and notwithstanding continuing controversy on many of these issues, it is anticipated that further refinements and modifications to these recommendations will be forthcoming as information continues to emerge from ongoing research.}, + file = {Naid13.pdf:pdf\\Naid13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {23070270}, + month = {1}, + all_ss_ids = {c37197020c3e86415f814d66e49de6f11d7cdbf1}, + gscites = {1000}, +} + +@article{Naik22, + author = {Naik, Nithesh and Hameed, B. M.
Zeeshan and Sooriyaperakasam, Nilakshman and Vinayahalingam, Shankeeth and Patil, Vathsala and Smriti, Komal and Saxena, Janhavi and Shah, Milap and Ibrahim, Sufyan and Singh, Anshuman and Karimi, Hadis and Naganathan, Karthickeyan and Shetty, Dasharathraj K. and Rai, Bhavan Prasad and Chlosta, Piotr and Somani, Bhaskar K.}, + title = {Transforming healthcare through a digital revolution: A review of digital healthcare technologies and solutions.}, + doi = {10.3389/fdgth.2022.919985}, + pages = {919985}, + volume = {4}, + abstract = {The COVID-19 pandemic has put a strain on the entire global healthcare infrastructure. The pandemic has necessitated the re-invention, re-organization, and transformation of the healthcare system. The resurgence of new COVID-19 virus variants in several countries and the infection of a larger group of communities necessitate a rapid strategic shift. Governments, non-profit, and other healthcare organizations have all proposed various digital solutions. It's not clear whether these digital solutions are adaptable, functional, effective, or reliable. With the disease becoming more and more prevalent, many countries are looking for assistance and implementation of digital technologies to combat COVID-19. Digital health technologies for COVID-19 pandemic management, surveillance, contact tracing, diagnosis, treatment, and prevention will be discussed in this paper to ensure that healthcare is delivered effectively. Artificial Intelligence (AI), big data, telemedicine, robotic solutions, Internet of Things (IoT), digital platforms for communication (DC), computer vision, computer audition (CA), digital data management solutions (blockchain), digital imaging are premiering to assist healthcare workers (HCW's) with solutions that include case base surveillance, information dissemination, disinfection, and remote consultations, along with many other such interventions.}, + file = {Naik22.pdf:pdf\\Naik22.pdf:PDF}, + journal = {Frontiers in digital health}, + optnote = {DIAG}, + pmid = {35990014}, + year = {2022}, +} + +@conference{Nair12a, + author = {Nair, A. and Desai, S. R, and Copley S. J. and Edey, A. J. and Walsh, S. L. F. and Robinson, G. and Field, J. K. and Baldwin, D. R. and Vliegenthart, R. and Oudkerk, M. and van Ginneken, B. and de Jong, P. A. and Prokop, M. and Hansell, D. M. and Devaraj, A.}, + title = {The impact of numbers of readers and methods of arbitration on pulmonary nodule detection in the context of lung cancer screening with {CT}}, + booktitle = ESTI, + year = {2012}, + abstract = {Aim: To determine whether a) increasing numbers of readers and b) methods of arbitration significantly influence nodule detection. Methods: 85 CTs performed as part of the NELSON lung cancer screening trial were read by five experienced thoracic radiologists twice. During the first reading, radiologists classified all opacities as positive (nodules>3mm) or negative (non-nodular opacities and nodules <3mm). In the second reading, each radiologist categorised the opacities identified by the other radiologists. Readers' final scores were combined to simulate double- and triple-reading. For double-reading, if there was disagreement a third independent reader provided arbitration. For triple-reading, >=2 radiologists in agreement constituted a positive reading. The reference standard was agreement by >=4 radiologists. Results: 531 opacities were identified, of which 186 (35.0%) nodules met reference standard criteria. 
Double-reading without arbitration had a variable impact on nodule detection: there was a significant increase in mean sensitivity (22.6%, p<0.005) and specificity (11.3%, p<0.05) in 5 and 16 pairs respectively, a significant reduction in mean sensitivity (20.7%, p<0.005) in 2 pairs, and no change in mean sensitivity and specificity in 13 and 4 pairs respectively. Triple-reading or double-reading with arbitration significantly increased mean sensitivity in 29/30 triple-reader combinations (24.5%, p<0.05) and 6/10 pairs (17.7%, p<0.05) respectively, but there was a significantly decreased mean specificity in 26/30 triple-reader combinations (11.3%, p<0.05) and 10/10 pairs respectively (11.7%, p<0.0005). Conclusions: Double-reading does not invariably improve nodule detection accuracy for experienced thoracic radiologists. However improved sensitivity is achieved by triple-reading or double-reading with independent arbitration, at the expense of reduced specificity.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Napo19, + author = {Napolitano, George and Lynge, Elsebeth and Lillholm, Martin and Vejborg, Ilse and van Gils, Carla H and Nielsen, Mads and Karssemeijer, Nico}, + title = {Change in mammographic density across birth cohorts of {{D}}utch breast cancer screening participants}, + journal = IJC, + year = {2019}, + volume = {145}, + issue = {11}, + month = {2}, + pages = {2954-2962}, + doi = {10.1002/ijc.32210}, + abstract = {High mammographic density is a well-known risk factor for breast cancer. This study aimed to search for a possible birth cohort effect on mammographic density, which might contribute to explain the increasing breast cancer incidence. We separately analysed left and right breast density of Dutch women from a 13-year period (2003-2016) in the breast cancer screening programme. First, we analysed age-specific changes in average percent dense volume (PDV) across birth cohorts. A linear regression analysis (PDV vs. year of birth) indicated a small but statistically significant increase in women of: 1) age 50 and born from 1952 to 1966 (left, slope = 0.04, p =0.003; right, slope = 0.09, p<0.0001); 2) age 55 and born from 1948 to 1961 (right, slope = 0.04, p=0.01); and 3) age 70 and born from 1933 to 1946 (right, slope = 0.05, p=0.002). A decrease of total breast volume seemed to explain the increase in PDV. Second, we compared proportion of women with dense breast in women born in 1946-1953 and 1959-1966, and observed a statistical significant increase of proportion of highly dense breast in later born women, in the 51 to 55 age-groups for the left breast (around a 20% increase in each age-group), and in the 50 to 56 age-groups for the right breast (increase ranging from 27% to 48%). The study indicated a slight increase in mammography density across birth cohorts, most pronounced for women in their early 50s, and more marked for the right than for the left breast. This article is protected by copyright. 
All rights reserved.}, + file = {Napo19.pdf:pdf\\Napo19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30762225}, + gsid = {1389657668429886587}, + gscites = {4}, + ss_id = {c94e50909368cf1112536dd7224fe141c2799b87}, + all_ss_ids = {['c94e50909368cf1112536dd7224fe141c2799b87']}, +} + +@article{Nas22, + author = {Nas, J and Thannhauser, J and Vart, P and van Geuns, RJM and Muijsers, HEC and Mol, JHQ and Aarts, GWA and Konijnenberg, LSF and Gommans, DHF and Ahoud-Schoenmakers, SGAM and Vos, JL and van Royen, N and Bonnes, JL and Brouwer, MA}, + title = {The impact of alcohol use on the quality of cardiopulmonary resuscitation among festival attendees: A prespecified analysis of a randomised trial}, + doi = {10.1016/j.resuscitation.2022.10.002}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.resuscitation.2022.10.002}, + file = {Nas22.pdf:pdf\Nas22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Resuscitation}, + citation-count = {1}, + automatic = {yes}, + pages = {12-19}, + volume = {181}, +} + +@article{Niek09, + author = {C. G. van Niekerk and J. A. W. M. van der Laak and M. E. B\"orger and H. Huisman and J. Alfred Witjes and J. O. Barentsz and Hulsbergen-van de Kaa, C. A.}, + title = {Computerized whole slide quantification shows increased microvascular density in {pT2} prostate cancer as compared to normal prostate tissue}, + journal = Prostate, + year = {2009}, + volume = {69}, + pages = {62--69}, + doi = {10.1002/pros.20855}, + abstract = {{BACKGROUND}: {C}ontrast enhanced imaging enables powerful, non-invasive diagnostics, important for detection and staging of early prostate cancer. {T}he uptake of contrast agent is increased in prostate cancer as compared to normal prostate tissue. {T}o reveal the underlying physiological mechanisms, quantification of tissue components in pathology specimens may yield important information. {A}im of this study was to investigate whether microvascularity is increased in prostate confined cancer (p{T}2). {METHODS}: {R}adical prostatectomy specimens of 26 patients were selected for organ confined peripheral zone tumors which were restricted to one side of the prostate. {M}icrovessels were visualized by immunohistochemistry against {CD}31. {S}pecimens were scanned using a computer controlled microscope and scanning stage and vessels were recognized automatically. {P}seudocolor mappings were produced showing number of vascular profiles ({MVD}), vascular area ({MVA}) and perimeter ({MVP}) in an overview of the entire prostate transection. {MVD} is a common measure for vascularity, whereas {MVA} represents the 3{D} vascular volume and {MVP} the perfused surface area.
{M}ean, coefficient of variation and 75th percentile of these parameters were calculated automatically in manually indicated areas, consisting of the entire tumor area and the corresponding normal area in the contra lateral side of the prostate. {RESULTS}: {T}he mappings clearly indicate areas of increased vascularity in prostate transections. {I}n tumor tissue an increase was found compared to normal tissue of 81\%, 49\%, and 62\% for mean {MVD}, mean {MVA} and mean {MVP}, respectively ({P} < 0.001 for all comparisons). {I}n contrast, the heterogeneity in tumor vasculature was significantly decreased as compared to normal prostate ({P} < 0.001). {CONCLUSIONS}: {C}haracteristics of microvasculature deviated significantly in p{T}2 prostate tumor as compared to normal tissue.}, + file = {Niek09.pdf:pdf\\Niek09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {18780292}, + month = {1}, + gsid = {10214499820404046656}, + gscites = {40}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/81446}, + ss_id = {75b311285b30601afcb96dcb4150b382f9266aec}, + all_ss_ids = {['75b311285b30601afcb96dcb4150b382f9266aec']}, +} + +@article{Niel11, + author = {M. Nielsen and G. Karemore and M. Loog and J. Raundahl and N. Karssemeijer and J. D M Otten and M. A. Karsdal and C. M. Vachon and C. Christiansen}, + title = {A novel and automatic mammographic texture resemblance marker is an independent risk factor for breast cancer}, + journal = CANE, + year = {2011}, + volume = {35}, + pages = {381--387}, + doi = {10.1016/j.canep.2010.10.011}, + abstract = {Objective: We investigated whether breast cancer is predicted by a breast cancer risk mammographic texture resemblance (MTR) marker. Methods: A previously published case-control study included 495 women of which 245 were diagnosed with breast cancer. In baseline mammograms, 2-4 years prior to diagnosis, the following mammographic parameters were analysed for relation to breast cancer risk: (C) categorical parenchymal pattern scores; (R) radiologist's percentage density, (P) computer-based percentage density; (H) computer-based breast cancer risk MTR marker; (E) computer-based hormone replacement treatment MTR marker; and (A) an aggregate of P and H. Results: Density scores, C, R, and P correlated (tau=0.3-0.6); no other pair of scores showed large (tau>0.2) correlation. For the parameters, the odds ratios of future incidence of breast cancer comparing highest to lowest categories (146 and 106 subjects respectively) were C: 2.4(1.4-4.2), R: 2.4(1.4-4.1), P: 2.5(1.5-4.2), E: non-significant, H: 4.2(2.4-7.2), and A: 5.6(3.2-9.8). The AUC analysis showed a similarly increasing pattern (C: 0.58+/-0.02, R: 0.57+/-0.03, P: 0.60+/-0.03, H: 0.63+/-0.02, A: 0.66+/-0.02). The AUC of the aggregate marker (A) surpasses others significantly except H. HRT-MTR (E) did not significantly identify future cancers or correlate with any other marker. Conclusions: Breast cancer risk MTR marker was independent of density scores and more predictive of risk. The hormone replacement treatment MTR marker did not identify patients at risk.}, + file = {Niel11.pdf:pdf/Niel11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {21146484}, + month = {8}, + gsid = {14092454369424631175}, + gscites = {44}, +} + +@inproceedings{Niem03a, + author = {M. Niemeijer and B. van Ginneken and C. Maas and F. J. A. Beek and M. A.
Viergever}, + title = {Assessing the {S}keletal {A}ge {F}rom a {H}and {R}adiograph: {A}utomating the {T}anner-{W}hitehouse {M}ethod}, + booktitle = MI, + year = {2003}, + volume = {5032}, + series = SPIE, + pages = {1197-1205}, + doi = {10.1117/12.480163}, + abstract = {{T}he skeletal maturity of children is usually assessed from a standard radiograph of the left hand and wrist. {A}n established clinical method to determine the skeletal maturity is the {T}anner-{W}hitehouse ({TW}2) method. {T}his method divides the skeletal development into several stages (labelled {A}, {B}, ...,{I}). {W}e are developing an automated system based on this method. {I}n this work we focus on assigning a stage to one region of interest ({ROI}), the middle phalanx of the third finger. {W}e classify each {ROI} as follows. {A} number of {ROI}s which have been assigned a certain stage by a radiologist are used to construct a mean image for that stage. {F}or a new input {ROI}, landmarks are detected by using an {A}ctive {S}hape {M}odel. {T}hese are used to align the mean images with the input image. {S}ubsequently the correlation between each transformed mean stage image and the input is calculated. {T}he input {ROI} can be assigned to the stage with the highest correlation directly, or the values can be used as features in a classifier. {T}he method was tested on 71 cases ranging from stage {E} to {I}. {T}he {ROI} was staged correctly in 73.2% of all cases and in 97.2% of all incorrectly staged cases the error was not more than one stage.}, + file = {Niem03a.pdf:pdf\\Niem03a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {10281073666494131994}, + gscites = {77}, + ss_id = {7c6eef6134210ac70172c73bfd341809aac67b9b}, + all_ss_ids = {['7c6eef6134210ac70172c73bfd341809aac67b9b']}, +} + +@conference{Niem03b, + author = {Niemeijer, M. and van Ginneken, B. and Abr\`amoff, M. D.}, + title = {Automatic {D}etection and {C}lassification of {M}icroaneurysms and {S}mall {H}emors in {C}olour {F}undus {P}hotographs}, + booktitle = {European Journal of Ophthalmology}, + year = {2003}, + volume = {13}, + pages = {226}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, +} + +@inproceedings{Niem04a, + author = {Niemeijer, M. and Staal, J. J. and van Ginneken, B. and Loog, M. and Abr\`amoff, M. D.}, + title = {Comparative Study of Retinal Vessel Segmentation Methods on a New Publicly Available Database}, + booktitle = MI, + year = {2004}, + volume = {5370}, + series = SPIE, + pages = {648-656}, + doi = {10.1117/12.535349}, + abstract = {{I}n this work we compare the performance of a number of vessel segmentation algorithms on a newly constructed retinal vessel image database. {R}etinal vessel segmentation is important for the detection of numerous eye diseases and plays an important role in automatic retinal disease screening systems. {A} large number of methods for retinal vessel segmentation have been published, yet an evaluation of these methods on a common database of screening images has not been performed. {T}o compare the performance of retinal vessel segmentation methods we have constructed a large database of retinal images. {T}he database contains forty images in which the vessel trees have been manually segmented. {F}or twenty of those forty images a second independent manual segmentation is available. {T}his allows for a comparison between the performance of automatic methods and the performance of a human observer. {T}he database is available to the research community. 
{I}nterested researchers are encouraged to upload their segmentation results to our website (http://www.isi.uu.nl/{R}esearch/{D}atabases). {T}he performance of five different algorithms has been compared. {F}our of these methods have been implemented as described in the literature. {T}he fifth pixel classification based method was developed specifically for the segmentation of retinal vessels and is the only supervised method in this test. {W}e define the segmentation accuracy with respect to our gold standard as the performance measure. {R}esults show that the pixel classification method performs best, but the second observer still performs significantly better.}, + file = {Niem04a.pdf:pdf\\Niem04a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {4847858369065721334}, + gscites = {876}, + ss_id = {798e312dd67798024da74f9a8f92946af88c7cd4}, + all_ss_ids = {['798e312dd67798024da74f9a8f92946af88c7cd4']}, +} + +@patent{Niem05, + author = {M. Niemeijer and B. van Ginneken and M. D. Abr\`amoff}, + title = {Automatic {D}etection of {R}ed {L}esions in {D}igital {C}olor {F}undus {P}hotographs}, + year = {2005}, + url = {http://www.google.com/patents/US7474775}, + optnote = {DIAG, RADIOLOGY}, + number = {7,474,775}, +} + +@inproceedings{Niem05a, + author = {Niemeijer, M. and van Ginneken, B. and ter Haar, F. and Abr\`amoff, M. D.}, + title = {Automatic Detection of the Optic Disc, Fovea and Vascular Arch in Digital Color Photographs of the Retina}, + booktitle = {Proceedings of the British Machine Vision Conference}, + year = {2005}, + pages = {109--118}, + abstract = {{W}e present a novel method that determines whether a macula centered retinal image is from the left or right eye and automatically detects the optic disc, the fovea and the vascular arch by inferring the location of a set of landmarks placed on these structures. {T}he algorithm relies on a specific energy function that combines global and local cues. {T}he global cues are derived from vascular atlases of the vessel orientation and thickness on the retina as well as a vascular distance map. {A} fourth component models the local appearance around each of the landmarks in the model and is able to estimate the distance between a position in the image and the target position of a landmark. {F}or the minimization of the energy function a combination of optimization methods is used. {W}e compare the results of several different system setups and combinations of energy function components with the performance of a second human observer. {T}he best performing system localizes the {OD} in 91% of all cases, the fovea in 94% of all cases and correctly positions 74% of all vessel landmarks. {T}he results show that a combination of global and local energy function components is required to obtain optimal results.}, + file = {Niem05a.pdf:retina\\Niem05a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {9478136858074324990}, + gscites = {26}, + ss_id = {e09a88838edfa47d94493c9aa186b4d48ff63945}, + all_ss_ids = {['e09a88838edfa47d94493c9aa186b4d48ff63945']}, +} + +@article{Niem05b, + author = {Niemeijer, M. and van Ginneken, B. and Staal, J. and Suttorp-Schulten, M. S. A. and Abr\`amoff, M. 
D.}, + title = {Automatic Detection of Red Lesions in Digital Color Fundus Photographs}, + journal = TMI, + year = {2005}, + volume = {24}, + number = {5}, + pages = {584--592}, + doi = {10.1109/TMI.2005.843738}, + abstract = {{T}he robust detection of red lesions in digital color fundus photographs is a critical step in the development of automated screening systems for diabetic retinopathy. {I}n this paper, a novel red lesion detection method is presented based on a hybrid approach, combining prior works by {S}pencer et al. (1996) and {F}rame et al. (1998) with two important new contributions. {T}he first contribution is a new red lesion candidate detection system based on pixel classification. {U}sing this technique, vasculature and red lesions are separated from the background of the image. {A}fter removal of the connected vasculature the remaining objects are considered possible red lesions. {S}econd, an extensive number of new features are added to those proposed by {S}pencer-{F}rame. {T}he detected candidate objects are classified using all features and a k-nearest neighbor classifier. {A}n extensive evaluation was performed on a test set composed of images representative of those normally found in a screening set. {W}hen determining whether an image contains red lesions the system achieves a sensitivity of 100\% at a specificity of 87\%. {T}he method is compared with several different automatic systems and is shown to outperform them all. {P}erformance is close to that of a human expert examining the images for the presence of red lesions.}, + file = {Niem05b.pdf:pdf/Niem05b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {15889546}, + month = {5}, + gsid = {13743541034647495598}, + gscites = {592}, + ss_id = {444deb4d61347150f98d2095192bf0c9a75c99bf}, + all_ss_ids = {['444deb4d61347150f98d2095192bf0c9a75c99bf']}, +} + +@inproceedings{Niem05c, + author = {Niemeijer, M. and Abr\`amoff, M. D. and van Ginneken, B.}, + title = {Automatic detection of the presence of bright lesions in color fundus photographs}, + booktitle = {The 3rd European Medical and Biological Engineering Conference}, + year = {2005}, + pages = {1832 (1-6)}, + abstract = {{A} novel scheme for the detection of bright lesions in digital color photographs is presented. {A} supervised system is used to assign a soft label to each pixel in a retinal image indicating the probability that it is a bright lesion pixel. {T}hen, through a classifier combination scheme, the probability that bright lesions are present anywhere in the image is obtained. {T}o counter spurious responses on certain retinal landmarks such as the optic disc, prior knowledge about the location of the major anatomical landmarks on the retina is integrated into the system. {D}ifferent system setups are compared. {T}he best performing system obtains an area under the {ROC} curve of 0.88.}, + file = {Niem05c.pdf:retina\\Niem05c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {5626891224991621892}, + gscites = {15}, +} + +@article{Niem06a, + author = {Niemeijer, M. and Abr\`amoff, M. D. and van Ginneken, B.}, + title = {Image structure clustering for image quality verification of color retina images in diabetic retinopathy screening}, + journal = MIA, + year = {2006}, + volume = {10}, + number = {6}, + pages = {888--898}, + doi = {10.1016/j.media.2006.09.006}, + abstract = {{R}eliable verification of image quality of retinal screening images is a prerequisite for the development of automatic screening systems for diabetic retinopathy. 
{A} system is presented that can automatically determine whether the quality of a retinal screening image is sufficient for automatic analysis. {T}he system is based on the assumption that an image of sufficient quality should contain particular image structures according to a certain pre-defined distribution. {W}e cluster filterbank response vectors to obtain a compact representation of the image structures found within an image. {U}sing this compact representation together with raw histograms of the {R}, {G}, and {B} color planes, a statistical classifier is trained to distinguish normal from low quality images. {T}he presented system does not require any previous segmentation of the image in contrast with previous work. {T}he system was evaluated on a large, representative set of 1000 images obtained in a screening program. {T}he proposed method, using different feature sets and classifiers, was compared with the ratings of a second human observer. {T}he best system, based on a {S}upport {V}ector {M}achine, has performance close to optimal with an area under the {ROC} curve of 0.9968.}, + file = {Niem06a.pdf:pdf/Niem06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17138215}, + month = {12}, + gsid = {9152679895729904090}, + gscites = {144}, + ss_id = {2ee501f792ae098305ec14a721755a387cb01309}, + all_ss_ids = {['2ee501f792ae098305ec14a721755a387cb01309']}, +} + +@phdthesis{Niem06b, + author = {M. Niemeijer}, + title = {Automatic detection of diabetic retinopathy in digital fundus photographs}, + year = {2006}, + url = {http://igitur-archive.library.uu.nl/dissertations/2007-0102-200110/UUindex.html}, + abstract = {{D}iabetic retinopathy is a common ocular complication of diabetes. {I}t is the most frequent cause of blindness in the working population of the {U}nited {S}tates and the {E}uropean {U}nion. {E}arly diagnosis, and treatment can prevent vision loss in the majority of cases. {Y}et only approximately 50% of people with diabetes are regularly screened for the presence of signs of diabetic retinopathy. {C}omputer aided diagnosis technology and digital retinal imaging could help to facilitate a large scale screening of people with diabetes. {T}he research in this thesis is focused on the development of an automatic diabetic retinopathy detection system. {T}his system would select exams which possibly contain signs of the presence of diabetic retinopathy and present only those to an ophthalmologist. {I}n this manner the total workload of the ophthalmologist could be reduced. {A} number of different automatic screening system components are described and evaluated: {A}n automatic system for the quantification of retinal image quality. {I}mage quality is an important issue in large scale screening programs where images are acquired at many different sites, using different cameras and operators. {I}t forms an essential component of an automatic screening system. {S}everal different techniques for the automatic detection of the vasculature in retinal images are compared. {A} new technique is proposed and shown to exhibit good performance in comparison with the techniques from the literature. {I}n addition to the vasculature, other anatomical landmarks are present on the retina, in particular the optic disc, the macula and the vascular arch. 
{T}he location of these anatomical landmarks can be used to determine a relative position on the retina, and this spatial information about potential lesions can be used to remove false positive lesion detections and provide important diagnostic information. {A} system which is able to automatically find a set of points in a retinal image and thereby identify the location of the most important retinal anatomy is described. {R}ed lesions are amongst the first signs of the presence of diabetic retinopathy and are therefore important to detect. {A}n automatic red lesion detection system, based on a pixel classification approach, is described. {W}hen diabetic retinopathy progresses also bright lesions appear. {A}n automatic detection system for these lesions is also presented. {T}he individual system components are combined into a comprehensive screening system. {T}his system is evaluated on a large dataset containing 40,000 images obtained in 10,000 eye examinations. {A} novel method is proposed to combine the outputs of the various system components into a single opinion about the complete examination. {T}he results shown that the system is able to detect the majority of suspect exams at a specificity above 50%.}, + copromotor = {B. van Ginneken and M. D. Abr\`{a}moff}, + file = {Niem06b.pdf:pdf/Niem06b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M.A. Viergever}, + school = {Utrecht University}, + gsid = {7359028972615471828}, + gscites = {16}, + journal = {PhD thesis}, +} + +@article{Niem07a, + author = {Niemeijer, M. and Abr\`amoff, M. D. and van Ginneken, B.}, + title = {Segmentation of the optic disc, macula and vascular arch in fundus photographs}, + journal = TMI, + year = {2007}, + volume = {26}, + number = {1}, + pages = {116-127}, + doi = {10.1109/TMI.2006.885336}, + abstract = {{A}n automatic system is presented to find the location of the major anatomical structures in color fundus photographs; the optic disc, the macula, and the vascular arch. {T}hese structures are found by fitting a single point-distribution-model to the image, that contains points on each structure. {T}he method can handle optic disc and macula centered images of both the left and the right eye. {T}he system uses a cost function, which is based on a combination of both global and local cues, to find the correct position of the model points. {T}he global terms in the cost function are based on the orientation and width of the vascular pattern in the image. {T}he local term is derived from the image structure around the points of the model. {T}o optimize the fit of the point-distribution-model to an image, a sophisticated combination of optimization processes is proposed which combines optimization in the parameter space of the model and in the image space, where points are moved directly. {E}xperimental results are presented demonstrating that our specific choices for the cost function components and optimization scheme are needed to obtain good results. {T}he system was developed and trained on a set of 500 screening images, and tested on a completely independent set of 500 screening images. {I}n addition to this the system was also tested on a separate set of 100 pathological images. {I}n the screening set it was able to find the vascular arch in 93.2\%, the macula in 94.4\%, the optic disc location in 98.4\% and whether it is dealing with a left or right eye in 100\% of all tested cases. 
{F}or the pathological images test set, this was 77.0\%, 92.0\%, 94.0\%, and 100\% respectively.}, + file = {Niem07a.pdf:pdf\\Niem07a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17243590}, + month = {1}, + gsid = {446255861867709018}, + gscites = {247}, + ss_id = {aa6b6a54343f1c9bba9b1dd2579b1132bc775d79}, + all_ss_ids = {['aa6b6a54343f1c9bba9b1dd2579b1132bc775d79']}, +} + +@article{Niem07b, + author = {Niemeijer, M. and van Ginneken, B. and Russel, S. R. and Suttorp-Schulten, M. S. A. and Abr\`amoff, M. D.}, + title = {Automated detection and differentiation of drusen, exudates, and cotton-wool spots in digital color fundus photographs for diabetic retinopathy diagnosis}, + journal = IOVS, + year = {2007}, + volume = {48}, + number = {5}, + pages = {2260-2267}, + doi = {10.1167/iovs.06-0996}, + abstract = {{PURPOSE}: {T}o describe and evaluate a machine learning-based, automated system to detect exudates and cotton-wool spots in digital color fundus photographs and differentiate them from drusen, for early diagnosis of diabetic retinopathy. {METHODS}: {T}hree hundred retinal images from one eye of 300 patients with diabetes were selected from a diabetic retinopathy telediagnosis database (nonmydriatic camera, two-field photography): 100 with previously diagnosed bright lesions and 200 without. {A} machine learning computer program was developed that can identify and differentiate among drusen, (hard) exudates, and cotton-wool spots. {A} human expert standard for the 300 images was obtained by consensus annotation by two retinal specialists. {S}ensitivities and specificities of the annotations on the 300 images by the automated system and a third retinal specialist were determined. {RESULTS}: {T}he system achieved an area under the receiver operating characteristic ({ROC}) curve of 0.95 and sensitivity/specificity pairs of 0.95/0.88 for the detection of bright lesions of any type, and 0.95/0.86, 0.70/0.93, and 0.77/0.88 for the detection of exudates, cotton-wool spots, and drusen, respectively. {T}he third retinal specialist achieved pairs of 0.95/0.74 for bright lesions and 0.90/0.98, 0.87/0.98, and 0.92/0.79 per lesion type. {CONCLUSIONS}: {A} machine learning-based, automated system capable of detecting exudates and cotton-wool spots and differentiating them from drusen in color images obtained in community based diabetic patients has been developed and approaches the performance level of retinal experts. {I}f the machine learning can be improved with additional training data sets, it may be useful for detecting clinically important bright lesions, enhancing early diagnosis, and reducing visual loss in patients with diabetes.}, + file = {Niem07b.pdf:pdf\\Niem07b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17460289}, + month = {5}, + gsid = {13784025486209249363}, + gscites = {413}, + ss_id = {6a1e5f1ff1b3106d160cb1e486fb9611252c393d}, + all_ss_ids = {['6a1e5f1ff1b3106d160cb1e486fb9611252c393d']}, +} + +@inproceedings{Niem08, + author = {M. Niemeijer and M. D. Abr\`amoff and B. van Ginneken}, + title = {Automated localization of the optic disc and the fovea}, + booktitle = EMBS, + year = {2008}, + pages = {3538--3541}, + doi = {10.1109/IEMBS.2008.4649969}, + abstract = {{T}he detection of the position of the normal anatomy in color fundus photographs is an important step in the automated analysis of retinal images. {A}n automatic system for the detection of the position of the optic disc and the fovea is presented. 
{T}he method integrates the use of local vessel geometry and image intensity features to find the correct positions in the image. {A} k{NN} regressor is used to accomplish the integration. {E}valuation was performed on a set of 250 digital color fundus photographs and the detection performance for the optic disc and the fovea were 99.2\% and 96.4\% respectively.}, + file = {Niem08.pdf:pdf\\Niem08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19163472}, + month = {8}, + gsid = {17667278035436647511}, + gscites = {41}, + ss_id = {9d9799545cca4d430a3e3edcce2fc0b6c8ba99c3}, + all_ss_ids = {['9d9799545cca4d430a3e3edcce2fc0b6c8ba99c3']}, +} + +@inproceedings{Niem08a, + author = {M. Niemeijer and M. K. Garvin and B. van Ginneken and M. Sonka and M. D. Abr\`amoff}, + title = {Vessel segmentation in {3D} spectral {OCT} scans of the retina}, + booktitle = MI, + year = {2008}, + volume = {6914}, + series = SPIE, + doi = {10.1117/12.772680}, + abstract = {{T}he latest generation of spectral optical coherence tomography ({OCT}) scanners is able to image 3{D} cross-sectional volumes of the retina at a high resolution and high speed. {T}hese scans offer a detailed view of the structure of the retina. {A}utomated segmentation of the vessels in these volumes may lead to more objective diagnosis of retinal vascular disease including hypertensive retinopathy, retinopathy of prematurity. {A}dditionally, vessel segmentation can allow color fundus images to be registered to these 3{D} volumes, possibly leading to a better understanding of the structure and localization of retinal structures and lesions. {I}n this paper we present a method for automatically segmenting the vessels in a 3{D} {OCT} volume. {F}irst, the retina is automatically segmented into multiple layers, using simultaneous segmentation of their boundary surfaces in 3{D}. {N}ext, a 2{D} projection of the vessels is produced by only using information from certain segmented layers. {F}inally, a supervised, pixel classification based vessel segmentation approach is applied to the projection image. {W}e compared the influence of two methods for the projection on the performance of the vessel segmentation on 10 optic nerve head centered 3{D} {OCT} scans. {T}he method was trained on 5 independent scans. {U}sing {ROC} analysis, our proposed vessel segmentation system obtains an area under the curve of 0.970 when compared with the segmentation of a human observer.}, + file = {Niem08a.pdf:pdf\\Niem08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {7748171457790516188}, + gscites = {88}, + ss_id = {b2c5f8d2a2ad3759ab3844e90de416857145fc89}, + all_ss_ids = {['b2c5f8d2a2ad3759ab3844e90de416857145fc89']}, +} + +@article{Niem09, + author = {M. Niemeijer and M. D. Abr\`amoff and B. van Ginneken}, + title = {Fast detection of the optic disc and fovea in color fundus photographs}, + journal = MIA, + year = {2009}, + volume = {13}, + number = {6}, + pages = {859--870}, + doi = {10.1016/j.media.2009.08.003}, + abstract = {{A} fully automated, fast method to detect the fovea and the optic disc in digital color photographs of the retina is presented. {T}he method makes few assumptions about the location of both structures in the image. {W}e define the problem of localizing structures in a retinal image as a regression problem. {A} k{NN} regressor is utilized to predict the distance in pixels in the image to the object of interest at any given location in the image based on a set of features measured at that location. 
{T}he method combines cues measured directly in the image with cues derived from a segmentation of the retinal vasculature. {A} distance prediction is made for a limited number of image locations and the point with the lowest predicted distance to the optic disc is selected as the optic disc center. {B}ased on this location the search area for the fovea is defined. {T}he location with the lowest predicted distance to the fovea within the foveal search area is selected as the fovea location. {T}he method is trained with 500 images for which the optic disc and fovea locations are known. {A}n extensive evaluation was done on 500 images from a diabetic retinopathy screening program and 100 specially selected images containing gross abnormalities. {T}he method found the optic disc in 99.4\% and the fovea in 96.8\% of regular screening images and for the images with abnormalities these numbers were 93.0\% and 89.0\% respectively.}, + file = {Niem09.pdf:pdf\\Niem09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19782633}, + month = {12}, + gsid = {3777725572675616469}, + gscites = {253}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/80726}, + ss_id = {9c948dbc3d1c7fc86c90f1b0b98e5aea37bb7c88}, + all_ss_ids = {['9c948dbc3d1c7fc86c90f1b0b98e5aea37bb7c88']}, +} + +@article{Niem09a, + author = {M. Niemeijer and M. D. Abr\`amoff and B. van Ginneken}, + title = {Information fusion for diabetic retinopathy {CAD} in digital color fundus photographs}, + journal = TMI, + year = {2009}, + volume = {28}, + number = {5}, + pages = {775--785}, + doi = {10.1109/TMI.2008.2012029}, + abstract = {{T}he purpose of computer-aided detection or diagnosis ({CAD}) technology has so far been to serve as a second reader. {I}f, however, all relevant lesions in an image can be detected by {CAD} algorithms, use of {CAD} for automatic reading or prescreening may become feasible. {T}his work addresses the question how to fuse information from multiple {CAD} algorithms, operating on multiple images that comprise an exam, to determine a likelihood that the exam is normal and would not require further inspection by human operators. {W}e focus on retinal image screening for diabetic retinopathy, a common complication of diabetes. {C}urrent {CAD} systems are not designed to automatically evaluate complete exams consisting of multiple images for which several detection algorithm output sets are available. {I}nformation fusion will potentially play a crucial role in enabling the application of {CAD} technology to the automatic screening problem. {S}everal different fusion methods are proposed and their effect on the performance of a complete comprehensive automatic diabetic retinopathy screening system is evaluated. {E}xperiments show that the choice of fusion method can have a large impact on system performance. {T}he complete system was evaluated on a set of 15,000 exams (60,000 images). {T}he best performing fusion method obtained an area under the receiver operator characteristic curve of 0.881. {T}his indicates that automated prescreening could be applied in diabetic retinopathy screening programs.}, + file = {Niem09a.pdf:pdf\\Niem09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19150786}, + month = {5}, + gsid = {14795703699235766374}, + gscites = {134}, + ss_id = {852e0bb4efbd7906c4c1eb35a69fc2c0eb51f9d6}, + all_ss_ids = {['852e0bb4efbd7906c4c1eb35a69fc2c0eb51f9d6']}, +} + +@inproceedings{Niem09b, + author = {M. Niemeijer and M.K. Garvin and K. Lee and B. van Ginneken and M.D. 
Abr\`amoff and Milan Sonka}, + title = {Registration of 3{D} spectral {OCT} volumes using 3{D} {SIFT} feature point matching}, + booktitle = MI, + year = {2009}, + volume = {7259}, + series = SPIE, + pages = {72591I-1--72591I-8}, + doi = {10.1117/12.811906}, + abstract = {{T}he recent introduction of next generation spectral {OCT} scanners has enabled routine acquisition of high resolution, 3{D} cross-sectional volumetric images of the retina. 3{D} {OCT} is used in the detection and management of serious eye diseases such as glaucoma and age-related macular degeneration. {F}or follow-up studies, image registration is a vital tool to enable more precise, quantitative comparison of disease states. {T}his work presents a registration method based on a recently introduced extension of the 2{D} {S}cale-{I}nvariant {F}eature {T}ransform ({SIFT}) framework to 3{D}. {T}he {SIFT} feature extractor locates minima and maxima in the difference of {G}aussian scale space to find salient feature points. {I}t then uses histograms of the local gradient directions around each found extremum in 3{D} to characterize them in a 4096 element feature vector. {M}atching points are found by comparing the distance between feature vectors. {W}e apply this method to the rigid registration of optic nerve head- ({ONH}) and macula-centered 3{D} {OCT} scans of the same patient that have only limited overlap. {T}hree {OCT} data set pairs with known deformation were used for quantitative assessment of the method's robustness and accuracy when deformations of rotation and scaling were considered. {T}hree-dimensional registration accuracy of 2.0-3.3 voxels was observed. {T}he accuracy was assessed as average voxel distance error in {N}=1572 matched locations. {T}he registration method was applied to 12 3{D} {OCT} scans (200 x 200 x 1024 voxels) of 6 normal eyes imaged in vivo to demonstrate the clinical utility and robustness of the method in a real-world environment.}, + file = {Niem09b.pdf:retina\\Niem09b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {11931099815327808952}, + gscites = {63}, + ss_id = {df038b6c7e9eb4a67f30f4752a478110f5b1d1b1}, + all_ss_ids = {['df038b6c7e9eb4a67f30f4752a478110f5b1d1b1']}, +} + +@inproceedings{Niem09c, + author = {M. Niemeijer and B. van Ginneken and M. D. Abr\`amoff}, + title = {Automatic classification of retinal vessels into arteries and veins}, + booktitle = MI, + year = {2009}, + volume = {7260}, + series = SPIE, + pages = {72601F1--72601F8}, + doi = {10.1117/12.813826}, + abstract = {{S}eparating the retinal vascular tree into arteries and veins is important for quantifying vessel changes that preferentially affect either the veins or the arteries. {F}or example the ratio of arterial to venous diameter, the retinal a/v ratio, is well established to be predictive of stroke and other cardiovascular events in adults, as well as the staging of retinopathy of prematurity in premature infants. {T}his work presents a supervised, automatic method that can determine whether a vessel is an artery or a vein based on intensity and derivative information. {A}fter thinning of the vessel segmentation, vessel crossing and bifurcation points are removed leaving a set of vessel segments containing centerline pixels. {A} set of features is extracted from each centerline pixel and using these each is assigned a soft label indicating the likelihood that it is part of a vein. 
{A}s all centerline pixels in a connected segment should be the same type we average the soft labels and assign this average label to each centerline pixel in the segment. {W}e train and test the algorithm using the data (40 color fundus photographs) from the {DRIVE} database1 with an enhanced reference standard. {I}n the enhanced reference standard a fellowship trained retinal specialist ({MDA}) labeled all vessels for which it was possible to visually determine whether it was a vein or an artery. {A}fter applying the proposed method to the 20 images of the {DRIVE} test set we obtained an area under the receiver operator characteristic ({ROC}) curve of 0.88 for correctly assigning centerline pixels to either the vein or artery classes.}, + file = {:retina\\Niem09c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {17967959879140071576}, + gscites = {74}, + ss_id = {3dee47df1e52968b02ac5ca0ce2790e7d4c56ac2}, + all_ss_ids = {['3dee47df1e52968b02ac5ca0ce2790e7d4c56ac2']}, +} + +@inproceedings{Niem09d, + author = {M. Niemeijer and B. van Ginneken and M. D. Abr\`amoff}, + title = {A Linking Framework for Pixel Classification Based Retinal Vessel Segmentation}, + booktitle = MI, + year = {2009}, + volume = {7262}, + series = SPIE, + pages = {726216-1--726216-8}, + doi = {10.1117/12.811653}, + abstract = {{R}etinal vessel segmentation is a prerequisite for the analysis of vessel parameters such as tortuosity, variation of the vessel width along the vessel and the ratio between the venous and arterial vessel width. {T}his analysis can provide indicators for the presence of a wide range of diseases. {D}ifferent types of approaches have been proposed to segment the retinal vasculature and two important groups are vessel tracking and pixel processing based methods. {A}n advantage of tracking based methods is the guaranteed connectedness of vessel segments, in pixel processing based methods connectedness is not guaranteed. {I}n this work an automated vessel linking framework is presented. {T}he framework links together separate pieces of the retinal vasculature into a connected vascular tree. {T}o determine which vessel sections should be linked together the use of a supervised cost function is proposed. {E}valuation is performed on the vessel centerlines. {T}he results show that the vessel linking framework outperforms other automated vessel linking methods especially for the narrowest vessels.}, + file = {Niem09d.pdf:retina\\Niem09d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {14163524361307936981}, + gscites = {14}, + ss_id = {f3cac15257a142e4137eb93feb1ba1cf1127221a}, + all_ss_ids = {['f3cac15257a142e4137eb93feb1ba1cf1127221a']}, +} + +@conference{Niem09e, + author = {M. Niemeijer and M. K. Garvin and K. Lee and M. Sonka and B. van Ginneken and M. D. Abr\`amoff}, + title = {Automated {S}egmentation of the {R}etinal {V}asculature {S}ilhouettes in {I}sotropic 3{D} {O}ptical {C}oherence {T}omography {S}cans}, + booktitle = ARVO, + year = {2009}, + abstract = {{P}urpose: {T}o report and evaluate an algorithm for the segmentation of retinal vessels in isotropic 3{D} spectral {OCT} scans, with closely spaced slices. {I}ts performance is compared with two expert segmentations of {OCT} projection images and a single expert segmentation of the corresponding color fundus photographs. 
{M}ethods: 16 optic nerve head centered 3{D} {OCT} volumes (200x200x1024 voxels, 6x6x2 mm3) were obtained from 8 subjects (1 scan per eye) using a {C}irrus {HD}-{OCT} machine ({C}arl {Z}eiss {M}editec, {I}nc., {D}ublin, {CA}). {A}dditionally, 16 color disc photographs, one from each of the eyes of these 8 subjects, were acquired. {T}wo experts manually marked the retinal vessel silhouettes in projection images derived from the {SD} {OCT} scans. {T}he two expert's results were combined into a single {OCT} reference segmentation. {A} single retinal expert marked all vessels in the color photographs. {T}he automatic vessel segmentation algorithm ({N}iemeijer et al., {ARVO} 2008) was applied to all {OCT} projection images. {T}he average segmented vessel lengths (mm) identified, missed and spuriously identified by the algorithm and the {OCT} reference segmentation were compared, after co-registration, with the expert detected vessels in the color photographs. {R}esults: {T}he algorithm, on average, correctly identified 37.5mm (95% {CI}, 34.7-40.3), missed 19.9mm (95% {CI}, 17.4-22.4) and spuriously identified 4.8mm (95% {CI}, 3.7-5.9) of vessels, compared to the color fundus photographs based standard. {T}he {OCT} reference segmentation identified 39.7mm (95% {CI}, 36.4-43.0), missed 17.6mm (95% {CI}, 15.4-19.8) and spuriously identified 2.0mm (95% {CI}, 1.6-2.4) of vessels. {C}onclusions: {T}he automatic system detects close to the same length of vessels in {OCT} projection images compared to an expert. {T}he length of the detectable vasculature is reduced in 3{D} spectral {OCT} compared with fundus images. {G}iven this performance, automated vessel segmentation in isotropic {SD} {OCT} can help co-localize {OCT} scans to each other as well as to fundus photos from the same patient at different timepoints.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Niem10, + author = {M. Niemeijer and B. van Ginneken and M. J. Cree and A. Mizutani and G. Quellec and C. I. S\'{a}nchez and B. Zhang and R. Hornero and M. Lamard and C. Muramatsu and X. Wu and G. Cazuguel and J. You and A. Mayo and Q. Li and Y. Hatanaka and B. Cochener and C. Roux and F. Karray and M. Garcia and H. Fujita and M. D. Abr\`amoff}, + title = {Retinopathy online challenge: automatic detection of microaneurysms in digital color fundus photographs}, + journal = TMI, + year = {2010}, + volume = {29}, + pages = {185--195}, + doi = {10.1109/TMI.2009.2033909}, + abstract = {{T}he detection of microaneurysms in digital color fundus photographs is a critical first step in automated screening for diabetic retinopathy ({DR}), a common complication of diabetes. {T}o accomplish this detection numerous methods have been published in the past but none of these was compared with each other on the same data. {I}n this work we present the results of the first international microaneurysm detection competition, organized in the context of the {R}etinopathy {O}nline {C}hallenge ({ROC}), a multiyear online competition for various aspects of {DR} detection. {F}or this competition, we compare the results of five different methods, produced by five different teams of researchers on the same set of data. {T}he evaluation was performed in a uniform manner using an algorithm presented in this work. {T}he set of data used for the competition consisted of 50 training images with available reference standard and 50 test images where the reference standard was withheld by the organizers ({M}. {N}iemeijer, {B}. van {G}inneken, and {M}. {D}. {A}br\`amoff). 
{T}he results obtained on the test data was submitted through a website after which standardized evaluation software was used to determine the performance of each of the methods. {A} human expert detected microaneurysms in the test set to allow comparison with the performance of the automatic methods. {T}he overall results show that microaneurysm detection is a challenging task for both the automatic methods as well as the human expert. {T}here is room for improvement as the best performing system does not reach the performance of the human expert. {T}he data associated with the {ROC} microaneurysm detection competition will remain publicly available and the website will continue accepting submissions.}, + file = {Niem10.pdf:pdf\\Niem10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {19822469}, + month = {1}, + gsid = {9532443460643942339}, + gscites = {508}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/89373}, + ss_id = {378dddb8472d1dd8e8bf18fc670916cdda4d4c1e}, + all_ss_ids = {['378dddb8472d1dd8e8bf18fc670916cdda4d4c1e']}, +} + +@inproceedings{Niem10a, + author = {M. Niemeijer and B. van Ginneken and M. D. Abr\`amoff}, + title = {Automatic determination of the artery vein ratio in retinal images}, + booktitle = MI, + year = {2010}, + volume = {7624}, + series = SPIE, + pages = {76240I1-76240I10}, + doi = {10.1117/12.844469}, + abstract = {{A} lower ratio between the width of the arteries and veins ({A}rteriolar-to-{V}enular diameter {R}atio, {AVR}) on the retina, is well established to be predictive of stroke and other cardiovascular events in adults, as well as an increased risk of retinopathy of prematurity in premature infants. {T}his work presents an automatic method that detects the location of the optic disc, determines the appropriate region of interest ({ROI}), classifies the vessels in the {ROI} into arteries and veins, measures their widths and calculates the {AVR}. {A}fter vessel segmentation and vessel width determination the optic disc is located and the system eliminates all vessels outside the {AVR} measurement {ROI}. {T}he remaining vessels are thinned, vessel crossing and bifurcation points are removed leaving a set of vessel segments containing centerline pixels. {F}eatures are extracted from each centerline pixel that are used to assign them a soft label indicating the likelihood the pixel is part of a vein. {A}s all centerline pixels in a connected segment should be the same type, the median soft label is assigned to each centerline pixel in the segment. {N}ext artery vein pairs are matched using an iterative algorithm and the widths of the vessels is used to calculate the {AVR}. {W}e train and test the algorithm using a set of 25 high resolution digital color fundus photographs a reference standard that indicates for the major vessels in the images whether they are an artery or a vein. {W}e compared the {AVR} values produced by our system with those determined using a computer assisted method in 15 high resolution digital color fundus photographs and obtained a correlation coefficient of 0.881.}, + file = {Niem10a.pdf:pdf\\Niem10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {4396585503730942977}, + gscites = {23}, + ss_id = {38a508f72094aa3810427c53f1a54ebbf15b7294}, + all_ss_ids = {['38a508f72094aa3810427c53f1a54ebbf15b7294']}, +} + +@inproceedings{Niem11, + author = {M. Niemeijer and A. V. Dumitrescu and B. van Ginneken and M. D. 
Abr\`amoff}, + title = {Automatic localization of bifurcations and vessel crossings in digital fundus photographs using location regression}, + booktitle = MI, + year = {2011}, + volume = {7965}, + series = SPIE, + pages = {796507}, + doi = {10.1117/12.878364}, + abstract = {Parameters extracted from the vasculature on the retina are correlated with various conditions such as diabetic retinopathy and cardiovascular diseases such as stroke. Segmentation of the vasculature on the retina has been a topic that has received much attention in the literature over the past decade. Analysis of the segmentation result, however, has only received limited attention with most works describing methods to accurately measure the width of the vessels. Analyzing the connectedness of the vascular network is an important step towards the characterization of the complete vascular tree. The retinal vascular tree, from an image interpretation point of view, originates at the optic disc and spreads out over the retina. The tree bifurcates and the vessels also cross each other. The points where this happens form the key to determining the connectedness of the complete tree. We present a supervised method to detect the bifurcations and crossing points of the vasculature of the retina. The method uses features extracted from the vasculature as well as the image in a location regression approach to find those locations of the segmented vascular tree where the bifurcation or crossing occurs (from here, POI, points of interest). We evaluate the method on the publicly available DRIVE database in which an ophthalmologist has marked the POI.}, + file = {Niem11.pdf:pdf/Niem11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {15284653557100524466}, + gscites = {1}, + ss_id = {d3fdf1be1b16875985db2121c467e852b2e6e319}, + all_ss_ids = {['d3fdf1be1b16875985db2121c467e852b2e6e319']}, +} + +@article{Niem11a, + author = {M. Niemeijer and M. Loog and M. D. Abr\`amoff and M. A. Viergever and M. Prokop and B. van Ginneken}, + title = {On Combining Computer-Aided Detection Systems}, + journal = TMI, + year = {2011}, + volume = {30}, + pages = {215--223}, + doi = {10.1109/TMI.2010.2072789}, + abstract = {Computer-aided detection (CAD) is increasingly used in clinical practice and for many applications a multitude of CAD systems have been developed. In practice, CAD systems have different strengths and weaknesses and it is therefore interesting to consider their combination. In this paper, we present generic methods to combine multiple CAD systems and investigate what kind of performance increase can be expected. Experimental results are presented using data from the ANODE09 and ROC09 online CAD challenges for the detection of pulmonary nodules in computed tomography scans and red lesions in retinal images, respectively. For both applications, combination results in a large and significant increase in performance when compared to the best individual CAD system.}, + file = {Niem11a.pdf:pdf\\Niem11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {20813633}, + month = {2}, + gsid = {17448530076208906101}, + gscites = {127}, + ss_id = {e2b916c12347faafc35876900a78ff9efe136791}, + all_ss_ids = {['e2b916c12347faafc35876900a78ff9efe136791']}, +} + +@article{Niem11b, + author = {M. Niemeijer and X. Xu and A. Dumitrescu and P. Gupta and B. van Ginneken and J. Folk and M. 
Abr\`amoff}, + title = {Automated measurement of the arteriolar-to-venular width ratio in digital color fundus photographs}, + journal = TMI, + year = {2011}, + volume = {31}, + pages = {1941--1950}, + doi = {10.1109/TMI.2011.2159619}, + abstract = {A decreased ratio of the width of retinal arteries to veins (Arteriolar-to-Venular diameter Ratio, AVR), is well established as predictive of cerebral atrophy, stroke and other cardiovascular events in adults. Tortuous and dilated arteries and veins, as well as decreased AVR are also markers for Plus disease in retinopathy of prematurity. This work presents an automated method to estimate the AVR in retinal color images by detecting the location of the optic disc, determining an appropriate region of interest (ROI), classifying vessels as arteries or veins, estimating vessel widths, and calculating the AVR. After vessel segmentation and vessel width determination, the optic disc is located and the system eliminates all vessels outside the AVR measurement ROI. A skeletonization operation is applied to the remaining vessels after which vessel crossings and bifurcation points are removed, leaving a set of vessel segments consisting of only vessel centerline pixels. Features are extracted from each centerline pixel in order to assign these a soft label indicating the likelihood that the pixel is part of a vein. As all centerline pixels in a connected vessel segment should be the same type, the median soft label is assigned to each centerline pixel in the segment. Next, artery vein pairs are matched using an iterative algorithm, and the widths of the vessels are used to calculate the AVR. We trained and tested the algorithm on a set of 65 high resolution digital color fundus photographs using a reference standard that indicates for each major vessel in the image whether it is an artery or vein. We compared the AVR values produced by our system with those determined by a semi-automated reference system. We obtained a mean unsigned error of 0.06 (SD 0.04) in 40 images with a mean AVR of 0.67. A second observer using the semi-automated system obtained the same mean unsigned error of 0.06 (SD 0.05) on the set of images with a mean AVR of 0.66. The testing data and reference standard used in this study has been made publicly available.}, + file = {Niem11b.pdf:pdf\\Niem11b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {21690008}, + month = {11}, + gsid = {14355423160597618268}, + gscites = {181}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/97661}, + ss_id = {05abf67f5320dd744e9edf76d5b04d4258bd49bd}, + all_ss_ids = {['05abf67f5320dd744e9edf76d5b04d4258bd49bd']}, +} + +@article{Nill07, + author = {Nillesen, M.M. and Lopata, R.G.P. and Gerrits, I.H. and Kapusta, Livia and Huisman, H.J. and Thijssen, J.M. and de Korte, C.L.}, + title = {Segmentation of the heart muscle in 3-{D} pediatric echocardiographic images}, + journal = UMB, + year = {2007}, + volume = {33}, + pages = {1453--1462}, + doi = {10.1016/j.ultrasmedbio.2007.04.001}, + abstract = {{T}his study aimed to show segmentation of the heart muscle in pediatric echocardiographic images as a preprocessing step for tissue analysis. {T}ransthoracic image sequences (2-{D} and 3-{D} volume data, both derived in radiofrequency format, directly after beam forming) were registered in real time from four healthy children over three heart cycles. 
{T}hree preprocessing methods, based on adaptive filtering, were used to reduce the speckle noise for optimizing the distinction between blood and myocardium, while preserving the sharpness of edges between anatomical structures. {T}he filtering kernel size was linked to the local speckle size and the speckle noise characteristics were considered to define the optimal filter in one of the methods. {T}he filtered 2-{D} images were thresholded automatically as a first step of segmentation of the endocardial wall. {T}he final segmentation step was achieved by applying a deformable contour algorithm. {T}his segmentation of each 2-{D} image of the 3-{D}+time (i.e., 4-{D}) datasets was related to that of the neighboring images in both time and space. {B}y thus incorporating spatial and temporal information of 3-{D} ultrasound image sequences, an automated method using image statistics was developed to perform 3-{D} segmentation of the heart muscle.}, + file = {Nill07.pdf:pdf\\Nill07.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {9}, + pmid = {17574727}, + month = {9}, + gsid = {6175723420112201828}, + gscites = {41}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/52135}, + ss_id = {25e214b3c90dc8afcc8418ea9d5001f50a223fa4}, + all_ss_ids = {['25e214b3c90dc8afcc8418ea9d5001f50a223fa4']}, +} + +@article{Nill09, + author = {M.M. Nillesen and R.G.P. Lopata and W.P. de Boode and I.H. Gerrits and H.J. Huisman and J.M. Thijssen and L. Kapusta and C.L. de Korte}, + title = {In vivo validation of cardiac output assessment in non-standard 3{D} echocardiographic images}, + journal = PMB, + year = {2009}, + volume = {54}, + pages = {1951--1962}, + doi = {10.1088/0031-9155/54/7/006}, + abstract = {{A}utomatic segmentation of the endocardial surface in three-dimensional (3{D}) echocardiographic images is an important tool to assess left ventricular ({LV}) geometry and cardiac output ({CO}). {T}he presence of speckle noise as well as the nonisotropic characteristics of the myocardium impose strong demands on the segmentation algorithm. {I}n the analysis of normal heart geometries of standardized (apical) views, it is advantageous to incorporate a priori knowledge about the shape and appearance of the heart. {I}n contrast, when analyzing abnormal heart geometries, for example in children with congenital malformations, this a priori knowledge about the shape and anatomy of the {LV} might induce erroneous segmentation results. {T}his study describes a fully automated segmentation method for the analysis of non-standard echocardiographic images, without making strong assumptions on the shape and appearance of the heart. {T}he method was validated in vivo in a piglet model. {R}eal-time 3{D} echocardiographic image sequences of five piglets were acquired in radiofrequency (rf) format. {T}hese {ECG}-gated full volume images were acquired intra-operatively in a non-standard view. {C}ardiac blood flow was measured simultaneously by an ultrasound transit time flow probe positioned around the common pulmonary artery. {T}hree-dimensional adaptive filtering using the characteristics of speckle was performed on the demodulated rf data to reduce the influence of speckle noise and to optimize the distinction between blood and myocardium. {A} gradient-based 3{D} deformable simplex mesh was then used to segment the endocardial surface. {A} gradient and a speed force were included as external forces of the model. 
{T}o balance data fitting and mesh regularity, one fixed set of weighting parameters of internal, gradient and speed forces was used for all data sets. {E}nd-diastolic and end-systolic volumes were computed from the segmented endocardial surface. {T}he cardiac output derived from this automatic segmentation was validated quantitatively by comparing it with the {CO} values measured from the volume flow in the pulmonary artery. {R}elative bias varied between 0 and -17\%, where the nominal accuracy of the flow meter is in the order of 10\%. {A}ssuming the {CO} measurements from the flow probe as a gold standard, excellent correlation (r = 0.99) was observed with the {CO} estimates obtained from image segmentation.}, + file = {Nill09.pdf:pdf\\Nill09.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {7}, + pmid = {19265202}, + month = {3}, + gsid = {17130217268325843134}, + gscites = {9}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/80272}, +} + +@inproceedings{Nill09a, + author = {M.M. Nillesen and R.G.P. Lopata and H.J. Huisman and J.M. Thijssen and L. Kapusta and C.L. de Korte}, + title = {{3D} cardiac segmentation using temporal correlation of radio frequency ultrasound data}, + booktitle = MICCAI, + year = {2009}, + volume = {12}, + series = LNCS, + pages = {927--934}, + doi = {10.1007/978-3-642-04271-3_112}, + abstract = {{S}emi-automatic segmentation of the myocardium in 3{D} echographic images may substantially support clinical diagnosis of heart disease. {P}articularly in children with congenital heart disease, segmentation should be based on the echo features solely since a priori knowledge on the shape of the heart cannot be used. {S}egmentation of echocardiographic images is challenging because of the poor echogenicity contrast between blood and the myocardium in some regions and the inherent speckle noise from randomly backscattered echoes. {P}hase information present in the radio frequency (rf) ultrasound data might yield useful, additional features in these regions. {A} semi-3{D} technique was used to determine maximum temporal cross-correlation values locally from the rf data. {T}o segment the endocardial surface, maximum cross-correlation values were used as additional external force in a deformable model approach and were tested against and combined with adaptive filtered, demodulated rf data. {T}he method was tested on full volume images ({P}hilips, i{E}33) of four healthy children and evaluated by comparison with contours obtained from manual segmentation.}, + file = {Nill09a.pdf:pdf\\Nill09a.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + pmid = {20426200}, + gsid = {12873469043739776735}, + gscites = {13}, + ss_id = {336b5a32e73a31cbc276d6dd0750e5ca42cfe073}, + all_ss_ids = {['336b5a32e73a31cbc276d6dd0750e5ca42cfe073']}, +} + +@article{Nill11, + author = {Nillesen, M.M. and Lopata, R.G.P. and Huisman, H. J. and Thijssen, J.M. and Kapusta, Livia and de Korte, C.L.}, + title = {Correlation based 3-D segmentation of the left ventricle in pediatric echocardiographic images using radio-frequency data}, + journal = UMB, + year = {2011}, + volume = {37}, + pages = {1409--1420}, + doi = {10.1016/j.ultrasmedbio.2011.05.005}, + abstract = {Clinical diagnosis of heart disease might be substantially supported by automated segmentation of the endocardial surface in three-dimensional (3-D) echographic images. 
Because of the poor echogenicity contrast between blood and myocardial tissue in some regions and the inherent speckle noise, automated analysis of these images is challenging. A priori knowledge on the shape of the heart cannot always be relied on, e.g., in children with congenital heart disease, segmentation should be based on the echo features solely. The objective of this study was to investigate the merit of using temporal cross-correlation of radio-frequency (RF) data for automated segmentation of 3-D echocardiographic images. Maximum temporal cross-correlation (MCC) values were determined locally from the RF-data using an iterative 3-D technique. MCC values as well as a combination of MCC values and adaptive filtered, demodulated RF-data were used as an additional, external force in a deformable model approach to segment the endocardial surface and were tested against manually segmented surfaces. Results on 3-D full volume images (Philips, iE33) of 10 healthy children demonstrate that MCC values derived from the RF signal yield a useful parameter to distinguish between blood and myocardium in regions with low echogenicity contrast and incorporation of MCC improves the segmentation results significantly. Further investigation of the MCC over the whole cardiac cycle is required to exploit the full benefit of it for automated segmentation.}, + file = {Nill11.pdf:pdf\\Nill11.pdf:PDF}, + optnote = {DIAG, MUSIC, RADIOLOGY}, + number = {9}, + pmid = {21683512}, + month = {9}, + gsid = {1538861246327733103}, + gscites = {19}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/95767}, + ss_id = {1b2583976cd39108af8f0b7d9f1900b4e61e4c95}, + all_ss_ids = {['1b2583976cd39108af8f0b7d9f1900b4e61e4c95']}, +} + +@article{Noot22, + author = {Julia M. Noothout and Nikolas Lessmann and Matthijs Van Eede and Louis van Harten and Ecem Sogancioglu and Friso Heslinga and Mitko Veta and Bram van Ginneken and Ivana Isgum}, + title = {Knowledge distillation with ensembles of convolutional neural networks for medical image segmentation}, + doi = {https://doi.org/10.1117/1.JMI.9.5.052407}, + abstract = {Purpose: Ensembles of convolutional neural networks (CNNs) often outperform a single CNN in medical image segmentation tasks, but inference is computationally more expensive and makes ensembles unattractive for some applications. We compared the performance of differently constructed ensembles with the performance of CNNs derived from these ensembles using knowledge distillation, a technique for reducing the footprint of large models such as ensembles. + + Approach: We investigated two different types of ensembles, namely, diverse ensembles of networks with three different architectures and two different loss-functions, and uniform ensembles of networks with the same architecture but initialized with different random seeds. For each ensemble, additionally, a single student network was trained to mimic the class probabilities predicted by the teacher model, the ensemble. We evaluated the performance of each network, the ensembles, and the corresponding distilled networks across three different publicly available datasets. These included chest computed tomography scans with four annotated organs of interest, brain magnetic resonance imaging (MRI) with six annotated brain structures, and cardiac cine-MRI with three annotated heart structures. + + Results: Both uniform and diverse ensembles obtained better results than any of the individual networks in the ensemble. 
Furthermore, applying knowledge distillation resulted in a single network that was smaller and faster without compromising performance compared with the ensemble it learned from. The distilled networks significantly outperformed the same network trained with reference segmentation instead of knowledge distillation. + + Conclusion: Knowledge distillation can compress segmentation ensembles of uniform or diverse composition into a single CNN while maintaining the performance of the ensemble.}, + file = {Noot22.pdf:pdf\\Noot22.pdf:PDF}, + journal = {Journal of Medical Imaging}, + month = {05}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, + ss_id = {0aabb1a4831d724bf28ef9a9a0d7bde6a1bb2ea7}, + all_ss_ids = {['0aabb1a4831d724bf28ef9a9a0d7bde6a1bb2ea7']}, + gscites = {5}, +} + +@conference{Oei11a, + author = {M. T. H. Oei and B. M. Goraj and F. J. A. Meijer and J. J. Mordang and A. J. Idema and S. H. E. Boots-Sprenger and H. O. A. Laue and M.Prokop}, + title = {Variability of relative cerebral blood volume normalization in patients with gliomas: Interobserver and intraobserver reproducibility study}, + booktitle = ISMRM, + year = {2011}, + abstract = {Purpose: Due to limited spatial resolution in T2* dynamic susceptibility contrast-enhanced MR imaging (DSC-MRI) arterial measurements are difficult. Therefore absolute quantification of Cerebral Blood Volume (CBV) is difficult and relative CBV (rCBV) values are reported. In order to reduce intraindividual differences in CBV values determined by DSC-MRI, CBV is normalized to normal appearing white matter (NAWM) or normal appearing gray matter (NAGM). We determined the variability of such rCBV values depending on the choice of the region of interest (ROI) in NAWM or NAGM. Materials and Methods: We retrospectively analyzed 17 patients with pathologically proven gliomas for whom DSC-MRI was available. DSC-MRI used a T2* weighted sequence with 5mm thickness and 2s temporal resolution. Measurements were performed by two observers. First, observer 1 generated rCBV maps on which a tumor ROI was defined that contained the region with highest tumor perfusion. This ROI was kept constant for the subsequent normalization step using NAWM or NAGM. ROIs were drawn on 1) contralateral NAWM by choice, 2) the same axial section as tumor ROI in the contralateral NAWM, 3) contralateral NAGM by choice, 4) NAWM near corpus callosum, 5) contralateral putamen, 6) contralateral thalamus and 7) a large ROI in contralateral centrum semiovale by both observers. These ROIs were used as internal reference standard to calculate rCBV. After a first evaluation (T1), a second evaluation (T2) was performed using the same method as described above after 2 months. Wilcoxon signed-rank test was used to determine significant difference between rCBVtumor measurements of T1 and T2 (intraobserver) and between both observers (interobserver) (significance level p < .05). Inter- and intraobserver reproducibility were also both quantified with intraclass correlation coefficient (R) and the coefficient of variation (CV). Results: Wilcoxon signed-rank test did not show significant difference between rCBVtumor measurements of the first evaluation and second evaluation for all ROIs and for both observers (p > .05). Intraobserver CV ranged between 7% - 19% for observer 1 and 12% - 23% for observer 2. 
Intraclass correlation coefficient (R) showed excellent intraobserver reproducibility for contralateral putamen, thalamus and centrum semiovale for observer 1 (R > .74) and good intraobserver reproducibility for contralateral putamen (R = .69) and centrum semiovale (R = 0.74) for observer 2. Wilcoxon signed-rank test did not show significant difference between rCBVtumor measurements of observer 1 and 2 for contralateral NAWM by choice, corpus callosum and putamen. Interobserver CV ranged from 12% to 17% (except for NAWM where CV was 42%). Intraclass correlation coefficient (R) showed excellent interobserver reproducibility for contralateral putamen (R = .76) and centrum semiovale (R = .77) . Conclusion: Our findings show good intra- and interobserver CV of rCBV measurements. Contralateral putamen chosen as internal reference standard for rCBV shows least interobserver variability and a low intraobserver variability.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Oei12, + author = {Marcel T. H. Oei and Rieneke {van den Boom} and Luuk J. Oostveen and Sven Lafebre and Ewoud J. Smit and Bram {van Ginneken} and Rashindra Manniesing and Mathias Prokop}, + title = {Hybrid digital phantom for optimizing data acquisition strategies in {CT} perfusion}, + booktitle = ECR, + year = {2012}, + abstract = {Purpose: Optimizing data acquisition strategies for cerebral CT perfusion (CTP) studies is difficult in clinical practice. We developed a hybrid digital phantom to study the influence of various acquisition parameters on CTP maps. Material and Methods: The phantom uses scans of an epoxy-filled human skull to provide a homogeneous background with realistic noise patterns derived from a 320-row CT scanner. Superimposed geometric shapes with real patient perfusion curves are digitally added. By manipulating the time tags of the images and by using noise patterns derived with various exposure settings different CTP acquisition strategies can be simulated. The resulting perfusion maps allow for studying contrast-to-noise ratios (CNR) of the superimposed shapes. We tested the phantom by simulating a clinical CTP protocol. We measured arterial input functions (AIF) and tissue curves in grey and white matter (GM, WM) in 9 patients. GM-to-WM CNR derived from the CBF maps were calculated for patients and compared to values of the phantom using identical AIF and tissue curves that were superimposed on the background (simulating WM) and on a circular object (20mm, simulating GM). Results: Measured noise in CBF maps from patient and phantom data was within 13% (1.9-23.9%) of each other. CNR for CBF maps derived from the phantom (20mm object) and patient data correlated well (R = 0.93). Conclusion: The phantom provides a good estimate of real noise in CTP maps and allows for measuring CNR. It can play a major role for future optimization of CTP protocols.}, + optnote = {DIAG}, +} + +@conference{Oei13, + author = {Marcel T. H. Oei and Rieneke van den Boom and Luuk J. Oostveen and Hendrik O. A. Laue and Bram {van Ginneken} and Rashindra Manniesing and Mathias Prokop}, + booktitle = ECR, + title = {Influence of Temporal Sampling Rate of CTP acquisitions on Cerebral Perfusion Maps using a Digital Phantom}, + abstract = {Purpose: To study the influence of the temporal sampling rate in cerebral CT perfusion scans using noise-free synthetic data sets created with a digital phantom. 
Materials and Methods: Noise-free synthetic data sets were created with a digital phantom which consists of a skull derived from a human skull phantom combined with arterial input and venous output functions, white matter (WM) and gray matter (GM) time-attenuation curves (TAC) obtained from 5 patients. By manipulating the time tags CTP protocols with a total scan duration of 60s and a temporal sampling rate of 1, 2, 3 and 4s were simulated. The 1s protocol was used as reference standard. The 2, 3 and 4s protocols were shifted with 1s to measure the influence of missing peak points of the curve, resulting in 1, 2, 3 extra data sets, respectively. CBF, CBV and MTT maps were calculated using PMA (ASIST-Japan). Mean values of GM and WM of the protocols were compared to the reference protocol. Results: By increasing temporal resolution, CBF values decreased, MTT values increased and CBV stayed relatively the same for WM and GM: CBF values were within 63%, 75% and 85% of the reference standard for 2s, 3s and 4s protocols, respectively. MTT values were within 69%, 156% and 238% of the reference standard for 2s, 3s and 4s protocols, respectively. CBV values stayed within 50% of the reference standard for all protocols. Conclusion: Cerebral perfusion values are strongly influenced by temporal sampling rate.}, + optnote = {DIAG, RADIOLOGY}, + year = {2013}, +} + +@conference{Oei14, + author = {Marcel T. H. Oei and Rashindra Manniesing and Frederick J. A. Meijer and Mathias Prokop}, + title = {{One-Step-Stroke} Imaging: Does an Interleaved Acquisition of Cerebral {CT} Perfusion and {CTA} of the Carotids Affect {CTP} Values}, + booktitle = ECR, + year = {2014}, + abstract = {Purpose: One-step stroke imaging is a CT perfusion(CTP) scan in which one volumetric acquisition is replaced to acquire a carotid CTA using toggling table technique and a single dose of contrast agent. The purpose of this study is to determine if missing one acquisition of the CTP influences the CTP values. Methods and Materials: Ten patients with suspicion of ischemic stroke were scanned with a clinical CTP head protocol(14 scans every 2s, 5 scans every 5s) with a total duration of 55s using a 320-row CT scanner. The one-step stroke protocol is simulated from the original clinical protocol by deleting one acquisition. For every patient, the timepoint of the bolus arrival time(BAT) and the arterial peak(AP) in the middle cerebral artery was determined. One acquisition was deleted starting from the BAT up to the fifth time point after AP. Corresponding perfusion maps were calculated with bSVD using PMA-ASIST. Regions of interests(ROIs) were annotated in normal-appearing grey and white matter. The percentage errors were calculated per timepoint and patient. Results: The absolute mean of the percentage errors across all patients for each timepoint in grey matter compared to the original CBF, CBV and MTT is 3.40%(range 0.10-9.53%), 3.22%(0.00-9.98%) and 3.08%(0.00-13.79%), respectively. Percentage errors>8.50% occurred for some patients if an acquisition was deleted at BAT, 2s-4s after BAT, AP or after the AP. Similar results were found in WM. Conclusion: A one-step stroke protocol is feasible with only minor effects on the CTP maps if CTA is acquired 2-4s after BAT and before the AP.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Oei14a, + author = {Marcel T. H. Oei and Rashindra Manniesing and Rieneke {van den Boom} and Willem-Jan {van der Woude} and Bram {van Ginneken} and Frederick J. A. 
Meijer and Mathias Prokop}, + title = {{One-Step-Stroke CT} Imaging - Part {I}: Optimization of Interleaved Acquisition of Cerebral {CT} Perfusion and Neck {CT} Angiography}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE: One-Step-Stroke imaging is a CTP acquisition in which one volumetric scan is substituted by volumetric neck CTA, using a toggling table technique and a single dose of contrast agent (see figure). It is not clear how missing one time point of the CTP acquisition to obtain the neck CTA will affect the perfusion maps and which time point is best suited for neck CTA. We determined the optimum timing of neck CTA with the least effect on cerebral perfusion maps. METHOD AND MATERIALS: 20 consecutive patients with suspicion of ischemic stroke were scanned with a clinical CTP head protocol using a 320-row CT scanner. A neck CTA takes maximal 4s, therefore omitting one time point of the CTP with 2s scan interval is sufficient. The One-Step-Stroke protocol was simulated from the original protocol by eliminating one acquisition at various time points. The elimination of one acquisition of CTP simulates the acquisition of the neck CTA. For every patient one volumetric acquisition was deleted, starting from the bolus arrival time up to the fifth time point after the arterial peak determined from the middle cerebral artery (MCA). Corresponding perfusion maps were calculated. Percentage errors were calculated for all perfusion parameters (CBF, CBV, MTT) in basal ganglia and white matter per time point and per patient. Bolus tracking is simulated by using the enhancement curves in the MCA to derive relative thresholds (40-100HU). The relative thresholds were used to determine the time point resulting in the smallest error across all patients. RESULTS: A volumetric CTP scan deleted 2s after reaching a threshold of 40-70HU kept the absolute percentage errors of all perfusion parameters below 10% in all patients. A relative threshold of 70HU for bolus tracking of the CTA gave the lowest absolute percentage errors for CTP parameters (mean <3.0%, maximum always <7.5%) for acquiring the neck CTA. Estimated average enhancement at CTA, measured in the MCA, was 302HU (range 198-408HU). CONCLUSION: Our simulations suggest that the One-Step-Stroke protocol does not significantly alter absolute perfusion values and creates high enhancement in the carotids, if the neck CTA is acquired 2s after a threshold of 70HU in the MCA. CLINICAL RELEVANCE/APPLICATION: One-step stroke imaging is a single exam sequence where the neck CTA is part of the CTP. One-Step-Stroke imaging has the potential to replace CTA and CTP which saves radiation dose and contrast agent dose.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Oei17, + author = {Oei, Marcel T H and Meijer, Frederick J A and van der Woude, Willem-Jan and Smit, Ewoud J and van Ginneken, Bram and Manniesing, Rashindra and Prokop, Mathias}, + title = {Interleaving cerebral {CT} perfusion with neck {CT} angiography. Part {II}: clinical implementation and image quality}, + journal = ER, + year = {2017}, + volume = {27}, + pages = {2411--2418}, + month = {6}, + abstract = {Feasibility evaluation of the One-Step Stroke Protocol, which is an interleaved cerebral computed tomography perfusion (CTP) and neck volumetric computed tomography angiography (vCTA) scanning technique using wide-detector computed tomography, and to assess the image quality of vCTA. 
Twenty patients with suspicion of acute ischaemic stroke were prospectively scanned and evaluated with a head and neck CTA and with the One-Step Stroke Protocol. Arterial enhancement and contrast-to-noise ratio (CNR) in the carotid arteries was assessed. Three observers scored artefacts and image quality of the cervical arteries. The total z-coverage was evaluated. Mean enhancement in the carotid bifurcation was rated higher in the vCTA (595 +- 164 HU) than CTA (441 +- 117 HU). CNR was rated higher in vCTA. Image quality scores showed no significant difference in the region of the carotid bifurcation between vCTA and CTA. Lower neck image quality scores were slightly lower for vCTA due to artefacts, although not rated as diagnostically relevant. In ten patients, the origin of the left common carotid artery was missed by 1.6 +- 0.8 cm. Mean patient height was 1.8 +- 0.09 m. Carotid bifurcation and origin of vertebral arteries were covered in all patients. The One-Step Stroke Protocol is feasible with good diagnostic image quality of vCTA, although full z-coverage is limited in tall patients. * Interleaving cerebral CTP with neck CTA (One-Step Stroke Protocol) is feasible * Diagnostic quality of One-Step Stroke Protocol neck CTA is similar to conventional CTA * One-Step Stroke Protocol neck CTA suffers from streak artefacts in the lower neck * A limitation of One-Step Stroke Protocol CTA is lack of coverage in tall patients * Precise planning of One-Step Stroke Protocol neck CTA is necessary in tall patients.}, + doi = {10.1007/s00330-016-4592-z}, + file = {Oei17.pdf:pdf\\Oei17.pdf:PDF}, + issue = {6}, + optnote = {DIAG}, + pmid = {27651144}, + gsid = {3697066219334813948}, + gscites = {11}, + ss_id = {242895b9618a7225acb805571956c6843dae0858}, + all_ss_ids = {['c21f83954bab86611134478ab918399813c10313', '242895b9618a7225acb805571956c6843dae0858']}, +} + +@article{Oei17a, + author = {Oei, Marcel T H and Meijer, Frederick J A and van der Woude, Willem-Jan and Smit, Ewoud J and van Ginneken, Bram and Prokop, Mathias and Manniesing, Rashindra}, + title = {Interleaving cerebral {CT} perfusion with neck {CT} angiography part {I}. Proof of concept and accuracy of cerebral perfusion values}, + journal = ER, + year = {2017}, + volume = {27}, + issue = {6}, + month = {6}, + pages = {2649--2656}, + doi = {10.1007/s00330-016-4577-y}, + abstract = {We present a novel One-Step-Stroke protocol for wide-detector CT scanners that interleaves cerebral CTP with volumetric neck CTA (vCTA). We evaluate whether the resulting time gap in CTP affects the accuracy of CTP values. Cerebral CTP maps were retrospectively obtained from 20 patients with suspicion of acute ischemic stroke and served as the reference standard. To simulate a 4 s gap for interleaving CTP with vCTA, we eliminated one acquisition at various time points of CTP starting from the bolus-arrival-time(BAT). Optimal timing of the vCTA was evaluated. At the time point with least errors, we evaluated elimination of a second time point (6 s gap). Mean absolute percentage errors of all perfusion values remained below 10 % in all patients when eliminating any one time point in the CTP sequence starting from the BAT. Acquiring the vCTA 2 s after reaching a threshold of 70HU resulted in the lowest errors (mean <3.0 %). Eliminating a second time point still resulted in mean errors <3.5 %. CBF/CBV showed no significant differences in perfusion values except MTT. However, the percentage errors were always below 10 % compared to the original protocol. 
Interleaving cerebral CTP with neck CTA is feasible with minor effects on the perfusion values.}, + file = {Oei17a.pdf:pdf\\Oei17a.pdf:PDF}, + optnote = {DIAG}, + pmid = {27718078}, + gsid = {17733791166970944449}, + gscites = {7}, + ss_id = {6cc4abfa6775f257c9231648a24ac9d825b232dd}, + all_ss_ids = {['6cc4abfa6775f257c9231648a24ac9d825b232dd']}, +} + +@article{Oei18, + author = {Oei, Marcel T. H. and Meijer, Frederick J. A. and Mordang, Jan-Jurre and Smit, Ewoud J. and Idema, Albert J. and Goraj, Bozena M. and Laue, Hendrik O. A. and Prokop, Mathias and Manniesing, Rashindra}, + title = {Observer Variability of Reference Tissue Selection for Relative Cerebral Blood Volume Measurements in Glioma Patients}, + journal = ER, + year = {2018}, + volume = {28}, + issue = {9}, + pages = {3902-3911}, + doi = {10.1007/s00330-018-5353-y}, + abstract = {Objectives: To assess observer variability of different reference tissues used for relative CBV (rCBV) measurements in DSC-MRI of glioma patients. + + Methods: In this retrospective study, three observers measured rCBV in DSC-MR images of 44 glioma patients on two occasions. rCBV is calculated by the CBV in the tumour hotspot/the CBV of a reference tissue at the contralateral side for normalization. One observer annotated the tumour hotspot that was kept constant for all measurements. All observers annotated eight reference tissues of normal white and grey matter. Observer variability was evaluated using the intraclass correlation coefficient (ICC), coefficient of variation (CV) and Bland-Altman analyses. + + Results: For intra-observer, the ICC ranged from 0.50-0.97 (fair-excellent) for all reference tissues. The CV ranged from 5.1-22.1 % for all reference tissues and observers. For inter-observer, the ICC for all pairwise observer combinations ranged from 0.44-0.92 (poor-excellent). The CV ranged from 8.1-31.1 %. Centrum semiovale was the only reference tissue that showed excellent intra- and inter-observer agreement (ICC>0.85) and lowest CVs (<12.5 %). Bland-Altman analyses showed that mean differences for centrum semiovale were close to zero. + + Conclusion: Selecting contralateral centrum semiovale as reference tissue for rCBV provides the lowest observer variability.}, + file = {:Oei18 - Observer Variability of Reference Tissue Selection for Relative Cerebral Blood Volume Measurements in Glioma Patients.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29572637}, + month = {3}, + gsid = {6208890709619164011}, + gscites = {10}, + ss_id = {3c92977e98bbb2991e1843f3c3a2941e651d019d}, + all_ss_ids = {['3c92977e98bbb2991e1843f3c3a2941e651d019d']}, +} + +@article{Oest91, + author = {Oestmann, J. W. and Prokop, M. and Schaefer, C. M. and Galanski, M.}, + title = {Hardware and software artifacts in storage phosphor radiography}, + journal = Radiographics, + year = {1991}, + volume = {11}, + pages = {795--805}, + abstract = {Hardware and software artifacts in digital radiographs acquired with storage phosphor systems can seriously impair image quality and imitate or mask abnormalities. These artifacts are caused by image plate, image reader, and laser printer defects; faulty image readout; processing errors; and unsharp masking. The artifacts can simulate calcifications and pneumothoraces or conceal low-contrast ill-defined lesions and subtle lesions along opacity interfaces. Hardware artifacts need to be recognized and properly traced to repair the system or improve its maintenance. 
Artifacts due to software characteristics and image post-processing must also be identified to ensure adequate system handling and adjustment of postprocessing algorithms.}, + file = {Oest91.pdf:pdf\\Oest91.pdf:PDF}, + optnote = {DIAG}, + number = {5}, + pmid = {1947316}, + month = {9}, + gsid = {6849789943966352622}, + gscites = {46}, +} + +@article{Oest91a, + author = {Oestmann, J. W. and Reichelt, S. and Prokop, M. and Schaefer, C. and Galanski, M.}, + title = {Digital projection radiography}, + journal = Radiologe, + year = {1991}, + volume = {31}, + pages = {1--7}, + abstract = {Several hundred storage phosphor digital projection radiography (DR) systems are in operation in many parts of the world in experimental and clinical settings. They are used clinically for almost all projection radiographic studies except mammography. An overview is given of the experimental and clinical results achieved so far. Image post-processing has yet to meet the initial expectations. The average image quality will certainly improve with automatic brightness control. Edge enhancement should be performed in selected applications only. A true increase in diagnostic information probably cannot be expected except with dual energy techniques. Dose reductions are possible only in those studies in which the specific imaging task permits a decrease in signal-to-noise ratio.}, + optnote = {DIAG}, + number = {1}, + pmid = {2006247}, + gsid = {3284629912310177246}, + gscites = {4}, +} + +@article{Ogon22, + author = {Ogony, Joshua and de Bel, Thomas and Radisky, Derek C. and Kachergus, Jennifer and Thompson, E. Aubrey and Degnim, Amy C. and Ruddy, Kathryn J. and Hilton, Tracy and Stallings-Mann, Melody and Vachon, Celine and Hoskin, Tanya L. and Heckman, Michael G. and Vierkant, Robert A. and White, Launia J. and Moore, Raymond M. and Carter, Jodi and Jensen, Matthew and Pacheco-Spann, Laura and Henry, Jill E. and Storniolo, Anna Maria and Winham, Stacey J. and van der Laak, Jeroen and Sherman, Mark E.}, + title = {Towards defining morphologic parameters of normal parous and nulliparous breast tissues by artificial intelligence}, + doi = {10.1186/s13058-022-01541-z}, + year = {2022}, + abstract = {Abstract + Background + Breast terminal duct lobular units (TDLUs), the source of most breast cancer (BC) precursors, are shaped by age-related involution, a gradual process, and postpartum involution (PPI), a dramatic inflammatory process that restores baseline microanatomy after weaning. Dysregulated PPI is implicated in the pathogenesis of postpartum BCs. We propose that assessment of TDLUs in the postpartum period may have value in risk estimation, but characteristics of these tissues in relation to epidemiological factors are incompletely described. + + Methods + Using validated Artificial Intelligence and morphometric methods, we analyzed digitized images of tissue sections of normal breast tissues stained with hematoxylin and eosin from donors <= 45 years from the Komen Tissue Bank (180 parous and 545 nulliparous). Metrics assessed by AI, included: TDLU count; adipose tissue fraction; mean acini count/TDLU; mean dilated acini; mean average acini area; mean "capillary" area; mean epithelial area; mean ratio of epithelial area versus intralobular stroma; mean mononuclear cell count (surrogate of immune cells); mean fat area proximate to TDLUs and TDLU area. We compared epidemiologic characteristics collected via questionnaire by parity status and race, using a Wilcoxon rank sum test or Fisher's exact test. 
Histologic features were compared between nulliparous and parous women (overall and by time between last birth and donation [recent birth: <= 5 years versus remote birth: > 5 years]) using multivariable regression models. + + Results + Normal breast tissues of parous women contained significantly higher TDLU counts and acini counts, more frequent dilated acini, higher mononuclear cell counts in TDLUs and smaller acini area per TDLU than nulliparas (all multivariable analyses p < 0.001). Differences in TDLU counts and average acini size persisted for > 5 years postpartum, whereas increases in immune cells were most marked <= 5 years of a birth. Relationships were suggestively modified by several other factors, including demographic and reproductive characteristics, ethanol consumption and breastfeeding duration. + + Conclusions + Our study identified sustained expansion of TDLU numbers and reduced average acini area among parous versus nulliparous women and notable increases in immune responses within five years following childbirth. Further, we show that quantitative characteristics of normal breast samples vary with demographic features and BC risk factors. + }, + url = {http://dx.doi.org/10.1186/s13058-022-01541-z}, + file = {Ogon22.pdf:pdf\Ogon22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Breast Cancer Research}, + citation-count = {1}, + automatic = {yes}, + volume = {24}, +} + +@article{Olac20, + author = {Olaciregui-Ruiz, Igor and Torres-Xirau, Iban and Teuwen, Jonas and van der Heide, Uulke A. and Mans, Anton}, + title = {A Deep Learning-based correction to {EPID} dosimetry for attenuation and scatter in the {Unity MR-Linac} system}, + doi = {10.1016/j.ejmp.2020.02.020}, + pages = {124-131}, + volume = {71}, + abstract = {Purpose: + EPID dosimetry in the Unity MR-Linac system allows for reconstruction of absolute dose distributions within the patient geometry. Dose reconstruction is accurate for the parts of the beam arriving at the EPID through the MRI central unattenuated region, free of gradient coils, resulting in a maximum field size of ~10x22 cm2 at isocentre. The purpose of this study is to develop a Deep Learning-based method to improve the accuracy of 2D EPID reconstructed dose distributions outside this central region, accounting for the effects of the extra attenuation and scatter. + + Methods: + A U-Net was trained to correct EPID dose images calculated at the isocenter inside a cylindrical phantom using the corresponding TPS dose images as ground truth for training. The model was evaluated using a 5-fold cross validation procedure. The clinical validity of the U-Net corrected dose images (the so-called DEEPID dose images) was assessed with in vivo verification data of 45 large rectum IMRT fields. The sensitivity of DEEPID to leaf bank position errors (+-1.5 mm) and +-5% MU delivery errors was also tested. + + Results: + Compared to the TPS, in vivo 2D DEEPID dose images showed an average g-pass rate of 90.2% (72.6%-99.4%) outside the central unattenuated region. Without DEEPID correction, this number was 44.5% (4.0%-78.4%). DEEPID correctly detected the introduced delivery errors . + Conclusions: DEEPID allows for accurate dose reconstruction using the entire EPID image, thus enabling dosimetric verification for field sizes up to ~19x22 cm2 at isocentre. 
The method can be used to detect clinically relevant errors.}, + file = {Olac20.pdf:pdf\\Olac20.pdf:PDF}, + journal = PHYSMED, + optnote = {DIAG, RADIOLOGY}, + pmid = {32135486}, + year = {2020}, + month = {3}, + ss_id = {98c18814c12cabb5b4b453141f28e6755527efbe}, + all_ss_ids = {['98c18814c12cabb5b4b453141f28e6755527efbe']}, + gscites = {9}, +} + +@article{Olbr95, + author = {Olbricht, C. J. and Paul, K. and Prokop, M. and Chavan, A. and Schaefer-Prokop, C. M. and Jandeleit, K. and Koch, K. M. and Galanski, M.}, + title = {Minimally invasive diagnosis of renal artery stenosis by spiral computed tomography angiography}, + journal = KI, + year = {1995}, + volume = {48}, + pages = {1332--1337}, + abstract = {We prospectively compared in a blinded fashion spiral computed tomography angiography (CTA) with arteriography in 62 consecutive patients with suspected renal artery stenosis (RAS). For CTA 150 ml of contrast material were injected intravenously. Arteriography was performed by DSA technique with selective catheterization of renal arteries. Of the 157 visualized renal arteries 155 could be evaluated with DSA and a total of 157 with CTA. Sensitivity of CTA for RAS > or = 50\% was 98\% and the specificity was 94\%. Comparison of the grade of stenosis as evaluated by DSA versus CTA showed: identical gradation in 59 arteries (DSA > or = 50\%/CTA > or = 50\%), underestimation by CTA in one artery (DSA 50 to 75\%/CTA < 50\%), and overestimation by CTA in six arteries (DSA < 50\%/CTA 50 to 75\%). Factors that may contribute to these differences include impaired renal function and possibly "underestimation" of ostial RAS by arteriography. One artery not evaluable by arteriography showed a 70\% stenosis by CTA. CTA showed no major side effects. We conclude that CTA has the same accuracy for the diagnosis of RAS > or = 50\% as arteriography. However, CTA is only minimally invasive, safe, and causes less discomfort to patients.}, + optnote = {DIAG}, + number = {4}, + pmid = {8569096}, + month = {10}, + gsid = {1439364794994130571}, + gscites = {135}, +} + +@article{Osch05, + author = {Elisabeth Oschatz and Mathias Prokop and Martina Scharitzer and Michael Weber and Csilla Balassy and Cornelia {Schaefer-Prokop}}, + title = {Comparison of liquid crystal versus cathode ray tube display for the detection of simulated chest lesions}, + journal = ER, + year = {2005}, + volume = {15}, + pages = {1472--6}, + file = {Osch05.pdf:pdf\\Osch05.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {7}, + month = {9}, + gsid = {4338915227163447916}, + gscites = {32}, +} + +@article{Otte05, + author = {J. D. M. Otten and N. Karssemeijer and J. H. C. L. Hendriks and J. H. Groenewoud and J. Fracheboud and A. L. M. Verbeek and H. J. de Koning and R. 
Holland}, + title = {Effect of recall rate on earlier screen detection of breast cancers based on the {D}utch performance indicators}, + journal = JNCI, + year = {2005}, + volume = {97}, + pages = {748--754}, + doi = {10.1093/jnci/dji131}, + file = {Otte05.pdf:pdf/Otte05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {15900044}, + month = {5}, + gsid = {12384820780576153322}, + gscites = {115}, + ss_id = {395ad39f7459433b4b7a43aa100b5e681ee28a7d}, + all_ss_ids = {['395ad39f7459433b4b7a43aa100b5e681ee28a7d']}, +} + +@mastersthesis{Oude19, + author = {Tycho van der Ouderaa}, + title = {Reversible Networks for Memory-efficient Image-to-Image Translation in 3D Medical Imaging}, + year = {2019}, + abstract = {The Pix2pix and CycleGAN losses have vastly improved the qualitative and quantitative visual quality of results in image-to-image translation tasks. We extend this framework by exploring approximately invertible architectures which are well suited to these losses. These architectures are approximately invertible by design and thus partially satisfy cycle-consistency before training even begins. Furthermore, since invertible architectures have constant memory complexity in depth, these models can be built arbitrarily deep. We are able to demonstrate superior quantitative output on the Cityscapes and Maps datasets. + + Additionally, we show that the model allows us to perform several memory-intensive medical imaging tasks, including a super-resolution problem on 3D MRI brain volumes. We also demonstrate that our model can perform a 3D domain-adaptation and 3D super-resolution task on chest CT volumes. By doing this, we provide a proof-of-principle for using reversible networks to create a model capable of pre-processing 3D CT scans to high resolution with a standardized appearance.}, + file = {Oude19.pdf:pdf/Oude19.pdf:PDF}, + optnote = {DIAG}, + school = {University of Amsterdam}, + journal = {Master thesis}, +} + +@inproceedings{Oude19a, + author = {van der Ouderaa, T. F. A. and Worrall, D. E. and van Ginneken, B.}, + title = {Chest {CT} Super-resolution and Domain-adaptation using Memory-efficient {3D} Reversible {GANs}}, + booktitle = MIDL, + year = {2019}, + url = {https://openreview.net/forum?id=SkxueFsiFV}, + abstract = {Medical imaging data are typically large in size. As a result, it can be difficult to train deep neural models on them. The activations of invertible neural networks do not have to be stored to perform backpropagation, therefore such networks can be used to save memory when handling large data volumes. We use a technique called additive coupling to obtain a memory-efficient partially-reversible image-to-image translation model. With this model, we perform a 3D super-resolution and 3D domain-adaptation task, on both paired and unpaired CT scan data. Additionally, we demonstrate experimentally that the model requires significantly less GPU memory than models without reversibility. 
samples as in or out-of distribution, our method achieves an AUC score of 0.99.}, + file = {:pdf/Oude19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {0ad54bc84119cf0a8ff04d939cc0b4575d7e9b5a}, + all_ss_ids = {['0ad54bc84119cf0a8ff04d939cc0b4575d7e9b5a']}, + gscites = {5}, +} + +@article{Page23, + author = {Page, David B and Broeckx, Glenn and Jahangir, Chowdhury Arif and Verbandt, Sara and Gupta, Rajarsi R and Thagaard, Jeppe and Khiroya, Reena and Kos, Zuzana and Abduljabbar, Khalid and Acosta Haab, Gabriela and Acs, Balazs and Akturk, Guray and Almeida, Jonas S and Alvarado-Cabrero, Isabel and Azmoudeh-Ardalan, Farid and Badve, Sunil and Baharun, Nurkhairul Bariyah and Bellolio, Enrique R and Bheemaraju, Vydehi and Blenman, Kim RM and Botinelly Mendon\c{c}a Fujimoto, Luciana and Bouchmaa, Najat and Burgues, Octavio and Cheang, Maggie Chon U and Ciompi, Francesco and Cooper, Lee AD and Coosemans, An and Corredor, Germ\'{a}n and Dantas Portela, Flavio Luis and Deman, Frederik and Demaria, Sandra and Dudgeon, Sarah N and Elghazawy, Mahmoud and Ely, Scott and Fernandez-Mart\'{i}n, Claudio and Fineberg, Susan and Fox, Stephen B and Gallagher, William M and Giltnane, Jennifer M and Gnjatic, Sacha and Gonzalez-Ericsson, Paula I and Grigoriadis, Anita and Halama, Niels and Hanna, Matthew G and Harbhajanka, Aparna and Hardas, Alexandros and Hart, Steven N and Hartman, Johan and Hewitt, Stephen and Hida, Akira I and Horlings, Hugo M and Husain, Zaheed and Hytopoulos, Evangelos and Irshad, Sheeba and Janssen, Emiel AM and Kahila, Mohamed and Kataoka, Tatsuki R and Kawaguchi, Kosuke and Kharidehal, Durga and Khramtsov, Andrey I and Kiraz, Umay and Kirtani, Pawan and Kodach, Liudmila L and Korski, Konstanty and Kov\'{a}cs, Anik\'{o} and Laenkholm, Anne-Vibeke and Lang-Schwarz, Corinna and Larsimont, Denis and Lennerz, Jochen K and Lerousseau, Marvin and Li, Xiaoxian and Ly, Amy and Madabhushi, Anant and Maley, Sai K and Manur Narasimhamurthy, Vidya and Marks, Douglas K and McDonald, Elizabeth S and Mehrotra, Ravi and Michiels, Stefan and Minhas, Fayyaz ul Amir Afsar and Mittal, Shachi and Moore, David A and Mushtaq, Shamim and Nighat, Hussain and Papathomas, Thomas and Penault-Llorca, Frederique and Perera, Rashindrie D and Pinard, Christopher J and Pinto-Cardenas, Juan Carlos and Pruneri, Giancarlo and Pusztai, Lajos and Rahman, Arman and Rajpoot, Nasir Mahmood and Rapoport, Bernardo Leon and Rau, Tilman T and Reis-Filho, Jorge S and Ribeiro, Joana M and Rimm, David and Vincent-Salomon, Anne and Salto-Tellez, Manuel and Saltz, Joel and Sayed, Shahin and Siziopikou, Kalliopi P and Sotiriou, Christos and Stenzinger, Albrecht and Sughayer, Maher A and Sur, Daniel and Symmans, Fraser and Tanaka, Sunao and Taxter, Timothy and Tejpar, Sabine and Teuwen, Jonas and Thompson, E Aubrey and Tramm, Trine and Tran, William T and van der Laak, Jeroen and van Diest, Paul J and Verghese, Gregory E and Viale, Giuseppe and Vieth, Michael and Wahab, Noorul and Walter, Thomas and Waumans, Yannick and Wen, Hannah Y and Yang, Wentao and Yuan, Yinyin and Adams, Sylvia and Bartlett, John Mark Seaverns and Loibl, Sibylle and Denkert, Carsten and Savas, Peter and Loi, Sherene and Salgado, Roberto and Specht Stovgaard, Elisabeth}, + title = {Spatial analyses of immune cell infiltration in cancer: current methods and future directions: A report of the International Immuno-Oncology Biomarker Working Group on Breast Cancer}, + doi = {10.1002/path.6165}, + year = {2023}, + abstract = {Modern histologic imaging platforms 
coupled with machine learning methods have provided new opportunities to map the spatial distribution of immune cells in the tumor microenvironment. However, there exists no standardized method for describing or analyzing spatial immune cell data, and most reported spatial analyses are rudimentary. In this review, we provide an overview of two approaches for reporting and analyzing spatial data (raster versus vector-based). We then provide a compendium of spatial immune cell metrics that have been reported in the literature, summarizing prognostic associations in the context of a variety of cancers. We conclude by discussing two well-described clinical biomarkers, the breast cancer stromal tumor infiltrating lymphocytes score and the colon cancer Immunoscore, and describe investigative opportunities to improve clinical utility of these spatial biomarkers. (c) 2023 The Pathological Society of Great Britain and Ireland.}, + url = {http://dx.doi.org/10.1002/path.6165}, + file = {Page23.pdf:pdf\Page23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {The Journal of Pathology}, + citation-count = {1}, + automatic = {yes}, + pages = {514-532}, + volume = {260}, + ss_id = {1ad43eea9d49d403c8155ebd4a85bfc87f0d05ca}, + all_ss_ids = {['1ad43eea9d49d403c8155ebd4a85bfc87f0d05ca']}, + gscites = {9}, +} + +@article{Palm23, + author = {Palmer, Megan and Seddon, James A. and van der Zalm, Marieke M. and Hesseling, Anneke C. and Goussard, Pierre and Schaaf, H. Simon and Morrison, Julie and van Ginneken, Bram and Melendez, Jaime and Walters, Elisabetta and Murphy, Keelin}, + title = {Optimising computer aided detection to identify intra-thoracic tuberculosis on chest x-ray in South African children}, + doi = {10.1371/journal.pgph.0001799}, + year = {2023}, + abstract = {Diagnostic tools for paediatric tuberculosis remain limited, with heavy reliance on clinical algorithms which include chest x-ray. Computer aided detection (CAD) for tuberculosis on chest x-ray has shown promise in adults. We aimed to measure and optimise the performance of an adult CAD system, CAD4TB, to identify tuberculosis on chest x-rays from children with presumptive tuberculosis. Chest x-rays from 620 children <13 years enrolled in a prospective observational diagnostic study in South Africa, were evaluated. All chest x-rays were read by a panel of expert readers who attributed each with a radiological reference of either 'tuberculosis' or 'not tuberculosis'. Of the 525 chest x-rays included in this analysis, 80 (40 with a reference of 'tuberculosis' and 40 with 'not tuberculosis') were allocated to an independent test set. The remainder made up the training set. The performance of CAD4TB to identify 'tuberculosis' versus 'not tuberculosis' on chest x-ray against the radiological reference read was calculated. The CAD4TB software was then fine-tuned using the paediatric training set. We compared the performance of the fine-tuned model to the original model. Our findings were that the area under the receiver operating characteristic curve (AUC) of the original CAD4TB model, prior to fine-tuning, was 0.58. After fine-tuning there was an improvement in the AUC to 0.72 (p = 0.0016). In this first-ever description of the use of CAD to identify tuberculosis on chest x-ray in children, we demonstrate a significant improvement in the performance of CAD4TB after fine-tuning with a set of well-characterised paediatric chest x-rays. CAD has the potential to be a useful additional diagnostic tool for paediatric tuberculosis. 
We recommend replicating the methods we describe using a larger chest x-ray dataset from a more diverse population and evaluating the potential role of CAD to replace a human-read chest x-ray within treatment-decision algorithms for paediatric tuberculosis.}, + url = {http://dx.doi.org/10.1371/journal.pgph.0001799}, + file = {Palm23.pdf:pdf\Palm23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PLOS Global Public Health}, + citation-count = {1}, + automatic = {yes}, + pages = {e0001799}, + volume = {3}, + all_ss_ids = {8a039fe22daf6f65a32ba02035f45a8c67b48339}, + gscites = {0}, +} + +@article{Pate10, + author = {Jignesh Patel and Eric E Sigmund and Henry Rusinek and Marcel Oei and James S Babb and Bachir Taouli}, + title = {Diagnosis of cirrhosis with intravoxel incoherent motion diffusion {MRI} and dynamic contrast-enhanced {MRI} alone and in combination: preliminary experience}, + journal = JMRI, + year = {2010}, + volume = {31}, + pages = {589--600}, + doi = {10.1002/jmri.22081}, + abstract = {To report our preliminary experience with the use of intravoxel incoherent motion (IVIM) diffusion-weighted magnetic resonance imaging (DW-MRI) and dynamic contrast-enhanced (DCE)-MRI alone and in combination for the diagnosis of liver cirrhosis.Thirty subjects (16 with noncirrhotic liver, 14 with cirrhosis) were prospectively assessed with IVIM DW-MRI (n = 27) and DCE-MRI (n = 20). IVIM parameters included perfusion fraction (PF), pseudodiffusion coefficient (D*), true diffusion coefficient (D), and apparent diffusion coefficient (ADC). Model-free DCE-MR parameters included time to peak (TTP), upslope, and initial area under the curve at 60 seconds (IAUC60). A dual input single compartmental perfusion model yielded arterial flow (Fa), portal venous flow (Fp), arterial fraction (ART), mean transit time (MTT), and distribution volume (DV). The diagnostic performances for diagnosis of cirrhosis were evaluated for each modality alone and in combination using logistic regression and receiver operating characteristic analyses. IVIM and DCE-MR parameters were compared using a generalized estimating equations model.PF, D*, D, and ADC values were significantly lower in cirrhosis (P = 0.0056-0.0377), whereas TTP, DV, and MTT were significantly increased in cirrhosis (P = 0.0006-0.0154). There was no correlation between IVIM- and DCE-MRI parameters. The highest Az (areas under the curves) values were observed for ADC (0.808) and TTP-DV (0.952 for each). The combination of ADC with DV and TTP provided 84.6\% sensitivity and 100\% specificity for diagnosis of cirrhosis.The combination of DW-MRI and DCE-MRI provides an accurate diagnosis of cirrhosis.}, + file = {Pate10.pdf:pdf\\Pate10.pdf:PDF}, + optnote = {4DCT, DIAG, RADIOLOGY}, + number = {3}, + pmid = {20187201}, + month = {3}, +} + +@conference{Pate17, + author = {Ajay Patel and Frederick J. A. Meijer and Mathias Prokop and Bram van Ginneken and Rashindra Manniesing}, + title = {Robust segmentation of the cranial cavity in non-contrast CT and CT perfusion of the brain}, + booktitle = ECR, + year = {2017}, + abstract = {PURPOSE + Cranial cavity segmentation in CT is the essential first step for subsequent image processing and automated detection of cerebral pathology. This becomes complicated in the presence of skull fractures, metallic foreign objects or due to connected soft tissues such as the orbit. A robust and accurate method is presented to segment the cranial cavity in CT images. 
+ METHOD AND MATERIALS + We propose a multi-atlas based method that uses atlas selection based on anterior skull variations, followed by a two-stage levelset refinement. The method was developed using a set of 99 non-contrast CT and 18 CT perfusion (CTP) scans obtained for emergency indications on a 320-row detector CT scanner. It was evaluated on a different set of 200 non-contrast CT and 100 CTP scans obtained for the same indications. Quality of segmentations was visually assessed. The reference standard consisted of three randomly selected orthogonal slices per patient that were manually annotated by trained observers. The corresponding slices were extracted and compared to the reference standard. Dice similarity coefficient (DSC) and 95th percentile Hausdorff distance (95% HD) were reported. + RESULTS + The segmentation results were evaluated as very good to excellent. The method achieved a mean DSC of 0.98 +- 0.03 and mean 95% HD of 0.60 +- 2.15 mm in comparison to the reference standard. + CONCLUSION + The proposed method is capable of accurate segmentation of the cranial cavity in non-contrast CT and CTP independent of gross pathology or foreign objects. The method provides a fundamental first step towards automated evaluation of cranial CT.}, + optnote = {DIAG}, +} + +@inproceedings{Pate17a, + author = {Ajay Patel and Sil van de Leemput and Mathias Prokop and Bram van Ginneken and Rashindra Manniesing}, + title = {Automatic Cerebrospinal Fluid Segmentation in Non-Contrast CT Images Using a 3D Convolutional Network}, + booktitle = MI, + year = {2017}, + volume = {10134}, + series = SPIE, + doi = {10.1117/12.2254022}, + abstract = {Segmentation of anatomical structures is fundamental in the development of computer aided diagnosis systems for cerebral pathologies. Manual annotations are laborious, time consuming and subject to human error and observer variability. Accurate quantification of cerebrospinal fluid (CSF) can be employed as a morphometric measure for diagnosis and patient outcome prediction. However, segmenting CSF in non-contrast CT images is complicated by low soft tissue contrast and image noise. In this paper we propose a state-of-the-art method using a multi-scale three-dimensional (3D) fully convolution neural network (CNN) to automatically segment all CSF within the cranial cavity. The method is trained on a small dataset comprised of four manually annotated cerebral CT images. Quantitative evaluation of a separate test dataset of four images shows a mean Dice similarity coefficient of 0.87 +- 0.01 and mean absolute volume difference of 4.77 +- 2.70%. The average prediction time was 68 seconds. Our method allows for fast and fully automated 3D segmentation of cerebral CSF in non-contrast CT images, and shows promising results despite a limited amount of training data.}, + file = {Pate17a.pdf:pdf\\Pate17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {465482443091202827}, + gscites = {8}, + ss_id = {be15a442d2cf19f1bbbd779273055a2de727380c}, + all_ss_ids = {['be15a442d2cf19f1bbbd779273055a2de727380c']}, +} + +@article{Pate17b, + author = {Ajay Patel and Bram van Ginneken and Frederick J. A. Meijer and Ewoud J. 
van Dijk and Mathias Prokop and Rashindra Manniesing}, + title = {Robust Cranial Cavity Segmentation in CT and CT Perfusion Images of Trauma and Suspected Stroke Patients}, + journal = MIA, + year = {2017}, + volume = {36}, + month = {2}, + pages = {216-228}, + doi = {10.1016/j.media.2016.12.002}, + url = {http://dx.doi.org/10.1016/j.media.2016.12.002}, + abstract = {A robust and accurate method is presented for the segmentation of the cranial cavity in computed tomography (CT) and CT perfusion (CTP) images. The method consists of multi-atlas registration with label fusion followed by a geodesic active contour levelset refinement of the segmentation. Pre-registration atlas selection based on differences in anterior skull anatomy reduces computation time whilst optimising performance. The method was evaluated on a large clinical dataset of 573 acute stroke and trauma patients that received a CT or CTP in our hospital in the period February 2015 to December 2015. The database covers a large spectrum of the anatomical and pathological variations that is typically observed in everyday clinical practice. Three orthogonal slices were randomly selected per patient and manually annotated, resulting in 1659 reference annotations. Segmentations were initially visually inspected for the entire study cohort to assess failures. A total of 20 failures were reported. Quantitative evaluation in comparison to the reference dataset showed a mean Dice coefficient of 98.36 +- 2.59%. The results demonstrate that the method closely approaches the high performance of expert manual annotation.}, + file = {Pate17b.pdf:pdf\\Pate17b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28011374}, + publisher = {Elsevier {BV}}, + gsid = {5343166311394276117}, + gscites = {26}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/170408}, + ss_id = {3f054da4c1d6606941c18c65ae6e435a49758b7b}, + all_ss_ids = {['3f054da4c1d6606941c18c65ae6e435a49758b7b']}, +} + +@inproceedings{Pate18, + author = {Ajay Patel and Rashindra Manniesing}, + title = {A convolutional neural network for intracranial hemorrhage detection in non-contrast {CT}}, + booktitle = MI, + year = {2018}, + volume = {10575}, + series = SPIE, + doi = {10.1117/12.2292975}, + abstract = {The assessment of the presence of intracranial hemorrhage is a crucial step in the work-up of patients requiring emergency care. Fast and accurate detection of intracranial hemorrhage can aid treating physicians by not only expediting and guiding diagnosis, but also supporting choices for secondary imaging, treatment and intervention. However, the automatic detection of intracranial hemorrhage is complicated by the variation in appearance on non-contrast CT images as a result of differences in etiology and location. We propose a method using a convolutional neural network (CNN) for the automatic detection of intracranial hemorrhage. The method is trained on a dataset comprised of cerebral CT studies for which the presence of hemorrhage has been labeled for each axial slice. A separate test dataset of 20 images is used for quantitative evaluation and shows a sensitivity of 0.87, specificity of 0.97 and accuracy of 0.95. The average processing time for a single three-dimensional (3D) CT volume was 2.7 seconds. 
The proposed method is capable of fast and automated detection of intracranial hemorrhages in non-contrast CT without being limited to a specific subtype of pathology.}, + file = {:pdf/Pate18.pdf:PDF}, + optnote = {DIAG}, + month = {2}, + gsid = {18260974248343960899}, + gscites = {4}, + ss_id = {3a0848c228f2bb19ce1afd4871105eb6a317a3dd}, + all_ss_ids = {['3a0848c228f2bb19ce1afd4871105eb6a317a3dd']}, +} + +@article{Pate19, + author = {Ajay Patel and Sil C. van de Leemput and Mathias Prokop and Bram van Ginneken and Rashindra Manniesing}, + title = {Image Level Training and Prediction: Intracranial Hemorrhage Identification in 3D Non-Contrast CT}, + journal = _IEEE_Access_, + year = {2019}, + volume = {7}, + issue = {1}, + pages = {92355-92364}, + doi = {10.1109/ACCESS.2019.2927792}, + url = {https://ieeexplore.ieee.org/document/8758429}, + abstract = {Current hardware restrictions pose limitations on the use of convolutional neural networks for medical image analysis. There is a large trade-off between network architecture and input image size. For this reason, identification and classification tasks are commonly approached with patch or region based methods often utilizing only local contextual information during training and at inference. Here, a method is presented for the identification of intracranial hemorrhage (ICH) in three-dimensional (3D) non-contrast computed tomography (CT). The method combines a convolutional neural network and recurrent neural network in the form of bidirectional long short-term memory (LSTM) for ICH identification at image level. A convolutional neural network is trained for the identification of ICH in axial slices. LSTM is used to analyze the sequential information obtained from slice level classifications. The method is trained end-to-end using full high-resolution 3D non-contrast CTs. At inference it produces a binary classification with respect to the presence of ICH. A total of 1554 cranial CTs were used to train and validate the method and a separate dataset of 386 images was used for testing. Quantitative analysis showed an area under receiver operating characteristic curve of 0.96. The average time to classification was approximately 0.5 seconds. classification of whole 3D images is therefore possible without the need for pre-processing.}, + file = {:pdf/Pate19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6029967581749349651}, + gscites = {40}, + ss_id = {59399693fa81c3142552dac2e4456eb066e62c60}, + all_ss_ids = {['59399693fa81c3142552dac2e4456eb066e62c60']}, +} + +@article{Pate19a, + author = {Patel, A. and Schreuder, F. H. B. M. and Klijn, C. J. M. and Prokop, M. and van Ginneken, B. and Marquering, H. A. and Roos, Y. B. W. E. M. and Baharoglu, M. I. and Meijer, F. J. A. and Manniesing, R.}, + title = {Intracerebral haemorrhage segmentation in non-contrast CT}, + journal = NATSCIREP, + year = {2019}, + volume = {9}, + issue = {1}, + pages = {17858}, + doi = {10.1038/s41598-019-54491-6}, + abstract = {A 3-dimensional (3D) convolutional neural network is presented for the segmentation and quantification of spontaneous + intracerebral haemorrhage (ICH) in non-contrast computed tomography (NCCT). The method utilises a combination of + contextual information on multiple scales for fast and fully automatic dense predictions. To handle a large class imbalance + present in the data, a weight map is introduced during training. The method was evaluated on two datasets of 25 and 50 + patients respectively. 
The reference standard consisted of manual annotations for each ICH in the dataset. Quantitative + analysis showed a median Dice similarity coefficient of 0.91 [0.87 - 0.94] and 0.90 [0.85 - 0.92] for the two test datasets in + comparison to the reference standards. Evaluation of a separate dataset of 5 patients for the assessment of the observer + variability produced a mean Dice similarity coefficient of 0.95 +/- 0.02 for the inter-observer variability and 0.97 +/- 0.01 for the + intra-observer variability. The average prediction time for an entire volume was 104 +/- 15 seconds. The results demonstrate + that the method is accurate and approaches the performance of expert manual annotation.}, + file = {:pdf/Pate19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31780815}, + month = {11}, + gsid = {102660831423117160}, + gscites = {36}, + ss_id = {03254edc77c0459ad9fd9ea458c3ae171c0a4247}, + all_ss_ids = {['03254edc77c0459ad9fd9ea458c3ae171c0a4247']}, +} + +@phdthesis{Pate23, + author = {Ajay Patel}, + title = {Automated Image Analysis of Cranial Non-Contrast CT}, + url = {https://repository.ubn.ru.nl/handle/2066/292990}, + abstract = {Stroke and intracranial hemorrhage (a brain bleed) are serious medical conditions that require fast and accurate diagnosis to aid clinicians in making treatment decisions. Computed tomography (CT) is a widely available and fast imaging technique used for diagnosis, but it relies on interpretation by a clinician. To aid in the diagnosis of stroke and intracranial hemorrhage, artificial intelligence algorithms for computer-aided diagnosis (CAD) have been developed to automatically analyze CT images. This research presents different methods that demonstrate accurate segmentations of large cerebral anatomy and the ability to quantify and segment intracerebral hemorrhages for further analysis. Whole 3D non-contrast CT images were also analyzed automatically to identify the presence of intracranial hemorrhage with high sensitivity. With further development, CAD systems will be able to assist physicians in diagnosing and predicting outcomes of stroke and intracranial hemorrhage in clinical settings.}, + copromotor = {R. Manniesing}, + file = {:pdf/Pate23.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and M. Prokop}, + school = {Radboud University, Nijmegen}, + year = {2023}, +} + +@mastersthesis{Paye20, + author = {Tristan Payer}, + title = {AI-assisted PD-L1 scoring in non-small-cell lung cancer}, + year = {2020}, + abstract = {With 1.6 million deaths in 2012, is lung cancer the leading cause of cancer-related death worldwide. Lung cancer can be divided into two main sub-types. Small cell lung cancer (SCLC) and non-small-cell lung cancer (NSCLC), with NSCLC making up about 85% of all cases. For a long time chemotherapy has been the only line of treatment for NSCLC patients. Immunotherapy by the drug pembrolizumab is a novel range of treatment that aims to improve the body's own immune system in order to allow it to successfully fight the cancer. Unfortunately, not all patient respond to this treatment and patient selection plays therefore an important role. PD-L1 expression is currently the only biomarker that is used to estimate the efficacy of treatment with pembrolizumab. The amount of PD-L1 expression is measured in the Tumor Proportion Score (TPS). This is a ratio between cells that show membrane staining and cells that do not show this membrane staining. 
Before the treatment with pembrolizumab it is required that the TPS is estimated. Estimating the TPS is a difficult and time-consuming task for pathologists. Furthermore, pathologist scoring suffers from interobserver variability. This results in a need for a reliable, robust automated method to estimate the TPS. In this work, three conceptually different neural networks are presented aiming for the automatic classification and location of PD-L1-positive and PD-L1-negative tumor cells and immune cells.}, + file = {Paye20.pdf:pdf\\Paye20.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + journal = {Master thesis}, + all_ss_ids = {ed8cbf0b3373cf1dcadd2718dfd3daf6fbe068f3}, + gscites = {0}, +} + +@inproceedings{Peem13, + author = {Maurice Peemen and Arnaud Arindra Adiyoso Setio and Bart Mesman and Henk Corporaal}, + title = {Memory-centric accelerator design for Convolutional Neural Networks}, + booktitle = {Computer Design (ICCD), 2013 IEEE 31st International Conference on}, + year = {2013}, + pages = {13--19}, + doi = {10.1109/ICCD.2013.6657019}, + url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=6657019}, + file = {Peem13.pdf:pdf\\Peem13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {10}, +} + +@conference{Peet23, + author = {Peeters, Dr\'{e} and Alves, Nat\'{a}lia and Venkadesh, K and Dinnessen, R and Saghir, Z and Scholten, E and Huisman, H and Schaefer-Prokop, C and Vliegenthart, R and Prokop, M and Jacobs, C}, + booktitle = ECR, + title = {The effect of applying an uncertainty estimation method on the performance of a deep learning model for nodule malignancy risk estimation}, + abstract = {Purpose: Artificial Intelligence (AI) algorithms often lack uncertainty estimation for classification tasks. Uncertainty estimation may however be an important requirement for clinical adoption of AI algorithms. In this study, we integrate a method for uncertainty estimation into a previously developed AI algorithm and investigate the performance when applying different uncertainty thresholds. + Methods and materials: We used a retrospective external validation dataset from the Danish Lung Cancer Screening Trial, containing 818 benign and 65 malignant nodules. Our previously developed AI algorithm for nodule malignancy risk estimation was extended with a method for measuring the prediction uncertainty. The uncertainty score (UnS) was calculated by measuring the standard deviation over 20 different predictions of an ensemble of AI models. Two UnS thresholds at the 90th and 95th percentile were applied to retain 90% and 95% of all cases as certain, respectively. For these scenarios, we calculated the area under the ROC curve (AUC) for certain and uncertain cases, and for the full set of nodules. + Results: On the full set of 883 nodules, the AUC of the AI risk score was 0.932. For the 90th and 95th percentile, the AUC of the AI risk score for certain cases was 0.934 and 0.935, respectively, and for the uncertain cases was 0.710 and 0.688, respectively. + Conclusion: In this retrospective data set, we demonstrate that integrating an uncertainty estimation method into a deep learning-based nodule malignancy risk estimation algorithm slightly increased the performance on certain cases. The AI performance is substantially worse on uncertain cases and therefore in need of human visual review. + Limitations: This study is a retrospective analysis on data from one single lung cancer screening trial. 
More external validation is needed.}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + +@conference{Pegg17a, + author = {Sjoert AH Pegge and Midas Meijs and Mathias Prokop and Rashindra Manniesing and Frederick JA Meijer}, + title = {Color-mapping of {4D-CTA} for the detection and classification of cranial arteriovenous fistulas}, + booktitle = ESNR, + year = {2017}, + abstract = {Nowadays, 4D-CTA is available as a non-invasive alternative to digital subtraction angiography (DSA), with reported high accuracy in the evaluation of cranial arteriovenous fistulas (AVF). Optimized processing of 4D-CTA is crucial, considering the large amount of data generated. We present a proof-of-concept study for color-mapping of 4D-CTA for the detection and classification of cranial AVFs.}, + file = {Pegg17a.pdf:pdf\\Pegg17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Peis22, + author = {Peisen, Felix and H\"{a}nsch, Annika and Hering, Alessa and Brendlin, Andreas S. and Afat, Saif and Nikolaou, Konstantin and Gatidis, Sergios and Eigentler, Thomas and Amaral, Teresa and Moltz, Jan H. and Othman, Ahmed E.}, + title = {Combination of Whole-Body Baseline CT Radiomics and Clinical Parameters to Predict Response and Survival in a Stage-IV Melanoma Cohort Undergoing Immunotherapy}, + doi = {10.3390/cancers14122992}, + year = {2022}, + abstract = {Background: This study investigated whether a machine-learning-based combination of radiomics and clinical parameters was superior to the use of clinical parameters alone in predicting therapy response after three months, and overall survival after six and twelve months, in stage-IV malignant melanoma patients undergoing immunotherapy with PD-1 checkpoint inhibitors and CTLA-4 checkpoint inhibitors. Methods: A random forest model using clinical parameters (demographic variables and tumor markers = baseline model) was compared to a random forest model using clinical parameters and radiomics (extended model) via repeated 5-fold cross-validation. For this purpose, the baseline computed tomographies of 262 stage-IV malignant melanoma patients treated at a tertiary referral center were identified in the Central Malignant Melanoma Registry, and all visible metastases were three-dimensionally segmented (n = 6404). Results: The extended model was not significantly superior compared to the baseline model for survival prediction after six and twelve months (AUC (95% CI): 0.664 (0.598, 0.729) vs. 0.620 (0.545, 0.692) and AUC (95% CI): 0.600 (0.526, 0.667) vs. 0.588 (0.481, 0.629), respectively). The extended model was not significantly superior compared to the baseline model for response prediction after three months (AUC (95% CI): 0.641 (0.581, 0.700) vs. 0.656 (0.587, 0.719)). Conclusions: The study indicated a potential, but non-significant, added value of radiomics for six-month and twelve-month survival prediction of stage-IV melanoma patients undergoing immunotherapy.}, + url = {http://dx.doi.org/10.3390/cancers14122992}, + file = {Peis22.pdf:pdf\Peis22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancers}, + citation-count = {9}, + automatic = {yes}, + pages = {2992}, + volume = {14}, +} + +@article{Peis23, + author = {Peisen, Felix and Gerken, Annika and Hering, Alessa and Dahm, Isabel and Nikolaou, Konstantin and Gatidis, Sergios and Eigentler, Thomas K. and Amaral, Teresa and Moltz, Jan H. 
and Othman, Ahmed E.}, + title = {Can Whole-Body Baseline CT Radiomics Add Information to the Prediction of Best Response, Progression-Free Survival, and Overall Survival of Stage IV Melanoma Patients Receiving First-Line Targeted Therapy: A Retrospective Register Study}, + doi = {10.3390/diagnostics13203210}, + year = {2023}, + abstract = {Background: The aim of this study was to investigate whether the combination of radiomics and clinical parameters in a machine-learning model offers additive information compared with the use of only clinical parameters in predicting the best response, progression-free survival after six months, as well as overall survival after six and twelve months in patients with stage IV malignant melanoma undergoing first-line targeted therapy. Methods: A baseline machine-learning model using clinical variables (demographic parameters and tumor markers) was compared with an extended model using clinical variables and radiomic features of the whole tumor burden, utilizing repeated five-fold cross-validation. Baseline CTs of 91 stage IV malignant melanoma patients, all treated in the same university hospital, were identified in the Central Malignant Melanoma Registry and all metastases were volumetrically segmented (n = 4727). Results: Compared with the baseline model, the extended radiomics model did not add significantly more information to the best-response prediction (AUC [95% CI] 0.548 (0.188, 0.808) vs. 0.487 (0.139, 0.743)), the prediction of PFS after six months (AUC [95% CI] 0.699 (0.436, 0.958) vs. 0.604 (0.373, 0.867)), or the overall survival prediction after six and twelve months (AUC [95% CI] 0.685 (0.188, 0.967) vs. 0.766 (0.433, 1.000) and AUC [95% CI] 0.554 (0.163, 0.781) vs. 0.616 (0.271, 1.000), respectively). Conclusions: The results showed no additional value of baseline whole-body CT radiomics for best-response prediction, progression-free survival prediction for six months, or six-month and twelve-month overall survival prediction for stage IV melanoma patients receiving first-line targeted therapy. These results need to be validated in a larger cohort.}, + url = {http://dx.doi.org/10.3390/diagnostics13203210}, + file = {Peis23.pdf:pdf\Peis23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostics}, + citation-count = {0}, + automatic = {yes}, + pages = {3210}, + volume = {13}, +} + +@article{Pelo07, + author = {P. Peloschek and J. Sailer and M. Weber and C. J. Herold and M. Prokop and C. M. 
Schaefer-Prokop}, + title = {Pulmonary nodules: sensitivity of maximum intensity projection versus that of volume rendering of {3D} multidetector {CT} data}, + journal = Radiology, + year = {2007}, + volume = {243}, + pages = {561--569}, + file = {Pelo07.pdf:pdf\\Pelo07.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {17456878}, + month = {5}, + gsid = {9887809753866450962}, + gscites = {71}, +} + +@article{Penz21, + author = {Penzkofer, Tobias and Padhani, Anwar R and Turkbey, Baris and Haider, Masoom A and Huisman, Henkjan and Walz, Jochen and Salomon, Georg and Schoots, Ivo G and Richenberg, Jonathan and Villeirs, Geert and Panebianco, Valeria and Rouviere, Olivier and Logager, Vibeke Berg and Barentsz, Jelle}, + title = {ESUR/ESUI position paper: developing artificial intelligence for precision diagnosis of prostate cancer using magnetic resonance imaging.}, + doi = {10.1007/s00330-021-08021-6}, + abstract = {Artificial intelligence developments are essential to the successful deployment of community-wide, MRI-driven prostate cancer diagnosis. AI systems should ensure that the main benefits of biopsy avoidance are delivered while maintaining consistent high specificities, at a range of disease prevalences. Since all current artificial intelligence / computer-aided detection systems for prostate cancer detection are experimental, multiple developmental efforts are still needed to bring the vision to fruition. Initial work needs to focus on developing systems as diagnostic supporting aids so their results can be integrated into the radiologists' workflow including gland and target outlining tasks for fusion biopsies. Developing AI systems as clinical decision-making tools will require greater efforts. The latter encompass larger multicentric, multivendor datasets where the different needs of patients stratified by diagnostic settings, disease prevalence, patient preference, and clinical setting are considered. AI-based, robust, standard operating procedures will increase the confidence of patients and payers, thus enabling the wider adoption of the MRI-directed approach for prostate cancer diagnosis. KEY POINTS: * AI systems need to ensure that the benefits of biopsy avoidance are delivered with consistent high specificities, at a range of disease prevalence. * Initial work has focused on developing systems as diagnostic supporting aids for outlining tasks, so they can be integrated into the radiologists' workflow to support MRI-directed biopsies. * Decision support tools require a larger body of work including multicentric, multivendor studies where the clinical needs, disease prevalence, patient preferences, and clinical setting are additionally defined.}, + journal = ER, + month = may, + pmid = {33991226}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/245173}, + ss_id = {b7839ad75c2d6ee26bd37e47accf1cc4389e297f}, + all_ss_ids = {['b7839ad75c2d6ee26bd37e47accf1cc4389e297f']}, + gscites = {29}, +} + +@article{Peri21, + author = {Perik, T. H. and van Genugten, E. A. J. and Aarntzen, E. H. J. G. and Smit, E. J. and Huisman, H. J. and Hermans, J. J.}, + title = {Quantitative CT perfusion imaging in patients with pancreatic cancer: a systematic review}, + doi = {10.1007/s00261-021-03190-w}, + abstract = {Pancreatic ductal adenocarcinoma (PDAC) is the third leading cause of cancer-related death with a 5-year survival rate of 10%. 
Quantitative CT perfusion (CTP) can provide additional diagnostic information compared to the limited accuracy of the current standard, contrast-enhanced CT (CECT). This systematic review evaluates CTP for diagnosis, grading, and treatment assessment of PDAC. The secondary goal is to provide an overview of scan protocols and perfusion models used for CTP in PDAC. The search strategy combined synonyms for 'CTP' and 'PDAC.' Pubmed, Embase, and Web of Science were systematically searched from January 2000 to December 2020 for studies using CTP to evaluate PDAC. The risk of bias was assessed using QUADAS-2. 607 abstracts were screened, of which 29 were selected for full-text eligibility. 21 studies were included in the final analysis with a total of 760 patients. All studies comparing PDAC with non-tumorous parenchyma found significant CTP-based differences in blood flow (BF) and blood volume (BV). Two studies found significant differences between pathological grades. Two other studies showed that BF could predict neoadjuvant treatment response. A wide variety in kinetic models and acquisition protocol was found among included studies. Quantitative CTP shows a potential benefit in PDAC diagnosis and can serve as a tool for pathological grading and treatment assessment; however, clinical evidence is still limited. To improve clinical use, standardized acquisition and reconstruction parameters are necessary for interchangeability of the perfusion parameters.}, + file = {:pdf/Peri21.pdf:PDF}, + journal = {Abdominal Radiology}, + year = {2021}, + ss_id = {90b8115c01d3e9da20b6a5ac11139dc9d9946127}, + all_ss_ids = {['90b8115c01d3e9da20b6a5ac11139dc9d9946127']}, + gscites = {7}, +} + +@book{Pete20, + author = {Jens Petersen and Raul San Jose Estepar and Alexander Schmidt-Richberg and Sarah Gerard and Bianca Lassen-Schmidt and Colin Jacobs and Reinhard Beichel and Kensaku Mori}, + title = {Thoracic Image Analysis}, + year = {2020}, + volume = {12502}, + series = LNCS, + publisher = {Springer}, + doi = {10.1007/978-3-030-62469-9}, + url = {https://www.springer.com/gp/book/9783030624682}, + abstract = {This book constitutes the proceedings of the Second International Workshop on Thoracic Image Analysis, TIA 2020, held in Lima, Peru, in October 2020. Due to COVID-19 pandemic the conference was held virtually. COVID-19 infection has brought a lot of attention to lung imaging and the role of CT imaging in the diagnostic workflow of COVID-19 suspects is an important topic. 
The 14 full papers presented deal with all aspects of image analysis of thoracic data, including: image acquisition and reconstruction, segmentation, registration, quantification, visualization, validation, population-based modeling, biophysical modeling (computational anatomy), deep learning, image analysis in small animals, outcome-based research and novel infectious disease applications.}, + file = {Pete20.pdf:pdf/Pete20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {2a7dafe1287670068300ff77401923f7e151b9f4}, + gscites = {0}, +} + +@article{Pfaf22, + author = {Pfaffenrot, Viktor and Koopmans, Peter J.}, + title = {Magnetization transfer weighted laminar fMRI with multi-echo FLASH}, + doi = {10.1016/j.neuroimage.2022.119725}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.neuroimage.2022.119725}, + file = {Pfaf22.pdf:pdf\Pfaf22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {NeuroImage}, + citation-count = {1}, + automatic = {yes}, + pages = {119725}, + volume = {264}, +} + +@article{Pfob22, + author = {Pfob, Andr\'{e} and Sidey-Gibbons, Chris and Barr, Richard G. and Duda, Volker and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Fastner, Sarah and Gomez, Christina and Goncalo, Manuela and Gruber, Ines and Hahn, Markus and Hennigs, Andr\'{e} and Kapetas, Panagiotis and Lu, Sheng-Chieh and Nees, Juliane and Ohlinger, Ralf and Riedel, Fabian and Rutten, Matthieu and Schaefgen, Benedikt and Schuessler, Maximilian and Stieber, Anne and Togawa, Riku and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Xu, Cai and Rauch, Geraldine and Heil, Joerg and Golatta, Michael}, + title = {The importance of multi-modal imaging and clinical information for humans and AI-based algorithms to classify breast masses (INSPiRED 003): an international, multicenter analysis}, + doi = {10.1007/s00330-021-08519-z}, + year = {2022}, + abstract = {Abstract Objectives + AI-based algorithms for medical image analysis showed comparable performance to human image readers. However, in practice, diagnoses are made using multiple imaging modalities alongside other data sources. We determined the importance of this multi-modal information and compared the diagnostic performance of routine breast cancer diagnosis to breast ultrasound interpretations by humans or AI-based algorithms. + + Methods + Patients were recruited as part of a multicenter trial (NCT02638935). The trial enrolled 1288 women undergoing routine breast cancer diagnosis (multi-modal imaging, demographic, and clinical information). Three physicians specialized in ultrasound diagnosis performed a second read of all ultrasound images. We used data from 11 of 12 study sites to develop two machine learning (ML) algorithms using unimodal information (ultrasound features generated by the ultrasound experts) to classify breast masses which were validated on the remaining study site. The same ML algorithms were subsequently developed and validated on multi-modal information (clinical and demographic information plus ultrasound features). We assessed performance using area under the curve (AUC). + + Results + Of 1288 breast masses, 368 (28.6%) were histopathologically malignant. In the external validation set (n = 373), the performance of the two unimodal ultrasound ML algorithms (AUC 0.83 and 0.82) was commensurate with performance of the human ultrasound experts (AUC 0.82 to 0.84; p for all comparisons > 0.05). 
The multi-modal ultrasound ML algorithms performed significantly better (AUC 0.90 and 0.89) but were statistically inferior to routine breast cancer diagnosis (AUC 0.95, p for all comparisons <= 0.05). + + Conclusions + The performance of humans and AI-based algorithms improves with multi-modal information. + + Key Points + * The performance of humans and AI-based algorithms improves with multi-modal information. + * Multimodal AI-based algorithms do not necessarily outperform expert humans. + * Unimodal AI-based algorithms do not represent optimal performance to classify breast masses. + }, + url = {http://dx.doi.org/10.1007/s00330-021-08519-z}, + file = {Pfob22.pdf:pdf\Pfob22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + citation-count = {7}, + automatic = {yes}, + pages = {4101-4115}, + volume = {32}, +}
+ +@inproceedings{Phil13, + author = {Philipsen, R.H.H.M. and Maduskar, P. and Hogeweg, L. and van Ginneken, B.}, + title = {Normalization of Chest Radiographs}, + booktitle = MI, + year = {2013}, + volume = {8670}, + series = SPIE, + pages = {86700G}, + doi = {10.1117/12.2008238}, + abstract = {The clinical use of computer-aided diagnosis (CAD) systems is increasing. A possible limitation of CAD systems is that they are typically trained on data from a small number of sources and as a result, they may not perform optimally on data from different sources. In particular for chest radiographs, it is known that acquisition settings, detector technology, proprietary post-processing and, in the case of analog images, digitization, can all influence the appearance and statistical properties of the image. In this work we investigate if a simple energy normalization procedure is sufficient to increase the robustness of CAD in chest radiography. We evaluate the performance of a supervised lung segmentation algorithm, trained with data from one type of machine, on twenty images each from five different sources. The results, expressed in terms of Jaccard index, increase from 0.530 +- 0.290 to 0.914 +- 0.041 when energy normalization is omitted or applied, respectively. We conclude that energy normalization is an effective way to make the performance of lung segmentation satisfactory on data from different sources.}, + file = {Phil13.pdf:pdf\\Phil13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {7051742070683067485}, + gscites = {10}, + ss_id = {7841c4cf59be69cbba55ff31f302d0e1d6323bff}, + all_ss_ids = {['7841c4cf59be69cbba55ff31f302d0e1d6323bff']}, +} + +@conference{Phil14a, + author = {Philipsen, R.H.H.M. and Maduskar, P. and Hogeweg, L. and Melendez, J. and S\'{a}nchez, C. I. and van Ginneken, B.}, + title = {Robust Computer-Aided Detection of Tuberculosis in Chest Radiographs Using Energy Normalization}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE The performance of computer-aided detection (CAD) algorithms for chest radiography can be influenced by variations in image data coming from different sources. Acquisition settings, detector technology and proprietary post-processing all influence the appearance of radiographs. We developed an algorithm to standardize the appearance of chest radiographs (CXRs) in order to remove these variations prior to image analysis and evaluated its utility for a CAD system aimed at tuberculosis (TB) detection. METHOD AND MATERIALS Three data sets of 200 digital CXRs were used: 100 normal / 100 abnormal cases from an Odelca DR system acquired in Zambia; 100 normal / 100 abnormal cases from a digital Atomed mobile X-ray system acquired in The Gambia; 127 normal / 73 abnormal cases from a Philips Digital Diagnost system acquired in the United Kingdom.
Reference standard for suspicion of TB was set by an expert reader. To standardize the appearance of CXRs, the image is decomposed into frequency bands using hierarchical unsharp masking. In a training set the average energy (standard deviation) of each frequency band in the central part of the image is determined. Each energy band is scaled to this reference energy, and the input image is reconstructed from the scaled frequency bands. Subsequently the lung fields and mediastinum are segmented via pixel classification and the energy normalization is repeated for region containing the union of lung fields and mediastinum. Cases were processed by a CAD system (CAD4TB v3.07, Diagnostic Image Analysis Group, Nijmegen, The Netherlands) with and without applying the energy normalization method. This CAD system was trained with cases from an Odelca DR system. Performance was measured as area under the ROC curve (Az). Pairwise comparisons were made with bootstrap estimation, considering p<0.05 significant. RESULTS Without normalization, CAD4TB obtained an Az of 0.80, 0.61 and 0.47 for the data from Zambia, The Gambia, and the United Kingdom, respectively. With normalization, Az increased to 0.87, 0.80 and 0.84. Differences for the data from The Gambia and the United Kingdom were significant. CONCLUSION The robustness of CAD for detection of signs of TB on CXRs is improved by standardizing the radiographs prior to analysis. CLINICAL RELEVANCE/APPLICATION An automated reading system for CXRs that can be used reliably on data from any digital unit has great potential in TB screening and active case finding.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Phil15, + author = {Philipsen, Rick and Maduskar, Pragnya and Hogeweg, Laurens and Melendez, Jaime and S\'{a}nchez, Clara I. and van Ginneken, Bram}, + title = {Localized energy-based normalization of medical images: application to chest radiography}, + journal = TMI, + year = {2015}, + volume = {34}, + pages = {1965-75}, + doi = {10.1109/TMI.2015.2418031}, + abstract = {Automated quantitative analysis systems for medical images often lack the capability to successfully process images from multiple sources. Normalization of such images prior to further analysis is a possible solution to this limitation. This work presents a general method to normalize medical images and thoroughly investigates its effectiveness for chest radiography (CXR). The method starts with an energy decomposition of the image in different bands. Next, each band's localized energy is scaled to a reference value and the image is reconstructed. We investigate iterative and local application of this technique. The normalization is applied iteratively to the lung fields on six datasets from different sources, each comprising 50 normal CXRs and 50 abnormal CXRs. The method is evaluated in three supervised computer-aided detection tasks related to CXR analysis and compared to two reference normalization methods. In the first task, automatic lung segmentation, the average Jaccard overlap significantly increased from 0.72 +- 0.30 and 0.87 +- 0.11 for both reference methods to 0.89 +- 0.09 (p < 0.01) with normalization. The second experiment was aimed at segmentation of the clavicles. The reference methods had an average Jaccard index of 0.57 +- 0.26 and 0.53 +- 0.26; with normalization this significantly increased to 0.68 +- 0.23 (p < 0.01). The third experiment was detection of tuberculosis related abnormalities in the lung fields.
The average area under the Receiver Operating Curve increased significantly from 0.72 +/- 0.14 and 0.79 +/- 0.06 using the reference methods to 0.85 +/- 0.05 (p < 0.01) with normalization. We conclude that the normalization can be successfully applied in chest radiography and makes supervised systems more generally applicable to data from different sources.}, + file = {Phil15.pdf:pdf\\Phil15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {25838517}, + month = {9}, + gsid = {15559638283935030636}, + gscites = {35}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/153295}, + ss_id = {31be3cce5c1e9ae715c5fa2d97d1c0ea5b23223a}, + all_ss_ids = {['31be3cce5c1e9ae715c5fa2d97d1c0ea5b23223a']}, +} + +@conference{Phil15a, + author = {Rick H.H.M. Philipsen and Clara I. S\'{a}nchez and P. Maduskar and J. Melendez and B. van Ginneken and W.J. Lew}, + title = {Objective Computerized Chest Radiography Screening to Detect Tuberculosis in the Philippines}, + booktitle = {Union World Conference on Lung Health}, + year = {2015}, + abstract = {Background In the Palawan provincial areas of The Philippines, screening for pulmonary tuberculosis (TB) is performed in a WHO project by symptom screening combined with chest radiography; if at least one of these is positive, subjects undergo molecular Xpert testing. However, interpreting chest radiographs (CXRs) requires skilled personnel whose availability is limited and training is expensive and logistically challenging. Computerized reading is a potential solution: CXRs are objectively interpreted by software for the presence of TB related abnormalities. Operational costs are minimal and a result is available within one minute. We retrospectively engaged computerized reading in the DetecTB (Diagnostic Enhanced Tools for Extra Cases of TB) project, an intensified case finding program targeting prison settings and high-risk communities on Palawan Island in the Philippines. Methods A retrospective study was carried out on all cases in the DetecTB program available mid 2014. We compared the performance of CAD4TB v4.10, a commercially available software package, against the performance of the physician's readings. CAD4TB scores a posterior-anterior CXR for TB related abnormalities on a scale from 0 (normal) to 100 (abnormal). We performed Receiver Operator Characteristic (ROC) curve analysis to compare the computerized and human reading and visually inspected CXRs marked as normal by the physician but as highly suspicious by the software. Results In total, 12,256 radiographs were available for analysis. The number of cases positively read by the physicians was 3,068 (25.0%). 379 subjects (3.1%) tested positive on Xpert. Defining these cases as positive, and the remaining cases as negative, the area under the ROC curve (Az) achieved by CAD4TB v4.10 was 0.911 [CI: 0.895-0.926]. The software achieved 90.0% sensitivity at 80.0% specificity. The physician had a sensitivity of 97.6% and a specificity of 93.1%, which is slightly better than the software. However, this reading is biased as cases that were considered normal by the physician were considered negative by definition in our study design. An independent human observer considered 78 out of the 100 cases read as normal by the physician with the highest CAD4TB scores to be abnormal.
Conclusion Computerized reading provides high sensitivity and specificity and may be used to assist or possibly replace a human reader in active case finding programs, and thus improve screening throughput.}, + optnote = {DIAG}, +} + +@article{Phil15b, + author = {R.H.H.M. Philipsen and C. I. S\'{a}nchez and P. Maduskar and J. Melendez and L. Peters-Bax and J.G. Peter and R. Dawson and G. Theron and K. Dheda and B. van Ginneken}, + title = {Automated chest-radiography as a triage for {X}pert testing in resource-constrained settings: a prospective study of diagnostic accuracy and costs}, + journal = NATSCIREP, + year = {2015}, + volume = {5}, + doi = {10.1038/srep12215}, + abstract = {Molecular tests hold great potential for tuberculosis (TB) diagnosis, but are costly, time consuming, and HIV-infected patients are often sputum scarce. Therefore, alternative approaches are needed. We evaluated automated digital chest radiography (ACR) as a rapid and cheap pre-screen test prior to Xpert MTB/RIF (Xpert). 388 suspected TB subjects underwent chest radiography, Xpert and sputum culture testing. Radiographs were analysed by computer software (CAD4TB) and specialist readers, and abnormality scores were allocated. A triage algorithm was simulated in which subjects with a score above a threshold underwent Xpert. We computed sensitivity, specificity, cost per screened subject (CSS), cost per notified TB case (CNTBC) and throughput for different diagnostic thresholds. 18.3% of subjects had culture positive TB. For Xpert alone, sensitivity was 78.9%, specificity 98.1%, CSS $13.09 and CNTBC $90.70. In a pre-screening setting where 40% of subjects would undergo Xpert, CSS decreased to $6.72 and CNTBC to $54.34, with eight TB cases missed and throughput increased from 45 to 113 patients/day. Specialists, on average, read 57% of radiographs as abnormal, reducing CSS ($8.95) and CNTBC ($64.84). ACR pre-screening could substantially reduce costs, and increase daily throughput with few TB cases missed. These data inform public health policy in resource-constrained settings.}, + file = {Phil15b.pdf:pdf\\Phil15b.pdf:PDF}, + optnote = {DIAG}, + pmid = {26212560}, + month = {7}, + gsid = {6367627082351532761}, + gscites = {61}, + ss_id = {1e265722259ad5880226c71ac082a6c8d6f11d84}, + all_ss_ids = {['1e265722259ad5880226c71ac082a6c8d6f11d84']}, +} + +@phdthesis{Phil19, + author = {Rick Philipsen}, + title = {Automated chest radiography reading. Improvements, validation, and cost-effectiveness analysis}, + url = {https://repository.ubn.ru.nl/handle/2066/200677}, + abstract = {Tuberculosis (TB) is one of the top 10 leading causes of death worldwide, now ranking above HIV/AIDS. Despite the fact that with timely diagnosis and correct treatment most people who develop TB can be cured, a staggering 1.7 million were estimated to have died from the disease in 2016, and a total of 10.7 million people fell ill. The World Health Organization's "End TB strategy" was initiated in 2016 to end the global TB epidemic with targets of 80% TB incidence decrease and 90% decrease in TB deaths by 2030. In order to reach these targets, early detection needs to be improved and new screening tools and diagnostic algorithms are needed. One of the tools for the detection of TB that has gained momentum in resource constrained countries in recent years is digital chest radiography. After the initial investment in the equipment, radiography is fast and cheap, it has high sensitivity and it can have a large throughput.
However, as a direct consequence of the increased usage of chest X-ray, the lack of well trained personnel to read chest radiographs (CXRs) has become even more apparent. In this thesis, we have focused on improving various aspects of the computer-aided detection for TB (CAD4TB) software, and we have validated and analyzed the effectiveness of the software in various settings.}, + copromotor = {C. S\'{a}nchez Guttierez}, + file = {:pdf/Phil19.pdf:PDF;:png/publications/Phil19.png:PNG image}, + optnote = {DIAG}, + promotor = {B. van Ginneken}, + school = {Radboud University, Nijmegen}, + year = {2019}, + journal = {PhD thesis}, +} + +@article{Phil19a, + author = {Philipsen, R H H M and S\'{a}nchez, C I and Melendez, J and Lew, W J and van Ginneken, B}, + title = {Automated chest X-ray reading for tuberculosis in the Philippines to improve case detection: a cohort study}, + doi = {10.5588/ijtld.18.0004}, + issue = {7}, + pages = {805--810}, + url = {https://www.ingentaconnect.com/contentone/iuatld/ijtld/2019/00000023/00000007/art00006}, + volume = {23}, + abstract = {DetecTB (Diagnostic Enhanced Tools for Extra Cases of TB), an intensified tuberculosis (TB) case-finding programme targeting prisons and high-risk communities was implemented on Palawan Island, the Philippines. To evaluate the performance of TB detection based on computerised chest radiography (CXR) readings. Data from 14 094 subjects were analysed from September 2012 to June 2014. All CXRs were read by a physician and by software. Individuals with TB symptoms or CXR abnormalities according to the physician underwent Xpert MTB/RIF testing, the remaining persons were considered TB-negative (screening reference). A subset of 200 CXRs was read by an independent human reader (radiological reference). This reader also re-read a subset of the most abnormal cases as identified using the software but read as normal by the physician (discordant cases). A total of 10 755 individuals were included in the analysis, 2534 of whom had a positively assessed CXR; 298 cases were Xpert-positive. Using the screening reference, the area under the receiver operating characteristic curve for software readings was 0.93 (95%CI 0.92-0.94), with a sensitivity of 0.98 (95%CI 0.97-0.99) and a specificity of 0.69 (95%CI 0.40-0.98). Based on the radiological reference, the physician performed slightly worse than the software (sensitivity, 0.82, 95%CI 0.74-0.89 and specificity, 0.87, 95%CI 0.81-0.96 vs. sensitivity, 0.83, 95%CI 0.71-0.93 and specificity, 0.87, 95%CI 0.75-0.95), although this was not statistically significant. Of the 291 discordant cases, 70% were assessed as positive, resulting in a 22% increase in TB detection when extrapolated to the full cohort. The performance of automated CXR reading is comparable to that of the attending physicians in DetecTB, and its use as a second reader could increase TB detection.}, + file = {Phil19a.pdf:pdf\\Phil19a.pdf:PDF}, + journal = IJTLD, + month = {7}, + optnote = {DIAG}, + pmid = {31439111}, + year = {2019}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/207373}, + ss_id = {b5a66ad408861f79a1e6606da4ed23f772a7eb02}, + all_ss_ids = {['b5a66ad408861f79a1e6606da4ed23f772a7eb02']}, + gscites = {10}, +} + +@mastersthesis{Phil23, + author = {Philipp, Lena}, + title = {Body Composition Assessment in 3D CT Images}, + abstract = {Body composition as a diagnostic and prognostic biomarker is gaining importance in various medical fields such as oncology. 
Therefore, accurate quantification methods are necessary, such as analyzing CT images. While several studies introduced deep learning approaches to automatically segment a single slice, quantifying body composition in 3D remains understudied due to the high required annotation effort. This thesis explores the use of annotation-efficient strategies in developing a body composition segmentation model for the abdomen and pelvis. To address this, a fine-tuning strategy was proposed to optimize the annotation process and extend the model's generalization performance trained with L3 slices to the entire abdomen and pelvis. Moreover, a self-supervised pre-training using a contrastive loss was employed to leverage unlabeled data. The goal was to efficiently use the annotated data in developing the segmentation model. The results showed a significant acceleration of the annotation process. However, the pre-training added only limited benefits. The final model achieved excellent results with dice scores of SM: 0.96, VAT: 0.93, and SAT: 0.97. The insights gained from this study are useful to improve annotation procedures, and the developed body composition segmentation model can be used for further evaluation.}, + file = {Phil23.pdf:pdf\\Phil23.pdf:PDF}, + journal = {Master thesis}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2023}, +} + +@conference{Pinc18, + author = {Pinckaers, Hans and Litjens, Geert}, + title = {Training convolutional neural networks with megapixel images}, + booktitle = MIDL, + url = {https://arxiv.org/abs/1804.05712}, + abstract = {To train deep convolutional neural networks, the input data and the intermediate activations need to be kept in memory to calculate the gradient descent step. Given the limited memory available in the current generation accelerator cards, this limits the maximum dimensions of the input data. We demonstrate a method to train convolutional neural networks holding only parts of the image in memory while giving equivalent results. We quantitatively compare this new way of training convolutional neural networks with conventional training. In addition, as a proof of concept, we train a convolutional neural network with 64 megapixel images, which requires 97% less memory than the conventional approach.}, + file = {Pinc18.pdf:PDF\\Pinc18.pdf:PDF}, + optnote = {DIAG}, + year = {2018}, + all_ss_ids = {ab07173f4e352d07f48eddd67136e8e33573aecf}, + gscites = {12}, +} + +@inproceedings{Pinc19, + author = {Pinckaers, Hans and Bulten, Wouter and Litjens, Geert}, + title = {High resolution whole prostate biopsy classification using streaming stochastic gradient descent}, + booktitle = MI, + year = {2019}, + series = SPIE, + doi = {10.1117/12.2512817}, + abstract = {Prostate cancer is the most common cancer for men in Western countries, counting 1.1 million new diagnoses every year. The incidence is expected to increase further, due to the growing elderly population. This is leading to a significantly increased workload for pathologists. The burden of this time-consuming and repetitive workload has the potential to be decreased by computational pathology, e.g., by automatically screening prostate biopsies. The current state-of-the-art in many computational pathology tasks use patch-based convolutional neural networks. Developing such algorithms require detailed annotations of the task-specific classes on whole-slide images, which are challenging to create due to low availability of the pathologists. 
Therefore, it would be beneficial to be able to train using labels the pathologist already provides for regular clinical practice in the form of a report. However, these reports correspond to whole-slide images which are of such a high resolution that current accelerator cards cannot process them at once due to memory constraints. We developed a method, streaming stochastic gradient descent, to train a convolutional neural network end-to-end with entire high resolution images and slide-level labels extracted from pathology reports. Here we trained a neural network on 2812 whole prostate biopsies, at a input size of 8000x8000 pixels, equivalent to 50x total magnification, for a binary classification, cancerous or benign. We achieved an accuracy of 84%. These results show that we may not need expensive annotations to train classification networks in this domain.}, + file = {:pdf/Pinc19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + gsid = {6317852457997732629}, + gscites = {3}, + ss_id = {c14d3cf3f27b22bb8e6a3be573095173e7a917f1}, + all_ss_ids = {['c14d3cf3f27b22bb8e6a3be573095173e7a917f1']}, +} + +@article{Pinc20, + author = {Pinckaers, Hans and van Ginneken, Bram and Litjens, Geert}, + title = {Streaming convolutional neural networks for end-to-end learning with multi-megapixel images}, + doi = {10.1109/TPAMI.2020.3019563}, + url = {https://arxiv.org/abs/1911.04432}, + abstract = {Due to memory constraints on current hardware, most convolution neural networks (CNN) are trained on sub-megapixel images. For example, most popular datasets in computer vision contain images much less than a megapixel in size (0.09MP for ImageNet and 0.001MP for CIFAR-10). In some domains such as medical imaging, multi-megapixel images are needed to identify the presence of disease accurately. We propose a novel method to directly train CNNs using any input image size end-to-end. This method exploits the locality of most operations in modern CNNs by performing the forward and backward pass on smaller tiles of the image. In this work, we show a proof of concept using images of up to 66-megapixels (8192x8192), saving approximately 50GB of memory per image. Using two public challenge datasets, we demonstrate that CNNs can learn to extract relevant information from these large images and benefit from increasing resolution. We improved the area under the receiver-operating characteristic curve from 0.580 (4MP) to 0.706 (66MP) for metastasis detection in breast cancer (CAMELYON17). We also obtained a Spearman correlation metric approaching state-of-the-art performance on the TUPAC16 dataset, from 0.485 (1MP) to 0.570 (16MP). The code to reproduce a subset of the experiments is available at https://github.com/DIAGNijmegen/StreamingCNN.}, + file = {Pinc20.pdf:pdf\\Pinc20.pdf:PDF}, + journal = TPAMI, + month = aug, + optnote = {DIAG, INPRESS}, + pmid = {32845835}, + year = {2020}, + ss_id = {499846b295c4a0926f18bcc484c0db00bfbf1300}, + all_ss_ids = {['499846b295c4a0926f18bcc484c0db00bfbf1300']}, + gscites = {59}, +} + +@article{Pinc21, + author = {Pinckaers, Hans and Bulten, Wouter and Van der Laak, Jeroen and Litjens, Geert}, + title = {Detection of prostate cancer in whole-slide images through end-to-end training with image-level labels.}, + doi = {10.1109/TMI.2021.3066295}, + abstract = {Prostate cancer is the most prevalent cancer among men in Western countries, with 1.1 million new diagnoses every year. 
The gold standard for the diagnosis of prostate cancer is a pathologists' evaluation of prostate tissue. To potentially assist pathologists deep-learning-based cancer detection systems have been developed. Many of the state-of-the-art models are patch-based convolutional neural networks, as the use of entire scanned slides is hampered by memory limitations on accelerator cards. Patch-based systems typically require detailed, pixel-level annotations for effective training. However, such annotations are seldom readily available, in contrast to the clinical reports of pathologists, which contain slide-level labels. As such, developing algorithms which do not require manual pixel-wise annotations, but can learn using only the clinical report would be a significant advancement for the field. In this paper, we propose to use a streaming implementation of convolutional layers, to train a modern CNN (ResNet-34) with 21 million parameters end-to-end on 4712 prostate biopsies. The method enables the use of entire biopsy images at high-resolution directly by reducing the GPU memory requirements by 2.4 TB. We show that modern CNNs, trained using our streaming approach, can extract meaningful features from high-resolution images without additional heuristics, reaching similar performance as state-of-the-art patch-based and multiple-instance learning methods. By circumventing the need for manual annotations, this approach can function as a blueprint for other tasks in histopathological diagnosis. The source code to reproduce the streaming models is available at https://github.com/DIAGNijmegen/pathology-streaming-pipeline.}, + file = {:pdf/Pinc21.pdf:PDF}, + journal = TMI, + month = mar, + pmid = {33729928}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235135}, + ss_id = {05fc7e2ebc4b7b0efc9bc79d4c5119c1a4d8ae38}, + all_ss_ids = {['05fc7e2ebc4b7b0efc9bc79d4c5119c1a4d8ae38']}, + gscites = {41}, +} + +@article{Pinc22, + author = {Pinckaers, Hans and van Ipenburg, Jolique and Melamed, Jonathan and De Marzo, Angelo and Platz, Elizabeth A. and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, + title = {Predicting biochemical recurrence of prostate cancer with artificial intelligence}, + doi = {10.1038/s43856-022-00126-3}, + pages = {64}, + volume = {2}, + abstract = {Background: The first sign of metastatic prostate cancer after radical prostatectomy is rising PSA levels in the blood, termed biochemical recurrence. The prediction of recurrence relies mainly on the morphological assessment of prostate cancer using the Gleason grading system. However, in this system, within-grade morphological patterns and subtle histopathological features are currently omitted, leaving a significant amount of prognostic potential unexplored. + Methods: To discover additional prognostic information using artificial intelligence, we trained a deep learning system to predict biochemical recurrence from tissue in H\&E-stained microarray cores directly. We developed a morphological biomarker using convolutional neural networks leveraging a nested case-control study of 685 patients and validated on an independent cohort of 204 patients. We use concept-based explainability methods to interpret the learned tissue patterns. + Results: The biomarker provides a strong correlation with biochemical recurrence in two sets (n = 182 and n = 204) from separate institutions. Concept-based explanations provided tissue patterns interpretable by pathologists. 
+ Conclusions: These results show that the model finds predictive power in the tissue beyond the morphological ISUP grading.}, + file = {:pdf/Pinc22.pdf:PDF}, + journal = COMMMED, + pmid = {35693032}, + year = {2022}, + ss_id = {7201377df1b1bcfa9f4f57d797f561842e848c9f}, + all_ss_ids = {['7201377df1b1bcfa9f4f57d797f561842e848c9f']}, + gscites = {7}, +} + +@article{Pist21, + author = {Pistenmaa, Carrie L. and Nardelli, P. and Ash, S.Y. and Come, C.E. and Diaz, A.A. and Rahaghi, F.N. and Barr, R.G. and Young, K.A. and Kinney, G.L. and Simmons, J.P. and Wade, R.C. and Wells, J.M. and Hokanson, J.E. and Washko, G.R. and San Jos\'{e} Est\'{e}par, R. and Crapo, James D. and Silverman, Edwin K. and Make, Barry J. and Regan, Elizabeth A. and Beaty, Terri H. and Castaldi, Peter J. and Cho, Michael H. and DeMeo, Dawn L. and El Boueiz, Adel and Foreman, Marilyn G. and Ghosh, Auyon and Hayden, Lystra P. and Hersh, Craig P. and Hetmanski, Jacqueline and Hobbs, Brian D. and Hokanson, John E. and Kim, Wonji and Laird, Nan and Lange, Christoph and Lutz, Sharon M. and McDonald, Merry-Lynn and Prokopenko, Dmitry and Moll, Matthew and Morrow, Jarrett and Qiao, Dandi and Regan, Elizabeth A. and Saferali, Aabida and Sakornsakolpat, Phuwanat and Silverman, Edwin K. and Wan, Emily S. and Yun, Jeong and Centeno, Juan Pablo and Charbonnier, Jean-Paul and Coxson, Harvey O. and Galban, Craig J. and Han, MeiLan K. and Hoffman, Eric A. and Humphries, Stephen and Jacobson, Francine L. and Judy, Philip F. and Kazerooni, Ella A. and Kluiber, Alex and Lynch, David A. and Nardelli, Pietro and Newell, John D. and Notary, Aleena and Oh, Andrea and Regan, Elizabeth A. and Ross, James C. and San Jose Estepar, Raul and Schroeder, Joyce and Sieren, Jered and Stoel, Berend C. and Tschirren, Juerg and Van Beek, Edwin and Ginneken, Bramvan and van Rikxoort, Eva and Sanchez- Ferrero, Gonzalo Vegas and Veitel, Lucas and Washko, George R. and Wilson, Carla G. and Jensen, Robert and Everett, Douglas and Crooks, Jim and Pratte, Katherine and Strand, Matt and Wilson, Carla G. and Hokanson, John E. and Austin, Erin and Kinney, Gregory and Lutz, Sharon M. and Young, Kendra A. and Bhatt, Surya P. and Bon, Jessica and Diaz, Alejandro A. and Han, MeiLan K. and Make, Barry and Murray, Susan and Regan, Elizabeth and Soler, Xavier and Wilson, Carla G. and Bowler, Russell P. and Kechris, Katerina and Banaei-Kashani, Farnoush and Curtis, Jeffrey L. and Pernicano, Perry G. and Hanania, Nicola and Atik, Mustafa and Boriek, Aladin and Guntupalli, Kalpatha and Guy, Elizabeth and Parulekar, Amit and DeMeo, Dawn L. and Hersh, Craig and Jacobson, Francine L. and Washko, George and Barr, R. Graham and Austin, John and D'Souza, Belinda and Thomashow, Byron and MacIntyre, Neil and McAdams, H. Page and Washington, Lacey and McEvoy, Charlene and Tashjian, Joseph and Wise, Robert and Brown, Robert and Hansel, Nadia N. and Horton, Karen and Lambert, Allison and Putcha, Nirupama and Casaburi, Richard and Adami, Alessandra and Budoff, Matthew and Fischer, Hans and Porszasz, Janos and Rossiter, Harry and Stringer, William and Sharafkhaneh, Amir and Lan, Charlie and Wendt, Christine and Bell, Brian and Kunisaki, Ken M. and Flenaugh, Eric L. and Gebrekristos, Hirut and Ponce, Mario and Terpenning, Silanath and Westney, Gloria and Bowler, Russell and Lynch, David A. 
and Rosiello, Richard and Pace, David and Criner, Gerard and Ciccolella, David and Cordova, Francis and Dass, Chandra and D'Alonzo, Gilbert and Desai, Parag and Jacobs, Michael and Kelsen, Steven and Kim, Victor and Mamary, A. James and Marchetti, Nathaniel and Satti, Aditi and Shenoy, Kartik and Steiner, Robert M. and Swift, Alex and Swift, Irene and Vega-Sanchez, Maria Elena and Dransfield, Mark and Bailey, William and Bhatt, Surya P. and Iyer, Anand and Nath, Hrudaya and Wells, J. Michael and Conrad, Douglas and Soler, Xavier and Yen, Andrew and Comellas, Alejandro P. and Hoth, Karin F. and Newell, John and Thompson, Brad and Han, MeiLan K. and Kazerooni, Ella and Labaki, Wassim and Galban, Craig and Vummidi, Dharshan and Billings, Joanne and Begnaud, Abbie and Allen, Tadashi and Sciurba, Frank and Bon, Jessica and Chandra, Divay and Weissfeld, Joel}, + title = {Pulmonary Arterial Pruning and Longitudinal Change in Percent Emphysema and Lung Function}, + doi = {10.1016/j.chest.2021.01.084}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.chest.2021.01.084}, + file = {Pist21.pdf:pdf\Pist21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Chest}, + citation-count = {13}, + automatic = {yes}, + pages = {470-480}, + volume = {160}, +} + +@phdthesis{Plat07, + author = {B. Platel}, + title = {Exploring the {D}eep {S}tructure of {I}mages}, + year = {2007}, + url = {http://repository.tue.nl/617003}, + abstract = {This thesis will focus mainly on techniques for automatic image matching. The results from the study are applicable in image analysis applications such as wide baseline matching, texture recognition, object retrieval, robot localization, video data mining, building panorama's, object class recognition and object location and pose retrieval. Over the years many different methods have been developed for these tasks and a number of these methods function well in specific areas of image analysis. However a trend in these methods is that they depend more and more on ad hoc parameters and thresholds. These parameters are often not representing a physical entity and it is therefore difficult to assign meaningful values to them. The approach often used to 'solve' this problem is extensive training that exhaustively tries all possible parameters within a certain range and simply chooses the parameter set yielding the best results for the training set of images. This results in algorithms that work well for the image modality for which they were trained, but as soon as the input images differ too much from the training set, these algorithms break down. The goal of the research described in this thesis is to find a general method for image matching, regardless of the type of input images. Our method has to have a reasonable physical motivation and should have as few parameters and thresholds as possible.}, + copromotor = {L. J. M. Florack}, + file = {Plat07.pdf:pdf/Plat07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. M. ter Haar Romeny}, + school = {Eindhoven University of Technology}, + gsid = {5423685156093683129}, + gscites = {8}, + journal = {PhD thesis}, +} + +@inproceedings{Plat11, + author = {Platel, B. and Huisman, H. and Laue, H and Mus, R. and Mann, R. and Hahn, H.
and Karssemeijer, N}, + title = {Computerized Characterization of Breast Lesions using Dual-Temporal Resolution Dynamic Contrast-Enhanced {MR} Images}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2011}, + abstract = {To determine the benefits of a dual-temporal resolution dynamic contrast-enhanced MR protocol for the differentiation of mass-like breast lesions we studied the added value of features derived from this protocol in a computer aided diagnosis system. We developed a CADx system based on an SVM classifier to differentiate benign and malignant breast lesions. A cohort of 93 patients with 133 masses was included in the study. We obtained two different dynamic contrast-enhanced MR image series per study; one with high spatial and one with high temporal resolution. Six morphological descriptors were determined from an automatic segmentation of the lesion. Eight kinetic curve features were derived from the high spatial resolution data and six pharmacokinetic parameters were determined from the high temporal resolution data. Our CADx system demonstrated a performance, measured by the area under the ROC curve, of $A_z=0.85$ for the commonly used combination of kinetic curve features and morphology. A significant improvement in performance is shown by adding the pharmacokinetic parameters; $A_z=0.88, p=0.03$.}, + file = {Plat11.pdf:pdf/Plat11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Plat11a, + author = {Bram Platel and Henkjan Huisman and Hendrik Laue and Ritse Mann and Horst Hahn and Nico Karssemeijer and Roel Mus}, + title = {Computerized Characterization of Breast Masses Using Dual-Temporal Resolution Dynamic Contrast-enhanced {MR} Images}, + booktitle = RSNA, + year = {2011}, + abstract = {PURPOSE: Common computerized differentiation algorithms for breast masses use morphological and kinetic curve features from high spatial resolution DCEMR images. We assess the performance change when these features are combined with pharmacokinetic features derived from a specialized dual-temporal resolution MR protocol. METHOD AND MATERIALS: All patients with enhancing masses from a consecutive set of breast MR studies, acquired between 2008 and 2010 on a Sonata or Symphony 1.5T Siemens scanner, were included. This resulted in a cohort of 93 patients, with 66 benign and 67 malignant lesions (validated by biopsy). Our scanning protocol combined high temporal with high spatial resolution imaging. High temporal resolution (HT) images were acquired every 4.5s using a 3D TurboFLASH sequence (TR 7.8ms, TE 4ms, FA 20 degrees, FOV 320mm) during initial enhancement. Additional 4.5s scans were made every 110s to observe late behavior of the contrast agent. High spatial resolution (HS) images were acquired every 110s using a 3D FLASH sequence (TR 72ms, TE 1.54ms, FA 20 degrees, FOV 320mm). Each lesion was automatically segmented using 'smart opening'; a combination of thresholds, region growing and morphological operations. Every lesion was characterized by a set of 6 morphological and 8 kinetic features derived from HS and 3 pharmacokinetic features derived from HT. An SVM classifier with a radial basis function kernel was used. Ten-fold cross-validation was executed 10 times. Performance was measured by the area under the ROC curve, Az. RESULTS: The accuracy of the combined HS and HT kinetic features was significantly better (Az=0.81, p<0.02) than either the HS or HT kinetic features alone (Az=0.77 and 0.71 respectively).
The commonly used combination of morphological and kinetic features from HS showed a diagnostic performance of Az=0.85. The addition of pharmacokinetic features from HT increased this performance significantly (Az=0.88, p=0.03). CONCLUSION: Pharmacokinetic features obtained from a high temporal resolution DCEMR sequence provide additional information over conventional morphological and kinetic features from a high spatial resolution sequence. Combining these features leads to significantly better performance for computerized differentiation of breast masses. CLINICAL RELEVANCE/APPLICATION: Dual temporal resolution breast MR can improve diagnostic accuracy over conventional single resolution analysis, which can increase specificity.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Plat12, + author = {B. Platel and T. Welte and R. Mus and R. Mann and C. I. S\'{a}nchez and H. Hahn and N. Karssemeijer}, + title = {Automated Evaluation of an Ultrafast {MR} Imaging Protocol for the Characterization of Breast Lesions}, + booktitle = RSNA, + year = {2012}, + file = {Plat12.pdf:pdf/Plat12.pdf:PDF}, + optnote = {DIAG}, +} + +@article{Plat14, + author = {Platel, B. and Mus, R. and Welte, T. and Karssemeijer, N. and Mann, R.}, + title = {Automated Characterization of Breast Lesions Imaged with an Ultrafast {DCE}-{MR} Protocol}, + journal = TMI, + year = {2014}, + pages = {225-232}, + doi = {10.1109/TMI.2013.2281984}, + abstract = {Dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) of the breast has become an invaluable tool in the clinical work-up of patients suspected of having breast carcinoma. The purpose of this study is to introduce novel features extracted from the kinetics of contrast agent uptake imaged by a short (100~s) view-sharing MRI protocol, and to investigate how these features measure up to commonly used features for regular DCE-MRI of the breast. Performance is measured with a computer aided diagnosis (CADx) system aimed at distinguishing benign from malignant lesions. A bi-temporal breast MRI protocol was used. This protocol produces five regular, high spatial-resolution T1-weighted acquisitions interleaved with a series of 20 ultrafast view-sharing acquisitions during contrast agent uptake. We measure and compare the performances of morphological and kinetic features derived from both the regular DCE-MRI sequence and the ultrafast view-sharing sequence with four different classifiers. The classification performance of kinetics derived from the short (100~s) ultrafast acquisition starting with contrast agent administration, is significantly higher than the performance of kinetics derived from a much lengthier (510~s), commonly used 3D gradient echo acquisition. When combined with morphology information all classifiers show a higher performance for the ultrafast acquisition (two out of four results are significantly better).}, + file = {Plat14.pdf:pdf/Plat14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {24058020}, + month = {2}, + gsid = {15091674449478966998}, + gscites = {60}, + ss_id = {30562d0cc6cfc7526d678103b46d02e446af2df8}, + all_ss_ids = {['30562d0cc6cfc7526d678103b46d02e446af2df8']}, +} + +@article{Poko15, + author = {Pokorny, Morgan and Van de Ven, Wendy and Barentsz, Jelle and Thompson, Leslie}, + title = {Reply to Yaalini Shanmugabavan, Stephanie Guillaumier and Hashim U. Ahmed's Letter to the Editor re: Morgan R. Pokorny, Maarten de Rooij, Earl Duncan, et al.
Prospective Study of Diagnostic Accuracy Comparing Prostate Cancer Detection by Transrectal Ultrasound-Guided Biopsy Versus Magnetic Resonance ({MR}) Imaging with Subsequent {MR}-guided Biopsy in Men Without Previous Prostate Biopsies. Eur Urol 2014;66:22-9}, + journal = EU, + year = {2015}, + volume = {67}, + pages = {e54--e55}, + doi = {10.1016/j.eururo.2014.08.066}, + file = {Poko15.pdf:pdf\\Poko15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {25194908}, + month = {3}, +} + +@article{Pola23, + author = {Polack, Meaghan and Smit, Marloes A. and Crobach, Stijn A.L.P. and Terpstra, Valeska and Roodvoets, Annet G.H. and Meershoek-Klein Kranenbarg, Elma M. and Dequeker, Els M.C. and van der Laak, Jeroen A. and Tollenaar, Rob A.E.M. and van Krieken, Han J.H.J.M. and Mesker, Wilma E.}, + title = {Uniform Noting for International application of the Tumor-stroma ratio as Easy Diagnostic tool: The UNITED study - An update}, + doi = {10.1016/j.ejso.2022.11.378}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejso.2022.11.378}, + file = {Pola23.pdf:pdf\Pola23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Surgical Oncology}, + citation-count = {0}, + automatic = {yes}, + pages = {e132-e133}, + volume = {49}, +} + +@article{Pomp15, + author = {Pompe, Esther and van Rikxoort, Eva M. and Schmidt, Michael and R\"uhaak, Jan and Gallardo-Estrella, L. and Vliegenthart, Rozemarijn and Oudkerk, Matthijs and de Koning, Harry J. and van Ginneken, Bram and de Jong, Pim A. and Lammers, Jan-Willem J. and Mohamed Hoesein, Firdaus A A.}, + title = {Parametric response mapping adds value to current computed tomography biomarkers in diagnosing chronic obstructive pulmonary disease}, + journal = AJRCCM, + year = {2015}, + volume = {191}, + pages = {1084--1086}, + doi = {10.1164/rccm.201411-2105LE}, + file = {Pomp15.pdf:pdf\\Pomp15.pdf:PDF}, + optnote = {DIAG}, + number = {9}, + pmid = {25932766}, + month = {5}, + gsid = {2861070141397780388}, + gscites = {27}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/153586}, + ss_id = {fd9e39eec2414dfe630f8fbd82b093ef18f2f6b4}, + all_ss_ids = {['fd9e39eec2414dfe630f8fbd82b093ef18f2f6b4']}, +} + +@conference{Pomp15a, + author = {Pompe, E. and De Jong, P. and Van Rikxoort, E. and Gallardo-Estrella, L. and De Jong, W. and Vliegenthart, R. and Oudkerk, M. and Van der Aalst, C. and Van Ginneken, B. and Lammers, J.-W. and Mohamed Hoesein, F.}, + title = {Bone density is associated with emphysema and air trapping on {CT} in smokers}, + booktitle = ATS, + year = {2015}, + publisher = {European Respiratory Society}, + doi = {10.1183/13993003.congress-2015.PA3754}, + abstract = {Background: Osteoporosis is more common in patients with chronic obstructive pulmonary disease (COPD) and smokers. Bone density derived from computed tomography (CT) can be used to identify patients at risk for a vertebral fracture.Aims and objectives: We investigated whether COPD related measures on computed tomography (CT) scans were associated with lower bone density or vertebral fractures in smokers with and without COPD.Methods: We included participants from the NELSON lung cancer screening trial. Bone density was measured as the Hounsfield Unit (HU) in the first lumbar vertebra and vertebral fractures were assessed semi-quantitatively. The 15th Percentile method (Perc15) was used to assess emphysema and the airway lumen perimeter (Pi10) was used to measure airway wall thickness. 
The expiratory/inspiratory-ratioMeanLungDensity (E/I-ratioMLD) was used as a measure for air trapping and tracheal index (TI) was used to assess trachea deformity.Results: 1,093 male participants were included. Lower Perc15 and higher E/I-ratioMLD were significantly associated with lower bone density (b = -1.29, p = 0.03 and b = -0.48, p = 0.01, respectively). Pi10 and TI were not associated with bone density changes. 95 (8.7\%) participants had a vertebral fracture. All CT-derived biomarkers were not associated with fracture prevalence.Conclusions: Bone density is lower with increasing extents of emphysema and small airway disease, but is not associated with large airway disease and trachea deformity. This may indicate the necessity to measure bone density early in the disease in COPD patients with emphysema and air trapping to prevent vertebral fractures.}, + optnote = {DIAG, RADIOLOGY}, + month = {9}, + gsid = {6276940639978485861}, + gscites = {1}, +} + +@article{Pomp16, + author = {Pompe, Esther and {de Jong}, Pim A and {van Rikxoort}, Eva M and {Gallardo Estrella}, Leticia and {de Jong}, Werner U and Vliegenthart, Rozemarijn and Oudkerk, Matthijs and {van der Aalst}, Carlijn M and {van Ginneken}, Bram and Lammers, Jan-Willem J and {Mohamed Hoesein}, Firdaus Aa}, + title = {Smokers with emphysema and small airway disease on computed tomography have lower bone density}, + journal = IJCOPD, + year = {2016}, + volume = {11}, + pages = {1207--1216}, + doi = {10.2147/COPD.S103680}, + url = {http://dx.doi.org/10.2147/COPD.S103680}, + abstract = {Osteoporosis is more common in patients with COPD and in smokers. The aim of this study was to assess whether measures of emphysema and airway disease on computed tomography (CT) were associated with lower bone density or vertebral fractures in smokers with and without COPD. For this purpose, we included participants from the NELSON lung cancer screening trial. Bone density was measured as Hounsfield Units in the first lumbar vertebra, and vertebral fractures were assessed semiquantitatively. The 15th percentile method (Perc15) was used to assess emphysema, and the airway lumen perimeter (Pi10) was used for airway wall thickness. Expiratory/inspiratory-ratiomean lung density (E/I-ratioMLD) was used as a measure for air trapping and tracheal index to assess tracheal deformity. Linear regression models and logistic regression models were used to assess associations between CT biomarkers, bone density, and presence of fractures. Exactly 1,093 male participants were eligible for analysis. Lower Perc15 and higher E/I-ratioMLD were significantly associated with lower bone density (b=-1.27, P=0.02 and b=-0.37, P=0.02, respectively). Pi10 and tracheal index were not associated with bone density changes. CT-derived biomarkers were not associated with fracture prevalence. Bone density is lower with increasing extent of emphysema and small airway disease but is not associated with large airway disease and tracheal deformity. This may indicate the necessity to measure bone density early in smokers with emphysema and air trapping to prevent vertebral fractures.}, + file = {Pomp16.pdf:pdf\\Pomp16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27354779}, + month = {6}, + gsid = {2929627178945368618}, + gscites = {14}, + ss_id = {b9c37550bf40f47dd0ce71ac59490144fd7707e8}, + all_ss_ids = {['b9c37550bf40f47dd0ce71ac59490144fd7707e8']}, +} + +@article{Pomp16a, + author = {Pompe, M. and {van Rikxoort}, E.M. and Mets, O.M. and Charbonnier, JP. and Kuhnigk, JM. 
and {de Koning}, H.J. and Oudkerk, M. and Vliegenthart, R. and Zanen, P. and Lammers, JW.J. and {van Ginneken}, B. and {de Jong}, P.A. and {Mohamed Hoesein}, F.A.A}, + title = {Follow-up of CT-derived airway wall thickness: Correcting for changes in inspiration level improves reliability}, + journal = EJR, + year = {2016}, + volume = {85}, + issue = {11}, + pages = {2008-2013}, + doi = {10.1016/j.ejrad.2016.09.009}, + abstract = {Objectives + Airway wall thickness (AWT) is affected by changes in lung volume. This study evaluated whether correcting AWT on computed tomography (CT) for differences in inspiration level improves measurement agreement, reliability, and power to detect changes over time. + + Methods + Participants of the Dutch-Belgian lung cancer screening trial who underwent 3-month repeat CT for an indeterminate pulmonary nodule were included. AWT on CT was calculated by the square root of the wall area at a theoretical airway with an internal perimeter of 10 mm (Pi10). The scan with the highest lung volume was labelled as the reference scan and the scan with the lowest lung volume was labelled as the comparison scan. Pi10 derived from the comparison scan was corrected by multiplying it with the ratio of CT lung volume of the comparison scan to CT lung volume on the reference scan. Agreement of uncorrected and corrected Pi10 was studied with the Bland-Altman method, reliability with intra-class correlation coefficients (ICC), and power to detect changes over time was calculated. + + Results + 315 male participants were included. Limit of agreement and reliability for Pi10 was -0.61 to 0.57 mm (ICC = 0.87), which improved to -0.38 to 0.37 mm (ICC = 0.94) after correction for inspiration level. To detect a 15% change over 3 months, 71 subjects are needed for Pi10 and 26 subjects for Pi10 adjusted for inspiration level. + + Conclusions + Correcting Pi10 for differences in inspiration level improves reliability, agreement, and power to detect changes over time.}, + file = {Pomp16a.pdf:pdf\\Pomp16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27776653}, + month = {11}, + gsid = {13729606176859777849}, + gscites = {8}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/173155}, + ss_id = {2de6f1a75703272b9140f19cfe83c0a42527ef72}, + all_ss_ids = {['2de6f1a75703272b9140f19cfe83c0a42527ef72']}, +} + +@article{Pomp17, + author = {Esther Pompe and Pim A. de Jong and David A. Lynch and Nikolas Lessmann and I. I{\v{s}}gum and Bram van Ginneken and J-W.J. Lammers and Mohamed Hoesein, Firdaus A.A.}, + title = {Computed tomographic findings in subjects who died from respiratory disease in the {National Lung Screening Trial}}, + journal = ERJ, + year = {2017}, + volume = {49}, + issue = {4}, + month = {4}, + pages = {1601814}, + doi = {10.1183/13993003.01814-2016}, + abstract = {We evaluated the prevalence of significant lung abnormalities on computed tomography (CT) in patients who died from a respiratory illness other than lung cancer in the National Lung Cancer Screening Trial (NLST). In this retrospective case-control study, NLST participants in the CT arm who died of respiratory illness other than lung cancer were matched for age, sex, pack-years and smoking status to a surviving control. A chest radiologist and a radiology resident blinded to the outcome independently scored baseline CT scans visually and qualitatively for the presence of emphysema, airway wall thickening and fibrotic lung disease.
The prevalence of CT abnormalities was compared between cases and controls by using chi-squared tests. In total, 167 participants died from a respiratory cause other than lung cancer. The prevalence of severe emphysema, airway wall thickening and fibrotic lung disease were 28.7\% versus 4.8\%, 26.9\% versus 13.2\% and 18.6\% versus 0.5\% in cases and controls, respectively. Radiological findings were significantly more prevalent in deaths compared with controls (all p<0.001). CT-diagnosed severe emphysema, airway wall thickening and fibrosis were much more common in NLST participants who died from respiratory disease, and CT may provide an additional means of identifying these diseases.}, + file = {Pomp17.pdf:pdf\\Pomp17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28424361}, + gsid = {3304458654067065492}, + gscites = {17}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/174975}, + ss_id = {bdba224a4595a84c0850c4bbc3d13b2582e4f08a}, + all_ss_ids = {['bdba224a4595a84c0850c4bbc3d13b2582e4f08a']}, +} + +@article{Ponc14, + author = {Poncela, A. and Gallardo-Estrella, L.}, + title = {Command-based voice teleoperation of a mobile robot via a human-robot interface}, + journal = Robotica, + year = {2014}, + pages = {1-18}, + doi = {10.1017/S0263574714000010}, + file = {Ponc14.pdf:pdf\\Ponc14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + publisher = {Cambridge University Press}, + month = {1}, +} + +@article{Post13a, + author = {van der Post, Rachel and van der Laak, Jeroen and Sturm, Bart and Clarijs, Ruud and Schaafsma, Ewout and van Krieken, Han and Nap, Marius}, + title = {The evaluation of colon biopsies using virtual microscopy is reliable}, + doi = {10.1111/his.12131}, + pages = {114-121}, + volume = {63}, + abstract = {Aims: Virtual microscopy offers major advantages for pathology practice, separating slide evaluation from slide production. The aim of this study was to investigate the reliability of using whole slide images as compared with routine glass slides for diagnostic purposes. Methods and results: Colon biopsies (n = 295) were assessed using both glass slides and whole slide images by four pathologists and two residents. Two pathologists scored the digital images of biopsies in a primary diagnostic setting. For each case, the consensus diagnosis was defined as the majority diagnosis on the study's glass slides. All diagnoses were grouped into seven main diagnostic categories, and further divided into subgroups. The overall concordance rates were 89.6% for whole slide images and 91.6% for light microscopy. The concordance rates of the subgroups 'adenoma' and 'adenocarcinoma' between whole slide images and conventional microscopy showed only small variability. The intraobserver (whole slide images versus glass slide) agreement, including subgroups, was substantial, with a mean kappa-value of 0.78, and was higher than the interobserver agreement for glass slides (interobserver kappa-value of 0.69). Conclusions: This study shows good diagnostic accuracy and reproducibility for virtual microscopy, indicating that this technology can reliably be used for pathological evaluation of colon biopsies in a primary clinical setting.}, + file = {Post13.pdf:pdf\\post13.pdf:PDF}, + journal = Histopathology, + month = {3}, + optnote = {DIAG}, + year = {2013}, + gsid = {12291140451159326056}, + gscites = {21}, +} + +@inproceedings{Poza06, + author = {J. Poza and G. Vecilla and R. Hornero and M. I. L\'{o}pez and C. I. S\'{a}nchez and A.
D\'{i}ez}, + title = {Tele{O}ftal{W}eb 3.0: {W}eb-based ophthalmologic medical record}, + booktitle = {Telemedicine in Future Health}, + year = {2006}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Poza08, + author = {J. Poza and R. Hornero and J. Escudero and A. Fern\'{a}ndez and C. I. S\'{a}nchez}, + title = {Regional analysis of spontaneous {MEG} rhythms in patients with {A}lzheimer's disease using spectral entropies}, + journal = AOBE, + year = {2008}, + volume = {36}, + pages = {141--152}, + doi = {10.1007/s10439-007-9402-y}, + abstract = {{A}lzheimer's disease ({AD}) is the most common form of dementia. {A}geing is the greatest known risk factor for this disorder. {T}herefore, the prevalence of {AD} is expected to increase in western countries due to the rise in life expectancy. {N}owadays, a low diagnosis accuracy is reached, but an early and accurate identification of {AD} should be attempted. {I}n this sense, only a few studies have focused on the magnetoencephalographic ({MEG}) {AD} patterns. {T}his work represents a new effort to explore the ability of three entropies from information theory to discriminate between spontaneous {MEG} rhythms from 20 {AD} patients and 21 controls. {T}he {S}hannon ({SSE}), {T}sallis ({TSE}), and {R}\'{e}nyi ({RSE}) spectral entropies were calculated from the time-frequency distribution of the power spectral density ({PSD}). {T}he entropies provided statistically significant lower values for {AD} patients than for controls in all brain regions (p < 0.0005). {T}his fact suggests a significant loss of irregularity in {AD} patients' {MEG} activity. {M}aximal accuracy of 87.8\% was achieved by both the {TSE} and {RSE} (90.0\%, sensitivity; 85.7\%, specificity). {T}he statistically significant results obtained by both the extensive ({SSE} and {RSE}) and non-extensive ({TSE}) spectral entropies suggest that {AD} could disturb long and short-range interactions causing an abnormal brain function.}, + file = {Poza08.pdf:pdf\\Poza08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {17994279}, + month = {11}, + gsid = {17346087654973148941}, + gscites = {56}, +} + +@book{Prok03a, + author = {Prokop, M. and Galanski, M. and Schaefer-Prokop, C.}, + title = {Spiral and multislice computed tomography of the body}, + year = {2003}, + publisher = {Thieme Medical Publisher}, + url = {http://books.google.nl/books?id=K9GbaGpOdGwC}, + optnote = {DIAG}, + gsid = {14376944788777475951,4652244669951073364}, + gscites = {473}, +} + +@article{Prok03b, + author = {Prokop, Mathias and Neitzel, Ulrich and Schaefer-Prokop, Cornelia}, + title = {Principles of image processing in digital chest radiography}, + journal = JTI, + year = {2003}, + volume = {18}, + pages = {148--164}, + abstract = {Image processing has a major impact on image quality and diagnostic performance of digital chest radiographs. Goals of processing are to reduce the dynamic range of the image data to capture the full range of attenuation differences between lungs and mediastinum, to improve the modulation transfer function to optimize spatial resolution, to enhance structural contrast, and to suppress image noise. Image processing comprises look-up table operations and spatial filtering. Look-up table operations allow for automated signal normalization and arbitrary choice of image gradation. The most simple and still widely applied spatial filtering algorithms are based on unsharp masking. Various modifications were introduced for dynamic range reduction and MTF restoration.
More elaborate and more effective are multi-scale frequency processing algorithms. They are based on the subdivision of an image in multiple frequency bands according to its structural composition. This allows for a wide range of image manipulations including a size-independent enhancement of low-contrast structures. Principles of the various algorithms will be explained and their impact on image appearance will be illustrated by clinical examples. Optimum and sub-optimum parameter settings are discussed and pitfalls will be explained.}, + file = {Prok03b.pdf:pdf\\Prok03b.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {3}, + pmid = {12867812}, + month = {7}, + gsid = {12656742792899063754}, + gscites = {67}, +} + +@article{Prok20, + author = {Prokop, Mathias and van Everdingen, Wouter and van Rees Vellinga, Tjalco and Quarles van Ufford, Jet and Stoger, Lauran and Beenen, Ludo and Geurts, Bram and Gietema, Hester and Krdzalic, Jasenko and Schaefer-Prokop, Cornelia and van Ginneken, Bram and Brink, Monique and {the COVID-19 Standardized Reporting Working Group of the Dutch Radiological Society}}, + title = {{CO-RADS} - A categorical {CT} assessment scheme for patients with suspected {COVID-19}: definition and evaluation}, + journal = Radiology, + number = {2}, + year = {2020}, + pages = {E97--E104}, + volume = 296, + doi = {10.1148/radiol.2020201473}, + abstract = {Purpose To introduce the COVID-19 Reporting and Data System (CO-RADS) for standardized assessment of pulmonary involvement of COVID-19 on non-enhanced chest CT and report its initial interobserver agreement and performance. Methods The Dutch Radiological Society (NVvR) developed CO-RADS based on other efforts for standardization, such as Lung-RADS or BI-RADS. CO-RADS assesses the suspicion for pulmonary involvement of COVID-19 on a scale from 1 (very low) to 5 (very high). The system is meant to be used in patients presenting with moderate to severe symptoms of COVID-19. The system was evaluated using 105 chest CTs of patients admitted to the hospital with clinical suspicion of COVID-19 in whom RT-PCR was performed (62 +/- 16 years, 61 men, 53 with positive RT-PCR). Eight observers assessed the scans using CO-RADS. Fleiss' kappa was calculated, and scores of individual observers were compared to the median of the remaining seven observers. The resulting area under the receiver operating characteristics curve (AUC) was compared to results from RT-PCR and clinical diagnosis of COVID-19. Results There was absolute agreement among observers in 573 (68.2%) of 840 observations. Fleiss' kappa was 0.47 (95% confidence interval (CI) 0.45-0.47), with the highest kappa for CO-RADS categories 1 (0.58, 95% CI 0.54-0.62) and 5 (0.68, 95% CI 0.65-0.72). The average AUC was 0.91 (95% CI 0.85-0.97) for predicting RT-PCR outcome and 0.95 (95% CI 0.91-0.99) for clinical diagnosis. The false negative rate for CO-RADS 1 was 9/161 (5.6%, 95% CI 1.0-10%), and the false positive rate for CO-RADS 5 was 1/286 (0.3%, 95% CI 0-1.0%). 
Conclusions CO-RADS is a categorical assessment scheme for pulmonary involvement of COVID-19 on non-enhanced chest CT providing very good performance for predicting COVID-19 in patients with moderate to severe symptoms and has a substantial interobserver agreement, especially for categories 1 and 5.}, + file = {Prok20.pdf:pdf\\Prok20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32339082}, + gsid = {16652056044070556446}, + gscites = {571}, + ss_id = {12e5057823a452673ae0aa4f8a64d4a88fdeaf27}, + all_ss_ids = {['12e5057823a452673ae0aa4f8a64d4a88fdeaf27']}, +} + +@article{Prok93, + author = {Prokop, M. and Schaefer, C. and Kalender, W. A. and Polacin, A. and Galanski, M.}, + title = {[Vascular imaging with spiral-{CT}. The path to {CT}-angiography]}, + journal = Radiologe, + year = {1993}, + volume = {33}, + pages = {694--704}, + abstract = {Spiral CT is a technique that allows for high-quality two-dimensional angiographic projections and 3D imaging of vascular structures. The authors present the technical and methodological principles of the technique, including scan parameters and parameters of contrast application for various clinical imaging tasks. They present their experience with over 150 clinical cases using spiral CT angiography. Suitable applications of this technique include congenital anomalies, aneurysms, dissections, stenoses, thrombi and vascular tumor involvement. Given a problem-adapted examination technique, pathologic changes in vessels of as little as 2 mm can be visualized. In some cases with complex vascular anatomy, spiral CT angiography can be superior to arterial angiography.}, + optnote = {DIAG}, + number = {12}, + pmid = {8303028}, +} + +@article{Prok93a, + author = {Prokop, M. and Schaefer, C. M. and Oestmann, J. W. and Galanski, M.}, + title = {Improved parameters for unsharp mask filtering of digital chest radiographs}, + journal = Radiology, + year = {1993}, + volume = {187}, + pages = {521--526}, + abstract = {Observer performance with four unsharp mask filtering algorithms for storage phosphor chest radiographs was compared with that with conventional screen-film radiographs in the detection of three types of simulated lung disease: nodules, fine lines, and micronodule clusters. Previously studied parameter sets (small [1.4-mm] and medium [5-mm] filter masks) and two new parameter sets (large [2.5-cm] and ultralarge [7-cm] masks) were compared by means of receiver operating characteristic analysis. With medium and small masks, nodule detection was inferior to that achieved with other modes. Use of ultralarge masks decreased the detection of lines compared with detection with conventional screen-film radiographs. Although detection of micronodule clusters was worse with digital images than with conventional screen-film radiographs, results with large and ultralarge masks were better than with small masks. Overall, filtering with large masks was best suited for simultaneously matching linear, nodular, and micronodular structures. These results suggest that lesion-specific processing of digital chest images is not necessary.}, + optnote = {DIAG}, + number = {2}, + pmid = {8475301}, + month = {5}, + gsid = {9424534228534281862}, + gscites = {61}, +} + +@article{Prok96, + author = {Prokop, M. and Schaefer-Prokop, C. and Galanski, M.}, + title = {[Spiral {CT} of the lung. 
Technique, findings, value]}, + journal = Radiologe, + year = {1996}, + volume = {36}, + pages = {457--469}, + abstract = {Spiral CT allows for examination of the whole chest within a single breathhold. As compared to standard CT, spiral CT has an increased sensitivity for the detection of pulmonary nodules, of small mediastinal and bronchopulmonary lymph nodes, and of pleural plaques improves characterization of lesion morphology. New diagnostic applications include the detection of very subtle diffuse lung disease and the diagnosis of pulmonary embolism and vascular malformations. For the diagnosis of tracheobronchial pathology, spiral CT is an ideal supplement to bronchoscopy.}, + optnote = {DIAG}, + number = {6}, + pmid = {8767115}, + gsid = {7428159115637114647}, + gscites = {13}, +} + +@article{Prok97, + author = {Prokop, M. and Shin, H. O. and Schanz, A. and Schaefer-Prokop, C. M.}, + title = {Use of maximum intensity projections in {CT} angiography: a basic review}, + journal = Radiographics, + year = {1997}, + volume = {17}, + pages = {433--451}, + abstract = {Maximum intensity projection (MIP) is a simple three-dimensional visualization tool that can be used to display computed tomographic angiography data sets. MIP images are not threshold dependent and preserve attenuation information. Thus, they often yield acceptable results even in cases in which shaded surface display images fail because of threshold problems. MIP is particularly useful for depicting small vessels. Because MIP does not allow for differentiation between foreground and background, MIP images are best suited for displaying relatively simple anatomic situations in which superimposition of structures does not occur (eg, the abdominal aorta). If anatomic structures are superimposed over the vessel of interest, the MIP technique can provide images of diagnostic quality as long as the contrast of the vessel of interest is sufficiently high compared with that of surrounding structures. Editing procedures for MIP are usually used to exclude unwanted structures from the volume of interest and include cutting functions and region-growing algorithms. Artifacts from vessel pulsation and respiratory motion may occur and simulate abnormalities, but, with careful attention, they can be distinguished from real disease. MIP images should always be interpreted together with the original transaxial data set. Knowledge of display properties and artifacts is necessary for correct interpretation of MIP images and helps one create images of optimal quality, choose appropriate examination parameters, and distinguish artifacts from disease.}, + optnote = {DIAG}, + number = {2}, + pmid = {9084083}, + month = {3}, + gsid = {5696633616648746947}, + gscites = {140}, +} + +@article{Prok97a, + author = {Prokop, M. and Schaefer-Prokop, C. and Galanski, M.}, + title = {Spiral {CT} angiography of the abdomen}, + journal = ABDI, + year = {1997}, + volume = {22}, + pages = {143--153}, + optnote = {DIAG}, + number = {2}, + pmid = {9013522}, + month = {2}, + gsid = {14210338552809741791}, + gscites = {48}, +} + +@article{Prok97b, + author = {Prokop, M. and Schaefer-Prokop, C. M.}, + title = {Digital image processing}, + journal = ER, + year = {1997}, + volume = {7}, + pages = {S73--S82}, + abstract = {The image quality of a radiograph is determined by the local contrast, spatial resolution, latitude and the image noise. The goal of digital processing is to improve the visualisation of pathology by optimising these physical parameters. 
Processing parameters need to be chosen correctly in order to overcome the inverse relationship between contrast and latitude while producing images that retain a conventional appearance. Unsharp mask filtering (UMF) is a simple technique for improving image quality. This technique, however, suffers from serious drawbacks, such as the suppression of pathologic lesions or artifacts that may simulate pathology. Manufacturers have developed different approaches in order to overcome problems and artifacts derived from this technique.}, + optnote = {DIAG}, + number = {11}, + pmid = {9169105}, + month = {2}, + ss_id = {7b442acc3a6bedd75701c15484e7e0c817111122}, + all_ss_ids = {['7b442acc3a6bedd75701c15484e7e0c817111122']}, + gscites = {0}, +} + +@article{Pros12, + author = {Prosch, Helmut and Schaefer-Prokop, Cornelia M. and Eisenhuber, Edith and Kienzl, Daniela and Herold, Christian J.}, + title = {{CT} protocols in interstitial lung diseases-A survey among members of the European Society of Thoracic Imaging and a review of the literature}, + journal = ER, + year = {2013}, + volume = {33}, + pages = {1553-1563}, + doi = {10.1007/s00330-012-2733-6}, + abstract = {PURPOSE: The aim of this study was to survey the current CT protocols used by members of the European Society of Thoracic Imaging (ESTI) to evaluate patients with interstitial lung diseases (ILD). METHODS: A questionnaire was e-mailed to 173 ESTI members. The survey focussed on CT acquisition and reconstruction techniques. In particular, questions referred to the use of discontinuous HRCT or volume CT protocols, the acquisition of additional acquisitions in expiration or in the prone position, and methods of radiation dose reduction and on reconstruction algorithms. RESULTS: The overall response rate was 37 \%. Eighty-five percent of the respondents used either volume CT alone or in combination with discontinuous HRCT. Forty-five percent of the respondents adapt their CT protocols to the patient's weight and/or age. Expiratory CT or CT in the prone position was performed by 58 \% and 59 \% of the respondents, respectively. The number of reconstructed series ranged from two to eight. CONCLUSION: Our survey showed that radiologists with a special interest and experience in chest radiology use a variety of CT protocols for the evaluation of ILD. There is a clear preference for volumetric scans and a strong tendency to use the 3D information. KEY POINTS: - Experienced thoracic radiologists use various CT protocols for evaluating interstitial lung diseases. - Most workers prefer volumetric CT acquisitions, making use of the 3D information. - More attention to reducing the radiation dose appears to be needed.}, + file = {Pros12.pdf:pdf\\Pros12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {23239063}, + month = {12}, + ss_id = {629276b035ec77dbc9af62d747d2a923079ec12f}, + all_ss_ids = {['629276b035ec77dbc9af62d747d2a923079ec12f']}, + gscites = {49}, +} + +@article{Pros13, + author = {Prosch, H. and Schaefer-Prokop, C.}, + title = {[Radiological evaluation of incidental pulmonary nodules]}, + journal = Radiologe, + year = {2013}, + volume = {53}, + pages = {623--638}, + doi = {10.1007/s00117-013-2530-5}, + abstract = {Since the widespread use of computed tomography (CT), the detection of pulmonary nodules has considerably increased and has become part of the daily clinical routine.
In the evaluation of pulmonary nodules, malignant nodules have to be differentiated from benign pulmonary nodules with a high level of confidence. The diagnostic approach for pulmonary nodules depends on the pretest probability for malignancy. For indeterminate pulmonary nodules <8 mm, non-contrast CT observational follow-up is recommended and depending on the size and pretest probability for malignancy, follow-up CT intervals range from 3 to 12 months. For indeterminate pulmonary nodules >8 mm, management is based on patient surgical risk and pretest probability for malignancy. Either CT follow-up alone, 18-fluorodeoxyglucose-positron emission tomography (FDG-PET) or non-surgical biopsy for tissue diagnosis are utilized to evaluate the lesions. For pulmonary nodules with a high pretest probability for malignancy, surgical resection is recommended unless specifically contraindicated.}, + file = {Pros13.pdf:pdf\\Pros13.pdf:PDF}, + optnote = {DIAG}, + number = {7}, + pmid = {23873184}, + month = {7}, + ss_id = {fcb8b0a49b42ea6834e69133f752db45836ba1f8}, + all_ss_ids = {['fcb8b0a49b42ea6834e69133f752db45836ba1f8']}, + gscites = {0}, +} + +@article{Puig02a, + author = {Puig, Stefan and Schaefer-Prokop, Cornelia and Mang, Thomas and Prokop, Mathias}, + title = {Single- and multi-slice spiral computed tomography of the paediatric kidney}, + journal = EJR, + year = {2002}, + volume = {43}, + pages = {139--145}, + abstract = {Single- and multi-slice computed tomography (CT) is regarded as the primary imaging tool in traumatology, both in adults and children. For complicated infectious disease and renal tumours, these techniques are recommended only as secondary diagnostic tools. Specifically, multi-slice CT (MSCT) provides excellent spatial resolution, which is a particular advantage for the evaluation of small structures as they are typical in children. However, MSCT offers more information than is required for diagnosis. Therefore, low-dose protocols are necessary for paediatric examinations. The CT dose-index (CTDI(vol)) should not exceed 2 mGy for newborns, 4 mGy for toddlers, 5 mGy for elementary school children, and 8 mGy for adolescents.}, + file = {Puig02a.pdf:pdf\\Puig02a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {12127211}, + month = {8}, + gsid = {15315034383501454370}, + gscites = {15}, +} + +@article{Putz19, + author = {Putzky, Patrick and Karkalousos, Dimitrios and Teuwen, Jonas and Moriakov, Nikita and Bakker, Bart and Caan, Matthan and Welling, Max}, + title = {i-RIM applied to the fastMRI challenge}, + journal = {arXiv:1910.08952}, + year = {2019}, + abstract = {We, team AImsterdam, summarize our submission to the fastMRI challenge (Zbontar et al., 2018). Our approach builds on recent advances in invertible learning to infer models as presented in Putzky and Welling (2019). Both, our single-coil and our multi-coil model share the same basic architecture.}, + file = {Putz19.pdf:pdf\\Putz19.pdf:PDF}, + optnote = {DIAG}, + month = {10}, + ss_id = {646755572428ec82a125c19d371b5c853ce3c050}, + all_ss_ids = {['646755572428ec82a125c19d371b5c853ce3c050']}, + gscites = {27}, +} + +@inproceedings{Rabb14, + author = {Rabbani, S. P. and Maduskar, P. and Philipsen, R. H. H. M. and Hogeweg, L. 
and van Ginneken, B.}, + title = {Effect of image variation on computer aided detection systems}, + booktitle = MI, + year = {2014}, + volume = {9035}, + series = SPIE, + pages = {90352H-1--90352H-8}, + doi = {10.1117/12.2043520}, + abstract = {As the importance of Computer Aided Detection (CAD) systems application is rising in medical imaging field due to the advantages they generate; it is essential to know their weaknesses and try to find a proper solution for them. A common possible practical problem that affects CAD systems performance is: dissimilar training and testing datasets declines the efficiency of CAD systems. In this paper normalizing images is proposed, three different normalization methods are applied on chest radiographs namely (1) Simple normalization (2) Local Normalization (3) Multi Band Local Normalization. The supervised lung segmentation CAD system performance is evaluated on normalized chest radiographs with these three different normalization methods in terms of Jaccard index. As a conclusion the normalization enhances the performance of CAD system and among these three normalization methods Local Normalization and Multi band Local normalization improve performance of CAD system more significantly than the simple normalization}, + file = {Rabb14.pdf:pdf\\Rabb14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {17944010023072660192}, + gscites = {1}, + ss_id = {03582bc12f4bb32136d028e3d09fa07b98b25bb9}, + all_ss_ids = {['03582bc12f4bb32136d028e3d09fa07b98b25bb9']}, +} + +@inproceedings{Rads11, + author = {M. Radstake and M. Velikova and P. Lucas and M. Samulski}, + title = {Critiquing {K}nowledge {R}epresentation in {M}edical {I}mage {I}nterpretation using {S}tructure {L}earning}, + booktitle = {Knowledge Representation for Health-Care (KR4HC)}, + year = {2011}, + volume = {6512}, + series = LNAI, + publisher = {Springer Verlag}, + pages = {56--70}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Ramo12, + author = {J. Ramos and T. Kockelkorn and B. van Ginneken and M. A. Viergever and R. Ramos and A. Campilho}, + title = {Supervised Content Based Image Retrieval Using Radiology Reports}, + booktitle = {Image Analysis and Recognition}, + year = {2012}, + volume = {7325}, + series = LNCS, + pages = {249--258}, + doi = {10.1007/978-3-642-31298-4_30}, + abstract = {Content based image retrieval (CBIR) is employed in medicine to improve radiologists' diagnostic performance. Today effective medical CBIR systems are limited to specific applications, as to reduce the amount of medical knowledge to model. Although supervised approaches could ease the incorporation of medical expertise, its application is not common due to the scarce number of available user annotations. This paper introduces the application of radiology reports to supervise CBIR systems. The concept is to make use of the textual distances between reports to build a transformation in image space through a manifold learning algorithm. A comparison was made between the presented approach and non-supervised CBIR systems, using a Leave-One-Patient-Out evaluation in a database of computer tomography scans of interstitial lung diseases.
Supervised CBIR augmented the mean average precision consistently with an increase between 3 to 8 points, which suggests supervision by radiology reports increases CBIR performance.}, + file = {Ramo12.pdf:pdf\\Ramo12.pdf:PDF}, + optnote = {DIAG}, + gsid = {274380764387987398}, + gscites = {3}, + ss_id = {1df43a13035c094cc8d3eafd058507f4f3e4c57c}, + all_ss_ids = {['1df43a13035c094cc8d3eafd058507f4f3e4c57c']}, +} + +@inproceedings{Ramo13, + author = {J. Ramos and T. Kockelkorn and B. van Ginneken and M. A. Viergever and J. Grutters and R. Ramos and A. Campilho}, + title = {Learning Interstitial Lung Diseases {CT} Patterns from Reports Keywords}, + booktitle = {The Fifth International Workshop on Pulmonary Image Analysis}, + year = {2013}, + pages = {21--32}, + abstract = {The interpretation of CT exams from patients with interstitial lung diseases depends on the correct assessment of associated CT patterns. Computer aided diagnosis systems often study the automatic identification of CT patterns, using the division of the lung in volumes of interest and the use of supervised classification. Despite moderate success, this approach has been hampered by the shortage of medical annotations available to research groups. We propose a new method that collects exams that contain CT patterns through the presence of keywords in radiology reports, to learn pattern models using a multiple instance learning algorithm. We compared our approach to the traditional use of volumes of interest annotations for six interstitial lung diseases patterns. The results show our approach performed comparatively in four of the studied patterns, and poorly for the other two. The results suggest that under certain conditions learning CT patterns from radiology reports is possible, which could foster developments in computer aided diagnosis systems.}, + file = {Ramo13.pdf:pdf\\Ramo13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {10439674960459647694}, + gscites = {2}, + ss_id = {c286748728235f6b06e91e1e0ddb4fb3e2b97412}, + all_ss_ids = {['c286748728235f6b06e91e1e0ddb4fb3e2b97412']}, +} + +@article{Ramo15, + author = {Ramos, Jose and Kockelkorn, Thessa and Ramos, Isabel and Ramos, Rui and van Ginneken, Bram and Viergever, Max and Campilho, Aurelio and Grutters, Jan}, + title = {Content Based Image Retrieval by Metric Learning from Radiology Reports: Application to Interstitial Lung Diseases}, + journal = JBHI, + year = {2015}, + volume = {20}, + number = {1}, + pages = {281--292}, + doi = {10.1109/JBHI.2014.2375491}, + abstract = {Content Based Image Retrieval (CBIR) is a search technology that could aid medical diagnosis by retrieving and presenting earlier reported cases that are related to the one being diagnosed. To retrieve relevant cases, CBIR systems depend on supervised learning to map low level image contents to high level diagnostic concepts. However, the annotation by medical doctors for training and evaluation purposes is a difficult and timeconsuming task, which restricts the supervised learning phase to specific CBIR problems of well defined clinical applications. This paper proposes a new technique that automatically learns the similarity between the several exams from textual distances extracted from radiology reports, thereby successfully reducing the number of annotations needed. Our method firstly infers the relation between patients by using information retrieval techniques to determine the textual distances between patient radiology reports. 
These distances are subsequently used to supervise a metric learning algorithm, that transforms the image space accordingly to textual distances. CBIR systems with different image descriptions and different levels of medical annotations were evaluated, with and without supervision from textual distances, using a database of computer tomography scans of patients with interstitial lung diseases. The proposed method consistently improves CBIR mean average precision, with improvements that can reach 38\%, and more marked gains for small annotation sets. Given the overall availability of radiology reports in Picture Archiving and Communication Systems, the proposed approach can be broadly applied to CBIR systems in different medical problems, and may facilitate the introduction of CBIR in clinical practice.}, + file = {Ramo15.pdf:pdf\\Ramo15.pdf:PDF}, + optnote = {DIAG}, + pmid = {25438332}, + month = {1}, + gsid = {11929678439974614784}, + gscites = {33}, + ss_id = {2d49c0641c15bb0d0d00c11fbc0155c9942a9ba3}, + all_ss_ids = {['2d49c0641c15bb0d0d00c11fbc0155c9942a9ba3']}, +} + +@inproceedings{Raza15, + author = {Razavi, Mohammad and Wang, Lei and Gubern-M\'{e}rida, Albert and Ivanovska, Tatyana and Laue, Hendrik and Karssemeijer, Nico and Hahn, Horst K}, + title = {Towards Accurate Segmentation of Fibroglandular Tissue in Breast {MRI} Using Fuzzy C-Means and Skin-Folds Removal}, + booktitle = {Image Analysis and Processing - ICIAP 2015}, + year = {2015}, + publisher = {Springer}, + pages = {528--536}, + file = {Raza15.pdf:pdf\\Raza15.pdf:PDF}, + optnote = {DIAG}, + gsid = {8743979472382399251}, + gscites = {11}, + ss_id = {ba17f21a7df9005f18a97e1385b83d90709c8ebe}, + all_ss_ids = {['ba17f21a7df9005f18a97e1385b83d90709c8ebe']}, +} + +@book{Raza16, + author = {Razavi, Mohammad and Wang, Lei and Tan, Tao and Karssemeijer, Nico and Linsen, Lars and Frese, Udo and Hahn, Horst K. and Zachmann, Gabriel}, + title = {Novel Morphological Features for Non-mass-like Breast Lesion Classification on DCE-MRI}, + doi = {10.1007/978-3-319-47157-0_37}, + year = {2016}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-47157-0_37}, + file = {Raza16.pdf:pdf\Raza16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Machine Learning in Medical Imaging}, + citation-count = {2}, + automatic = {yes}, + pages = {305-312}, +} + +@article{Reij18, + author = {Reijnen, C. and Kusters-Vandevelde, H. and Abbink, K. and Zusterzeel, P. and van Herwaarden, A. and van der Laak, J. and Massuger, L. and Snijders, M. and Pijnenborg, J. and Bulten, J.}, + title = {Quantification of Leydig cells and stromal hyperplasia in the postmenopausal ovary of women with endometrial carcinoma}, + journal = HUMP, + year = {2018}, + month = {11}, + doi = {10.1016/j.humpath.2018.10.022}, + abstract = {Endometrioid endometrial carcinomas (EECs) are correlated with high serum levels of androgens and estrogen. We hypothesized that Leydig cells and ovarian stromal hyperplasia contribute to postmenopausal ovarian androgen production, and are observed more frequently in EEC patients. Ovaries of postmenopausal women with EEC (n=36) or non-endometrioid endometrial carcinoma (NEEC) (n=19) were examined for the presence of hilar Leydig cells, and compared with ovaries resected for benign conditions (n=22). Leydig cells were counted manually and a Leydig cell density was calculated per mm2 hilar surface. Ovarian stromal hyperplasia was scored as atrophic, moderate hyperplastic, or marked hyperplastic.
In all endometrial carcinomas (ECs), these findings were correlated with the serum levels of sex steroids and hormone receptor expression in their ECs. In EEC patients, mean number of Leydig cells was 282.8 cells compared to 76.3 cells in NEEC patients and 66.4 cells in controls. Leydig cells, marked stromal hyperplasia, and combined presence were observed more frequently in EEC patients compared to NEEC and controls. Combined presence was associated with higher serum sex steroid levels and increased tumor expression of estrogen and progesterone receptor. A cut-off value for Leydig cell hyperplasia could be proposed at a total of 300 Leydig cells bilaterally, examining a representative cross-section of both hili. Concluding, we have quantified hilar Leydig cells and demonstrated that Leydig cells may contribute to the development of EEC by increased androgen production in postmenopausal women. The correlation between sex hormone levels and Leydig cell hyperplasia may support endometrial pathology screening in these women.}, + file = {Reij18.pdf:pdf\\Reij18.pdf:PDF}, + optnote = {DIAG}, + pmid = {30428390}, + ss_id = {d646accbc0394ecfa0da14066a825ca1695c8dac}, + all_ss_ids = {['d646accbc0394ecfa0da14066a825ca1695c8dac']}, + gscites = {3}, +} + +@article{Rein06, + author = {J. M. Reinhardt and B. van Ginneken and M. Sonka}, + title = {Special {I}ssue on {P}ulmonary {I}maging}, + journal = TMI, + year = {2006}, + volume = {25}, + pages = {381-384}, + doi = {10.1109/TMI.2006.870840}, + file = {Rein06.pdf:pdf\\Rein06.pdf:PDF}, + gsid = {1034614471213999356}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + month = {4}, + gscites = {1}, +} + +@inproceedings{Rein09a, + author = {Rianne Reinartz and Bram Platel and Toon Boselie and Henk van Mameren and Henk van Santbrink and B. M. ter Haar Romeny}, + title = {Cervical vertebrae tracking in video-fluoroscopy using the normalized gradient field}, + booktitle = MICCAI, + year = {2009}, + volume = {12}, + series = LNCS, + pages = {524--531}, + doi = {10.1007/978-3-642-04268-3_65}, + abstract = {For patients with neck problems valuable functional and diagnostic information can be obtained from a fluoroscopy video of a flexion-extension movement of the cervical spine. In most cases physicians have to manually extract the vertebrae, making the analysis of these video sequences tedious and time consuming. In this paper we propose an automatic fast and precise method for tracking cervical vertebrae. Our method relies only on a rough selection of template areas of each vertebra in a single frame of the video sequence. Compared to existing automated methods, no contours need to be extracted and no vertebra segmentation is required. Tracking is done with a normalized gradient field, using only the gradient orientations as features. Experimental results show that the algorithm is robust and able to track the vertebrae accurately even if they are partially occluded or if a disc prosthesis is present.}, + file = {Rein09a.pdf:pdf/Rein09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {Pt 1}, + pmid = {20426028}, + gsid = {16417647805314870902}, + gscites = {17}, +} + +@inproceedings{Rein21, + author = {Reinke, Annika and Eisenmann, Matthias and Tizabi, Minu Dietlinde and Sudre, Carole H. 
and Radsch, Tim and Antonelli, Michela and Arbel, Tal and Bakas, Spyridon and Cardoso, Jorge and Cheplygina, Veronika and Farahani, Keyvan and Glocker, Ben and Heckmann-Notzel, Doreen and Isensee, Fabian and Jannin, Pierre and Kahn, Charles and Kleesiek, Jens and Kurc, Tahsin and Kozubek, Michal and Landman, Bennett A. and Litjens, Geert and Maier-Hein, Klaus and Martel, Anne Lousise and Muller, Henning and Petersen, Jens and Reyes, Mauricio and Rieke, Nicola and Stieltjes, Bram and Summers, Ronald M. and Tsaftaris, Sotirios A. and van Ginneken, Bram and Kopp-Schneider, Annette and Jager, Paul and Maier-Hein, Lena}, + booktitle = MIDL, + title = {Common limitations of performance metrics in biomedical image analysis}, + doi = {10.1117/12.2549650}, + abstract = {Diffuse large B-cell lymphoma (DLBCL) is the most common type of B-cell lymphoma. It is characterized by a heterogeneous morphology, genetic changes and clinical behavior. A small specific subgroup of DLBCL, harbouring a MYC gene translocation is associated with worse patient prognosis and outcome. Typically, the MYC translocation is assessed with a molecular test (FISH), that is expensive and time-consuming. Our hypothesis is that genetic changes, such as translocations could be visible as changes in the morphology of an HE-stained specimen. However, it has not proven possible to use morphological criteria for the detection of a MYC translocation in the diagnostic setting due to lack of specificity. + In this paper, we apply a deep learning model to automate detection of the MYC translocations in DLBCL based on HE-stained specimens. The proposed method works at the whole-slide level and was developed based on a multicenter data cohort of 91 patients. All specimens were stained with HE, and the MYC translocation was confirmed using fluorescence in situ hybridization (FISH). The system was evaluated on an additional 66 patients, and obtained AUROC of 0.83 and accuracy of 0.77. The proposed method presents proof of a concept giving insights in the applicability of deep learning methods for detection of a genetic changes in DLBCL. In future work we will evaluate our algorithm for automatic pre-screen of DLBCL specimens to obviate FISH analysis in a large number of patients.}, + file = {:pdf/Rein21.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2021}, + ss_id = {a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97}, + all_ss_ids = {['a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97']}, + gscites = {2}, +} + +@article{Rein21a, + author = {Annika Reinke and Matthias Eisenmann and Minu D. Tizabi and Carole H. Sudre and Tim Radsch and Michela Antonelli and Tal Arbel and Spyridon Bakas and M. Jorge Cardoso and Veronika Cheplygina and Keyvan Farahani and Ben Glocker and Doreen Heckmann-Notzel and Fabian Isensee and Pierre Jannin and Charles E. Kahn and Jens Kleesiek and Tahsin Kurc and Michal Kozubek and Bennett A. Landman and Geert Litjens and Klaus Maier-Hein and Bjoern Menze and Henning Muller and Jens Petersen and Mauricio Reyes and Nicola Rieke and Bram Stieltjes and Ronald M. Summers and Sotirios A. Tsaftaris and Bram van Ginneken and Annette Kopp-Schneider and Paul Jager and Lena Maier-Hein}, + journal = {arXiv preprint arXiv:2104.05642}, + url = {https://arxiv.org/abs/2104.05642}, + title = {Common Limitations of Image Processing Metrics: A Picture Story}, + abstract = {While the importance of automatic image analysis is increasing at an enormous pace, recent meta-research revealed major flaws with respect to algorithm validation. 
Specifically, performance metrics are key for objective, transparent and comparative performance assessment, but relatively little attention has been given to the practical pitfalls when using specific metrics for a given image analysis task. A common mission of several international initiatives is therefore to provide researchers with guidelines and tools to choose the performance metrics in a problem-aware manner. This dynamically updated document has the purpose to illustrate important limitations of performance metrics commonly applied in the field of image analysis. The current version is based on a Delphi process on metrics conducted by an international consortium of image analysis experts.}, + file = {:21c - Common Limitations of Image Processing Metrics_ a Picture Story.pdf:PDF}, + month = apr, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2021}, + ss_id = {787ef2981e94e481432cb2a1296903676ec80fa9}, + all_ss_ids = {['787ef2981e94e481432cb2a1296903676ec80fa9']}, + gscites = {94}, +} + +@article{Rein23, + author = {Reinke, Annika and Tizabi, Minu D. and Baumgartner, Michael and Eisenmann, Matthias and Heckmann-Notzel, Doreen and Kavur, A. Emre and Radsch, Tim and Sudre, Carole H. and Acion, Laura and Antonelli, Michela and Arbel, Tal and Bakas, Spyridon and Benis, Arriel and Blaschko, Matthew and Buttner, Florian and Cardoso, M. Jorge and Cheplygina, Veronika and Chen, Jianxu and Christodoulou, Evangelia and Cimini, Beth A. and Collins, Gary S. and Farahani, Keyvan and Ferrer, Luciana and Galdran, Adrian and van Ginneken, Bram and Glocker, Ben and Godau, Patrick and Haase, Robert and Hashimoto, Daniel A. and Hoffman, Michael M. and Huisman, Merel and Isensee, Fabian and Jannin, Pierre and Kahn, Charles E. and Kainmueller, Dagmar and Kainz, Bernhard and Karargyris, Alexandros and Karthikesalingam, Alan and Kenngott, Hannes and Kleesiek, Jens and Kofler, Florian and Kooi, Thijs and Kopp-Schneider, Annette and Kozubek, Michal and Kreshuk, Anna and Kurc, Tahsin and Landman, Bennett A. and Litjens, Geert and Madani, Amin and Maier-Hein, Klaus and Martel, Anne L. and Mattson, Peter and Meijering, Erik and Menze, Bjoern and Moons, Karel G. M. and Muller, Henning and Nichyporuk, Brennan and Nickel, Felix and Petersen, Jens and Rafelski, Susanne M. and Rajpoot, Nasir and Reyes, Mauricio and Riegler, Michael A. and Rieke, Nicola and Saez-Rodriguez, Julio and Sanchez, Clara I. and Shetty, Shravya and van Smeden, Maarten and Summers, Ronald M. and Taha, Abdel A. and Tiulpin, Aleksei and Tsaftaris, Sotirios A. and Van Calster, Ben and Varoquaux, Gael and Wiesenfarth, Manuel and Yaniv, Ziv R. and Jager, Paul F. and Maier-Hein, Lena}, + year = {2023}, + month = {2}, + title = {Understanding metric-related pitfalls in image analysis validation}, + abstract = {Validation metrics are key for the reliable tracking of scientific progress and for bridging the current chasm between artificial intelligence (AI) research and its translation into practice. However, increasing evidence shows that particularly in image analysis, metrics are often chosen inadequately in relation to the underlying research problem. This could be attributed to a lack of accessibility of metric-related knowledge: While taking into account the individual strengths, weaknesses, and limitations of validation metrics is a critical prerequisite to making educated choices, the relevant knowledge is currently scattered and poorly accessible to individual researchers. 
Based on a multi-stage Delphi process conducted by a multidisciplinary expert consortium as well as extensive community feedback, the present work provides the first reliable and comprehensive common point of access to information on pitfalls related to validation metrics in image analysis. Focusing on biomedical image analysis but with the potential of transfer to other fields, the addressed pitfalls generalize across application domains and are categorized according to a newly created, domain-agnostic taxonomy. To facilitate comprehension, illustrations and specific examples accompany each pitfall. As a structured body of information accessible to researchers of all levels of expertise, this work enhances global comprehension of a key topic in image analysis validation.}, + file = {:pdf/Rein23.pdf:PDF}, + journal = {arXiv:2302.01790}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + ss_id = {1898998f27ce0750a42f8f3ca2ed7e292f05aee2}, + all_ss_ids = {['1898998f27ce0750a42f8f3ca2ed7e292f05aee2']}, + gscites = {8}, +} + +@inproceedings{Reis15, + author = {Reis, Sara and Eiben, Bjoern and Mertzanidou, Thomy and Hipwell, John and Hermsen, Meyke and van der Laak, Jeroen and Pinder, Sarah and Bult, Peter and Hawkes, David}, + title = {Minimum slice spacing required to reconstruct 3D shape for serial sections of breast tissue for comparison with medical imaging}, + doi = {10.1117/12.2081909}, + year = {2015}, + abstract = {There is currently an increasing interest in combining the information obtained from radiology and histology with the intent of gaining a better understanding of how different tumour morphologies can lead to distinctive radiological signs which might predict overall treatment outcome. Relating information at different resolution scales is challenging. Reconstructing 3D volumes from histology images could be the key to interpreting and relating the radiological image signal to tissue microstructure. The goal of this study is to determine the minimum sampling (maximum spacing between histological sections through a fixed surgical specimen) required to create a 3D reconstruction of the specimen to a specific tolerance. We present initial results for one lumpectomy specimen case where 33 consecutive histology slides were acquired.}, + url = {http://dx.doi.org/10.1117/12.2081909}, + file = {Reis15.pdf:pdf\Reis15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Imaging 2015: Digital Pathology}, + citation-count = {2}, + automatic = {yes}, +} + +@article{Rema16, + author = {Romain Remark and Taha Merghoub and Niels Grabe and Geert Litjens and Diane Damotte and Jedd D. Wolchok and Miriam Merad and Sacha Gnjatic}, + title = {In-depth tissue profiling using multiplexed immunohistochemical consecutive staining on single slide}, + journal = {Science Immunology}, + year = {2016}, + volume = {1}, + number = {1}, + month = {6}, + pages = {aaf6925--aaf6925}, + doi = {10.1126/sciimmunol.aaf6925}, + url = {http://dx.doi.org/10.1126/sciimmunol.aaf6925}, + abstract = {Despite remarkable recent achievements of immunotherapy strategies in cancer treatment, clinical responses remain limited to subsets of patients. Predictive markers of disease course and response to immunotherapy are urgently needed. Recent results have revealed the potential predictive value of immune cell phenotype and spatial distribution at the tumor site, prompting the need for multidimensional immunohistochemical analyses of tumor tissues. 
To address this need, we developed a sample-sparing, highly multiplexed immunohistochemistry technique based on iterative cycles of tagging, image scanning, and destaining of chromogenic substrate on a single slide. This assay, in combination with a newly developed automated digital landscaping solution, democratizes access to high-dimensional immunohistochemical analyses by capturing the complexity of the immunome using routine pathology standards. Applications of the method extend beyond cancer to screen and validate comprehensive panels of tissue-based prognostic and predictive markers, perform indepth in situ monitoring of therapies, and identify targets of disease.}, + file = {Rema16.pdf:pdf\\Rema16.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {28783673}, + publisher = {American Association for the Advancement of Science ({AAAS})}, + gsid = {15567873193529139666}, + gscites = {130}, + ss_id = {69cc46b96b0e8b0926de51d7a5bf83e3c29897f9}, + all_ss_ids = {['69cc46b96b0e8b0926de51d7a5bf83e3c29897f9']}, +} + +@inproceedings{Riad13, + author = {Medhat M. Riad and Bram Platel and Frank-Erik de Leeuw and Nico Karssemeijer}, + title = {Detection of white matter lesions in cerebral small vessel disease}, + booktitle = MI, + year = {2013}, + volume = {8670}, + series = SPIE, + doi = {10.1117/12.2007940}, + abstract = {White matter lesions (WML) are diffuse white matter abnormalities commonly found in older subjects and are important indicators of stroke, multiple sclerosis, dementia and other disorders. We present an automated WML detection method and evaluate it on a dataset of small vessel disease (SVD) patients. In early SVD, small WMLs are expected to be of importance for the prediction of disease progression. Commonly used WML segmentation methods tend to ignore small WMLs and are mostly validated on the basis of total lesion load or a Dice coefficient for all detected WMLs. Therefore, in this paper, we present a method that is designed to detect individual lesions, large or small, and we validate the detection performance of our system with FROC (free-response ROC) analysis. For the automated detection, we use supervised classification making use of multimodal voxel based features from different magnetic resonance imaging (MRI) sequences, including intensities, tissue probabilities, voxel locations and distances, neighborhood textures and others. After preprocessing, including co-registration, brain extraction, bias correction, intensity normalization, and nonlinear registration, ventricle segmentation is performed and features are calculated for each brain voxel. A gentle-boost classifier is trained using these features from 50 manually annotated subjects to give each voxel a probability of being a lesion voxel. We perform ROC analysis to illustrate the benefits of using additional features to the commonly used voxel intensities; significantly increasing the area under the curve (Az) from 0.81 to 0.96 (p<0.05). We perform the FROC analysis by testing our classifier on 50 previously unseen subjects and compare the results with manual annotations performed by two experts. 
Using the first annotator results as our reference, the second annotator performs at a sensitivity of 0.90 with an average of 41 false positives per subject while our automated method reached the same level of sensitivity at approximately 180 false positives per subject.}, + file = {Riad13.pdf:pdf\\Riad13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {8090823468847966154}, + gscites = {4}, + ss_id = {171511e9d4b9a921232d02464304ee443c23f897}, + all_ss_ids = {['171511e9d4b9a921232d02464304ee443c23f897']}, +} + +@article{Ridg15, + author = {Ridge, Carole A. and Yildirim, Afra and Boiselle, Phillip M. and Franquet, Tomas and Schaefer-Prokop, Cornelia M. and Tack, Denis and Gevenois, Pierre Alain and Bankier, Alexander A.}, + title = {Differentiating between Subsolid and Solid Pulmonary Nodules at {CT}: Inter- and Intraobserver Agreement between Experienced Thoracic Radiologists}, + journal = Radiology, + year = {2016}, + pages = {888-96}, + doi = {10.1148/radiol.2015150714}, + abstract = {Purpose To quantify the reproducibility and accuracy of experienced thoracic radiologists in differentiating between subsolid and solid pulmonary nodules at CT. Materials and Methods The institutional review board of Beth Israel Deaconess Medical Center approved this multicenter study. Six thoracic radiologists, with a mean of 21 years of experience in thoracic radiology (range, 17-22 years), selected images of 10 solid and 10 subsolid nodules to create a database of 120 nodules; this selection served as the reference standard. Each radiologist then interpreted 120 randomly ordered nodules in two different sessions that were separated by a minimum of 3 weeks. The radiologists classified whether or not each nodule was subsolid. Inter- and intraobserver agreement was assessed with a kappa statistic. The number of correct classifications was calculated and correlated with nodule size by using Bland-Altman plots. The relationship between disagreement and nodule morphologic characteristics was analyzed by calculating the intraclass correlation coefficient. Results Interobserver agreement (kappa) was 0.619 (range, 0.469-0.745; 95\% confidence interval (CI): 0.576, 0.663) and 0.670 (range, 0.440-0.839; 95\% CI: 0.608, 0.733) for interpretation sessions 1 and 2, respectively. Intraobserver agreement (kappa) was 0.792 (95\% CI: 0.750, 0.833). Averaged for interpretation sessions, correct classification was achieved by all radiologists for 58\% (70 of 120) of nodules. Radiologists agreed with their initial determination (the reference standard) in 77\% of cases (range, 45\%-100\%). Nodule size weakly correlated with correct classification (long axis: Spearman rank correlation coefficient, rs = 0.161 and P = .049; short axis: rs = 0.128 and P = .163). Conclusion The reproducibility and accuracy of thoracic radiologists in classifying whether or not a nodule is subsolid varied in the retrospective study. This inconsistency may affect surveillance recommendations and prognostic determinations.
(c) RSNA, 2015. Online supplemental material is available for this article.}, + file = {Ridg15.pdf:pdf\\Ridg15.pdf:PDF}, + optnote = {DIAG}, + pmid = {26458208}, + month = {3}, + all_ss_ids = {['75bc9847c96d8f319f82d1b9ba01b5a396247152', 'a6a2fed1ee525c0d2a35ae6454660eca53ce03fc', 'fdf663cc9dda29d55f2e2f44f9876f3b08f854f8']}, + gscites = {69}, +} + +@conference{Riel13, + author = {Sarah J van Riel and Cornelia M Schaefer-Prokop and Eva M van Rikxoort and Bram van Ginneken and Mathias Prokop and Steven Schalekamp and Colin Jacobs and Pim A de Jong and Hester A Gietema and Ernst Th Scholten}, + title = {Impact of section thickness on classification of pulmonary nodules into solid, part-solid, and non-solid: an observer study}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE Recently published recommendations by the Fleischner Society differentiate between solid, part-solid, and non-solid nodules. A section thickness of 1mm is recommended for evaluation. It is, however, common practice to reconstruct thicker (3mm or 5mm) sections to reduce the number of sections to evaluate. Purpose of this study was to evaluate the impact of section thickness on nodule classification agreement. METHOD AND MATERIALS 20 part-solid, 10 non-solid and 10 solid nodular lesions were randomly selected from the {NELSON} screening trial. A reference standard was established using the consensus reading of two experienced chest radiologists. Data had been acquired using a low dose (16x0.75mm, 120-140 kVp, 30 mAs) protocol. Complete {CT}s were shown with axial and coronal projections with either 1mm, 3mm or 5mm section thickness, the latter two with 1mm overlap. Readers could interactively scroll through the scans, use magnification, windowing and manual calibre measurements as warranted. Four readers of varying experience were asked to classify the lesions as solid (1), part-solid (2), or non-solid (3). All readings were done twice in six sessions, in which all permutations of nodules and section thicknesses were presented in different random orders. We report percentage agreement between observers and the consensus reference. All results stated are averaged over all reading sessions. RESULTS Mean agreement rate with the reference standard decreased from 85% (range 78-95%) to 77% (range 68-84%) and 75% (range 68-84%), for 1mm, 3mm, and 5mm section thickness, respectively. Readers were affected differently by increasing section thickness. The most experienced reader was influenced the least (agreement = 84-82-80%). Two readers demonstrated a major decrease in performance already for 3mm (81-72-70% and 91-78-81%). One reader showed a stepwise performance decline (86-77-69%). CONCLUSION Nodule classification is affected by section thickness. The degree of loss of accuracy appears to be reader dependent. CLINICAL RELEVANCE/APPLICATION Nodule classification is impaired by increasing section thickness which may have consequences for patient management.
Visual classification therefore requires acquisition and storage of 1mm sections.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Riel13a, + author = {Sarah J van Riel and Eva M van Rikxoort and Colin Jacobs and Steven Schalekamp and Mathias Prokop and Bram van Ginneken and Pim A de Jong and Ernst Th Scholten and Hester A Gietema and Cornelia M Schaefer-Prokop}, + title = {Intra- and inter-reader variability of pulmonary nodule classification according to the Fleischner guidelines: clinical consequences}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE Recently published recommendations by the Fleischner Society differentiate between non-solid and part-solid nodules. For the latter follow up or invasive diagnostic procedures are recommended depending on the size of the solid core. For solid nodules, different recommendations apply. We evaluated inter- and intra-reader variability of nodule classification and the impact on patient management. METHOD AND MATERIALS 20 part-solid, 10 non-solid and 10 solid nodular lesions were randomly selected from the {NELSON} screening trial. Data had been acquired using a low dose (16x0.75mm, 120-140 kVp, 30 mAs) protocol. Complete {CT}s were shown with axial and coronal projections with 1mm section thickness. Readers could interactively scroll through the scans, use magnification, windowing and manual calibre measurements as warranted. Four readers of varying experience were asked to classify the lesions as solid (1), part-solid with a core > 5mm and < 5mm, respectively (2 and 3), or as non-solid (4). All readings were done twice in six sessions, in which all permutations of nodules and section thicknesses were presented in different random orders. Inter- and intra-reader agreement were calculated using Cohen's kappa statistics. To evaluate possible consequences on patient management, the number of differences between assigned scores of 1 or 2 (invasive diagnosis) and scores of 3 or 4 (follow up) were calculated. All results stated are averaged over all reading sessions. RESULTS Inter-reader agreement was low with mean kappa of 0.33 (range 0.02-0.58). Intra-reader agreement was moderate with mean kappa 0.54 (range 0.31-0.72). Patient management would have differed in 27% caused by interreader disagreement, and would have changed in 8% caused by intrareader variability. 28% of all nodules were uniformly classified over all reading sessions. Of these, 18% were classified as solid and 73% as non-solid. CONCLUSION Inter- and intra-reader agreement are low and moderate for the classification of pulmonary nodules according to Fleischner criteria if pure visual analysis is used. This may affect patient management. CLINICAL RELEVANCE/APPLICATION Variability in nodule classification may have consequences on patient management; use of digital analysis tools appears to be necessary to improve classification.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Riel15, + author = {van Riel, Sarah J. and S\'{a}nchez, Clara I. and Bankier, Alexander A. and Naidich, David P. and Verschakelen, Johnny and Scholten, Ernst T. and de Jong, Pim A.
and Jacobs, Colin and van Rikxoort, Eva and Peters-Bax, Liesbeth and Snoeren, Miranda and Prokop, Mathias and van Ginneken, Bram and Schaefer-Prokop, Cornelia}, + title = {Observer Variability for Classification of Pulmonary Nodules on Low-Dose {CT} Images and Its Effect on Nodule Management}, + journal = Radiology, + year = {2015}, + volume = {277}, + number = {3}, + pages = {863-871}, + doi = {10.1148/radiol.2015142700}, + abstract = {Purpose To examine the factors that affect inter- and intraobserver agreement for pulmonary nodule type classification on low-radiation-dose computed tomographic (CT) images, and their potential effect on patient management. Materials and Methods Nodules (n = 160) were randomly selected from the Dutch-Belgian Lung Cancer Screening Trial cohort, with equal numbers of nodule types and similar sizes. Nodules were scored by eight radiologists by using morphologic categories proposed by the Fleischner Society guidelines for management of pulmonary nodules as solid, part solid with a solid component smaller than 5 mm, part solid with a solid component 5 mm or larger, or pure ground glass. Inter- and intraobserver agreement was analyzed by using Cohen kappa statistics. Multivariate analysis of variance was performed to assess the effect of nodule characteristics and image quality on observer disagreement. Effect on nodule management was estimated by differentiating CT follow-up for ground-glass nodules, solid nodules 8 mm or smaller, and part-solid nodules smaller than 5 mm from immediate diagnostic work-up for solid nodules larger than 8 mm and part-solid nodules 5 mm or greater. Results Pair-wise inter- and intraobserver agreement was moderate (mean kappa, 0.51 [95\% confidence interval, 0.30, 0.68] and 0.57 [95\% confidence interval, 0.47, 0.71]). Categorization as part-solid nodules and location in the upper lobe significantly reduced observer agreement (P = .012 and P < .001, respectively). By considering all possible reading pairs (28 possible combinations of observer pairs x 160 nodules = 4480 possible agreements or disagreements), a discordant nodule classification was found in 36.4\% (1630 of 4480), related to presence or size of a solid component in 88.7\% (1446 of 1630). Two-thirds of these discrepant readings (1061 of 1630) would have potentially resulted in different nodule management. Conclusion There is moderate inter- and intraobserver agreement for nodule classification by using current recommendations for low-radiation-dose CT examinations of the chest. Discrepancies in nodule categorization were mainly caused by disagreement on the size and presence of a solid component, which may lead to different management in the majority of cases with such discrepancies. (c) RSNA, 2015.}, + file = {Riel15.pdf:pdf\\Riel15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26020438}, + month = {12}, + gsid = {13650233394438006732}, + gscites = {150}, + ss_id = {a920a54da22d5e6114588d1ad78fadba534c1ad4}, + all_ss_ids = {['a920a54da22d5e6114588d1ad78fadba534c1ad4']}, +} + +@conference{Riel15b, + author = {S.J. van Riel and K. Chung and E.T. Scholten and P.A. de Jong and C.M. Schaefer-Prokop and B.
van Ginneken}, + title = {{ACR} Lung-{RADS} guidelines for pure ground-glass nodules: is a threshold of 20mm adequate?}, + booktitle = {ECR}, + year = {2015}, + abstract = {Purpose: The ACR Lung-RADS for management of pulmonary nodules categorizes pure ground-glass nodules (GGN) into a 1 year (category 2) or 6 months (category 3) follow-up CT scan, using a cutoff value of 20mm as threshold. An analysis of GGNs categorized according to the Lung-RADS is presented. Methods and Materials: All pure GGNs from two sites of the NELSON screening trial were selected. Morphology was determined in the first CT scan they were annotated. Each nodule was considered only once. Diameter measurements were taken from the NELSON database. Nodules were categorized according to the ACR Lung-RADS, using the proposed threshold of 20mm. Changes in categorization were calculated when lowering the threshold to 15 and 10mm. Results: 86 GGNs were included, with 16 malignant GGNs (19%) including 9 invasive adenocarcinomas. For a 20mm threshold, only 2% of GGNs were assigned to Lung-RADS 3. For a 15mm threshold this increased to 16%; for a 10mm threshold to 53%. Of the 16 cancers, only 1 was assigned to Lung-RADS 3 using a 20mm threshold. This increased to 4 and 12 for thresholds of 15 and 10mm, respectively. Conclusion: Using a threshold of 20mm to select pure GGNs that warrant shorter follow-up will miss almost all cancers. A less conservative threshold of 10mm - as has been suggested by the Fleischner Society - will select three-quarters of the cancers at the expense of an overall increased number of follow-up CTs.}, + optnote = {RADIOLOGY DIAG}, +} + +@conference{Riel15c, + author = {S.J. van Riel and F. Ciompi and M. Winkler Wille and E.Th. Scholten and N. Sverzellati and S. Rossi and A. Dirksen and M. Brink and R. Wittenberg and M. Naqibullah and M. Prokop and C. Schaefer-Prokop and B. van Ginneken}, + title = {Can morphological features differentiate between malignant and benign pulmonary nodules, detected in a screen setting?}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE Existing nodule classification systems and risk models (e.g., McWilliams model, LungRADS) consider only nodule type, size, growth, and the presence of a spiculated border. However, radiologists consider additional morphological features when assigning a malignancy risk. Goal of the study was to determine the power of additional morphological features to differentiate between benign and malignant nodules. METHOD AND MATERIALS All 60 cancers were selected from the Danish Lung Cancer Screening Trial, in the first scan where they were visible, and a benign set of 120 randomly selected and 120 size-matched benign nodules from baseline scans were included, all from different participants. Data had been acquired using a low-dose (16x0.75mm, 120 kVp, 40 mAs) protocol, and 1mm section thickness reconstruction. Seven radiologists were asked to score the presence of morphological features for each nodule referring to density distribution (homogeneous, inhomogeneous, high, low), lesion margin (spiculation, lobulation, demarcation by interlobular septa, sharply-defined, ill-defined), lesion surrounding (distortion of the surrounding parenchyma, pleural/fissure retraction, attachment to pleura, fissure or vessel) and lesion architecture (thickened wall of a bulla, bubbles, air bronchogram). Separately per observer and feature, chi square analysis was used to determine the power to discriminate between benign and malignant nodules.
Features with a p-value <0.05 in >=4 observers are reported. RESULTS Significant differences were seen for inhomogeneous density distribution (p <0.001-0.003) and pleural/fissure retraction (p < 0.001-0.047) in 7 observers. The presence of bubbles (p <0.001-0.025), spiculation (p <0.001), lobulation (p <0.001), and an ill-defined nodule border (p<0.001-0.012) were significant in 6 observers. The presence of a thickened bulla wall in 5 observers (p<0.001-0.042), and air bronchogram (p<0.001-0.006) and distortion of surrounding architecture (p<0.001-0.004) was significantly different in 4 observers. CONCLUSION We have identified several morphological features that are significantly associated with malignancy of pulmonary nodules, but not included in current risk prediction models. CLINICAL RELEVANCE/APPLICATION Morphological features can be used to differentiate malignant from benign nodules. Further studies will show whether integration of more morphological features will increase the power of risk prediction.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Riel15d, + author = {S.J. van Riel and F. Ciompi and M. Winkler Wille and E.Th. Scholten and A. Dirksen and K. Chung and M. Prokop and C. Schaefer-Prokop and B. van Ginneken}, + title = {Comparing LungRADS and the McWilliams nodule malignancy score: which approach works best to select screen detected pulmonary nodules for more aggressive followup?}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE In 2014 LungRADS was published to standardize CT lung screening reporting and management, based on nodule type, size, and growth. In 2013 the McWilliams model was published providing a nodule malignancy probability based on nodule size, type, morphology and subject characteristics. Threshold of the McWilliams score provides an alternative over LungRADS categories to determine workup for screen-detected nodules. We compare both approaches on an independent data set. METHOD AND MATERIALS All 60 cancers were selected from the Danish Lung Cancer Screening Trial, in the first scan where they were visible, and 120 randomly selected benign nodules from baseline scans were added, all from different participants. Data had been acquired using a low-dose (16x0.75mm, 120kVp, 40mAs) protocol, and 1mm section thickness reconstruction. For each nodule, the malignancy probability was calculated using McWilliams model 2b. Parameters were available from the screening database or scored by an expert radiologist. Completely calcified nodules and perifissural nodules were given a score of 0, in accordance with the McWilliams model. All nodules were categorized into their LungRADS category based on nodule type and diameter. Perifissural nodules were treated as regular solid nodules, in accordance with LungRADS guidelines. Sensitivity and specificity were calculated, for each LungRADS category cutoff. For each specificity level, corresponding sensitivity of the McWilliams model was determined. RESULTS McWilliams performed substantially better than LungRADS in selecting malignant nodules for more aggressive followup. Defining LungRADS category 2/3/4A/4B and up as a positive screening result, nodule malignancy specificity was 21%/65%/86%/99% and sensitivity was 100%/85%/58%/32%. At the same specificities, McWilliams' sensitivity was higher with 100%/96%/86%/45%. CONCLUSION For every cutoff level in LungRADS, the McWilliams model operating at the same specificity has superior sensitivity to differentiate malignant from benign nodules.
CLINICAL RELEVANCE/APPLICATION The McWilliams model seems to be a better tool than LungRADS to provide a malignancy risk and help radiologists determine which subgroup of nodules detected in a screening setting need more invasive workup.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Riel15e, + author = {S.J. van Riel and F. Ciompi and M. Winkler Wille and M. Naqibullah and E.Th. Scholten and C. Schaefer-Prokop and B. van Ginneken}, + title = {Lung-RADS versus the McWilliams nodule malignancy score for risk prediction: an evaluation using lesions from the DLCST Trial}, + booktitle = {World Conference on Lung Cancer}, + year = {2015}, + abstract = {PURPOSE: Lung-RADS published in 2014 by the American College of Radiology is based on results of the literature and expert opinion and uses nodule type, size, and growth to recommend nodule management adjusted to malignancy risk. The McWilliams model published in 2013 is based on baseline screen detected nodules from the Canadian screening trial and provides a nodule malignancy probability based on nodule size, type, morphology and subject characteristics. We compare the performance of both approaches on an independent data set. METHODS: We selected 60 cancers from the Danish Lung Cancer Screening Trial as presented in the first scan they were visible, and randomly added 120 benign nodules from baseline scans, all from different participants. Data had been acquired using a low-dose (16x0.75mm, 120kVp, 40mAs) protocol, and 1mm section thickness reconstruction. For each nodule, the malignancy probability was calculated using McWilliams model 2b. Parameters were available from the screening database or scored by an expert radiologist. For the McWilliams model completely calcified nodules and perifissural nodules were assigned a malignancy probability of 0, in accordance with the McWilliams model. All nodules were categorized into their Lung-RADS category based on nodule type and diameter. Perifissural nodules were treated as solid nodules, in accordance with Lung-RADS guidelines. For each Lung-RADS category cut-off sensitivity and specificity were calculated. Corresponding sensitivities and specificities using the McWilliams model were determined. RESULTS: McWilliams performed superiorly to Lung-RADS in selecting malignant nodules for more aggressive follow-up. Defining Lung-RADS category 2/3/4A/4B and higher as a positive screening result, specificities to exclude lung malignancy were 21%/65%/86%/99% and vice versa sensitivities to predict malignancy were 100%/85%/58%/32%. At the same sensitivity levels, McWilliams model yielded overall higher specificities with 2%/86%/98%/100%, respectively. Similarly, at the same specificities McWilliams' model achieved higher sensitivities with 100%/95%/85%/48%, respectively. CONCLUSION: For every cut-off level of Lung-RADS, the McWilliams model yields superior specificity to reduce unnecessary work-up for benign nodules, and higher sensitivity to predict malignancy.
The McWilliams model seems to be a better tool than Lung-RADS to provide a malignancy risk, thus reducing unnecessary work-up and helping radiologists determine which subgroup of nodules detected in a screening setting need more invasive work-up.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Riel17, + author = {van Riel, Sarah J and Ciompi, Francesco and Jacobs, Colin and Winkler Wille, Mathilde M and Scholten, Ernst Th and Naqibullah, Matiullah and Lam, Stephen and Prokop, Mathias and Schaefer-Prokop, Cornelia and van Ginneken, Bram}, + title = {Malignancy risk estimation of screen-detected nodules at baseline {CT}: comparison of the {PanCan} model, {Lung-RADS} and {NCCN} guidelines}, + journal = ER, + year = {2017}, + volume = {27}, + number = {10}, + pages = {4019-4029}, + doi = {10.1007/s00330-017-4767-2}, + abstract = {To compare the PanCan model, Lung-RADS and the 1.2016 National Comprehensive Cancer Network (NCCN) guidelines for discriminating malignant from benign pulmonary nodules on baseline screening CT scans and the impact diameter measurement methods have on performances. From the Danish Lung Cancer Screening Trial database, 64 CTs with malignant nodules and 549 baseline CTs with benign nodules were included. Performance of the systems was evaluated applying the system's original diameter definitions: D(longest-C) (PanCan), D(meanAxial) (NCCN), both obtained from axial sections, and D(mean3D) (Lung-RADS). Subsequently all diameter definitions were applied uniformly to all systems. Areas under the ROC curves (AUC) were used to evaluate risk discrimination. PanCan performed superiorly to Lung-RADS and NCCN (AUC 0.874 vs. 0.813, p = 0.003; 0.874 vs. 0.836, p = 0.010), using the original diameter specifications. When uniformly applying D(longest-C), D(mean3D) and D(meanAxial), PanCan remained superior to Lung-RADS (p? 2% these limits were 5.6% and 6.2%, respectively. For T = -910 the mean emphysema score for segments R2, 6, 9 and 10 was below 16.3% (11.2% to 16.2% ), while the mean score for all other segments was above 18.4% (18.5% to 30.3%). In the subgroup with total scores > 10% these limits were 20.0% and 22.1%. CONCLUSION There is not only lobar variability in distribution of emphysema in chronic smokers but also segmental with segment R2, and segments 6, 9 and 10 of both lungs being least affected. CLINICAL RELEVANCE/APPLICATION The results provide new insights about the distribution of smoking-related emphysema.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Rikx08, + author = {E. M. van Rikxoort and I. I{\v{s}}gum and M. Staring and S. Klein and B. van Ginneken}, + title = {Adaptive {L}ocal {M}ulti-{A}tlas {S}egmentation: {A}pplication to {H}eart {S}egmentation in {C}hest {CT} {S}cans}, + booktitle = MI, + year = {2008}, + series = SPIE, + pages = {691407-1--691407-6}, + doi = {10.1117/12.772301}, + abstract = {{S}egmentation of anatomical structures is a prerequisite for many medical image analysis tasks. {W}e propose a method that integrates local voxel classification and global shape models. {T}he method starts by computing a local feature vector for every voxel and mapping this, via a classifier trained from example segmentations, to a probability that the voxel belongs to the structure to be segmented. {N}ext, this probabilistic output is entered into a global shape model. {T}his shape model is constructed by mapping aligned blurred versions of reference segmentations of the training data into a vector space and applying principal component analysis ({PCA}). 
{T}he mapping onto a vector space that is applied guarantees valid results from the {PCA}. {A}n advantage of using such a shape model is that there is no need to define corresponding landmarks on all training scans, which is a hard task on 3{D} data. {S}egmentation of unseen test data is performed by a least squares fit of the results of the voxel classification, after alignment and blurring, into the {PCA} space. {T}he result of this procedure is for each voxel a probability that it belongs to the structure to be segmented conditioned on both local and global information. {W}e demonstrate the effectiveness of the method on segmentation of lungs containing pathologic abnormalities in 3{D} {CT} data.}, + file = {Rikx08.pdf:pdf\\Rikx08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {403647541700125074}, + gscites = {24}, + ss_id = {024bb3acac8036e8a335b909f91c78391dcd4868}, + all_ss_ids = {['024bb3acac8036e8a335b909f91c78391dcd4868']}, +} + +@article{Rikx08a, + author = {E. M. van Rikxoort and B. van Ginneken and M. Klik and M. Prokop}, + title = {Supervised enhancement filters: application to fissure detection in chest {CT} scans}, + journal = TMI, + year = {2008}, + volume = {27}, + pages = {1--10}, + doi = {10.1109/TMI.2007.900447}, + abstract = {{I}n medical image processing, many filters have been developed to enhance certain structures in 3-{D} data. {I}n this paper, we propose to use pattern recognition techniques to design more optimal filters. {T}he essential difference with previous approaches is that we provide a system with examples of what it should enhance and suppress. {T}his training data is used to construct a classifier that determines the probability that a voxel in an unseen image belongs to the target structure(s). {T}he output of a rich set of basis filters serves as input to the classifier. {I}n a feature selection process, this set is reduced to a compact, efficient subset. {W}e show that the output of the system can be reused to extract new features, using the same filters, that can be processed by a new classifier. {S}uch a multistage approach further improves performance. {W}hile the approach is generally applicable, in this work the focus is on enhancing pulmonary fissures in 3-{D} computed tomography ({CT}) chest scans. {A} supervised fissure enhancement filter is evaluated on two data sets, one of scans with a normal clinical dose and one of ultra-low dose scans. {R}esults are compared with those of a recently proposed conventional fissure enhancement filter. {I}t is demonstrated that both methods are able to enhance fissures, but the supervised approach shows better performance; the areas under the receiver operating characteristic ({ROC}) curve are 0.98 versus 0.90, for the normal dose data and 0.97 versus 0.87 for the ultra low dose data, respectively.}, + file = {Rikx08a.pdf:pdf\\Rikx08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {18270056}, + gsid = {2240117688941041987}, + gscites = {95}, + ss_id = {728b2b4783db4b9b2fe53835deea9bab95410ae8}, + all_ss_ids = {['728b2b4783db4b9b2fe53835deea9bab95410ae8']}, +} + +@conference{Rikx08b, + author = {E. M. van Rikxoort and B. van Ginneken and J. G. Goldin and M. S. Brown and M. 
Prokop}, + title = {Automatic {Q}uantification of {C}ompleteness of {P}ulmonary {F}issures on a {L}arge {D}atabase}, + booktitle = RSNA, + year = {2008}, + abstract = {PURPOSE To investigate the variation in incomplete pulmonary fissures on volumetric high-resolution CT scans using a completely automatic method. METHOD AND MATERIALS A set of 1000 volumetric CT scans was randomly selected from the NELSON study, a lung cancer screening trial with low-dose CT (16x 0.75 mm collimation, 30 mAs). The lungs were automatically segmented and the pulmonary fissures were enhanced using a k-nearest neighbor classifier employing Hessian eigenvalues and gray value information. Based on the lung and fissure information, a linear discriminant classifier was trained to assign each voxel to a pulmonary lobe based on its relative position in the lung and its distance from and position relative to the fissures. To determine the completeness of the pulmonary fissures, each point on the enhanced fissures that was above a certain threshold was assigned to the closest point on the lobe border. Voxels on the lobe border not assigned to a fissure voxel were considered to be non-fissure and therefore incomplete. The percentage of incomplete border voxels quantifies the incompleteness of the fissures. Results were calculated for the major fissure in the left lung and the major and minor fissures in the right lung. Based on experimentation four categories were defined for quantification: (almost) complete (0-20% missing), slightly incomplete (20-35%), incomplete (35-90%) and absent (90%-100%). The method was validated by manual assessments of fissural completeness in a subset of 50 cases. RESULTS Visual and automatic categorization of fissural completeness were consistent in 87% of cases for the left and right major fissures and in 82% of cases for the right minor fissure. The left major fissure was complete in 53% of subjects, 23% was slightly incomplete, 23% was incomplete and in 1% was absent. For the right major fissure those numbers were 42%, 34%, 24% and 0% respectively. The right minor fissure was most often incomplete; 8%, 12%, 66% and 14% for the respective categories. CONCLUSION A quantitative measure for fissural completeness can be determined fully automatically. There is substantial variation in fissural completeness with fissures often being incomplete. CLINICAL RELEVANCE/APPLICATION Fissural completeness may be an indicator of collateral ventilation and have significance in endobronchial valve treatment planning for emphysema patients.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Rikx08c, + author = {E. M. van Rikxoort and M. de Bruijne and B. van Ginneken}, + title = {Integrating local voxel classification and global shape models for medical image segmentation}, + booktitle = MI, + year = {2008}, + series = SPIE, + pages = {69144J-1 -- 69144J-6}, + doi = {10.1117/12.773209}, + file = {Rikx08c.pdf:pdf\\Rikx08c.pdf:PDF;Rikx08c.pdf:Rikx08c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {3765223466138193399}, + gscites = {1}, +} + +@article{Rikx09, + author = {E. M. van Rikxoort and B. de Hoop and S. van de Vorst and M. Prokop and B. 
van Ginneken}, + title = {Automatic segmentation of pulmonary segments from volumetric chest {CT} scans}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {621--630}, + doi = {10.1109/TMI.2008.2008968}, + abstract = {{A}utomated extraction of pulmonary anatomy provides a foundation for computerized analysis of computed tomography ({CT}) scans of the chest. {A} completely automatic method is presented to segment the lungs, lobes and pulmonary segments from volumetric {CT} chest scans. {T}he method starts with lung segmentation based on region growing and standard image processing techniques. {N}ext, the pulmonary fissures are extracted by a supervised filter. {S}ubsequently the lung lobes are obtained by voxel classification where the position of voxels in the lung and relative to the fissures are used as features. {F}inally, each lobe is subdivided in its pulmonary segments by applying another voxel classification that employs features based on the detected fissures and the relative position of voxels in the lobe. {T}he method was evaluated on 100 low-dose {CT} scans obtained from a lung cancer screening trial and compared to estimates of both interobserver and intraobserver agreement. {T}he method was able to segment the pulmonary segments with high accuracy (77%), comparable to both interobserver and intraobserver accuracy (74% and 80%, respectively).}, + file = {Rikx09.pdf:pdf\\Rikx09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {19211346}, + month = {4}, + gsid = {6723869456775488402}, + gscites = {116}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/80377}, + ss_id = {35ed683f966071aa4d9e428ebdc7b245a310f9ea}, + all_ss_ids = {['35ed683f966071aa4d9e428ebdc7b245a310f9ea']}, +} + +@inproceedings{Rikx09a, + author = {E. M. van Rikxoort and M. Prokop and B. de Hoop and M. A. Viergever and J. P. W. Pluim and B. van Ginneken}, + title = {Automatic segmentation of the pulmonary lobes from fissures, airways, and lung borders: evaluation of robustness against missing data}, + booktitle = MICCAI, + year = {2009}, + series = LNCS, + pages = {263--271}, + doi = {10.1007/978-3-642-04268-3_33}, + abstract = {{A}utomatic segmentation of structures with missing or invisible borders is a challenging task. {S}ince structures in the lungs are related, humans use contextual and shape information to infer the position of invisible borders. {A}n example of a task in which the borders are often incomplete or invisible is the segmentation of the pulmonary lobes. {I}n this paper, a fully automatic segmentation of the pulmonary lobes in chest {CT} scans is presented. {T}he method is especially designed to be robust to incomplete fissures by incorporating contextual information from automatic lung, fissure, and bronchial tree segmentations, as well as shape information. {S}ince the method relies on the result of automatic segmentations, it is important that the method is robust against failure of one or more of these segmentation methods. {I}n an extensive experiment on 10 chest {CT} scans with manual segmentations, the robustness of the method to incomplete fissures and missing input segmentations is shown. 
{I}n a second experiment on 100 chest {CT} scans with incomplete fissures, the method is shown to perform well.}, + file = {Rikx09a.pdf:pdf\\Rikx09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5761}, + pmid = {20425996}, + gsid = {15092302850885680539}, + gscites = {26}, + ss_id = {8a4482179ae334ba0dbd76060c30332a65f9861a}, + all_ss_ids = {['8a4482179ae334ba0dbd76060c30332a65f9861a']}, +} + +@article{Rikx09b, + author = {van Rikxoort, E. M. and de Hoop, B. and Viergever, M. A. and Prokop, M. and van Ginneken, B.}, + title = {Automatic lung segmentation from thoracic computed tomography scans using a hybrid approach with error detection}, + journal = MP, + year = {2009}, + volume = {36}, + pages = {2934-2947}, + doi = {10.1118/1.3147146}, + abstract = {{L}ung segmentation is a prerequisite for automated analysis of chest {CT} scans. {C}onventional lung segmentation methods rely on large attenuation differences between lung parenchyma and surrounding tissue. {T}hese methods fail in scans where dense abnormalities are present, which often occurs in clinical data. {S}ome methods to handle these situations have been proposed, but they are too time consuming or too specialized to be used in clinical practice. {I}n this article, a new hybrid lung segmentation method is presented that automatically detects failures of a conventional algorithm and, when needed, resorts to a more complex algorithm, which is expected to produce better results in abnormal cases. {I}n a large quantitative evaluation on a database of 150 scans from different sources, the hybrid method is shown to perform substantially better than a conventional approach at a relatively low increase in computational cost.}, + file = {Rikx09b.pdf:pdf\\Rikx09b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {19673192}, + month = {6}, + gsid = {14169941223089065550}, + gscites = {254}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/80370}, + ss_id = {a4b778f8e002b34fd337ada732d5257c846495a9}, + all_ss_ids = {['a4b778f8e002b34fd337ada732d5257c846495a9']}, +} + +@inproceedings{Rikx09c, + author = {E.M. van Rikxoort and W. Baggerman and B. van Ginneken}, + title = {Automatic segmentation of the airway tree from thoracic {CT} scans using a multi-threshold approach}, + booktitle = {The Second International Workshop on Pulmonary Image Analysis}, + year = {2009}, + pages = {341-349}, + abstract = {{A} method for automatic extraction of the airway tree from thoracic {CT} scans is presented that uses adaptive thresholds while growing the airways. {T}he method is evaluated on 20 volumetric chest {CT} scans provided by the {E}xtraction of {A}irways from {CT} 2009 ({EXACT}09) challenge. {T}he scans were acquired at different sites, using several different scanners, scanning protocols, and reconstruction parameters. {T}here are scans of clinical dose, low dose, and ultra-low dose data, in inspiration and expiration, from both relatively healthy and severely ill patients. {T}he results show that the method is able to detect a large number of airway branches at the cost of relatively high leakage volume.}, + file = {Rikx09c.pdf:pdf\\Rikx09c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13063578325072036697}, + gscites = {37}, + ss_id = {31379d21a9c062f56598616eaf4b24b539c3dd8d}, + all_ss_ids = {['31379d21a9c062f56598616eaf4b24b539c3dd8d']}, +} + +@phdthesis{Rikx09d, + author = {E. M. 
van Rikxoort}, + title = {Segmentation of anatomical structures in chest {CT} scans}, + year = {2009}, + url = {http://igitur-archive.library.uu.nl/dissertations/2009-0923-200149/UUindex.html}, + abstract = {{I}n this thesis, methods are described for the automatic segmentation of anatomical structures from chest {CT} scans. {F}irst, a method to segment the lungs from chest {CT} scans is presented. {S}tandard lung segmentation algorithms rely on large attenuation differences between the lungs and the surrounding tissue. {T}hose methods are fast and perform well in a large percentage of cases. {H}owever, when abnormalities are present, such algorithms often fail. {T}he few methods that have been proposed to handle such cases are too time consuming to be applied in clinical practice. {W}e propose a method that combines two existing methods using automatic error detection. {T}his method is shown to perform substantially better than a standard algorithm at a relatively low increase in computational cost. {A}fter the lungs are segmented, the fissures inside the lungs are extracted. {T}he goal of the fissure extraction is to detect both the lobar and the accessory fissures. {T}o this end, a supervised filter based on pattern recognition techniques is designed. {I}t is shown that the filter is able to detect fissures with a high accuracy. {I}n a next chapter, the lungs and fissures are used to identify lobes and segments in the lungs. {F}irst, the lobes are extracted using a supervised method that employs the lobar fissures and the lungs. {S}ince there are no physical boundaries between the segments, a method is designed that assigns to each voxel a probability that it belongs to a segment based on the position in the lobe and the position relative to the closest fissure. {I}t is shown that the method is able to assign a set of points in each scan to the correct segment with an accuracy comparable to the inter- and intra-observer agreement. {I}n the next chapter, a method to extract the airways from chest {CT} scans is presented. {F}rom an automatically determined seed point in the trachea, a tree structure is grown based on a set of rules. {A}long with a segmentation of the bronchial tree, a method to extract the anatomical labels for airway branches up to segmental level is presented. {T}he method is shown to perform well in different types of {CT} scans. {T}he next chapter is a methodological chapter that describes two new frameworks for multi-atlas segmentation. {T}he frameworks improve upon standard multi-atlas segmentation by including an automatic atlas selection, a stopping criterion that determines when no more improvement is expected, and a method to determine which area of a scan would benefit from additional atlases. {T}he method is applied to segmentation of the heart in chest {CT} scans and, to show the general applicability, segmentation of the caudate nucleus in brain {MR} scans. {I}n the final chapter, a method to segment the lobes is presented that combines the segmentation results and methodology described in previous chapters to enhance performance. {T}he method incorporates contextual information using automatic lung, fissure, and bronchial tree segmentations. {S}hape information is incorporated using a multi-atlas approach. {T}he method is shown to perform well in data with incomplete lobar fissures.}, + copromotor = {B. van Ginneken}, + file = {Rikx09d.pdf:pdf\\Rikx09d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever and W. M. 
Prokop}, + school = {Utrecht University}, + gsid = {3931825979186587910}, + gscites = {3}, + journal = {PhD thesis}, +} + +@conference{Rikx09e, + author = {E. M. van Rikxoort and M. Prokop and B. de Hoop and J. P. W. Pluim and B. van Ginneken}, + title = {Automatic {S}egmentation of the {P}ulmonary {L}obes from {C}omputed {T}omography {C}hest {CT} {S}cans: {R}obustness {A}gainst {I}ncomplete {F}issures by {I}ncluding {C}ontextual {I}nformation}, + booktitle = RSNA, + year = {2009}, + abstract = {PURPOSE: To develop and validate a robust technique for automatic extraction of the pulmonary lobes from thoracic CT scans that is not sensitive to the presence of incomplete fissures. METHOD AND MATERIALS: Volumetric CT scans (Philips Mx8000IDT, 16 x 0.75 mm collimation) were used: 100 scans selected from a lung cancer screening program (30 mAs) that showed substantially incomplete fissures, and 20 scans (100-150 mAs) randomly selected from clinical practice. We propose a multi-atlas approach in which existing lobar segmentations in scans with complete fissures are deformed to unseen test scans in which the fissures, the lungs, and the bronchial tree have been automatically segmented. The key element is a cost function that exploits information from fissures, lung borders, and bronchial tree, in such a way that less reliable information (lungs, airways) is only used when the most reliable information (fissures) is missing. To cope with anatomical variation in lobar shapes, the atlas that is anatomically most similar to the test scan is automatically selected. For quantitative evaluation, the right and left major fissure and the right minor fissure in the 20 clinical scans were automatically segmented. For the 100 scans with incomplete fissures, a human observer indicated whether the lobe segmentation was (1) completely correct, (2) incorrect within 12 mm of the true lobe border, or (3) incorrect further than 12 mm from the true lobe border. RESULTS: The average distance of the manually segmented lobe borders to the automatic lobe borders was 0.48 mm for the left major fissure, 1.23 mm for the right major fissure, and 1.28 mm for the right minor fissure. The observer scored for the left major fissure 79 lobe border as correct and the other 21 to be within 12 mm. For the right major fissure those numbers were 89 and 11, and for the right minor fissure 76 and 24, respectively. CONCLUSION: Automatic segmentation of pulmonary lobes in cases with substantially incomplete pulmonary fissures is feasible with a high accuracy. CLINICAL RELEVANCE/APPLICATION: Automatic segmentation of the pulmonary lobes is essential for quantitative analysis of chest CT scans.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Rikx10, + author = {E. M. van Rikxoort and M. Prokop and B. de Hoop and M. Viergever and J. Pluim and B. van Ginneken}, + title = {Automatic {S}egmentation of {P}ulmonary {L}obes {R}obust {A}gainst {I}ncomplete {F}issures}, + journal = TMI, + year = {2010}, + volume = {29}, + pages = {1286--1296}, + doi = {10.1109/TMI.2010.2044799}, + abstract = {{A} method for automatic segmentation of pulmonary lobes from computed tomography ({CT}) scans is presented that is robust against incomplete fissures. {T}he method is based on a multi-atlas approach in which existing lobar segmentations are deformed to test scans in which the fissures, the lungs, and the bronchial tree have been automatically segmented. 
{T}he key element of our method is a cost function that exploits information from fissures, lung borders, and bronchial tree in an effective way, such that less reliable information (lungs, airways) is only used when the most reliable information (fissures) is missing. {T}o cope with the anatomical variation in lobe shape, an atlas selection mechanism is introduced. {T}he method is evaluated on two test sets of 120 scans in total. {T}he results show that the lobe segmentation closely follows the fissures when they are present. {I}n a simulated experiment in which parts of complete fissures are removed, the robustness of the method against different levels of incomplete fissures is shown. {W}hen the fissures are incomplete, an observer study shows agreement of the automatically determined lobe borders with a radiologist for 81\% of the lobe borders on average.}, + file = {Rikx10.pdf:pdf\\Rikx10.pdf:PDF;Rikx10.pdf:Rikx10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {20304724}, + month = {6}, + gsid = {14940016956782221874}, + gscites = {99}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88439}, + ss_id = {f640a7a2f5618c0ca8e49b84af937bc16e868175}, + all_ss_ids = {['f640a7a2f5618c0ca8e49b84af937bc16e868175']}, +} + +@article{Rikx10a, + author = {E. M. van Rikxoort and I. I{\v{s}}gum and Y. Arzhaeva and M. Staring and S. Klein and M. A. Viergever and J. P. W. Pluim and B. van Ginneken}, + title = {Adaptive local multi-atlas segmentation: application to the heart and the caudate nucleus}, + journal = MIA, + year = {2010}, + volume = {14}, + pages = {39--49}, + doi = {10.1016/j.media.2009.10.001}, + abstract = {{A}tlas-based segmentation is a powerful generic technique for automatic delineation of structures in volumetric images. {S}everal studies have shown that multi-atlas segmentation methods outperform schemes that use only a single atlas, but running multiple registrations on volumetric data is time-consuming. {M}oreover, for many scans or regions within scans, a large number of atlases may not be required to achieve good segmentation performance and may even deteriorate the results. {I}t would therefore be worthwhile to include the decision which and how many atlases to use for a particular target scan in the segmentation process. {T}o this end, we propose two generally applicable multi-atlas segmentation methods, adaptive multi-atlas segmentation ({AMAS}) and adaptive local multi-atlas segmentation ({ALMAS}). {AMAS} automatically selects the most appropriate atlases for a target image and automatically stops registering atlases when no further improvement is expected. {ALMAS} takes this concept one step further by locally deciding how many and which atlases are needed to segment a target image. {T}he methods employ a computationally cheap atlas selection strategy, an automatic stopping criterion, and a technique to locally inspect registration results and determine how much improvement can be expected from further registrations. {AMAS} and {ALMAS} were applied to segmentation of the heart in computed tomography scans of the chest and compared to a conventional multi-atlas method ({MAS}). {T}he results show that {ALMAS} achieves the same performance as {MAS} at a much lower computational cost. {W}hen the available segmentation time is fixed, both {AMAS} and {ALMAS} perform significantly better than {MAS}. 
{I}n addition, {AMAS} was applied to an online segmentation challenge for delineation of the caudate nucleus in brain {MRI} scans where it achieved the best score of all results submitted to date.}, + file = {Rikx10a.pdf:pdf\\Rikx10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {19897403}, + month = {2}, + gsid = {4163686614704886737}, + gscites = {179}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/87927}, + ss_id = {b75ff2b9b67d7eaa6d0ef3231b755cde8f6915b7}, + all_ss_ids = {['b75ff2b9b67d7eaa6d0ef3231b755cde8f6915b7']}, +} + +@inproceedings{Rikx10b, + author = {E. M. van Rikxoort and J. G. Goldin and B. van Ginneken and M. Galperin-Aizenberg and C. Ni and M. S. Brown}, + title = {Interactively learning a patient specific k-nearest neighbor classifier based on confidence weighted samples}, + booktitle = ISBI, + year = {2010}, + pages = {556--559}, + doi = {10.1109/ISBI.2010.5490287}, + abstract = {{A}n automatic segmentation method that fails for one scan of a patient is likely to fail in all follow up scans as well. {W}e propose to construct a patient specific k-nearest neighbor classifier that learns from the test data while the user is interactively correcting the segmentation in the baseline scan. {W}e apply the system to lung segmentation in chest {CT} scans. {T}he system is set up in such a way that interaction is limited to single clicks in misclassified areas. {V}oxels indicated by a user as erroneously labeled are added to the training data. {I}n classification, patient specific confidence weights are applied relative to the similarity between the test and training samples. {T}he method is quantitatively validated on baseline and follow up scans from 16 patients. {T}he results improve substantially in both baseline and follow up scans with only five clicks from the user in the baseline scan on average.}, + file = {Rikx10b.pdf:pdf\\Rikx10b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13602227483189066767}, + gscites = {2}, + ss_id = {338b5d8af7633715a9062049ac8445c8b830df6d}, + all_ss_ids = {['338b5d8af7633715a9062049ac8445c8b830df6d']}, +} + +@inproceedings{Rikx10c, + author = {E.M. van Rikxoort and M. Galperin-Aizenberg and J. G. Goldin and T. T. J. P. Kockelkorn and B. van Ginneken and M. S. Brown}, + title = {Multi-classifier semi-supervised classification of tuberculosis patterns on chest {CT} scans}, + booktitle = {The Third International Workshop on Pulmonary Image Analysis}, + year = {2010}, + pages = {41-48}, + abstract = {Classification of different textures present in chest CT scans of patients with pulmonary tuberculosis (TB) is of crucial importance for the success of ongoing vaccine and drug testing trials. In this paper, a new multi-classifier semi-supervised method (MCSS) is proposed that is trained with a small set of labeled examples and improves classification performance by sampling interesting samples from unlabeled scans based on uncertainty among a pool of classifiers. The interesting samples are added to the small labeled set with a label assigned by 'expert' classifiers. MCSS is applied to 20 scans of patients with proven TB for which a reference standard was obtained by a consensus reading. Another set of 35 scans was used without manual labels. 
The performance of MCSS is compared to conventional supervised classification and two other semi-supervised methods and shown to outperform all other methods.}, + file = {Rikx10c.pdf:pdf\\Rikx10c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TB}, + gsid = {995758793341684130}, + gscites = {4}, +} + +@inproceedings{Rikx11, + author = {E. M. van Rikxoort and J. G. Goldin and M. Galperin-Aizenberg and M. S. Brown}, + title = {Classification of pulmonary emphysema from chest {CT} scans using integral geometry descriptors}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {79631O-1--79631O-6}, + doi = {10.1117/12.878180}, + abstract = {To gain insight into the underlying pathways of emphysema and monitor the effect of treatment, methods to quantify and phenotype the different types of emphysema from chest {CT} scans are of crucial importance. Current standard measures rely on density thresholds for individual voxels, which is influenced by inspiration level and does not take into account the spatial relationship between voxels. Measures based on texture analysis do take the interrelation between voxels into account and therefore might be useful for distinguishing different types of emphysema. In this study, we propose to use {M}inkowski functionals combined with rotation invariant {G}aussian features to distinguish between healthy and emphysematous tissue and classify three different types of emphysema. {M}inkowski functionals characterize binary images in terms of geometry and topology. In 3{D}, four {M}inkowski functionals are defined. By varying the threshold and size of neighborhood around a voxel, a set of {M}inkowski functionals can be defined for each voxel. Ten chest {CT} scans with 1810 annotated regions were used to train the method. A set of 108 features was calculated for each training sample from which 10 features were selected to be most informative. A linear discriminant classifier was trained to classify each voxel in the lungs into a subtype of emphysema or normal lung. The method was applied to an independent test set of 30 chest {CT} scans with varying amounts and types of emphysema with 4347 annotated regions of interest. The method is shown to perform well, with an overall accuracy of 95%.}, + file = {Rikx11.pdf:pdf\\Rikx11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {5300889966077716690}, + gscites = {1}, +} + +@conference{Rikx11a, + author = {E. M. van Rikxoort and M. Galperin-Aizenberg and C.M. Jude and J. G. Goldin and M.S. Brown}, + title = {Quantifying Emphysema From Chest {C}omputed {T}omography Scans Using Integral Geometry Descriptors: Improved Performance Over Density Measures In Low Dose Scans}, + booktitle = ATS, + year = {2011}, + abstract = {{RATIONALE:} {T}o gain insight into the underlying pathways of emphysema and monitor the effect of treatment, methods to quantify emphysema from low dose chest computed tomography ({CT}) scans are of crucial importance. Currently, emphysema is often quantified in {CT} scans by densitometric measures (e.g. relative area under -950 {HU} ({RA}950)), which are known to be affected by inspiration level of the patient and scanner parameters. Especially in low-dose {CT} scans, image noise can lead to an increase in {RA}950. The purpose of this study is to compare a method for quantifying emphysema from low dose chest {CT} scans using texture analysis based on integral geometry descriptors to {RA}950. 
{METHODS:} {S}tandardized pre-treatment {CT} scans from 13 subjects enrolled in clinical trials for emphysema treatment were taken from an anonymized research database. {CT} imaging of the thorax was performed at suspended full inspiration with at most 1.25 mm slice thickness, 0.6 mm slice spacing at 120 {KV} and 25-40 m{A}s. An automatic method to classify each voxel into emphysema ({E}) or non-emphysema ({NE}) was developed based on {M}inkowski {F}unctionals and rotationally invariant Gaussian features. A combination of nine features was used to train a k-nearest neighbor classifier which assigns to each voxel in a test scan a label {E} or {NE}. To evaluate the automatic method, 18 regions of interest ({ROI}) were randomly placed in the lungs in each scan, equally divided over 6 different zones in the lungs. An observer study was performed in which expert chest radiologists annotated each {ROI} as being either {NE} or {E}. The texture analysis method was applied to the {ROI}s in a leave-one-patient-out fashion. An emphysema score ({ES}) was defined as the percentage of voxels in the {ROI} classified as emphysematous. {RA}950 was also calculated for each {ROI}. For both methods, an {ES} > 5% was defined to be emphysematous. The output of the texture analysis method and {RA}950 were compared to the visual assessment using confusion matrices. {RESULTS:} The confusion matrix comparing the proposed method and {RA}950 to the visual annotation is shown in {T}able 1. The standard {RA}950 measure has an accuracy for classifying emphysema vs. non-emphysema of 67%, the proposed method has an accuracy of 89%. {F}igure 1 illustrates the effect of image noise on both methods. {CONCLUSION:} The proposed method is able to quantify emphysema with a better accuracy than the standard {RA}950 method in low dose chest {CT} scans.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, +} + +@conference{Rikx11b, + author = {E. M. van Rikxoort and M.Galperin-Aizenberg and F. Abtin and H. J. Kim and P. Lu and G. Shaw and J. G. Goldin and M.S. Brown}, + title = {Pulmonary Fissure Integrity Assessment in Subjects with Severe Emphysema: Evaluation of a Fully Automatic Method}, + booktitle = ATS, + year = {2011}, + abstract = {{RATIONALE:} {P}ulmonary fissures are important anatomical landmarks in recognizing pulmonary lobar structure and the regional assessment of the extent and distribution of lung disease. Incomplete fissures contribute to collateral ventilation across lobes and detection and quantification of incomplete lobar fissures may have clinical implications for the outcome of resections and bronchoscopic lung volume reduction ({BLVR}) planning for emphysema patients. The purpose of this study is to evaluate a technique for automatic quantification of fissural completeness from chest computed tomography scans on a database of subjects with severe emphysema. {METHODS:} {F}rom an imaging research database, 96 patients with severe emphysema enrolled in clinical trials for {BLVR} treatment were identified. Standardized pre-treatment volumetric {CT} imaging of the thorax was performed at suspended full inspiration with at most 1 mm slice thickness at 120 {KV} and 80-110 m{A}s. Fissural completeness for the left major and right major and minor fissures was assessed by visual read and automatic quantification. Visual assessment of fissural completeness was performed by two expert radiologists inspecting all three planes. For visual scoring a dichotomous scale was used to score fissure integrity: complete or partial. 
A complete fissure was defined as more than 90% intact surface of the fissure and no obvious vessels crossing over. For automatic quantification, the lungs, fissures, and lobes were automatically segmented and the percentage integrity of the fissures was calculated as the percentage of the lobar border defined by a fissure. The continuous integrity score of the automatic method is compared to the dichotomous visual assessments of both readers using {ROC} analysis, boxplots, and rank sum tests. {RESULTS:} {F}or all three pulmonary fissures, compared to both readers, boxplot analysis showed that the automatic method gave significantly higher completeness scores (means 91.04%, 93.16%, 93.65% and 90.06%, 88.08%, 93.77%, for the right major, right minor, and left major fissures, for both readers, respectively) for fissures assessed by the readers as complete than for those assessed as partial (means 83.17%, 69.49%, 86.18%, and 82.97%, 70.09%, 85.16% ) (p<0.001). The kappa statistics for the radiologists' agreement were 0.75, 0.59, and 0.74, respectively. The {ROC} curves for the automatic method compared to each reader for each fissure are provided in {F}igure 2. {CONCLUSION:} {A} completely automatic method for quantification of fissural completeness is able to quantify fissural completeness in a cohort of subjects with severe emphysema comparable to expert radiologists.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, +} + +@inproceedings{Rikx11d, + author = {E. M. van Rikxoort and B. van Ginneken}, + title = {Automatic segmentation of the lungs and lobes from thoracic {CT} scans}, + booktitle = {The Fourth International Workshop on Pulmonary Image Analysis}, + year = {2011}, + pages = {261--268}, + abstract = {Lung and lobe segmentation are prerequisites for automated analysis of chest {CT} scans. This paper presents fully automatic methods for segmentation of the lungs and lobes from thorax {CT} scans. Both methods have previously been published. The lung segmentation starts by automatically identifying the trachea and main bronchi. From the trachea, the lungs are found using a region growing approach. In cases for which errors are automatically detected in the resulting lung segmentation, a multi-atlas segmentation approach is applied. The lobe segmentation is based on a multi-atlas approach and was especially designed to be robust against incomplete fissures. The methods were evaluated on 55 volumetric chest {CT} scans provided by the {LO}be and {L}ung {A}nalysis 2011 {(LOLA11)} challenge. The scans were acquired at different sites, using several different scanners, scanning protocols, and reconstruction parameters.}, + optnote = {DIAG, RADIOLOGY}, + file = {:./pdf/Rikx11d.pdf:PDF}, + gsid = {6106095811807054823}, + gscites = {28}, + ss_id = {dd4c3ad5fe9bcbaeb028818972727219bf6cbdbc}, + all_ss_ids = {['dd4c3ad5fe9bcbaeb028818972727219bf6cbdbc']}, +} + +@inproceedings{Rikx12, + author = {E. M. van Rikxoort and P. A. de Jong and O. M. Mets and B. van Ginneken}, + title = {Automatic classification of pulmonary function in {COPD} patients using trachea analysis in chest {CT} scans}, + booktitle = MI, + year = {2012}, + volume = {8315}, + series = SPIE, + pages = {83150P--83150P-6}, + doi = {10.1117/12.911603}, + abstract = {{C}hronic {O}bstructive {P}ulmonary {D}isease ({COPD}) is a chronic lung disease that is characterized by airflow limitation. 
{COPD} is clinically diagnosed and monitored using pulmonary function testing ({PFT}), which measures global inspiration and expiration capabilities of patients and is time-consuming and labor-intensive. It is becoming standard practice to obtain paired inspiration-expiration {CT} scans of {COPD} patients. Predicting the {PFT} results from the {CT} scans would alleviate the need for {PFT} testing. It is hypothesized that the change of the trachea during breathing might be an indicator of tracheomalacia in {COPD} patients and correlate with {COPD} severity. In this paper, we propose to automatically measure morphological changes in the trachea from paired inspiration and expiration {CT} scans and investigate the influence on {COPD} {GOLD} stage classification. The trachea is automatically segmented and the trachea shape is encoded using the lengths of rays cast from the center of gravity of the trachea. These features are used in a classifier, combined with emphysema scoring, to attempt to classify subjects into their {COPD} stage. A database of 187 subjects, well distributed over the {COPD} {GOLD} stages 0 through 4 was used for this study. The data was randomly divided into training and test set. Using the training scans, a nearest mean classifier was trained to classify the subjects into their correct {GOLD} stage using either emphysema score, tracheal shape features, or a combination. Combining the proposed trachea shape features with emphysema score, the classification performance into {GOLD} stages improved with 11% to 51%. In addition, an 80% accuracy was achieved in distinguishing healthy subjects from {COPD} patients.}, + file = {:./pdf/Rikx12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {14466201478600686172}, + gscites = {6}, + ss_id = {07b1cfaff7bb3fcdd0df7ddd43cf46258be9438b}, + all_ss_ids = {['07b1cfaff7bb3fcdd0df7ddd43cf46258be9438b']}, +} + +@article{Rikx12a, + author = {E. M. van Rikxoort and J. G. Goldin and F. Abtin and H. J. Kim and P. Lu and B. van Ginneken and G. Shaw and M. Galperin-Aizenberg and M. S. Brown}, + title = {A method for the automatic quantification of the completeness of pulmonary fissures: evaluation in a database of subjects with severe emphysema}, + journal = ER, + year = {2012}, + volume = {22}, + pages = {302--309}, + doi = {10.1007/s00330-011-2278-0}, + abstract = {{O}bjectives: {T}o propose and evaluate a technique for automatic quantification of fissural completeness from chest computed tomography {(CT)} in a database of subjects with severe emphysema. {M}ethods: {N}inety-six {CT} studies of patients with severe emphysema were included. The lungs, fissures and lobes were automatically segmented. The completeness of the fissures was calculated as the percentage of the lobar border defined by a fissure. The completeness score of the automatic method was compared with a visual consensus read by three radiologists using boxplots, rank sum tests and {R}OC analysis. {R}esults: {T}he consensus read found 49% (47/96), 15% (14/96) and 67% (64/96) of the right major, right minor and left major fissures to be complete. For all fissures visually assessed as being complete the automatic method resulted in significantly higher completeness scores (mean 92.78%) than for those assessed as being partial or absent (mean 77.16%; all p values <0.001). The areas under the curves for the automatic fissural completeness were 0.88, 0.91 and 0.83 for the right major, right minor and left major fissures respectively. 
{C}onclusions: {A}n automatic method is able to quantify fissural completeness in a cohort of subjects with severe emphysema consistent with a visual consensus read of three radiologists.}, + file = {Rikx12a.pdf:pdf\\Rikx12a.pdf:PDF}, + optnote = {DIAG, LungVolumeReduction, RADIOLOGY}, + number = {2}, + pmid = {21984417}, + month = {10}, + gsid = {9186093955945686788}, + gscites = {63}, + ss_id = {145633f228eda9babfe3f73fa981c6787f66541d}, + all_ss_ids = {['145633f228eda9babfe3f73fa981c6787f66541d']}, +} + +@conference{Rikx12b, + author = {E. M. van Rikxoort and B. van Ginneken and M. Schmidt and A. Lahaije and H. van Helvoort and M. Prokop and Y. Heijdra}, + title = {Dynamic Hyperinflation in {COPD} Patients: Is It Related to Emphysema and Airway Morphology in Inspiration and Expiration Thoracic {CT}?}, + booktitle = RSNA, + year = {2012}, + abstract = {{PURPOSE} Dynamic hyperinflation ({DH}), progressive airtrapping during exercise, is related to dyspnea in {COPD} patients. A subgroup of {COPD} patients divided over all {GOLD} stages suffer from dynamic hyperinflation. This study relates automatically extracted measures of airway morphology, airtrapping and emphysema to dynamic hyperinflation. {METHOD AND MATERIALS} As part of a research study, eighteen {COPD} patients underwent low dose {CT} (64x0.75mm, Siemens Sensation 64) at full inspiration and expiration. Using a portable ergo-spirometry system ({O}xycon {M}obile) to measure operational lung volume during activities of daily life, the study population was divided into ten patients suffering from {DH} and eight non-{DH}. Research software ({D}iagnostic {I}mage {A}nalysis {G}roup, {N}ijmegen, The {N}etherlands; {F}raunhofer {MEVIS}, {B}remen, {G}ermany) automatically segmented the lungs and airways in both the inspiration and expiration scans. As measures for emphysema, the percentages of lung volume below -950 {HU} ({IN}-950) and the 15th percentile of the cumulative histogram ({PD}15) were computed in inspiration scans. As a quantification of air-trapping, the percentages of lung volume below -856 {HU} were computed in expiration scans ({EX}-856). Airway morphology was extracted from the inspiration scans as the wall area percentage at airways with a circumference of 10mm ({Pi10}), computed using regression on all cross-sectional airway measurements. Means and standard deviations for each measurement are reported for the two groups. {RESULTS} The means and standard deviations of the measurements for the {DH} and non-{DH} groups were 19.7 ± 8.6% and 14.6 ± 10.5% for IN-950, -956 ± 15 {HU} and -945 ± 23 {HU} for {PD}15, 47.2 ± 13.7% and 35.1 ± 10.9% for {EX}-856, and 48.0 ± 6.9% and 40.7 ± 9.3% for {Pi10}, respectively. The differences between the {DH} and non-{DH} group are thus more pronounced for {EX}-856 and {Pi10} than for {IN}-950 and {PD}15. {CONCLUSION} Quantitative measurements from inspiration and expiration {CT} scans related to airway morphology appear to be more informative for distinguishing {COPD} patients suffering from dynamic hyperinflation from non-hyperinflators than emphysema related measurements. {CLINICAL RELEVANCE/APPLICATION} Understanding the underlying pathways of hyperinflation is important for the management of {COPD} since reduction of dynamic hyperinflation is strongly correlated with less dyspnea complaints.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Rikx13, + author = {E. M. van Rikxoort and B. 
van Ginneken}, + title = {Automated segmentation of pulmonary structures in thoracic computed tomography scans: a review}, + journal = PMB, + year = {2013}, + volume = {58}, + pages = {R187-R220}, + doi = {10.1088/0031-9155/58/17/R187}, + abstract = {Computed tomography ({CT}) is the modality of choice for imaging the lungs in vivo. Sub-millimeter isotropic images of the lungs can be obtained within seconds, allowing detection of small lesions and detailed analysis of disease processes. The high resolution of thoracic {CT} and the high prevalence of lung diseases require a high degree of automation in the analysis pipeline. Automated segmentation of pulmonary structures in thoracic {CT} has been an important research topic for over a decade now. This systematic review provides an overview of current literature. We discuss segmentation methods for the lungs, the pulmonary vasculature, the airways, including airway tree construction and airway wall segmentation, the fissures, the lobes, and the pulmonary segments. For each topic the current state of the art is summarized, and topics for future research are identified.}, + file = {Rikx13.pdf:pdf\\Rikx13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {23956328}, + month = {8}, + gsid = {7858654548922786505}, + gscites = {101}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/140549}, +} + +@article{Riss14, + author = {Rissmann, R. and Pinckaers, J. H. F. M. and de Man, A. J. M. and Cohen, A. C. and Dubois, E. A.}, + title = {{Pharmacology E-learning 2.0 - Web vs. App}}, + journal = BJCP, + year = {2014}, + volume = {78}, + number = {4}, + month = {10}, + pages = {772}, + doi = {10.1111/bcp.12400}, + file = {Riss14.pdf:pdf\\Riss14.pdf:PDF}, + optnote = {DIAG}, +} + +@conference{Ritc15, + author = {A.J. Ritchie and C. Sanghera and C. Jacobs and W. Zhang and J. Mayo and H. Roberts and M. Gingras and S. Pasian and L. Stewart and S. Tsai and D. Manos and J.M. Seely and P. Burrowes and R. Bhatia and S. Atkar-Khattra and B. van Ginneken and M. Tammemagi and S. Lam}, + title = {Computer Vision Tool and Technician as First Reader of Lung Cancer Screening {CT}}, + booktitle = {World Conference on Lung Cancer}, + year = {2015}, + abstract = {BACKGROUND: The recommendation by the US Preventive Services Task Force to screen high-risk smokers with low-dose computed tomography (LDCT) and the recent decision by the Centers for Medicare and Medicaid Services to fund LDCT screening under the Medicare program mean that LDCT screening will be implemented at the population level in the US and likely in other countries. With the large volume of scans that will be generated, accurate and efficient interpretation of LDCT images is key to providing a cost-effective implementation of LDCT screening to the large at risk population. OBJECTIVE: To evaluate an alternative workflow to identify and triage abnormal LDCT scans in which a technician assisted by Computer Vision (CV) software acts as first reader with the aim to reduce workload, improve speed, consistency and quality of interpretation of screening LDCT scans. METHODS: A test dataset of baseline Pan-Canadian Early Detection of Lung Cancer Study LDCT scans (New Engl J Med. 2013;369:908-17) was used. This included: 136 scans with lung cancers, 556 scans with benign nodules and 136 scans without nodules. The scans were randomly assigned for analysis by the CV software (CIRRUS Lung Screening, Diagnostic Imaging Analysis Group, Nijmegen, The Netherlands). 
The annotated scans were then reviewed by a technician without knowledge of the diagnosis. The scans were classified by the technician as either normal (no nodules or benign nodules only, potentially not requiring radiologist review) or abnormal (suspicious of malignancy or other abnormality requiring radiologist review). The results were compared with the Pan-Can Study radiologists. Nodules found by CIRRUS but not by the radiologist were reviewed by a subspecialty trained chest radiologist with 14 years experience in lung cancer screening (JM). RESULTS: The overall sensitivity and specificity of the technician to identify an abnormal scan were: 97.7% (95% CI: 96.3 - 98.7) and 98.0% (95% CI: 89.5 - 99.7) respectively. The technician correctly identified all the scans with malignant nodules. The time taken by the technician to read a scan was 208±120 sec. CONCLUSIONS: A technician assisted by CV software can categorize accurately abnormal scans for review by a radiologist. Pre-screening by a technician and CV software is a promising strategy for reducing workload, improving the speed, consistency and quality of scan interpretation of screening chest CTs. ACKNOWLEDGEMENTS: Supported by the Terry Fox Research Institute, The Canadian Partnership Against Cancer and the BC Cancer Foundation}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Ritc15a, + author = {A.J. Ritchie and M.C. Tammemagi and C. Jacobs and W. Zhang and J. Mayo and H. Roberts and M. Gingras and S. Pasian and L. Stewart and S. Tsai and D. Manos and J.M. Seely and P. Burrowes and R. Bhatia and S. Atkar-Khattra and B. van Ginneken and M. Tsao and S. Lam}, + title = {Automated measurement of malignancy risk of lung nodule detected by screening computed tomography}, + booktitle = {World Conference on Lung Cancer}, + year = {2015}, + abstract = {BACKGROUND: We have previously reported a practical predictive tool that accurately estimates the probability of malignancy for lung nodules detected at baseline screening LDCT (New Engl J Med. 2013;369:908-17). Manual measurement of nodule dimensions and generation of malignancy risk scores is time consuming and subjected to intra- and inter-observer variability. OBJECTIVE: The goal of this study is to prepare a nodule malignancy risk prediction model based on automated computer generated nodule data and compare it to an established model based on radiologists' generated data. METHODS: Using the same published PanCan dataset (New Engl J Med. 2013;369:908-17) with the number of lung cancers updated, we prepared a logistic regression model predicting lung cancer using computer-generated imaging data from the CIRRUS Lung Screening software (Diagnostic Imaging Analysis Group, Nijmegen, The Netherlands). Ninety-one of the 2,537 baseline (first) scans were not available or could not be processed by CIRRUS. The remaining 2,446 scans were first annotated by the CIRRUS software. A human non-radiologist reader then accepted/rejected the annotated marks and manually searched the LDCT for nodules missed by CIRRUS or the study radiologist. New nodules found that were not recorded by the study radiologist were reviewed by a subspecialty trained chest radiologist with 14 years experience in lung cancer screening (JM). Nodule morphometric measurements (maximum and mean diameter, volume, mass, density) and total nodule count per scan irrespective of size were automatically generated by the CIRRUS software. 
The nodule type (nonsolid, part-solid, or solid), nodule description (lobulated, spiculated or well defined) and nodule location (upper versus middle or lower lobe) were manually entered. The variables were evaluated in models as untransformed and natural log transformed variables. Nonlinear relationships with lung cancer were also evaluated. Socio-demographic and clinical history predictors were not included in the model. RESULTS: Radiologists evaluation identified 8,570 pulmonary nodules of any size in 2063 individuals - 124 nodules in 119 individuals were diagnosed as cancer in follow-up. Based on CIRRUS software annotated marks that were accepted by a human reader, computer analysis identified 11,520 pulmonary nodules in 2174 individuals - 121 nodules in 115 individuals were diagnosed as cancer in follow-up. Thirty-six percent of the new nodules found by CIRRUS and/or second human reader were ≥4 mm (mean±SD, 5.9 ± 3.5 mm). Both the computer generated imaging data model (Model-CIRRUS) and the radiologist generated data model (Model-RAD) demonstrated excellent discrimination and calibration. Their predictive performances were also similar. Comparing Model-CAD to Model-RAD, the AUCs were 0.9537 versus 0.9541, the 90th percentile absolute errors were 0.0008 versus 0.0007, and the Brier scores were 0.0093 versus 0.0137. Mean nodule diameter is a better risk predictor than maximum nodule diameter, nodule density or mass. CONCLUSION: The predictive performances of computer and radiologist generated data models were similar. The model can be integrated to the CIRRUS Lung Screening software to automatically generate a nodule malignancy risk score to facilitate nodule management recommendation. ACKNOWLEDGEMENTS: Supported by the Terry Fox Research Institute, The Canadian Partnership Against Cancer and the BC Cancer Foundation.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Ritc16, + author = {Ritchie, Alexander J. and Sanghera, Calvin and Jacobs, Colin and Zhang, Wei and Mayo, John and Schmidt, Heidi and Gingras, Michel and Pasian, Sergio and Stewart, Lori and Tsai, Scott and Manos, Daria and Seely, Jean M. and Burrowes, Paul and Bhatia, Rick and Atkar-Khattra, Sukhinder and {van Ginneken}, Bram and Tammemagi, Martin and Tsao, Ming Sound and Lam, Stephen and {the Pan-Canadian Early Detection of Lung Cancer Study Group}}, + title = {Computer Vision Tool and Technician as First Reader of Lung Cancer Screening {CT} Scans}, + journal = JTO, + year = {2016}, + volume = {11}, + issue = {5}, + pages = {709-717}, + doi = {10.1016/j.jtho.2016.01.021}, + abstract = {To implement a cost-effective low-dose computed tomography (LDCT) lung cancer screening program at the population level, accurate and efficient interpretation of a large volume of LDCT scans is needed. The objective of this study was to evaluate a workflow strategy to identify abnormal LDCT scans in which a technician assisted by computer vision (CV) software acts as a first reader with the aim to improve speed, consistency, and quality of scan interpretation. Without knowledge of the diagnosis, a technician reviewed 828 randomly batched scans (136 with lung cancers, 556 with benign nodules, and 136 without nodules) from the baseline Pan-Canadian Early Detection of Lung Cancer Study that had been annotated by the CV software CIRRUS Lung Screening (Diagnostic Image Analysis Group, Nijmegen, The Netherlands). 
The scans were classified as either normal (no nodules \geq1 mm or benign nodules) or abnormal (nodules or other abnormality). The results were compared with the diagnostic interpretation by Pan-Canadian Early Detection of Lung Cancer Study radiologists. The overall sensitivity and specificity of the technician in identifying an abnormal scan were 97.8\% (95\% confidence interval: 96.4-98.8) and 98.0\% (95\% confidence interval: 89.5-99.7), respectively. Of the 112 prevalent nodules that were found to be malignant in follow-up, 92.9\% were correctly identified by the technician plus CV compared with 84.8\% by the study radiologists. The average time taken by the technician to review a scan after CV processing was 208 +- 120 seconds. Prescreening CV software and a technician as first reader is a promising strategy for improving the consistency and quality of screening interpretation of LDCT scans.}, + file = {Ritc16.pdf:pdf\\Ritc16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26994641}, + month = {5}, + gsid = {3679975545286903047}, + gscites = {25}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/172195}, + ss_id = {cb695909a72f6a2b288bded8058ce8ab6398424c}, + all_ss_ids = {['cb695909a72f6a2b288bded8058ce8ab6398424c']}, +} + +@inproceedings{Robb11, + author = {Robben, Saskia and Velikova, Marina and Lucas, Peter J.F. and Samulski, Maurice}, + title = {Discretisation {D}oes {A}ffect the {P}erformance of {B}ayesian {N}etworks}, + booktitle = {Research and Development in Intelligent Systems XXVII}, + year = {2011}, + pages = {237--250}, + doi = {10.1007/978-0-85729-130-1_17}, + abstract = {In this paper, we study the use of Bayesian networks to interpret breast X-ray images in the context of breast-cancer screening. In particular, we investigate the performance of a manually developed Bayesian network under various discretisation schemes to check whether the probabilistic parameters in the initial manual network with continuous features are optimal and correctly reflect the reality. The classification performance was determined using ROC analysis. A few algorithms perform better than the continuous baseline: best was the entropy-based method of Fayyad and Irani, but also simpler algorithms did outperform the continuous baseline. Two simpler methods with only 3 bins per variable gave results similar to the continuous baseline. These results indicate that it is worthwhile to consider discretising continuous data when developing Bayesian networks and support the practical importance of probabilistic parameters in determining the network's performance.}, + file = {Robb11.pdf:pdf\\Robb11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {10}, +} + +@article{Rodr18, + author = {Rodriguez-Ruiz, Alejandro and Gubern-Merida, Albert and Imhof-Tas, Mechli and Lardenoije, Susanne and Wanders, Alexander J T and Andersson, Ingvar and Zackrisson, Sophia and Lang, Kristina and Dustler, Magnus and Karssemeijer, Nico and Mann, Ritse M and Sechopoulos, Ioannis}, + title = {One-view digital breast tomosynthesis as a stand-alone modality for breast cancer detection: do we need more?}, + journal = ER, + year = {2018}, + volume = {28}, + issue = {5}, + month = {5}, + pages = {1938--1948}, + doi = {10.1007/s00330-017-5167-3}, + abstract = {To compare the performance of one-view digital breast tomosynthesis (1v-DBT) to that of three other protocols combining DBT and mammography (DM) for breast cancer detection.
Six radiologists, three experienced with 1v-DBT in screening, retrospectively reviewed 181 cases (76 malignant, 50 benign, 55 normal) in two sessions. First, they scored sequentially: 1v-DBT (medio-lateral oblique, MLO), 1v-DBT (MLO) + 1v-DM (cranio-caudal, CC) and two-view DM + DBT (2v-DM+2v-DBT). The second session involved only 2v-DM. Lesions were scored using BI-RADS(r) and level of suspiciousness (1-10). Sensitivity, specificity, receiver operating characteristic (ROC) and jack-knife alternative free-response ROC (JAFROC) were computed. On average, 1v-DBT was non-inferior to any of the other protocols in terms of JAFROC figure-of-merit, area under ROC curve, sensitivity or specificity (p>0.391). While readers inexperienced with 1v-DBT screening improved their sensitivity when adding more images (69-79 %, p=0.019), experienced readers showed similar sensitivity (76 %) and specificity (70 %) between 1v-DBT and 2v-DM+2v-DBT (p=0.482). Subanalysis by lesion type and breast density showed no difference among modalities. Detection performance with 1v-DBT is not statistically inferior to 2v-DM or to 2v-DM+2v-DBT; its use as a stand-alone modality might be sufficient for readers experienced with this protocol. * One-view breast tomosynthesis is not inferior to two-view digital mammography. * One-view DBT is not inferior to 2-view DM plus 2-view DBT. * Training may lead to 1v-DBT being sufficient for screening.}, + file = {:pdf/Rodr18.pdf:PDF}, + optnote = {AXTI, DIAG, RADIOLOGY}, + pmid = {29230524}, + gsid = {14791245295281752832}, + gscites = {25}, + ss_id = {38f5f8539b523929aafed804c3533b62abdc3781}, + all_ss_ids = {['38f5f8539b523929aafed804c3533b62abdc3781']}, +} + +@inproceedings{Rodr18a, + author = {Alejandro Rodriguez-Ruiz and Jonas Teuwen and Kaman Chung and Nico Karssemeijer and Margarita Chevalier and Albert Gubern-Merida and Ioannis Sechopoulos}, + title = {Pectoral muscle segmentation in breast tomosynthesis with deep learning}, + booktitle = MI, + year = {2018}, + series = SPIE, + doi = {10.1117/12.2292920}, + abstract = {Digital breast tomosynthesis (DBT) has superior detection performance than mammography (DM) for population-based breast cancer screening, but the higher number of images that must be reviewed poses a challenge for its implementation. This may be ameliorated by creating a two-dimensional synthetic mammographic image (SM) from the DBT volume, containing the most relevant information. When creating a SM, it is of utmost importance to have an accurate lesion localization detection algorithm, while segmenting fibroglandular tissue could also be beneficial. These tasks encounter an extra challenge when working with images in the medio-lateral oblique view, due to the presence of the pectoral muscle, which has similar radiographic density. In this work, we present an automatic pectoral muscle segmentation model based on a u-net deep learning architecture, trained with 136 DBT images acquired with a single system (different BI-RADS(r) densities and pathological findings). The model was tested on 36 DBT images from that same system resulting in a dice similarity coefficient (DSC) of 0.977 (0.967-0.984). In addition, the model was tested on 125 images from two different systems and three different modalities (DBT, SM, DM), obtaining DSCs between 0.947 and 0.970, a range determined visually to provide adequate segmentations. For reference, a resident radiologist independently annotated a mix of 25 cases obtaining a DSC of 0.971.
The results suggest the possibility of using this model for inter-manufacturer DBT, DM and SM tasks that benefit from the segmentation of the pectoral muscle, such as SM generation, computer aided detection systems, or patient dosimetry algorithms.}, + optnote = {DIAG}, + month = {2}, + gsid = {12130497468928813322}, + gscites = {21}, + ss_id = {8db5fb8ff224535dfeb095215b030e8d260344b7}, + all_ss_ids = {['8db5fb8ff224535dfeb095215b030e8d260344b7']}, +} + +@article{Rodr18b, + author = {Rodriguez-Ruiz, Alejandro and Teuwen, Jonas and Vreemann, Suzan and Bouwman, Ramona W and van Engen, Ruben E and Karssemeijer, Nico and Mann, Ritse M and Gubern-Merida, Albert and Sechopoulos, Ioannis}, + title = {New reconstruction algorithm for digital breast tomosynthesis: better image quality for humans and computers}, + journal = ACTR, + year = {2018}, + volume = {59}, + issue = {9}, + month = {9}, + pages = {1051--1059}, + doi = {10.1177/0284185117748487}, + abstract = {Background The image quality of digital breast tomosynthesis (DBT) volumes depends greatly on the reconstruction algorithm. Purpose To compare two DBT reconstruction algorithms used by the Siemens Mammomat Inspiration system, filtered back projection (FBP), and FBP with iterative optimizations (EMPIRE), using qualitative analysis by human readers and detection performance of machine learning algorithms. Material and Methods Visual grading analysis was performed by four readers specialized in breast imaging who scored 100 cases reconstructed with both algorithms (70 lesions). Scoring (5-point scale: 1 = poor to 5 = excellent quality) was performed on presence of noise and artifacts, visualization of skin-line and Cooper's ligaments, contrast, and image quality, and, when present, lesion visibility. In parallel, a three-dimensional deep-learning convolutional neural network (3D-CNN) was trained (n = 259 patients, 51 positives with BI-RADS 3, 4, or 5 calcifications) and tested (n = 46 patients, nine positives), separately with FBP and EMPIRE volumes, to discriminate between samples with and without calcifications. The partial area under the receiver operating characteristic curve (pAUC) of each 3D-CNN was used for comparison. Results EMPIRE reconstructions showed better contrast (3.23 vs. 3.10, P = 0.010), image quality (3.22 vs. 3.03, P < 0.001), visibility of calcifications (3.53 vs. 3.37, P = 0.053, significant for one reader), and fewer artifacts (3.26 vs. 2.97, P < 0.001). The 3D-CNN-EMPIRE had better performance than 3D-CNN-FBP (pAUC-EMPIRE = 0.880 vs. pAUC-FBP = 0.857; P < 0.001). 
Conclusion The new algorithm provides DBT volumes with better contrast and image quality, fewer artifacts, and improved visibility of calcifications for human observers, as well as improved detection performance with deep-learning algorithms.}, + file = {:pdf/Rodr18b.pdf:PDF}, + optnote = {AXTI, DIAG, RADIOLOGY}, + pmid = {29254355}, + gsid = {1067383496078365227}, + gscites = {33}, + ss_id = {419668ed3bba71b382853b171b1bf4aecd83d483}, + all_ss_ids = {['419668ed3bba71b382853b171b1bf4aecd83d483']}, +} + +@inproceedings{Rodr18c, + author = {Rodriguez-Ruiz, Alejandro and Mordang, Jan-Jurre and Karssemeijer, Nico and Sechopoulos, Ioannis and Mann, Ritse}, + title = {Can radiologists improve their breast cancer detection in mammography when using a deep learning based computer system as decision support?}, + doi = {10.1117/12.2317937}, + year = {2018}, + abstract = {For more than a decade, radiologists have used traditional computer aided detection systems to read mammograms, but mainly because of a low computer specificity may not improve their screening performance, according to several studies. The breakthrough in deep learning techniques has boosted the performance of machine learning algorithms, also for breast cancer detection in mammography. The objective of this study was to determine whether radiologists improve their breast cancer detection performance when they concurrently use a deep learning-based computer system for decision support, compared to when they read mammography unaided. A retrospective, fully-crossed, multi-reader multi-case (MRMC) study was designed to compare this. The employed decision support system was Transpara™ (Screenpoint Medical, Nijmegen, the Netherlands). Radiologists interact by clicking an area on the mammogram, for which the computer system displays its cancer likelihood score (1-100). In total, 240 cases (100 cancers, 40 false positive recalls, 100 normals) acquired with two different mammography systems were retrospectively collected. Seven radiologists scored each case once with, and once without the use of decision support, providing a forced BI-RADS® score and a level of suspiciousness (1-100). MRMC analysis of variance of the area under the receiver operating characteristic curves (AUC), and specificity and sensitivity were computed. When using decision support, the AUC increased from 0.87 to 0.89 (P=0.043) and specificity increased from 73% to 78% (P=0.030), while sensitivity did not significantly increment (84% to 87%, P=0.180). In conclusion, radiologists significantly improved their performance when using a deep learning-based computer system as decision support.}, + url = {http://dx.doi.org/10.1117/12.2317937}, + file = {Rodr18c.pdf:pdf\Rodr18c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {14th International Workshop on Breast Imaging (IWBI 2018)}, + citation-count = {7}, + automatic = {yes}, +} + +@article{Rodr19, + author = {Rodriguez-Ruiz, Alejandro and Lang, Kristina and Gubern-Merida, Albert and Teuwen, Jonas and Broeders, Mireille and Gennaro, Gisella and Clauser, Paola and Helbich, Thomas H and Chevalier, Margarita and Mertelmeier, Thomas and Wallis, Matthew G and Andersson, Ingvar and Zackrisson, Sophia and Sechopoulos, Ioannis and Mann, Ritse M}, + title = {Can we reduce the workload of mammographic screening by automatic identification of normal exams with artificial intelligence?
A feasibility study}, + journal = ER, + year = {2019}, + volume = {29}, + issue = {9}, + month = {4}, + pages = {4825-4832}, + doi = {10.1007/s00330-019-06186-9}, + abstract = {To study the feasibility of automatically identifying normal digital mammography (DM) exams with artificial intelligence (AI) to reduce the breast cancer screening reading workload. A total of 2652 DM exams (653 cancer) and interpretations by 101 radiologists were gathered from nine previously performed multi-reader multi-case receiver operating characteristic (MRMC ROC) studies. An AI system was used to obtain a score between 1 and 10 for each exam, representing the likelihood of cancer present. Using all AI scores between 1 and 9 as possible thresholds, the exams were divided into groups of low- and high likelihood of cancer present. It was assumed that, under the pre-selection scenario, only the high-likelihood group would be read by radiologists, while all low-likelihood exams would be reported as normal. The area under the reader-averaged ROC curve (AUC) was calculated for the original evaluations and for the pre-selection scenarios and compared using a non-inferiority hypothesis. Setting the low/high-likelihood threshold at an AI score of 5 (high likelihood > 5) results in a trade-off of approximately halving (- 47%) the workload to be read by radiologists while excluding 7% of true-positive exams. Using an AI score of 2 as threshold yields a workload reduction of 17% while only excluding 1% of true-positive exams. Pre-selection did not change the average AUC of radiologists (inferior 95% CI > - 0.05) for any threshold except at the extreme AI score of 9. It is possible to automatically pre-select exams using AI to significantly reduce the breast cancer screening reading workload. * There is potential to use artificial intelligence to automatically reduce the breast cancer screening reading workload by excluding exams with a low likelihood of cancer. * The exclusion of exams with the lowest likelihood of cancer in screening might not change radiologists' breast cancer detection performance. * When excluding exams with the lowest likelihood of cancer, the decrease in true-positive recalls would be balanced by a simultaneous reduction in false-positive recalls.}, + file = {Rodr19.pdf:pdf\\Rodr19.pdf:PDF}, + optnote = {AXTI, DIAG, RADIOLOGY}, + pmid = {30993432}, + ss_id = {1098db3a130ba082c6695a326b70d88dac71f27a}, + all_ss_ids = {['1098db3a130ba082c6695a326b70d88dac71f27a']}, + gscites = {120}, +} + +@inproceedings{Roel03, + author = {T. A. A. J. Roelofs and S. van Woudenberg and J. H. C. L. Hendriks and N. Karssemeijer}, + title = {Optimized soft-copy display of digitized mammograms}, + booktitle = MI, + year = {2003}, + volume = {5034}, + series = SPIE, + pages = {10-19}, + doi = {10.1117/12.479981}, + url = {http://link.aip.org/link/?PSI/5034/10/1}, + abstract = {Digitization and CRT display reduce sharpness of mammograms. To ensure image quality on a CRT, comparable to the quality of original films, a modified unsharp-masking (USM) algorithm is proposed to correct for this reduction. This study evaluates the clinical value of this algorithm and determines the optimal setting of its parameters. Eight complete mammographic cases were processed by a modified USM algorithm with 19 settings for three parameters, resulting in 152 stimuli. All cases showed a clearly visible mass; five also contained microcalcifications. 
The modification of the standard USM algorithm consisted of selectively improving low contrasts. Moreover, the USM enhancement was made grey value dependent to avoid clipping. Four experienced screening radiologists and four physicists (having experience with mammography imaging) rated all mammograms on a 1-10 point scale, according to image quality and suitability for diagnosis. The images were randomly presented. Before the experiment started, a subset of the images was shown to familiarize the observers to the range of images and parameter settings. For a contrast enhancement factor of about 0.4, the processed mammograms appeared to be significantly better than the original digitized mammograms (P<.001). Differences in the results for the radiologists and the physicists were small.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {8711232220331594389}, + gscites = {5}, + ss_id = {ae276654985fe5b01288625048662340310f37e5}, + all_ss_ids = {['ae276654985fe5b01288625048662340310f37e5']}, +} + +@inproceedings{Roel04, + author = {A.A. Roelofs and S. van Woudenberg and J.H. Hendriks and C.J. Evertsz and N. Karssemeijer}, + title = {Effects of computer-aided diagnosis on radiologists' detection of breast masses}, + booktitle = {IWDM '04: Proceedings of the 7th International Workshop on Digital Mammography}, + year = {2004}, + pages = {219--224}, + journal = DIGMAM, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Roel06, + author = {A. A. J. Roelofs and S. van Woudenberg and J. D. M. Otten and J. H. C. L. Hendriks and A. B\"odicker and C. J. G. Evertsz and N. Karssemeijer}, + title = {Effect of soft-copy display supported by {CAD} on mammography screening performance}, + journal = ER, + year = {2006}, + volume = {16}, + pages = {45--52}, + doi = {10.1007/s00330-005-2878-7}, + abstract = {{D}iagnostic performance and reading speed for conventional mammography film reading is compared to reading digitized mammograms on a dedicated workstation. {A} series of mammograms judged negative at screening and corresponding priors were collected. {H}alf were diagnosed as cancer at the next screening, or earlier for interval cancers. {T}he others were normal. {O}riginal films were read by fifteen experienced screening radiologists. {T}he readers annotated potential abnormalities and estimated their likelihood of malignancy. {M}ore than 1 year later, five radiologists reread a subset of 271 cases (88 cancer cases having visible signs in retrospect and 183 normals) on a mammography workstation after film digitization. {M}arkers from a computer-aided detection ({CAD}) system for microcalcifications were available to the readers. {P}erformance was evaluated by comparison of {A}z-scores based on {ROC} and multiple-{R}eader multiple-case ({MRMC}) analysis, and localized receiver operating characteristic ({LROC}) analysis for the 271 cases. {R}eading speed was also determined. {N}o significant difference in diagnostic performance was observed between conventional and soft-copy reading. {A}verage {A}z-scores were 0.83 and 0.84 respectively. {S}oft-copy reading was only slightly slower than conventional reading. 
{U}sing a mammography workstation including {CAD} for detection of microcalcifications, soft-copy reading is possible without loss of quality or efficiency.}, + file = {Roel06.pdf:pdf\\Roel06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {16132926}, + month = {8}, + gsid = {16706916398824875552}, + gscites = {18}, + ss_id = {980eb600637d3f07c103029a76bc20e5b005f9eb}, + all_ss_ids = {['980eb600637d3f07c103029a76bc20e5b005f9eb']}, +} + +@article{Roel07, + author = {A. A. J. Roelofs and N. Karssemeijer and N. Wedekind and C. Beck and S. van Woudenberg and P. R. Snoeren and J. H. C. L. Hendriks and M. Rosselli del Turco and N. Bjurstam and H. Junkermann and D. Beijerinck and B. S\'{e}radour and C. J. G. Evertsz}, + title = {Importance of comparison of current and prior mammograms in breast cancer screening}, + journal = Radiology, + year = {2007}, + volume = {242}, + pages = {70--77}, + doi = {10.1148/radiol.2421050684}, + file = {Roel07.pdf:pdf\\Roel07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {17185661}, + month = {1}, + gsid = {15832914948995714833}, + gscites = {83}, + ss_id = {0f24ca9412671bc21bb3d62c765837877bb19e2d}, + all_ss_ids = {['0f24ca9412671bc21bb3d62c765837877bb19e2d']}, +} + +@article{Roel12, + author = {Roelofsen, Thijs and van Kempen, Leon C. L. T. and van der Laak, Jeroen A. W. M. and van Ham, Maaike A. and Bulten, Johan and Massuger, Leon F. A. G.}, + title = {Concurrent Endometrial Intraepithelial Carcinoma {(EIC)} and Serous Ovarian Cancer. Can {EIC} Be Seen as the Precursor Lesion?}, + doi = {10.1097/IGC.0b013e3182434a81}, + number = {3}, + pages = {457-464}, + volume = {22}, + abstract = {Objective: The pathogenesis of serous ovarian carcinoma (SOC) is still unknown. Recently, endometrial intraepithelial carcinoma (EIC) was proposed to be the precursor lesion of SOC. This study examines the model of EIC as precursor for SOC. Methods: Cases of SOC with a noninvasive or superficially invasive serous lesion, a hyperplastic lesion with/without atypia, or EIC in the endometrium were selected for inclusion in this study. Tissue sections from both ovaries, the fallopian tubes, and the uterus were extensively reviewed by an expert gynecopathologist. For both EIC and SOC, immunostaining for p53, Ki-67, estrogen receptor, and progesterone receptor; TP53 mutation analysis; and in situ ploidy analysis were performed. Results: Nine cases of SOC with concurrent EIC in the endometrium were identified. Immunostaining for p53, Ki-67, estrogen receptor, and progesterone receptor revealed almost identical expression patterns and similar intensities in each pair of EIC and coincident SOC. Identical TP53 mutations were found in SOC and coinciding EIC in 33% of the cases, suggesting a clonal origin. DNA ploidy analysis, as a marker for neoplastic progression, demonstrated an increased number of aneuploid nuclei in SOC compared to their corresponding EIC (P = 0.039). In addition, the mean amount of DNA per nucleus in SOC was higher (ie, more aneuploid) compared to EIC (P = 0.039). Conclusion: This study provides a first indication of EIC as possible precursor lesion for SOC.
This finding could have major clinical implications for future ovarian cancer management and underscores EIC as a possible target for early SOC detection and prevention.}, + file = {Roel12.pdf:pdf\\Roel12.pdf:PDF}, + journal = IJGYNCAN, + month = {3}, + optnote = {DIAG}, + year = {2012}, + gsid = {342965923672756254}, + gscites = {31}, + ss_id = {6beb86f0b9019f6de377b970b6e22feba507c190}, + all_ss_ids = {['6beb86f0b9019f6de377b970b6e22feba507c190']}, +} + +@conference{Roest22a, + author = {Christian Roest and Thomas C. Kwee and Anindo Saha and Jurgen Futterer and Derya Yakar and Henkjan Huisman}, + booktitle = ECR, + title = {AI-Assisted Biparametric MRI Surveillance of Prostate Cancer: Feasibility Study}, + abstract = {PURPOSE: To evaluate the feasibility of automatic longitudinal analysis of consecutive biparametric MRI (bpMRI) scans to detect clinically significant (cs) prostate cancer (PCa). METHODS AND MATERIALS: This retrospective study included a multi-center dataset of 1539 patients who underwent bpMRI (T2 + DWI) between 2014--2020, of whom 105 patients underwent at least two consecutive bpMRI before biopsy without pathologically confirmed csPCa prior to follow-up. A deep learning prostate cancer detection model was developed and trained to produce a heatmap of all PIRADS>=2 lesions across baseline and current studies. The aligned heatmaps for each patient's baseline and current examination were used to extract differential volumetric and likelihood features reflecting explainable changes between examinations. A logistic classifier was trained to predict from these features csPCa (ISUP>1) at the time of the current examination according to biopsy. A model trained on the current study only was developed for comparison. An extended model was developed incorporating clinical parameters (PSA density and age). Cross-validation was performed to assess the detection performance of the models on unseen data. The diagnostic performance of the best model was compared to the radiologist scores. Diagnostic accuracies are compared using likelihood ratio tests and ROC analysis. RESULTS: The model including baseline and current study (AUC 0.73 CI: 0.49 0.89) performed better than the current only model (AUC 0.70 CI: 0.42 0.86), and significantly (P=0.002) improved fit. Adding clinical variables further improved diagnostic performance (AUC 0.79 CI: 0.60 0.94). The extended surveillance model's performance was comparable to that of the radiologist (AUC 0.69 CI: 0.52 0.86). CONCLUSIONS: Our proposed AI-assisted surveillance of prostate MRI can pick up explainable, diagnostically relevant changes with promising diagnostic accuracy. CLINICAL RELEVANCE: Research on artificial intelligence that integrates longitudinal information from follow-up prostate MRI is lacking.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@article{Roest22b, + author = {Christian Roest and Thomas C. Kwee and Anindo Saha and Jurgen Futterer and Derya Yakar and Henkjan Huisman}, + booktitle = ER, + doi = {10.1007/s00330-022-09032-7}, + title = {AI-Assisted Biparametric MRI Surveillance of Prostate Cancer: Feasibility Study}, + abstract = {OBJECTIVES: To evaluate the feasibility of automatic longitudinal analysis of consecutive biparametric MRI (bpMRI) scans to detect clinically significant (cs) prostate cancer (PCa). 
METHODS: This retrospective study included a multi-center dataset of 1513 patients who underwent bpMRI (T2 + DWI) between 2014 and 2020, of whom 73 patients underwent at least two consecutive bpMRI scans and repeat biopsies. A deep learning PCa detection model was developed to produce a heatmap of all PIRADS >= 2 lesions across prior and current studies. The heatmaps for each patient's prior and current examination were used to extract differential volumetric and likelihood features reflecting explainable changes between examinations. A machine learning classifier was trained to predict from these features csPCa (ISUP > 1) at the current examination according to biopsy. A classifier trained on the current study only was developed for comparison. An extended classifier was developed to incorporate clinical parameters (PSA, PSA density, and age). The cross-validated diagnostic accuracies were compared using ROC analysis. The diagnostic performance of the best model was compared to the radiologist scores. RESULTS: The model including prior and current study (AUC 0.81, CI: 0.69, 0.91) resulted in a higher (p = 0.04) diagnostic accuracy than the current only model (AUC 0.73, CI: 0.61, 0.84). Adding clinical variables further improved diagnostic performance (AUC 0.86, CI: 0.77, 0.93). The diagnostic performance of the surveillance AI model was significantly better (p = 0.02) than of radiologists (AUC 0.69, CI: 0.54, 0.81). Conclusions Our proposed AI-assisted surveillance of prostate MRI can pick up explainable, diagnostically relevant changes with promising diagnostic accuracy.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, + ss_id = {0b10a3e57da548c641e38f7ad02937e6bd50203d}, + all_ss_ids = {['0b10a3e57da548c641e38f7ad02937e6bd50203d']}, + gscites = {7}, +} + +@article{Roll09, + author = {E. Rollano-Hijarrubia and R. Manniesing and W.J. Niessen}, + title = {Selective deblurring for improved calcification visualization and quantification in carotid {CT} angiography: validation using micro-{CT}}, + journal = TMI, + year = {2009}, + volume = {28}, + pages = {446--453}, + doi = {10.1109/TMI.2008.2006529}, + abstract = {Visualization and quantification of small structures with computed tomography (CT) is hampered by the limited spatial resolution of the system. Histogram-based selective deblurring (HiSD) is a deconvolution method that restores small high-density structures, i.e., calcifications, of a CT image, using the high-intensity voxel information of the deconvolved image, while preserving the original Hounsfield Units (HUs) in the remaining tissues. In this study, high resolution micro-CT data are used to validate the potential of HiSD to improve calcium visualization and quantification in the carotid arteries on in vivo contrast-enhanced CTA data. The evaluation is performed qualitatively and quantitatively on 15 atherosclerotic plaques obtained from ten different patients. HiSD in combination with vessel segmentation significantly improves calcification visualization and quantification on in vivo contrast-enhanced CT images. Calcification blur is reduced, while avoiding noise amplification and edge-ringing artifacts in the surrounding tissues. Calcification quantification errors are reduced by 23.5\% on average.}, + file = {Roll09.pdf:pdf\\Roll09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {19244016}, + month = {3}, + gsid = {17842968710583635884}, + gscites = {20}, +} + +@article{Roo22, + author = {de Roo, Saskia F. and Teunissen, Joris S. and Rutten, Matthieu J. C.
M. and van der Heijden, Brigitte E. P. A.}, + title = {Tourniquet Does Not Affect Long-term Outcomes in Minor Hand Surgery: A Randomized Controlled Trial}, + doi = {10.1097/gox.0000000000004495}, + year = {2022}, + abstract = { + Background: + Surgeons often prefer to use a tourniquet during minor procedures, such as carpal tunnel release (CTR) or trigger finger release (TFR). Besides the possible discomfort for the patient, the effect of tourniquet use on long-term results and complications is unknown. Our primary aim was to compare the patient-reported outcomes 1 year after CTR or TFR under local anesthesia with or without tourniquet. Secondary outcomes included satisfaction, sonographically estimated scar tissue thickness after CTR, and postoperative complications. + + + Methods: + Between May 2019 and May 2020, 163 patients planned for open CTR or TFR under local anesthesia were included. Before surgery, and at 3, 6, and 12 months postoperatively, Quick Disabilities of the Arm, Shoulder and Hand and Boston Carpal Tunnel questionnaires were administered, and complications were noted. At 6 months postoperatively, an ultrasound was conducted to determine the thickness of scar tissue in the region of median nerve. + + + Results: + A total of 142 patients (51 men [38%]) were included. The Quick Disabilities of the Arm, Shoulder and Hand questionnaire and Boston Carpal Tunnel Questionnaire scores improved significantly in both groups during follow-up, wherein most improvements were seen in the first 3 months. No difference in clinical outcome and scar tissue formation was found between the two groups after 12 months. The complication rate was comparable between both groups. Thirty-two (24%) patients had at least one complication, none needed surgical interventions, and no recurrent symptoms were seen. + + + Conclusions: + Our study shows similar long-term clinical outcomes, formation of scar tissue, and complication rates for patients undergoing CTR or TFR with or without a tourniquet. Tourniquet usage should be based on shared decision-making. + }, + url = {http://dx.doi.org/10.1097/GOX.0000000000004495}, + file = {Roo22.pdf:pdf\Roo22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Plastic and Reconstructive Surgery - Global Open}, + citation-count = {0}, + automatic = {yes}, + pages = {e4495}, + volume = {10}, +} + +@article{Rooi94, + author = {de Rooij, T. P. and Oestmann, J. W. and Schultze Kool, L. J. and Vrooman, H. A. and Prokop, M. and Schaefer, C. M.}, + title = {Application of AMBER in single- and dual-energy digital imaging: improvement in noise level and display dynamic range}, + journal = Radiographics, + year = {1994}, + volume = {14}, + pages = {407--414}, + abstract = {The combination of computed radiography (CR) and advanced multiple beam equalization radiography (AMBER) was evaluated for both single- and dual-energy chest radiography. The improved signal-to-noise ratio found with CR and AMBER resulted in a better visualization of structures in the mediastinum and basal lung than that found with CR alone. For the central lung, no improvement was seen. Because of the compressed dynamic range with CR and AMBER, contrast on hard copies and video monitors could be high without a sacrifice in image latitude. Dual-energy images showed a considerably lower noise level. 
The combined use of AMBER and CR promises to overcome the dynamic range limitations of digital displays while improving signal-to-noise ratio.}, + optnote = {DIAG}, + number = {2}, + pmid = {8190963}, + month = {3}, + gsid = {8731853798187128339}, + gscites = {2}, +} + +@article{Rose11, + author = {Andrew B Rosenkrantz and Marcel Oei and James S Babb and Benjamin E Niver and Bachir Taouli}, + title = {Diffusion-weighted imaging of the abdomen at 3.0 Tesla: image quality and apparent diffusion coefficient reproducibility compared with 1.5 Tesla}, + journal = JMRI, + year = {2011}, + volume = {33}, + pages = {128--135}, + doi = {10.1002/jmri.22395}, + abstract = {To compare single-shot echo-planar imaging (SS EPI) diffusion-weighted MRI (DWI) of abdominal organs between 1.5 Tesla (T) and 3.0T in healthy volunteers in terms of image quality, apparent diffusion coefficient (ADC) values, and ADC reproducibility. Eight healthy volunteers were prospectively imaged in this HIPAA-compliant IRB-approved study. Each subject underwent two consecutive scans at both 1.5 and 3.0T, which included breathhold and free-breathing DWI using a wide range of b-values (0 to 800 s/mm2). A blinded observer rated subjective image quality (maximum score = 8), and a separate observer placed regions of interest within the liver, renal cortices, pancreas, and spleen to measure ADC at each field strength. Paired Wilcoxon tests were used to compare abdominal DWI between 1.5T and 3.0T for specific combinations of organs, b-values, and acquisition techniques. Subjective image quality was significantly lower at 3.0T for all comparisons (P = 0.0078- 0.0156). ADC values were similar at 1.5T and 3.0T for all assessed organs, except for lower liver ADC at 3.0T using b0-500-600 and breathhold technique. ADC reproducibility was moderate at both 1.5T and 3.0T, with no significant difference in coefficient of variation of ADC between field strengths. Compared with 1.5T, SS EPI at 3.0T provided generally similar ADC values, however, with worse image quality. Further optimization of abdominal DWI at 3.0T is needed.}, + file = {Rose11.pdf:pdf\\Rose11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {21182130}, + month = {12}, +} + +@article{Ross20, + author = {Rossi, Alberto and Hosseinzadeh, Matin and Bianchini, Monica and Scarselli, Franco and Huisman, Henkjan}, + title = {Multi-modal siamese network for diagnostically similar lesion retrieval in prostate {MRI}}, + abstract = {Multi-parametric prostate MRI (mpMRI) is a powerful tool to diagnose prostate cancer, though difficult to interpret even for experienced radiologists. A common radiological procedure is to compare a magnetic resonance image with similarly diagnosed cases. To assist the radiological image interpretation process, computerized Content-Based Image Retrieval systems (CBIRs) can therefore be employed to improve the reporting workflow and increase its accuracy. In this paper, we propose a new, supervised siamese deep learning architecture able to handle multi-modal and multi-view MR images with similar PIRADS score. An experimental comparison with well-established deep learning-based CBIRs (namely standard siamese networks and autoencoders) showed significantly improved performance with respect to both diagnostic (ROC-AUC), and information retrieval metrics (Precision-Recall, Discounted Cumulative Gain and Mean Average Precision).
Finally, the new proposed multi-view siamese network is general in design, facilitating a broad use in diagnostic medical imaging retrieval.}, + file = {Ross20.pdf:pdf\\Ross20.pdf:PDF}, + journal = TMI, + doi = {10.1109/TMI.2020.3043641}, + pmid = {33296302}, + year = {2020}, + ss_id = {d89bbd9b8c45e37b4e874fd1fb0690b1af338c29}, + all_ss_ids = {['d89bbd9b8c45e37b4e874fd1fb0690b1af338c29']}, + gscites = {14}, +} + +@article{Roth22, + author = {Roth, Holger R. and Xu, Ziyue and Tor-D\'{i}ez, Carlos and Sanchez Jacob, Ramon and Zember, Jonathan and Molto, Jose and Li, Wenqi and Xu, Sheng and Turkbey, Baris and Turkbey, Evrim and Yang, Dong and Harouni, Ahmed and Rieke, Nicola and Hu, Shishuai and Isensee, Fabian and Tang, Claire and Yu, Qinji and S\"{o}lter, Jan and Zheng, Tong and Liauchuk, Vitali and Zhou, Ziqi and Moltz, Jan Hendrik and Oliveira, Bruno and Xia, Yong and Maier-Hein, Klaus H. and Li, Qikai and Husch, Andreas and Zhang, Luyang and Kovalev, Vassili and Kang, Li and Hering, Alessa and Vila\c{c}a, Jo\~{a}o L. and Flores, Mona and Xu, Daguang and Wood, Bradford and Linguraru, Marius George}, + title = {Rapid artificial intelligence solutions in a pandemic--The COVID-19-20 Lung CT Lesion Segmentation Challenge}, + doi = {10.1016/j.media.2022.102605}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2022.102605}, + file = {Roth22.pdf:pdf\Roth22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + citation-count = {8}, + automatic = {yes}, + pages = {102605}, + volume = {82}, +} + +@article{Rudy14, + author = {Rina D. Rudyanto and Sjoerd Kerkstra and Eva M. van Rikxoort and Catalin Fetita and Pierre-Yves Brillet and Christophe Lefevre and Wenzhe Xue and Xiangjun Zhu and Jianming Liang and Ilkay \"Oks\"uz and Devrim \"Unay and Kamuran Kadipasaoglu and Ra\'{u}l San Jos\'{e} Est\'{e}par and James C. Ross and George R. Washko and Juan-Carlos Prieto and Marcela Hern\'{a}ndez Hoyos and Maciej Orkisz and Hans Meine and Markus H\"ullebrand and Christina St\"ocker and Fernando Lopez Mir and Valery Naranjo and Eliseo Villanueva and Marius Staring and Changyan Xiao and Berend C. Stoel and Anna Fabijanska and Erik Smistad and Anne C.
Elster and Frank Lindseth and Amir Hossein Foruzan and Ryan Kiros and Karteek Popuri and Dana Cobzas and Daniel Jimenez-Carretero and Andres Santos and Maria J. Ledesma-Carbayo and Michael Helmberger and Martin Urschler and Michael Pienn and Dennis G.H. Bosboom and Arantza Campo and Mathias Prokop and Pim A. de Jong and Carlos Ortiz-de-Solorzano and Arrate Mu{\~n}oz-Barrutia and Bram van Ginneken}, + title = {Comparing algorithms for automated vessel segmentation in computed tomography scans of the lung: The {VESSEL12} study}, + journal = MIA, + year = {2014}, + volume = {18}, + pages = {1217--1232}, + doi = {10.1016/j.media.2014.07.003}, + abstract = {The {VESSEL12} ({VES}sel {SE}gmentation in the {L}ung) challenge objectively compares the performance of different algorithms to identify vessels in thoracic computed tomography ({CT}) scans. Vessel segmentation is fundamental in computer aided processing of data generated by 3D imaging modalities. As manual vessel segmentation is prohibitively time consuming, any real world application requires some form of automation. Several approaches exist for automated vessel segmentation, but judging their relative merits is difficult due to a lack of standardized evaluation. We present an annotated reference dataset containing 20 {CT} scans and propose nine categories to perform a comprehensive evaluation of vessel segmentation algorithms from both academia and industry. Twenty algorithms participated in the {VESSEL12} challenge, held at International Symposium on Biomedical Imaging ({ISBI}) 2012. All results have been published at the {VESSEL12} website http://{VESSEL12}.grand-challenge.org. The challenge remains ongoing and open to new participants. Our three contributions are: (1) an annotated reference dataset available online for evaluation of new algorithms; (2) a quantitative scoring system for objective comparison of algorithms; and (3) performance analysis of the strengths and weaknesses of the various vessel segmentation methods in the presence of various lung diseases.}, + file = {Rudy14.pdf:pdf\\Rudy14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25113321}, + publisher = {Elsevier BV}, + month = {10}, + gsid = {5886181063228782}, + gscites = {104}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/137671}, +} + +@inproceedings{Ruh19, + author = {Ruhe, David and Codreanu, Valeriu and van Leeuwen, Caspar and Podareanu, Damian and Saletore, Vikram and Teuwen, Jonas}, + title = {{Generating CT-scans with 3D Generative Adversarial Networks Using a Supercomputer}}, + booktitle = {Medical Imaging meets NeurIPS}, + year = {2019}, + abstract = {In this work we set a baseline for using Generative Adversarial Networks (GAN) to generate 3D medical images. Specifically, we trained a GAN to generate complete 3D chest computed tomography (CT) scans up to 512x512x128 volumes. Most notably this is motivated by the purpose of anonymizing data for easier sharing and applications for low-dosage radiotherapy planning. Because of the memory and compute requirements, the model was trained on a CPU-based supercomputer. End-to-end model training took +-8 days. Additionally, we propose two metrics for large-resolution 3D volume comparison.
In this paper, we report first results using these metrics, hoping to be bettered in the future. To our knowledge, this is the largest-scale experiment of generative models so far, and the first to generate full-resolution complete 3D CT scans.}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Ruiz19, + author = {Alejandro Rodriguez Ruiz}, + title = {Artificial intelligence \& tomosynthesis for breast cancer detection}, + year = {2019}, + url = {https://repository.ubn.ru.nl/handle/2066/201151}, + abstract = {Breast cancer is the most common cancer in women, with almost 2 million new cases diagnosed every year around the globe. However, despite important improvements in awareness, detection, diagnosis, and treatment, breast cancer is still a major cause of mortality, accounting for approximately 500,000 annual deaths worldwide. Breast cancer mortality has decreased in the past decades, primarily because of the introduction of population-based screening programs with mammography and improvements in therapy. By imaging asymptomatic women periodically, breast cancer can be detected early, improving prognosis. However, these screening programs are far from perfect. The use of mammography -a 2D technique- to image the 3D volume of the breast leads to cancers being missed and for many false positive assessments. Another issue is the current labor-intensive screening workflow, in which radiologists must assess millions of exams yearly, of which actually only less than 1% result in a cancer diagnosis. This screening process is heavily threatened by the increasing scarcity of radiologists.}, + copromotor = {I. Sechopoulos and R. Mann}, + file = {:pdf/Ruiz19.pdf:PDF;:png/publications/Ruiz19 - Artificial Intelligence & Tomosynthesis for Breast Cancer Detection.png:PNG image}, + optnote = {AXTI, DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Rutg21, + author = {Rutgers, Jikke J. and B\'{a}nki, Tessa and van der Kamp, Ananda and Waterlander, Tomas J. and Scheijde-Vermeulen, Marijn A. and van den Heuvel-Eibrink, Marry M. and van der Laak, Jeroen A. W. M. and Fiocco, Marta and Mavinkurve-Groothuis, Annelies M. C. and de Krijger, Ronald R.}, + title = {Interobserver variability between experienced and inexperienced observers in the histopathological analysis of Wilms tumors: a pilot study for future algorithmic approach}, + doi = {10.1186/s13000-021-01136-w}, + year = {2021}, + abstract = {Abstract + Background + Histopathological classification of Wilms tumors determines treatment regimen. Machine learning has been shown to contribute to histopathological classification in various malignancies but requires large numbers of manually annotated images and thus specific pathological knowledge. This study aimed to assess whether trained, inexperienced observers could contribute to reliable annotation of Wilms tumor components for classification performed by machine learning. + + Methods + Four inexperienced observers (medical students) were trained in histopathology of normal kidneys and Wilms tumors by an experienced observer (pediatric pathologist). Twenty randomly selected scanned Wilms tumor-slides (from n = 1472 slides) were annotated, and annotations were independently classified by both the inexperienced observers and two experienced pediatric pathologists. Agreement between the six observers and for each tissue element was measured using kappa statistics (k).
+ + Results + Pairwise interobserver agreement between all inexperienced and experienced observers was high (range: 0.845-0.950). The interobserver variability for the different histological elements, including all vital tumor components and therapy-related effects, showed high values for all k-coefficients (> 0.827). + + Conclusions + Inexperienced observers can be trained to recognize specific histopathological tumor and tissue elements with high interobserver agreement with experienced observers. Nevertheless, supervision by experienced pathologists remains necessary. Results of this study can be used to facilitate more rapid progress for supervised machine learning-based algorithm development in pediatric pathology and beyond. + }, + url = {http://dx.doi.org/10.1186/s13000-021-01136-w}, + file = {Rutg21.pdf:pdf\Rutg21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostic Pathology}, + citation-count = {3}, + automatic = {yes}, + volume = {16}, +} + +@article{Saad19, + author = {Saadatmand, Sepideh and Geuzinge, H Amarens and Rutgers, Emiel J T and Mann, Ritse M and de Roy van Zuidewijn, Diderick B W and Zonderland, Harmien M and Tollenaar, Rob A E M and Lobbes, Marc B I and Ausems, Margreet G E M and van 't Riet, Martijne and Hooning, Maartje J and Mares-Engelberts, Ingeborg and Luiten, Ernest J T and Heijnsdijk, Eveline A M and Verhoef, Cees and Karssemeijer, Nico and Oosterwijk, Jan C and Obdeijn, Inge-Marie and de Koning, Harry J and Tilanus-Linthorst, Madeleine M A and FaMRIsc study group}, + title = {MRI versus mammography for breast cancer screening in women with familial risk (FaMRIsc): a multicentre, randomised, controlled trial}, + journal = LANCETO, + year = {2019}, + volume = {20}, + issue = {8}, + month = {6}, + pages = {1136-1147}, + doi = {10.1016/S1470-2045(19)30275-X}, + abstract = {Approximately 15% of all breast cancers occur in women with a family history of breast cancer, but for whom no causative hereditary gene mutation has been found. Screening guidelines for women with familial risk of breast cancer differ between countries. We did a randomised controlled trial (FaMRIsc) to compare MRI screening with mammography in women with familial risk. In this multicentre, randomised, controlled trial done in 12 hospitals in the Netherlands, women were eligible to participate if they were aged 30-55 years and had a cumulative lifetime breast cancer risk of at least 20% because of a familial predisposition, but were BRCA1, BRCA2, and TP53 wild-type. Participants who were breast-feeding, pregnant, had a previous breast cancer screen, or had a previous a diagnosis of ductal carcinoma in situ were eligible, but those with a previously diagnosed invasive carcinoma were excluded. Participants were randomly allocated (1:1) to receive either annual MRI and clinical breast examination plus biennial mammography (MRI group) or annual mammography and clinical breast examination (mammography group). Randomisation was done via a web-based system and stratified by centre. Women who did not provide consent for randomisation could give consent for registration if they followed either the mammography group protocol or the MRI group protocol in a joint decision with their physician. Results from the registration group were only used in the analyses stratified by breast density. Primary outcomes were number, size, and nodal status of detected breast cancers. Analyses were done by intention to treat. This trial is registered with the Netherlands Trial Register, number NL2661. 
Between Jan 1, 2011, and Dec 31, 2017, 1355 women provided consent for randomisation and 231 for registration. 675 of 1355 women were randomly allocated to the MRI group and 680 to the mammography group. 218 of 231 women opting to be in a registration group were in the mammography registration group and 13 were in the MRI registration group. The mean number of screening rounds per woman was 4.3 (SD 1.76). More breast cancers were detected in the MRI group than in the mammography group (40 vs 15; p=0.0017). Invasive cancers (24 in the MRI group and eight in the mammography group) were smaller in the MRI group than in the mammography group (median size 9 mm [5-14] vs 17 mm [13-22]; p=0.010) and less frequently node positive (four [17%] of 24 vs five [63%] of eight; p=0.023). Tumour stages of the cancers detected at incident rounds were significantly earlier in the MRI group (12 [48%] of 25 in the MRI group vs one [7%] of 15 in the mammography group were stage T1a and T1b cancers; one (4%) of 25 in the MRI group and two (13%) of 15 in the mammography group were stage T2 or higher; p=0.035) and node-positive tumours were less frequent (two [11%] of 18 in the MRI group vs five [63%] of eight in the mammography group; p=0.014). All seven tumours stage T2 or higher were in the two highest breast density categories (breast imaging reporting and data system categories C and D; p=0.0077). One patient died from breast cancer during follow-up (mammography registration group). MRI screening detected cancers at an earlier stage than mammography. The lower number of late-stage cancers identified in incident rounds might reduce the use of adjuvant chemotherapy and decrease breast cancer-related mortality. However, the advantages of the MRI screening approach might be at the cost of more false-positive results, especially at high breast density. Dutch Government ZonMw, Dutch Cancer Society, A Sister's Hope, Pink Ribbon, Stichting Coolsingel, J&T Rijke Stichting.}, + file = {Saad19.pdf:pdf\\Saad19.pdf:PDF}, + optnote = {DIAG}, + pmid = {31221620}, + gsid = {15260137744364653571}, + gscites = {19}, + ss_id = {7cbf61f2d7b1fae068e3b815ebcb09e09555cd50}, + all_ss_ids = {['7cbf61f2d7b1fae068e3b815ebcb09e09555cd50']}, +} + +@article{Sadr23, + author = {Sadr, Soroush and Mohammad-Rahimi, Hossein and Motamedian, Saeed Reza and Zahedrozegar, Samira and Motie, Parisa and Vinayahalingam, Shankeeth and Dianat, Omid and Nosrat, Ali}, + title = {Deep Learning for Detection of Periapical Radiolucent Lesions: A Systematic Review and Meta-analysis of Diagnostic Test Accuracy.}, + doi = {10.1016/j.joen.2022.12.007}, + issue = {3}, + pages = {248--261.e3}, + volume = {49}, + abstract = {The aim of this systematic review and meta-analysis was to investigate the overall accuracy of deep learning models in detecting periapical (PA) radiolucent lesions in dental radiographs, when compared to expert clinicians. Electronic databases of Medline (via PubMed), Embase (via Ovid), Scopus, Google Scholar, and arXiv were searched. Quality of eligible studies was assessed by using Quality Assessment and Diagnostic Accuracy Tool-2. Quantitative analyses were conducted using hierarchical logistic regression for meta-analyses on diagnostic accuracy. Subgroup analyses on different image modalities (PA radiographs, panoramic radiographs, and cone beam computed tomographic images) and on different deep learning tasks (classification, segmentation, object detection) were conducted.
Certainty of evidence was assessed by using Grading of Recommendations Assessment, Development, and Evaluation system. A total of 932 studies were screened. Eighteen studies were included in the systematic review, out of which 6 studies were selected for quantitative analyses. Six studies had low risk of bias. Twelve studies had risk of bias. Pooled sensitivity, specificity, positive likelihood ratio, negative likelihood ratio, and diagnostic odds ratio of included studies (all image modalities; all tasks) were 0.925 (95% confidence interval [CI], 0.862-0.960), 0.852 (95% CI, 0.810-0.885), 6.261 (95% CI, 4.717-8.311), 0.087 (95% CI, 0.045-0.168), and 71.692 (95% CI, 29.957-171.565), respectively. No publication bias was detected (Egger's test, P = .82). Grading of Recommendations Assessment, Development and Evaluation showed a "high" certainty of evidence for the studies included in the meta-analyses. Compared to expert clinicians, deep learning showed highly accurate results in detecting PA radiolucent lesions in dental radiographs. Most studies had risk of bias. There was a lack of prospective studies.}, + file = {Sadr23.pdf:pdf\\Sadr23.pdf:PDF}, + journal = {Journal of endodontics}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36563779}, + year = {2023}, +} + +@inproceedings{Saha20a, + author = {Anindo Saha and Fakrul Tushar and Khrystyna Faryna and Vincent D'Anniballe and Rui Hou and Maciej Mazurowski and Geoffrey Rubin and Joseph Lo}, + title = {Weakly Supervised 3D Classification of Chest CT using Aggregated Multi-Resolution Deep Segmentation Features}, + booktitle = MI, + series = SPIE, + year = {2020}, + volume = {11314}, + pages = {39 -- 44}, + url = {https://arxiv.org/abs/2011.00149}, + doi = {10.1117/12.2550857}, + abstract = {Weakly supervised disease classification of CT imaging suffers from poor localization owing to case-level annotations, where even a positive scan can hold hundreds to thousands of negative slices along multiple planes. Furthermore, although deep learning segmentation and classification models extract distinctly unique combinations of anatomical features from the same target class(es), they are typically seen as two independent processes in a computer-aided diagnosis (CAD) pipeline, with little to no feature reuse. In this research, we propose a medical classifier that leverages the semantic structural concepts learned via multi-resolution segmentation feature maps, to guide weakly supervised 3D classification of chest CT volumes. Additionally, a comparative analysis is drawn across two different types of feature aggregation to explore the vast possibilities surrounding feature fusion. Using a dataset of 1593 scans labeled on a case-level basis via rule-based model, we train a dual-stage convolutional neural network (CNN) to perform organ segmentation and binary classification of four representative diseases (emphysema, pneumonia/atelectasis, mass and nodules) in lungs. The baseline model, with separate stages for segmentation and classification, results in AUC of 0.791. Using identical hyperparameters, the connected architecture using static and dynamic feature aggregation improves performance to AUC of 0.832 and 0.851, respectively. This study advances the field in two key ways. First, case-level report data is used to weakly supervise a 3D CT classifier of multiple, simultaneous diseases for an organ.
Second, segmentation and classification models are connected with two different feature aggregation strategies to enhance the classification performance.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {15892485006675731541,4434301023253438692}, +} + +@inproceedings{Saha20b, + author = {Anindo Saha and Matin Hosseinzadeh and Henkjan Huisman}, + title = {Encoding Clinical Priori in 3D Convolutional Neural Networks for Prostate Cancer Detection in bpMRI}, + booktitle = {Medical Imaging Meets NeurIPS Workshop - 34th Conference on Neural Information Processing Systems (NeurIPS)}, + year = {2020}, + url = {https://arxiv.org/abs/2011.00263}, + abstract = {We hypothesize that anatomical priors can be viable mediums to infuse domain-specific clinical knowledge into state-of-the-art convolutional neural networks (CNN) based on the U-Net architecture. We introduce a probabilistic population prior which captures the spatial prevalence and zonal distinction of clinically significant prostate cancer (csPCa), in order to improve its computer-aided detection (CAD) in bi-parametric MR imaging (bpMRI). To evaluate performance, we train 3D adaptations of the U-Net, U-SEResNet, UNet++ and Attention U-Net using 800 institutional training-validation scans, paired with radiologically-estimated annotations and our computed prior. For 200 independent testing bpMRI scans with histologically-confirmed delineations of csPCa, our proposed method of encoding clinical priori demonstrates a strong ability to improve patient-based diagnosis (upto 8.70% increase in AUROC) and lesion-level detection (average increase of 1.08 pAUC between 0.1-1.0 false positive per patient) across all four architectures.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {17689577475889939052}, + ss_id = {0932c8d086f3646cd8a42f9ee5353fe092d73cd4}, + all_ss_ids = {['0932c8d086f3646cd8a42f9ee5353fe092d73cd4']}, + gscites = {5}, +} + +@inproceedings{Saha20c, + author = {Anindo Saha and Prem Prasad and Abdullah Thabit}, + title = {Leveraging Adaptive Color Augmentation in Convolutional Neural Networks for Deep Skin Lesion Segmentation}, + booktitle = ISBI, + year = {2020}, + pages = {2014-2017}, + url = {https://arxiv.org/abs/2011.00148}, + doi = {10.1109/ISBI45749.2020.9098344}, + abstract = {Fully automatic detection of skin lesions in dermatoscopic images can facilitate early diagnosis and repression of malignant melanoma and non-melanoma skin cancer. Although convolutional neural networks are a powerful solution, they are limited by the illumination spectrum of annotated dermatoscopic screening images, where color is an important discriminative feature. In this paper, we propose an adaptive color augmentation technique to amplify data expression and model performance, while regulating color difference and saturation to minimize the risks of using synthetic data. Through deep visualization, we qualitatively identify and verify the semantic structural features learned by the network for discriminating skin lesions against normal skin tissue. 
The overall system achieves a Dice Ratio of 0.891 with 0.943 sensitivity and 0.932 specificity on the ISIC 2018 Testing Set for segmentation.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {18395962714448299499}, +} + +@mastersthesis{Saha20d, + author = {Anindo Saha and Matin Hosseinzadeh and Henkjan Huisman}, + title = {Computer-Aided Detection of Clinically Significant Prostate Cancer in mpMRI}, + abstract = {Non-invasive multiparametric MR imaging (mpMRI) can facilitate the early detection of clinically significant prostate cancer (csPCa). However, interpretation of radiological findings is susceptible to overdiagnosis and low inter-reader agreement, as current assessment standards share a limited ability to distinguish csPCa from benign prostate cancer (PCa) and other non-malignant conditions. In this research, we propose a novel multi-stage computer-aided detection (CAD) model to perform automated voxel-level detection of csPCa in prostate mpMRI. The model is driven by convolutional neural networks (CNN), which use anisotropically-strided 3D convolutions to leverage the spatial context between adjacent MRI slices, without forgoing computational efficiency. It combines spatial and channel-wise attention mechanisms to adaptively target the most salient prostatic structures and discriminative feature dimensions in mpMRI volumes, at multiple resolutions. It uses an additional 3D residual classifier for independent false positive reduction. Finally, it exploits an anatomical prior, which captures the spatial prevalence of csPCa and its zonal distinction, to infuse clinical priori into the CNN architecture for guided inference and feature extraction. For 487 institutional testing scans, the 3D CAD system achieves 83.95% and 89.94% detection sensitivity at 0.5 and 1.0 false positive per patient, respectively, along with 0.884 AUROC in patient-based diagnosis. For 296 external testing scans, the 3D CAD system exhibits moderate agreement with a consensus of expert radiologists (77.70%; kappa = 0.543) and independent pathologists (78.04%; kappa = 0.527), thereby demonstrating a strong ability to generalize to histologically-confirmed csPCa detection using radiologist-supported training samples only.}, + url = {http://eia.udg.edu/~aoliver/maiaDocs/bookMaia3rd_small.pdf}, + optnote = {DIAG, RADIOLOGY}, + school = {University of Girona, UNICAS, University of Bourgogne, Radboud University Medical Center}, + year = {2020}, + journal = {Master thesis}, +} + +@article{Saha21a, + author = {Anindo Saha and Matin Hosseinzadeh and Henkjan Huisman}, + title = {End-to-end Prostate Cancer Detection in bpMRI via 3D CNNs: Effects of Attention Mechanisms, Clinical Priori and Decoupled False Positive Reduction}, + journal = MIA, + pages = {102155}, + year = {2021}, + doi = {10.1016/j.media.2021.102155}, + url = {https://www.sciencedirect.com/science/article/pii/S1361841521002012}, + abstract = {We present a multi-stage 3D computer-aided detection and diagnosis (CAD) model for automated localization of clinically significant prostate cancer (csPCa) in bi-parametric MR imaging (bpMRI). Deep attention mechanisms drive its detection network, targeting salient structures and highly discriminative feature dimensions across multiple resolutions. Its goal is to accurately identify csPCa lesions from indolent cancer and the wide range of benign pathology that can afflict the prostate gland. 
Simultaneously, a decoupled residual classifier is used to achieve consistent false positive reduction, without sacrificing high sensitivity or computational efficiency. In order to guide model generalization with domain-specific clinical knowledge, a probabilistic anatomical prior is used to encode the spatial prevalence and zonal distinction of csPCa. Using a large dataset of 1950 prostate bpMRI paired with radiologically-estimated annotations, we hypothesize that such CNN-based models can be trained to detect biopsy-confirmed malignancies in an independent cohort. + + For 486 institutional testing scans, the 3D CAD system achieves 83.69+-5.22% and 93.19+-2.96% detection sensitivity at 0.50 and 1.46 false positive(s) per patient, respectively, with 0.882+-0.030 AUROC in patient-based diagnosis -significantly outperforming four state-of-the-art baseline architectures (U-SEResNet, UNet++, nnU-Net, Attention U-Net) from recent literature. For 296 external biopsy-confirmed testing scans, the ensembled CAD system shares moderate agreement with a consensus of expert radiologists (76.69%; kappa = 0.51+-0.04) and independent pathologists (81.08%; kappa = 0.56+-0.06); demonstrating strong generalization to histologically-confirmed csPCa diagnosis.}, + optnote = {DIAG, RADIOLOGY}, + algorithm = {https://grand-challenge.org/algorithms/prostate-mri-cad-cspca/}, + gsid = {10384137846444027679}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/238409}, + ss_id = {16bd5fd779ba5acfd59403d3e91d6cf6ecd28f93}, + all_ss_ids = {['16bd5fd779ba5acfd59403d3e91d6cf6ecd28f93']}, + gscites = {78}, +} + +@inproceedings{Saha21b, + author = {Anindo Saha and Joeran S. Bosma and Jasper Linmans and Matin Hosseinzadeh and Henkjan Huisman}, + title = {Anatomical and Diagnostic Bayesian Segmentation in Prostate {MRI} -- Should Different Clinical Objectives Mandate Different Loss Functions?}, + booktitle = {Medical Imaging Meets NeurIPS Workshop - 35th Conference on Neural Information Processing Systems (NeurIPS)}, + year = {2021}, + url = {https://arxiv.org/abs/2110.12889}, + abstract = {We hypothesize that probabilistic voxel-level classification of anatomy and malignancy in prostate MRI, although typically posed as near-identical segmentation tasks via U-Nets, require different loss functions for optimal performance due to inherent differences in their clinical objectives. We investigate distribution, region and boundary-based loss functions for both tasks across 200 patient exams from the publicly-available ProstateX dataset. For evaluation, we conduct a thorough comparative analysis of model predictions and calibration, measured with respect to multi-class volume segmentation of the prostate anatomy (whole-gland, transitional zone, peripheral zone), as well as, patient-level diagnosis and lesion-level detection of clinically significant prostate cancer. Notably, we find that distribution-based loss functions (in particular, focal loss) are well-suited for diagnostic or panoptic segmentation tasks such as lesion detection, primarily due to their implicit property of inducing better calibration. 
Meanwhile, (with the exception of focal loss) both distribution and region/boundary-based loss functions perform equally well for anatomical or semantic segmentation tasks, such as quantification of organ shape, size and boundaries.}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {cad5d5cc2c32623da074133016d1db3da410fc7b}, + all_ss_ids = {['cad5d5cc2c32623da074133016d1db3da410fc7b']}, + gscites = {4}, +} + +@conference{Saha21c, + author = {Anindo Saha and Joeran S. Bosma and Christian Roest and Matin Hosseinzadeh and Jurgen Futterer and Henkjan Huisman}, + booktitle = RSNA, + title = {Deep Learning with Bayesian Inference for Prostate Cancer Diagnosis across Longitudinal Biparametric MRI}, + abstract = {BACKGROUND: Despite increasing use of active surveillance via biparametric MR imaging (bpMRI) for prostate cancer (PCa) management, there is a lack of research in medical image computing that utilize longitudinal studies to assist present-day diagnosis. PURPOSE: To investigate the efficacy of a deep learning-based PCa detection model, that integrates past bpMRI exams and population-level anatomical priors via Bayesian inference. MATERIALS AND METHODS: This retrospective study included 250 consecutive biopsy-naive men (median age: 64 yrs; IQR: 60-69) with elevated levels of PSA (median level: 8 ng/mL; IQR: 5-11), who underwent at least two consecutive MRI exams between 2016-2018 (N=500). Intra-patient bpMRI scans were rigidly registered, paired with expert voxel-level annotations of PI-RADS 2-5 lesions and subsequently used to train a deep learning model to predict and localize all PI-RADS findings. Radiologists utilize prior studies to inform present-day diagnosis. Similarly, for each patient case, computer-aided diagnosis for the follow-up bpMRI exam was derived via Bayesian modelling; probabilistically integrating past bpMRI exams and a population prior for spatial PCa prevalence and zonal anatomy. Diagnostic performance was evaluated by the ability to accurately discriminate patients with benign prostatic tissue (n=10) or PI-RADS <= 3 lesions (n=159), from those carrying PI-RADS >= 4 lesions (n=81), over 5-fold cross-validation. Normalized Wilcoxon Mann-Whitney U statistic was used to derive AUROC and confidence intervals were computed over 5000 replications of bootstrapping. RESULTS: Computer-aided diagnosis of follow-up studies without priori, yielded an AUROC of 0.77 (95% CI: 0.71, 0.83), F0.5 score of 0.51 (95% CI: 0.42, 0.61), positive predictive value (PPV) of 0.51 (95% CI: 0.40, 0.60) and negative predictive value (NPV) of 0.78 (95% CI: 0.71, 0.83). Computer-aided diagnosis of follow-up studies with the inclusion of priori via Bayesian inference yielded an AUROC of 0.80 (95% CI: 0.76, 0.84), F0.5 score of 0.58 (95% CI: 0.48, 0.68), PPV of 0.60 (95% CI: 0.48, 0.72) and NPV of 0.78 (95% CI: 0.72, 0.85). In comparison to stand-alone diagnosis, factoring in prior studies resulted in a 41.4% reduction of AUROC standard deviation across each fold. CONCLUSION: Incorporating past studies and clinical priors via Bayesian inference can improve diagnostic certainty and robustness of deep learning in follow-up patient exams. CLINICAL RELEVANCE/APPLICATION: Prostate cancer is one of the most prevalent cancers in men worldwide. In the absence of experienced radiologists, its morphological heterogeneity can lead to low inter-reader agreement. 
Automated, reliable detection algorithms can improve diagnostic accuracy with consistent quantitative analysis.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@conference{Saha22a, + author = {Anindo Saha and Joeran S. Bosma and Jasper Twilt and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Maarten de Rooij and Henkjan Huisman}, + booktitle = RSNA, + title = {Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: The PI-CAI Challenge}, + abstract = {PURPOSE: The PI-CAI challenge aims to validate the diagnostic performance of artificial intelligence (AI) and radiologists at clinically significant prostate cancer (csPCa) detection/diagnosis in MRI, with histopathology and follow-up (>= 3 years) as reference. The study hypothesizes that state-of-the-art AI algorithms, trained using thousands of patient exams, are non-inferior to radiologists reading bpMRI. MATERIALS AND METHODS: This retrospective study tentatively includes 12,373 prostate MRI exams (11,193 patients) between 2012 and 2021, curated from three Dutch centers and one Norwegian center. All patient exams are of men suspected of harboring csPCa, without a history of treatment or prior positive histopathology (Gleason Grade Group >= 2) findings. Acquisitions were obtained using Siemens Healthineers or Philips Medical Systems-based 1.5T/3T MRI scanners. In total, 11,373 cases are available to develop and train AI solutions in the framework of a grand challenge. The remaining 1000 unseen cases are reserved for testing. A subset of 400 testing cases is used to facilitate a reader study with 63 radiologists (42 centers, 18 countries; 1-23 years of experience reading prostate MRI {median: 9 years}). Readers perform their assessments sequentially for each patient: first, using bpMRI sequences only; secondly, using full mpMRI sequences (in compliance with PI-RADS v2.1). Multi-reader multi-case (MRMC) analysis is used to compare the overall patient-level diagnostic performance of readers against that of the top five AI algorithms. Non-inferiority is tested with a significance margin of 0.05. Permutation tests are used to statistically compare AI and radiologists' performance at PI-RADS operating points. The PI-CAI challenge spans 7 months (May-November 2022) in duration. Key aspects of its study protocol have been established in conjunction with an international scientific advisory board of 16 experts in prostate AI, radiology, and urology --to unify and standardize present-day guidelines, and ensure meaningful validation of prostate-AI towards clinical translation. RESULTS: Preliminary results of the PI-CAI challenge will be presented. CLINICAL RELEVANCE: Prostate MRI assessments show high inter-reader variability (>50%), long reporting times, and strong dependence on expertise. A thorough comparison of AI with radiologists builds trust, allowing AI to help improve diagnostic accuracy and reduce workload.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@conference{Saha23a, + author = {Anindo Saha and Joeran S. 
Bosma and Jasper Twilt and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Maarten de Rooij and Henkjan Huisman}, + title = {Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: The PI-CAI Challenge}, + booktitle = ECR, + year = {2023}, + abstract = {PURPOSE: The PI-CAI challenge aims to validate the diagnostic performance of artificial intelligence (AI) and radiologists at clinically significant prostate cancer (csPCa) detection in MRI, with histopathology and follow-up (>=3 years) as the reference standard. METHODS: This retrospective study includes 10,207 prostate MRI exams (9129 patients) curated from four European tertiary care centers between 2012-2021. All patients were men suspected of harboring prostate cancer, without a history of treatment or prior csPCa findings. Imaging was acquired using various 1.5 or 3T MRI scanners, equipped with surface coils. Algorithm developers worldwide were invited to develop AI models for detecting csPCa in biparametric MRI (bpMRI). For a given bpMRI exam, AI models were required to complete two tasks: localize all csPCa lesions (if any), and predict the case-level likelihood of csPCa diagnosis. To this end, AI models could use imaging data and several variables (e.g. patient age, PSA level, scanner model) to inform their predictions. Once developed, these algorithms were independently tested using 1000 cases (including external data) in a fully-blinded setting. RESULTS: The PI-CAI study protocol was established in conjunction with 16 experts across prostate radiology, urology and AI. Between June-November 2022, >830 individuals (>50 countries) opted-in and >310 algorithm submissions were made. When trained on 1500 training cases, the top five most performant AI models reached 0.88+-0.01 AUROC in patient diagnosis, and 76.38+-0.74% sensitivity at 0.5 false positives per case in lesion detection. CONCLUSION: Preliminary findings indicate that the diagnostic performance of state-of-the-art AI models is comparable to that of radiologists reported in literature. LIMITATIONS: Radiology readings of the original data were used to guide biopsy planning, histopathology grading, and in turn, set the reference standard.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Saha23b, + author = {Anindo Saha and Joeran S. Bosma and Jasper Twilt and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Maarten de Rooij and Henkjan Huisman}, + title = {Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: The PI-CAI Challenge}, + booktitle = MIDL, + year = {2023}, + abstract = {We hypothesized that state-of-the-art AI models, trained using thousands of patient cases, are non-inferior to radiologists at clinically significant prostate cancer diagnosis using MRI. To test the same, we designed an international comparative study titled the PI-CAI challenge, where we investigated AI models that were independently developed, trained and externally tested using a large multi-center cohort of 10,207 patient exams. 
Preliminary results indicate that when trained on 1,500 cases only, such models already achieve diagnostic performance comparable to that of radiologists reported in literature.}, + url = {https://openreview.net/forum?id=XfXcA9-0XxR}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Sail04, + author = {Johannes Sailer and Mathias Prokop and Ulrich Neitzel and Michael Weber and Philipp Peloschek and Cornelia {Schaefer-Prokop}}, + title = {Comparison of an automatic versus a semiautomatic mode for gray-scale adaptation for digital chest radiography}, + journal = IR, + year = {2004}, + volume = {39}, + pages = {325--33}, + file = {Sail04.pdf:pdf\\Sail04.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {6}, + month = {6}, + gsid = {7416471056879015000}, + gscites = {3}, +} + +@article{Saks14, + author = {Saksens, Nicole and Kersten, Eveline and Groenewoud, Joannes M M. and van Grinsven, Mark J J P. and van de Ven, Johannes P H. and S\'{a}nchez, Clara I. and Schick, Tina and Fauser, Sascha and den Hollander, Anneke and Hoyng, Carel and Boon, Camiel J F.}, + title = {Clinical characteristics of familial and sporadic age-related macular degeneration: differences and similarities}, + journal = IOVS, + year = {2014}, + volume = {55}, + pages = {7085-7092}, + doi = {10.1167/iovs.14-14659}, + abstract = {Purpose: To describe the differences and similarities in clinical characteristics and phenotype of familial and sporadic patients with age-related macular degeneration (AMD). Methods: We evaluated data of 1828 AMD patients and 1715 controls enrolled in the European Genetic Database. All subjects underwent ophthalmologic examination, including visual acuity testing and fundus photography. Images were graded and fundus photographs were used for automatic drusen quantification by a machine learning algorithm. Data on disease characteristics, family history, medical history and lifestyle habits were obtained by a questionnaire. Results: The age at first symptoms was significantly lower in AMD patients with a positive family history (68.7 years) than in AMD patients with no family history (71.8 years; P = 1.9x10-5). Risk factors identified in sporadic and familial subjects were increasing age (OR 1.08 per year; P = 3.0x10-51 and OR 1.15; P = 5.3x10-36, respectively) and smoking (OR 1.01 per pack year; P = 1.1x10-6 and OR 1.02; P = 0.005). Physical activity and daily red meat consumption were significantly associated with AMD in sporadic subjects only (OR 0.49; P = 3.7x10-10 and OR 1.81; P = 0.001). With regard to the phenotype, geographic atrophy and cuticular drusen were significantly more prevalent in familial AMD (17.5\% and 21.7\%, respectively) as compared to sporadic AMD (9.8\% and 12.1\%). Conclusions: Familial AMD patients become symptomatic at a younger age. The higher prevalence of geographic atrophy and cuticular drusen in the familial AMD cases may be explained by the contribution of additional genetic factors segregating within families.}, + file = {Saks14.pdf:pdf\\Saks14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25301878}, + month = {11}, + gsid = {15875798834789801368}, + gscites = {9}, + ss_id = {6873c25b4466ae69a4755dd1f937bf0b82f6ad11}, + all_ss_ids = {['6873c25b4466ae69a4755dd1f937bf0b82f6ad11']}, +} + +@mastersthesis{Samu06, + author = {Samulski, Maurice}, + title = {Classification of {B}reast {L}esions in {D}igital {M}ammograms}, + year = {2006}, + abstract = {{B}reast cancer is the most common life-threatening type of cancer affecting women in {T}he {N}etherlands. 
{A}bout 10% of the {D}utch women have to face breast cancer in their lifetime. {T}he success of the treatment of breast cancer largely depends on the stage of a tumor at the time of detection. {I}f the size of the invasive cancer is smaller than 20 mm and no metastases are found, chances of successful treatment are high. {T}herefore, early detection of breast cancer is essential. {A}lthough mammography screening is currently the most effective tool for early detection of breast cancer, up to one-fifth of women with invasive breast cancer have a mammogram that is interpreted as normal, i.e., a false-negative mammogram result. {A}n important cause are interpretation errors, i.e., when a radiologist sees the cancer, but classify it as benign. {I}n addition, the number of false-positive mammogram results is quite high, more than half of women who undergo a biopsy actually have breast cancer. {T}o overcome such limitations, {C}omputer-{A}ided {D}iagnosis ({CAD}) systems for automatic classification of breast lesions as either benign or malignant are being developed. {CAD} systems help radiologists with the interpretation of lesions, such that they refer less women for further examination when they actually have benign lesions. {T}he dataset we used consists of mammographic features extracted by automated image processing algorithms from digitized mammograms of the {D}utch screening programme. {I}n this thesis we constructed several types of classifiers, i.e., {B}ayesian networks and support vector machines, for the task of computer-aided diagnosis of breast lesions. {W}e evaluated the results with receiver operating characteristic ({ROC}) analysis to compare their classification performance. {T}he overall conclusion is that support vector machines are still the method of choice if the aim is to maximize classification performance. {A}lthough {B}ayesian networks are not primarily designed for classification problems, they did not perform drastically lower. {I}f new datasets are being constructed and more background knowledge becomes available, the advantages of {B}ayesian networks, i.e., incorporating domain knowledge and modeling dependencies, could play an important role in the future.}, + file = {Samu06.pdf:pdf\\Samu06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Radboud University Nijmegen}, + journal = {Master thesis}, +} + +@inproceedings{Samu07, + author = {Samulski, Maurice and Karssemeijer, Nico and Lucas, Peter and Groot, Perry}, + title = {Classification of mammographic masses using support vector machines and {B}ayesian networks}, + booktitle = MI, + year = {2007}, + volume = {6514}, + series = SPIE, + pages = {65141J}, + doi = {10.1117/12.709679}, + url = {http://link.aip.org/link/?PSI/6514/65141J/1}, + abstract = {{I}n this paper, we compare two state-of-the-art classification techniques characterizing masses as either benign or malignant, using a dataset consisting of 271 cases (131 benign and 140 malignant), containing both a {MLO} and {CC} view. {F}or suspect regions in a digitized mammogram, 12 out of 81 calculated image features have been selected for investigating the classification accuracy of support vector machines ({SVM}s) and {B}ayesian networks ({BN}s). {A}dditional techniques for improving their performance were included in their comparison: the {M}anly transformation for achieving a normal distribution of image features and principal component analysis ({PCA}) for reducing our high-dimensional data. 
{T}he performance of the classifiers were evaluated with {R}eceiver {O}perating {C}haracteristics ({ROC}) analysis. {T}he classifiers were trained and tested using a k-fold cross-validation test method (k=10). {I}t was found that the area under the {ROC} curve ({A}z) of the {BN} increased significantly (p=0.0002) using the {M}anly transformation, from {A}z = 0.767 to {A}z = 0.795. {T}he {M}anly transformation did not result in a significant change for {SVM}s. {A}lso the difference between {SVM}s and {BN}s using the transformed dataset was not statistically significant (p=0.78). {A}pplying {PCA} resulted in an improvement in classification accuracy of the naive {B}ayesian classifier, from {A}z = 0.767 to {A}z = 0.786. {T}he difference in classification performance between {BN}s and {SVM}s after applying {PCA} was small and not statistically significant (p=0.11).}, + file = {Samu07.pdf:pdf\\Samu07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + gsid = {4945331176484965869}, + gscites = {13}, + ss_id = {17076b47c70f579a9e3fc9a898c86dfe62fdfdca}, + all_ss_ids = {['17076b47c70f579a9e3fc9a898c86dfe62fdfdca']}, +} + +@inproceedings{Samu08, + author = {Samulski, Maurice and Karssemeijer, Nico}, + title = {Matching mammographic regions in mediolateral oblique and cranio caudal views: a probabilistic approach}, + booktitle = MI, + year = {2008}, + volume = {6915}, + series = SPIE, + pages = {69151M}, + doi = {10.1117/12.769792}, + url = {http://link.aip.org/link/?PSI/6915/69151M/1}, + abstract = {{M}ost of the current {CAD} systems detect suspicious mass regions independently in single views. {I}n this paper we present a method to match corresponding regions in mediolateral oblique ({MLO}) and craniocaudal ({CC}) mammographic views of the breast. {F}or every possible combination of mass regions in the {MLO} view and {CC} view, a number of features are computed, such as the difference in distance of a region to the nipple, a texture similarity measure, the gray scale correlation and the likelihood of malignancy of both regions computed by single-view analysis. {I}n previous research, {L}inear {D}iscriminant {A}nalysis was used to discriminate between correct and incorrect links. {I}n this paper we investigate if the performance can be improved by employing a statistical method in which four classes are distinguished. {T}hese four classes are defined by the combinations of view ({MLO}/{CC}) and pathology ({TP}/{FP}) labels. {W}e use distance-weighted k-{N}earest {N}eighbor density estimation to estimate the likelihood of a region combination. {N}ext, a correspondence score is calculated as the likelihood that the region combination is a {TP}-{TP} link. {T}he method was tested on 412 cases with a malignant lesion visible in at least one of the views. {I}n 82.4% of the cases a correct link could be established between the {TP} detections in both views. {I}n future work, we will use the framework presented here to develop a context dependent region matching scheme, which takes the number and likelihood of possible alternatives into account. 
{I}t is expected that more accurate determination of matching probabilities will lead to improved {CAD} performance.}, + file = {Samu08.pdf:pdf\\Samu08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + gsid = {12671692002821730625}, + gscites = {20}, + ss_id = {11ddc349211e0ee1ff0e463ef6933bfa9d3919fa}, + all_ss_ids = {['11ddc349211e0ee1ff0e463ef6933bfa9d3919fa']}, +} + +@inproceedings{Samu08a, + author = {Samulski, Maurice and Karssemeijer, Nico}, + title = {Linking mass regions in mediolateral oblique and cranio caudal views}, + booktitle = {Proceedings of the 14th ASCI conference}, + year = {2008}, + pages = {214--221}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Samu08b, + author = {Samulski, M. and Karssemeijer, N. and Boetes, C. and den Heeten, G.}, + title = {An Interactive Computer-aided Detection Workstation for Reading Mammograms}, + booktitle = {94th Radiological Society of North America Scientific Assembly and Annual Meeting}, + year = {2008}, + url = {http://rsna2008.rsna.org/event_display.cfm?em_id=6012408}, + abstract = {{PURPOSE}/{AIM} {T}o experience the use of an interactive computer-aided decision support system for the detection of mammographic masses. {T}o demonstrate the real-time classification of breast lesions. {CONTENT} {ORGANIZATION} {T}he idea of using {CAD} in an interactive way will be explained. {T}hen a case review will be offered, in which participants evaluate a small set of abnormal and normal screening mammograms in two sequential sessions: one without and the other with interactive {CAD}. {P}articipants are asked to find and rate abnormal masses. {A}t the end of the session, participants can judge their performance and are given the opportunity to review their scores for each case. {SUMMARY} {C}urrent computer-aided detection workstations display suspicious mammographic regions identified by computer algorithms as prompts to avoid perceptual oversights. {I}n the presented system the presence of {CAD} regions can be probed interactively using a mouse click and aid the radiologist with the interpretation of masses. {I}nitial studies suggest that readers may improve their detection performance using {CAD} in an interactive way.}, + file = {A1 Poster:pdf\\Samu08b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {e0f346c924c5e504a8318026e2d38d33bc6069b7}, + all_ss_ids = {['e0f346c924c5e504a8318026e2d38d33bc6069b7']}, + gscites = {1}, +} + +@inproceedings{Samu09, + author = {Samulski, M. and Hupse, A. and Boetes, C. and den Heeten, G. and Karssemeijer, N.}, + title = {Analysis of probed regions in an interactive {CAD} system for the detection of masses in mammograms}, + booktitle = MI, + year = {2009}, + volume = {7263}, + series = SPIE, + pages = {726314}, + doi = {10.1117/12.813391}, + url = {http://link.aip.org/link/?PSI/7263/726314/1}, + abstract = {{M}ost computer aided detection ({CAD}) systems for mammographic mass detection display all suspicious regions identified by computer algorithms and are mainly intended to avoid missing cancers due to perceptual oversights. {C}onsidering that interpretation failure is recognized to be a more common cause of missing cancers in screening than perceptual oversights, a dedicated mammographic {CAD} system has been developed that can be queried interactively for the presence of {CAD} prompts using a mouse click. 
{T}o assess the potential benefit of using {CAD} in an interactive way, an observer study was conducted in which 4 radiologists and 6 non-radiologists evaluated 60 cases with and without {CAD}, to compare the detection performance of the unaided reader with that of the reader with {CAD} assistance. 20 cases had a malignant mass, and 40 were cancer-free. {D}uring the reading sessions we recorded time and probed locations which reveal information about the search strategy and detection process. {T}he purpose of this study is to determine a relation between detection performance and time to first probe of the lesion and to investigate if longer reading times lead to more reports of malignant lesions in lesion-free areas. {O}n average, 65.0% of the malignant lesions were found within 60 seconds and this percentage stabilizes after this period. {R}esults suggest that longer reading time did not lead to more false positives. 74.6% of the reported true positive findings were hit by the first probe, and 93.2% were hit within 5 probes, which may suggest that many of the correctly reported malignant masses were perceived immediately after image onset.}, + file = {Samu09.pdf:pdf\\Samu09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {2}, + gsid = {15103250986617971832}, + gscites = {5}, + ss_id = {390fb83c871c013ba98b5590a8c729266b05ed87}, + all_ss_ids = {['390fb83c871c013ba98b5590a8c729266b05ed87']}, +} + +@article{Samu10, + author = {M. Samulski and R. Hupse and C. Boetes and R. Mus and G. den Heeten and N. Karssemeijer}, + title = {Using {C}omputer {A}ided {D}etection in {M}ammography as a {D}ecision {S}upport}, + journal = ER, + year = {2010}, + volume = {20}, + pages = {2323-2330}, + doi = {10.1007/s00330-010-1821-8}, + abstract = {{O}bjective: {T}o evaluate an interactive computer-aided detection ({CAD}) system for reading mammograms to improve decision making. {M}ethods: {A} dedicated mammographic workstation has been developed in which readers can probe image locations for the presence of {CAD} information. {I}f present, {CAD} findings are displayed with the computed malignancy rating. {A} reader study was conducted in which four screening radiologists and five non-radiologists participated to study the effect of this system on detection performance. {T}he participants read 120 cases of which 40 cases had a malignant mass that was missed at the original screening. {T}he readers read each mammogram both with and without {CAD} in separate sessions. {E}ach reader reported localized findings and assigned a malignancy score per finding. {M}ean sensitivity was computed in an interval of false-positive fractions less than 10%. {R}esults: {M}ean sensitivity was 25.1% in the sessions without {CAD} and 34.8% in the {CAD}-assisted sessions. {T}he increase in detection performance was significant (p = 0.012). {A}verage reading time was 84.7+/- 61.5 s/case in the unaided sessions and was not significantly higher when interactive {CAD} was used (85.9 +/- 57.8 s/case). {C}onclusion: {I}nteractive use of {CAD} in mammography may be more effective than traditional {CAD} for improving mass detection without affecting reading time.}, + file = {Samu10.pdf:pdf/Samu10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {20532890}, + month = {6}, + gsid = {4560188704602657719}, + gscites = {67}, + ss_id = {7fa621703a760695adebedc925d1749efe73797d}, + all_ss_ids = {['7fa621703a760695adebedc925d1749efe73797d']}, +} + +@inproceedings{Samu11, + author = {M.R.M. Samulski and P.R. 
Snoeren and B. Platel and B. van Ginneken and L. Hogeweg and C. Schaefer-Prokop and N. Karssemeijer}, + title = {Computer-{A}ided {D}etection as a {D}ecision {A}ssistant in {C}hest {R}adiography}, + booktitle = MI, + year = {2011}, + volume = {7966}, + series = SPIE, + pages = {796614-1--796614-6}, + doi = {10.1117/12.877968}, + abstract = {Background: Contrary to what may be expected, finding abnormalities in complex images like pulmonary nodules in chest radiographs is not dominated by time-consuming search strategies but by an almost immediate global interpretation. This was already known in the nineteen-seventies from experiments with briefly flashed chest radiographs. Later on, experiments with eye-trackers showed that abnormalities attracted the attention quite fast but often without further reader actions. Prolonging one's search seldom leads to newly found abnormalities and may even increase the chance of errors. The problem of reading chest radiographs is therefore not dominated by finding the abnormalities, but by interpreting them. Hypothesis: This suggests that readers could benefit from computer-aided detection (CAD) systems not so much by their ability to prompt potential abnormalities, but more from their ability to `interpret' the potential abnormalities. In this paper, this hypothesis was investigated by an observer experiment. Experiment: In one condition, the traditional CAD condition, the most suspicious CAD locations were shown to the subjects, without telling them the levels of suspiciousness according to CAD. In the other condition, interactive CAD condition, levels of suspiciousness were given, but only when readers requested them at specified locations. These two conditions focus on decreasing search errors and decision errors, respectively. Results of reading without CAD were also recorded. Six subjects, all non-radiologists, read 223 chest radiographs in both conditions. CAD results were obtained from the OnGuard 5.0 system developed by Riverain Medical (Miamisburg, Ohio). Results: The observer data were analyzed by Location Response Operating Characteristic analysis (LROC). It was found that: 1) With the aid of CAD, the performance is significantly better than without CAD; 2) The performance with interactive CAD is significantly better than with traditional CAD at low false positive rates.}, + file = {Samu11.pdf:pdf/Samu11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {3}, + gsid = {13676975540218125841}, + gscites = {7}, + ss_id = {cfd171ef5dd78227c2253f32340d7d4c040d4444}, + all_ss_ids = {['cfd171ef5dd78227c2253f32340d7d4c040d4444']}, +} + +@article{Samu11a, + author = {Samulski, M. and Karssemeijer, N.}, + title = {Optimizing {C}ase-based {D}etection {P}erformance in a {M}ultiview {CAD} {S}ystem for {M}ammography}, + journal = TMI, + year = {2011}, + volume = {30}, + pages = {1001--1009}, + doi = {10.1109/TMI.2011.2105886}, + abstract = {When reading mammograms, radiologists combine information from multiple views to detect abnormalities. Most computer-aided detection {(CAD)} systems, however, use primitive methods for inclusion of multi-view context or analyze each view independently. In previous research it was found that in mammography lesion-based detection performance of {CAD} systems can be improved when correspondences between {MLO} and {CC} views are taken into account. However, detection at case level detection did not improve. 
In this paper, we propose a new learning method for multi-view {CAD} systems, which is aimed at optimizing case-based detection performance. The method builds on a single-view lesion detection system and a correspondence classifier. The latter provides class probabilities for the various types of region pairs and correspondence features. The correspondence classifier output is used to bias the selection of training patterns for a multi-view {CAD} system. In this way training can be forced to focus on optimization of case-based detection performance. The method is applied to the problem of detecting malignant masses and architectural distortions. Experiments involve 454 mammograms consisting of 4 views with a malignant region visible in at least one of the views. To evaluate performance, 5-fold cross validation and {FROC} analysis was performed. Bootstrapping was used for statistical analysis. A significant increase of case-based detection performance was found when the proposed method was used. Mean sensitivity increased by 4.7% in the range of 0.01-0.5 false positives per image.}, + file = {Samu11a.pdf:pdf/Samu11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {21233045}, + month = {4}, + gsid = {18346121360535689983}, + gscites = {79}, + ss_id = {413ed0cca2f1915d279ae891324633c18c89cb74}, + all_ss_ids = {['413ed0cca2f1915d279ae891324633c18c89cb74']}, +} + +@phdthesis{Samu11c, + author = {M. Samulski}, + title = {Computer Aided Detection as a Decision Aid in Medical Screening}, + year = {2011}, + url = {http://repository.ubn.ru.nl/handle/2066/91305}, + abstract = {For many years, it has been recognized that even the best radiologists make errors when reading medical exams including perception failures and interpretation failures. To reduce these problems, computer aided detection and diagnosis systems have been designed to aid radiologists detecting and classifying abnormalities. The first part of this thesis concerns combining information from multiple mammographic projection views to improve detection performance of computer aided detection systems. Most computer-aided detection systems that are used in the clinic today are focussed on reducing perception errors. The research presented in the second part of this thesis investigates if presenting CAD results in a fundamentally different way to avoid interpretation errors is more effective than current computer aided detection methods that focus on preventing perceptual oversights in medical screening.}, + file = {Samu11c.pdf:pdf/Samu11c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer and P. J. F. Lucas}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@inproceedings{Sanc04, + author = {C. I. S\'{a}nchez and R. Hornero and M. I. L\'{o}pez and J. Poza}, + title = {Retinal image analysis to detect and quantify lesions associated with diabetic retinopathy}, + booktitle = EMBS, + year = {2004}, + volume = {3}, + pages = {1624--1627}, + doi = {10.1109/IEMBS.2004.1403492}, + abstract = {{A}n automatic method to detect hard exudates, a lesion associated with diabetic retinopathy, is proposed. {T}he algorithm found on their color, using a statistical classification, and their sharp edges, applying an edge detector, to localize them. {A} sensitivity of 79.62\% with a mean number of 3 false positives per image is obtained in a database of 20 retinal image with variable color, brightness and quality. 
{I}n that way, we evaluate the robustness of the method in order to make adequate to a clinical environment. {F}urther efforts will be done to improve its performance.}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17272012}, + gsid = {13289391543586700109}, + gscites = {116}, +} + +@inproceedings{Sanc06, + author = {C. I. S\'{a}nchez and A. Mayo and M. Garc\'{i}a and M. I. L\'{o}pez and R. Hornero}, + title = {Automatic image processing algorithm to detect hard exudates based on mixture models}, + booktitle = EMBS, + year = {2006}, + volume = {1}, + pages = {4453--4456}, + doi = {10.1109/IEMBS.2006.260434}, + abstract = {{A}utomatic detection of hard exudates from retinal images is clinically significant. {H}ard exudates are associated with diabetic retinopathy and have been found to be one of the most prevalent earliest clinical signs of retinopathy. {I}n this study, an automatic method to detect hard exudates is proposed. {T}he algorithm is based on mixture models to dynamically threshold the images in order to separate hard exudates from background. {W}e prospectively assessed the algorithm performance using a database of 20 retinal images with variable color, brightness, and quality. {T}he algorithm obtained a sensitivity of 90.23\% and a predictive value of 82.5\% using a lesion-based criterion. {T}he image-based classification accuracy is also evaluated obtaining a sensitivity of 100\% and a specificity of 90\%}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17945839}, + month = {8}, + gsid = {1822525621576928668}, + gscites = {33}, +} + +@inproceedings{Sanc06a, + author = {C. I. S\'{a}nchez and M. I. L\'{o}pez and M. Garc\'{i}a and A. Mayo and R. Hornero}, + title = {Automated retinal image analysis in a diabetic retinopathy telescreening program}, + booktitle = {Telemedicine in Future Health}, + year = {2006}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Sanc08, + author = {C. I. S\'{a}nchez and R. Hornero and M. I. L\'{o}pez and M. Aboy and J. Poza and D. Ab\'{a}solo}, + title = {A novel automatic image processing algorithm for detection of hard exudates based on retinal image analysis}, + journal = MEP, + year = {2008}, + volume = {30}, + number = {3}, + pages = {350--357}, + doi = {10.1016/j.medengphy.2007.04.010}, + abstract = {{W}e present an automatic image processing algorithm to detect hard exudates. {A}utomatic detection of hard exudates from retinal images is an important problem since hard exudates are associated with diabetic retinopathy and have been found to be one of the most prevalent earliest signs of retinopathy. {T}he algorithm is based on {F}isher's linear discriminant analysis and makes use of colour information to perform the classification of retinal exudates. {W}e prospectively assessed the algorithm performance using a database containing 58 retinal images with variable colour, brightness, and quality. {O}ur proposed algorithm obtained a sensitivity of 88\% with a mean number of 4.83+/-4.64 false positives per image using the lesion-based performance evaluation criterion, and achieved an image-based classification accuracy of 100\% (sensitivity of 100\% and specificity of 100\%).}, + file = {Sanc08.pdf:pdf\\Sanc08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17556004}, + month = {4}, + gsid = {806873315643611742}, + gscites = {179}, + ss_id = {896b8f5aeb3a16a887fb0fe1199096551b6f2ea9}, + all_ss_ids = {['896b8f5aeb3a16a887fb0fe1199096551b6f2ea9']}, +} + +@phdthesis{Sanc08b, + author = {C.I. 
S\'{a}nchez}, + title = {Retinal image analysis by mixture model based clustering and discriminant analysis for automatic detection of hard exudates and haemorrhages: A tool for diabetic retinopathy screening}, + abstract = {Diabetic Retinopathy (DR) is a disorder of the retinal vasculature that eventually develops to some degree in nearly all patients with diabetes. The growing incidence of diabetes has caused DR to become the major cause of blindness and vision defects in developed countries. In view of the increasing prevalence of diabetes mellitus and DR throughout most of the world, mass screening is time consuming and requires many trained graders to examine the fundus photographs searching for retinal lesions. A reliable method for automated assessment of the presence of lesions in fundus images would be a valuable tool in assisting the limited number of professional and reducing the examination time. Additionally, this system could be a useful clinical aid in the routine eye care by displaying retinal images with enhanced details and facilitating diagnosis. This thesis aims at developing a Computer-Aided Diagnosis (CAD) technology to assist ophthalmologists in DR screening programs, in order to detect and quantify early signs associated with DR in fundus photographs. In the onset of the disease, several visual retinal abnormalities appear in the retinal fundus, including red lesions, such as microaneurysms and haemorrhages, and white lesions, such as hard exudates and cotton-wool spots. Among these lesions, haemorrhages and hard exudates are the most prevalent in background retinopathy. Therefore, their detection is critical for DR mass screening. Additionally, hard exudates represent the most specific marker for the presence of coexistent retinal oedema, the major cause of visual loss in the nonproliferative forms of DR. The principal aim of this work is to develop novel computer methods to automatically detect and quantify haemorrhages and hard exudates in retinal images as an aid for the DR diagnosis in screening programs. As a first step, several methods are studied for the automatic detection of anatomical structures: the optic disk, the blood vessels and the posterior pole. The detection of these features provides a retinal coordinate system for retinopathy grading. Besides, their localization is indispensable to reduce the number of false detections due to their similarities to retinal signs in terms of brightness and contrast. Different image analysis techniques, such as wavelets, mathematical morphology and Expectation Maximization algorithm, are investigated to address the localisation of the optic disk, blood vessels and the vascular arcades. The automatic detection of haemorrhages and hard exudates relies on an innovative initial image segmentation based on mixture model based clustering, followed by a subsequent refinement stage using supervised classification. The clustering process considers the probability distribution function of the entire image as a mixture of individual distributions. The resulting pixel-cluster memberships provide a transformation of the pixels into different image regions which are suspected of corresponding to retinal lesions. Finally, a study of different discriminant analysis techniques, such as linear discriminant analysis, quadratic discriminant analysis, logistic regression and k nearest neighbours, is performed to classify regions into haemorrhages or hard exudates. 
Therefore, an interpretation of the retinal image will be achieved detecting DR signs and identifying retinal images with the presence of these ocular symptoms.}, + file = {Sanc08b.pdf:pdf\\Sanc08b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {R. Hornero}, + school = {Universidad de Valladolid, Spain}, + year = {2008}, + journal = {PhD thesis}, +} + +@inproceedings{Sanc09, + author = {S\'{a}nchez, C. I. and Niemeijer, M. and Kockelkorn, T. and Abr\`amoff, M. D. and van Ginneken, B.}, + title = {Active learning approach for detection of hard exudates, cotton wool spots, and drusen in retinal images}, + booktitle = MI, + year = {2009}, + volume = {7260}, + series = SPIE, + pages = {72601I1--72601I8}, + doi = {10.1117/12.813679}, + abstract = {{C}omputer-aided {D}iagnosis ({CAD}) systems for the automatic identification of abnormalities in retinal images are gaining importance in diabetic retinopathy screening programs. {A} huge amount of retinal images are collected during these programs and they provide a starting point for the design of machine learning algorithms. {H}owever, manual annotations of retinal images are scarce and expensive to obtain. {T}his paper proposes a dynamic {CAD} system based on active learning for the automatic identification of hard exudates, cotton wool spots and drusen in retinal images. {A}n uncertainty sampling method is applied to select samples that need to be labeled by an expert from an unlabeled set of 4000 retinal images. {I}t reduces the number of training samples needed to obtain an optimum accuracy by dynamically selecting the most informative samples. {R}esults show that the proposed method increases the classification accuracy compared to alternative techniques, achieving an area under the {ROC} curve of 0.87, 0.82 and 0.78 for the detection of hard exudates, cotton wool spots and drusen, respectively.}, + file = {Sanc09.pdf:pdf\\Sanc09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {7951641732601365129}, + gscites = {11}, + ss_id = {d6f43ce72b81460ebe93f3f3c959ff5983d66b07}, + all_ss_ids = {['d6f43ce72b81460ebe93f3f3c959ff5983d66b07']}, +} + +@article{Sanc09a, + author = {C. I. S\'{a}nchez and M. Garc\'{i}a and A. Mayo and M. I. L\'{o}pez and R. Hornero}, + title = {Retinal image analysis based on mixture models to detect hard exudates}, + journal = MIA, + year = {2009}, + volume = {13}, + number = {4}, + pages = {650--658}, + doi = {10.1016/j.media.2009.05.005}, + abstract = {{D}iabetic {R}etinopathy is one of the leading causes of blindness in developed countries. {H}ard exudates have been found to be one of the most prevalent earliest clinical signs of retinopathy. {T}hus, automatic detection of hard exudates from retinal images is clinically significant. {I}n this study, an automatic method to detect hard exudates is proposed. {T}he algorithm is based on mixture models to dynamically threshold the images in order to separate exudates from background. {A} postprocessing technique, based on edge detection, is applied to distinguish hard exudates from cotton wool spots and other artefacts. {W}e prospectively assessed the algorithm performance using a database of 80 retinal images with variable colour, brightness, and quality. {T}he algorithm obtained a sensitivity of 90.2\% and a positive predictive value of 96.8\% using a lesion-based criterion. 
{T}he image-based classification accuracy is also evaluated obtaining a sensitivity of 100\% and a specificity of 90\%.}, + file = {Sanc09a.pdf:pdf\\Sanc09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {19539518}, + month = {8}, + gsid = {16533196676659344093}, + gscites = {222}, + ss_id = {699e59747454c422766132be28c2aa3c4c04de99}, + all_ss_ids = {['699e59747454c422766132be28c2aa3c4c04de99']}, +} + +@inproceedings{Sanc09b, + author = {S\'{a}nchez, C. I. and Hornero, R. and Mayo, A. and Garcia,M.}, + title = {Mixture model-based clustering and logistic regression for automatic detection of microaneurysms in retinal images}, + booktitle = MI, + year = {2009}, + volume = {7260}, + series = SPIE, + doi = {10.1117/12.812088}, + pages = {72601M1--72601M8}, + abstract = {{D}iabetic {R}etinopathy is one of the leading causes of blindness and vision defects in developed countries. {A}n early detection and diagnosis is crucial to avoid visual complication. {M}icroaneurysms are the first ocular signs of the presence of this ocular disease. {T}heir detection is of paramount importance for the development of a computer-aided diagnosis technique which permits a prompt diagnosis of the disease. {H}owever, the detection of microaneurysms in retinal images is a difficult task due to the wide variability that these images usually present in screening programs. {W}e propose a statistical approach based on mixture model-based clustering and logistic regression which is robust to the changes in the appearance of retinal fundus images. {T}he method is evaluated on the public database proposed by the {R}etinal {O}nline {C}hallenge in order to obtain an objective performance measure and to allow a comparative study with other proposed algorithms.}, + file = {Sanc09b.pdf:pdf\\Sanc09b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {3729643137782569352}, + gscites = {74}, + ss_id = {7c96564eb21ed23357d3ce5f33445756002ae47e}, + all_ss_ids = {['7c96564eb21ed23357d3ce5f33445756002ae47e']}, +} + +@inproceedings{Sanc10, + author = {C. I. S\'{a}nchez and M. Niemeijer and M. S. A. Suttorp-Schulten and M. D. Abr\`amoff and B. van Ginneken}, + title = {Improving hard exudate detection in retinal images through a combination of local and contextual information}, + booktitle = ISBI, + year = {2010}, + pages = {5--8}, + doi = {10.1109/ISBI.2010.5490429}, + abstract = {{C}ontextual information is of paramount importance in medical image understanding to detect and differentiate pathologies, especially when interpreting difficult cases. {C}urrent computer-aided detection ({CAD}) systems typically employ only local information to classify candidates, without taking into account global image information or the relation of a candidate with neighboring structures. {I}n this work, we improve the detection of hard exudates in retinal images incorporating contextual information in the {CAD} system. {T}he context is described by means of high-level contextual-based features based on the spatial relation with surrounding anatomical landmarks and similar lesions. 
{R}esults show that a contextual {CAD} system for hard exudate detection is superior to an approach that uses only local information, with a significant increase of the figure of merit of the {F}ree {R}eceiver {O}perating {C}haracteristic ({FROC}) curve from 0.840 to 0.945.}, + file = {Sanc10.pdf:pdf\\Sanc10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {1493852564349449873}, + gscites = {37}, + ss_id = {51371dc70506dbdcc2d79eb28783c80226d8bc36}, + all_ss_ids = {['51371dc70506dbdcc2d79eb28783c80226d8bc36']}, +} + +@inproceedings{Sanc10a, + author = {C. I. S\'{a}nchez and M. Niemeijer and M. D. Abr\`amoff and B. van Ginneken}, + title = {Active learning for an efficient training strategy of computer-aided diagnosis systems: application to diabetic retinopathy screening}, + booktitle = MICCAI, + year = {2010}, + volume = {6363}, + series = LNCS, + pages = {603-610}, + doi = {10.1007/978-3-642-15711-0_75}, + abstract = {{T}he performance of computer-aided diagnosis ({CAD}) systems can be highly influenced by the training strategy. {CAD} systems are traditionally trained using available labeled data, extracted from a specific data distribution or from public databases. {D}ue to the wide variability of medical data, these databases might not be representative enough when the {CAD} system is applied to data extracted from a different clinical setting, diminishing the performance or requiring more labeled samples in order to get better data generalization. {I}n this work, we propose the incorporation of an active learning approach in the training phase of {CAD} systems for reducing the number of required training samples while maximizing the system performance. {T}he benefit of this approach has been evaluated using a specific {CAD} system for {D}iabetic {R}etinopathy screening. {T}he results show that 1) using a training set obtained from a different data source results in a considerable reduction of the {CAD} performance; and 2) using active learning the selected training set can be reduced from 1000 to 200 samples while maintaining an area under the {R}eceiver {O}perating {C}haracteristic curve of 0.856.}, + file = {Sanc10a.pdf:pdf\\Sanc10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {20879450}, + gsid = {17509952020796744898}, + gscites = {21}, + ss_id = {a0083e8d84d4858e4c85dcfce6db4ec18b57535a}, + all_ss_ids = {['a0083e8d84d4858e4c85dcfce6db4ec18b57535a']}, +} + +@article{Sanc11, + author = {Clara I. S\'{a}nchez and Meindert Niemeijer and Alina V Dumitrescu and Maria S A Suttorp-Schulten and Michael D Abr\`amoff and Bram van Ginneken}, + title = {Evaluation of a Computer-Aided Diagnosis system for Diabetic Retinopathy screening on public data}, + journal = IOVS, + year = {2011}, + volume = {52}, + pages = {4866--4871}, + doi = {10.1167/iovs.10-6633}, + abstract = {Purpose. To evaluate the performance of a comprehensive computer-aided diagnosis (CAD) system for Diabetic Retinopathy (DR) screening using a publicly available database of retinal images and compare its performance to that of human experts. Methods. A previously developed, comprehensive DR CAD system was applied to the 1200 digital color fundus photographs (non-mydriatic camera, single field) of 1200 eyes in the publicly available "Messidor" dataset. The ability of the system to distinguish normal images from those with DR was determined using Receiver Operator Characteristic (ROC) analysis. Two human experts also determined the presence of DR in each of the images. Results. 
The system achieved an area under the ROC curve of 0.876 for successfully distinguishing normal images from those with DR with a sensitivity of 92.2\% at a specificity of 50\%. This compares favorably with the two human experts who achieved sensitivities of 94.5\% and 91.2\% at specificity 50\%. Conclusions. This study shows, for the first time, the performance of a comprehensive DR screening system on an independent, publicly available dataset. The performance of the system on this dataset is comparable to that of human experts.}, + file = {Sanc11.pdf:pdf\\Sanc11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21527381}, + month = {6}, + gsid = {2962166101861378440}, + gscites = {115}, + ss_id = {c8ae5ea9ede39decae80e0e8f9a13b8b87fb6f75}, + all_ss_ids = {['c8ae5ea9ede39decae80e0e8f9a13b8b87fb6f75']}, +} + +@article{Sanc12, + author = {C. I. S\'{a}nchez and M. Niemeijer and I. I{\v{s}}gum and A. V. Dumitrescu and M. S. A. Suttorp-Schulten and M. D. Abr\`amoff and B. van Ginneken}, + title = {Contextual computer-aided detection: Improving bright lesion detection in retinal images and coronary calcification identification in {CT} scans}, + journal = MIA, + year = {2012}, + volume = {16}, + number = {1}, + pages = {50--62}, + doi = {10.1016/j.media.2011.05.004}, + abstract = {Contextual information plays an important role in medical image understanding. Medical experts make use of context to detect and differentiate pathologies in medical images, especially when interpreting difficult cases. The majority of computer-aided diagnosis (CAD) systems, however, employ only local information to classify candidates, without taking into account global image information or the relation of a candidate with neighboring structures. In this paper, we present a generic system for including contextual information in a CAD system. Context is described by means of high-level features based on the spatial relation between lesion candidates and surrounding anatomical landmarks and lesions of different classes (static contextual features) and lesions of the same type (dynamic contextual features). We demonstrate the added value of contextual CAD for two real-world CAD tasks: the identification of exudates and drusen in 2D retinal images and coronary calcifications in 3D computed tomography scans. Results show that in both applications contextual CAD is superior to a local CAD approach with a significant increase of the figure of merit of the Free Receiver Operating Characteristic curve from 0.84 to 0.92 and from 0.88 to 0.98 for exudates and drusen, respectively, and from 0.87 to 0.93 for coronary calcifications.}, + file = {Sanc12.pdf:pdf\\Sanc12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21689964}, + month = {1}, + gsid = {13166674494125805160}, + gscites = {43}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110688}, + ss_id = {30a5be97d463a7e30248c1c5cc3511ded6205d8f}, + all_ss_ids = {['30a5be97d463a7e30248c1c5cc3511ded6205d8f']}, +} + +@conference{Sanc15a, + author = {Clara I. S\'{a}nchez and Stanley Lambertus and Bart Bloemen and Nathalie Bax and Freerk G. Venhuizen and Mark J. J. P. van Grinsven and Bram van Ginneken and Thomas Theelen and Carel Hoyng}, + booktitle = ARVO, + title = {Automatic quantification of geographic atrophy in fundus autofluorescence images of Stargardt patients}, + abstract = {Purpose: To evaluate an observer-independent image analysis algorithm that automatically quantifies the area of geographic atrophy in fundus autofluorescence images of Stargardt patients. 
Methods: Fundus autofluorescence images of 20 eyes of 20 Stargardt patients with presence of one delineated or patchy atrophy region in the macular area were selected. An image analysis algorithm was developed to automatically segment the area of atrophy starting from an arbitrarily selected seed point inside the atrophy region. The method was based on a combination of region growing algorithm and a dynamic, user-independent threshold selection procedure using Otsu thresholding. In order to assess the performance obtained by the proposed algorithm, manual annotations were made by an experienced human grader. The grader manually delineated the atrophy areas on the same set of images using dedicated software developed for this task. Results: A high correlation was observed between the manual area measurements and the automatically quantified values obtained by the proposed algorithm, with a mean intra-class correlation coefficient ({ICC}) value larger than 0.89. In addition, the quantification time was reduced substantially by a factor of 27 compared to manual assessment. The output of the software was also shown to be independent of the user input and highly reproducible, with an {ICC} value larger than 0.99 between two executions of the algorithm at different time points and with different seed points. Conclusions: An image analysis algorithm for automatic quantification of geographic atrophy in autofluorescence images of Stargardt patients was developed. The proposed algorithm allows for precise, reproducible and fast quantification of the atrophy area, providing an accurate procedure to measure disease progression and assess potential therapies in large dataset analyses independent of human observers.}, + optnote = {DIAG, RADIOLOGY}, + year = {2015}, + all_ss_ids = {589037d110f74defb9cac3b42a370f7b044a1c4b}, + gscites = {3}, +} + +@conference{Sand19, + author = {Wendelien Sanderink and Jonas Teuwen and Linda Appelman and Ioannis Sechopoulos and Nico Karssemeijer and Ritse Mann}, + title = {Simultaneous multi-slice single-shot DWI compared to routine read-out-segmented DWI for evaluation of breast lesions}, + booktitle = ISMRM, + year = {2019}, + abstract = {Synopsis + The aim of this study was to compare a prototype simultaneous multi-slice single-shot echo planar imaging (SMS-ss-DWI-EPI) sequence with + conventional readout-segmented echo-planar imaging (rs-DWI-EPI) for diffusion-weighted imaging of the breast at 3T magnetic resonance + imaging (MRI). A reader study was conducted to evaluate image quality, lesion conspicuity and BI-RADS score. Our results show that although + the image quality with the conventional rs-DWI-EPI is superior, malignant lesions have improved visibility with the SMS-ss-DWI-EPI sequence. + + Introduction + The addition of diffusion-weighted imaging (DWI) to contrast-enhanced breast MRI improves the classification of breast lesions, which leads in turn to an + increased positive predictive value of biopsies. Consequently, DWI with evaluation of the corresponding apparent diffusion coefficient (ADC) is included in + most state-of-the-art breast MRI protocols . The echo train of the readout-segmented echo-planar imaging-based DWI sequence (rs-DWI-EPI) was + shortened to reduce distortion and improve the resulting image quality. However, this sequence results in a lower signal-to-noise ratio (SNR) than singleshot + echo planar imaging (ss-EPI) . In practice, detection of lesions on DWI is often problematic due to a relatively low lesion conspicuity. 
To improve the + detectability of lesions and the speed of acquisition, a prototype DWI sequence, the simultaneous multi-slice single-shot DWI-EPI (SMS-ss-DWI-EPI), was + developed. In this study we compare this prototype sequence with rs-DWI-EPI at 3T, in terms of image quality (IQ), lesion conspicuity, and breast imaging + reporting and data system (BI-RADS ) score. + + Methods + From September 2017 to August 2018, 15 women with known breast cancer or suspicious breast lesions were included, after providing signed informed + consent. Women were scanned with the conventional rs-DWI-EPI and the SMS-ss-DWI-EPI during the same clinical examination on a 3T MAGNETOM + Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Parameters of the rs-DWI-EPI sequence were: TR: 5450 + ms, TE: 57 ms, FoV: 340 mm, voxel size: 1.2x1.2x5 mm , acquisition time: 4:23 min, b-values: 50, 850 s/mm , SPAIR fat suppression. Parameters of the + SMS-ss-DWI-EPI sequence were: TR: 4000 ms, TE: 70 ms, FoV: 360 mm, voxel size: 0.9(i)x0.9(i)x4 mm , acquisition time: 2:45 min, b-values: 50, 400, + 800 s/mm , SPAIR fat suppression. In addition, the clinical protocol included one pre- and five post-contrast administration regular T1-weighted Dixon + acquisitions, ultrafast T1-weighted TWIST acquisitions during the inflow of contrast, and a T2 weighted Dixon acquisition. In total, 33 lesions (27 malignant, + 5 benign and 1 unknown) were detected on the contrast-enhanced series and described in the clinical MRI reports. Two dedicated breast radiologists (4 + and 10 years of experience with breast MRI) independently scored both sequences for overall IQ (1: extremely poor to 9: excellent). All lesions were also + independently evaluated for conspicuity (1: not visible, 2: visible if location is given, 3: visible), and a BI-RADS score (1 to 5) was given for each lesion. + Statistical analysis was performed in SPSS using the Wilcoxon signed-rank test. + + Results + Results are presented in Table 1. Overall IQ was significantly higher for the conventional rs-DWI-EPI than for the SMS-ss-DWI-EPI (p=0.006). Lesion + conspicuity scores were significantly higher for SMS-ss-DWI-EPI (p=0.016). Benign lesions had similar conspicuity with both sequences while malignant + lesions had significantly higher conspicuity with SMS-ss-DWI-EPI (p=0.027) (for example, see Figure 1). There was no significant difference in BI-RADS + scores (p=0.151) between the two sequences. + + Discussion + Although the conventional rs-DWI-EPI sequence results in better IQ, in general ss-EPI results in a higher SNR, which may lead to better visibility of + malignant lesions with SMS-ss-DWI-EPI. This might eventually improve the clinical value of DWI in addition to contrast enhanced breast MRI. + Simultaneous Multi-Slice (SMS) ensures that slices are excited simultaneously with a multiband pulse, which leads to a reduced acquisition time. In our + protocol, the combination of ss-EPI and SMS results in a higher spatial resolution while still having a shorter acquisition time than the conventional + sequence. The higher achievable spatial resolution may be an important factor for the improved lesion visibility, and conspicuity of malignant lesions. This + may make the SMS approach suitable for fast screening and diagnosis of breast cancer. Still, further development of the SMS-ss-DWI-EPI sequence is + needed for improved IQ and even better lesion conspicuity. 
Extension of the data pool and evaluation by additional readers is pending. + + Conclusion + Despite the perceived poorer image quality of the SMS-ss-DWI-EPI sequence, malignant lesions are better visualized using this sequence. When image + quality and conspicuity are further improved, this technique might enable improved lesion detection on unenhanced diffusion weighted breast MRI.}, + optnote = {DIAG}, +} + +@article{Sand19a, + author = {Sanderink, Wendelien B G and Laarhuis, Babette I and Strobbe, Luc J A and Sechopoulos, Ioannis and Bult, Peter and Karssemeijer, Nico and Mann, Ritse M}, + title = {A systematic review on the use of the breast lesion excision system in breast disease}, + journal = INSI, + year = {2019}, + volume = {10}, + issue = {1}, + month = {5}, + pages = {49}, + doi = {10.1186/s13244-019-0737-3}, + abstract = {To outline the current status of and provide insight into possible future research on the breast lesion excision system (BLES) as a diagnostic and therapeutic device. A systematic search of the literature was performed using PubMed, Embase, and the Cochrane databases to identify relevant studies published between January 2002 and April 2018. Studies were considered eligible for inclusion if they evaluated the diagnostic or therapeutic accuracy or safety of BLES. Ultimately, 17 articles were included. The reported underestimation rates of atypical ductal hyperplasia and ductal carcinoma in situ (DCIS) ranged from 0 to 14.3% and from 0 to 22.2%, respectively. Complete excision rates for invasive ductal carcinoma and DCIS ranged from 5.3 to 76.3%. Bleeding was the most frequently reported complication (0-11.8%). Device-related complications may arise, with an empty basket being the most common (0.6-3.6%). Thermal damage of the specimen, caused by the use of a radiofrequency cutting wire, was reported in eight of the included studies. Most thermal artifacts were reported as superficial and small (0.1-1.9 mm). The BLES, an automated, image-guided, single-pass biopsy system for breast lesions using radiofrequency is designed to excise and retrieve an intact tissue specimen. It is an efficient and safe breast biopsy method with acceptable complication rates, which may be used as an alternative to vacuum-assisted biopsies. The variable rate of complete excision raises questions about the possibility to use BLES as a therapeutic device for the excision of small lesions. Further research should focus on this aspect of BLES.}, + file = {Sand19a.pdf:pdf\\Sand19a.pdf:PDF}, + optnote = {AXTI, DIAG}, + pmid = {31049740}, + gsid = {11238611003834879571}, + gscites = {5}, + ss_id = {57098a73d38c2b147b1dd2574d87b4c30eec21ab}, + all_ss_ids = {['57098a73d38c2b147b1dd2574d87b4c30eec21ab']}, +} + +@conference{Sand20, + author = {Wendelien Sanderink and Jonas Teuwen and Linda Appelman and Ioannis Sechopoulos and Nico Karssemeijer and Ritse Mann}, + booktitle = {ISMRM Benelux}, + title = {Simultaneous multi-slice single-shot DWI compared to routine read-out-segmented DWI for evaluation of breast lesions}, + abstract = {Synopsis + The aim of this study was to compare a prototype simultaneous multi-slice single-shot echo planar imaging (SMS-ss-DWI-EPI) sequence with conventional readout-segmented echo-planar imaging (rs-DWI-EPI) for diffusion-weighted imaging of the breast at 3T magnetic resonance imaging (MRI). A reader study was conducted to evaluate image quality, lesion conspicuity and BI-RADS score. 
Our results show that although the image quality with the conventional rs-DWI-EPI is superior, malignant lesions have improved visibility with the SMS-ss-DWI-EPI sequence. + + Introduction + The addition of diffusion-weighted imaging (DWI) to contrast-enhanced breast MRI improves the classification of breast lesions, which leads in turn to an increased positive predictive value of biopsies. Consequently, DWI with evaluation of the corresponding apparent diffusion coefficient (ADC) is included in most state-of-the-art breast MRI protocols. The echo train of the readout-segmented echo-planar imaging-based DWI sequence (rs-DWI-EPI) was shortened to reduce distortion and improve the resulting image quality. However, this sequence results in a lower signal-to-noise ratio (SNR) than single-shot echo planar imaging (ss-EPI). In practice, detection of lesions on DWI is often problematic due to a relatively low lesion conspicuity. To improve the detectability of lesions and the speed of acquisition, a prototype DWI sequence, the simultaneous multi-slice single-shot DWI-EPI (SMS-ss-DWI-EPI), was developed. In this study, we compare this prototype sequence with rs-DWI-EPI at 3T, in terms of image quality (IQ), lesion conspicuity, and the presence of artifacts. + + Methods + From September 2017 to December 2018, 25 women with known breast cancer or suspicious breast lesions were included, after providing signed informed consent. Women were scanned with the conventional rs-DWI-EPI and the SMS-ss-DWI-EPI during the same clinical examination on a 3T MAGNETOM Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Parameters of the rs-DWI-EPI sequence were: TR: 5450 ms, TE: 57 ms, FoV: 340 mm, voxel size: 1.2x1.2x5 mm , acquisition time: 4:23 min, b-values: 50, 850 s/mm , SPAIR fat suppression. Parameters of the SMS-ss-DWI-EPI sequence were: TR: 4000 ms, TE: 70 ms, FoV: 360 mm, voxel size: 0.9(i)x0.9(i)x4 mm, acquisition time: 2:45 min, b-values: 50, 400, 800 s/mm , SPAIR fat suppression. In addition, the clinical protocol included one pre- and five post-contrast regular T1-weighted Dixon acquisitions, ultrafast T1-weighted TWIST acquisitions during the inflow of contrast, and a T2 weighted Dixon acquisition. In total, 42 malignant (32 invasive ductal carcinomas, 4 invasive lobular carcinomas, 1 ductal carcinoma in situ and 5 other malignant lesions) and 12 benign lesions were detected on the contrast-enhanced series. Malignant lesions had a mean MRI size of 18.7 mm +- 15.1 mm (range: 3 - 92 mm) and benign lesions had a mean size of 5.9 mm +- 3.8 mm (range: 3 - 15 mm). Four dedicated breast radiologists (4 to 15 years of experience with breast MRI) independently scored both sequences for overall IQ (1: extremely poor to 9: excellent). All lesions were also independently evaluated for conspicuity (1: not visible, 2: visible if location is given, 3: visible). Statistical analysis was performed in SPSS using Generalized Linear Models and the Wilcoxon signed-rank test. + + Results + Overall IQ was significantly higher for the conventional rs-DWI-EPI (Mean +- SD: 5.5 +- 1.9) than for the SMS-ss-DWI-EPI (Mean +- SD: 4.2 +- 2.0) (p=0.002). Lesion conspicuity scores were significantly higher for SMS-ss-DWI-EPI (p=0.009). Benign lesions had similar conspicuity with both sequences while malignant lesions had significantly higher conspicuity with SMS-ss-DWI-EPI (p=0.041) (for example, see Figure 1). 
+ 
+  Infolding and ghosting artifacts were scored as disturbing or worse by 2 or more radiologists in 6 and 15 cases, for Resolve and SMS respectively. Distortion artifacts were scored as disturbing or worse in 4 and 17 cases, respectively.
+ 
+  Discussion: Although the conventional rs-DWI-EPI sequence results in better IQ, in general ss-EPI results in a higher SNR, which may lead to better visibility of malignant lesions with SMS-ss-DWI-EPI. This might eventually improve the clinical value of DWI in addition to contrast enhanced breast MRI. Simultaneous Multi-Slice (SMS) ensures that slices are excited simultaneously with a multiband pulse, which leads to a reduced acquisition time. In our protocol, the combination of ss-EPI and SMS results in a higher spatial resolution while still having a shorter acquisition time than the conventional sequence. The higher achievable spatial resolution may be an important factor for the improved lesion visibility, and conspicuity of malignant lesions. This may make the SMS approach suitable for fast screening and diagnosis of breast cancer. Still, further development of the SMS-ss-DWI-EPI sequence is needed for improved IQ, decreased presence of artifacts and even better lesion conspicuity.
+ 
+  Conclusion
+  Despite the perceived poorer image quality and the more disturbing presence of artifacts in the SMS-ss-DWI-EPI sequence, malignant lesions are better visualized using this sequence. When image quality and conspicuity are further improved, this technique might enable improved lesion detection on unenhanced diffusion weighted breast MRI.},
+  optnote = {DIAG},
+  year = {2020},
+}
+
+@article{Sand20a,
+  author = {Sanderink, W.B.G. and Caballo, M. and Strobbe, L.J.A. and Bult, P. and Vreuls, W. and Venderink, D.J. and Sechopoulos, I. and Karssemeijer, N. and Mann, R.M.},
+  title = {Reliability of MRI tumor size measurements for minimal invasive treatment selection in small breast cancers},
+  doi = {10.1016/j.ejso.2020.04.038},
+  year = {2020},
+  abstract = {Abstract unavailable},
+  url = {http://dx.doi.org/10.1016/j.ejso.2020.04.038},
+  file = {Sand20a.pdf:pdf\Sand20a.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {European Journal of Surgical Oncology},
+  citation-count = {3},
+  automatic = {yes},
+  pages = {1463-1470},
+  volume = {46},
+}
+
+@article{Satt22,
+  author = {Satturwar, Swati and Girolami, Ilaria and Munari, Enrico and Ciompi, Francesco and Eccher, Albino and Pantanowitz, Liron},
+  title = {Program death ligand-1 immunocytochemistry in lung cancer cytological samples: A systematic review.},
+  doi = {10.1002/dc.24955},
+  issue = {6},
+  pages = {313--323},
+  volume = {50},
+  abstract = {In this era of personalized medicine, targeted immunotherapies like immune checkpoint inhibitors (ICI) blocking the programmed death-1 (PD-1)/program death ligand-1 (PD-L1) axis have become an integral part of treating advanced stage non-small cell lung carcinoma (NSCLC) and many other cancer types. Multiple monoclonal antibodies are available commercially to detect PD-L1 expression in tumor cells by immunohistochemistry (IHC). As most clinical trials initially required tumor biopsy for PD-L1 detection by IHC, many of the currently available PD-1/PD-L1 assays have been developed and validated on formalin fixed tissue specimens. The majority (>50%) of lung cancer cases do not have a surgical biopsy or resection specimen available for ancillary testing and instead must rely primarily on fine needle aspiration biopsy specimens for diagnosis, staging and ancillary tests. Review of the literature shows multiple studies exploring the feasibility of PD-L1 IHC on cytological samples. In addition, there are studies addressing various aspects of IHC validation on cytology preparations including pre-analytical (e.g., different fixatives), analytical (e.g., antibody clone, staining platforms, inter and intra-observer agreement, cytology-histology concordance) and post-analytical (e.g., clinical outcome) issues. Although promising results in this field have emerged utilizing cytology samples, many important questions still need to be addressed. 
This review summarizes the literature of PD-L1 IHC in lung cytology specimens and provides practical tips for optimizing analysis.}, + file = {Satt22.pdf:pdf\\Satt22.pdf:PDF}, + journal = {Diagnostic cytopathology}, + month = {6}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35293692}, + year = {2022}, + ss_id = {a9b2faab3a7195ee1deacda52004b4cd396f427a}, + all_ss_ids = {['a9b2faab3a7195ee1deacda52004b4cd396f427a']}, + gscites = {6}, +} + +@article{Sch03a, + author = {Schaefer-Prokop, Cornelia and Klein, Jeffrey S}, + title = {Digital chest radiography}, + journal = JTI, + year = {2003}, + volume = {18}, + pages = {123}, + file = {Sch03a.pdf:pdf\\Sch03a.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {3}, + month = {7}, +} + +@article{Sch08, + author = {Schaefer-Prokop, Cornelia and Neitzel, Ulrich and Venema, Henk W and Uffmann, Martin and Prokop, Mathias}, + title = {Digital chest radiography: an update on modern technology, dose containment and control of image quality}, + journal = ER, + year = {2008}, + volume = {18}, + pages = {1818--30}, + doi = {10.1007/s00330-008-0948-3}, + file = {Sch08.pdf:pdf\\Sch08.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {9}, + pmid = {18431577}, + month = {4}, + gsid = {10747008268035270820}, + gscites = {176}, +} + +@article{Scha01a, + author = {Schaefer-Prokop, C. and Prokop, M. and Fleischmann, D. and Herold, C.}, + title = {High-resolution {CT} of diffuse interstitial lung disease: key findings in common disorders}, + journal = ER, + year = {2001}, + volume = {11}, + pages = {373--392}, + abstract = {High-resolution CT (HRCT) is the radiological imaging technique that most closely reflects changes in lung structure. It represents the radiological method of choice for the diagnostic work-up of patients with known or suspected diffuse interstitial lung disease. A single HRCT finding is frequently nonspecific, but the combination of the various HRCT findings together with their anatomic distribution can suggest the most probable diagnosis. The purpose of this article is to summarize the classic HRCT features of the most common diffuse interstitial lung diseases. Lists of differential diagnoses and distinguishing key features are provided to improve diagnostic confidence. The presence of classic HRCT features often obviates the need for biopsy. In patients with atypical findings, HRCT can be used to determine the most appropriate biopsy site.}, + file = {Scha01a.pdf:pdf\\Scha01a.pdf:PDF}, + optnote = {DIAG}, + number = {3}, + pmid = {11288840}, + month = {2}, + gsid = {17200936979199710204}, + gscites = {83}, +} + +@article{Scha01b, + author = {Schaefer-Prokop, C. and Eisenhuber, E. and Fuchsj\"ager, M. and Puig, S. and Prokop, M.}, + title = {[Current developments in the area of digital thoracic radiography]}, + journal = Radiologe, + year = {2001}, + volume = {41}, + pages = {230--239}, + abstract = {All three currently commercially available systems for digital radiography of the chest such as the selenium drum, storage phosphor plates and the flat panel direct detector systems provide an excellent image quality that is at least equivalent or superior to that of conventional film. Reasons for that are the continuously improved detective or dose efficiency of the detector systems and an improved image processing. The new direct detector systems have the largest potential for dose reduction while storage phosphor and selenium radiographs are usually obtained with a dose comparable to that of a 400 speed system. 
Improved image processing algorithms allow for the production of digital images that are adapted to the conventional image characteristics within the lung regions combined with an increased transparency of the high absorption areas such as the retrocardial and retrodiaphragmatic regions.}, + optnote = {DIAG}, + number = {3}, + pmid = {11322068}, + gsid = {3616619742534065588}, + gscites = {2}, +} + +@article{Scha01c, + author = {Schaefer-Prokop, C. and N\"obauer, I. and Weidekamm, C. and Katz-Papatheophilou, E.}, + title = {[Radiological diagnosis of adult respiratory distress syndrome (ARDS)]}, + journal = WIENMW, + year = {2001}, + volume = {151}, + pages = {520--523}, + abstract = {The bedside chest radiograph represents the imaging modality of choice for diagnosis and monitoring of adult respiratory distress syndrome (ARDS). Imaging findings are strongly influenced by means of mechanical ventilation therapy. The chest radiograph is relatively insensitive and not specific for the diagnosis of complications such as pneumonia or interstitial emphysema. Computed tomography (CT) is suitable for quantitative assessment of lung compartments with respect to the degree of aeration and to tissue density values. With CT, the understanding of the underlying pathophysiology and the effects of ventilation therapy (PEEP) could be improved. The role of CT in the clinical routine is still limited due to the high risk to transport patients with ARDS.}, + optnote = {DIAG}, + number = {21-23}, + pmid = {11762246}, +} + +@article{Scha02c, + author = {Schaefer-Prokop, C. and Prokop, M.}, + title = {New imaging techniques in the treatment guidelines for lung cancer}, + journal = ERJS, + year = {2002}, + volume = {35}, + pages = {71s--83s}, + abstract = {Computed tomography (CT) remains the main imaging technique for the preoperative staging and post-therapeutic evaluation of bronchogenic carcinoma. Spiral CT has already overcome some of the problems encountered with central or more extensive tumours. Multislice CT offers further improvement and allows for scanning of the whole chest within a single breath-hold using a thin-section high-resolution technique. Problem-adapted sections in arbitrary directions become available and provide an excellent spatial resolution. One can expect improved accuracy for the evaluation of transfissural tumour growth, chest wall involvement, mediastinal infiltration and lymph node staging. Despite recent advances in magnetic resonance (MR) techniques for imaging the chest, the role of MR for staging of bronchogenic carcinoma remains limited. It offers advantages such as the assessment of chest-wall involvement or mediastinal involvement in patients in whom CT remains equivocal. Lymph-node-specific MR contrast agents offer new diagnostic potential for the assessment of metastatic disease. New techniques for the display of three-dimensional data sets include volume rendering and virtual bronchoscopy. These techniques represent new tools for the evaluation and demonstration of pathology within the central tracheobronchial tree. Their most important application is the guidance of bronchoscopic biopsies. The assessment of an indeterminate pulmonary nodule is frequently based on positron emission tomography imaging. 
As an alternative, nodule vascularization (contrast enhancement patterns on CT or magnetic resonance imaging (MRI)), calcifications (absorption characteristics at various X-ray energies on CT or dual energy radiography), and morphological features (high resolution imaging at CT) can be used as the basis for nodule differentiation. The dynamics of contrast enhancement in CT or MRI can also be used for the assessment of tumour viability after chemotherapy. Lung cancer screening programmes are still controversial. Low-dose computed tomography scanning and computed assisted detection algorithms based on chest radiographs or computed tomography scans form the technical basis for such projects.}, + file = {Scha02c.pdf:pdf\\Scha02c.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + pmid = {12064683}, + month = {2}, + gsid = {16546005280649739955}, + gscites = {90}, +} + +@article{Scha02e, + author = {Schaefer-Prokop, Cornelia and Uffmann, M. and Stadler, A.}, + title = {[Digital radiography: from storage phosphor plates to direct detector systems]}, + journal = WIENMWS, + year = {2002}, + pages = {30--34}, + abstract = {All three currently commercially available systems for digital radiography of the chest such as the selenium drum, storage phosphor plates and the flat panel direct detector systems provide an excellent image quality that is at least equivalent or superior to that of conventional film. Reasons for that are the continuously improved detective or dose efficiency of the detector systems and an improved image processing. The new direct detector systems have the largest potential for dose reduction while storage phosphor and selenium radiographs are usually obtained with a dose comparable to that of a 400 speed system. Improved image processing algorithms allow for the production of digital images that are adapted to the conventional image characteristics within the lung regions combined with an increased transparency of the high absorption areas such as the retrocardial and retrodiaphragmatic regions.}, + optnote = {DIAG}, + number = {113}, + pmid = {12621834}, +} + +@article{Scha03b, + author = {Schaefer-Prokop, Cornelia and Uffmann, Martin and Eisenhuber, Edith and Prokop, Mathias}, + title = {Digital radiography of the chest: detector techniques and performance parameters}, + journal = JTI, + year = {2003}, + volume = {18}, + pages = {124--137}, + abstract = {Substantial advances in detector technology characterize digital chest radiography. This article compares the various systems from a radiologist's point of view. Computed radiography (CR) is a well-established system that is robust, has good reproducibility, and is relatively inexpensive. Image quality has been continuously improved in recent years while the physical size of the readout units has been reduced and the throughput increased. CR is the only digital system that can be used for bedside chest radiographs. Improved detector properties and dual reading have made it a dose-efficient system. Although now widely available, a 4K image matrix does not appear to offer a general diagnostic improvement for imaging the chest. New developments with respect to detector composition and readout process can be expected in the future. Direct radiography (DR) is the common name for different technologies that are characterized by a direct readout matrix that covers the whole exposure area. Conversion of x-ray intensity into electric signals can either be direct (selenium-based systems) or indirect (scintillator/photodiode systems). 
Advantages of DR systems are a high image quality and the potential for dose reduction. The role of selenium radiography (Thoravision) has decreased after the advent of DR systems although this dedicated chest unit offers high image quality at 400 speed acquisition dose. Especially in a PACS environment, CR and DR systems will increasingly substitute for conventional radiography with advantages for CR for bedside chest radiographs and for DR for high-end chest stands.}, + file = {Scha03b.pdf:pdf\\Scha03b.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {3}, + pmid = {12867810}, + month = {7}, + gsid = {10155781491425026912}, + gscites = {48}, +} + +@article{Scha03c, + author = {Schaefer-Prokop, C. and Uffmann, M. and Sailer, J. and Kabalan, N. and Herold, C. and Prokop, M.}, + title = {[Digital thorax radiography: flat-panel detector or storage phosphor plates]}, + journal = Radiologe, + year = {2003}, + volume = {43}, + pages = {351--361}, + doi = {10.1007/s00117-003-0893-8}, + abstract = {Flat panel detectors are characterized by improved handling and increased dose efficiency. This allows for increasing of work flow efficiency and for reducing the exposure dose by about 50\% compared to current systems with a sensitivity of 400. Whether the increased dose efficiency should be used to reduce acquisition dose or to increase image quality in the chest, will be shown by further clinical experience and will be also determined by the subjective preference of the radiologists. The decreased level of image noise opens new perspectives for image processing that way that elaborated multifrequency processing allows for optimizing the display of very small and low contrast structures that was so far limited by overlying image noise. Specialized applications of dual energy subtraction and temporal subtraction will also profit by the new detector technology and will be further driven forward in context with applications such as computed assisted diagnosis even though this is currently not yet broadly applied. Storage phosphor radiography still represents an important alternative technique based on its larger flexibility with respect to equipment configuration, its broader application options in intensive care and emergency radiology and due to economic reasons. These facts are further underlined by the fact that image quality also in storage phosphor radiography could be constantly increased by improving detector technology and image processing and consequently has a high standard.}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {12764583}, + month = {5}, + gsid = {1158244897300726847}, + gscites = {7}, +} + +@article{Scha05b, + author = {Schaefer-Prokop, Cornelia and Prokop, Mathias}, + title = {{MDCT} for the diagnosis of acute pulmonary embolism}, + journal = ER, + year = {2005}, + volume = {15 Suppl 4}, + pages = {D37--D41}, + abstract = {With the advent of multidetector CT, pulmonary CT angiography (MD-CTPA) has substantially gained in spatial resolution and is the accepted method of choice to diagnose and rule out acute pulmonary embolism down to the subsegmental level. This article reviews means to optimize scanning technique and contrast injection protocols dependent on the scanner type used. It summarizes recent publications on the performance of MD-CTPA with special emphasis on the diagnostic accuracy, interpretation and clinical role of (isolated) peripheral emboli. 
Diagnostic algorithms are outlined that describe the role of CT in context with the pretest probability, the D-Dimer, lower limb sonography and scintigraphy.}, + optnote = {DIAG}, + pmid = {16479644}, + month = {11}, + gsid = {1689402466372729981}, + gscites = {39}, +} + +@inproceedings{Scha07, + author = {Michiel Schaap and Rashindra Manniesing and Ihor Smal and Theo van Walsum and Aad van der Lugt and Wiro Niessen}, + title = {Bayesian tracking of tubular structures and its application to carotid arteries in {CTA}}, + booktitle = MICCAI, + year = {2007}, + volume = {10}, + series = LNCS, + pages = {562--570}, + doi = {10.1007/978-3-540-75759-7_68}, + abstract = {This paper presents a Bayesian framework for tracking of tubular structures such as vessels. Compared to conventional tracking schemes, its main advantage is its non-deterministic character, which strongly increases the robustness of the method. A key element of our approach is a dedicated observation model for tubular structures in regions with varying intensities. Furthermore, we show how the tracking method can be used to obtain a probabilistic segmentation of the tracked tubular structure. The method has been applied to track the internal carotid artery from CT angiography data of 14 patients (28 carotids) through the skull base. This is a challenging problem, owing to the close proximity of bone, overlap in intensity values of lumen voxels and (partial volume) bone voxels, and the tortuous path of the vessels. The tracking was successful in 25 cases, and the extracted path were found to be close (< 1.0mm) to manually traced paths by two observers.}, + file = {Scha07.pdf:pdf\\Scha07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {Pt 2}, + pmid = {18044613}, + gsid = {4500787342925790451}, + gscites = {58}, +} + +@article{Scha08b, + author = {Schaefer-Prokop, Cornelia and Prokop, Mathias}, + title = {CTPA for the diagnosis of acute pulmonary embolism during pregnancy}, + journal = ER, + year = {2008}, + volume = {18}, + pages = {2705--2708}, + doi = {10.1007/s00330-008-1158-8}, + abstract = {CT pulmonary angiography (CTPA) has been suggested by the Fleischner society as the first test following a negative leg ultrasound in pregnant patients with suspected pulmonary embolism. This editorial discusses the use of CTPA as a diagnostic tool in pregnant women and comments on the need for specifically adapting CT protocols during pregnancy in the light of new research describing a substantial number of non-diagnostic examinations in pregnant women if routine scanning protocols are used for CTA of the pulmonary arteries. Potential reasons for these high numbers of insufficient examinations are physiological changes occurring during pregnancy that lead to a hyperdynamic circulation, which reduces average enhancement of the pulmonary vasculature. In addition, there are possible breathing-related effects that include an increased risk for Valsalva manoeuvre with devastating effects for pulmonary vascular enhancement. Techniques to overcome these problems are discussed: bolus triggering with short start delays, high flow rates or high contrast medium concentration, preferential use of fast CT systems and the use of low kVp CT techniques. CT data acquisition during deep inspiration should be avoided and shallow respiration may be considered as an alternative to suspended breathing in this patient group. All these factors can contribute to optimization of the quality of pulmonary CTA in pregnant patients. 
It is time now to adapt our protocols and provide optimum care for this sensitive patient group.}, + file = {Scha08b.pdf:pdf\\Scha08b.pdf:PDF}, + optnote = {DIAG}, + number = {12}, + pmid = {18795302}, + month = {9}, + gsid = {12417002089703118484}, + gscites = {69}, +} + +@article{Scha09a, + author = {C. M. Schaefer-Prokop and D. W. De Boo and M. Uffmann and M. Prokop}, + title = {DR and CR: Recent advances in technology}, + journal = EJR, + year = {2009}, + volume = {72}, + pages = {194--201}, + doi = {10.1016/j.ejrad.2009.05.055}, + abstract = {After some initial reluctance, nowadays transition from conventional analogue-to-digital radiographic technique is realized in the vast majority of institutions. The eventual triumph of digital over conventional technique is related to its undoubted advantages with respect to image quality and improved image handling in the context of a picture archiving and communication system. CR represents the older system, which matured over decades and experienced some important recent improvements with respect to dose efficiency and work-flow efficiency that strengthened its position. It represents a very versatile, economically attractive system that is equally suited for integrated systems as well as for cassette-based imaging at the bedside. DR systems offer superb image quality and realistic options for dose reduction based on their high dose efficiency. While for a long time only integrated systems were on the market suited for a large patient throughput, also mobile DR systems became recently available. While for the next years, it is likely that DR and CR systems will coexist, the long term perspective of CR will depend on further innovations with respect to dose efficiency and signal-to-noise characteristics while for DR economical aspects and broader availability of mobile systems will play a role.}, + file = {Scha09a.pdf:pdf\\Scha09a.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {19695809}, + month = {11}, + gsid = {3601581568937480558}, + gscites = {57}, +} + +@article{Scha09b, + author = {Schaefer-Prokop, Cornelia and Uffmann, Martin}, + title = {Update on digital radiography}, + journal = EJR, + year = {2009}, + volume = {72}, + pages = {193}, + doi = {10.1016/j.ejrad.2009.05.056}, + optnote = {DIAG}, + number = {2}, + pmid = {19766420}, + month = {11}, +} + +@article{Scha10b, + author = {Schaefer-Prokop, C.}, + title = {Conventional and {CT} diagnostics of bronchial carcinoma}, + journal = Radiologe, + year = {2010}, + volume = {50}, + pages = {675--683}, + doi = {10.1007/s00117-009-1958-0}, + abstract = {A number of important technical advances made in recent years in the area of both digital radiography as well as multidetector computed tomography (MDCT) have improved detection and staging of bronchial carcinoma. The aim of elaborate processing such as temporal subtraction, rib suppression, dual energy subtraction or CAD is to aid the radiologist in detecting lung tumors at the earliest stage possible. For both CT and radiography techniques the differentiation between true and false positive lesions seems to be the biggest challenge. MDCT with multiplanar projections is the imaging method of choice for staging of the extent of local tumor spread (T staging), while N staging and M staging are the domain of positron emission tomography (PET) or even better of integrated PET/CT. Management rules for follow-up of solid and semi-solid lesions seen in CT consider the risks of the patient and are summarized in international guidelines. 
In 2009 a new 7th edition of the TNM classification was published, which, among other aspects, sub-classifies tumor size more specifically and the presence of a satellite nodule in the tumor lobe is down-staged to T3 and no longer determines tumor resectability. The N staging was not modified. One of the most important new features is the fact that the new classification no longer applies only to non-small cell lung cancer (NSCLC) but also to SCLC and carcinoid tumors.},
+  file = {Scha10b.pdf:pdf\\Scha10b.pdf:PDF},
+  optnote = {RADIOLOGY, nodulecharacterization, nodulecharacterizationCT},
+  number = {8},
+  pmid = {20628724},
+  month = {7},
+}
+
+@conference{Scha11,
+  author = {S. Schalekamp and B. Heggelman and C. Schaefer-Prokop},
+  title = {"Focal lesions on Gadoxetate disodium enhanced and diffusion-weighted liver {MRI}: a guidance for differential diagnosis"},
+  booktitle = ECR,
+  year = {2011},
+  file = {Scha11.pdf:pdf\\Scha11.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Scha12a,
+  author = {S. Schalekamp and B. van Ginneken and L. Bax and M.W. Imhof-Tas and L. Meiss and A. Tiehuis and N. Karssemeijer and C.M. Schaefer-Prokop},
+  title = {Bone suppression imaging improves observer performance for the detection of lung nodules in chest radiographs},
+  booktitle = RSNA,
+  year = {2012},
+  abstract = {PURPOSE. Overprojection by osseous structures has been reported in 22 to 95% of missed lung cancer cases as the underlying reason for misdiagnosis. Purpose of the study was to assess the effect of bone suppressed imaging on observer performance in detecting lung nodules in chest radiographs. METHOD AND MATERIALS. Posteroanterior and lateral digital chest radiographs of 108 patients with a CT proven solitary nodule, and 192 controls were read by 5 radiologists and 3 residents. Conspicuity of nodules on the radiographs was classified in four categories: obvious (n=32), moderate (n=32), subtle (n=28) and very subtle (n=16). Commercially available software (ClearRead Bone Suppression 2.4, formerly Softview, Riverain Medical, Miamisburg, Ohio) was used to generate bone suppressed images (BSI) of the PA radiographs. Observers read the PA and lateral chest radiographs without and with the availability of BSI. Anatomic locations of suspicion and confidence scores were digitally recorded. Multi reader multi case (MRMC) receiver operating characteristics (ROC) were used for statistical analysis: partial area under the curve using a clinically acceptable specificity between 80 and 100% served as the figure of merit. RESULTS. Average age was 64.8 years for nodule patients and 63.5 years for controls. Average nodule size was 17.5mm (median 17mm). ROC analysis showed improved detection with use of bone suppression imaging compared to chest radiographs alone (p=0.008). Operating at a specificity of 90%, lung nodule detection sensitivity increased from 67% without BSI to 72% with BSI. Increase of detection performance was highest for moderate and subtle nodules with 11% (66% vs. 73%). Two of 8 nodules that were not reported by any of the observers with CXR alone were seen by at least 4 observers with BSI. CONCLUSION. Bone suppression imaging improves radiologists' detection performance for pulmonary nodules, particularly for nodules with intermediate conspicuity.},
+  optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Scha12b,
+  author = {S. Schalekamp and B. van Ginneken and E. Koedam and L. Quekel and M. Snoeren and R. Wittenberg and N. 
Karssemeijer and CM Schaefer-Prokop},
+  title = {Computer aided detection helps radiologists to detect pulmonary nodules in chest radiographs, when having bone suppressed images available},
+  booktitle = RSNA,
+  year = {2012},
+  abstract = {PURPOSE. Studies evaluating the effects of computer aided detection (CAD) to support radiologists in the detection of pulmonary nodules in chest radiographs (CXR) described the radiologists' difficulties in discriminating nodules from false positive candidates. We investigated the effect of an improved CAD system on nodule detection performance in CXR with the additional availability of bone suppression images (BSI). METHOD AND MATERIALS. Five radiologists and 3 residents served as observers and were asked to separately score location and confidence score for the detection of nodules in CXR without (mode A) and with the availability of CAD candidates (mode B). The study group consisted of 108 PA and lateral CXRs with a CT proven solitary pulmonary nodule and 192 age-matched CT proven negative controls. For both reading modes readers had BSI available. CAD marks and BSI were created by a commercially available software package (ClearRead +Detect 5.2, formerly Onguard 5.2; ClearRead Bone Suppression 2.4, formerly Softview 2.4, Riverain Medical, Miamisburg, Ohio). Multi reader multi case (MRMC) receiver operating characteristics (ROC) were used for statistical analysis: partial area under the curve, at a specificity interval of 80-100%, served as figure of merit. RESULTS. Average nodule size was 17.5mm (7-36mm), with a malignancy rate of 83%. Standalone CAD sensitivity was 74% at 1.0 FP/image. Observer performance significantly increased with the use of CAD and outperformed CXR with use of BSI (p=0.047). Mean sensitivity of the observers was 74% with 0.25 FP/image without CAD and 80% with 0.33 FP/image with CAD. Operating at a specificity of 90%, lung nodule detection sensitivity increased from 70.3% with CXR and BSI to 73.0% with additional availability of CAD. On average 10.5% of all true positive CAD marks were rejected by the radiologists, indicating potential for further improvement with the use of CAD. CONCLUSION. Even with bone suppressed images available, computer aided detection shows additional value in lung nodule detection for radiologists.},
+  optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Scha12c,
+  author = {S. Schalekamp and B. van Ginneken and L. Bax and M.W. Imhof-Tas and M. Snoeren and L. Quekel and E. Koedam and N. Karssemeijer and C.M. Schaefer-Prokop},
+  title = {Bone suppression imaging improves observer performance for the detection of lung nodules in chest radiographs},
+  booktitle = ESTI,
+  year = {2012},
+  abstract = {Objective. To assess observer performance in detecting lung nodules in chest x-rays using bone suppressed imaging. Materials and Methods. Posteroanterior and lateral digital chest radiographs of 108 patients with a solitary CT-proven nodule and 192 controls were read by 5 certified radiologists. Commercially available software (Softview 2.4, Riverain Medical Group, Miamisburg, Ohio) was used to generate bone suppressed images (BSI). We conducted a sequential reader study, in which radiologists marked and scored suspicious regions first on the standard radiographs alone and subsequently with the use of BSI. Receiver operating characteristics (ROC) were used for statistical analysis: partial area under the curve (pAUC), based on the specificity normally applied in clinical routine, served as figure of merit (interval 0 - 0.1). 
Results. Average nodule size was 17.5mm (median 17mm), with a malignancy rate of 83%. Use of BSI significantly improved detection performance compared to chest radiographs alone (pAUC=0.053 vs. pAUC=0.059, p=0.04). Operating at a specificity of 95%, lung nodule detection sensitivity increased from 56.8% to 63.2% with the aid of BSI. For the individual readers, the sensitivities were 51%-56%, 73%-72%, 55%-68%, 49%-56%, and 56%-64%, without and with BSI, respectively. Conclusion. Bone suppression imaging significantly improves the radiologist's performance for the detection of pulmonary nodules in radiographs.},
+  optnote = {DIAG, RADIOLOGY},
+}
+
+@conference{Scha12d,
+  author = {S. Schalekamp and B. van Ginneken and L. Bax and M.W. Imhof-Tas and L. Meiss and A. Tiehuis and E. Koedam and L. Quekel and M. Snoeren and R. Wittenberg and N. Karssemeijer and CM Schaefer-Prokop},
+  title = {Botsuppressie in thoraxfoto's verbetert de detectie van pulmonale nodules door radiologen},
+  booktitle = RADD,
+  year = {2012},
+  abstract = {Doel: Uit de literatuur is gebleken dat overprojectie van ossale structuren op thoraxfoto's een onderliggende reden is voor het missen van longkanker in 22 tot 95% van de gevallen. In deze studie wilden we het effect meten van botsuppressie software op de detectie van long nodules in thoraxfoto's. Methoden: Posteroanterior en laterale digitale thoraxfoto's van 108 patiënten met een solitaire nodule, en 192 controle patiënten werden beoordeeld door 5 radiologen en 3 arts-assistenten. Zowel de nodules, als de afwezigheid van afwijkingen was geverifieerd middels CT. Nodules werden ingedeeld in 4 moeilijkheidscategorieën: makkelijk (n=32), gemiddeld (n=32), moeilijk (n=28), zeer moeilijk (n=16). Commercieel verkrijgbare software (ClearRead Bone Suppression 2.4, Riverain Medical, Miamisburg, Ohio) werd gebruikt om botsuppressie beelden te construeren van de PA thoraxfoto. Lezers gaven verdachte gebieden aan met markers en een daarbij behorende verdachtheidsscore. Dit deden zij met en zonder de hulp van botsuppressie beelden. Statistische analyse bestond uit multi reader multi case receiver operating characteristics (ROC). Een klinisch relevant interval met een specificiteit tussen de 80 en 100% werd gebruikt om de detectie te meten. Resultaten: Gemiddelde grootte van de nodules was 17,5mm (mediaan 17mm). ROC analyse liet een verbeterde detectie zien met behulp van botsuppressie (p=0.008). Bij een specificiteit van 90% steeg de detectie van long nodules van 67% zonder botsuppressie naar 72% met botsuppressie. Verbetering werd vooral gezien bij de gemiddeld tot moeilijk zichtbare nodules (van 66% naar 73%). Conclusie: Botsuppressie verbetert de detectie van long nodules in thoraxfoto's door radiologen, vooral voor nodules met een gemiddelde tot moeilijke zichtbaarheid.},
+  optnote = {DIAG, RADIOLOGY},
+}
+
+@inproceedings{Scha13,
+  author = {S. Schalekamp and B. van Ginneken and CM Schaefer-Prokop and N. Karssemeijer},
+  title = {Impact of Bone Suppression Imaging on the Detection of Lung Nodules in Chest Radiographs: Analysis of Multiple Reading Sessions},
+  booktitle = MI,
+  year = {2013},
+  series = SPIE,
+  pages = {86730Y},
+  doi = {10.1117/12.2002799},
+  abstract = {Lung cancer is frequently overlooked in chest radiographs (CXR), often caused by overprojection of bone structures in the image. Bone suppression imaging (BSI) techniques could improve detection and interpretation of lung nodules. 
We investigated the effect of a new software product (Clearread BSI 2.4, Riverain Medical Group, Miamisburg, Ohio) that suppresses ribs and clavicles, on the detection of lung nodules. Eight observers, including five radiologists and three residents assessed radiographs of 111 patients with a CT proven solitary nodule and 189 controls. In a fully crossed study design observers assessed first radiographs without and with BSI sequentially. Secondly they scored radiographs independently having BSI available from the beginning. Five months later, the same readers scored the same cases again in an independent reading session, completing the three scorings for CXRs with BSI. Multi reader multi case (MRMC) receiver operating characteristics (ROC) were used for statistical analysis. DBM variance component estimates were calculated. Reading times were digitally recorded. Observer achieved a mean area under the curve (AUC) for unaided reading of 0.855. AUC increased to 0.883(p=0.002) with BSI in the sequential reading mode and to 0.874 (p=0.21) in the independent reading mode. In the second independent reading session after five months the AUC was 0.882 (p=0.20). Median reading times were 19s per case for the unaided CXR with another 10s for reading BSI sequentially. For the independent modes reading times were 19s and 18s. Total observer variance between sequential and independent reading design remained the same. A strong increase of uncorrelated components was found in the independent reading sessions, masking the ability to demonstrate differences in observer performance across modalities. In conclusion, bone suppression imaging improves lung nodule detection in CXR and does not prolong reading time. The independent study design has little power compared to the sequential study design due to a strong increase of uncorrelated variance components.}, + file = {Scha13.pdf:pdf\\Scha13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + ss_id = {4f1105d82ae39f9705da50fc98f4b545d80723dc}, + all_ss_ids = {['4f1105d82ae39f9705da50fc98f4b545d80723dc']}, + gscites = {3}, +} + +@article{Scha13a, + author = {Schalekamp, Steven and van Ginneken, Bram and Meiss, Louis and Peters-Bax, Liesbeth and Quekel, Lorentz G B A. and Snoeren, Miranda M. and Tiehuis, Audrey M. and Wittenberg, Rianne and Karssemeijer, Nico and Schaefer-Prokop, Cornelia M.}, + title = {Bone suppressed images improve radiologists' detection performance for pulmonary nodules in chest radiographs}, + journal = EJR, + year = {2013}, + volume = {82}, + pages = {2399-2405}, + doi = {10.1016/j.ejrad.2013.09.016}, + abstract = {To assess the effect of bone suppression imaging on observer performance in detecting lung nodules in chest radiographs.Posteroanterior (PA) and lateral digital chest radiographs of 111 (average age 65) patients with a CT proven solitary nodule (median diameter 15mm), and 189 (average age 63) controls were read by 5 radiologists and 3 residents. Conspicuity of nodules on the radiographs was classified in obvious (n=32), moderate (n=32), subtle (n=29) and very subtle (n=18). Observers read the PA and lateral chest radiographs without and with an additional PA bone suppressed image (BSI) (ClearRead Bone Suppression 2.4, Riverain Technologies, Ohio) within one reading session. Multi reader multi case (MRMC) receiver operating characteristics (ROC) were used for statistical analysis.ROC analysis showed improved detection with use of BSI compared to chest radiographs alone (AUC=0.883 versus 0.855; p=0.004). 
Performance also increased at high specificities exceeding 80\% (pAUC=0.136 versus 0.124; p=0.0007). Operating at a specificity of 90\%, sensitivity increased with BSI from 66\% to 71\% (p=0.0004). Increase of detection performance was highest for nodules with moderate and subtle conspicuity (p=0.02; p=0.03).Bone suppressed images improve radiologists' detection performance for pulmonary nodules, especially for those of moderate and subtle conspicuity.}, + file = {Scha13a.pdf:pdf\\Scha13a.pdf:PDF}, + optnote = {DIAG}, + number = {12}, + pmid = {24113431}, + month = {12}, + gsid = {8460371450451194013}, + gscites = {27}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/122870}, + ss_id = {c78606bf03806c66b3d24122d564173a448181ed}, + all_ss_ids = {['c78606bf03806c66b3d24122d564173a448181ed']}, +} + +@conference{Scha13b, + author = {S. Schalekamp and B. van Ginneken and M. Brink and B. Heggelman and M. Spee and I. Somers and N. Karssemeijer and CM Schaefer-Prokop}, + title = {"Computer Aided Detection shows added value to Bone Suppression Imaging for the detection of lung nodules in chest radiographs"}, + booktitle = {WCTI}, + year = {2013}, + abstract = {PURPOSE: To assess the added value of computer aided detection to bone suppression imaging on observer performance in detecting lung nodules in chest radiographs. MATERIALS AND METHODS: Posteroanterior (PA) and lateral digital chest radiographs of 111 (average age 65; m:f 66:45) patients with a CT proven solitary nodule (median diameter 15mm), and 189 (average age 63; m:f 111:78) controls were read by 6 radiologists and 6 residents. Institutional review board approval was obtained. Observers read the PA and lateral chest radiographs without and with Computer Aided Detection (CAD) (ClearRead +Detect 5.2, Riverain Technologies, Miamisburg, Ohio) within one reading session, and provided locations with scores of suspicion for the presence of a nodule. CAD marks were displayed as lesion contours and accompanied by a likelihood of suspiciousness. Bone suppressed images (BSI) (ClearRead Bone Suppresion 2.4, Riverain Technologies, Miamisburg, Ohio) were available at all time. Multi reader multi case (MRMC) localization receiver operating characteristics (AFROC) were used for statistical analysis. Besides, reader scores and CAD scores were independently combined, only using the reader mark locations. Significance of difference was set at P < 0.05. RESULTS: Sensitivity of the CAD system was 74% at 1.0 FP/image. LROC analysis showed improved detection with use of BSI plus CAD compared to chest radiographs with BSI (AUC = 0.848 versus 0.858; p= 0.02). Operating at a specificity of 90%, sensitivity increased with CAD from 66% to 69% (p=0.005). An independent combination of reader with CAD showed an AUC of 0.857 and a sensitivity of 70% at a specificity of 90%. CAD detected 148 of the 313 nodules initially missed by the observers. CONCLUSION: Computer aided detection showed added value to bone suppressed images for the detection of lung nodules in chest radiographs. CAD was able to detect 47% of the nodules missed by the observers. Independent combination of reader with CAD showed similar performance as when CAD was used as a second reader.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Scha13c, + author = {S. Schalekamp and B. van Ginneken and CM. Schaefer-Prokop and N. 
Karssemeijer}, + title = {"Computer aided detection of lung nodules in chest radiographs: novel approaches to improve reader performance"}, + booktitle = {MIPS}, + year = {2013}, + abstract = {RATIONALE: To investigate new methods of using a computer aided detection (CAD) system for the detection of lung nodules in chest radiographs. CAD systems for the detection of lung nodules may be clinically used but their standalone performance is still low in comparison with radiologists. Since CAD is limited by relatively high false positive rates, the way these systems are currently used may not be optimal. In our study we investigate two alternatives: Use of CAD as interactive decision support and CAD as an independent second reader. METHODS: We selected 300 frontal and lateral digital chest radiographs, including 111 with a solitary pulmonary nodule. All chest radiographs were analyzed by a commercially available CAD system (ClearRead +Detect 5.2, Riverain Technologies, Miamisburg, Ohio) which provided lesion contours of suspicious regions, accompanied by a probability score. The CAD system was used in an interactive manner. CAD marks and their suspiciousness score remained hidden unless their location was queried by the radiologist. Twelve radiologists read the CXRs without CAD and with the interactive CAD in two reading sessions. AFROC MRMC analysis was used to measure detection performance. Partial areas under the curve in a FPF range between 0 and 0.2 were used to compare reader performance. Besides, results of a weighted independent combination of CAD scores and reader scores, at the location of reader findings, were evaluated. RESULTS: Average partial area under the curve for radiologists without CAD was 0.127. No improvement was seen for radiologists with use of the interactive CAD (pAUC=0.127, p=0.88). Independent combination of reader scores with CAD significantly improved performance (pAUC=0.135,p=0.007). CONCLUSION: Though interactive use of CAD did not improve reader performance for the detection of lung nodules in chest radiographs, CAD has potential as decision support, since a simple weighted combination of reader scores with CAD scores significantly improved performance. Lack of confidence in CAD in the interactive sessions may explain these results.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Scha13d, + author = {S. Schalekamp and N. Karssemeijer and CM. Schaefer-Prokop and B. van Ginneken}, + title = {"Independent combination of multiple readers for the detection of lung nodules in chest radiographs: setting a benchmark for computer-aided detection"}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE: The detection performance for lung nodules in chest radiography shows a large interreader variability. High miss rates of lung cancer have been reported though judged as being visible in retrospect. History has proven that computer intelligence is able to surpass human performance also for complex tasks (e.g., Watson). Purpose of our study was to explore the potential gain in performance by independent combination of multiple observers. That way we aimed to define the upper boundary of "visual" detectability that ideally should be achieved by a computer aided detection (CAD) system. METHODS: 111 digital chest radiographs (CXR) containing a single small nodule (average diameter 16mm.) and 189 normal controls served as study group. Nodules had to be visible on the frontal radiograph with 42% of them judged as being of low and very low conspicuity. 
Twelve observers were asked to localize the lung nodules in the CXRs with help of bone suppressed images. Location based ROC was used for analysis. Mean sensitivity in a false positive fraction range between 0 and 0.2 was used to measure nodule localization performance. This was done for all observers separately and subsequently for the combination of multiple observers (up to 12). Observer findings were averaged when findings were located within 1.5 cm of each other. When no finding was present at the location of another observer's finding a zero-score was assigned in the averaging calculation. RESULTS: The mean sensitivity at a false positive fraction range between 0 and 0.2 was 64.0% for single reading (range 45.5% - 78.2%). Combining the readings of two observers improved lung nodule detection on average to a mean sensitivity of 73.1%. Adding more observers lead to a further performance increase up to a mean sensitivity for 12 observers of 82.3%. On average, 26 nodules were missed by single observers, 15 nodules by a combination of 2 observers, and only 5 nodules were missed when combining 12 observers. CONCLUSION: The variable and partially low baseline performance underlines the limitation of the "single observer". If CAD is able to reach the combined performance of multiple readers, a dramatic increase of nodule localization performance can be expected with drastic reduction of missed rates.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Scha14, + author = {Schalekamp, S. and van Ginneken, B. and Karssemeijer, N. and Schaefer-Prokop, C. M.}, + title = {Chest radiography: new technological developments and their applications}, + journal = SEMRESCCM, + year = {2014}, + volume = {35}, + pages = {3--16}, + doi = {10.1055/s-0033-1363447}, + abstract = {Digital chest radiography is still the most common radiological examination. With the upcoming three-dimensional (3D) acquisition techniques the value of radiography seems to diminish. But because radiography is inexpensive, readily available, and requires very little dose, it is still being used for the first-line detection of many cardiothoracic diseases. In the last decades major technical developments of this 2D technique are being achieved. First, hardware developments of digital radiography have improved the contrast to noise, dose efficacy, throughput, and workflow. Dual energy acquisition techniques reduce anatomical noise by splitting a chest radiograph into a soft tissue image and a bone image. Second, advanced processing methods are developed to enable and improve detection of many kinds of disease. Digital bone subtraction by a software algorithm mimics the soft tissue image normally acquired with dedicated hardware. Temporal subtraction aims to rule out anatomical structures clotting the image, by subtracting a current radiograph with a previous radiograph. Finally, computer-aided detection systems help radiologists for the detection of various kinds of disease such as pulmonary nodules or tuberculosis.}, + file = {Scha14.pdf:pdf\\Scha14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {24481755}, + month = {1}, + gsid = {16185250871510695519}, + gscites = {17}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/268940}, + ss_id = {dabf1c07425730421cd8e1bf4ad9d7fffb7b036f}, + all_ss_ids = {['dabf1c07425730421cd8e1bf4ad9d7fffb7b036f']}, +} + +@article{Scha14a, + author = {Schalekamp, Steven and van Ginneken, Bram and Koedam, Emmeline and Snoeren, Miranda M. and Tiehuis, Audrey M.
and Wittenberg, Rianne and Karssemeijer, Nico and Schaefer-Prokop, Cornelia M.}, + title = {Computer aided detection improves detection of pulmonary nodules in chest radiographs beyond the support by bone suppressed images}, + journal = Radiology, + year = {2014}, + volume = {272}, + pages = {252-261}, + doi = {10.1148/radiol.14131315}, + abstract = {Purpose: Purpose of our study was to evaluate the added value of computer aided detection for lung nodules in chest radiographs when radiologists have bone suppressed images available. Methods: Written informed consent was waived by the IRB. Selection of study images and study setup was reviewed and approved by the institutional review boards. 300 posteroanterior (PA) and lateral chest radiographs, (189 negative radiographs and 111 patients with a solitary nodule) were selected from image archives in four institutions. PA images were processed by a commercially available computer aided detection (CAD) system (ClearRead +Detect 5.2, Riverain Technologies, Miamisburg, Ohio), and PA bone suppressed images (BSI) were generated (ClearRead BSI). Five radiologists and three residents evaluated the radiographs with BSI available first without CAD and secondly after inspection of the CAD marks. Readers marked suspicious locations and provided a confidence score for that location to be a nodule. Location based ROC analysis was performed using JAFROC analysis. Area under the curve (AUC) functioned as figure of merit and p-values were computed with the Dorfman-Berbaum-Metz method. Results: Average nodule size was 16 mm. CAD standalone reached a sensitivity of 74% at 1.0 false positive per image. Without CAD average AUC for observers was 0.812. With CAD performance significantly improved to an AUC of 0.841 (P = .0001). CAD detected 127 of 239 nodules that were missed after evaluation of the radiographs together with BSI pooled over all observers. Only 57 of these detections were eventually marked by the observers after review of CAD candidates. Conclusion: CAD improved radiologists' performance for the detection of lung nodules on chest radiographs, even when baseline performance was optimized by providing lateral and bone suppressed images. Still the majority of true positive CAD candidates is being dismissed by the observers.}, + file = {Scha14a.pdf:pdf\\Scha14a.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {24635675}, + month = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/137680}, + ss_id = {6b18013b756f365ee486e752272327a554ea841a}, + all_ss_ids = {['6b18013b756f365ee486e752272327a554ea841a', 'b2b0b7891f02492fe0452d6dea3297c57a85476b']}, + gscites = {70}, +} + +@article{Scha14b, + author = {Schalekamp, S. and van Ginneken, B. and Heggelman, Bgf and Imhof-Tas, M. and Somers, I. and Brink, M. and Spee, M. and Schaefer-Prokop, Cm and Karssemeijer, N.}, + title = {New methods for using computer-aided detection information for the detection of lung nodules on chest radiographs}, + journal = BJR, + year = {2014}, + volume = {87}, + pages = {20140015}, + doi = {10.1259/bjr.20140015}, + abstract = {Objective: To investigate two new methods of using computer-aided detection (CAD) system information for the detection of lung nodules on chest radiographs. We evaluated an interactive CAD application and an independent combination of radiologists and CAD scores. Methods: 300 posteroanterior and lateral digital chest radiographs were selected, including 111 with a solitary pulmonary nodule (average diameter, 16 mm).
Both nodule and control cases were verified by CT. Six radiologists and six residents reviewed the chest radiographs without CAD and with CAD (ClearRead +Detect 5.2; Riverain Technologies, Miamisburg, OH) in two reading sessions. The CAD system was used in an interactive manner; CAD marks, accompanied by a score of suspicion, remained hidden unless the location was queried by the radiologist. Jackknife alternative free response receiver operating characteristics multireader multicase analysis was used to measure detection performance. Area under the curve (AUC) and partial AUC (pAUC) between a specificity of 80\% and 100\% served as the measure for detection performance. We also evaluated the results of a weighted combination of CAD scores and reader scores, at the location of reader findings. Results: AUC for the observers without CAD was 0.824. No significant improvement was seen with interactive use of CAD (AUC = 0.834; p = 0.15). Independent combination significantly improved detection performance (AUC = 0.834; p = 0.006). pAUCs without and with interactive CAD were similar (0.128), but improved with independent combination (0.137). Conclusion: Interactive CAD did not improve reader performance for the detection of lung nodules on chest radiographs. Independent combination of reader and CAD scores improved the detection performance of lung nodules. Advances in knowledge: (1) Interactive use of currently available CAD software did not improve the radiologists' detection performance of lung nodules on chest radiographs. (2) Independently combining the interpretations of the radiologist and the CAD system improved detection of lung nodules on chest radiographs.}, + file = {Scha14b.pdf:pdf\\Scha14b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1036}, + pmid = {24625084}, + month = {4}, + gsid = {6744117835177351208}, + gscites = {8}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/277816}, + ss_id = {bd9cd87e942d44cf6bd24a9a1105b4ec6c0883d5}, + all_ss_ids = {['bd9cd87e942d44cf6bd24a9a1105b4ec6c0883d5']}, +} + +@article{Scha14c, + author = {Schaefer-Prokop, C. and Prosch, H. and Prokop, M.}, + title = {[Lung cancer screening. What have we learnt for the practice so far?]}, + journal = Radiologe, + year = {2014}, + volume = {54}, + pages = {462--469}, + doi = {10.1007/s00117-013-2605-3}, + abstract = {Lung cancer is the most frequent cause of tumor-associated death and only has a good prognosis if detected at a very early tumor stage.For the first time the American National Lung Screening Trial (NLST) could prove that low-dose computed tomography (CT) screening is able to reduce lung cancer mortality by 20\%.To date, however, three much smaller and therefore statistically underpowered European trials could not confirm the positive results of the NLST. The results of the largest European trial NELSON are expected within the next 2 years. In addition, there are a number of open or not yet satisfactorily answered questions, such as the definition of the appropriate screening population, the management of nodules detected by screening, the effects of over-diagnosis and the risk of cumulative radiation exposure.The success of the NLST prompted several predominantly American professional societies to issue a positive recommendation about the implementation of lung cancer screening in a population at risk.
However, potentially conflicting results of European studies and a number of not yet optimized issues justify caution and call for a pooled analysis of European studies in order to provide statistically sound results and to ensure a high efficiency of screening with respect to the radiation applied, mental and physical patient burden and, last but not least, the financial efforts.}, + file = {Scha14c.pdf:pdf\\Scha14c.pdf:PDF}, + optnote = {DIAG}, + number = {5}, + pmid = {24824379}, + month = {5}, + gsid = {15579013926935901771}, + gscites = {4}, + ss_id = {c20ebcc69b4c426fa0231ce719576ed7ba7b0629}, + all_ss_ids = {['c20ebcc69b4c426fa0231ce719576ed7ba7b0629']}, +} + +@article{Scha14d, + author = {Schalekamp, Steven and van Ginneken, Bram and Schaefer-Prokop, Cornelia M and Karssemeijer, Nico}, + title = {Influence of study design in receiver operating characteristics studies: sequential versus independent reading}, + journal = JMI, + year = {2014}, + volume = {1}, + pages = {015501--015501}, + doi = {10.1117/1.JMI.1.1.015501}, + abstract = {Observer studies to assess new image processing devices or computer-aided diagnosis techniques are often performed, but little is known about the effect of the study design on observer performance results. We investigated the effect of the sequential and independent reading design on observer study results with respect to reader performance and their statistical power. For this we performed an observer study for the detection of lung nodules with bone-suppressed images (BSIs) compared with original chest radiographs. In a fully crossed observer study, eight observers assessed a series of 300 radiographs four times, including one assessment of the original radiograph with sequential BSI and two independent reading sessions with BSI. Observer performance was compared using multireader multicase receiver operating characteristics. No significant difference between the effect of BSI in the sequential and the independent reading sessions could be found (p=0.09; p=0.46). Compared with the original radiographs, increased performance with BSI was significant in the sequential and one of the independent reading sessions (p<0.0001; p=0.0007), and nonsignificant in the other independent reading session (p=0.10). A strong increase of uncorrelated variance components was found in the independent reading sessions, masking the ability to demonstrate differences in observer performance across modalities. Therefore, the sequential reading design is the preferred design because it is less burdensome and has more statistical power.}, + file = {Scha14d.pdf:pdf\\Scha14d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + month = {4}, + gsid = {4547693503675497615}, + gscites = {6}, + ss_id = {51d271d58ca1492055fa83f97019c34672c77dd0}, + all_ss_ids = {['51d271d58ca1492055fa83f97019c34672c77dd0']}, +} + +@article{Scha14e, + author = {Schalekamp, Steven and van Ginneken, Bram and Koedam, Emmeline and Snoeren, Miranda M. and Tiehuis, Audrey M. 
and Wittenberg, Rianne and Karssemeijer, Nico and Schaefer-Prokop, Cornelia M.}, + title = {Computer-aided detection improves detection of pulmonary nodules in chest radiographs beyond the support by bone-suppressed images}, + journal = Radiology, + year = {2014}, + volume = {272}, + pages = {252--261}, + doi = {10.1148/radiol.14131315}, + abstract = {To evaluate the added value of computer-aided detection (CAD) for lung nodules on chest radiographs when radiologists have bone-suppressed images (BSIs) available.Written informed consent was waived by the institutional review board. Selection of study images and study setup was reviewed and approved by the institutional review boards. Three hundred posteroanterior (PA) and lateral chest radiographs (189 radiographs with negative findings and 111 radiographs with a solitary nodule) in 300 subjects were selected from image archives at four institutions. PA images were processed by using a commercially available CAD, and PA BSIs were generated. Five radiologists and three residents evaluated the radiographs with BSIs available, first, without CAD and, second, after inspection of the CAD marks. Readers marked locations suspicious for a nodule and provided a confidence score for that location to be a nodule. Location-based receiver operating characteristic analysis was performed by using jackknife alternative free-response receiver operating characteristic analysis. Area under the curve (AUC) functioned as figure of merit, and P values were computed with the Dorfman-Berbaum-Metz method.Average nodule size was 16.2 mm. Stand-alone CAD reached a sensitivity of 74\% at 1.0 false-positive mark per image. Without CAD, average AUC for observers was 0.812. With CAD, performance significantly improved to an AUC of 0.841 (P = .0001). CAD detected 127 of 239 nodules that were missed after evaluation of the radiographs together with BSIs pooled over all observers. Only 57 of these detections were eventually marked by the observers after review of CAD candidates.CAD improved radiologists' performance for the detection of lung nodules on chest radiographs, even when baseline performance was optimized by providing lateral radiographs and BSIs. Still, most of the true-positive CAD candidates are dismissed by observers.}, + file = {Scha14e.pdf:pdf\\Scha14e.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {24635675}, + month = {7}, + gsid = {773280616186312110}, + gscites = {70}, + ss_id = {6b18013b756f365ee486e752272327a554ea841a}, + all_ss_ids = {['6b18013b756f365ee486e752272327a554ea841a', 'b2b0b7891f02492fe0452d6dea3297c57a85476b']}, +} + +@conference{Scha14f, + author = {Schalekamp, Steven and van den Berk, Inge and Hartmann, Ieneke and Snoeren, Miranda and Odink, Arlette and Pegge, Sjoert and Schijf, Laura and Karssemeijer, Nico and Schaefer-Prokop, Cornelia M.}, + title = {Bone suppressed images improve pulmonary fungal infection detection in chest radiographs}, + booktitle = ECR, + year = {2014}, + abstract = {Purpose: To assess the effect of bone suppression images on observer performance in detecting pulmonary fungal infections (PFI) on chest radiographs. Materials and Methods: 105 frontal (bedside or PA) chest radiographs (CXR) of suspects for PFI were collected from four institutions. Presence or absence of disease was determined by an expert radiologist and a clinical researcher in consensus, using CT as reference standard. Radiographs could contain single or multiple sites of infection, and were classified into four categories of subtlety. 
Bone suppressed images (BSI) were generated by ClearRead BSI 3.2 (Riverain Technologies, Ohio). Five radiologists and two residents participated in an observer study. Evaluation was done on a per lung basis, resulting in 78 diseased lungs and 132 normal lungs. Observers scored the lungs on a continuous scale (0-100), and marked the most suspicious lesion, if present. Area under the ROC curve (AUC) served as performance measure. P-values were calculated using the Dorfman-Berbaum-Metz method. Sensitivity and specificity were calculated considering only the lungs with a suspiciousness score over 50 to be positive. Results: AUC without BSI was 0.815, and increased to 0.853 with BSI (p=0.01). Six of the seven observers increased their performance, four of them significantly. Sensitivity increased from 49% to 66%. Specificity dropped from 95% to 90%. Significant improvement of performance was seen in the group of very subtle cases (p<0.001). Conclusion: BSI significantly improved detection performance of PFI in chest radiographs, especially for very subtle abnormalities. BSI improved the sensitivity of the CXR examination, outweighing the smaller loss in specificity.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Scha14g, + author = {Schalekamp, Steven and Karssemeijer, Nico and Schaefer-Prokop, Cornelia and van Ginneken, Bram}, + title = {Double reading improves detection of small lung tumors in chest radiographs: can a computer aided detection system replace the second reader?}, + booktitle = ECR, + year = {2014}, + abstract = {Purpose: To investigate if a computer aided detection (CAD) system could replace a second human reader in a double reading process. Methods: We selected 300 posteroanterior and lateral digital chest radiographs(CXR), including 111 with a solitary CT proven pulmonary nodule with an average diameter of 16mm. Twelve radiologists read the CXRs having bone suppressed images available. Readers marked and scored suspicious regions on a continuous scale from 0 to 100. All images were also processed by a CAD system (ClearRead+Detect 5.2, Riverain Technologies) that also provided a score between 0 and 100 for each suspicious location it identified. Location based ROC analysis was used to measure nodule localization performance. Mean sensitivity in a false positive fraction range between 0 and 0.2 was used to compare reader performance. Double reading was evaluated by combining the findings of two readers: scores of findings at the same location were averaged while findings that were not identified by the second observer were averaged with 0. CAD results were treated similar as every observer for simulated double reading. A paired t-test was used to calculate differences. Results: The mean sensitivity was 64.0% for single reading (range 45.5%-78.2%). Combining the readings of two observers improved lung nodule detection on average to 73.1% (range 58.3%-83.8%; p=0.001). Performance with CAD as the second reader increased to 67.8% (range 58.1%-81.1%; p=0.02). Conclusion: Use of CAD as the second reader significantly improves detection of lung nodules, although it is not yet as good as double reading by two human readers.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Scha14h, + author = {Schalekamp, Steven and van Ginneken, Bram and van den Berk, Inge A H. and Hartmann, Ieneke J C. and Snoeren, Miranda M. and Odink, Arlette E. and van Lankeren, Winnifred and Pegge, Sjoert A H. and Schijf, Laura J. 
and Karssemeijer, Nico and Schaefer-Prokop, Cornelia M.}, + title = {Bone suppression increases the visibility of invasive pulmonary aspergillosis in chest radiographs}, + journal = PLOSONE, + year = {2014}, + volume = {9}, + pages = {e108551}, + doi = {10.1371/journal.pone.0108551}, + abstract = {Chest radiographs (CXR) are an important diagnostic tool for the detection of invasive pulmonary aspergillosis (IPA) in critically ill patients, but their diagnostic value is limited by a poor sensitivity. By using advanced image processing, the aim of this study was to increase the value of chest radiographs in the diagnostic work up of neutropenic patients who are suspected of IPA.The frontal CXRs of 105 suspected cases of IPA were collected from four institutions. Radiographs could contain single or multiple sites of infection. CT was used as reference standard. Five radiologists and two residents participated in an observer study for the detection of IPA on CXRs with and without bone suppressed images (ClearRead BSI 3.2; Riverain Technologies). The evaluation was performed separately for the right and left lung, resulting in 78 diseased cases (or lungs) and 132 normal cases (or lungs). For each image, observers scored the likelihood of focal infectious lesions being present on a continuous scale (0-100). The area under the receiver operating characteristics curve (AUC) served as the performance measure. Sensitivity and specificity were calculated by considering only the lungs with a suspiciousness score of greater than 50 to be positive.The average AUC for only CXRs was 0.815. Performance significantly increased, to 0.853, when evaluation was aided with BSI (p = 0.01). Sensitivity increased from 49\% to 66\% with BSI, while specificity decreased from 95\% to 90\%.The detection of IPA in CXRs can be improved when their evaluation is aided by bone suppressed images. BSI improved the sensitivity of the CXR examination, outweighing a small loss in specificity.}, + file = {Scha14h.pdf:pdf\\Scha14h.pdf:PDF}, + optnote = {DIAG}, + number = {10}, + pmid = {25279774}, + month = {10}, + gsid = {10859929995411569077}, + gscites = {16}, + ss_id = {76651ee9770b111f53dbcb92b3cb1174d3760073}, + all_ss_ids = {['76651ee9770b111f53dbcb92b3cb1174d3760073']}, +} + +@article{Scha14i, + author = {Schaefer-Prokop, C.}, + title = {[HRCT patterns of the most important interstitial lung diseases]}, + journal = Radiologe, + year = {2014}, + volume = {54}, + pages = {1170--1179}, + doi = {10.1007/s00117-014-2734-3}, + abstract = {Interstitial lung diseases are a mixed group of diffuse parenchymal lung diseases which can have an acute or chronic course. Idiopathic diseases and diseases with an underlying cause (e.g. collagen vascular diseases) share the same patterns. Thin section computed tomography (CT) plays a central role in the diagnostic work-up.
The article describes the most important interstitial lung diseases following a four pattern approach with a predominant nodular or reticular pattern or a pattern with increased or decreased lung density.}, + file = {Scha14i.pdf:pdf\\Scha14i.pdf:PDF}, + optnote = {DIAG}, + number = {12}, + pmid = {25503518}, + month = {12}, + ss_id = {711091a5f3498c65061d5cff533169a7b7cedf8a}, + all_ss_ids = {['711091a5f3498c65061d5cff533169a7b7cedf8a']}, + gscites = {2}, +} + +@phdthesis{Scha15, + author = {Steven Schalekamp}, + title = {Advanced processing in chest radiography: impact on observer performance}, + year = {2015}, + url = {http://hdl.handle.net/2066/142565}, + abstract = {This thesis reports on the effect of both digitally bone supressed images and computer aided detection on the detecton of lung nodules and other lung diseases in chest radiography.}, + copromotor = {C. M. Schaefer-Prokop}, + file = {Scha15.pdf:pdf/Scha15.pdf:PDF}, + optnote = {DIAG}, + promotor = {N. Karssemeijer and B. van Ginneken}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Scha16, + author = {Schalekamp, Steven and Karssemeijer, Nico and Cats, Ariane M. and {De Hoop}, Bartjan and Geurts, Bram H J. and Berger-Hartog, Olette and {van Ginneken}, Bram and Schaefer-Prokop, Cornelia M.}, + title = {The Effect of Supplementary Bone-Suppressed Chest Radiographs on the Assessment of a Variety of Common Pulmonary Abnormalities: Results of an Observer Study}, + journal = JTI, + year = {2016}, + volume = {31}, + number = {2}, + month = {3}, + pages = {119-125}, + doi = {10.1097/RTI.0000000000000195}, + url = {http://dx.doi.org/10.1097/RTI.0000000000000195}, + abstract = {The aim of the study was to investigate the effect of bone-suppressed chest radiographs on the detection of common chest abnormalities.A total of 261 posteroanterior and lateral chest radiographs were collected from 2 hospitals. Radiographs could contain single or multiple focal opacities <3 cm (n=66), single or multiple focal opacities >3 cm (n=33), diffuse lung disease (n=49), signs of cardiogenic congestion (n=26), or no abnormalities (n=110). Twenty-one cases contained >1 type of disease. All abnormalities were confirmed by a computed tomographic scan obtained within 4 weeks of the radiograph. Bone-suppressed images (BSIs) were generated from every posteroanterior radiograph (ClearRead BSI 3.2). All cases were read by 6 radiologists without BSI, followed by an evaluation of the same case with BSI. Presence or absence of each disease category and confidence (0-100) of the observers were documented for each interpretation. Differences in the number of correct detections without and with BSI were analyzed using the Wilcoxon signed-rank test.On average, 6 more cases with focal lesions were correctly identified with BSI (P=0.03), and 1 additional case with diffuse abnormalities was found with BSI (P=0.32). None of the observers demonstrated a decrease in the number of correctly detected cases with diffuse abnormalities or cardiogenic congestion with BSI. False positives in normal cases with availability of BSI mainly referred to the detection of small focal lesions (on average 7 per reader; P=0.04).BSI does not negatively affect the interpretation of diffuse lung disease, while improving visualization of focal lesions on chest radiographs. 
BSI leads to overcalling of focal abnormalities in normal radiographs.}, + file = {Scha16.pdf:pdf\\Scha16.pdf:PDF}, + optnote = {DIAG}, + pmid = {26783697}, + gsid = {4979950243250816911}, + gscites = {7}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/167509}, + ss_id = {36f4a3250668a84acc6e3eff9f9b4f496dab24df}, + all_ss_ids = {['36f4a3250668a84acc6e3eff9f9b4f496dab24df']}, +} + +@article{Scha20, + author = {Schalekamp, S and Huisman, M and van Dijk, R A and Boomsma, M F and Freire Jorge, P J and de Boer, W S and Herder, G J M and Bonarius, M and Groot, O A and Jong, E and Schreuder, A and Schaefer-Prokop, C M}, + title = {Model-based Prediction of Critical Illness in Hospitalized Patients with COVID-19}, + journal = Radiology, + year = {2020}, + pages = {202723}, + doi = {10.1148/radiol.2020202723}, + abstract = {Background The prognosis of hospitalized patients with severe coronavirus disease 2019 (COVID-19) is difficult to predict, while the capacity of intensive care units (ICUs) is a limiting factor during the peak of the pandemic and generally dependent on a country's clinical resources. Purpose To determine the value of chest radiographic findings together with patient history and laboratory markers at admission to predict critical illness in hospitalized patients with COVID-19. Material and Methods In this retrospective study including patients from 7th March 2020 to 24 April 2020, a consecutive cohort of hospitalized patients with RT-PCR-confirmed COVID-19 from two large Dutch community hospitals was identified. After univariable analysis, a risk model to predict critical illness (i.e. death and/or ICU admission with invasive ventilation) was developed, using multivariable logistic regression including clinical, CXR and laboratory findings. Distribution and severity of lung involvement was visually assessed using an 8-point scale (chest radiography score). Internal validation was performed using bootstrapping. Performance is presented as an area under the receiver operating characteristic curve (AUC). Decision curve analysis was performed, and a risk calculator was derived. Results The cohort included 356 hospitalized patients (69 +-12 years, 237 male) of whom 168 (47%) developed critical illness. The final risk model's variables included gender, chronic obstructive lung disease, symptom duration, neutrophil count, C-reactive protein level, lactate dehydrogenase level, distribution of lung disease and chest radiography score at hospital presentation. The AUC of the model was 0.77 (95% CI: 0.72-0.81, P < .001). A risk calculator was derived for individual risk assessment; Dutch COVID-19 risk model (see Appendix E2). At an example threshold of 0.70, 71 of 356 patients would be predicted to develop critical illness of which 59 (83%) would be true-positives. Conclusion A risk model based on chest radiographic and laboratory findings obtained at admission was predictive of critical illness in hospitalized patients with coronavirus disease 2019. 
This risk calculator might be useful for triage of patients to the limited number of ICU beds/facilities.}, + file = {Scha20.pdf:pdf\\Scha20.pdf:PDF}, + optnote = {DIAG, INPRESS, RADIOLOGY}, + pmid = {32787701}, + ss_id = {58af0cc4afb228ed71f5fee4bc2c8e5481d020b4}, + all_ss_ids = {['58af0cc4afb228ed71f5fee4bc2c8e5481d020b4']}, + gscites = {68}, +} + +@article{Scha21, + author = {Schaefer-Prokop, Cornelia and Prokop, Mathias}, + title = {Chest Radiography in COVID-19: No Role in Asymptomatic and Oligosymptomatic Disease}, + doi = {10.1148/radiol.2020204038}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.2020204038}, + file = {Scha21.pdf:pdf\Scha21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {3}, + automatic = {yes}, + pages = {E156-E157}, + volume = {298}, +} + +@article{Scha22, + author = {Schaap, M. J. and Cardozo, N. J. and Patel, A. and de Jong, E. M. G. J. and van Ginneken, B. and Seyger, M. M. B.}, + title = {Image-based automated Psoriasis Area Severity Index scoring by Convolutional Neural Networks}, + journal = {Journal of the European Academy of Dermatology and Venereology}, + volume = {36}, + number = {1}, + pages = {68-75}, + doi = {https://doi.org/10.1111/jdv.17711}, + file = {Scha22.pdf:pdf\\Scha22.pdf:PDF}, + abstract = {Abstract Background The Psoriasis Area and Severity Index (PASI) score is commonly used in clinical practice and research to monitor disease severity and determine treatment efficacy. Automating the PASI score with deep learning algorithms, like Convolutional Neural Networks (CNNs), could enable objective and efficient PASI scoring. Objectives To assess the performance of image-based automated PASI scoring in anatomical regions by CNNs and compare the performance of CNNs to image-based scoring by physicians. Methods Imaging series were matched to PASI subscores determined in real life by the treating physician. CNNs were trained using standardized imaging series of 576 trunk, 614 arm and 541 leg regions. CNNs were separately trained for each PASI subscore (erythema, desquamation, induration and area) in each anatomical region (trunk, arms and legs). The head region was excluded for anonymity. Additionally, PASI-trained physicians retrospectively determined image-based subscores on the test set images of the trunk. Agreement with the real-life scores was determined with the intraclass correlation coefficient (ICC) and compared between the CNNs and physicians. Results Intraclass correlation coefficients between the CNN and real-life scores of the trunk region were 0.616, 0.580, 0.580 and 0.793 for erythema, desquamation, induration and area, respectively, with similar results for the arms and legs region. PASI-trained physicians (N = 5) were in moderate-good agreement (ICCs 0.706-0.793) with each other for image-based PASI scoring of the trunk region. ICCs between the CNN and real-life scores were slightly higher for erythema (0.616 vs. 0.558), induration (0.580 vs. 0.573) and area scoring (0.793 vs. 0.694) than image-based scoring by physicians. Physicians slightly outperformed the CNN on desquamation scoring (0.580 vs. 0.589). Conclusions Convolutional Neural Networks have the potential to automatically and objectively perform image-based PASI scoring at an anatomical region level. 
For erythema, desquamation and induration scoring, CNNs performed similar to physicians, while for area scoring CNNs outperformed physicians on image-based PASI scoring.}, + year = {2022}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/248278}, + ss_id = {fe0311bb9d578c2811912c020045f1c3b7345543}, + all_ss_ids = {['fe0311bb9d578c2811912c020045f1c3b7345543']}, + gscites = {14}, +} + +@article{Scha22a, + abstract = {Artificial intelligence (AI) applications for chest radiography and chest CT are among the most developed applications in radiology. More than 40 certified AI products are available for chest radiography or chest CT. These AI products cover a wide range of abnormalities, including pneumonia, pneumothorax and lung cancer. Most applications are aimed at detecting disease, complemented by products that characterize or quantify tissue. At present, none of the thoracic AI products is specifically designed for the pediatric population. However, some products developed to detect tuberculosis in adults are also applicable to children. Software is under development to detect early changes of cystic fibrosis on chest CT, which could be an interesting application for pediatric radiology. In this review, we give an overview of current AI products in thoracic radiology and cover recent literature about AI in chest radiography, with a focus on pediatric radiology. We also discuss possible pediatric applications.}, + author = {Schalekamp, Steven and Klein, Willemijn M and van Leeuwen, Kicky G}, + doi = {10.1007/s00247-021-05146-0}, + journal = PEDRAD, + number = {11}, + pages = {2120--2130}, + title = {{Current and emerging artificial intelligence applications in chest imaging: a pediatric perspective}}, + url = {https://doi.org/10.1007/s00247-021-05146-0}, + volume = {52}, + year = {2022}, + optnote = {DIAG, RADIOLOGY}, + file = {Scha22a.pdf:pdf\\Scha22a.pdf:PDF}, +} + +@article{Scha23, + author = {Scharm, Sarah C. and Schaefer-Prokop, Cornelia and Winther, Hinrich B. and Huisinga, Carolin and Werncke, Thomas and Vogel-Claussen, Jens and Wacker, Frank K. and Shin, Hoen-oh}, + title = {Regional Pulmonary Morphology and Function: Photon-counting CT Assessment}, + doi = {10.1148/radiol.230318}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.230318}, + file = {Scha23.pdf:pdf\Scha23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {0}, + automatic = {yes}, + volume = {308}, +} + +@article{Scha90, + author = {Schaefer, C. and Prokop, M. and Galanski, M.}, + title = {[Drug-induced changes in the lungs]}, + journal = Radiologe, + year = {1990}, + volume = {30}, + pages = {564--573}, + abstract = {There is a growing number of drugs with lung toxicity, and radiologists are increasingly confronted with nonspecific patterns of possibly drug-induced lung disease. The present article reviews clinical symptoms, pathological findings and radiographic features associated with drugs causing lung disease. Roentgen-morphological categorization is based on the predominant pattern and distinguishes five groups of drugs that cause interstitial opacities, air space consolidation, mixed interstitial and consolidating opacities, pulmonary edema and alterations associated with pulmonary vessels. 
Clinical, pathological and radiological findings are nonspecific in the majority of cases, and clinicians and radiologists can only hope to assess the probability of drug-induced lung disease by correlating radiographic and clinical data. Useful clinical data include respiratory symptoms, results of respiratory function tests, dose and schedule of drug administration, and information concerning concomitant or previous administration of drugs or radiation therapy. Useful radiographic data include the distribution of densities seen on the chest radiograph, the presence or absence of thoracic adenopathy and pleural effusion. Drug-induced lung disease frequently simulates disseminated opportunistic infections (particularly pneumocystis carinii) and must be differentiated from these because the treatment is completely different. Since early recognition and withdrawal of the noxious agent constitute the best treatment for drug-induced disease, the physician's alertness to drug toxicity is most important.}, + optnote = {DIAG}, + number = {12}, + pmid = {2290925}, + gsid = {8250516334524870965}, + gscites = {1}, +} + +@article{Scha92, + author = {Schaefer, C. M. and Prokop, M. and Oestmann, J. W. and Wiesmann, W. and Haubitz, B. and Meschede, A. and Reichelt, S. and Schirg, E. and Stender, H. S. and Galanski, M.}, + title = {Impact of hard-copy size on observer performance in digital chest radiography}, + journal = Radiology, + year = {1992}, + volume = {184}, + pages = {77--81}, + doi = {10.1148/radiology.184.1.1609106}, + abstract = {To determine the impact of reduced hard-copy size on diagnostic performance of digital radiography, screen-film chest radiographs were compared with isodose digital storage phosphor radiographs in the detection of simulated nodules, fine pulmonary lines, and micronodular opacities superimposed on the chests of 10 healthy volunteers. Digital radiographs were laser-printed in a full-size conventional format and in image lengths of two-thirds, one-half, and five-elevenths of the conventional format. Eighteen thousand observations by eight radiologists were analyzed by use of receiver operating characteristics. The detectability of lines and micronodular opacities decreased with declining image format size. In the detection of micronodular opacities, only the nearly full-size digital images were equivalent to conventional images. In the detection of linear opacities, reduction of image length by one-half or more reduced performance (analysis of variance, P less than .05). Only for the detection of nodules was no major difference found.}, + file = {Scha92.pdf:pdf\\Scha92.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {1609106}, + month = {7}, + gsid = {8000148587221092582}, + gscites = {43}, +} + +@article{Scha93, + author = {Schaefer, C. M. and Prokop, M.}, + title = {Storage phosphor radiography of the chest}, + journal = Radiology, + year = {1993}, + volume = {186}, + pages = {314--315}, + optnote = {DIAG}, + number = {2}, + pmid = {8421725}, + month = {2}, + gsid = {17717543224830902906}, + gscites = {22}, +} + +@article{Scha96, + author = {Schaefer-Prokop, C. M. and Prokop, M. and Schmidt, A. and Neitzel, U. 
and Galanski, M.}, + title = {Selenium radiography versus storage phosphor and conventional radiography in the detection of simulated chest lesions}, + journal = Radiology, + year = {1996}, + volume = {201}, + pages = {45--50}, + abstract = {To compare selenium detectors with three conventional and digital detector systems for the detection of simulated pulmonary lesions.Templates containing nodules, linear structures, and micronodular opacities were superimposed over an anthropomorphic chest phantom. The authors compared lesion detection with use of storage phosphor radiography (250 speed), selenium radiography (250 speed) with an antiscatter grid, selenium radiography (450 speed) without an antiscatter grid, an asymmetric screen-film system (400 speed), and a conventional screen-film system (250 speed). Detection performance of 10 radiologists was compared by using a multireader-multicase receiver operating characteristic analysis of variance.For the detection of nodules, no statistically significant differences between imaging modes were seen. For the detection of micronodules and linear lesions, both selenium techniques were superior to all other modes (P < .05). In addition, the asymmetric screen-film radiographs were inferior (P < .05) to the conventional screen-film radiographs and to storage phosphor radiographs for the detection of micronodules.The selenium detector improves detection of simulated fine linear and low-contrast micronodular details and appears to be superior to other detector systems for chest radiography.}, + optnote = {DIAG}, + number = {1}, + pmid = {8816519}, + month = {10}, + gsid = {12735890839054190761}, + gscites = {71}, +} + +@article{Scha97, + author = {Schaefer-Prokop, C. and Prokop, M.}, + title = {Digital radiography of the chest: comparison of the selenium detector with other imaging systems}, + journal = MM, + year = {1997}, + volume = {41}, + pages = {2--11}, + file = {Scha97.pdf:pdf\\Scha97.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + gsid = {15126018229602444596}, + gscites = {10}, +} + +@article{Scha97c, + author = {Schaefer-Prokop, C. M. and Prokop, M.}, + title = {Storage phosphor radiography}, + journal = ER, + year = {1997}, + volume = {7 Suppl 3}, + pages = {S58--S65}, + abstract = {Storage phosphor radiography is a digital technique that uses photo-stimulable, phosphor screens to substitute for conventional screen-film combinations. While the technique is more than 15 years old, it is only recently that technological and economic aspects of these systems have become favourable enough to envisage a more widespread clinical application.}, + optnote = {DIAG}, + pmid = {9169103}, + month = {2}, + gsid = {12625601783769526901}, + gscites = {50}, +} + +@article{Scha99, + author = {Schaefer-Prokop, C.}, + title = {Chest radiography: potential and limitations}, + number = {5 Suppl 1}, + pages = {10--11}, + volume = {65}, + journal = {Minerva Anestesiol}, + optnote = {DIAG, RADIOLOGY}, + pmid = {10389418}, + year = {1999}, +} + +@inproceedings{Schi03, + author = {A. M. R. Schilham and B. van Ginneken and M. Loog}, + title = {Multi-scale nodule detection in chest radiographs}, + booktitle = MICCAI, + year = {2003}, + volume = {2878}, + series = LNCS, + pages = {602-609}, + doi = {10.1007/b93810}, + abstract = {{E}arly detection is the most promising way to enhance a patients chance for survival of lung cancer. 
{I}n this work, a novel computer algorithm for nodule detection in chest radiographs is presented that takes into account the wide size range for lung nodules through the use of multi-scale image processing techniques. {T}he method consists of: i) {L}ung field segmentation with an {A}ctive {S}hape {M}odel [1]; ii) {N}odule candidate detection by {L}indebergs multi-scale blob detector [2] and quadratic classification; iii) {B}lob segmentation by multi-scale edge focusing; iv) k {N}earest neighbor classification. {E}xperiments on the complete {JSRT} database [3] show that by accepting on average 2 false positives per image, 50.6% of all nodules are detected. {F}or 10 false positives, this increases to 69.5%.}, + file = {Schi03.pdf:pdf\\Schi03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16504552768822728760}, + gscites = {36}, + ss_id = {20edec8455ced02cf81cd9b36da6d0c1e78466c6}, + all_ss_ids = {['20edec8455ced02cf81cd9b36da6d0c1e78466c6']}, +} + +@conference{Schi03a, + author = {A. M. R. Schilham and B. van Ginneken and M. Loog}, + title = {Influence of the {N}umber of {T}raining {S}amples for {C}omputer-{A}ided {D}etection of {L}ung {N}odules in {C}hest {R}adiographs}, + booktitle = RSNA, + year = {2003}, + pages = {523-524}, + optnote = {DIAG, RADIOLOGY}, + gsid = {15577272289273505873}, + gscites = {1}, +} + +@conference{Schi05, + author = {A. M. R. Schilham and B. van Ginneken}, + title = {Computer-aided diagnosis as a second reader for nodule detection in chest radiographs versus single and double reading}, + booktitle = ECR, + year = {2005}, + pages = {205}, + abstract = {{P}urpose: {I}nstead of using computer-aided diagnosis ({CAD}) only for generating prompts at suspicious areas, we assess the potential of a {CAD} system as an independent second reader for lung nodule detection in chest radiographs, compared to single and double reading. {M}ethod and materials: {T}he public {JSRT} database was used, containing 154 chest radiographs with a single proven lung nodule and 93 normal cases. {F}ive experienced radiologists and one resident participated. {A}ll cases were read by one radiologist and by the resident. {A} {CAD} system developed by the authors was used as an independent second reader, operating at 7 markers on average per case (marking 68% of the nodules). {O}bservers marked regions suspect of being a nodule, and assigned a suspicion score, using a low threshold on confidence. {S}cores were scaled to make observers comparable. {I}ndependent double reading was simulated by averaging probabilities of matching regions, with zero probability for missing regions. {P}erformance was assessed with localized {ROC} curves. {R}esults are given as sensitivities at a clinically important, low false positive referral rate of 0.02. {R}esults: {S}ingle reading sensitivities of radiologists and resident were similar: 0.13. {T}he {JSRT} database contains many subtle nodules which may explain these low sensitivities. {W}ith double reading, the sensitivity increased to 0.40. {U}sing {CAD} as a second reader gave a sensitivity of 0.27. {C}onclusions: {U}sing {CAD} as an independent double reader outperforms single reading: {T}he sensitivity at low false positive referral rate is doubled. {H}uman double reading is superior to {CAD} double reading.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6161421081099752108}, + gscites = {3}, +} + +@conference{Schi05a, + author = {A. M. R. Schilham and B. van Ginneken and H. Gietema and M. 
Prokop}, + title = {Noise Filtering for Reliable Emphysema Quantification in Thin-slice Low-dose {CT} Images}, + booktitle = RSNA, + year = {2005}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Schi05b, + author = {A. M. R. Schilham and M. Prokop and H. Gietema and B. van Ginneken}, + title = {Local noise reduction for emphysema scoring in low-dose {CT} images}, + booktitle = MI, + year = {2005}, + volume = {5747}, + series = SPIE, + pages = {921-929}, + doi = {10.1117/12.595774}, + abstract = {{C}omputed {T}omography ({CT}) has become the new reference standard for quantification of emphysema. {T}he most popular measure for emphysema derived from {CT} is the {P}ixel {I}ndex ({PI}), which expresses the fraction of the lung volume with abnormally low intensity values. {A}s {PI} is calculated from a single, fixed threshold on intensity, this measure is strongly influenced by noise. {T}his effect shows up clearly when comparing the {PI} score for a high-dose scan to the {PI} score for a low-dose (i.e. noisy) scan of the same subject. {T}his paper presents a class of noise filters that make use of a local noise estimate to specify the filtering strength: {L}ocal {N}oise {V}ariance {W}eighted {A}veraging ({LNVWA}). {T}he performance of the filter is assessed by comparing high-dose and low-dose {PI} scores for 11 subjects. {LNVWA} improves the reproducibility of high-dose {PI} scores: {F}or an emphysema threshold of -910 {HU}, the root-mean-square difference in {PI} score drops from 10% of the lung volume to 3.3% of the lung volume if {LNVWA} is used.}, + file = {Schi05b.pdf:pdf\\Schi05b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + ss_id = {9631891250b66696348d177974c08c5ff523bde9}, + all_ss_ids = {['9631891250b66696348d177974c08c5ff523bde9']}, + gscites = {0}, +} + +@inproceedings{Schi06, + author = {A. M. R. Schilham and B. van Ginneken}, + title = {Simulating nodules in chest radiographs with real nodules from multi-slice {CT} images}, + booktitle = MI, + year = {2006}, + series = SPIE, + pages = {614456-1--614456-8}, + doi = {10.1117/12.651623}, + abstract = {{T}o improve the detection of nodules in chest radiographs, large databases of chest radiographs with annotated, proven nodules are needed for training of both radiologists and computer-aided detection systems. {T}he construction of such databases is a laborious and time-consuming task. {T}his study presents a novel technique to produce large amounts of chest x-rays with annotated, simulated nodules. {R}ealistic nodules in radiographs are generated using real nodules segmented from {CT} images. {R}esults from an observer study indicate that the simulated nodules can not be distinguished from real nodules. {T}his method has great potential to aid the development of automated detection systems and to generate teaching files for human observers.}, + file = {Schi06.pdf:pdf\\Schi06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6144}, + month = {3}, + gsid = {426748162498317427}, + gscites = {2}, + ss_id = {89d57c0dfb6c4a5d0f883a96648569f605ba8b61}, + all_ss_ids = {['89d57c0dfb6c4a5d0f883a96648569f605ba8b61']}, +} + +@article{Schi06a, + author = {A. M. R. Schilham and B. van Ginneken and H. Gietema and M. 
Prokop}, + title = {Local noise weighted filtering for emphysema scoring of low-dose {CT} images}, + journal = TMI, + year = {2006}, + volume = {25}, + pages = {451--463}, + doi = {10.1109/TMI.2006.871545}, + abstract = {{C}omputed tomography ({CT}) has become the new reference standard for quantification of emphysema. {T}he most popular measure of emphysema derived from {CT} is the pixel index ({PI}), which expresses the fraction of the lung volume with abnormally low intensity values. {A}s {PI} is calculated from a single, fixed threshold on intensity, this measure is strongly influenced by noise. {T}his effect shows up clearly when comparing the {PI} score of a high-dose scan to the {PI} score of a low-dose (i.e., noisy) scan of the same subject. {I}n this paper, the noise variance ({NOVA}) filter is presented: a general framework for (iterative) nonlinear filtering, which uses an estimate of the spatially dependent noise variance in an image. {T}he {NOVA} filter iteratively estimates the local image noise and filters the image. {F}or the specific purpose of emphysema quantification of low-dose {CT} images, a dedicated, noniterative {NOVA} filter is constructed by using prior knowledge of the data to obtain a good estimate of the spatially dependent noise in an image. {T}he performance of the {NOVA} filter is assessed by comparing characteristics of pairs of high-dose and low-dose scans. {T}he compared characteristics are the {PI} scores for different thresholds and the size distributions of emphysema bullae. {A}fter filtering, the {PI} scores of high-dose and low-dose images agree to within 2\%-3\% points. {T}he reproducibility of the high-dose bullae size distribution is also strongly improved. {NOVA} filtering of a {CT} image of typically 400 x 512 x 512 voxels takes only a couple of minutes which makes it suitable for routine use in clinical practice.}, + file = {Schi06a.pdf:pdf\\Schi06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {16608060}, + month = {4}, + gsid = {6010863254825410177}, + gscites = {89}, + ss_id = {ccf65024e744697617e208593fd1d47f9c27d9aa}, + all_ss_ids = {['ccf65024e744697617e208593fd1d47f9c27d9aa']}, +} + +@article{Schi06b, + author = {A. M. R. Schilham and B. van Ginneken and M. Loog}, + title = {A computer-aided diagnosis system for detection of lung nodules in chest radiographs with an evaluation on a public database}, + journal = MIA, + year = {2006}, + volume = {10}, + pages = {247--258}, + doi = {10.1016/j.media.2005.09.003}, + abstract = {{A} computer algorithm for nodule detection in chest radiographs is presented. {T}he algorithm consists of four main steps: (i) image preprocessing; (ii) nodule candidate detection; (iii) feature extraction; (iv) candidate classification. {T}wo optional extensions to this scheme are tested: candidate selection and candidate segmentation. {T}he output of step (ii) is a list of circles, which can be transformed into more detailed contours by the extra candidate segmentation step. {I}n addition, the candidate selection step (which is a classification step using a small number of features) can be used to reduce the list of nodule candidates before step (iii). {T}he algorithm uses multi-scale techniques in several stages of the scheme: {C}andidates are found by looking for local intensity maxima in {G}aussian scale space; nodule boundaries are detected by tracing edge points found at large scales down to pixel scale; some of the features used for classification are taken from a multi-scale {G}aussian filterbank. 
{E}xperiments with this scheme (with and without the segmentation and selection steps) are carried out on a previously characterized, publicly available database, that contains a large number of very subtle nodules. {F}or this database, counting as detections only those nodules that were indicated with a confidence level of 50\% or more, radiologists previously detected 70\% of the nodules. {F}or our algorithm, it turns out that the selection step does have an added value for the system, while segmentation does not lead to a clear improvement. {W}ith the scheme with the best performance, accepting on average two false positives per image results in the identification of 51\% of all nodules. {F}or four false positives, this increases to 67\%. {T}his is close to the previously reported 70\% detection rate of the radiologists.}, + file = {Schi06b.pdf:pdf\\Schi06b.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + pmid = {16293441}, + month = {4}, + gsid = {1234054976175339261}, + gscites = {167}, + ss_id = {5db204b00f065be22c671e9d8b795d20b3c43281}, + all_ss_ids = {['5db204b00f065be22c671e9d8b795d20b3c43281']}, +} + +@inproceedings{Schi08, + author = {G. van Schie and N. Karssemeijer}, + title = {Detection of Microcalcifications Using a Nonuniform Noise Model}, + booktitle = {IWDM '08: Proceedings of the 9th international workshop on Digital Mammography}, + year = {2008}, + publisher = {Springer-Verlag}, + pages = {378--384}, + doi = {10.1007/978-3-540-70538-3_53}, + abstract = {For the detection of microcalcifications in mammograms, accurate noise estimation is of crucial importance. In this paper we present a method that makes a robust estimate of the signal dependent image noise, by taking into account quantum noise and detector inhomogeneity. In digital mammograms, high frequency image noise is dominated by quantum noise, which in raw images can be described by a square root model where the noise is proportional to the pixel value. However, due to detector inhomogeneity, the anode heel effect and other sources of variation, noise properties vary across an image. We developed a method that deals with these effects in a general way, by making a nonuniform noise model that is pixel value and location dependent. This is established by subdividing the image into tiles. In each tile a square root model of the noise is estimated, that by interpolation gives a model of the noise as a function of pixel value and location. Results indicate some improvement in microcalcification detection, when this model is used to estimate noise in images acquired with an inhomogeneous detector.}, + file = {Schi08.pdf:pdf/Schi08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {17302526749829221638}, + gscites = {10}, + ss_id = {ce9b7e56072a7ffdebdd6516a3044975ae38ac73}, + all_ss_ids = {['ce9b7e56072a7ffdebdd6516a3044975ae38ac73']}, +} + +@inproceedings{Schi09, + author = {G. van Schie and N. Karssemeijer}, + title = {Noise model for microcalcification detection in reconstructed tomosynthesis slices}, + booktitle = MI, + year = {2009}, + volume = {7260}, + series = SPIE, + pages = {72600M}, + doi = {10.1117/12.810936}, + abstract = {For the detection of microcalcifications, accurate noise estimation has shown to be an important step. In tomosynthesis, noise models have been proposed for projection data. However, it is expected that manufacturers of tomosynthesis systems will not store the raw projection images, but only the reconstructed volumes. 
We therefore investigated if and how signal dependent image noise can be modelled in the reconstructed volumes. For this research we used a dataset of 41 tomosynthesis volumes, of which 12 volumes contained a total of 20 microcalcification clusters. All volumes were acquired with a prototype of {S}ectra's photon-counting tomosynthesis system. Preliminary results show that image noise is signal dependent in a reconstructed volume, and that a model of this noise can be estimated from a volume at hand. Evaluation of the noise model was performed by using a basic microcalcification cluster detection algorithm that classifies voxels by using a threshold on a local contrast filter. Image noise was normalized by dividing local contrast in a voxel by the standard deviation of the estimated image noise in that voxel. {FROC} analysis shows that performance increases strongly when we use our model to correct for signal dependent image noise in reconstructed volumes.}, + file = {Schi09.pdf:pdf/Schi09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {6869130013441123416}, + gscites = {15}, + ss_id = {7fcd3d506310b5c7deb6858027dfd9e6a3f40258}, + all_ss_ids = {['7fcd3d506310b5c7deb6858027dfd9e6a3f40258']}, +} + +@inproceedings{Schi10, + author = {G. van Schie and K. Leifland and M. G. Wallis and E. Moa and M. Hemmendorff and N. Karssemeijer}, + title = {The Effect of Slab Size on Mass Detection Performance of a Screen-Film {CAD} System in Reconstructed Tomosynthesis Volumes}, + booktitle = {IWDM '10: Proceedings of the 10th international workshop on Digital Mammography}, + year = {2010}, + publisher = {Springer-Verlag}, + pages = {497-504}, + doi = {10.1007/978-3-642-13666-5_67}, + abstract = {In the development of a computer-aided detection ({CAD}) system a large database of training samples is of major importance. However, digital breast tomosynthesis ({DBT}) is a relatively new modality and no large database of cases is available yet. To overcome this limitation we are developing a {CAD} system for mass detection in {DBT} that can be trained with regular 2{D} mammograms, for which large datasets are available. We trained our system with a very large database of screen-film mammograms ({SFM}). Our approach does not use projection images, but only reconstructed volumes, because it is expected that manufacturers of tomosynthesis systems will only store the reconstructed volumes. In this study we developed a method that converts reconstructed volumes into a series of {SFM}-like slices and combinations of slices, called slabs. By combining slices into slabs, more information of a whole mass, which usually spans several slices, is used and its appearance becomes more similar to a 2{D} mammogram. In this study we investigate the effect of using slabs of different sizes on the performance of our {CAD} system. For validation we use a dataset of 63 tomosynthesis cases (245 volumes) consisting of 42 normal cases (163 volumes) and 21 abnormal cases (82 volumes) with a total of 47 malignant masses and architectural distortions. The volumes are acquired with a tomosynthesis system from {S}ectra and are reconstructed into 0.3 cm thick slices. Results show that performance of our {CAD} system increases significantly when slices are combined into larger slabs.
Best performance is obtained when a slab thickness of 1.5 cm (5 slices) is used, which is significantly higher than using slabs of a single slice, two slices and all slices.}, + file = {Schi10.pdf:pdf/Schi10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {5930966169749141545}, + gscites = {2}, + ss_id = {cbf00889d89fe8c70d66ad190cc94c3ed25f7f42}, + all_ss_ids = {['cbf00889d89fe8c70d66ad190cc94c3ed25f7f42']}, +} + +@inproceedings{Schi11, + author = {G. van Schie and C. Tanner and N. Karssemeijer}, + title = {Estimating corresponding locations in ipsilateral breast tomosynthesis views}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {796306}, + doi = {10.1117/12.878239}, + abstract = {To improve cancer detection in mammography, breast exams usually consist of two views per breast. To combine information from both views, radiologists and multiview computer-aided detection (CAD) systems need to match corresponding regions in the two views. In digital breast tomosynthesis (DBT), finding corresponding regions in ipsilateral volumes may be a difficult and time-consuming task for radiologists, because many slices have to be inspected individually. In this study we developed a method to quickly estimate corresponding locations in ipsilateral tomosynthesis views by applying a mathematical transformation. First a compressed breast model is matched to the tomosynthesis view containing a point of interest. Then we decompress, rotate and compress again to estimate the location of the corresponding point in the ipsilateral view. In this study we use a simple elastically deformable sphere model to obtain an analytical solution for the transformation in a given DBT case. The model is matched to the volume by using automatic segmentation of the pectoral muscle, breast tissue and nipple. For validation we annotated 181 landmarks in both views and applied our method to each location. Results show a median 3D distance between the actual location and estimated location of 1.5 cm; a good starting point for a feature based local search method to link lesions for a multiview CAD system. Half of the estimated locations were at most 1 slice away from the actual location, making our method useful as a tool in mammographic workstations to interactively find corresponding locations in ipsilateral tomosynthesis views.}, + file = {Schi11.pdf:pdf/Schi11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {17594284512436240042}, + gscites = {2}, + ss_id = {3d3ca5dc423c0265941d1dc9d8884b304a1ee4fe}, + all_ss_ids = {['3d3ca5dc423c0265941d1dc9d8884b304a1ee4fe']}, +} + +@article{Schi11a, + author = {Guido van Schie and Christine Tanner and Peter Snoeren and Maurice Samulski and Karin Leifland and Matthew G Wallis and Nico Karssemeijer}, + title = {Correlating locations in ipsilateral breast tomosynthesis views using an analytical hemispherical compression model}, + journal = PMB, + year = {2011}, + volume = {56}, + pages = {4715--4730}, + doi = {10.1088/0031-9155/56/15/006}, + abstract = {To improve cancer detection in mammography, breast examinations usually consist of two views per breast. In order to combine information from both views, corresponding regions in the views need to be matched. In 3D digital breast tomosynthesis (DBT), this may be a difficult and time-consuming task for radiologists, because many slices have to be inspected individually. 
For multiview computer-aided detection (CAD) systems, matching corresponding regions is an essential step that needs to be automated. In this study, we developed an automatic method to quickly estimate corresponding locations in ipsilateral tomosynthesis views by applying a spatial transformation. First we match a model of a compressed breast to the tomosynthesis view containing a point of interest. Then we estimate the location of the corresponding point in the ipsilateral view by assuming that this model was decompressed, rotated and compressed again. In this study, we use a relatively simple, elastically deformable sphere model to obtain an analytical solution for the transformation in a given DBT case. We investigate three different methods to match the compression model to the data by using automatic segmentation of the pectoral muscle, breast tissue and nipple. For validation, we annotated 208 landmarks in both views of a total of 146 imaged breasts of 109 different patients and applied our method to each location. The best results are obtained by using the centre of gravity of the breast to define the central axis of the model, around which the breast is assumed to rotate between views. Results show a median 3D distance between the actual location and the estimated location of 14.6 mm, a good starting point for a registration method or a feature-based local search method to link suspicious regions in a multiview CAD system. Approximately half of the estimated locations are at most one slice away from the actual location, which makes the method useful as a mammographic workstation tool for radiologists to interactively find corresponding locations in ipsilateral tomosynthesis views.}, + file = {Schi11a.pdf:pdf/Schi11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {15}, + pmid = {21737868}, + month = {7}, + gsid = {15866260991456062910}, + gscites = {19}, + ss_id = {182850da64dee765b90588045f836b2a20077a2e}, + all_ss_ids = {['182850da64dee765b90588045f836b2a20077a2e']}, +} + +@article{Schi13, + author = {van Schie, Guido and Mann, Ritse and Imhof-Tas, Mechli and Karssemeijer, Nico}, + title = {Generating synthetic mammograms from reconstructed tomosynthesis volumes}, + journal = TMI, + year = {2013}, + volume = {32}, + pages = {2322-2331}, + doi = {10.1109/TMI.2013.2281738}, + abstract = {Digital breast tomosynthesis (DBT) is a promising 3D modality that may replace mammography in the future. However, lesion search is likely to require more time in DBT volumes, while comparisons between views from different projections and prior exams might be harder to make. This may make screening with DBT cumbersome. A solution may be provided by synthesizing 2D mammograms from DBT, which may then be used to guide the search for abnormalities. In this work we focus on synthesizing mammograms in which masses and architectural distortions are optimally visualized. Our approach first determines relevant points in a DBT volume with a computer-aided detection system and then renders a mammogram from the intersection of a surface fitted through these points and the DBT volume. The method was evaluated in a pilot observer study where three readers reported mass findings in 87 patients (25 malignant, 62 normal) for which both DBT and digital mammograms were available. We found that on average, diagnostic accuracy in the synthetic mammograms was higher (Az=0.85) than in conventional mammograms (Az=0.81), although the difference was not statistically significant. 
Preliminary results suggest that the synthesized mammograms are an acceptable alternative for real mammograms regarding the detection of mass lesions.}, + file = {Schi13.pdf:pdf\\Schi13.pdf:PDF}, + optnote = {DIAG}, + number = {12}, + pmid = {24058019}, + month = {12}, + gsid = {1205294412888661201}, + gscites = {40}, + ss_id = {4a281df10b34a521ccfe28db516bc67e90df41af}, + all_ss_ids = {['4a281df10b34a521ccfe28db516bc67e90df41af']}, +} + +@article{Schi13a, + author = {van Schie, Guido and Wallis, Matthew G. and Leifland, Karin and Danielsson, Mats and Karssemeijer, Nico}, + title = {Mass detection in reconstructed digital breast tomosynthesis volumes with a computer-aided detection system trained on 2D mammograms}, + journal = MP, + year = {2013}, + volume = {40}, + pages = {041902}, + doi = {10.1118/1.4791643}, + abstract = {Purpose: To develop a computer-aided detection (CAD) system for masses in digital breast tomosynthesis (DBT) which can make use of an existing CAD system for detection of breast masses in full-field digital mammography (FFDM). This approach has the advantage that large digital screening databases that are becoming available can be used for training. DBT is currently not used for screening which makes it hard to obtain sufficient data for training.Methods: The proposed CAD system is applied to reconstructed DBT volumes and consists of two stages. In the first stage, an existing 2D CAD system is applied to slabs composed of multiple DBT slices, after processing the slabs to a representation similar to that of the FFDM training data. In the second stage, the authors group detections obtained in the slabs that detect the same object and determine the 3D location of the grouped findings using one of three different approaches, including one that uses a set of features extracted from the DBT slabs. Experiments were conducted to determine performance of the CAD system, the optimal slab thickness for this approach and the best method to establish the 3D location. Experiments were performed using a database of 192 patients (752 DBT volumes). In 49 patients, one or more malignancies were present which were described as a mass, architectural distortion, or asymmetry. Free response receiver operating characteristic analysis and bootstrapping were used for statistical evaluation.Results: Best performance was obtained when slab thickness was in the range of 1-2 cm. Using the feature based 3D localization procedure developed in the study, accurate 3D localization could be obtained in most cases. Case sensitivities of 80\% and 90\% were achieved at 0.35 and 0.99 false positives per volume, respectively.Conclusions: This study indicates that there may be a large benefit in using 2D mammograms for the development of CAD for DBT and that there is no need to exclusively limit development to DBT data.}, + file = {Schi13a.pdf:pdf\\Schi13a.pdf:PDF}, + optnote = {DIAG}, + number = {4}, + pmid = {23556896}, + month = {3}, + gsid = {4270628869848954159}, + gscites = {40}, + ss_id = {6fd7b098a6d041926be75beb4fc1835f2c695d7d}, + all_ss_ids = {['6fd7b098a6d041926be75beb4fc1835f2c695d7d']}, +} + +@phdthesis{Schi14, + author = {Guido van Schie}, + title = {Image Computing Methods for Accurate and Efficient Interpretation of Digital Breast Tomosynthesis}, + year = {2014}, + url = {http://repository.ubn.ru.nl/handle/2066/127117}, + file = {Schi14.pdf:pdf/Schi14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. 
Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@mastersthesis{Schi20, + author = {Martijn Schilpzand}, + title = {Automatic Placenta Localisation from Ultrasound Imaging in a Resource-Limited Setting}, + abstract = {Placenta previa and low-lying placenta are dangerous conditions that can cause severe + maternal and fetal complications. Obstetric ultrasound imaging is commonly used to + detect these maternal risk factors. Unfortunately, low-income countries suffer from a + shortage of trained sonographers to perform ultrasound examinations. To address this + problem, this study presents an algorithm to automatically detect low-lying placenta or + placenta previa from ultrasound data acquired with a standardized acquisition protocol. + This acquisition protocol can be taught to any healthcare worker within two hours. The + detection algorithm was optimized for performance and efficiency so that it can run on + a smartphone in combination with low-cost ultrasound equipment. The dataset used + in this study originates from St. Luke's hospital in Wolisso, Ethiopia and was acquired + with a low-cost ultrasound device. The detection algorithm consisted of two parts. First, + the placenta was segmented by a deep learning model with a U-Net architecture. This + segmentation model achieved a median test Dice of 0.835 on 2D ultrasound images. + Then, the segmentation data was used as input for a binary classifier which classified a + case as either normal placenta or as a class which includes both low-lying placenta and + placenta previa. The classification model achieved a sensitivity of 85% and a specificity + of 86%.}, + file = {Schi20.pdf:pdf/Schi20.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Nijmegen}, + year = {2020}, + journal = {Master thesis}, +} + +@article{Schi22, + author = {Schilpzand, Martijn and Neff, Chase and van Dillen, Jeroen and van Ginneken, Bram and Heskes, Tom and de Korte, Chris and van den Heuvel, Thomas}, + title = {Automatic Placenta Localization From Ultrasound Imaging in a Resource-Limited Setting Using a Predefined Ultrasound Acquisition Protocol and Deep Learning.}, + doi = {10.1016/j.ultrasmedbio.2021.12.006}, + issue = {4}, + pages = {663--674}, + volume = {48}, + abstract = {Placenta localization from obstetric 2-D ultrasound (US) imaging is unattainable for many pregnant women in low-income countries because of a severe shortage of trained sonographers. To address this problem, we present a method to automatically detect low-lying placenta or placenta previa from 2-D US imaging. Two-dimensional US data from 280 pregnant women were collected in Ethiopia using a standardized acquisition protocol and low-cost equipment. The detection method consists of two parts. First, 2-D US segmentation of the placenta is performed using a deep learning model with a U-Net architecture. Second, the segmentation is used to classify each placenta as either normal or a class including both low-lying placenta and placenta previa. The segmentation model was trained and tested on 6574 2-D US images, achieving a median test Dice coefficient of 0.84 (interquartile range = 0.23). The classifier achieved a sensitivity of 81% and a specificity of 82% on a holdout test set of 148 cases. Additionally, the model was found to segment in real time (19 +- 2 ms per 2-D US image) using a smartphone paired with a low-cost 2-D US device. 
This work illustrates the feasibility of using automated placenta localization in a resource-limited setting.}, + file = {Schi22.pdf:pdf\\Schi22.pdf:PDF}, + journal = {Ultrasound in medicine & biology}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35063289}, + year = {2022}, + ss_id = {7383ab9582f0e50753a0571f6e4d759f99f49d27}, + all_ss_ids = {['2fe9af8a6b41fc9db41e76621f037aac453ab433', '7383ab9582f0e50753a0571f6e4d759f99f49d27']}, + gscites = {5}, +} + +@conference{Schm12, + author = {M. Schmidt and E. M. van Rikxoort and O. M. Mets and P. A. de Jong and J. Kuhnigk and B. van Ginneken}, + title = {Automatic Quantification of Airway Dimensions in {COPD}: Processing Large Databases of Chest {CT} scans}, + booktitle = RSNA, + year = {2012}, + abstract = {{PURPOSE} Pathological changes of the airways are strongly associated with lung function impairment in chronic obstructive pulmonary disease ({COPD}). A system for automatic quantification of airway dimensions in chest {CT} scans is presented and its performance is validated. {METHOD AND MATERIALS} For this study, 1113 full inspiration low dose chest {CT} scans (16x0.75mm, 120-140{kVp}, 30{mAs}) of male participants of a lung cancer screening trial were processed automatically. Processing starts with detection of the trachea and segmentation of the airway tree based on region growing and morphological processing. The airway segmentation is converted to a centerline-model and the 5 lobes are classified by searching for maximal subtrees in terms of volume and separation of barycenters. Cross-section image planes oriented perpendicular to the local airway direction are defined at a spacing of 1mm throughout the entire airway tree. For each of them, intensity profiles of 72 rays pointing from the center point outwards are analyzed using an intensity integration technique, which accounts for partial volume effects and allows for accurate determination of inner and outer wall boundaries. Lumen diameter and area, wall thickness and area, relative wall area and lumen perimeter are calculated per cross-section. Branching areas and positions where the detection of the boundaries failed are automatically excluded from further analysis. For each scan, an experienced radiologist was asked to accept or reject the measurements based on structured reports showing a model of the airway tree with the classified lobes and a representative selection of up to 98 airway cross-sections and the detected inner and outer wall boundaries. The reliability of the system was determined based on the overall acceptance ratio and the occurrence of different processing errors that lead to a rejection of measurements. {RESULTS} The measurements in 1042 cases (93.6%) were rated as excellent by an experienced radiologist. The remaining cases were rejected due to problems with the trachea detection (0.7%), airway segmentation (2.6%), lobe classification (1.8%) or other reasons (1.3%). {CONCLUSION} Automatic quantification of airway dimensions in large chest {CT} scan databases is feasible with a high success rate.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schm12a, + author = {M. Schmidt and E. M. van Rikxoort and O. M. Mets and P. A. de Jong and P. Zanen and J. Kuhnigk and B. 
van Ginneken}, + title = {Relating Automatic Quantification of Airway Wall Thickness, Emphysema and Air Trapping in Inspiration and Expiration Chest {CT} Scans to {COPD} {GOLD} Stages}, + booktitle = RSNA, + year = {2012}, + abstract = {{PURPOSE} Previous studies investigated the relation of emphysema and air trapping measurements to {COPD} {GOLD} stages. This study investigates the additional value of airway wall thickness measurements in inspiration chest {CT} scans. {METHOD AND MATERIALS} A database of low-dose chest {CT} scans of control subjects ({GOLD} 0, n=44) and {COPD} subjects ({GOLD} {I-IV}, n=45/44/41/34) was used. In house developed software (Fraunhofer MEVIS, Bremen, Germany; Diagnostic Image Analysis Group, Nijmegen, the Netherlands) automatically segmented the lungs and airways in both the inspiration and expiration scans. Airway wall thickness was assessed in orthogonal cross-sections every 1mm throughout the entire airway tree (inspiration only) using an intensity-integration technique which accounts for partial volume effects. Wall thickness was averaged across all cross-sections with a lumen diameter of 2.75-3.25mm ({WT3}). Emphysema was determined as the percentage of lung volume below -950 HU in inspiration scans ({IN}-950), air trapping was determined as the percentage of lung volume below -850 HU in expiration scans ({EX}-850). Multiple linear regression models for different combinations of {CT} measurements ({IN}-950, {EX}-850, {WT3}, {IN}-950 & {EX}-850, {IN}-950 & {WT3}, {IN}-950 & {EX}-850 & {WT3}) as input variables and numeric {GOLD} stage (integer number 0-4) as output variable were established and compared to each other in terms of adjusted squared correlation coefficient R2 between output of the regression model and true {GOLD} stages. {RESULTS} The overall highest correlation was observed for the combination of all three measurements as input variables with adjusted R2=0.72, closely followed by R2=0.69 for the combination of inspiration only measurements {IN}-950 and {WT3}. Lower values were observed for all remaining models ({IN}-950 & {EX}-850: 0.64 / {EX}-850: 0.64 / {IN}-950: 0.53 / {WT3}: 0.23). {CONCLUSION} The combination of airway wall thickness and emphysema measurements from only inspiration {CT} scans provides more information for the prediction of lung function as determined by {GOLD} stages than combination of emphysema and air trapping measurements in inspiration and expiration {CT} scans. {CLINICAL RELEVANCE/APPLICATION} Airway wall thickness measurements contribute valuable information for the prediction of lung function from chest {CT} scans and may provide additional insight into development of {COPD}.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schm13, + author = {Michael Schmidt and Eva M van Rikxoort and Onno M. Mets and Pim A de Jong and Jan-Martin Kuhnigk and Matthijs Oudkerk and Harry de Koning and Bram van Ginneken}, + title = {Reproducibility of automated three-dimensional airway wall thickness measurements in thoracic computed tomography and influence of inspiration depth}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE Pathological changes of the airways are strongly associated with lung function impairment in chronic obstructive pulmonary disease ({COPD}). We investigate the reproducibility of {CT}-based airway dimension measurements and their dependence on the level of inspiration. 
METHOD AND MATERIALS We analyzed 740 pairs of low-dose chest {CT} scans of male (former) smokers who were recalled for a three month follow up scan in the {NELSON} lung cancer screening trial. Given the slow progression of {COPD}, we expect that no significant {COPD}-related changes in airway dimensions should exist between baseline and three month follow-up. Each scan was analyzed fully automatic using {CIRRUS} Lung 13.03 and airway wall thickness (Pi10) and lung volume were recorded. Subjects where processing failed for any of the two scans were excluded for analysis (n=32). First, we analyzed the differences in airway wall thickness measurements for all scan pairs. Next, we determined reproducibility in absence of significant changes in inspiration depth by repeating the analysis for the subset of scans where the difference in lung volume between baseline and follow-up was less than 200ml (n=312). Finally, we investigated the correlation between difference in inspiration depth and airway wall thickness measurements, established a linear correction model for the airway measurements and analyzed differences for corrected measurements. RESULTS Average Pi10 measurements for all 708 scan pairs were 2.46±0.55mm (range 1.09-4.33mm). Median (95th percentile) absolute difference between baseline and follow-up Pi10 measurements in all 708 scan pairs was 0.13mm (0.53mm), median lung volume differences were 230ml (1050ml). In scan pairs with well reproduced depth of inspiration (ΔVol < 200ml, n=312), median absolute Pi10 difference was 0.10mm (0.38mm). Relative differences in lung volume and Pi10 measurements were highly correlated (r=-0.64, p<0.001). After correction for lung volume changes, absolute measurement difference was 0.11mm (0.40mm). CONCLUSION Changes in level of inspiration are significantly associated to changes of airway wall thickness and accounted for approximately 25% of the total differences between baseline and follow-up measurements. CLINICAL RELEVANCE/APPLICATION Inspiration depth should be controlled or linear correction should be applied for monitoring of airway wall thickness. This may help to better differentiate {COPD} subtypes in chest {CT} scans.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schm14, + author = {M Schmidt and E. M. Van Rikxoort and J. Kuhnigk and P. F. Judy and Bram van Ginneken}, + title = {Subvoxel Accurate Airway Wall Measurements using an Intensity Integration Approach: Comparative Study using the {COPDGene} Phantom}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE Accurate quantification of airway wall thickness in thoracic CT is a key technique for the extraction of imaging biomarkers in obstructive pulmonary disease. We present an intensity integration approach that allows measurements of airway wall thickness with an accuracy higher than the resolution of the CT data. METHOD AND MATERIALS For most airways visible on CT, wall thickness is less than the resolution of the image data and partial volume effects hamper accurate measurement. By making appropriate assumptions about the density of air, airway wall and lung parenchyma, it is possible to fit a model to an array of line profiles perpendicular to the airway wall and thus measure the wall thickness precisely. This intensity integration approach was compared with two established methods: Full-Width-at-Half-Maximum (FWHM) and Phase Congruency (PC). 
We used the improved COPDGene phantom with 7 polycarbonate tubes with varying sizes (lumen diameter: 2.5/3/3/6/6/6/6 mm, wall thickness: 0.4/0.6/0.6/0.9/1.2/1.2/1.5 mm) and orientations (tubes 3 and 6 at 30° angle from scanner z-axis, all others in z-axis direction). The intensity integration and FWHM method were implemented by the authors, PC measurements were obtained using the open source software Airway Inspector. We measured each tube at five positions and calculated the average measurement. Accuracy of each method was determined by calculating the minimum, mean and maximum signed as well as mean unsigned measurement errors for each method. Additionally, linear regression was used to determine each method's ability to detect changes in wall thickness regardless of constant measurement bias. RESULTS The smallest minimum/mean/maximum signed errors were obtained for intensity integration: -0.07/-0.01/0.05 mm versus 0.39/0.74/1.04 mm (FWHM) and -0.26/0.13/0.57 mm (PC). Intensity integration also gave the lowest mean absolute errors: 0.05 mm versus 0.74mm (FWHM) and 0.27mm (PC). Correlation was very high for intensity integration and FWHM (r=0.997 and r=0.972, both p<0.001) and only high for PC (r=0.746, p=0.054). CONCLUSION Intensity integration yielded the superior results in terms of overall error and measurement consistency. FWHM significantly overestimates thin walls but provides consistent measurements. PC is unreliable for measuring small airways. CLINICAL RELEVANCE/APPLICATION Airway wall thickness measurements are best performed with an intensity integration quantification algorithm.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Scho11, + author = {M. G. Schouten and K. Nagel and T. Hambrock and C.M. Hoeks and G. Litjens and J.O. Barentsz and J.J. F\"utterer}, + title = {Differentiation of Normal Prostate Tissue, Prostatitis, and Prostate Cancer: Correlation between Diffusion-weighted Imaging and {MR}-guided Biopsy}, + booktitle = RSNA, + year = {2011}, + abstract = {{PURPOSE} To prospectively discriminate between normal prostate tissue, prostatitis and prostate cancer, based on apparent diffusion coefficient (ADC)-values, using MR-guided biopsy specimens as the standard of reference. {METHOD AND MATERIALS} The requirement to obtain institutional review board approval was waived. MR-guided biopsies were performed in 130 consecutive patients with cancer suspicious regions (CSRs) on multi-parametric MR imaging at 3T. Exclusion criteria were patients with suspicion of prostate cancer recurrence after therapy, and biopsy specimens which could not be categorized within the following histopathological groups: normal prostate tissue, prostatitis and prostate cancer. During the biopsy procedure, an axial diffusion-weighted sequence was acquired with construction of ADC maps (TR/TE, 2000/67 ms; section thickness of 4 mm; in-plane resolution, 1.8 x 1.8 mm and b-values of 0, 100, 500 and 800 s/mm2). To verify the biopsy location, a confirmation scan with the needle left in situ was acquired. This confirmation scan was projected on the calculated ADC map in order to draw a region-of-interest (ROI) on the ADC map, representing the biopsied CSR. The obtained ADC-values of this ROI were compared with the histological outcomes of the biopsy specimens. A one-way ANOVA with post-hoc comparison was used to test for ADC-value differences among the three histological groups. Differences were considered to be significant at p<.05. 
{RESULTS} In total 85 out of 130 patients were included in this study. The median ADC-values differed significantly (ANOVA, p<0.001) across normal prostate tissue (1.05x10-3 mm2/s, SD?0.17), prostatitis (1.13x10-3 mm2/s, SD?0.19) and prostate cancer (0.86x10-3 mm2/s, SD?0.13). Bonferroni post-hoc comparisons of the three groups showed that there is a statistically significant difference in median ADC-values of prostate tissue with prostatitis and prostate cancer in the peripheral zone (p<0.001) and the central gland (p=0.03). Furthermore, a statistically significant difference was found in median ADC-values of normal prostate tissue and prostate cancer in the peripheral zone (p<0.001) and the central gland (p<0.001). {CONCLUSION} Median ADC-values can prospectively discriminate between prostate cancer and normal prostate tissue or prostatitis. {CLINICAL RELEVANCE/APPLICATION} Fast non-invasive prediction of histological classification with diffusion weighted imaging in the prostate may improve patient management.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Scho11a, + author = {Martijn G Schouten and Joyce G R Bomers and Derya Yakar and Henkjan Huisman and Eva Rothgang and Dennis Bosboom and Tom W J Scheenen and Sarthak Misra and Jurgen J F\"utterer}, + title = {Evaluation of a robotic technique for transrectal {MRI}-guided prostate biopsies}, + journal = ER, + year = {2012}, + volume = {22}, + pages = {476-483}, + doi = {10.1007/s00330-011-2259-3}, + abstract = {OBJECTIVES: To evaluate the accuracy and speed of a novel robotic technique as an aid to perform magnetic resonance image (MRI)-guided prostate biopsies on patients with cancer suspicious regions. METHODS: A pneumatic controlled MR-compatible manipulator with 5 degrees of freedom was developed in-house to guide biopsies under real-time imaging. From 13 consecutive biopsy procedures, the targeting error, biopsy error and target displacement were calculated to evaluate the accuracy. The time was recorded to evaluate manipulation and procedure time. RESULTS: The robotic and manual techniques demonstrated comparable results regarding mean targeting error (5.7 vs 5.8 mm, respectively) and mean target displacement (6.6 vs 6.0 mm, respectively). The mean biopsy error was larger (6.5 vs 4.4 mm) when using the robotic technique, although not significant. Mean procedure and manipulation time were 76 min and 6 min, respectively using the robotic technique and 61 and 8 min with the manual technique. CONCLUSIONS: Although comparable results regarding accuracy and speed were found, the extended technical effort of the robotic technique make the manual technique - currently - more suitable to perform MRI-guided biopsies. Furthermore, this study provided a better insight in displacement of the target during in vivo biopsy procedures.}, + file = {Scho11a.pdf:pdf\\Scho11a.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + pmid = {21956697}, + month = {9}, + gsid = {15023985581200351958}, + gscites = {70}, + ss_id = {1ac1810f6db89758612fd372617c71f07a7b018a}, + all_ss_ids = {['1ac1810f6db89758612fd372617c71f07a7b018a']}, +} + +@article{Scho13, + author = {Scholten, Ernst Th and Mali, Willem P Th M. 
and Prokop, Mathias and van Ginneken, Bram and Glandorf, Ron and van Klaveren, Rob and Oudkerk, Matthijs and de Jong, Pim A.}, + title = {Non-solid lung nodules on low-dose computed tomography: comparison of detection rate between 3 visualization techniques}, + journal = CANI, + year = {2013}, + volume = {13}, + pages = {150--154}, + doi = {10.1102/1470-7330.2013.0016}, + abstract = {Objective: To compare various visualization techniques for the detection of non-solid nodules in low-dose lung cancer screening computed tomography (CT) scans. Methods: An enriched sample of 216 male lung cancer screening subjects aged 60.4 ± 6.0 years was used. Two blinded independent readers searched for non-solid nodules on 5-mm multiplanar reconstructions, 1-mm slices and 7-mm maximum intensity projections (trial protocol). The reference standard was a consensus diagnosis of all non-solid nodules reported at least once. Results: Twenty-three individuals (10.6\%) had in total 34 non-solid nodules. Interobserver agreement was good (Cohen kappa 0.89-0.95). For both observers, we found no differences between the 3 viewing techniques (P > 0.13). Conclusion: In low-dose lung cancer screening CT scans, we were unable to find a viewing technique superior to that used in the trial by experienced observers who focused on non-solid nodule detection.}, + file = {Scho13.pdf:pdf\\Scho13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {23598304}, + gsid = {6683656125149514426}, + gscites = {7}, + ss_id = {696f9c08ce42d9e71407d05987e5f3aa62fad108}, + all_ss_ids = {['696f9c08ce42d9e71407d05987e5f3aa62fad108']}, +} + +@article{Scho13a, + author = {Scholten, Ernst Th and Jacobs, Colin and van Ginneken, Bram and Willemink, Martin J. and Kuhnigk, Jan-Martin and van Ooijen, Peter M A. and Oudkerk, Matthijs and Mali, Willem P Th M. and de Jong, Pim A.}, + title = {Computer-Aided Segmentation and Volumetry of Artificial Ground-Glass Nodules at Chest {CT}}, + journal = AJR, + year = {2013}, + volume = {201}, + pages = {295--300}, + doi = {10.2214/AJR.12.9640}, + abstract = {OBJECTIVE. The purpose of this study was to investigate a new software program for semiautomatic measurement of the volume and mass of ground-glass nodules (GGNs) in a chest phantom and to investigate the influence of CT scanner, reconstruction filter, tube voltage, and tube current. MATERIALS AND METHODS. We used an anthropomorphic chest phantom with eight artificial GGNs with two different CT attenuations and four different volumes. CT scans were obtained with four models of CT scanner at 120 kVp and 25 mAs with a soft and a sharp reconstruction filter. On the 256-MDCT scanner, the tube current-exposure time product and tube voltage settings were varied. GGNs were measured with software that automatically segmented the nodules. Absolute percentage error (APE) was calculated for volume, mass, and density. Wilcoxon signed rank, Mann-Whitney U, and Kruskal-Wallis tests were used for analysis. RESULTS. Volume and mass did not differ significantly from the true values. When measurements were expressed as APE, the error range was 2-36\% for volume and 5-46\% for mass, which was significantly different from no error. We did not find significant differences in APE between CT scanners with filters for lower tube current for volume or lower tube voltage for mass. CONCLUSION. 
Computer-aided segmentation and mass and volume measurements of GGNs with the prototype software had promising results in this study.}, + file = {Scho13a.pdf:pdf\\Scho13a.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {23883209}, + month = {8}, + gsid = {5997033147149702974}, + gscites = {30}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/118196}, + ss_id = {bec0446b87854ea9f5b0210565e3c49ace87f24e}, + all_ss_ids = {['bec0446b87854ea9f5b0210565e3c49ace87f24e']}, +} + +@article{Scho13b, + author = {Scholten, Ernst Th and de Hoop, Bartjan and Jacobs, Colin and van Amelsvoort-van de Vorst, Saskia and van Klaveren, Rob J. and Oudkerk, Matthijs and Vliegenthart, Rozemarijn and de Koning, Harry J. and van der Aalst, Carlijn M. and Mali, Willem Th M. and Gietema, Hester A. and Prokop, Mathias and van Ginneken, Bram and de Jong, Pim A.}, + title = {Semi-automatic quantification of subsolid pulmonary nodules: comparison with manual measurements}, + journal = PLOSONE, + year = {2013}, + volume = {8}, + pages = {e80249}, + doi = {10.1371/journal.pone.0080249}, + abstract = {Accurate measurement of subsolid pulmonary nodules (SSN) is becoming increasingly important in the management of these nodules. SSNs were previously quantified with time-consuming manual measurements. The aim of the present study is to test the feasibility of semi-automatic SSNs measurements and to compare the results to the manual measurements.In 33 lung cancer screening participants with 33 SSNs, the nodules were previously quantified by two observers manually. In the present study two observers quantified these nodules by using semi-automated nodule volumetry software. Nodules were quantified for effective diameter, volume and mass. The manual and semi-automatic measurements were compared using Bland-Altman plots and paired T tests. Observer agreement was calculated as an intraclass correlation coefficient. Data are presented as mean (SD).Semi-automated measurements were feasible in all 33 nodules. Nodule diameter, volume and mass were 11.2 (3.3) mm, 935 (691) ml and 379 (311) milligrams for observer 1 and 11.1 (3.7) mm, 986 (797) ml and 399 (344) milligrams for observer 2, respectively. Agreement between observers and within observer 1 for the semi-automatic measurements was good with an intraclass correlation coefficient >0.89. For observer 1 and observer 2, measured diameter was 8.8\% and 10.3\% larger (p<0.001), measured volume was 24.3\% and 26.5\% larger (p<0.001) and measured mass was 10.6\% and 12.0\% larger (p<0.001) with the semi-automatic program compared to the manual measurements.Semi-automated measurement of the diameter, volume and mass of SSNs is feasible with good observer agreement. Semi-automated measurement makes quantification of mass and volume feasible in daily practice.}, + file = {Scho13b.pdf:pdf\\Scho13b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {24278264}, + month = {11}, + gsid = {14511785431496206226}, + gscites = {29}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/125893}, + ss_id = {f6f881961e305ee13ba292d599969a2ad8d1edcf}, + all_ss_ids = {['f6f881961e305ee13ba292d599969a2ad8d1edcf']}, +} + +@phdthesis{Scho14b, + author = {Ernst Th. Scholten}, + title = {SubSolid Nodules in lung cancer screening}, + year = {2014}, + url = {http://dspace.library.uu.nl/handle/1874/298095}, + abstract = {With eight million deaths in 2012 lung cancer is the most common cause of cancer death in the world, and the problem is still growing. 
As long as the goal of a total ban on smoking tobacco is not fulfilled, lung cancer screening as a means of secondary prevention has great potential. The aim of lung cancer screening is the detection of lung cancer in an early and hopefully curable stage resulting in a significant reduction in lung cancer deaths in the population. The Dutch Belgium lung cancer screening trial (NELSON), which included 15822 participants, is the largest in Europe. The results of this trial, aiming at a reduction of the lung cancer mortality rate of 20%, are expected in 2015 or 2016. This thesis focuses on SubSolid Nodules (SSNs), a special kind of nodules that pose a growing problem in lung cancer screening. SubSolid Nodules are also referred to as Ground Glass Opacities (GGO) or Ground Glass Nodules (GGN). In this term ground glass is defined as a circumscribed area of increased lung attenuation with preservation of the bronchial and vascular margins. The problems with SSNs compared to solid nodules are in part inherent to the appearance of the nodules: SSNs are less dense and less well circumscribed and thus not only harder to see on the image but also more difficult to measure accurately compared to solid nodules. The second problem with SSNs that makes them more difficult to handle comes from the fact that their biological behavior is different from that of solid nodules. Therefore a verdict of malignancy by the pathologist does not necessarily mean that the patient will benefit from treatment of that nodule. In this thesis we tried to make a contribution towards the solution of these problems by: • reporting on the possible improvement of the visual detection of SSNs • investigating several aspects of the semi-automatic segmentation of SSNs • investigating the feasibility of the segmentation of a solid component in a SSN • investigating a more conservative approach compared to present recommendations in managing SSNs. • evaluating the radiological aspect of missed carcinomas in the NELSON trial. In conclusion, in this thesis we have demonstrated that semi-automatic segmentation of SSNs and its solid component is achievable with sufficient accuracy in an efficient manner. The restrained NELSON approach of SSNs has not led to any interval or post screen carcinoma in the investigated follow up period. Furthermore we have shown that although interval or post screen carcinomas due to human errors are rare, there is room for improvement in the detection of intra bronchial lesions and that at present still a lot has to be learned about the significance of focal bulla wall thickening. However, even by demonstrating that the more restrictive NELSON approach of SSNs is to be preferred over the current recommendation, we still don't know what the biological behavior of SSNs is, and consequently what would be the optimal screening or therapeutic strategy in these cases. Therefore several challenges still have to be met to optimize the strategy of lung cancer screening.}, + copromotor = {P. A. de Jong and H. A. Gietema}, + file = {Scho14b.pdf:pdf\\Scho14b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PhD thesis}, +} + +@article{Scho14c, + author = {Scholten, Ernst T. and de Jong, Pim A. and de Hoop, Bartjan and van Klaveren, Rob and van Amelsvoort-van de Vorst, Saskia and Oudkerk, Matthijs and Vliegenthart, Rozemarijn and de Koning, Harry J. and van der Aalst, Carlijn M. and Vernhout, Ren\'{e} M. and Groen, Harry J M. and Lammers, Jan-Willem J. 
and van Ginneken, Bram and Jacobs, Colin and Mali, Willem P T M. and Horeweg, Nanda and Weenink, Carla and Thunnissen, Erik and Prokop, Mathias and Gietema, Hester A.}, + title = {Towards a close computed tomography monitoring approach for screen detected subsolid pulmonary nodules?}, + journal = ERJ, + year = {2015}, + volume = {45}, + pages = {765-773}, + doi = {10.1183/09031936.00005914}, + abstract = {Pulmonary subsolid nodules (SSNs) have a high likelihood of malignancy, but are often indolent. A conservative treatment approach may therefore be suitable. The aim of the current study was to evaluate whether close follow-up of SSNs with computed tomography may be a safe approach. The study population consisted of participants of the Dutch-Belgian lung cancer screening trial (Nederlands Leuvens Longkanker Screenings Onderzoek; NELSON). All SSNs detected during the trial were included in this analysis. Retrospectively, all persistent SSNs and SSNs that were resected after first detection were segmented using dedicated software, and maximum diameter, volume and mass were measured. Mass doubling time (MDT) was calculated. In total 7135 volunteers were included in the current analysis. 264 (3.3\%) SSNs in 234 participants were detected during the trial. 147 (63\%) of these SSNs in 126 participants disappeared at follow-up, leaving 117 persistent or directly resected SSNs in 108 (1.5\%) participants available for analysis. The median follow-up time was 95 months (range 20-110). 33 (28\%) SSNs were resected and 28 of those were (pre-) invasive. None of the non-resected SSNs progressed into a clinically relevant malignancy. Persistent SSNs rarely developed into clinically manifest malignancies unexpectedly. Close follow-up with computed tomography may be a safe option to monitor changes.}, + file = {Scho14c.pdf:pdf\\Scho14c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {25431271}, + month = {11}, + gsid = {16713008169098417611,9948573486239255340}, + gscites = {85}, + all_ss_ids = {['2fa5f435f41242e8ef053e94ae7af400014ca7d3', '6ca452a24dbe147205850148d98163481aa22dc6']}, +} + +@article{Scho14d, + author = {Scholten, Ernst Th and Jacobs, Colin and van Ginneken, Bram and van Riel, Sarah and Vliegenthart, Rozemarijn and Oudkerk, Matthijs and de Koning, Harry J. and Horeweg, Nanda and Prokop, Mathias and Gietema, Hester A. and Mali, Willem P Th M. and de Jong, Pim A.}, + title = {Detection and quantification of the solid component in pulmonary subsolid nodules by semiautomatic segmentation}, + journal = ER, + year = {2015}, + volume = {25}, + pages = {488-496}, + doi = {10.1007/s00330-014-3427-z}, + abstract = {To determine whether semiautomatic volumetric software can differentiate part-solid from nonsolid pulmonary nodules and aid quantification of the solid component.As per reference standard, 115 nodules were differentiated into nonsolid and part-solid by two radiologists; disagreements were adjudicated by a third radiologist. The diameters of solid components were measured manually. Semiautomatic volumetric measurements were used to identify and quantify a possible solid component, using different Hounsfield unit (HU) thresholds. The measurements were compared with the reference standard and manual measurements.The reference standard detected a solid component in 86 nodules. Diagnosis of a solid component by semiautomatic software depended on the threshold chosen. 
A threshold of -300 HU resulted in the detection of a solid component in 75 nodules with good sensitivity (90 \%) and specificity (88 \%). At a threshold of -130 HU, semiautomatic measurements of the diameter of the solid component (mean 2.4 mm, SD 2.7 mm) were comparable to manual measurements at the mediastinal window setting (mean 2.3 mm, SD 2.5 mm [p = 0.63]). Semiautomatic segmentation of subsolid nodules could diagnose part-solid nodules and quantify the solid component similar to human observers. Performance depends on the attenuation segmentation thresholds. This method may prove useful in managing subsolid nodules. • Semiautomatic segmentation can accurately differentiate nonsolid from part-solid pulmonary nodules • Semiautomatic segmentation can quantify the solid component similar to manual measurements • Semiautomatic segmentation may aid management of subsolid nodules following Fleischner Society recommendations • Performance for the segmentation of subsolid nodules depends on the chosen attenuation thresholds.}, + file = {Scho14d.pdf:pdf\\Scho14d.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {25287262}, + month = {10}, + ss_id = {c00533c3203b54405ba3192167c83a080a536ff1}, + all_ss_ids = {['c00533c3203b54405ba3192167c83a080a536ff1']}, + gscites = {56}, +} + +@article{Scho14f, + author = {Scholten, Ernst Th and de Jong, Pim A. and Jacobs, Colin and van Ginneken, Bram and van Riel, Sarah and Willemink, Martin J. and Vliegenthart, Rozemarijn and Oudkerk, Matthijs and de Koning, Harry J. and Horeweg, Nanda and Prokop, Mathias and Mali, Willem P Th M. and Gietema, Hester A.}, + title = {Interscan variation of semi-automated volumetry of subsolid pulmonary nodules}, + journal = ER, + year = {2015}, + volume = {25}, + pages = {1040-1047}, + doi = {10.1007/s00330-014-3478-1}, + abstract = {We aimed to test the interscan variation of semi-automatic volumetry of subsolid nodules (SSNs), as growth evaluation is important for SSN management. From a lung cancer screening trial all SSNs that were stable over at least 3 months were included (N = 44). SSNs were quantified on the baseline CT by two observers using semi-automatic volumetry software for effective diameter, volume, and mass. One observer also measured the SSNs on the second CT 3 months later. Interscan variation was evaluated using Bland-Altman plots. Observer agreement was calculated as intraclass correlation coefficient (ICC). Data are presented as mean (± standard deviation) or median and interquartile range (IQR). A Mann-Whitney U test was used for the analysis of the influence of adjustments on the measurements. Semi-automatic measurements were feasible in all 44 SSNs. The interscan limits of agreement ranged from -12.0 \% to 9.7 \% for diameter, -35.4 \% to 28.6 \% for volume and -27.6 \% to 30.8 \% for mass. Agreement between observers was good with intraclass correlation coefficients of 0.978, 0.957, and 0.968 for diameter, volume, and mass, respectively. Our data suggest that when using our software an increase in mass of 30 \% can be regarded as significant growth. • Recently, recommendations regarding subsolid nodules have stressed the importance of growth quantification. • Volumetric measurement of subsolid nodules is feasible with good interscan agreement. 
• Increase of mass of 30 \% can be regarded as significant growth.}, + file = {Scho14f.pdf:pdf\\Scho14f.pdf:PDF}, + optnote = {DIAG}, + number = {4}, + pmid = {25413965}, + month = {11}, + gsid = {2011905741890872805}, + gscites = {26}, + ss_id = {45a0d554907e10b52846ac7757e24f9ddc9a9d4e}, + all_ss_ids = {['45a0d554907e10b52846ac7757e24f9ddc9a9d4e']}, +} + +@conference{Scho15a, + author = {E.T. Scholten and C. Jacobs and B. van Ginneken and S. van Riel and R. Vliegenthart and M. Oudkerk and M. Prokop and H.A. Gietema and P.A. de Jong}, + title = {Detection and quantification of the solid component in pulmonary subsolid nodules by semiautomatic segmentation}, + booktitle = ECR, + year = {2015}, + abstract = {Purpose: To determine whether semiautomatic volumetric software can differentiate partsolid from nonsolid pulmonary nodules and aid quantification of the solid component. Methods: As the reference standard 115 nodules were differentiated into nonsolid and partsolid by two radiologists; disagreements were adjudicated by a third radiologist. The diameters of solid components were measured manually. Semiautomatic volumetric measurements were used to identify and quantify a possible solid component, using different Hounsfield Unit (HU) thresholds. The measurements were compared with the reference standard and manual measurements. Results: The reference standard detected a solid component in 86 nodules. Diagnosis of a solid component by semiautomatic software depended on the threshold chosen. A threshold of -300 HU resulted in the detection of a solid component in 75 nodules with good sensitivity (90%) and specificity (88%). At a threshold of -130 HU semiautomatic measurements of the diameter of the solid component (mean 2.4 mm, SD 2.7 mm) were comparable to manual measurements at mediastinal window setting (mean 2.3 mm, SD 2.5 mm [p=0.63]). Conclusion: Semiautomatic segmentation of subsolid nodules could diagnose partsolid nodules and quantify the solid component similar to human observers. Its performance depends on the attenuation segmentation thresholds. This method may prove useful in managing subsolid nodules.}, + file = {Scho15a.pdf:pdf\\Scho15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {12372288646108507958}, + gscites = {56}, + all_ss_ids = {['c00533c3203b54405ba3192167c83a080a536ff1']}, +} + +@article{Scho15b, + author = {Scholten, Ernst Th and Horeweg, Nanda and {de Koning}, Harry J. and Vliegenthart, Rozemarijn and Oudkerk, Matthijs and Mali, Willem P Th M. and {de Jong}, Pim A.}, + title = {Computed tomographic characteristics of interval and post screen carcinomas in lung cancer screening}, + journal = ER, + year = {2015}, + volume = {25}, + number = {1}, + month = {1}, + pages = {81--88}, + doi = {10.1007/s00330-014-3394-4}, + url = {http://dx.doi.org/10.1007/s00330-014-3394-4}, + abstract = {To analyse computed tomography (CT) findings of interval and post-screen carcinomas in lung cancer screening. Consecutive interval and post-screen carcinomas from the Dutch-Belgium lung cancer screening trial were included. The prior screening and the diagnostic chest CT were reviewed by two experienced radiologists in consensus with knowledge of the tumour location on the diagnostic CT. Sixty-one participants (53 men) were diagnosed with an interval or post-screen carcinoma. Twenty-two (36\%) were in retrospect visible on the prior screening CT. Detection error occurred in 20 cancers and interpretation error in two cancers. 
Errors involved intrabronchial tumour (n = 5), bulla with wall thickening (n = 5), lymphadenopathy (n = 3), pleural effusion (n = 1) and intraparenchymal solid nodules (n = 8). These were missed because of a broad pleural attachment (n = 4), extensive reticulation surrounding a nodule (n = 1) and extensive scarring (n = 1). No definite explanation other than human error was found in two cases. None of the interval or post-screen carcinomas involved a subsolid nodule.Interval or post-screen carcinomas that were visible in retrospect were mostly due to detection errors of solid nodules, bulla wall thickening or endobronchial lesions. Interval or post-screen carcinomas without explanation other than human errors are rare. • 22\% of missed carcinomas originally presented as bulla wall thickening on CT. • 22\% of missed carcinomas originally presented as endobronchial lesions on CT. • All malignant endobronchial lesions presented as interval carcinomas. • In the NELSON trial subsolid nodules were not a source of missed carcinomas.}, + file = {Scho15b.pdf:pdf\\Scho15b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {25187382}, +} + +@conference{Scho16, + author = {Scholten, E Th. and Jacobs, C. and Schaefer-Prokop, C M. and R\"uhaak, J. and {de Koning}, H J. and Oudkerk, M. and {de Jong}, P A. and M. Prokop and {van Ginneken}, B.}, + title = {Temporal subtraction of chest {CT} in lung cancer screening}, + booktitle = ECR, + year = {2016}, + abstract = {Purpose: Detection of change between CT images is crucial in lung cancer screening. We analysed whether subtraction images can detect change in nodular size between successive low-dose CT images. Methods and Materials: Given two successive CT scans lung segmentation and non-rigid registration between the two scans is performed and a subtraction image is obtained by subtracting the deformed prior scan from the current scan. Subtraction images of a total of 111 participants of a lung cancer screening trial that were referred to a pulmonologist on the basis of the last CT were studied. An experienced radiologist annotated all relevant nodular changes by inspecting the subtraction images. The two original images were available side-by-side for confirmation. In addition, the quality of the subtraction images was scored on a 1-5 scale. Results: The quality of the subtraction images was rated high: only six subtraction images (4\%) had a rating lower than 4, meaning that the images were significantly degraded by artefacts. In the referred cases 58 new nodules > 5 mm were found. Furthermore 45 nodules with significant growth were noted. A significant nodule was missed in 8 cases, of which 2 were stable and hence not visible on the subtraction image, 1 was missed because of its pleural location and 5 showed a mean increase in the diameter of 21\%.
Conclusion: Temporal subtraction of CT images is a promising tool for the visual detection of change, especially significant growth of nodules, between successive CT scans.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Scho23, + author = {Schouten, Daan and Litjens, Geert}, + booktitle = MI, + title = {PythoStitcher: an iterative approach for stitching digitized tissue fragments into full resolution whole-mount reconstructions}, + doi = {10.1117/12.2652512}, + pages = {1247118}, + series = {#SPIE#}, + volume = {12471}, + abstract = {In histopathology, whole-mount sections (WMS) can be utilized to capture entire cross-sections of excised tissue, thereby reducing the number of slides per case, preserving tissue context, and minimizing cutting artefacts. Additionally, the use of WMS allows for easier correlation of histology and pre-operative imaging. However, in digital pathology, WMS are not frequently acquired due to the limited availability of whole-slide scanners that can scan double-width glass slides. Moreover, whole-mounts may still be too large to fit on a single double-width glass slide, and archival material is typically stored as tissue fragments. In this work, we present PythoStitcher, an improved version of the AutoStitcher algorithm for automatically stitching multiple tissue fragments and constructing a full-resolution WMS. PythoStitcher is based on a genetic algorithm that iteratively optimizes the affine transformation matrix for each tissue ...}, + file = {Scho23.pdf:pdf\\Scho23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2023}, + ss_id = {69cec4aa46e33d973af972f7b78e5991d8f03601}, + all_ss_ids = {['69cec4aa46e33d973af972f7b78e5991d8f03601']}, + gscites = {0}, +} + +@conference{Schr11, + author = {Bruce Schroeder and Ralph Highnam and Andrew Cave and Jenny Walker and Nico Karssemeijer and Martin Yaffe and Roberta Jong and Olivier Alonzo-Proulx}, + title = {At What Age Should Breast Screening Begin?}, + booktitle = RSNA, + year = {2011}, + abstract = {PURPOSE: Ages for breast screening are set so as to optimize early breast cancer detection while minimizing costs and risk. In this study, we test the correlation of age to breast density to ascertain if there is a certain age when density change is greatest and thus where x-ray sensitivity improves markedly. METHOD AND MATERIALS: Volumetric breast density was calculated on digital mammograms of 69,000 women using Volpara, an FDA cleared, and fully automated breast density assessment tool for use on digital mammograms. The digital mammograms consisted of: - 15,000 women (aged 27-99) imaged on GE equipment in Toronto (screening and diagnosis) - 50,000 women (aged 50-70) imaged on Hologic equipment in Utrecht (screening) - 1,500 women (aged 45-70) imaged on Siemens equipment in Auckland (screening) - 400 women (aged 40-76) imaged on GE equipment in Greenville, North Carolina (screening) From the individual results for each woman, we then computed average breast density and inter-quartile range at each age and for each geographical location. RESULTS: The overall level of breast density differed between sites, most likely related to population. Greenville, for example, had 20% higher breast volumes, although it is known that the algorithm can over-estimate density when a tilted paddle is used such as in Utrecht. Previous results have shown Volpara producing similar results for the same breast imaged on Hologic and GE systems with standard paddle. 
Breast density decreased from 18% to 8% at Toronto (age 27 to 75), 14% to 7% at Utrecht and Auckland (50 to 75), and 10% to 5% in Greenville (40 to 75). At all sites density diminishes and then plateaus. Interestingly, the rates of breast density decrease appear similar across sites but there is no apparent abrupt change in density at any age. For all sites, at any given age there was considerable variation in breast density. For example, the inter-quartile ranges for the Toronto data were 13-23% at age 30, 8-18% at age 40, 7-17% at age 50, 4-12% at age 60 and 4-9% at age 70. CONCLUSION: Screening based purely on age may deny access to women who have less dense breasts and in whom x-ray sensitivity is high. Personalized risk/benefit analyses, including breast density assessment, may provide more suitable guidance for appropriate utilization of medical resources. CLINICAL RELEVANCE/APPLICATION: Breast screening should be offered on the basis of breast density and risk factors, not just age.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schr17, + author = {Schreuder, Anton and Schaefer-Prokop, Cornelia M and Scholten, Ernst T and Jacobs, Colin and Prokop, Mathias and van Ginneken, Bram}, + title = {Use of a risk model combining clinical information and CT findings to customize follow-up intervals in lung cancer screening}, + booktitle = RSNA, + year = {2017}, + abstract = {Purpose: The U.S. has launched an annual CT lung cancer screening program, irrespective of individual participants' malignancy risk. We developed a risk model based on information from the baseline CT and clinical information to calculate the trade-off between cost savings by omitting one year follow-up scans in low risk individuals and the number of delayed cancer diagnoses. Method and Materials: We used data from the National Lung Screening Trial. We selected all subjects who underwent a baseline scan and a one year follow up scan; those diagnosed with lung cancer after the baseline scan were excluded. Using baseline clinical data and baseline scan variables, various models were developed to estimate the risk of developing lung cancer after the one year follow-up scan, using backward stepwise regression. The full model included both clinical and scan variables. Additionally we tested a clinical-only model and a nodule-only model, the latter including the largest nodule diameter as the only variable. Furthermore, the published Brock and Patz models were validated on the same data set. Results: 174 of 24,542 participants were diagnosed with lung cancer in the year after the first annual follow up. Best predictors included in the full model were older age, higher smoking duration and intensity, shorter smoking quit time, previous COPD and cancer diagnosis, emphysema, longest and perpendicular diameter of the largest nodule, presence of subsolid nodules, presence of an upper lobe nodule, and presence of a spiculated nodule. Using our full model, 9,972, 16,298, 19,726, and 21,158 of the cancer-free persons could have safely avoided the one year follow-up scan, at the expense of delaying the diagnosis of 17, 44, 70, and 88 of the lung cancer patients, respectively. The area under the ROC curve ranged from 0.79 with our full model to 0.73 with the Brock model to 0.67 in the Patz model. Conclusion: Predictive models based on clinical and baseline scan information can be used to personalize follow up intervals in lung cancer screening, saving radiation and costs. Results differed substantially depending on the risk model used.
Clinical Relevance/Application: Our model can be used to improve lung cancer screening efficiency by selecting a substantial proportion of participants for a two year follow-up interval, while delaying lung cancer diagnosis in only very few cases. This can greatly reduce costs, radiation burden and radiologist's work-load.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Schr18, + author = {Schreuder, Anton and Schaefer-Prokop, Cornelia and Scholten, Ernst and Jacobs, Colin and Prokop, Mathias and van Ginneken, Bram}, + title = {Lung cancer risk to personalise annual and biennial follow-up computed tomography screening}, + journal = Thorax, + year = {2018}, + volume = {73}, + number = {7}, + month = {3}, + pages = {626--633}, + doi = {10.1136/thoraxjnl-2017-211107}, + abstract = {Background: All lung cancer CT screening trials used fixed follow-up intervals, which may not be optimal. We developed new lung cancer risk models for personalising screening intervals to 1 year or 2 years, and compared these with existing models. + Methods: We included participants in the CT arm of the National Lung Screening Trial (2002-2010) who underwent a baseline scan and a first annual follow-up scan and were not diagnosed with lung cancer in the first year. True and false positives and the area under the curve of each model were calculated. Internal validation was performed using bootstrapping. + Results: Data from 24 542 participants were included in the analysis. The accuracy was 0.785, 0.693, 0.697, 0.666 and 0.727 for the polynomial, patient characteristics, diameter, Patz and PanCan models, respectively. Of the 24 542 participants included, 174 (0.71%) were diagnosed with lung cancer between the first and the second annual follow-ups. Using the polynomial model, 2558 (10.4%, 95% CI 10.0% to 10.8%), 7544 (30.7%, 30.2% to 31.3%), 10 947 (44.6%, 44.0% to 45.2%), 16 710 (68.1%, 67.5% to 68.7%) and 20 023 (81.6%, 81.1% to 92.1%) of the 24 368 participants who did not develop lung cancer in the year following the first follow-up screening round could have safely skipped it, at the expense of delayed diagnosis of 0 (0.0%, 0.0% to 2.7%), 8 (4.6%, 2.2% to 9.2%), 17 (9.8%, 6.0% to 15.4%), 44 (25.3%, 19.2% to 32.5%) and 70 (40.2%, 33.0% to 47.9%) of the 174 lung cancers, respectively. + Conclusions: The polynomial model, using both patient characteristics and baseline scan morphology, was significantly superior in assigning participants to 1-year or 2-year screening intervals. 
Implementing personalised follow-up intervals would enable hundreds of participants to skip a screening round per lung cancer diagnosis delayed.}, + file = {:pdf/Schr18.pdf:PDF;:pdf/Schr18_Appendix.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29602813}, + publisher = {{BMJ}}, + gsid = {9268925672447372226}, + gscites = {29}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/198322}, + ss_id = {032a0374513489195712dabb380d0feb3bb85d46}, + all_ss_ids = {['032a0374513489195712dabb380d0feb3bb85d46']}, +} + +@article{Schr18b, + author = {Schreuder, Anton and van Ginneken, Bram and Scholten, Ernst T and Jacobs, Colin and Prokop, Mathias and Sverzellati, Nicola and Desai, Sujal R and Devaraj, Anand and Schaefer-Prokop, Cornelia M}, + title = {Classification of {CT} Pulmonary Opacities as Perifissural Nodules: Reader Variability}, + journal = Radiology, + year = {2018}, + volume = {288}, + number = {3}, + month = {7}, + pages = {867--875}, + doi = {10.1148/radiol.2018172771}, + abstract = {Purpose To study interreader variability for classifying pulmonary opacities at CT as perifissural nodules (PFNs) and determine how reliably radiologists differentiate PFNs from malignancies. Materials and Methods CT studies were obtained retrospectively from the National Lung Screening Trial (2002-2009). Nodules were eligible for the study if they were noncalcified, solid, within the size range of 5 to 10 mm, and scanned with a section thickness of 2 mm or less. Six radiologists classified 359 nodules in a cancer-enriched data set as PFN, non-PFN, or not applicable. Nodules classified as not applicable by at least three radiologists were excluded, leaving 316 nodules for post-hoc statistical analysis. Results The study group contained 22.2% cancers (70 of 316). The median proportion of nodules classified as PFNs was 45.6% (144 of 316). All six radiologists uniformly classified 17.7% (56 of 316) of the nodules as PFNs. The Fleiss k was 0.50. Compared with non-PFNs, nodules classified as PFNs were smaller and more often located in the lower lobes and attached to a fissure (P < .001). Thirteen (18.6%) of 70 cancers were misclassified 21 times as PFNs. Individual readers' misclassification rates ranged from 0% (0 of 125) to 4.9% (eight of 163). Of 13 misclassified malignancies, 11 were in the upper lobes and two were attached to a fissure. Conclusion There was moderate interreader agreement when classifying nodules as perifissural nodules. Less than 2.5% of perifissural nodule classifications were misclassified lung cancers (21 of 865) in this cancer-enriched study. Allowing nodules classified as perifissural nodules to be omitted from additional follow-up in a screening setting could substantially reduce the number of unnecessary scans; excluding perifissural nodules in the upper lobes would greatly decrease the misclassification rate.}, + file = {:pdf/Schr18b.pdf:PDF}, + optnote = {DIAG}, + pmid = {29969076}, + publisher = {Radiological Society of North America ({RSNA})}, + gsid = {9860123135480948586}, + gscites = {34}, + ss_id = {a2f7d760233f22f037d17c8196ea1463597e66e8}, + all_ss_ids = {['a2f7d760233f22f037d17c8196ea1463597e66e8']}, +} + +@conference{Schr18c, + author = {Schreuder, Anton and van Ginneken, Bram and Scholten, Ernst T and Jacobs, Colin and Prokop, Mathias and Sverzellati, Nicola and Desai, Sujal R and Devaraj, Anand and Schaefer-Prokop, Cornelia M}, + title = {What is a perifissural nodule? 
Low inter-observer agreement in NLST data}, + booktitle = {European Societies of Cardiovascular Radiology and Thoracic Imaging joint meeting}, + year = {2018}, + abstract = {Purpose/Objectives: Pulmonary nodules on chest CT classified by radiologists as perifissural nodules (PFN) have been shown to have a negligible chance of malignancy. We studied the inter-observer variability for classifying nodules as PFNs in National Lung Screening Trial (NLST) data. Methods and Materials: Out of a sample of 5819 low-dose CT scans of slice thickness <=2mm from the NLST, we detected and annotated 3669 non-calcified solid nodules with diameters of 5 to 10mm. 359 nodules were selected for the observer study. With definitions provided, six radiologists independently classified these nodules as either "typical PFN," "atypical PFN," "non-PFN," or "not applicable." A "typical PFN" has a lentiform, triangular, or polygonal shape, is located on or within 10 mm of the visceral pleura or lung fissure, and has extending linear densities. An "atypical PFN" lacks one of the three key criteria defining a typical PFN. A "non-PFN" showed at least one of the following features: spiculation, irregular shape, un-sharp borders, and distortion of the pleura or fissure. Adherence to these definitions was not required. Opacities deemed to be subsolid, completely calcified, not nodular, or not visible were considered "not applicable"; when rated as such by at least three radiologists, these were excluded (n=43). This left 316 nodules for analysis using descriptive statistics, Mann-Whitney U tests, and Fleiss' kappa (k). Results: Fifty-six of 316 nodules (17.7%) were classified by all six radiologists as either typical or atypical PFNs. More than four times the number, 229/316 nodules (72.5%), were classified by at least one radiologist as PFNs. k was 0.50; when distinguishing PFN subgroups, k decreased to 0.30. Only 7/316 nodules (2.2%) were unanimously classified as typical PFNs; none were atypical PFNs by unanimity. Compared to non-PFNs, nodules classified by at least three readers as either typical or atypical PFNs were smaller on average and were more often located in the lower lobes and attached to a fissure (p<0.001). Pleural attachment was not a good distinguishing factor between PFNs and non-PFNs (p=0.54). Conclusion: There is only a fair to moderate inter-radiologist agreement when classifying pulmonary nodules as PFNs. More than two-thirds of all nodules in our sample were considered to be a PFN by at least one radiologist; less than one-fifth were PFNs by full consensus, and considerably less when distinguishing PFN subgroups. This suggests that better criteria for identifying PFNs need to be developed and adhered to.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schr18d, + author = {Schreuder, Anton and Schaefer-Prokop, Cornelia M and Scholten, Ernst T and Lynch, David and Charbonnier, Jean-Paul and Jacobs, Colin}, + title = {Perifissural nodule count as a biomarker for COPD GOLD stages and emphysema measurements?}, + booktitle = {European Societies of Cardiovascular Radiology and Thoracic Imaging Joint Meeting}, + year = {2018}, + abstract = {Purpose/Objectives: One of the main pathophysiological mechanisms of chronic obstructive pulmonary disease (COPD) is inflammation mediated by the immune system. Intrapulmonary lymph nodes have a distinctive computed tomography (CT) morphology as perifissural nodules (PFN).
The purpose of this study was to evaluate the relation between the number of PFNs and emphysema scores. Methods and Materials: CT scans, lung function performances, and clinical data were obtained from the COPDGene study. Forty participants were randomly selected per GOLD stage groups 0 through 4 (n=200). The baseline scans were screened for nodules by a trained medical researcher with support from computer-aided detection (CAD) software. Nodules which were considered solid, had sharp borders, lacked spiculation, and were not completely calcified were assessed independently by two experienced radiologists. The number of these nodules classified as PFNs was counted per scan and averaged per GOLD stage group, and normalized emphysema score (NormES-950)- and 10mm airway lumen perimeter (Pi10)-based groups divided by quartiles. Statistical analysis was done using descriptive statistics, one-way ANOVA, and Cohen's kappa. Results: Three of 200 (1.5%) scans could not be processed by the CAD. Out of 175 scans, 584 CAD annotations were accepted and an additional 172 nodules were identified. 174 calcified, 20 ground glass, 8 subsolid, and 136 spiculated, unsharp, or irregularly shaped solid nodules were excluded by the trained medical researcher. The median diameter and volume of the remaining 418 nodules were 3.9mm (25th percentile (P25)=2.6mm, 75th percentile (P75)=5.6mm) and 18.4mm3 (P25=8.1mm3, P75=40.4mm3), respectively. Readers A and B independently classified 198/418 (47.4%) and 311/418 (74.4%) of the remaining nodules as PFNs, respectively. The interrater agreement was 0.34 (Cohen's kappa). The average number of PFNs found in GOLD groups 0 to 4 were, respectively, 0.90, 1.00, 0.98, 1.15, and 1.00 for Reader A (p=0.97), and 1.44, 1.55, 1.38, 1.70, and 1.83 for Reader B (p=0.84). The medians of the NormES and Pi10 were 7.24HU (P25=2.45HU, P75=19.17HU) and 2.35mm (P25=1.95mm, P75=2.80mm), respectively. ANOVA of the average number of PFNs between the NormES and Pi10 quartiles respectively resulted in p-values of <0.001 and 0.499 for Reader A, and 0.001 and 0.585 for Reader B. Conclusion: Despite fair interreader agreement, higher NormES is associated with greater average numbers of PFNs. There are no statistically significant differences across GOLD stages and Pi10 quartiles. In practice, a scan may only contain whole numbers of PFNs; additionally, due to high variation, PFN count is not recommended for differentiating stages of COPD.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schr18e, + author = {Schreuder, Anton and Jacobs, Colin and Lessmann, Nikolas and Scholten, Ernst T and Isgum, Ivana and Prokop, Mathias and Schaefer-Prokop, Cornelia M and van Ginneken, Bram}, + title = {Improved Lung Cancer and Mortality Prediction Accuracy Using Survival Models Based on Semi-Automatic CT Image Measurements}, + booktitle = {World Conference on Lung Cancer}, + year = {2018}, + abstract = {Background: In a lung cancer CT screening setting, imaging biomarkers are typically extracted by experienced human readers. We found that adding semi-automatic computer-aided detection (CAD) measurements to a base model significantly improved lung cancer and mortality risk prediction accuracy. Method: Participants' baseline CT scans, characteristics, and 7-year follow-up outcomes were obtained from the National Lung Screening Trial.
The selection included all 1810 deceased and a random selection of 4190 surviving participants from the CT screening arm with an image available; the latter subcohort was sampled with replacement up to 24432 to approximate the full CT arm. Seventeen patient characteristics variables endorsed by literature were considered for each model. CAD was used to automatically measure normalized emphysema score, coronary calcium volume, and thoracic aorta calcium volume. Pulmonary nodule consistency, volume, solid core volume (if part-solid), and upper lobe location were annotated by an experienced radiologist with CAD support. Only the largest noncalcified nodule was considered for the model; having no nodules was the reference. Cox proportional hazard regression was performed on patient characteristics variables only (base model) and combined with CAD variables (new model). This was done for three outcomes: lung cancer diagnosis, lung cancer mortality, and overall mortality. The average continuous net reclassification improvements (NRI) between the base and new models were calculated for each year following the baseline scan. To calculate NRI, the net percentages of subjects with and without the event of interest correctly reclassified as high and low risk, respectively, are summed (maximum range: -2 to 2); positive scores indicate that the new model is more accurate. Result: CAD measures were successfully computed for 5575 baseline scans. After sampling, the test cohort consisted of 24370 participants. 3.9% were diagnosed with lung cancer (940/24370) and 6.9% died (1681/24370), of which 24.9% due to lung cancer (418/1681). For all outcomes, the new models were significantly superior to the base model. With lung cancer diagnosis as the outcome, the NRI at 1, 4, and 7 years follow-up were 0.628 (95% confidence interval: 0.373-0.700), 0.331 (0.261-0.390), and 0.349 (0.293-0.389), respectively. The respective NRIs were 0.501 (0.290-0.642), 0.288 (0.221-0.374), and 0.255 (0.218-0.339) when predicting lung cancer mortality and 0.496 (0.295-0.610), 0.301 (0.239-0.376), and 0.270 (0.201-0.320) when predicting overall mortality. Conclusion: CAD measures of emphysema and atherosclerosis and CAD-supported pulmonary nodule annotations are of added value for predicting lung cancer and mortality. These new models may be used to further personalize lung cancer CT screening follow-up protocols.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Schr18f, + author = {Schreuder, Anton and Jacobs, Colin and Gallardo-Estrella, Leti and and Schaefer-Prokop, Cornelia M and Fukumoto, Wataru and Prokop, Mathias and van Ginneken, Bram}, + title = {Normalized emphysema score progression: An improved CT biomarker for mortality}, + booktitle = RSNA, + year = {2018}, + abstract = {Purpose: Normalized emphysema score (normES) is a protocol-robust and validated CT biomarker for mortality. We aimed to improve mortality prediction by modelling its change over time. Method and materials: CT scans from all 1810 deceased participants from the National Lung Screening Trial were selected. Of these, 445 died from lung cancer. A random selection of 4190 surviving participants were sampled with replacement up to 24432 to approximate the full cohort. The normES was obtained by computing the emphysema scores after resampling, normalization, and bullae cluster analysis. The reference models contained solely the baseline (T0) normES. 
To investigate if progression of emphysema provides additional information, normES from the first (T1) and second annual screening rounds (T2) and normES progression (normESprog) were added to the base model. normESprog was calculated by subtracting the T0 log(normES) from the T1 or T2 log(normES) and dividing by the time in between. Proportional hazard models predicting all-cause and lung cancer mortality were compared by calculating the continuous net reclassification improvement (NRI) for each year of follow-up. Results: The analysis of T0 and T1 data was performed on 22695 samples; 3547 lacked T0 or T1 scans, or had corrupted data. NRI improvement for all-cause and lung cancer mortality prediction compared to the base models were 4.5% (95%CI: -7.3 to 8.4%) and 4.1% (-9.3 to 14.6%) 3 years after baseline, 6.1% (-5.3 to 9.4%) and 0.1% (-7.1 to 12.2%) after 5 years, and 6.1% (-6.2 to 8.7%) and -0.4% (-5.6 to 11.3%) after 7 years, respectively. When modelling the T0 to T2 interval, another 2603 samples were excluded. For all-cause mortality, the 3, 5, and 7 year time points showed respective NRI improvements of -0.5% (-6.7 to 8.0%), 10.8% (5.5% to 14.7%), and 12.2% (7.1% to 15.6%). Improvements in lung cancer mortality prediction were -6.1% (-24.0 to 12.6%), 19.6% (10.6 to 29.2%), and 24.1% (15.4% to 31.7%), respectively. All hazard models had a logrank test p<.001. Conclusion: Two normES measurements are better than one at predicting mortality over longer periods of time. The time between normES measurements should be sufficiently distant to account for the slow progression of emphysema.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Schr19, + author = {Schreuder, Anton and Jacobs, Colin and Gallardo-Estrella, Leticia and Prokop, Mathias and Schaefer-Prokop, Cornelia M and van Ginneken, Bram}, + title = {Predicting all-cause and lung cancer mortality using emphysema score progression rate between baseline and follow-up chest CT images: A comparison of risk model performances}, + journal = PLOSONE, + year = {2019}, + volume = {14}, + issue = {2}, + pages = {e0212756}, + doi = {10.1371/journal.pone.0212756}, + abstract = {Normalized emphysema score is a protocol-robust CT biomarker of mortality. We aimed to improve mortality prediction by including the emphysema score progression rate-its change over time-into the models. CT scans from 6000 National Lung Screening Trial CT arm participants were included. Of these, 1810 died (445 lung cancer-specific). The remaining 4190 survivors were sampled with replacement up to 24432 to approximate the full cohort. Three overlapping subcohorts were formed which required participants to have images from specific screening rounds. Emphysema scores were obtained after resampling, normalization, and bullae cluster analysis of the original images. Base models contained solely the latest emphysema score. Progression models included emphysema score progression rate. Models were adjusted by including baseline age, sex, BMI, smoking status, smoking intensity, smoking duration, and previous COPD diagnosis. Cox proportional hazard models predicting all-cause and lung cancer mortality were compared by calculating the area under the curve per year follow-up. In the subcohort of participants with baseline and first annual follow-up scans, the analysis was performed on 4940 participants (23227 after resampling). 
Area under the curve for all-cause mortality predictions of the base and progression models 6 years after baseline were 0.564 (0.564 to 0.565) and 0.569 (0.568 to 0.569) when unadjusted, and 0.704 (0.703 to 0.704) to 0.705 (0.704 to 0.705) when adjusted. The respective performances predicting lung cancer mortality were 0.638 (0.637 to 0.639) and 0.643 (0.642 to 0.644) when unadjusted, and 0.724 (0.723 to 0.725) and 0.725 (0.725 to 0.726) when adjusted. Including emphysema score progression rate into risk models shows no clinically relevant improvement in mortality risk prediction. This is because scan normalization does not adjust for an overall change in lung density. Adjusting for changes in smoking behavior is likely required to make this a clinically useful measure of emphysema progression.}, + file = {Schr19.pdf:pdf\\Schr19.pdf:PDF}, + optnote = {DIAG}, + pmid = {30789954}, + month = {2}, + gsid = {7043689849476863169}, + gscites = {3}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/202118}, + ss_id = {010f33ceac3fb9aae25cb7ef51ba0f0dda95bb24}, + all_ss_ids = {['010f33ceac3fb9aae25cb7ef51ba0f0dda95bb24']}, +} + +@article{Schr19a, + author = {Schreur, Vivian and Domanian, Artin and Liefers, Bart and Venhuizen, Freerk G. and Klevering, B. Jeroen and Hoyng, Carel B. and de Jong, Eiko K. and Theelen, Thomas}, + title = {Morphological and topographical appearance of microaneurysms on optical coherence tomography angiography}, + journal = BJO, + year = {2019}, + volume = {103}, + number = {5}, + pages = {630--635}, + doi = {10.1136/bjophthalmol-2018-312258}, + url = {https://bjo.bmj.com/content/early/2018/06/19/bjophthalmol-2018-312258}, + abstract = {To investigate retinal microaneurysms in patients with diabetic macular oedema (DME) by optical coherence tomography angiography (OCTA) according to their location and morphology in relationship to their clinical properties, leakage on fundus fluorescein angiography (FFA) and retinal thickening on structural OCT.}, + file = {Schr19a.pdf:pdf\\Schr19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29925511}, + month = {6}, +} + +@article{Schr19b, + author = {Schreur, Vivian and de Breuk, Anita and Venhuizen, Freerk G. and S\'{a}nchez, Clara I. and Tack, Cees J. and Klevering, B. Jeroen and de Jong, Eiko K. and Hoyng, Carel B.}, + title = {Retinal hyperreflective foci in type 1 diabetes mellitus}, + journal = Retina, + year = {2019}, + month = {7}, + doi = {10.1097/IAE.0000000000002626}, + abstract = {To investigate hyperreflective foci (HF) on spectral-domain optical coherence tomography in patients with Type 1 diabetes mellitus across different stages of diabetic retinopathy (DR) and diabetic macular edema (DME) and to study clinical and morphological characteristics associated with HF. Spectral-domain optical coherence tomography scans and color fundus photographs were obtained of 260 patients. Spectral-domain optical coherence tomography scans were graded for the number of HF and other morphological characteristics. The distribution of HF across different stages of DR and DME severity were studied. Linear mixed-model analysis was used to study associations between the number of HF and clinical and morphological parameters. Higher numbers of HF were found in patients with either stage of DME versus patients without DME (P < 0.001). A trend was observed between increasing numbers of HF and DR severity, although significance was only reached for moderate nonproliferative DR (P = 0.001) and proliferative DR (P = 0.019). 
Higher numbers of HF were associated with longer diabetes duration (P = 0.029), lower high-density lipoprotein cholesterol (P = 0.005), and the presence of microalbuminuria (P = 0.005). In addition, HF were associated with morphological characteristics on spectral-domain optical coherence tomography, including central retinal thickness (P = 0.004), cysts (P < 0.001), subretinal fluid (P = 0.001), and disruption of the external limiting membrane (P = 0.018). The number of HF was associated with different stages of DR and DME severity. The associations between HF and clinical and morphological characteristics can be of use in further studies evaluating the role of HF as a biomarker for disease progression and treatment response.}, + file = {Schr19b.pdf:pdf\\Schr19b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31356496}, + gsid = {95204898750501445}, + gscites = {12}, + ss_id = {766b44a5bc3a4ffb30dd1b4b4d76752e8c81698e}, + all_ss_ids = {['766b44a5bc3a4ffb30dd1b4b4d76752e8c81698e']}, +} + +@article{Schr20, + author = {Schreuder, Anton and Jacobs, Colin and Scholten, Ernst T. and Prokop, Mathias and van Ginneken, Bram and Lynch, David A. and Schaefer-Prokop, Cornelia M.}, + title = {Association between the number and size of intrapulmonary lymph nodes and chronic obstructive pulmonary disease severity}, + journal = PRJ, + year = {2020}, + volume = {8}, + pages = {e9166}, + doi = {10.7717/peerj.9166}, + url = {https://doi.org/10.7717/peerj.9166}, + abstract = {Purpose: One of the main pathophysiological mechanisms of chronic obstructive pulmonary disease is inflammation, which has been associated with lymphadenopathy. Intrapulmonary lymph nodes can be identified on CT as perifissural nodules (PFN). We investigated the association between the number and size of PFNs and measures of COPD severity. Materials and Methods: CT images were obtained from COPDGene. 50 subjects were randomly selected per GOLD stage (0 to 4), GOLD-unclassified, and never-smoker groups and allocated to either "Healthy," "Mild," or "Moderate/severe" groups. 26/350 (7.4%) subjects had missing images and were excluded. Supported by computer-aided detection, a trained researcher prelocated non-calcified opacities larger than 3 mm in diameter. Included lung opacities were classified independently by two radiologists as either "PFN," "not a PFN," "calcified," or "not a nodule"; disagreements were arbitrated by a third radiologist. Ordinal logistic regression was performed as the main statistical test. Results: A total of 592 opacities were included in the observer study. A total of 163/592 classifications (27.5%) required arbitration. A total of 17/592 opacities (2.9%) were excluded from the analysis because they were not considered nodular, were calcified, or all three radiologists disagreed. A total of 366/575 accepted nodules (63.7%) were considered PFNs. A maximum of 10 PFNs were found in one image; 154/324 (47.5%) contained no PFNs. The number of PFNs per subject did not differ between COPD severity groups (\textit{p} = 0.50). PFN short-axis diameter could significantly distinguish between the Mild and Moderate/severe groups, but not between the Healthy and Mild groups (\textit{p} = 0.021). Conclusions: There is no relationship between PFN count and COPD severity. 
There may be a weak trend of larger intrapulmonary lymph nodes among patients with more advanced stages of COPD.}, + file = {Schr20.pdf:pdf\\Schr20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32685283}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/225855}, + ss_id = {0060644239845c59813a5a9122d34e1f75d18e4c}, + all_ss_ids = {['0060644239845c59813a5a9122d34e1f75d18e4c']}, + gscites = {0}, +} + +@article{Schr20a, + author = {Schreuder, Anton and Scholten, Ernst T. and van Ginneken, Bram and Jacobs, Colin}, + title = {Artificial intelligence for detection and characterization of pulmonary nodules in lung cancer CT screening: ready for practice?}, + journal = TLCR, + volume = {10}, + number = {5}, + pages = {2378--2388}, + year = {2021}, + doi = {10.21037/tlcr-2020-lcs-06}, + abstract = {Lung cancer computed tomography (CT) screening trials using low-dose CT have repeatedly demonstrated a reduction in the number of lung cancer deaths in the screening group compared to a control group. With various countries currently considering the implementation of lung cancer screening, recurring discussion points are, among others, the potentially high false positive rates, cost-effectiveness, and the availability of radiologists for scan interpretation. Artificial intelligence (AI) has the potential to increase the efficiency of lung cancer screening. We discuss the performance levels of AI algorithms for various tasks related to the interpretation of lung screening CT scans, how they compare to human experts, and how AI and humans may complement each other. We discuss how AI may be used in the lung cancer CT screening workflow according to the current evidence and describe the additional research that will be required before AI can take a more prominent role in the analysis of lung screening CT scans.}, + file = {Schr20a.pdf:pdf\\Schr20a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34164285}, + ss_id = {2a32913ae752a56f9997a3869069a768f09a52e7}, + all_ss_ids = {['2a32913ae752a56f9997a3869069a768f09a52e7']}, + gscites = {27}, +} + +@article{Schr20b, + author = {Schreuder, Anton and Schaefer-Prokop, C. M.}, + title = {Perifissural nodules: ready for application into lung cancer CT screening?}, + journal = ATM, + year = {2020}, + doi = {10.21037/atm-20-3384}, + file = {Schr20b.pdf:pdf\\Schr20b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {04eda893d160910706751960e4ca9dd730f42674}, + all_ss_ids = {['04eda893d160910706751960e4ca9dd730f42674']}, + gscites = {0}, +} + +@article{Schr20c, + author = {Schreuder, Anton and Jacobs, Colin and Scholten, Ernst T. and van Ginneken, Bram and Schaefer-Prokop, Cornelia M. and Prokop, Mathias}, + title = {Typical CT Features of Intrapulmonary Lymph Nodes: A Review}, + journal = {Radiology: Cardiothoracic Imaging}, + volume = {2}, + number = {4}, + pages = {e190159}, + year = {2020}, + doi = {10.1148/ryct.2020190159}, + abstract = {Several studies investigated the appearance of intrapulmonary lymph nodes (IPLNs) at CT with pathologic correlation. IPLNs are benign lesions and do not require follow-up after initial detection. There are indications that IPLNs represent a considerable portion of incidentally found pulmonary nodules seen at high-resolution CT. The reliable and accurate identification of IPLNs as benign nodules may substantially reduce the number of unnecessary follow-up CT examinations. 
Typical CT features of IPLNs are a noncalcified solid nodule with sharp margins; a round, oval, or polygonal shape; distanced 15 mm or less from the pleura; and most being located below the level of the carina. The term perifissural nodule (PFN) was coined based on some of these characteristics. Standardization of those CT criteria are a prerequisite for accurate nodule classification. However, four different definitions of PFNs can currently be found in the literature. Furthermore, there is considerable variation in the reported interobserver agreement, malignancy rate, and prevalence of PFNs. The purpose of this review was to provide an overview of what is known about PFNs. In addition, knowledge gaps in defining PFNs will be discussed. A decision tree to guide clinicians in classifying nodules as PFNs is provided.}, + file = {Schr20c.pdf:pdf\\Schr20c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33778597}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/229529}, + ss_id = {302fbaac065fe76b0f8d6e2bccc8c715d1ab230a}, + all_ss_ids = {['302fbaac065fe76b0f8d6e2bccc8c715d1ab230a']}, + gscites = {7}, +} + +@article{Schr21, + author = {Schreuder, Anton and Jacobs, Colin and Lessmann, Nikolas and Broeders, Mireille JM and Silva, Mario and Isgum, Ivana and de Jong, Pim A and Sverzellati, Nicola and Prokop, Mathias and Pastorino, Ugo and Schaefer-Prokop, Cornelia M and van Ginneken, Bram}, + title = {Combining pulmonary and cardiac computed tomography biomarkers for disease-specific risk modelling in lung cancer screening}, + journal = ERJ, + volume = {58}, + number = {3}, + pages = {2003386}, + year = {2021}, + doi = {10.1183/13993003.03386-2020}, + abstract = {Objectives: Combined assessment of cardiovascular disease (CVD), chronic obstructive pulmonary disease (COPD), and lung cancer (LC) may improve the effectiveness of LC screening in smokers. The aims were to derive and assess risk models for predicting LC incidence, CVD mortality, and COPD mortality by combining quantitative CT measures from each disease, and to quantify the added predictive benefit of self-reported patient characteristics given the availability of a CT scan. Methods: A survey model (patient characteristics only), CT model (CT information only), and final model (all variables) were derived for each outcome using parsimonious Cox regression on a sample from the National Lung Screening Trial (n=15 000). Validation was performed using Multicentric Italian Lung Detection data (n=2287). Time-dependent measures of model discrimination and calibration are reported. Results: Age, mean lung density, emphysema score, bronchial wall thickness, and aorta calcium volume are variables which contributed to all final models. Nodule features were crucial for LC incidence predictions but did not contribute to CVD and COPD mortality prediction. In the derivation cohort, the LC incidence CT model had a 5-year area under the receiver operating characteristic curve (AUC) of 82.5% (95% confidence interval=80.9-84.0%), significantly inferior to that of the final model (84.0%, 82.6-85.5%). However, the addition of patient characteristics did not improve the LC incidence model performance in the validation cohort (CT model=80.1%, 74.2-86.0%; final model=79.9%, 73.9-85.8%).
Similarly, the final CVD mortality model outperformed the other two models in the derivation cohort (survey model=74.9%, 72.7-77.1%; CT model=76.3%, 74.1-78.5%; final model=79.1%, 77.0-81.2%) but not the validation cohort (survey model=74.8%, 62.2-87.5%; CT model=72.1%, 61.1-83.2%; final model=72.2%, 60.4-84.0%). Combining patient characteristics and CT measures provided the largest increase in accuracy for the COPD mortality final model (92.3%, 90.1-94.5%) compared to either other model individually (survey model=87.5%, 84.3-90.6%; CT model=87.9%, 84.8-91.0%), but no external validation was performed due to a very low event frequency. Conclusions: CT measures of CVD and COPD provides small but reproducible improvements to nodule-based LC risk prediction accuracy from 3 years' onwards. Self-reported patient characteristics may not be of added predictive value when CT information is available.}, + file = {Schr21.pdf:pdf\\Schr21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33574075}, + ss_id = {c4422edc3d7460dc858db3e23cef64136a49628e}, + all_ss_ids = {['c4422edc3d7460dc858db3e23cef64136a49628e']}, + gscites = {6}, +} + +@phdthesis{Schr21a, + author = {Schreuder, Anton}, + title = {Lung cancer screening: use the scan to decide who to scan when}, + year = {2021}, + abstract = {Lung cancer is the number one cause of cancer deaths worldwide. One of the most effective ways to reduce lung cancer mortality is early diagnosis, which can be achieved via CT screening. However, deciding who and when to screen is challenging because it is difficult to estimate a person's risk of developing lung cancer in the future. Screening involves trade-offs: the goal is to diagnose lung cancer in its early stages while performing as few scans as possible. Fewer scans result in lower health-care costs, less work for medical personnel, and a lower burden on participants. By using information from the CT scan, risk models can be developed which can more accurately predict the risk of lung cancer in screening participants. The models and recommendations described in his PhD thesis can be used to personalize screening strategies, where participants with a lower risk are scanned less frequently.}, + copromotor = {C. Jacobs}, + url = {https://hdl.handle.net/2066/231282}, + file = {Schr21a.pdf:pdf\\Schr21a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {C.M. Schaefer-Prokop and B. van Ginneken}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Schr21b, + author = {Schreuder, Anton and Mets, Onno M and Schaefer-Prokop, Cornelia M and Jacobs, Colin and Prokop, Mathias}, + title = {Microsimulation modeling of extended annual CT screening among lung cancer cases in the National Lung Screening Trial}, + journal = LUNGC, + year = {2021}, + volume = {156}, + pages = {5--11}, + doi = {10.1016/j.lungcan.2021.04.004}, + abstract = {Purpose: To microsimulate the effects of three additional annual CT screening rounds on lung cancer (LC) survival in the National Lung Screening Trial (NLST). Methods: We used multiple imputation to model the effect of additional screening in the full NLST cohort on the time to LC diagnosis and on LC death in those participants who were diagnosed with LC by the end of NLST. Nodule growth models were derived from a Dutch in-vivo study. Microsimulations were repeated 500 times. The method was validated by simulating three rounds of CT screening in the original chest radiography (CXR) cohort.
The times up to which the simulations remained within the 95% confidence bands of the CT cohort's original results were used to estimate the validity of the results in the CT cohort with three additional simulated screening rounds. Results: Validation of the simulation approach on the CXR cohort resulted in a LC mortality reduction which remained well within the 95% confidence intervals of the original CT cohort up to 6.5 years after the start of simulations. Simulating additional CT screening in the CT cohort led to LCs being diagnosed earlier than originally, resulting in a relative risk reduction in LC mortality of 11% (95% confidence bands, 7% to 14%) at 6.5 years. This is equivalent to preventing 71% (48% to 94%) more LC deaths than the original CT cohort achieved in comparison to the original CXR cohort. Conclusion: Three additional annual CT screening rounds in the NLST may have led to substantial further LC mortality reduction.}, + file = {Schr21b.pdf:pdf\\Schr21b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33866117}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/233968}, + ss_id = {e6826552a5bff552a12087552097519a614ce0c3}, + all_ss_ids = {['e6826552a5bff552a12087552097519a614ce0c3']}, + gscites = {1}, +} + +@article{Schr21c, + author = {Schreuder, Anton and Prokop, Mathias and Scholten, Ernst T. and Mets, Onno M. and Chung, Kaman and Mohamed Hoesein, Firdaus A. A. and Jacobs, Colin and Schaefer-Prokop, Cornelia M.}, + title = {CT-Detected Subsolid Nodules: A Predictor of Lung Cancer Development at Another Location?}, + journal = {Cancers}, + year = {2021}, + volume = {13}, + number = {11}, + pages = {2812}, + doi = {10.3390/cancers13112812}, + abstract = {The purpose of this case-cohort study was to investigate whether the frequency and computed tomography (CT) features of pulmonary nodules posed a risk for the future development of lung cancer (LC) at a different location. Patients scanned between 2004 and 2012 at two Dutch academic hospitals were cross-linked with the Dutch Cancer Registry. All patients who were diagnosed with LC by 2014 and a random selection of LC-free patients were considered. LC patients who were determined to be LC-free at the time of the scan and all LC-free patients with an adequate scan were included. The nodule count and types (solid, part-solid, ground-glass, and perifissural) were recorded per scan. Age, sex, and other CT measures were included to control for confounding factors. The cohort included 163 LC patients and 1178 LC-free patients. Cox regression revealed that the number of ground-glass nodules and part-solid nodules present were positively correlated to future LC risk. The area under the receiver operating curve of parsimonious models with and without nodule type information were 0.827 and 0.802, respectively. The presence of subsolid nodules in a clinical setting may be a risk factor for future LC development in another pulmonary location in a dose-dependent manner. 
Replication of the results in screening cohorts is required for maximum utility of these findings.}, + file = {Schr21c.pdf:pdf\\Schr21c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34200018}, + ss_id = {53e10b43522e45781bfd4763b49aebba6602c0ac}, + all_ss_ids = {['53e10b43522e45781bfd4763b49aebba6602c0ac']}, + gscites = {1}, +} + +@article{Schr22, + author = {Schreuder, Anton and Jacobs, Colin and Lessmann, Nikolas and Broeders, Mireille JM and Silva, Mario and Isgum, Ivana and de Jong, Pim A and van den Heuvel, Michel and Sverzellati, Nicola and Prokop, Mathias and Pastorino, Ugo and Schaefer-Prokop, Cornelia M and van Ginneken, Bram}, + title = {Scan-based competing death risk model for reevaluating lung cancer computed tomography screening eligibility}, + journal = ERJ, + year = {2022}, + volume = {59}, + number = {5}, + pages = {2101613}, + doi = {10.1183/13993003.01613-2021}, + abstract = {Purpose A baseline CT scan for lung cancer (LC) screening may reveal information indicating that certain LC screening participants can be screened less, and instead require dedicated early cardiac and respiratory clinical input. We aimed to develop and validate competing death (CD) risk models using CT information to identify participants with a low LC and a high CD risk.Methods Participant demographics and quantitative CT measures of LC, cardiovascular disease, and chronic obstructive pulmonary disease were considered for deriving a logistic regression model for predicting five-year CD risk using a sample from the National Lung Screening Trial (n=15 000). Multicentric Italian Lung Detection data was used to perform external validation (n=2287).Results Our final CD model outperformed an external pre-scan model (CDRAT) in both the derivation (Area under the curve=0.744 [95\% confidence interval=0.727 to 0.761] and 0.677 [0.658 to 0.695], respectively) and validation cohorts (0.744 [0.652 to 0.835] and 0.725 [0.633 to 0.816], respectively). By also taking LC incidence risk into consideration, we suggested a risk threshold where a subgroup (6258/23 096, 27\%) was identified with a number needed to screen to detect one LC of 216 (versus 23 in the remainder of the cohort) and ratio of 5.41 CDs per LC case (versus 0.88). The respective values in the validation cohort subgroup (774/2287, 34\%) were 129 (versus 29) and 1.67 (versus 0.43).Conclusions Evaluating both LC and CD risks post-scan may improve the efficiency of LC screening and facilitate the initiation of multidisciplinary trajectories among certain participants.
}, + file = {Schr22.pdf:pdf\\Schr22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34649976}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/252027}, + ss_id = {956fe0cd25879aa006d9885eba32da7100b6d338}, + all_ss_ids = {['956fe0cd25879aa006d9885eba32da7100b6d338']}, + gscites = {3}, +} + +@article{Schu23, + author = {M. Schuurmans and N. Alves and Pierpaolo Vendittelli and H. Huisman and J. Hermans}, + title = {Artificial Intelligence in Pancreatic Ductal Adenocarcinoma Imaging: A Commentary on Potential Future Applications.}, + doi = {10.1053/j.gastro.2023.04.003}, + file = { Schu23.pdf:pdf\\Schu23.pdf:PDF}, + journal = {Gastroenterology}, + optnote = {DIAG, RADIOLOGY}, + pmid = {37054755}, + year = {2023}, + ss_id = {77ac189f7a47c859a4bbee196c1ac6a2ac046338}, + all_ss_ids = {['77ac189f7a47c859a4bbee196c1ac6a2ac046338']}, + gscites = {0}, +} + +@article{Schuurmans22a, + author = {Schuurmans, Megan and Alv\'{a}s, Natalia and Vendittelli, Pierpaolo and Huisman, Henkjan and Hermans, John}, + title = {Setting the Research Agenda for Clinical Artificial Intelligence in Pancreatic Adenocarcinoma Imaging}, + doi = {https://doi.org/10.3390/cancers14143498}, + pages = {3498}, + url = {https://www.mdpi.com/2072-6694/14/14/3498}, + abstract = {Pancreatic ductal adenocarcinoma (PDAC), estimated to become the second leading cause of cancer deaths in western societies by 2030, was flagged as a neglected cancer by the European Commission and the United States Congress. Due to lack of investment in research and development, combined with a complex and aggressive tumour biology, PDAC overall survival has not significantly improved the past decades. Cross-sectional imaging and histopathology play a crucial role throughout the patient pathway. However, current clinical guidelines for diagnostic workup, patient stratification, treatment response assessment, and follow-up are non-uniform and lack evidence-based consensus. Artificial Intelligence (AI) can leverage multimodal data to improve patient outcomes, but PDAC AI research is too scattered and lacking in quality to be incorporated into clinical workflows. This review describes the patient pathway and derives touchpoints for image-based AI research in collaboration with a multi-disciplinary, multi-institutional expert panel. The literature exploring AI to address these touchpoints is thoroughly retrieved and analysed to identify the existing trends and knowledge gaps. The results show absence of multi-institutional, well-curated datasets, an essential building block for robust AI applications. Furthermore, most research is unimodal, does not use state-of-the-art AI techniques, and lacks reliable ground truth.
Based on this, the future research agenda for clinically relevant, image-driven AI in PDAC is proposed.}, + journal = {Cancers}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, + ss_id = {0de05b444cb65b031f018dc44309dca7f7618a7f}, + all_ss_ids = {['0de05b444cb65b031f018dc44309dca7f7618a7f']}, + gscites = {0}, +} + +@conference{Schuurmans22b, + author = {Schuurmans, Megan and Alves, Natalia and Huisman, Henkjan and Hermans, John}, + booktitle = RSNA, + title = {Deep Learning for Detection of Iso-attenuating Pancreatic Adenocarcinoma in Computed Tomography}, + abstract = {Purpose: Investigate the ability of deep learning to localize iso-attenuating pancreatic adenocarcinoma (pCa) in computed tomography (CT) images and assess the effect of including prior anatomy information on localization performance. This retrospective study included contrast-enhanced CT scans in the portal venous phase of 44 patients from the public Medical Segmentation Decathlon dataset, who had a visually iso-attenuating tumour. Two previously developed deep learning (DL) algorithms based on the 3D nnUnet framework were applied to the set of iso-attenuating lesions: one considering only tumour information for training (nnUnet_T) and one considering surrounding anatomical structures, namely the pancreas parenchyma, common bile duct, pancreatic duct, arteries and veins in addition to the tumor (nnUNet_MS). Each model creates 10 different outputs, based on 2 random initialisations and 5-fold crossvalidation. The performance of the two models at pCa localization was evaluated with average precision and a permutation test was performed to show statistical significance. Results: The average precision for the nnUNet_MS was 80.03 % +- 8.6%, while for the nnUnet_T model it was 72.87% +- 7.1% (p<0.05). This indicates that surrounding anatomy aids the localization of iso-attenuating pCa lesions. By having access to prior anatomy information during training the network can better focus on the regions of the image where the tumor is located, avoiding confusion with other anatomical structures. Conclusion: In current clinical workup the detection of iso-attenuating pCa lesions on CT is very challenging, as the attenuation is similar to the pancreas parenchyma. The proposed 3D nnUnet_MS model can be used to detect and localize iso-attenuating lesions. Clinical relevance statement: Iso-attenuating pCa is linked to earlier stages of disease and better outcome, but is challenging to detect on CT. DL can accurately detect iso-attenuating lesions and benefits from anatomy information.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@inproceedings{Schw13, + author = {J. Schwaab and Y. Diez and J. Mart\'{i} and R. Mart\'{i} and J. van Zelst and B. Platel and T. Tan and J. Gregori and S. Wirtz and J. Kramme and M. G\"unther}, + title = {Image Quality in automated breast ultrasound images: a preliminary study for the development of automated image quality assessment}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2013}, + file = {Schw13.pdf:pdf\\Schw13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {17918216720968280397}, + gscites = {3}, +} + +@inproceedings{Schw15, + author = {Schwaab, J. and Gubern-M\'{e}rida, A. and Wang, L.
and Gunther, M.}, + title = {Automatic assessment of nipple position in {A}utomated 3{D} {B}reast {U}ltrasound images}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2015}, + file = {Schw15.pdf:pdf\\Schw15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Schw16, + author = {Schwaab, Julia and Diez, Yago and Oliver, Arnau and Marti, Robert and van Zelst, Jan and Gubern-Merida, Albert and Mourri, Ahmed Bensouda and Gregori, Johannes and Gunther, Matthias}, + title = {Automated quality assessment in three-dimensional breast ultrasound images}, + journal = JMI, + year = {2016}, + volume = {3}, + issue = {2}, + month = {4}, + pages = {027002}, + doi = {10.1117/1.JMI.3.2.027002}, + abstract = {Automated three-dimensional breast ultrasound (ABUS) is a valuable adjunct to x-ray mammography for breast cancer screening of women with dense breasts. High image quality is essential for proper diagnostics and computer-aided detection. We propose an automated image quality assessment system for ABUS images that detects artifacts at the time of acquisition. Therefore, we study three aspects that can corrupt ABUS images: the nipple position relative to the rest of the breast, the shadow caused by the nipple, and the shape of the breast contour on the image. Image processing and machine learning algorithms are combined to detect these artifacts based on 368 clinical ABUS images that have been rated manually by two experienced clinicians. At a specificity of 0.99, 55% of the images that were rated as low quality are detected by the proposed algorithms. The areas under the ROC curves of the single classifiers are 0.99 for the nipple position, 0.84 for the nipple shadow, and 0.89 for the breast contour shape. The proposed algorithms work fast and reliably, which makes them adequate for online evaluation of image quality during acquisition. The presented concept may be extended to further image modalities and quality aspects.}, + file = {Schw16.pdf:pdf\\Schw16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27158633}, +} + +@inproceedings{Seab10, + author = {Seabra, Jose and Sanches, Joao and Ciompi, Francesco and Radeva, Petia}, + title = {Ultrasonographic plaque characterization using a rayleigh mixture model}, + booktitle = ISBI, + year = {2010}, + pages = {1--4}, + url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5490428}, + abstract = {A correct modelling of tissue morphology is determinant for the identification of vulnerable plaques. This paper aims at describing the plaque composition by means of a Rayleigh Mixture Model applied to ultrasonic data. The effectiveness of using a mixture of distributions is established through synthetic and real ultrasonic data samples. Furthermore, the proposed mixture model is used in a plaque classification problem in Intravascular Ultrasound (IVUS) images of coronary plaques. A classifier tested on a set of 67 in-vitro plaques, yields an overall accuracy of 86% and sensitivity of 92%, 94% and 82%, for fibrotic, calcified and lipidic tissues, respectively. 
These results strongly suggest that different plaques types can be distinguished by means of the coefficients and Rayleigh parameters of the mixture distribution.}, + file = {Seab10.pdf:pdf\\Seab10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {6853869133266746055}, + gscites = {11}, +} + +@article{Seab11, + author = {Seabra, Jose and Ciompi, Francesco and Pujol, Oriol and Mauri, Josepa and Radeva, Petia and Sanches, Joao}, + title = {Rayleigh mixture model for plaque characterization in intravascular ultrasound}, + journal = TBME, + year = {2011}, + volume = {58}, + pages = {1314--1324}, + url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5688445}, + abstract = {Vulnerable plaques are the major cause of carotid and coronary vascular problems, such as heart attack or stroke. A correct modelling of plaque echo-morphology and composition can help the identification of such lesions. The Rayleigh distribution is widely used to describe (nearly) homogeneous areas in ultrasound images. Since plaques may contain tissues with heterogeneous regions more complex distributions depending on multiple parameters are usually needed, such as Rice, K or Nakagami distributions. In such cases, the problem formulation becomes more complex and the optimization procedure to estimate the plaque echo-morphology is more difficult. Here we propose to model the tissue echo-morphology by means of a mixture of Rayleigh distributions, known as Rayleigh Mixture Model. The problem formulation is still simple but its ability to describe complex textural patterns is very powerful. In this paper we present a method for the automatic estimation of the RMM mixture parameters by means of the Expectation Maximization algorithm which aims at characterizing tissue echomorphology in ultrasound. The performance of the proposed model is evaluated with a database of in-vitro IVUS cases. We show that the mixture coefficients and Rayleigh parameters explicitly derived from the mixture model are able to accurately describe different plaque types and to significantly improve the characterization performance of an already existing methodology.}, + file = {Seab11.pdf:pdf\\Seab11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + publisher = {IEEE}, + month = {5}, + gsid = {9249766986013401268}, + gscites = {80}, +} + +@article{Sech20, + author = {Ioannis Sechopoulos and Jonas Teuwen and Ritse Mann}, + title = {Artificial Intelligence for Breast Cancer Detection in Mammography: state of the art}, + doi = {10.1016/j.semcancer.2020.06.002}, + abstract = {Screening for breast cancer with mammography has been introduced in various countries over the last 30 years, initially using analog screen-film-based systems and, over the last 20 years, transitioning to the use of fully digital systems. With the introduction of digitization, the computer interpretation of images has been a subject of intense interest, resulting in the introduction of computer-aided detection (CADe) and diagnosis (CADx) algorithms in the early 2000's. Although they were introduced with high expectations, the potential improvement in the clinical realm failed to materialize, mostly due to the high number of false positive marks per analyzed image. + + In the last five years, the artificial intelligence (AI) revolution in computing, driven mostly by deep learning and convolutional neural networks, has also pervaded the field of automated breast cancer detection in digital mammography and digital breast tomosynthesis. 
Research in this area first involved comparison of its capabilities to that of conventional CADe/CADx methods, which quickly demonstrated the potential of this new technology. In the last couple of years, more mature and some commercial products have been developed, and studies of their performance compared to that of experienced breast radiologists are showing that these algorithms are on par with human-performance levels in retrospective data sets. Although additional studies, especially prospective evaluations performed in the real screening environment, are needed, it is becoming clear that AI will have an important role in the future breast cancer screening realm. Exactly how this new player will shape this field remains to be determined, but recent studies are already evaluating different options for implementation of this technology. + + The aim of this review is to provide an overview of the basic concepts and developments in the field AI for breast cancer detection in digital mammography and digital breast tomosynthesis. The pitfalls of conventional methods, and how these are, for the most part, avoided by this new technology, will be discussed. Importantly, studies that have evaluated the current capabilities of AI and proposals for how these capabilities should be leveraged in the clinical realm will be reviewed, while the questions that need to be answered before this vision becomes a reality are posed.}, + file = {Sech20.pdf:pdf/Sech20.pdf:PDF}, + journal = {Seminars in Cancer Biology}, + optnote = {DIAG, INPRESS, RADIOLOGY}, + pmid = {32531273}, + year = {2020}, + month = {6}, + ss_id = {4cfea5ab26c5824d616fa8e2562c0666ecee55aa}, + all_ss_ids = {['4cfea5ab26c5824d616fa8e2562c0666ecee55aa']}, + gscites = {95}, +} + +@article{Seku20, + author = {Anjany Sekuboyina and Malek E. Husseini and Amirhossein Bayat and Maximilian Loffler and Hans Liebl and Hongwei Li and Giles Tetteh and Jan Kukacka and Christian Payer and Darko Stern and Martin Urschler and Maodong Chen and Dalong Cheng and Nikolas Lessmann and Yujin Hu and Tianfu Wang and Dong Yang and Daguang Xu and Felix Ambellan and Tamaz Amiranashvili and Moritz Ehlke and Hans Lamecker and Sebastian Lehnert and Marilia Lirio and Nicolas Perez de Olaguer and Heiko Ramm and Manish Sahu and Alexander Tack and Stefan Zachow and Tao Jiang and Xinjun Ma and Christoph Angerman and Xin Wang and Kevin Brown and Alexandre Kirszenberg and Elodie Puybareau and Di Chen and Yiwei Bai and Brandon H. Rapazzo and Timyoas Yeah and Amber Zhang and Shangliang Xu and Feng Hou and Zhiqiang He and Chan Zeng and Zheng Xiangshang and Xu Liming and Tucker J. Netherton and Raymond P. Mumme and Laurence E. Court and Zixun Huang and Chenhang He and Li-Wen Wang and Sai Ho Ling and Le Duy Huynh and Nicolas Boutry and Roman Jakubicek and Jiri Chmelik and Supriti Mulay and Mohanasankar Sivaprakasam and Johannes C. Paetzold and Suprosanna Shit and Ivan Ezhov and Benedikt Wiestler and Ben Glocker and Alexander Valentinitsch and Markus Rempfler and Bjorn H. Menze and Jan S. Kirschke}, + title = {Ver{S}e: A Vertebrae labelling and segmentation benchmark for multi-detector {CT} images}, + journal = MIA, + year = {2021}, + volume = {73}, + pages = {102166}, + doi = {10.1016/j.media.2021.102166}, + abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. 
Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse}, + file = {Seku20.pdf:pdf/Seku20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {4525652165222884760}, + gscites = {113}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/238698}, + ss_id = {4882a6d4e40e048921d04570bec76a217b35b25f}, + all_ss_ids = {['4882a6d4e40e048921d04570bec76a217b35b25f']}, +} + +@article{Selv06, + author = {S. E. Selvan and C. C. Xavier and N. Karssemeijer and J. Sequeira and R. A. Cherian and B. Y. Dhala}, + title = {Parameter estimation in stochastic mammogram model by heuristic optimization techniques}, + journal = TITB, + year = {2006}, + volume = {10}, + number = {4}, + pages = {685--695}, + doi = {10.1109/TITB.2006.874197}, + abstract = {{T}he appearance of disproportionately large amounts of high-density breast parenchyma in mammograms has been found to be a strong indicator of the risk of developing breast cancer. {H}ence, the breast density model is popular for risk estimation or for monitoring breast density change in prevention or intervention programs. {H}owever, the efficiency of such a stochastic model depends on the accuracy of estimation of the model's parameter set. {W}e propose a new approach-heuristic optimization-to estimate more accurately the model parameter set as compared to the conventional and popular expectation-maximization ({EM}) algorithm. {A}fter initial segmentation of a given mammogram, the finite generalized {G}aussian mixture ({FGGM}) model is constructed by computing the statistics associated with different image regions. {T}he model parameter set thus obtained is estimated by particle swarm optimization ({PSO}) and evolutionary programming ({EP}) techniques, where the objective function to be minimized is the relative entropy between the image histogram and the estimated density distributions. 
{W}hen our heuristic approach was applied to different categories of mammograms from the {M}ini-{MIAS} database, it yielded lower floor of estimation error in 109 out of 112 cases (97.3%), and 101 out of 102 cases (99.0%), for the number of image regions being five and eight, respectively, with the added advantage of faster convergence rate, when compared to the {EM} approach. {B}esides, the estimated density model preserves the number of regions specified by the information-theoretic criteria in all the test cases, and the assessment of the segmentation results by radiologists is promising.}, + file = {Selv06.pdf:pdf/Selv06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {17044402}, + month = {10}, + gsid = {17201149467712925424}, + gscites = {31}, + ss_id = {49c24eee5a4beae2052e774c275e3a5a2411924e}, + all_ss_ids = {['49c24eee5a4beae2052e774c275e3a5a2411924e']}, +} + +@inproceedings{Seti13, + author = {Arnaud A. A. Setio and Fons van der Sommen and Svitlana Zinger and Erik J. Schoon and Peter H. N. de With}, + title = {Evaluation and Comparison of Textural Feature Representation for the Detection of Early Stage Cancer in Endoscopy}, + booktitle = {Proc. VISAPP}, + year = {2013}, + pages = {238-243}, + abstract = {Esophageal cancer is the fastest rising type of cancer in the Western world. The novel technology of High Definition (HD) endoscopy enables physicians to find texture patterns related to early cancer. It encourages the development of a Computer-Aided Decision (CAD) system in order to help physicians with faster identification of early cancer and decrease the miss rate. However, an appropriate texture feature extraction, which is needed for classification, has not been studied yet. In this paper, we compare several techniques for texture feature extraction, including co-occurrence matrix features, LBP and Gabor features and evaluate their performance in detecting early stage cancer in HD endoscopic images. In order to exploit more image characteristics, we introduce an efficient combination of the texture and color features. Furthermore, we add a specific preprocessing step designed for endoscopy images, which improves the classification accuracy. After reducing the feature dimensionality using Principal Component Analysis (PCA), we classify selected features with a Support Vector Machine (SVM). The experimental results validated by an expert gastroenterologist show that the proposed feature extraction is promising and reaches a classification accuracy up to 96.48%.}, + file = {Seti13.pdf:pdf\\Seti13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Seti14, + author = {Arnaud Arindra Adiyoso Setio and Jaap Gelderblom and Colin Jacobs and Bram van Ginneken}, + title = {Automatic Detection of Large Pulmonary Nodules in Thoracic {CT} Images}, + booktitle = RSNA, + year = {2014}, + abstract = {PURPOSE: Existing computer-aided detection (CAD) systems excel at finding small nodules but often fail to detect the much rarer larger nodules. However, these large nodules are highly suspicious for being cancer. Therefore, we developed a CAD system specifically designed to detect large nodules. METHOD AND MATERIALS: Data from the publicly available LIDC/IDRI database was used. CT scans with section thickness over 2.5 mm were excluded. We selected all scans in which at least one of the four radiologists who read each case, annotated a solid nodule larger than 10 mm. The detection pipeline is initiated by a three-dimensional lung segmentation algorithm. 
Large nodules attached to the pleural wall are often excluded in this segmentation. Therefore, a rolling-ball algorithm was applied to the lung segmentation to include these nodules. The detection of nodule candidates was performed using a cascade of double threshold on intensity, followed by morphological operation. Connected component analysis was subsequently applied to get initial nodule candidates. The segmentation of the initial candidates was refined using a previously published nodule segmentation method. For each candidate, a total of nine intensity and shape features were extracted. A Support Vector Machine (SVM) classifier with a radial basis function was used to classify nodule candidates and performance was evaluated using a 10-fold cross-validation scheme. CAD marks on nodules annotated by all four radiologists were counted as true positives. CAD marks on nodules annotated by less than four radiologists were ignored in the analysis. Other CAD marks were considered false positives. RESULTS: In 271 scans, 208 large nodules were annotated by all four radiologists. The candidate detection stage detected 98.6% (205/208) of the large nodules, with an average of 44.6 false positives per scan. After classification, the CAD system achieved a sensitivity of 95.7% (199/208) and 84.6% (176/208) at 7.5 and 1.0 false positives per scan, respectively. CONCLUSION: A dedicated CAD system for large pulmonary nodules can identify the vast majority of highly suspicious lesions in thoracic CT scans with a small number of false positives. CLINICAL RELEVANCE/APPLICATION: As computers start to gain a more important role in CT lung cancer screening, it is vital that CAD reaches a high sensitivity in the detection of large nodules, which are likely to be cancer.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Seti15, + author = {Arnaud A. A. Setio and Colin Jacobs and Francesco Ciompi and Sarah J. van Riel and Mathilde Marie Winkler Wille and Asger Dirksen and Eva M. van Rikxoort and Bram van Ginneken}, + title = {Computer-aided detection of lung cancer: combining pulmonary nodule detection systems with a tumor risk prediction model}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + doi = {10.1117/12.2080955}, + abstract = {Computer-Aided Detection (CAD) has been shown to be a promising tool for automatic detection of pulmonary nodules from computed tomography (CT) images. However, the vast majority of detected nodules are benign and do not require any treatment. For effective implementation of lung cancer screening programs, accurate identification of malignant nodules is the key. We investigate strategies to improve the performance of a CAD system in detecting nodules with a high probability of being cancers. Two strategies were proposed: (1) combining CAD detections with a recently published lung cancer risk prediction model and (2) the combination of multiple CAD systems. First, CAD systems were used to detect the nodules. Each CAD system produces markers with a certain degree of suspicion. Next, the malignancy probability was automatically computed for each marker, given nodule characteristics measured by the CAD system. Last, CAD degree of suspicion and malignancy probability were combined using the product rule. We evaluated the method using 62 nodules which were proven to be malignant cancers, from 180 scans of the Danish Lung Cancer Screening Trial. The malignant nodules were considered as positive samples, while all other findings were considered negative. 
Using a product rule, the best proposed system achieved an improvement in sensitivity, compared to the best individual CAD system, from 41.9% to 72.6% at 2 false positives (FPs)/scan and from 56.5% to 88.7% at 8 FPs/scan. Our experiment shows that combining a nodule malignancy probability with multiple CAD systems can increase the performance of computerized detection of lung cancer.}, + file = {Seti15.pdf:pdf\\Seti15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {94141O}, + month = {3}, + gsid = {4260948689623411701}, + gscites = {4}, + ss_id = {f59a55d1e7cb49f8b2062df48ecd267b24191aa8}, + all_ss_ids = {['f59a55d1e7cb49f8b2062df48ecd267b24191aa8']}, +} + +@article{Seti15a, + author = {Arnaud A. A. Setio and Colin Jacobs and Jaap Gelderblom and Bram van Ginneken}, + title = {Automatic detection of large pulmonary solid nodules in thoracic {CT} images}, + journal = MP, + year = {2015}, + volume = {42}, + pages = {5642--5653}, + doi = {10.1118/1.4929562}, + abstract = {Current computer-aided detection (CAD) systems for pulmonary nodules in computed tomography (CT) scans have a good performance for relatively small nodules, but often fail to detect the much rarer larger nodules, which are more likely to be cancerous. We present a novel CAD system specifically designed to detect solid nodules larger than 10 mm.The proposed detection pipeline is initiated by a three-dimensional lung segmentation algorithm optimized to include large nodules attached to the pleural wall via morphological processing. An additional preprocessing is used to mask out structures outside the pleural space to ensure that pleural and parenchymal nodules have a similar appearance. Next, nodule candidates are obtained via a multistage process of thresholding and morphological operations, to detect both larger and smaller candidates. After segmenting each candidate, a set of 24 features based on intensity, shape, blobness, and spatial context are computed. A radial basis support vector machine (SVM) classifier was used to classify nodule candidates, and performance was evaluated using ten-fold cross-validation on the full publicly available lung image database consortium database.The proposed CAD system reaches a sensitivity of 98.3\% (234/238) and 94.1\% (224/238) large nodules at an average of 4.0 and 1.0 false positives/scan, respectively.The authors conclude that the proposed dedicated CAD system for large pulmonary nodules can identify the vast majority of highly suspicious lesions in thoracic CT scans with a small number of false positives.}, + file = {Seti15a.pdf:pdf\\Seti15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {26429238}, + publisher = {American Association of Physicists in Medicine}, + month = {9}, + gsid = {14121163271329355688}, + gscites = {138}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/154609}, + ss_id = {c973f1bff6304e0e6d4656d1379d1414929b782f}, + all_ss_ids = {['c973f1bff6304e0e6d4656d1379d1414929b782f']}, +} + +@article{Seti16, + author = {A. A. A. Setio and F. Ciompi and G. Litjens and P. Gerke and C. Jacobs and S. van Riel and M. Winkler Wille and M. Naqibullah and C. I. S\'{a}nchez and B. 
van Ginneken}, + title = {Pulmonary nodule detection in {CT} images: false positive reduction using multi-view convolutional networks}, + journal = TMI, + year = {2016}, + volume = {35}, + number = {5}, + pages = {1160-1169}, + doi = {10.1109/TMI.2016.2536809}, + abstract = {We propose a novel Computer-Aided Detection (CAD) system for pulmonary nodules using multi-view convolutional networks (ConvNets), for which discriminative features are automatically learnt from the training data. The network is fed with nodule candidates obtained by combining three candidate detectors specifically designed for solid, subsolid, and large nodules. For each candidate, a set of 2-D patches from differently oriented planes is extracted. The proposed architecture comprises multiple streams of 2-D ConvNets, for which the outputs are combined using a dedicated fusion method to get the final classification. Data augmentation and dropout are applied to avoid overfitting. On 888 scans of the publicly available LIDCIDRI dataset, our method reaches high detection sensitivities of 85.4% and 90.1% at 1 and 4 false positives per scan, respectively. An additional evaluation on independent datasets from the ANODE09 challenge and DLCST is performed. We showed that the proposed multi-view ConvNets is highly suited to be used for false positive reduction of a CAD system.}, + file = {Seti16.pdf:pdf\\Seti16.pdf:PDF}, + optnote = {DIAG}, + pmid = {26955024}, + month = {5}, + gsid = {8158189024082045415}, + gscites = {972}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/164462}, + ss_id = {06c8b4c1dd0c045bc6a08d0062c5042b5588d55b}, + all_ss_ids = {['06c8b4c1dd0c045bc6a08d0062c5042b5588d55b']}, +} + +@article{Seti17, + author = {Setio, Arnaud Arindra Adiyoso and Traverso, Alberto and de Bel, Thomas and Berens, Moira S N and Bogaard, Cas van den and Cerello, Piergiorgio and Chen, Hao and Dou, Qi and Fantacci, Maria Evelina and Geurts, Bram and Gugten, Robbert van der and Heng, Pheng Ann and Jansen, Bart and de Kaste, Michael M J and Kotov, Valentin and Lin, Jack Yu-Hung and Manders, Jeroen T M C and Sonora-Mengana, Alexander and Garcia-Naranjo, Juan Carlos and Papavasileiou, Evgenia and Prokop, Mathias and Saletta, Marco and Schaefer-Prokop, Cornelia M and Scholten, Ernst T and Scholten, Luuk and Snoeren, Miranda M and Torres, Ernesto Lopez and Vandemeulebroucke, Jef and Walasek, Nicole and Zuidhof, Guido C A and Ginneken, Bram van and Jacobs, Colin}, + title = {Validation, comparison, and combination of algorithms for automatic detection of pulmonary nodules in computed tomography images: The {LUNA16} challenge}, + journal = MIA, + year = {2017}, + volume = {42}, + pages = {1-13}, + doi = {10.1016/j.media.2017.06.015}, + url = {https://arxiv.org/abs/1612.08012}, + abstract = {Automatic detection of pulmonary nodules in thoracic computed tomography (CT) scans has been an active area of research for the last two decades. However, there have only been few studies that provide a comparative performance evaluation of different systems on a common database. We have therefore set up the LUNA16 challenge, an objective evaluation framework for automatic nodule detection algorithms using the largest publicly available reference database of chest CT scans, the LIDC-IDRI data set. 
In LUNA16, participants develop their algorithm and upload their predictions on 888 CT scans in one of the two tracks: 1) the complete nodule detection track where a complete CAD system should be developed, or 2) the false positive reduction track where a provided set of nodule candidates should be classified. This paper describes the setup of LUNA16 and presents the results of the challenge so far. Moreover, the impact of combining individual systems on the detection performance was also investigated. It was observed that the leading solutions employed convolutional networks and used the provided set of nodule candidates. The combination of these solutions achieved an excellent sensitivity of over 95% at fewer than 1.0 false positives per scan. This highlights the potential of combining algorithms to improve the detection performance. Our observer study with four expert readers has shown that the best system detects nodules that were missed by expert readers who originally annotated the LIDC-IDRI data. We released this set of additional nodules for further development of CAD systems.}, + file = {Seti17.pdf:pdf\\Seti17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28732268}, + publisher = {Elsevier}, + month = {12}, + gsid = {2084538205545992787}, + gscites = {774}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/179531}, + ss_id = {7975bfe13445f81edddd6e101d7d469cd20cbd89}, + all_ss_ids = {['7975bfe13445f81edddd6e101d7d469cd20cbd89']}, +} + +@phdthesis{Seti18, + author = {Arnaud Arindra Adiyoso Setio}, + title = {Computer-aided diagnosis in thoracic CT scans for lung cancer screening}, + url = {http://hdl.handle.net/2066/191619}, + abstract = {Lung cancer is the leading cause of cancer death worldwide. With an approximated 155,870 deaths, lung cancer accounts for 1 in 4 mortalities caused by cancer. The 5-year survival rate of subjects diagnosed with lung cancer is only 18.1%. Only when lung cancers are diagnosed in an early stage, treatment options are better and the 5-year survival is 55%. To reduce the high mortality rate, there is a strong need to detect subjects with lung cancer early.}, + copromotor = {C. Jacobs}, + file = {:pdf/seti18.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken and M. Prokop}, + school = {Radboud University, Nijmegen}, + year = {2018}, + journal = {PhD thesis}, +} + +@mastersthesis{Sheikh22a, + author = {Sheikh Adilina and Anindo Saha and Henkjan Huisman}, + title = {Domain Generalization for Prostate Cancer Detection in MRI}, + abstract = {Modern computer-aided detection/diagnosis (CAD) based on deep learning algorithms achieve high results in detection of prostate cancer in magnetic resonance imaging (MRI). However, the performance of these algorithms drop when the testing cases are taken from a different domain (i.e. samples acquired using a different MRI scanner). In this research, we have investigated the performances of the state-of-the-art domain generalization techniques beginning from the simple solutions like histogram matching to the more advanced deep learning based models like CycleGAN. We do not introduce any new novel method in this study rather we have reapplied the current state-of-the-art techniques and compared the performances. From our experimental results, we have deduced that simple solutions are not adequate to capture the complexity of medical images and hence fail to obtain domain generalization. 
We have to rely on advanced techniques that take into account not just the intensity information but also the spatial information to achieve our goal.}, + url = {http://eia.udg.edu/~aoliver/maiaDocs/bookMaia5th_small2.pdf}, + optnote = {DIAG, RADIOLOGY}, + school = {University of Girona, UNICAS, University of Bourgogne, Radboud University Medical Center}, + year = {2022}, + journal = {Master thesis}, +} + +@article{Sher22, + author = {Sherman, Mark E. and de Bel, Thomas and Heckman, Michael G. and White, Launia J. and Ogony, Joshua and Stallings-Mann, Melody and Hilton, Tracy and Degnim, Amy C. and Vierkant, Robert A. and Hoskin, Tanya and Jensen, Matthew R. and Pacheco-Spann, Laura and Henry, Jill E. and Storniolo, Anna Maria and Carter, Jodi M. and Winham, Stacey J. and Radisky, Derek C. and van der Laak, Jeroen}, + title = {Serum hormone levels and normal breast histology among premenopausal women}, + doi = {10.1007/s10549-022-06600-9}, + year = {2022}, + abstract = {Breast terminal duct lobular units (TDLUs) are the main source of breast cancer (BC) precursors. Higher serum concentrations of hormones and growth factors have been linked to increased TDLU numbers and to elevated BC risk, with variable effects by menopausal status. We assessed associations of circulating factors with breast histology among premenopausal women using artificial intelligence (AI) and preliminarily tested whether parity modifies associations. Pathology AI analysis was performed on 316 digital images of H&E-stained sections of normal breast tissues from Komen Tissue Bank donors ages <= 45 years to assess 11 quantitative metrics. Associations of circulating factors with AI metrics were assessed using regression analyses, with inclusion of interaction terms to assess effect modification. Higher prolactin levels were related to larger TDLU area (p < 0.001) and increased presence of adipose tissue proximate to TDLUs (p < 0.001), with less significant positive associations for acini counts (p = 0.012), dilated acini (p = 0.043), capillary area (p = 0.014), epithelial area (p = 0.007), and mononuclear cell counts (p = 0.017). Testosterone levels were associated with increased TDLU counts (p < 0.001), irrespective of parity, but associations differed by adipose tissue content. AI data for TDLU counts generally agreed with prior visual assessments. Among premenopausal women, serum hormone levels linked to BC risk were also associated with quantitative features of normal breast tissue. These relationships were suggestively modified by parity status and tissue composition. We conclude that the microanatomic features of normal breast tissue may represent a marker of BC risk.}, + url = {http://dx.doi.org/10.1007/s10549-022-06600-9}, + file = {Sher22.pdf:pdf\Sher22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Breast Cancer Research and Treatment}, + citation-count = {0}, + automatic = {yes}, + pages = {149-158}, + volume = {194}, +} + +@article{Sido23, + author = {Sidorenkov, Grigory and Stadhouders, Ralph and Jacobs, Colin and Mohamed Hoesein, Firdaus A. A. and Gietema, Hester A. and Nackaerts, Kristiaan and Saghir, Zaigham and Heuvelmans, Marjolein A. and Donker, Hylke C. and Aerts, Joachim G. and Vermeulen, Roel and Uitterlinden, Andre and Lenters, Virissa and van Rooij, Jeroen and Schaefer-Prokop, Cornelia and Groen, Harry J. M. and de Jong, Pim A. and Cornelissen, Robin and Prokop, Mathias and de Bock, Geertruida H. 
and Vliegenthart, Rozemarijn}, + title = {Multi-source data approach for personalized outcome prediction in lung cancer screening: update from the NELSON trial.}, + doi = {10.1007/s10654-023-00975-9}, + abstract = {Trials show that low-dose computed tomography (CT) lung cancer screening in long-term (ex-)smokers reduces lung cancer mortality. However, many individuals were exposed to unnecessary diagnostic procedures. This project aims to improve the efficiency of lung cancer screening by identifying high-risk participants, and improving risk discrimination for nodules. This study is an extension of the Dutch-Belgian Randomized Lung Cancer Screening Trial, with a focus on personalized outcome prediction (NELSON-POP). New data will be added on genetics, air pollution, malignancy risk for lung nodules, and CT biomarkers beyond lung nodules (emphysema, coronary calcification, bone density, vertebral height and body composition). The roles of polygenic risk scores and air pollution in screen-detected lung cancer diagnosis and survival will be established. The association between the AI-based nodule malignancy score and lung cancer will be evaluated at baseline and incident screening rounds. The association of chest CT imaging biomarkers with outcomes will be established. Based on these results, multisource prediction models for pre-screening and post-baseline-screening participant selection and nodule management will be developed. The new models will be externally validated. We hypothesize that we can identify 15-20% participants with low-risk of lung cancer or short life expectancy and thus prevent ~140,000 Dutch individuals from being screened unnecessarily. We hypothesize that our models will improve the specificity of nodule management by 10% without loss of sensitivity as compared to assessment of nodule size/growth alone, and reduce unnecessary work-up by 40-50%.}, + file = {Sido23.pdf:pdf\\Sido23.pdf:PDF}, + journal = {European journal of epidemiology}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36943671}, + year = {2023}, + volume = {38}, + number = {4}, + pages = {445--454}, + ss_id = {42f306e5fe8d8a22bc805f0f98e3fd2c292f06ac}, + all_ss_ids = {['42f306e5fe8d8a22bc805f0f98e3fd2c292f06ac']}, + gscites = {0}, +} + +@article{Sier20, + author = {M. M. Sieren and F. Brenne and A. Hering and H. Kienapfel and N. Gebauer and T. H. Oechtering and A. F\"{u}rschke and F. Wegner and E. Stahlberg and S. Heldmann and J. Barkhausen and A. Frydrychowicz}, + title = {Rapid study assessment in follow-up whole-body computed tomography in patients with multiple myeloma using a dedicated bone subtraction software}, + journal = ER, + year = {2020}, + doi = {10.1007/s00330-019-06631-9}, + volume = 30, + pages = {3198-3209}, + abstract = {Objectives + The diagnostic reading of follow-up low-dose whole-body computed tomography (WBCT) examinations in patients with multiple myeloma (MM) is a demanding process. This study aimed to evaluate the diagnostic accuracy and benefit of a novel software program providing rapid-subtraction maps for bone lesion change detection. + + Methods + Sixty patients (66 years +- 10 years) receiving 120 WBCT examinations for follow-up evaluation of MM bone disease were identified from our imaging archive. The median follow-up time was 292 days (range 200-641 days). Subtraction maps were calculated from 2-mm CT images using a nonlinear deformation algorithm. Reading time, correctly assessed lesions, and disease classification were compared to a standard reading software program. 
De novo clinical reading by a senior radiologist served as the reference standard. Statistics included Wilcoxon rank-sum test, Cohen's kappa coefficient, and calculation of sensitivity, specificity, positive/negative predictive value, and accuracy. + + Results + Calculation time for subtraction maps was 84 s +- 24 s. Both readers reported exams faster using subtraction maps (reader A, 438 s +- 133 s; reader B, 1049 s +- 438 s) compared to PACS software (reader A, 534 s +- 156 s; reader B, 1486 s +- 587 s; p < 0.01). The course of disease was correctly classified by both methods in all patients. Sensitivity for lesion detection in subtraction maps/conventional reading was 92%/80% for reader A and 88%/76% for reader B. Specificity was 98%/100% for reader A and 95%/96% for reader B. + + Conclusion + A software program for the rapid-subtraction map calculation of follow-up WBCT scans has been successfully tested and seems suited for application in clinical routine. Subtraction maps significantly facilitated reading of WBCTs by reducing reading time and increasing sensitivity.}, + file = {Sier20.pdf:pdf\\Sier20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32048038}, + month = {2}, +} + +@conference{Silv17, + author = {M. Silva and G. Capretti and N. Sverzellati and C. Jacobs and F. Ciompi and B. van Ginneken and C.M. Schaefer-Prokop and A. Marchian\`o and U. Pastorino}, + title = {Subsolid and part-solid nodules in lung cancer screening: comparison between visual and computer-aided detection}, + booktitle = ECR, + year = {2017}, + abstract = {Purpose: To compare visual detection and computer aided diagnosis (CAD) for detection of non-solid nodules (NSN) and part-solid nodules (PSN), in a lung cancer screening trial. Methods and Materials: Baseline low-dose computed tomography (LDCT) of 2303 subjects were assessed by 2 independent operators: a) visual detection (VD); b) CAD software (CIRRUS Lung Screening). CAD was run also on first and second incidence round. Rate of agreement was calculated by weighted k test. Sensitivity and negative predictive value (NPV) were calculated according to cumulative number of subjects with detected nodules (VD and CAD). LDCT features were compared between CAD-only and VD-only detected nodules. Results: Nodules were detected in 215/2303 subjects (dominant nodule: 171/215 NSN and 44/215 PSN), notably 149 were CAD-only detected (113 NSN and 36 PSN), 27 were VD-only detected (25 NSN and 2 PSN), and 39 were CAD and VD detected (33 NSN and 6 PSN). The agreement was fair (weighted k=0.276). Sensitivity and NPV for CAD 87.4\% and 98.7\%, and for VD 30.7\% and 93.3\%. Automatic and manual caliper were similar for the assessment of maximum diameter (p=0.111). CAD-only and VD-only detected nodules showed similar nodule diameter (p=0.727) and proportion of PSN (p=0.073). Volume of CAD-only detected nodules was significantly smaller (p=0.019). Among the 27 VD-only detected nodules, 11 NSN were subsequently detected by CAD at the incidence rounds. Conclusion: Detection of NSN and PSN showed only fair agreement between VD and CAD. 
Combined assessment of LDCT by both VD and CAD is needed to achieve optimal sensitivity.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Silv17a, + author = {Mario Silva and Giovanni Capretti and Nicola Sverzellati and Colin Jacobs and Francesco Ciompi and Bram van Ginneken and Cornelia M Schaefer-Prokop and Mathias Prokop and Alfonso Marchiano and Ugo Pastorino}, + title = {Non-solid and Part-solid Nodules: Comparison Between Visual and Computer Aided Detection}, + booktitle = {World Congress of Thoracic Imaging}, + year = {2017}, + abstract = {Purpose: To compare visual detection and computer aided diagnosis (CAD) for detection of non-solid nodules (NSN) and part-solid nodules (PSN), in a lung cancer screening trial. Material and Method: Baseline low-dose computed tomography (LDCT) of 2303 subjects were assessed by 2 independent operators: a) visual detection (VD); b) CAD software (CIRRUS Lung Screening). CAD was run also on first and second incidence round. The rate of agreement was calculated by weighted k test. Sensitivity and negative predictive value (NPV) were calculated according to the cumulative number of subjects with detected nodules (VD and CAD). LDCT features were compared between CAD-only and VD-only detected nodules. Results: Nodules were detected in 215/2303 subjects (dominant nodule: 171/215 NSN and 44/215 PSN), notably 149 were CAD-only detected (113 NSN and 36 PSN), 27 were VD-only detected (25 NSN and 2 PSN), and 39 were CAD and VD detected (33 NSN and 6 PSN). The agreement was fair (weighted k=0.276 95\%CI 0.203-0.350). Sensitivity and NPV for CAD 87.4\% and 98.7\%, and for VD 30.7\% and 93.3\%. Automatic and manual caliper were similar for the assessment of maximum diameter (p=0.111). CAD-only and VD-only detected nodules showed similar nodule diameter (p=0.727) and proportion of PSN (p=0.073). Volume of CAD-only detected nodules was significantly smaller (p=0.019). Among the 27 VD-only detected nodules, 11 NSN were subsequently detected by CAD at the incidence rounds. Conclusion: The detection of NSN and PSN showed only fair agreement between VD and CAD. Combined assessment of LDCT by both VD and CAD is needed to achieve the optimal sensitivity.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Silv18, + author = {Silva, Mario and Schaefer-Prokop, Cornelia M and Jacobs, Colin and Capretti, Giovanni and Ciompi, Francesco and van Ginneken, Bram and Pastorino, Ugo and Sverzellati, Nicola}, + title = {Detection of Subsolid Nodules in Lung Cancer Screening: Complementary Sensitivity of Visual Reading and Computer-Aided Diagnosis}, + journal = IR, + year = {2018}, + volume = {53}, + number = {8}, + month = {3}, + pages = {441--449}, + doi = {10.1097/RLI.0000000000000464}, + abstract = {The aim of this study was to compare computer-aided diagnosis (CAD) and visual reading for the detection of subsolid nodules (SSNs) in volumetric low-dose computed tomography (LDCT) for lung cancer screening. Prospective visual detection (VD) and manual measurement of SSN were performed in the 2303 baseline volumetric LDCTs of the Multicenter Italian Lung Detection trial. Baseline and 2- and 4-year LDCTs underwent retrospective CAD analysis, subsequently reviewed by 2 experienced thoracic radiologists. The reference standard was defined by the cumulative number of SSNs detected by any reading method between VD and CAD. The number of false-positive CAD marks per scan (FP/scan) was calculated. The positive predictive value of CAD was quantified per nodule (PPV) and per screenee (PPV).
The sensitivity and negative predictive value were compared between CAD and VD. The longitudinal 3-time-point sensitivity of CAD was calculated in the subgroup of persistent SSNs seen by VD (ratio between the prevalent SSNs detected by CAD through 3 time points and the total number of persistent prevalent SSNs detected by VD) to test the sensitivity of iterated CAD analysis during a screening program. Semiautomatic characteristics (diameter, volume, and mass; both for whole nodule and solid component) were compared between SSN detected CAD-only or VD-only to investigate whether either reading method could suffer from specific sensitivity weakness related to SSN features. Semiautomatic and manual diameters were compared using Spearman rho correlation and Bland-Altman plot. Computer-aided diagnosis and VD detected a total of 194 SSNs in 6.7% (155/2,303) of screenees at baseline LDCT. The CAD showed mean FP/scan of 0.26 (604/2,303); PPV 22.5% (175/779) for any SSN, with 54.4% (37/68) for PSN and 19.4% for NSN (138/711; P < 0.001); PPV 25.6% (137/536). The sensitivity of CAD was superior to that of VD (88.4% and 34.2%, P < 0.001), as well as negative predictive value (99.2% and 95.5%, P < 0.001). The longitudinal 3-time-point sensitivity of CAD was 87.5% (42/48). There was no influence of semiautomatic characteristics on the performance of either reading method. The diameter of the solid component in PSN was larger by CAD compared with manual measurement. At baseline, CAD detected 3 of 4 SSNs, which were first overlooked by VD and subsequently evolved to lung cancer. Computer-aided diagnosis and VD as concurrent reading methods showed complementary performance, with CAD having a higher sensitivity, especially for PSN, but requiring visual confirmation to reduce false-positive calls. Computer-aided diagnosis and VD should be jointly used for LDCT reading to reduce false-negatives of either lone method. The semiautomatic measurement of solid core showed systematic shift toward a larger diameter, potentially resulting in an up-shift within Lung CT Screening Reporting and Data System classification.}, + file = {Silv18.pdf:pdf\\Silv18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29543693}, + gsid = {3431674930923271869}, + gscites = {29}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/193523}, + ss_id = {6d4507a54e77f55af2bfa786ca804a13018ff273}, + all_ss_ids = {['6d4507a54e77f55af2bfa786ca804a13018ff273']}, +} + +@article{Silv18a, + author = {Silva, Mario and Prokop, Mathias and Jacobs, Colin and Capretti, Giovanni and Sverzellati, Nicola and Ciompi, Francesco and van Ginneken, Bram and Schaefer-Prokop, Cornelia M and Galeone, Carlotta and Marchiano, Alfonso and Pastorino, Ugo}, + title = {Long-term Active Surveillance of Screening Detected Subsolid Nodules is a Safe Strategy to Reduce Overtreatment}, + journal = JTO, + year = {2018}, + volume = {13}, + month = {7}, + pages = {1454--1463}, + doi = {10.1016/j.jtho.2018.06.013}, + abstract = {Lung cancer presenting as subsolid nodule (SSN) can show slow growth, hence treating SSN is controversial. Our aim was to determine the long-term outcome of subjects with unresected SSNs in lung cancer screening. Since 2005, the Multicenter Italian Lung Detection (MILD) screening trial implemented active surveillance for persistent SSN, as opposed to early resection. Presence of SSNs was related to diagnosis of cancer at the site of SSN, elsewhere in the lung or in the body.
The risk of overall mortality and lung cancer mortality was tested by Cox proportional hazards model. SSN were found in 16.9% (389/2,303) of screenees. During 9.3+-1.2 years of follow-up, the hazard ratio (HR) of lung cancer diagnosis in subjects with SSN was 6.77 (95%CI:3.39-13.54), with 73% (22/30) of cancers not arising from SSN (median time to diagnosis 52 months from SSN). Lung cancer-specific mortality in subjects with SSN was significantly increased (HR 3.80; 95%CI:1.24-11.65) compared to subjects without lung nodules. Lung cancer arising from SSN did not lead to death within the follow-up period. Subjects with SSN in the MILD cohort showed a high risk of developing lung cancer elsewhere in the lung, with only a minority of cases arising from SSN, and never representing the cause of death. These results demonstrate the safety of active surveillance for conservative management of SSN until signs of solid component growth, and the need for prolonged follow-up because of high risk of other cancers.}, + file = {Silv18a.pdf:pdf\\Silv18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30026071}, + gsid = {4749736138620847044}, + gscites = {50}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/196781}, + ss_id = {4e67443737640a6d88642034266c0ce2d76d9ae7}, + all_ss_ids = {['4e67443737640a6d88642034266c0ce2d76d9ae7']}, +} + +@conference{Silv19, + author = {Silva, Mario and Milanese, Gianluca and Sabia, Frederica and Jacobs, Colin and van Ginneken, Bram and Prokop, Mathias and Schaefer-Prokop, Cornelia and Sestini, Stefano and Marchiano, Alfonso and Sverzellati, Nicola and Pastorino, Ugo}, + title = {Lung Cancer Screening in NLST Eligibles: Tailoring Annual Low-Dose Computed Tomography by Post-Test Risk Stratification}, + booktitle = RSNA, + year = {2019}, + abstract = {PURPOSE: To calculate the risk of lung cancer (LC) in 1 and 3 years after baseline low-dose computed tomography (LDCT), in screenees selected by National Lung Screening Trial (NLST) criteria. METHOD AND MATERIALS: For the aim of this post-hoc analysis, screenees from a prospective lung cancer screening (LCS) trial were retrospectively selected: age>=55years, pack-years>=30. Pre-test metrics: baseline demographics, medical interview, and pulmonary function test. Post-test metrics: retrospective LDCT reading by FDA-approved workstation for LCS, featuring computer aided diagnosis (CAD) and advanced semi-automatic algorithm for volumetric segmentation of nodule. Solid nodules were classified into 3 categories: 1)no nodule or nodule 1-112mm^3; 2)nodule 113-260mm^3; 3)nodule>260mm^3. Subsolid nodules were assigned either category 2(non-solid or part-solid nodules with solid component>5mm) or category 3(solid component>=5mm). The highest category was used for screenee-wise risk assessment. The primary outcome was LC diagnosis at 1 year or 3 years; the secondary outcome was the stage of LC. The Chi squared test was used to test the association between metrics and the primary outcome at 1 or 3 years. The risk of LC in 1 or 3 years was calculated by univariate and multivariate models. RESULTS: In 1,248 NLST-eligible screenees, LC frequency was 1.2% at 1 year and 2.3% at 3 years. At 1 year, category 3 was the only predictor of LC risk in multivariate model (odds ratio 79.84 p<0.001), confirming that early follow up by LDCT (e.g.3months) is needed for characterisation of such nodules. At 3 years, LC risk was predicted by category 2 (OR5.99 p=0.009) and 3 (OR26.55 p<001), Tiffeneau<70% (OR 2.75 p=0.024). 
LCS simulation with triennial screening rounds for category 1 and selective annual round for category 2 and 3 (29% in our population) showed 35% reduction of LDCT through 3 years. CONCLUSION: Annual LCS could be selectively offered to 30% of NLST eligible screenees, while longer interval might be safe in those with category 1. Validation of volumetric thresholds is granted through multiple software vendors. CLINICAL RELEVANCE/APPLICATION: LCS strategy can be optimised by tailoring annual LDCT to a minority of subjects at high risk, while longer screening intervals could be a safe strategy for low risk subjects yielding substantial reduction of LDCT burden (radiation and cost). This model is being prospectively tested in a LCS trial with LDCT every 3 years}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Silv19a, + author = {Silva, Mario and Milanese, Gianluca and Sabia, Frederica and Jacobs, Colin and van Ginneken, Bram and Prokop, Mathias and Schaefer-Prokop, Cornelia and Marchiano, Alfonso and Sverzellati, Nicola and Pastorino, Ugo}, + title = {Lung cancer risk after baseline round of screening: Only 20% of NLST eligibles require annual round}, + booktitle = ESTI, + year = {2019}, + abstract = {PURPOSE/OBJECTIVES: To calculate the risk of lung cancer (LC) in 1 year and 3 years after baseline low-dose computed tomography (LDCT) in high-risk subjects selected by the National Lung Cancer Screening Trial (NLST) criteria. METHODS AND MATERIALS: Subjects from the Multicentric Italian Lung Detection (MILD) trial were selected according to NLST criteria: age >=55 years and pack-years >=30. Baseline characteristics were: smoking status (former/current), gender, percent of predicted forced expiratory volume in first second (FEV 1% pred, 90% threshold), Tiffeneau ratio (70% threshold), nodules at baseline LDCT. The risk of LC in 1 and 3 years was calculated by multivariate models. RESULTS: In 1,248 NLST eligible screenees, LC frequency was 1.2% at 1year, 2.6% at 3 years. At 1 year, nodule volume on LDCT was the only predictor of LC risk (volume >250mm3, odds ratio (OR) 34.25, p=0.0009). At 3 years, the risk of LC was predicted by: nodule volume 113-250 mm3 (OR9.52 p=0.01), nodule volume >250mm3 (OR29.07, p<0.001), Tiffeneau <=70% (OR2.08 p=0.0195). A simulation of triennial screening rounds, with selective annual round only for nodule volume >=113mm3 (19,9% in our population) showed 40% reduction of LDTC through 3 years, and 80% LDCT saving at each annual round. CONCLUSION: Annual round is worthwhile for nodule >=113mm3 (about 20% in our population). Screening every 3 years can safely reduce the LDCT burden for nodule <113mm3 (about 80% in our population).}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Silv20, + author = {Silva, Mario and Milanese, Gianluca and Sestini, Stefano and Sabia, Federica and Jacobs, Colin and van Ginneken, Bram and Prokop, Mathias and Schaefer-Prokop, Cornelia M. and Marchiano, Alfonso and Sverzellati, Nicola and Pastorino, Ugo}, + title = {Lung cancer screening by nodule volume in Lung-RADS v1.1: negative baseline CT yields potential for increased screening interval}, + journal = ER, + year = {2020}, + volume = {31}, + pages = {1956--1968}, + doi = {10.1007/s00330-020-07275-w}, + abstract = {The 2019 Lung CT Screening Reporting & Data System version 1.1 (Lung-RADS v1.1) introduced volumetric categories for nodule management. 
The aims of this study were to report the distribution of Lung-RADS v1.1 volumetric categories and to analyse lung cancer (LC) outcomes within 3 years for exploring personalized algorithm for lung cancer screening (LCS).}, + file = {Silv20.pdf:pdf\\Silv20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + number = {4}, + pmid = {32997182}, + ss_id = {367878e980e109d80ad80066cec1961a940614ec}, + all_ss_ids = {['367878e980e109d80ad80066cec1961a940614ec']}, + gscites = {25}, +} + +@article{Simp19, + author = {Amber L. Simpson and Michela Antonelli and Spyridon Bakas and Michel Bilello and Keyvan Farahani and Bram van Ginneken and Annette Kopp-Schneider and Bennett A. Landman and Geert Litjens and Bjoern Menze and Olaf Ronneberger and Ronald M. Summers and Patrick Bilic and Patrick F. Christ and Richard K. G. Do and Marc Gollub and Jennifer Golia-Pernicka and Stephan H. Heckers and William R. Jarnagin and Maureen K. McHugo and Sandy Napel and Eugene Vorontsov and Lena Maier-Hein and M. Jorge Cardoso}, + title = {A large annotated medical image dataset for the development and evaluation of segmentation algorithms}, + url = {https://arxiv.org/pdf/1902.09063.pdf}, + abstract = {Semantic segmentation of medical images aims to associate a pixel with a label in a medical image without human initialization. The success of semantic segmentation algorithms is contingent on the availability of high-quality imaging data with corresponding labels provided by experts. We sought to create a large collection of annotated medical image datasets of various clinically relevant anatomies available under open source license to facilitate the development of semantic segmentation algorithms. Such a resource would allow: 1) objective assessment of general-purpose segmentation methods through comprehensive benchmarking and 2) open and free access to medical image data for any researcher interested in the problem domain. Through a multi-institutional effort, we generated a large, curated dataset representative of several highly variable segmentation tasks that was used in a crowd-sourced challenge - the Medical Segmentation Decathlon held during the 2018 Medical Image Computing and Computer Aided Interventions Conference in Granada, Spain. Here, we describe these ten labeled image datasets so that these data may be effectively reused by the research community.}, + file = {:pdf/Simp19.pdf:PDF}, + gscites = {604}, + gsid = {12275877761746940475}, + journal = {arXiv:1902.09063}, + month = {2}, + optnote = {DIAG}, + year = {2019}, + ss_id = {4654aa505e5bcdb089d0df202cd7ceabc9d2d41f}, + all_ss_ids = {['4654aa505e5bcdb089d0df202cd7ceabc9d2d41f']}, +} + +@article{Slaa21, + author = {Slaats, Jeroen and Dieteren, Cindy E. and Wagena, Esther and Wolf, Louis and Raaijmakers, Tonke K. and van der Laak, Jeroen A. and Figdor, Carl G. and Weigelin, Bettina and Friedl, Peter}, + title = {Metabolic Screening of Cytotoxic T-cell Effector Function Reveals the Role of CRAC Channels in Regulating Lethal Hit Delivery}, + doi = {10.1158/2326-6066.cir-20-0741}, + year = {2021}, + abstract = {AbstractCytotoxic T lymphocytes (CTL) mediate cytotoxicity toward tumor cells by multistep cell-cell interactions. However, the tumor microenvironment can metabolically perturb local CTL effector function. CTL activity is typically studied in two-dimensional (2D) liquid coculture, which is limited in recapitulating the mechanisms and efficacy of the multistep CTL effector response. 
We here developed a microscopy-based, automated three-dimensional (3D) interface coculture model suitable for medium-throughput screening to delineate the steps and CTL effector mechanisms affected by microenvironmental perturbation. CTL effector function was compromised by deregulated redox homeostasis, deficient mitochondrial respiration, as well as dysfunctional Ca2+ release-activated Ca2+ (CRAC) channels. Perturbation of CRAC channel function dampened calcium influx into CTLs, delayed CTL degranulation, and lowered the frequency of sublethal hits (i.e., additive cytotoxicity) delivered to the target cell. Thus, CRAC channel activity controls both individual contact efficacy and CTL cooperativity required for serial killing of target cells. The multistep analysis of CTL effector responses in 3D coculture will facilitate the identification of immune-suppressive mechanisms and guide the rational design of targeted intervention strategies to restore CTL effector function.}, + url = {http://dx.doi.org/10.1158/2326-6066.CIR-20-0741}, + file = {Slaa21.pdf:pdf\Slaa21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancer Immunology Research}, + citation-count = {5}, + automatic = {yes}, + pages = {926-938}, + volume = {9}, +} + +@article{Sleb15, + author = {Slebos, Dirk-Jan and van Rikxoort, Eva M. and van der Bij, Wim}, + title = {Air Trapping in Emphysema}, + journal = AJRCCM, + year = {2015}, + volume = {192}, + pages = {e45}, + doi = {10.1164/rccm.201504-0666IM}, + file = {Sleb15.pdf:pdf\\Sleb15.pdf:PDF}, + optnote = {DIAG}, + number = {5}, + pmid = {26325161}, + month = {9}, + gsid = {8498611071850151444}, + gscites = {1}, +} + +@mastersthesis{Sloo20, + author = {Ilse Slootweg}, + title = {Patient variables related to false predictions of deep-learning assisted prostate cancer detection in MRI}, + abstract = {Background: + DL-CAD for prediction of clinically significant prostate cancer (csPCa) in mpMRI is developed to aid radiologists in PI-RADS evaluation. DL-CAD predictions have low accuracy, possibly due to clinical risk factors of csPCa that are not taken into account by DL-CAD. + + Purpose: + Aim is to identify patient subgroups of clinical characteristics in which DL-CAD predictions differ from radiologists. + + Methods: + DL-CAD was applied to a test cohort of men examined for PCa according to PI-RADSv2 between 2016 and 2017. Ground truth was provided by manually annotated PI-RADS >=4 lesions. Patient age and PSA were derived from the electronic patient record and other variables were mined from the written radiological reports. False and correct predicted patients were compared on variable distributions and false positive rates were compared between variable categories. + + Results: + CsPCa was predicted for a total of 482 men (36.9% PIRADS >=4). Benign and malignant patients statistically differed on all clinical variables (P<.05). DL-CAD negative predictive value and positive predictive value were 0.912 and 0.457, respectively. False and correct positive predicted patients significantly differed on age (P<.05), PSA (P<.001), and PSAD (P<.001) as well as prostate volume (P<.001), number of lesions (P<.001), and number of affected zones (P<.001). Analysis of negative predictions was inconclusive due to small population size. + + Conclusions: + False positive DL-CAD csPCa predictions are due to unavailable clinical variables that are used in radiologists' PI-RADS risk assessment. 
We advise to study the effect of including age, PSA and PSAD information in DL-CAD input on prediction accuracy.}, + file = {:pdf/Sloo20.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2020}, + journal = {Master thesis}, +} + +@inproceedings{Slui02, + author = {I. C. Sluimer and B. van Ginneken}, + title = {Detection of abnormal tissue in {HRCT} scans of the chest}, + booktitle = CARS, + doi = {10.1007/978-3-642-56168-9_255}, + year = {2002}, + pages = {1101}, + optnote = {DIAG, RADIOLOGY}, + gsid = {9666435673167423528}, + gscites = {2}, + ss_id = {ee7ea95666aa2686dbf70c6aeed8c2794ed3cbac}, + all_ss_ids = {['ee7ea95666aa2686dbf70c6aeed8c2794ed3cbac']}, +} + +@article{Slui03, + author = {I. C. Sluimer and P. F. van Waes and M. A. Viergever and B. van Ginneken}, + title = {Computer-aided diagnosis in high-resolution {CT} of the lungs}, + journal = MP, + year = {2003}, + volume = {30}, + pages = {3081-3090}, + doi = {10.1118/1.1624771}, + abstract = {{A} computer-aided diagnosis (CAD) system is presented to automatically distinguish normal from abnormal tissue in high-resolution {CT} chest scans acquired during daily clinical practice. {F}rom high-resolution computed tomography scans of 116 patients, 657 regions of interest are extracted that are to be classified as displaying either normal or abnormal lung tissue. {A} principled texture analysis approach is used, extracting features to describe local image structure by means of a multi-scale filter bank. {T}he use of various classifiers and feature subsets is compared and results are evaluated with {ROC} analysis. {P}erformance of the system is shown to approach that of two expert radiologists in diagnosing the local regions of interest, with an area under the {ROC} curve of 0.862 for the {CAD} scheme versus 0.877 and 0.893 for the radiologists.}, + file = {Slui03.pdf:pdf\\Slui03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {12}, + pmid = {14713074}, + month = {11}, + gsid = {6511737817147038623}, + gscites = {181}, + ss_id = {e2e040d3f75ff4e4c04b281f46163806f80ebcce}, + all_ss_ids = {['e2e040d3f75ff4e4c04b281f46163806f80ebcce']}, +} + +@conference{Slui03a, + author = {I. C. Sluimer and P. F. M. G. van Waes and B. van Ginneken}, + title = {Texture analysis for automated classification of lung {HRCT} images}, + booktitle = RSNA, + year = {2003}, + pages = {647}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Slui04, + author = {I. C. Sluimer and M. Niemeijer and B. van Ginneken}, + title = {Lung field segmentation from thin-slice {CT} scans in presence of severe pathology}, + booktitle = MI, + year = {2004}, + volume = {5370}, + series = SPIE, + pages = {1447--1455}, + doi = {10.1117/12.535312}, + abstract = {{C}onventional methods for the segmentation of lung fields from thorax {CT} scans are based on thresholding. {T}hey rely on a large grey value contrast between the lung parenchyma and surrounding tissues. {I}n the presence of consolidations or other high density pathologies, these methods fail. {F}or the segmentation of such scans, a lung shape should be induced without relying solely on grey level information. {W}e present a segmentation-by-registration approach to segment the lung fields from several thin-slice {CT} scans (slice-thickness 1 mm) containing high density pathologies. {A} scan of a normal subject is elastically registered to each of the abnormal scans. 
{A}pplying the found deformations to a lung mask created for the normal subject, a segmentation of the abnormal lungs is found. {W}e implemented a conventional lung field segmentation method and compared it to the one using non-rigid registration techniques. {T}he results of the algorithms were evaluated against manual segmentations in several slices of each scan. {I}t is shown that the segmentation-by-registration approach can successfully identify the lung regions where the conventional method fails.}, + file = {Slui04.pdf:pdf\\Slui04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {97306129207600256}, + gscites = {32}, + ss_id = {7a1964f613685258b49161681d67585ddff896c2}, + all_ss_ids = {['7a1964f613685258b49161681d67585ddff896c2']}, +} + +@phdthesis{Slui05, + author = {I. C. Sluimer}, + title = {Automated image analysis of the pathological lung in {CT}}, + year = {2005}, + url = {http://igitur-archive.library.uu.nl/dissertations/2005-0909-200011/UUindex.html}, + abstract = {{T}he general objective of the thesis is automation of the analysis of the pathological lung from {CT} images. {S}pecifically, we aim for automated detection and classification of abnormalities in the lung parenchyma. {W}e first provide a review of computer analysis techniques applied to {CT} of the lungs as published in the past five years. {T}he rest of the thesis focuses on the steps needed for computer aided detection and diagnosis of disease in {H}igh {R}esolution {CT} ({HRCT}) images of the lung parenchyma: segmentation of the pathological lung, detection of abnormalities and classification of abnormalities. {F}or the normal lung, segmentation can be performed making use of the excellent contrast between air and surrounding tissues. {H}owever, this approach fails when the lung is affected by high density pathology. {D}ense pathologies are present in approximately a fifth of clinical scans, and for computer analysis such as detection and quantification of abnormal areas it is vital that these pathologies are not missed in the initial segmentation. {W}e propose a hybrid approach that incorporates atlas-based registration and voxel classification and as such responds to both shape and textural cues from both inside and outside the lung. {I}t is shown that the proposed method can accurately segment scans with up to a quarter of the lung volume affected by high density pathology and therefore presents a considerable improvement over conventional techniques. {A}fter the lung fields have been identified, the task is to recognize possibly abnormal regions within them. {W}e present an automated method for making the distinction between normal and abnormal lung tissue that makes use of the pattern recognition technique of statistical feature-based learning. {I}ts performance in classifying a set of regions of interest ({ROI}s) is compared to that of two experts. {I}t is concluded that the computer system is almost as adept at retrieving diagnostic information from the isolated {ROI}s as are the expert radiologists. {H}owever, performance of the radiologists when reviewing the entire scan is not rivaled by the automated system, indicating that further improvement should be sought in incorporation of the context of the {ROI}s. {I}n the final chapter it is investigated whether the more complex differentiation between several types of abnormal tissue can be made automatically. {T}his analysis is performed on the entire lung field, rather than a selected set of {ROI}s. 
{T}o this end, we propose an automatic unsupervised method of subdividing the lung fields into regions that are homogeneous in texture. {I}t is shown that the proposed subdivision more closely resembles freehand drawing than the popular square grid, and additionally improves the computer classification performance. {I}n classification of the entire lung fields, the automated system mostly confuses the same classes as do the experts. {O}n a more global scale, the computer is used to answer the question whether or not a certain type of abnormality is present in a slice. {I}n this task it achieves accuracies comparable to that of the experts.}, + copromotor = {B. van Ginneken}, + file = {Slui05.pdf:pdf\\Slui05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever and W. M. Prokop}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Slui05a, + author = {I.C. Sluimer and M. Prokop and B. van Ginneken}, + title = {Toward automated segmentation of the pathological lung in {CT}}, + journal = TMI, + year = {2005}, + volume = {24}, + pages = {1025-1038}, + doi = {10.1109/TMI.2005.851757}, + abstract = {{C}onventional methods of lung segmentation rely on a large gray value contrast between lung fields and surrounding tissues. {T}hese methods fail on scans with lungs that contain dense pathologies, and such scans occur frequently in clinical practice. {W}e propose a segmentation-by-registration scheme in which a scan with normal lungs is elastically registered to a scan containing pathology. {W}hen the resulting transformation is applied to a mask of the normal lungs, a segmentation is found for the pathological lungs. {A}s a mask of the normal lungs, a probabilistic segmentation built up out of the segmentations of 15 registered normal scans is used. {T}o refine the segmentation, voxel classification is applied to a certain volume around the borders of the transformed probabilistic mask. {P}erformance of this scheme is compared to that of three other algorithms: a conventional, a user-interactive and a voxel classification method. {T}he algorithms are tested on 10 three-dimensional thin-slice computed tomography volumes containing high-density pathology. {T}he resulting segmentations are evaluated by comparing them to manual segmentations in terms of volumetric overlap and border positioning measures. {T}he conventional and user-interactive methods that start off with thresholding techniques fail to segment the pathologies and are outperformed by both voxel classification and the refined segmentation-by-registration. {T}he refined registration scheme enjoys the additional benefit that it does not require pathological (hand-segmented) training data.}, + file = {Slui05a.pdf:pdf\\Slui05a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {8}, + pmid = {16092334}, + month = {8}, + gsid = {884962951440896774}, + gscites = {320}, + ss_id = {2421eaf85e274e59508c87df80bc4242edae7168}, + all_ss_ids = {['2421eaf85e274e59508c87df80bc4242edae7168']}, +} + +@article{Slui06, + author = {I. C. Sluimer and M. Prokop and I. Hartmann and B. van Ginneken}, + title = {Automated classification of hyperlucency, fibrosis, ground glass, solid and focal lesions in high resolution {CT} of the lung}, + journal = MP, + year = {2006}, + volume = {33}, + pages = {2610-2620}, + doi = {10.1118/1.2207131}, + abstract = {{A}n automatic method for textural analysis of complete {HRCT} lung slices is presented. 
{T}he system performs classification of regions of interest ({ROI}s) into one of six classes: normal, hyperlucency, fibrosis, ground glass, solid, and focal. {W}e propose a novel method of automatically generating {ROI}s that contain homogeneous texture. {T}he use of such regions rather than square regions is shown to improve performance of the automated system. {F}urthermore, the use of two different, previously published, feature sets is investigated. {B}oth feature sets are shown to yield similar results. {C}lassification performance of the complete system is characterized by {ROC} curves for each of the classes of abnormality and compared to a total of three expert readings by two experienced radiologists. {T}he different types of abnormality can be automatically distinguished with areas under the {ROC} curve that range from 0.74 (focal) to 0.95 (solid). {T}he kappa statistics for intraobserver agreement, interobserver agreement, and computer versus observer agreement were 0.70, 0.53+/-0.02, and 0.40+/-0.03, respectively. {T}he question whether or not a class of abnormality was present in a slice could be answered by the computer system with an accuracy comparable to that of radiologists.}, + file = {Slui06.pdf:pdf\\Slui06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {16898465}, + month = {6}, + gsid = {7126259624329785862}, + gscites = {79}, + ss_id = {cb724200872323436552c2303f4d8f3ae9ea7d1a}, + all_ss_ids = {['cb724200872323436552c2303f4d8f3ae9ea7d1a']}, +} + +@article{Slui06a, + author = {I. C. Sluimer and A. M. R. Schilham and M. Prokop and B. van Ginneken}, + title = {Computer analysis of computed tomography scans of the lung: a survey}, + journal = TMI, + year = {2006}, + volume = {25}, + pages = {385-405}, + doi = {10.1109/TMI.2005.862753}, + abstract = {{C}urrent computed tomography ({CT}) technology allows for near isotropic, submillimeter resolution acquisition of the complete chest in a single breath hold. {T}hese thin-slice chest scans have become indispensable in thoracic radiology, but have also substantially increased the data load for radiologists. {A}utomating the analysis of such data is, therefore, a necessity and this has created a rapidly developing research area in medical imaging. {T}his paper presents a review of the literature on computer analysis of the lungs in {CT} scans and addresses segmentation of various pulmonary structures, registration of chest scans, and applications aimed at detection, classification and quantification of chest abnormalities. {I}n addition, research trends and challenges are identified and directions for future research are discussed.}, + file = {Slui06a.pdf:pdf\\Slui06a.pdf:PDF}, + optnote = {DIAG, NoduleDetectionCT, RADIOLOGY}, + number = {4}, + pmid = {16608056}, + month = {4}, + gsid = {10943222434899920168}, + gscites = {621}, + ss_id = {ecad8c357147604fc065e717f54758ad4c10552b}, + all_ss_ids = {['ecad8c357147604fc065e717f54758ad4c10552b']}, +} + +@article{Slui23, + author = {van der Sluijs, Koen M. and Thannhauser, Jos and Visser, Iris M. and Nabeel, P. M. and Raj, Kiran V. and Malik, Afrah E. F. and Reesink, Koen D. and Eijsvogels, Thijs M. H. and Bakker, Esm\'{e}e A. and Kaur, Prabhdeep and Joseph, Jayaraj and Thijssen, Dick H. 
J.}, + title = {Central and local arterial stiffness in White Europeans compared to age-, sex-, and BMI-matched South Asians}, + doi = {10.1371/journal.pone.0290118}, + year = {2023}, + abstract = { + Background + Ethnicity impacts cardiovascular disease (CVD) risk, and South Asians demonstrate a higher risk than White Europeans. Arterial stiffness is known to contribute to CVD, and differences in arterial stiffness between ethnicities could explain the disparity in CVD risk. We compared central and local arterial stiffness between White Europeans and South Asians and investigated which factors are associated with arterial stiffness. + Methods + Data were collected from cohorts of White Europeans (the Netherlands) and South Asians (India). We matched cohorts on individual level using age, sex, and body mass index (BMI). Arterial stiffness was measured with ARTSENS(r) Plus. Central stiffness was expressed as carotid-femoral pulse wave velocity (cf-PWV, m/s), and local carotid stiffness was quantified using the carotid stiffness index (Beta) and pressure-strain elastic modulus (Epsilon, kPa). We compared arterial stiffness between cohorts and used multivariable linear regression to identify factors related to stiffness. + Results + We included n = 121 participants per cohort (age 53+-10 years, 55% male, BMI 24 kg/m2). Cf-PWV was lower in White Europeans compared to South Asians (6.8+-1.9 vs. 8.2+-1.8 m/s, p<0.001), but no differences were found for local stiffness parameters Beta (5.4+-2.4 vs. 5.8+-2.3, p = 0.17) and Epsilon (72+-35 vs. 70+-31 kPa, p = 0.56). Age (standardized b, 95% confidence interval: 0.28, 0.17-0.39), systolic blood pressure (0.32, 0.21-0.43), and South Asian ethnicity (0.46, 0.35-0.57) were associated with cf-PWV; associations were similar between cohorts (p>0.05 for interaction). Systolic blood pressure was associated with carotid stiffness in both cohorts, whereas age was associated to carotid stiffness only in South Asians and BMI only in White Europeans. + Conclusion + Ethnicity is associated with central but not local arterial stiffness. Conversely, ethnicity seems to modify associations between CVD risk factors and local but not central arterial stiffness. This suggests that ethnicity interacts with arterial stiffness measures and the association of these measures with CVD risk factors. + }, + url = {http://dx.doi.org/10.1371/journal.pone.0290118}, + file = {Slui23.pdf:pdf\Slui23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PLOS ONE}, + citation-count = {0}, + automatic = {yes}, + pages = {e0290118}, + volume = {18}, +} + +@conference{Smee18, + author = {Esther Smeets and Jonas Teuwen and Jeroen van der Laak and Martin Gotthardt and Francesco Ciompi and Erik Aarntzen}, + booktitle = {European Society for Molecular Imaging}, + title = {Tumor heterogeneity as a PET-biomarker predicts overall survival of pancreatic cancer patients}, + abstract = {INTRODUCTION + Pancreatic ductal adenocarcinoma (PDAC) shows a 5-year survival rate of 8%[1]. This mortality results from a lack of methods to accurately treat patients[2]. PDAC is remarkable for its fibrotic reaction, which is present at early stages of PDAC development[3]. Components of this environment can be measured on clinical images[4]. PET derived parameters, e.g. SUVmax, have not been able to provide prognostic information. In this study we developed an algorithm based on FDG-PET texture features (TF) that classifies heterogeneous or homogeneous tumors and shows a correlation with overall survival. 
+ + + METHODS + In total, 121 patients with histologically proven PDAC who underwent 18F-FDG PET/CT (Siemens Biograph mCT, Knoxville, US) were selected from the hospital system. Eighty-six EANM reconstructed scans were visually labeled as 'homogenous' or 'heterogeneous' by experienced Nuclear Medicine physicians and served as training set to develop the classifier [5]. All the 121 scans were used as validation set for the correlation with overall survival (OS). Tumors were delineated using 40% threshold of the SUVmax with manual correction. TF were extracted using the PyRadiomics toolbox [6]. TF were selected and tested for robustness as described in literature [7-9]. The classifier was built using logistic regression. Prognostic impact was assessed by Kaplan Meier survival analysis and log-rank test. + + + RESULTS + Optimal performance of the leave-one-out cross-validation classifier in the training set yielded an accuracy of 0.73 and AUC of 0.71 in classifying PDAC as heterogeneous or homogeneous tumors. Of note, two tumors were visually labeled as homogenous but correctly classified as heterogeneous by the classifier after review. For the 121 patients, the OS of PDAC tumors classified as heterogeneous was significantly worse than for homogeneous tumors; median OS 69 weeks (95%CI 64 to 91 weeks) versus median 95 weeks (95%CI 76 to 114), p=0.0285. This is in contrast with single standard PET parameters, single TF or manual labeling, which had no significant prognostic impact. + + + CONCLUSIONS + We developed an algorithm that accurately classifies PDAC as heterogeneous or homogeneous, based on a set of 18F-FDG PET derived texture features. We showed that the classification result has prognostic value, improving upon standard PET derived parameters and single texture-features. Further validation of this algorithm in an external cohort of PDAC patients is ongoing. + + + REFERENCES + + [1] Siegel, R.L., K.D. Miller, and A. Jemal, Cancer statistics, 2016. CA Cancer J Clin, 2016. 66(1): p. 7-30. + [2] Ryan, D.P., T.S. Hong, and N. Bardeesy, Pancreatic adenocarcinoma. N Engl J Med, 2014. 371(11): p. 1039-49. + [3] Neesse, A., et al., Stromal biology and therapy in pancreatic cancer: a changing paradigm. Gut, 2015. 64(9): p. 1476-84. + [4] Heid, I., et al., Co-clinical Assessment of Tumor Cellularity in Pancreatic Cancer. Clin Cancer Res, 2017. 23(6): p. 1461-1470. + [5] Boellaard, R., et al., FDG PET and PET/CT: EANM procedure guidelines for tumour PET imaging: version 1.0. Eur J Nucl Med Mol Imaging, 2010. 37(1): p. 181-200. + [6] van Griethuysen, J.J.M., et al., Computational Radiomics System to Decode the Radiographic Phenotype. Cancer Res, 2017. 77(21): p. e104-e107. + [7] Yan, J., et al., Impact of Image Reconstruction Settings on Texture Features in 18F-FDG PET. J Nucl Med, 2015. 56(11): p. 1667-73. + [8] Leijenaar, R.T., et al., The effect of SUV discretization in quantitative FDG-PET Radiomics: the need for standardized methodology in tumor texture analysis. Sci Rep, 2015. 5: p. 11075. + [9] Grootjans, W., et al., The Impact of Optimal Respiratory Gating and Image Noise on Evaluation of Intratumor Heterogeneity on 18F-FDG PET Imaging of Lung Cancer. J Nucl Med, 2016. 57(11): p. 1692-1698.}, + optnote = {DIAG, RADIOLOGY}, + year = {2018}, +} + +@conference{Smit09, + author = {E. J. Smit and A. M. Mendrik and A. Waaijer and G. Bertolini and E. P. A. Vonken and B. van Ginneken and M. 
Prokop}, + title = {Reconstruction of high-quality {CTA} from noisy cerebral {CT} perfusion data}, + booktitle = ECR, + year = {2009}, + abstract = {{P}urpose: {C}urrent techniques for reconstructing {CT} angiography ({CTA}) from {CT} perfusion ({CTP}) data use single arterial phase (a{CTP}) or the temporal {MIP} (t{MIP}) but such images suffer from the increased image noise in thin-section {CTP} data. {W}e developed and tested a technique that allows for reconstruction of high-quality {CTA} from noisy cerebral {CTP} data. {M}ethods and {M}aterials: {W}e developed a novel method for reconstructing {CTA} from {CTP} data that combines the advantages of t{MIP} (high vascular contrast) and temporal mean (low image noise). {T}his perfusion-derived {CTA} (p{CTA}) was tested on 17 patients with suspected subarachnoid haemorrhage who underwent {CTP} (80 k{V}, 150 m{A}s, 30 scans every 2s) and standard {CTA} with 64x0.625mm collimation. {S}tandard {CTA}, a{CTP}, t{MIP} and p{CTA} were randomized and blindly presented to an expert who was unaware of the techniques. {A}ll scans were scored on a 5-point scale for arterial contrast, detail visibility, vascular noise and overall image quality. {I}mage noise was determined over a 100mm2 homogenous {ROI}. {R}esults: p{CTA} yielded significantly superior visual scores for overall image quality and vascular noise (p<0.05). {O}verall image quality with t{MIP} was significantly superior to both {CTA} and a{CTP}. {D}etail visibility was similar to t{MIP} but significantly better than {CTA} and a{CTA}. {A}rterial contrast was significantly higher than for standard {CTA} and similar to t{MIP} and a{CTA}. {I}mage noise was significantly lowest on p{CTA} and highest for a{CTA}. {C}onclusion: {O}ur {CTA} reconstruction algorithm appears superior to conventional {CTA} of the brain and existing techniques for reconstructing {CTA} from {CTP} data.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Smit10, + author = {E. J. Smit and J. Dankbaar and A. M. Mendrik and B. van Ginneken and E. Vonken and M. Prokop}, + title = {Reconstruction of High Quality {CT} Angiography from Noisy Cerebral {CT} Perfusion Data}, + booktitle = RSNA, + year = {2010}, + abstract = {Purpose: To develop and test a method for the reconstruction of cerebral Computed Tomography (CT) Angiography (CTA) from CT Perfusion data that, with respect to image quality, can substitute for conventional CTA. Method and materials: We propose the temporal Maximum Intensity Projection (tMIP) with prior temporal Guassian filtering as a technique to obtain angiography from CT Perfusion data (perfusion derived CTA, p-CTA). The amount of temporal filtering was evaluated to optimize Contrast-to-Noise Ratio (CNR) in the Circle of Willis (CoW). No spatial filtering was applied. We retrospectively selected 25 consecutive stroke patients who underwent CTP (80 kV, 150 mAs, 25 scans every 2s) and CTA (120 kV, 150 mAs) both with 128 x 0.625 mm collimation. Arterial enhancement was measured in 10 regions of interest (ROI) in the CoW (5 large and 5 small arteries) and image noise in one occipital lobe for perfusion derived CTA (p-CTA), standard tMIP, arterial phase of CTP (a-CTP) and conventional CTA. Results: Optimal CNR in the CoW was obtained at a temporal Gaussian filter strength of 2 seconds and was equal for both large and small arteries. The p-CTA resulted in an average CNR increase of 12% and a noise reduction of 25% compared to standard tMIP. 
The optimal filter strength was equal for both large and small vessels in the circle of Willis, and comprised a Gaussian filter with a standard deviation of 2 seconds. The p-CTA was superior to conventional CTA with 23% higher CNR and 42% lower noise and to a-CTP with 42% higher CNR and 119% noise reduction. Conclusion: The presented method is an excellent technique to obtain CT Angiography from CT Perfusion data and renders CTA images with CNR and SNR superior to conventional CTA of the brain. Clinical relevance/application: Obtaining high quality CT Angiography (CTA) from noisy CT Perfusion (CTP) data could obviate an additional CTA scan, reduce total radiation exposure and the amount of contrast material needed.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Smit12, + author = {Smit, Ewoud J. and Vonken, Evert-Jan and van der Schaaf, Irene C. and Mendrik, Adrienne and Dankbaar, Jan Willem and Horsch, Alexander D. and van Seeters, Tom and van Ginneken, Bram and Prokop, Mathias}, + title = {Timing-Invariant Reconstruction for Deriving High-quality {CT} Angiographic Data from Cerebral {CT} Perfusion Data}, + journal = Radiology, + year = {2012}, + volume = {263}, + pages = {216-225}, + doi = {10.1148/radiol.11111068}, + abstract = {PURPOSE: To compare a simple and robust technique used to reconstruct high-quality computed tomographic (CT) angiographic images from CT perfusion data with currently used CT angiography techniques. MATERIALS AND METHODS: Institutional review board approval was waived for this retrospective study, which included 25 consecutive patients who had had a stroke. We created temporal maximum intensity projection (tMIP) CT angiographic images by using prior temporal filtering as a timing-insensitive technique to produce CT angiographic images from CT perfusion data. The temporal filter strength was optimized to gain maximal contrast-to-noise ratios (CNRs) in the circle of Willis. The resulting timing-invariant (TI) CT angiography was compared with standard helical CT angiography, the arterial phase of dynamic CT angiography, and nonfiltered tMIP CT angiography. Vascular contrast, image noise, and CNR were measured. Four experienced observers scored all images for vascular noise, vascular contour, detail of small and medium arteries, venous superimposition, and overall image quality in a blinded side-by-side comparison. Measurements were compared with a paired t test; P = .05 indicated a significant difference. RESULTS: On average, optimized temporal filtering in TI CT angiography increased CNR by 18\% and decreased image noise by 18\% at the expense of a decrease in vascular contrast of 3\% when compared with nonfiltered tMIP CT angiography. CNR, image noise, vascular noise, vascular contour, detail visibility of small and medium arteries, and overall image quality of TI CT angiograms were superior to those of standard CT angiography, tMIP CT angiography, and the arterial phase of dynamic CT angiography at a vascular contrast that was similar to that of standard CT angiography. Venous superimposition was similar for all techniques. Image quality of the arterial phase of dynamic CT angiography was rated inferior to that of standard CT angiography. 
CONCLUSION: TI CT angiographic images constructed by using temporally filtered tMIP CT angiographic data have excellent image quality that is superior to that achieved with currently used techniques, but they suffer from modest venous superimposition. © RSNA, 2012.}, + file = {Smit12.pdf:pdf\\Smit12.pdf:PDF}, + optnote = {DIAG}, + pmid = {22332063}, + month = {4}, + gsid = {11443188990303908052}, + gscites = {66}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/109996}, + ss_id = {656947026157a7e1248db7f764db9e09e98e3b18}, + all_ss_ids = {['656947026157a7e1248db7f764db9e09e98e3b18']}, +} + +@conference{Smit12a, + author = {E. J. Smit and E. Vonken and F. J. Meijer and J. Dankbaar and A. Horsch and B. van Ginneken and B. Velthuis and I. van der Schaaf and M. Prokop}, + title = {Acute Ischemic Stroke Assessment on {CT}-Angiography That Derived from {CT}-Perfusion Images}, + booktitle = RSNA, + year = {2012}, + abstract = {PURPOSE: To assess the diagnostic accuracy of timing-invariant CT angiography (TI-CTA) that is derived from CT perfusion (CTP) data for acute ischemic stroke assessment. METHOD AND MATERIALS: We selected 73 consecutive patients with suspected acute ischemic stroke. Standard CT angiography (CTA) and CT perfusion images were obtained at admission, and timing-invariant CTA was automatically derived from the CTP data. Two experienced radiologists were individually presented with a blinded and randomized sequence of scans. All images were scored for the presence of occlusion in the following artery segments: internal carotid artery (ICA), middle cerebral artery (MCA) segment 1 (M1) and trifurcation to segment 2 (M2), anterior cerebral artery segments 1 (A1) and 2 (A2), basilar artery (BA), posterior cerebral artery segments 1 (P1) and 2 (P2). Sensitivity and specificity of timing-invariant CTA were determined relative to standard CTA (unanimous occlusions). RESULTS: Timing-invariant CTA provided high sensitivity and specificity for arterial occlusion assessment. Specificity per segment was: ICA: 99%, A1: 98%, A2: 100%, M1: 96%, M2: 98%, BA: 100%, P1: 100%, P2: 100%. Sensitivity per segment was: ICA: 88%, M1: 94% and M2: 89%. Sensitivity could not be calculated for the other segments since those had two or less occlusions. There was one false negative score in the ICA that was missed by one observer only, the false negatives in the MCA were never solitary occlusions but were assigned to other MCA segments. CONCLUSION: Timing-invariant CTA that is derived from CT perfusion data provides a diagnostic accuracy that is similar to standard CTA. These findings suggest that a separate cerebral CTA scan can be omitted if CT perfusion imaging is performed. CLINICAL RELEVANCE/APPLICATION: If CT perfusion has been performed, a separate standard CTA of the brain can be omitted, thereby reducing the total radiation dose and the amount of contrast material needed.}, + optnote = {DIAG}, +} + +@article{Smit13, + author = {Smit, Ewoud J. and Vonken, Evert-Jan and van Seeters, Tom and Dankbaar, Jan Willem and van der Schaaf, Irene C. and Kappelle, L Jaap and van Ginneken, Bram and Velthuis, Birgitta K. 
and Prokop, Mathias}, + title = {Timing-Invariant Imaging of Collateral Vessels in Acute Ischemic Stroke}, + journal = Stroke, + year = {2013}, + volume = {44}, + pages = {2194-2199}, + doi = {10.1161/STROKEAHA.111.000675}, + abstract = {BACKGROUND AND PURPOSE: Although collateral vessels have been shown to be an important prognostic factor in acute ischemic stroke, patients with lack of collaterals on standard imaging techniques may still have good clinical outcome. We postulate that in these cases collateral vessels are present though not visible on standard imaging techniques that are based on a single time frame. METHODS: This study included 40 consecutive patients with acute ischemic stroke with a large-vessel occlusion. Standard computed tomography angiography (CTA, single time frame) and CT perfusion (multiple time frames) were obtained at admission and timing-invariant (TI)-CTA was created from the CT perfusion data. Clinical outcome data (modified Rankin Scale) were assessed at 3-month follow-up. Four experienced observers independently assessed collateral status twice on both standard CTA and TI-CTA in an independent, blinded, randomized manner. Collateral status was rated as good if >=50\% and poor if <50\% of collaterals were present compared with the contralateral hemisphere. RESULTS: Collateral status was rated higher on TI-CTA (good in 84\%) compared with standard CTA (good in 49\%; P<0.001). Thirty-one percent of patients with poor collateral status on standard CTA still had good clinical outcome. All of those patients, however, showed good collaterals on TI-CTA. All cases with poor collateral status rated on TI-CTA had poor clinical outcome. CONCLUSIONS: Collateral vessels may not always be visible on standard single time-frame CTA because of delayed contrast arrival. Future prognostic studies in acute stroke should consider delay-insensitive techniques, such as TI-CTA, instead of standard single time-frame imaging, such as standard CTA.}, + file = {Smit13.pdf:pdf\\Smit13.pdf:PDF}, + optnote = {DIAG}, + pmid = {23760216}, + month = {6}, + gsid = {15052282752148575019}, + gscites = {96}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/116747}, + ss_id = {41ec277af272f5fd6146fb13a8816289a464f6fd}, + all_ss_ids = {['41ec277af272f5fd6146fb13a8816289a464f6fd']}, +} + +@article{Smit15, + author = {Smit, E. J. and Vonken, E-J. and Meijer, F J A. and Dankbaar, J. W. and Horsch, A. D. and van Ginneken, B. and Velthuis, B. and van der Schaaf, I. and Prokop, M.}, + title = {Timing-Invariant {CT} Angiography Derived from {CT} Perfusion Imaging in Acute Stroke: A Diagnostic Performance Study}, + journal = AJNR, + year = {2015}, + volume = {36}, + pages = {1834--1838}, + doi = {10.3174/ajnr.A4376}, + abstract = {Timing-invariant (or delay-insensitive) CT angiography derived from CT perfusion data may obviate a separate cranial CTA in acute stroke, thus enhancing patient safety by reducing total examination time, radiation dose, and volume of contrast material. We assessed the diagnostic accuracy of timing-invariant CTA for detecting intracranial artery occlusion in acute ischemic stroke, to examine whether standard CTA can be omitted. Patients with suspected ischemic stroke were prospectively enrolled and underwent CTA and CTP imaging at admission. Timing-invariant CTA was derived from the CTP data. Five neuroradiologic observers assessed all images for the presence and location of intracranial artery occlusion in a blinded and randomized manner. 
Sensitivity and specificity of timing-invariant CTA and standard CTA were calculated by using an independent expert panel as the reference standard. Interrater agreement was determined by using kappa statistics. We included 108 patients with 47 vessel occlusions. Overall, standard CTA and timing-invariant CTA provided similar high diagnostic accuracy for occlusion detection with a sensitivity of 96\% (95\% CI, 90\%-100\%) and a specificity of 100\% (99\%-100\%) for standard CTA and a sensitivity of 98\% (95\% CI, 94\%-100\%) and a specificity of 100\% (95\% CI, 100\%-100\%) for timing-invariant CTA. For proximal large-vessel occlusions, defined as occlusions of the ICA, basilar artery, and M1, the sensitivity and specificity were 100\% (95\% CI, 100\%-100\%) for both techniques. Interrater agreement was good for both techniques (mean kappa value, 0.75 and 0.76). Timing-invariant CTA derived from CTP data provides diagnostic accuracy similar to that of standard CTA for the detection of artery occlusions in acute stroke.}, + file = {Smit15.pdf:pdf\\Smit15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26113070}, + month = {6}, + gsid = {7113799955940245108}, + gscites = {21}, + ss_id = {f948e6ceea98e653874bae245610957e54c76121}, + all_ss_ids = {['f948e6ceea98e653874bae245610957e54c76121']}, +} + +@phdthesis{Smit19, + author = {Smit, Ewoud}, + title = {Feasibility of a single-acquisition CT stroke protocol}, + year = {2019}, + abstract = {CT angiography is a widely used technique for the noninvasive evaluation of neurovascular pathology. Because CTA is a snapshot of arterial contrast enhancement, information on flow dynamics is limited. Dynamic CTA techniques, also referred to as 4D-CTA, have become available for clinical practice in recent years. This article provides a description of 4D-CTA techniques and a review of the available literature on the application of 4D-CTA for the evaluation of intracranial vascular malformations and hemorrhagic and ischemic stroke. Most of the research performed to date consists of observational cohort studies or descriptive case series. These studies show that intracranial vascular malformations can be adequately depicted and classified by 4D-CTA, with DSA as the reference standard. In ischemic stroke, 4D-CTA better estimates thrombus burden and the presence of collateral vessels than conventional CTA. In intracranial hemorrhage, 4D-CTA improves the detection of the "spot" sign, which represents active ongoing bleeding.}, + copromotor = {E. Vonken}, + file = {Smit19.pdf:pdf/Smit19.pdf:PDF;:png/publications/Smit19.PNG:PNG image}, + optnote = {DIAG}, + promotor = {M. A. Viergever and W. M. Prokop and B. van Ginneken}, + school = {University of Utrecht}, + journal = {PhD thesis}, +} + +@inproceedings{Smit21, + title = {Quality control of whole-slide images through multi-class semantic segmentation of artifacts}, + author = {Smit, Gijs and Ciompi, Francesco and Cig{\'e}hn, Maria and Bod{\'e}n, Anna and van der Laak, Jeroen and Mercan, Caner}, + booktitle = MIDL, + year = {2021}, + url = {https://openreview.net/forum?id=7EZ4JOtlRl}, + abstract = {Quality control is an integral part in the digitization process of whole-slide histopathology images due to artifacts that arise during various stages of slide preparation. Manual control and supervision of these gigapixel images are labor-intensive. Therefore, we report the first multi-class deep learning model trained on whole-slide images covering multiple tissue and stain types for semantic segmentation of artifacts. 
Our approach reaches a Dice score of 0.91, on average, across six artifact types, and outperforms the competition on external test set. Finally, we extend the artifact segmentation network to a multi-decision quality control system that can be deployed in routine clinical practice.}, + optnote = {DIAG, PATHOLOGY}, +} + +@article{Smit23, + author = {Marloes A. Smit and Francesco Ciompi and John-Melle Bokhorst and Gabi W. van Pelt and Oscar G.F. Geessink and Hein Putter and Rob A.E.M. Tollenaar and J. Han J.M. van Krieken and Wilma E. Mesker and Jeroen A.W.M. van der Laak}, + title = {Deep learning based tumor-stroma ratio scoring in colon cancer correlates with microscopic assessment}, + journal = {Journal of Pathology Informatics}, + year = {2023}, + doi = {https://doi.org/10.1016/j.jpi.2023.100191}, + abstract = {Background + The amount of stroma within the primary tumor is a prognostic parameter for colon cancer patients. This phenomenon can be assessed using the tumor-stroma ratio (TSR), which classifies tumors in stroma-low (<=50% stroma) and stroma-high (>50% stroma). Although the reproducibility for TSR determination is good, improvement might be expected from automation. The aim of this study was to investigate whether the scoring of the TSR in a semi- and fully automated method using deep learning algorithms is feasible. + + Methods + A series of 75 colon cancer slides were selected from a trial series of the UNITED study. For the standard determination of the TSR, 3 observers scored the histological slides. Next, the slides were digitized, color normalized, and the stroma percentages were scored using semi- and fully automated deep learning algorithms. Correlations were determined using intraclass correlation coefficients (ICCs) and Spearman rank correlations. + + Results + 37 (49%) cases were classified as stroma-low and 38 (51%) as stroma-high by visual estimation. A high level of concordance between the 3 observers was reached, with ICCs of 0.91, 0.89, and 0.94 (all P<.001). Between visual and semi-automated assessment the ICC was 0.78 (95% CI 0.23-0.91, P-value 0.005), with a Spearman correlation of 0.88 (P<.001). Spearman correlation coefficients above 0.70 (N=3) were observed for visual estimation versus the fully automated scoring procedures. + + Conclusion + Good correlations were observed between standard visual TSR determination and semi- and fully automated TSR scores. At this point, visual examination has the highest observer agreement, but semi-automated scoring could be helpful to support pathologists.}, + file = {Smit23.pdf:pdf\\Smit23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {935f46ae7c1a5be1ed7a5e176db38fb919bf30df}, + all_ss_ids = {['935f46ae7c1a5be1ed7a5e176db38fb919bf30df']}, + gscites = {3}, +} + +@inproceedings{Snoe03, + author = {P. R. Snoeren and N. Karssemeijer}, + title = {Gray scale registration of mammograms using a model of image acquisition}, + booktitle = IPMI, + year = {2003}, + volume = {18}, + series = LNCS, + pages = {401--412}, + doi = {10.1007/978-3-540-45087-0_34}, + abstract = {{A} parametric technique is proposed to match the pixel-value distributions of two mammograms of the same woman. {I}t can be applied to mammograms of the left and the right breast, or, more effectively, to temporal mammograms, e.g., from two screening rounds. {T}he main reason to match mammograms is to lessen irrelevant differences between images due to acquisition: by varying breast compression, different film types, et cetera. 
{F}irstly, a technique like this might reduce the radiologist's efforts to detect relevant differences like abnormal growth in breast tissue that signals breast cancer. {S}econdly, though not the aim of this study, applications might be found in subtraction radiology or in the computer aided detection of abnormalities in temporal mammograms. {I}nstead of arbitrarily shifting and/or scaling the pixel-values of one image to match the other or directly mapping one histogram to the other, the proposed method is based on general aspects of acquisition. {T}his encompasses (1) breast compression; (2) exposure time; (3) incident radiation intensity; and, (4a) film properties and digitization for screen-film mammograms, or (4b) detector response for unprocessed digital mammograms. {T}he method does not require a priori knowledge about specific settings of acquisition to match histograms; the degrees of freedom are estimated from the pixel-value distributions of the two mammograms themselves. {B}y the method it is possible to match digitized screen-film mammograms (in the next also referred to as analog mammograms) as well as unprocessed digital mammograms in any of the four possible combinations: analog to analog, analog to digital, digital to analog, and digital to digital.}, + file = {Snoe03.pdf:pdf\\Snoe03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {15344475}, + gsid = {4086411044599121897}, + gscites = {6}, + ss_id = {21f36695f2f92200916359a1b61d5e5648e2dd49}, + all_ss_ids = {['21f36695f2f92200916359a1b61d5e5648e2dd49']}, +} + +@article{Snoe04, + author = {P. R. Snoeren and N. Karssemeijer}, + title = {Thickness correction of mammographic images by means of a global parameter model of the compressed breast}, + journal = TMI, + year = {2004}, + volume = {23}, + pages = {799--806}, + doi = {10.1109/TMI.2004.827477}, + file = {Snoe04.pdf:pdf\\Snoe04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {15250632}, + month = {7}, + gsid = {2352303434720850503}, + gscites = {51}, + ss_id = {8d31ec41bad5116e2cba21ba5b5d165f770c5d3c}, + all_ss_ids = {['8d31ec41bad5116e2cba21ba5b5d165f770c5d3c']}, +} + +@inproceedings{Snoe05, + author = {P. R. Snoeren and N. Karssemeijer}, + title = {Thickness correction of mammographic images by anisotropic filtering and interpolation of dense tissue}, + booktitle = MI, + year = {2005}, + volume = {5747}, + series = SPIE, + pages = {1521-1527}, + doi = {10.1117/12.595255}, + url = {http://link.aip.org/link/?PSI/5747/1521/1}, + abstract = {Without image processing, the dynamic range of display systems is too small to optimally display both the interior and the peripheral zone of a compressed breast. To overcome manual adjustment of contrast, we propose an algorithm for peripheral enhancement of digital or digitized mammograms. This is done by virtually adding homogeneous tissue at the peripheral zone, where the breast comes loose from the compression paddle. The gradual signal increase due to a smaller breast thickness near the breast edge is estimated by the solution of the anisotropic diffusion equation. The conductivity is set small in the direction perpendicular to the breast edge, and large in the parallel direction. By this, large conductivities (much blurring) can be applied, while undesirable artifacts that would be caused by isotropic filtering are minimized. This measure is not always enough to prevent some reduction of relevant contrast. 
Therefore, dense tissue along parallel curves to the breast edge is interpolated before smoothing the image. Anisotropic diffusion filtering and dense tissue interpolation are both new techniques to improve peripheral enhancement. Comparison with some other methods showed that our approach performs at least as good as other methods.}, + file = {Snoe05.pdf:pdf/Snoe05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {4}, + gsid = {263805911348911436}, + gscites = {18}, + ss_id = {2e5d21f0b2553210b0b0ec6e2b8c292de8c9106c}, + all_ss_ids = {['2e5d21f0b2553210b0b0ec6e2b8c292de8c9106c']}, +} + +@article{Snoe07, + author = {P. R. Snoeren and N. Karssemeijer}, + title = {Gray-scale and geometric registration of full-field digital and film-screen mammograms}, + journal = MIA, + year = {2007}, + volume = {11}, + pages = {146--156}, + doi = {10.1016/j.media.2006.11.003}, + abstract = {{D}uring the transition from traditional film-screen ({FS}) mammography to full-field digital ({FFD}) mammography, images from both modalities are used in hospitals and in mammography screening centers, as comparison of mammograms from subsequent examinations of a client is an important part of the diagnostic procedure. {A} parametric method is presented to register a {FS} mammogram and a {FFD} mammogram of the same woman with respect to geometry and gray-scales. {T}he main motivation for the study is to lessen irrelevant differences between mammograms due to acquisition. {F}irst, a technique like this might increase the radiologist's ability to detect relevant differences like abnormal growth in breast tissue that signal breast cancer. {S}econd, applications may be found in subtraction radiology or in computer-aided detection of abnormalities in temporal mammograms. {T}he proposed method is based on a parametric model of the most important aspects of acquisition, which relates the pixel values of two images. {T}his encompasses (1) breast positioning; (2) breast compression; (3) exposure time; (4) incident radiation intensity; and (5a) film properties and digitization for {FS} mammograms, or (5b) detector response for {FFD} mammograms. {T}he method does not require a priori knowledge about specific settings of acquisition; the parameters are estimated from the two mammograms themselves.}, + file = {Snoe07.pdf:pdf\\Snoe07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {17208511}, + month = {4}, + gsid = {15703766258730611185}, + gscites = {12}, + ss_id = {2a2d8905070d3dab2cdf73577193113a058326ac}, + all_ss_ids = {['2a2d8905070d3dab2cdf73577193113a058326ac']}, +} + +@inproceedings{Snoe10, + author = {P. R. Snoeren and G. Litjens and B. van Ginneken and N. Karssemeijer}, + title = {Training a Computer Aided Detection System with Simulated Lung Nodules in Chest Radiographs}, + booktitle = {The Third International Workshop on Pulmonary Image Analysis}, + year = {2010}, + pages = {139--149}, + abstract = {This paper addresses the hypothesis that artificially implanted lung nodules from computed tomography exams (CT exams) into chest radiographs can improve the performance of a computer aided detection system (CAD system). Twenty-four three-dimensional lung nodules were segmented and projected in five directions, mimicking 120 X-rayed nodules. The CAD system was tested by fivefold cross validation on a publicly available database. The results were evaluated by a free-response receiver operating characteristic analysis (FROC). 
It was found that the performance of the CAD system trained with simulated nodules comes close to the performance of state of the art CAD systems that are trained with real nodules. The CAD system trained with real nodules did improve by adding simulated nodules, but only when there were few real nodules used for training.}, + file = {Snoe10.pdf:pdf\\Snoe10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {6149147987068257017}, + gscites = {10}, + ss_id = {9d273b257071f18da2d269a81a635f1e5dac740e}, + all_ss_ids = {['9d273b257071f18da2d269a81a635f1e5dac740e']}, +} + +@article{Snoe21, + author = {Snoeckx, Annemiek and Franck, Caro and Silva, Mario and Prokop, Mathias and Schaefer-Prokop, Cornelia and Revel, Marie-Pierre}, + title = {The radiologist's role in lung cancer screening}, + doi = {10.21037/tlcr-20-924}, + year = {2021}, + abstract = {The radiologist's role in lung cancer screening}, + url = {http://dx.doi.org/10.21037/TLCR-20-924}, + file = {Snoe21.pdf:pdf\Snoe21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Translational Lung Cancer Research}, + citation-count = {6}, + automatic = {yes}, + pages = {2356-2367}, + volume = {10}, +} + +@article{Soga18, + author = {{Sogancioglu}, E. and {Hu}, S. and {Belli}, D. and {van Ginneken}, B.}, + title = {Chest {X}-ray Inpainting with Deep Generative Models}, + journal = {arXiv:1809.01471}, + year = {2018}, + abstract = {Generative adversarial networks have been successfully applied to inpainting in natural images. However, the current state-of-the-art models have not yet been widely adopted in the medical imaging domain. In this paper, we investigate the performance of three recently published deep learning based inpainting models: context encoders, semantic image inpainting, and the contextual attention model, applied to chest x-rays, as the chest exam is the most commonly performed radiological procedure. We train these generative models on 1.2M 128 $\times$ 128 patches from 60K healthy x-rays, and learn to predict the center 64 $\times$ 64 region in each patch. We test the models on both the healthy and abnormal radiographs. We evaluate the results by visual inspection and comparing the PSNR scores. The outputs of the models are in most cases highly realistic. We show that the methods have potential to enhance and detect abnormalities. In addition, we perform a 2AFC observer study and show that an experienced human observer performs poorly in detecting inpainted regions, particularly those generated by the contextual attention model.}, + optnote = {DIAG}, + month = {8}, + gsid = {9158256103222862149}, + gscites = {15}, + ss_id = {eb6f5da9826e08c4d940267c5c90bcbbce268f46}, + all_ss_ids = {['eb6f5da9826e08c4d940267c5c90bcbbce268f46']}, +} + +@article{Soga20, + author = {E. {Sogancioglu} and K. {Murphy} and E. {Calli} and E. T. {Scholten} and S. {Schalekamp} and B. {Van Ginneken}}, + title = {Cardiomegaly Detection on Chest Radiographs: Segmentation Versus Classification}, + journal = {IEEE Access}, + year = {2020}, + volume = {8}, + pages = {94631-94642}, + doi = {10.1109/ACCESS.2020.2995567}, + url = {https://ieeexplore.ieee.org/document/9096290}, + abstract = {In this study, we investigate the detection of cardiomegaly on frontal chest radiographs through two alternative deep-learning approaches - via anatomical segmentation and via image-level classification. 
We used the publicly available ChestX-ray14 dataset, and obtained heart and lung segmentation annotations for 778 chest radiographs for the development of the segmentation-based approach. The classification-based method was trained with 65k standard chest radiographs with image-level labels. For both approaches, the best models were found through hyperparameter searches where architectural, learning, and regularization related parameters were optimized systematically. The resulting models were tested on a set of 367 held-out images for which cardiomegaly annotations were hand-labeled by two independent expert radiologists. Sensitivity, specificity, positive predictive value, negative predictive value, and area under the receiver operating characteristic curve (AUC) were calculated. The performance of the segmentation-based system with an AUC of 0.977 is significantly better for classifying cardiomegaly than the classification-based model which achieved an AUC of 0.941. Only the segmentation-based model achieved comparable performance to an independent expert reader (AUC of 0.978). We conclude that the segmentation-based model requires 100 times fewer annotated chest radiographs to achieve a substantially better performance, while also producing more interpretable results.}, + file = {Soga20.pdf:pdf\\Soga20.pdf:PDF}, + optnote = {DIAG, INPRESS, RADIOLOGY}, + ss_id = {4e152f76ccc69de9323ff63ca8fc5bbb61fa1712}, + all_ss_ids = {['4e152f76ccc69de9323ff63ca8fc5bbb61fa1712']}, + gscites = {22}, +} + +@article{Soga22, + author = {Sogancioglu, Ecem and Murphy, Keelin and Th Scholten, Ernst and Boulogne, Luuk H. and Prokop, Mathias and van Ginneken, Bram}, + year = {2022}, + journal = MP, + title = {Automated estimation of total lung volume using chest radiographs and deep learning}, + doi = {10.1002/mp.15655}, + number = {7}, + pages = {4466--4477}, + volume = {49}, + abstract = {BACKGROUND: Total lung volume is an important quantitative biomarker and is used for the assessment of restrictive lung diseases. + PURPOSE: In this study, we investigate the performance of several deep-learning approaches for automated measurement of total lung volume from chest radiographs. + METHODS: About 7621 posteroanterior and lateral view chest radiographs (CXR) were collected from patients with chest CT available. Similarly, 928 CXR studies were chosen from patients with pulmonary function test (PFT) results. The reference total lung volume was calculated from lung segmentation on CT or PFT data, respectively. This dataset was used to train deep-learning architectures to predict total lung volume from chest radiographs. The experiments were constructed in a stepwise fashion with increasing complexity to demonstrate the effect of training with CT-derived labels only and the sources of error. The optimal models were tested on 291 CXR studies with reference lung volume obtained from PFT. Mean absolute error (MAE), mean absolute percentage error (MAPE), and Pearson correlation coefficient (Pearson's r) were computed. + RESULTS: The optimal deep-learning regression model showed an MAE of 408 ml and an MAPE of 8.1\% using both frontal and lateral chest radiographs as input. The predictions were highly correlated with the reference standard (Pearson's r = 0.92). CT-derived labels were useful for pretraining but the optimal performance was obtained by fine-tuning the network with PFT-derived labels. 
+ CONCLUSION: We demonstrate, for the first time, that state-of-the-art deep-learning solutions can accurately measure total lung volume from plain chest radiographs. The proposed model is made publicly available and can be used to obtain total lung volume from routinely acquired chest radiographs at no additional cost. This deep-learning system can be a useful tool to identify trends over time in patients referred regularly for chest X-ray.}, + file = {PubMed entry:http\://www.ncbi.nlm.nih.gov/pubmed/35388486:text/html}, + pmid = {35388486}, + ss_id = {1d5f65d8f721089fca1e7fac8d1ef214f12e2c23}, + all_ss_ids = {['1d5f65d8f721089fca1e7fac8d1ef214f12e2c23']}, + gscites = {4}, +} + +@article{Soly15, + author = {Solyanik, Olga and Hollmann, Patrick and Dettmer, Sabine and Kaireit, Till and Schaefer-Prokop, Cornelia and Wacker, Frank and Vogel-Claussen, Jens and Shin, Hoen-Oh}, + title = {Quantification of Pathologic Air Trapping in Lung Transplant Patients Using {CT} Density Mapping: Comparison with Other {CT} Air Trapping Measures}, + journal = PLOSONE, + year = {2015}, + volume = {10}, + pages = {e0139102}, + doi = {10.1371/journal.pone.0139102}, + abstract = {To determine whether density mapping (DM) is more accurate for detection and quantification of pathologic air trapping (pAT) in patients after lung transplantation compared to other CT air trapping measures. One-hundred forty-seven lung and heart-lung transplant recipients underwent CT-examinations at functional residual capacity (FRC) and total lung capacity (TLC) and PFT six months after lung transplantation. Quantification of air trapping was performed with the threshold-based method in expiration (EXP), density mapping (DM) and the expiratory to inspiratory ratio of the mean lung density (E/I-ratio MLD). A non-rigid registration of inspiration-expiration CT-data with a following voxel-to-voxel mapping was carried out for DM. Systematic variation of attenuation ranges was performed for EXP and DM and correlated with the ratio of residual volume to total lung capacity (RV/TLC) by Spearman rank correlation test. AT was considered pathologic if RV/TLC was above the 95th percentile of the predicted upper limit of normal values. Receiver operating characteristic (ROC) analysis was performed. The optimal attenuation range for the EXP method was from -790 HU to -950 HU (EXP-790 to -950HU) (r = 0.524, p<0.001) to detect air trapping. Within the segmented lung parenchyma, AT was best defined as voxel difference less than 80 HU between expiration and registered inspiration using the DM method. DM correlated best with RV/TLC (r = 0.663, p<0.001). DM and E/I-ratio MLD showed a larger AUC (0.78; 95\% CI 0.69-0.86; 0.76, 95\% CI 0.67-0.85) than EXP -790 HU to -950 HU (0.71, 95\% CI 0.63-0.78). DM and E/I-ratio MLD showed better correlation with RV/TLC and are more suited quantitative CT-methods to detect pAT in lung transplant patients than the EXP-790HU to -950HU.}, + file = {Soly15.pdf:pdf\\Soly15.pdf:PDF}, + optnote = {DIAG}, + number = {10}, + pmid = {26430890}, + month = {10}, + ss_id = {21a62e34c2b257a66a7f2f539364594b7756a681}, + all_ss_ids = {['21a62e34c2b257a66a7f2f539364594b7756a681']}, + gscites = {19}, +} + +@mastersthesis{Sons19, + author = {Patrick Sonsma}, + title = {Lymphocyte detection in hematoxylin-eosin stained histopathological images of breast cancer}, + abstract = {Lymphocytes are immune cells that form an important bio-marker in the prognosis of breast cancer. 
In some cases more effective treatment can be chosen based on the lymphocyte presence near tumor regions. For trained pathologists the detection of lymphocytes in Hematoxylin-Eosin stained images is however a challenging and time intensive task with subjective interpretations. In this research we explore the lymphocyte detection problem with a deep learning approach and strive towards a robust, objective and efficient tool for computer aided diagnosis. + We generate a large data-set with machine produced labels by applying an existing model on destained and restained immunohistochemical histopathological images. On this data we train and evaluate a more minimal rendition of the known YOLO object detection model and report moderate results.}, + file = {Sons19.pdf:pdf\\Sons19.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + year = {2019}, + journal = {Master thesis}, +} + +@article{Spre18, + author = {Sprem, Jurica and de Vos, Bob D. and Lessmann, Nikolas and de Jong, Pim A. and Viergever, Max A. and Isgum, Ivana}, + title = {Impact of automatically detected motion artifacts on coronary calcium scoring in chest computed tomography}, + journal = JMI, + year = {2018}, + volume = {5}, + pages = {044007}, + doi = {10.1117/1.JMI.5.4.044007}, + optnote = {DIAG}, + file = {Spre18.pdf:pdf\\Spre18.pdf:PDF}, +} + +@article{Spre18a, + author = {Sprem, Jurica and de Vos, Bob D. and Lessmann, Nikolas and van Hamersvelt, Robbert W. and Greuter, Marcel J. W. and de Jong, Pim A. and Leiner, Tim and Viergever, Max A. and Isgum, Ivana}, + title = {Coronary calcium scoring with partial volume correction in anthropomorphic thorax phantom and screening chest {CT} images}, + journal = PLOSONE, + year = {2018}, + volume = {13}, + pages = {e0209318}, + doi = {10.1371/journal.pone.0209318}, + optnote = {DIAG}, + file = {Spre18a.pdf:pdf\\Spre18a.pdf:PDF}, +} + +@mastersthesis{Spro20, + author = {Spronck, J.}, + title = {Multi conditional lung nodule synthesis for improved nodule malignancy classification in Computed Tomography scans}, + year = {2020}, + url = {http://www.scriptiesonline.uba.uva.nl/en/scriptie/639430}, + abstract = {Deep learning systems are increasingly being researched for the development of Computer-Aided Diagnosis (CAD) of lung nodules in lung cancer screening. In order to achieve clinically relevant accuracy, these supervised deep learning systems rely on large amounts of annotated data. However, the availability of labels in medical imaging is rather limited due to the costs involved in obtaining annotations from expert clinicians. To compensate for this data scarcity, the synthesis of additional training data by Generative Adversarial Networks (GANs) has gained increased attention and has shown to be useful for the improvement of several supervised learning tasks. This study compares multiple advanced training methods involving synthesized data from a multi-conditional-Wasserstein-GAN (mcWGAN) to improve a nodule malignancy classifier. It specifically examines the potential of a mcWGAN to generate synthetic nodules that are hard-to-classify through input conditions from misclassified nodules. This approach was compared with conventional nodule synthesis, where the input conditions are sampled from the distributions of all nodules, rather than the misclassified nodules solely. We examined whether the use of pre-training on synthesized nodules or ImageNet further improves the classifier's performance. 
The proposed mcWGAN proved to be successful at generating a wide variety of additional training data with manipulable malignancy and nodule diameter attributes. We show that ImageNet pre-training, combined with synthetic data augmentation, consistently outperforms other training approaches.}, + file = {Spro17.pdf:pdf/Spro17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Universiteit van Amsterdam}, + journal = {Master thesis}, +} + +@conference{Spro22, + author = {Spronck, J. and Eekelen, L.V. and Tessier, L. and Bogaerts, J. and van der Woude, L. and van den Heuvel, M. and Theelen, W. and Ciompi, F.}, + title = {Deep learning-based quantification of immune infiltrate for predicting response to pembrolizumab from pre-treatment biopsies of metastatic non-small cell lung cancer: A study on the PEMBRO-RT phase II trial}, + booktitle = {Immuno-Oncology and Technology}, + year = {2022}, + abstract = {Background + Immunotherapy has become the standard of care for metastatic non-small cell lung cancer (mNSCLC) without a targetable driver alteration, yet we still lack insight into which patients (pts) will benefit from such treatments. To that end, we investigated characteristics of the immune infiltrate in the tumor microenvironment in relation to immunotherapy response. We report the results of an automated deep learning approach applied to digital H&E whole slide images (WSIs) of pre-treatment biopsies from the PEMBRO-RT clinical trial. + + Methods + 61 quality-checked H&E WSIs were processed with 3 deep learning algorithms. We extracted a tissue mask using an existing method (Bandi et al., 2019), and detected tumor and immune cells using HoVerNet (Graham et al., 2019). Tumor clusters were identified by combining the output of HoVerNet and tumor segmentation from an nnUnet (Isensee et al., 2021) model that we trained on external NSCLC images. From the output of this pipeline, we extracted immune infiltrate-based density metrics, calculated over all tissue (allINF), stroma within 500um from the tumor border (sINF), tumor region (tINF), and the combination of stroma and tumor (t+sINF). All metrics were used in ROC analysis after dichotomizing pts as responders and non-responders (response was defined as complete or partial response at any time point or stable disease for >=12 weeks according to RECIST 1.1 measurement). Differences in metric distributions between the two groups were tested with a two-sided Welch t-test. Kaplan-Meier (KM) analysis was performed on progression-free survival (5-year follow-up). + + Results + Our automated analysis reported denser immune infiltrates in responders, although not statistically significant (p > 0.05). In the ROC analysis, all density metrics reached an AUC > 0.63, where tINF reported an AUC of 0.70. KM analysis showed p=0.07 if pts were stratified based on the median tINF, and p=0.02 if stratified based on the optimal operating point of its ROC curve. 
+ + Conclusions + Deep learning models that analyze the immune infiltrate density on H&E WSIs can identify mNSCLC responders to pembrolizumab.}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Spro23, + author = {Joey Spronck and Thijs Gelton and Leander van Eekelen and Joep Bogaerts and Leslie Tessier and Mart van Rijthoven and Lieke van der Woude and Michel van den Heuvel and Willemijn Theelen and Jeroen van der Laak and Francesco Ciompi}, + booktitle = MIDL, + title = {nnUNet meets pathology: bridging the gap for application to whole-slide images and computational biomarkers}, + url = {https://openreview.net/forum?id=aHuwlUu_QR}, + abstract = {Image segmentation is at the core of many tasks in medical imaging, including the engineering of computational biomarkers. While the self-configuring nnUNet framework for image segmentation tasks completely shifted the state-of-the-art in the radiology field, it has never been applied to the field of histopathology, likely due to inherent limitations that nnUNet has when applied to the pathology domain "off-the-shelf". We introduce nnUNet for pathology, built upon the original nnUNet, and engineered domain-specific solutions to bridge the gap between radiology and pathology. Our framework includes critical hyperparameter adjustments and pathology-specific color augmentations, as well as an essential whole-slide image inference pipeline. We developed and validated our approach on non-small cell lung cancer data, showing the effectiveness of nnUNet for pathology over default nnUNet settings, and achieved the first position on the experimental segmentation task of the TIGER challenge on breast cancer data when using our pipeline "out-of-the-box". We also introduce a novel inference uncertainty approach, which proved helpful for the quantification of the tumor-infiltrating lymphocytes biomarker in non-small cell lung cancer biopsies of patients treated with immunotherapy. We coded our framework as a workflow-friendly segmentation tool and made it publicly available.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@article{Srik12, + author = {Srikantha, Abhilash and Harz, Markus and Wang, Lei and Platel, Bram and Mann, Ritse M. and Hahn, Horst K. and Peitgen, Heinz-Otto}, + title = {Symmetry-based detection of ductal carcinoma in situ in breast {MRI}}, + journal = EJR, + year = {2013}, + volume = {81 Suppl 1}, + pages = {S158--S159}, + doi = {10.1016/S0720-048X(12)70066-9}, + file = {Srik12.pdf:pdf/Srik12.pdf:PDF}, + optnote = {DIAG}, + pmid = {23083573}, + month = {9}, + gsid = {10081678298073820973}, + gscites = {2}, + ss_id = {e5f7b5efa305a5936b7a89db4a30c35a8e51fe76}, + all_ss_ids = {['e5f7b5efa305a5936b7a89db4a30c35a8e51fe76']}, +} + +@inproceedings{Srik13, + author = {Srikantha, Abhilash and Harz, Markus T. and Newstead, Gillian and Wang, Lei and Platel, Bram and Hegenscheid, Katrin and Mann, Ritse M. and Hahn, Horst K. and Peitgen, Heinz-Otto}, + title = {Symmetry-based detection and diagnosis of DCIS in breast {MRI}}, + booktitle = MI, + year = {2013}, + series = SPIE, + pages = {86701E--86701E-8}, + doi = {10.1007/978-3-642-40602-7_28}, + abstract = {Detecting early stage breast cancers like Ductal Carcinoma In Situ (DCIS) is important, as it supports effective and minimally invasive treatments. Although Computer Aided Detection/Diagnosis (CADe/ CADx) systems have been successfully employed for highly malignant carcinomas, their performance on DCIS is inadequate. 
In this context, we propose a novel approach combining symmetry, kinetics and morphology that achieves superior performance. We base our work on contrast enhanced data of 18 pure DCIS cases with hand annotated lesions and 9 purely normal cases. The overall sensitivity and specificity of the system stood at 89\% each.}, + file = {Srik13.pdf:pdf/Srik13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {12935671979757870060}, + gscites = {6}, +} + +@inproceedings{Staa02, + author = {J. J. Staal and S. N. Kalitzin and B. van Ginneken and M. D. Abr\`amoff and T. Berendschot and M. A. Viergever}, + title = {Classifying convex sets for vessel detection in retinal images}, + booktitle = ISBI, + year = {2002}, + pages = {269--272}, + doi = {10.1109/ISBI.2002.1029245}, + abstract = {{W}e present a method to detect vessels in images of the retina. {I}nstead of relying on pixel classification, as many detection algorithms do, we propose a more natural representation for elongated structures, such as vessels. {T}his new representation consists of primitives called affine convex sets. {O}n these convex sets we apply the classification step. {T}he reason for choosing this approach is two-fold: (1) {B}y using a dedicated representation of image structures, one can exploit prior knowledge. (2) {A} method based on pixel classification is often computationally unattractive. {T}he method can also be applied to other image structures, if an appropriate representation for the structures is chosen. {T}he method was tested on fundus reflection images. {W}e obtained an accuracy of 0.897, a sensitivity of 0.700 and a specificity of 0.923.}, + file = {Staa02.pdf:pdf\\Staa02.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13070870055231707928}, + gscites = {14}, + ss_id = {d5ea31e555f04e668dce705c220241750de6b4f6}, + all_ss_ids = {['d5ea31e555f04e668dce705c220241750de6b4f6']}, +} + +@phdthesis{Staa04, + author = {J. J. Staal}, + title = {Segmentation of elongated structures in medical images}, + year = {2004}, + url = {http://dspace.library.uu.nl/handle/1874/1250}, + abstract = {{T}he research described in this thesis concerns the automatic detection, recognition and segmentation of elongated structures in medical images. {F}or this purpose techniques have been developed to detect subdimensional pointsets (e.g. ridges, edges) in images of arbitrary dimension. {T}hese pointsets are grouped into primitives, such as line elements and surface patches. {T}he primitives form the basis for recognition and segmentation task, which is accomplished with classifiers from statistical pattern recognition. {T}wo applications are given: segmentation of the vasculature in color images of the human retina and detection, labeling and segmentation of ribs in {CT}-scans (computed tomography) of the thorax.}, + copromotor = {B. van Ginneken and S. N. Kalitzin}, + file = {Staa04.pdf:pdf\\Staa04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {M. A. Viergever}, + school = {Utrecht University}, + gsid = {13977393024038699968}, + gscites = {5}, + journal = {PhD thesis}, +} + +@article{Staa04a, + author = {Staal, J. J. and Abr\`amoff, M. D. and Niemeijer, M. and Viergever, M. A. and van Ginneken, B.}, + title = {Ridge Based Vessel Segmentation in Color Images of the Retina}, + journal = TMI, + year = {2004}, + volume = {23}, + number = {4}, + pages = {501-509}, + doi = {10.1109/TMI.2004.825627}, + abstract = {{A} method is presented for automated segmentation of vessels in two-dimensional color images of the retina. 
{T}his method can be used in computer analyses of retinal images, e.g., in automated screening for diabetic retinopathy. {T}he system is based on extraction of image ridges, which coincide approximately with vessel centerlines. {T}he ridges are used to compose primitives in the form of line elements. {W}ith the line elements an image is partitioned into patches by assigning each image pixel to the closest line element. {E}very line element constitutes a local coordinate frame for its corresponding patch. {F}or every pixel, feature vectors are computed that make use of properties of the patches and the line elements. {T}he feature vectors are classified using a k{NN}-classifier and sequential forward feature selection. {T}he algorithm was tested on a database consisting of 40 manually labeled images. {T}he method achieves an area under the receiver operating characteristic curve of 0.952. {T}he method is compared with two recently published rule-based methods of {H}oover et al. and {J}iang et al. {T}he results show that our method is significantly better than the two rule-based methods (p < 0.01). {T}he accuracy of our method is 0.944 versus 0.947 for a second observer.}, + file = {Staa04a.pdf:pdf/Staa04a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {15084075}, + month = {4}, + gsid = {1421589366492561845}, + gscites = {2720}, + ss_id = {ac9748ea3945eb970cc32a37db7cfdfd0f22e74c}, + all_ss_ids = {['ac9748ea3945eb970cc32a37db7cfdfd0f22e74c']}, +} + +@inproceedings{Staa04b, + author = {J. J. Staal and B. van Ginneken and M. A. Viergever}, + title = {Automatic rib segmentation in {CT} data}, + booktitle = {Computer Vision Approaches to Medical Image Analysis and Mathematical Methods in Biomedical Image Analysis}, + year = {2004}, + volume = {3117}, + series = LNCS, + pages = {193--204}, + abstract = {{A} supervised method is presented for the detection and segmentation of ribs in computed tomography ({CT}) data. {I}n a first stage primitives are extracted that represent parts of the centerlines of elongated structures. {E}ach primitive is characterized by a number of features computed from local image structure. {F}or a number of training cases, the primitives are labeled by a human observer into two classes (rib vs. non-rib). {T}his data is used to train a classifier. {N}ow, primitives obtained from any image can be labeled automatically. {I}n a final stage the primitives classified as ribs are used to initialize a seeded region growing process to obtain the complete rib cage.}, + file = {Staa04b.pdf:pdf\\Staa04b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {917845112200227194}, + gscites = {16}, + ss_id = {e2a49c9e56d53f9b09ed50de495452bbf74adc03}, + all_ss_ids = {['e2a49c9e56d53f9b09ed50de495452bbf74adc03']}, +} + +@article{Staa07, + author = {J. Staal and B. van Ginneken and M. A. Viergever}, + title = {Automatic rib segmentation and labeling in computed tomography scans using a general framework for detection, recognition and segmentation of objects in volumetric data}, + journal = MIA, + year = {2007}, + volume = {11}, + pages = {35--46}, + doi = {10.1016/j.media.2006.10.001}, + abstract = {{A} system for automatic segmentation and labeling of the complete rib cage in chest {CT} scans is presented. {T}he method uses a general framework for automatic detection, recognition and segmentation of objects in three-dimensional medical images. 
{T}he framework consists of five stages: (1) detection of relevant image structures, (2) construction of image primitives, (3) classification of the primitives, (4) grouping and recognition of classified primitives and (5) full segmentation based on the obtained groups. {F}or this application, first 1{D} ridges are extracted in 3{D} data. {T}hen, primitives in the form of line elements are constructed from the ridge voxels. {N}ext a classifier is trained to classify the primitives in foreground (ribs) and background. {I}n the grouping stage centerlines are formed from the foreground primitives and rib numbers are assigned to the centerlines. {I}n the final segmentation stage, the centerlines act as initialization for a seeded region growing algorithm. {T}he method is tested on 20 {CT}-scans. {O}f the primitives, 97.5\% is classified correctly (sensitivity is 96.8\%, specificity is 97.8\%). {A}fter grouping, 98.4\% of the ribs are recognized. {T}he final segmentation is qualitatively evaluated and is very accurate for over 80\% of all ribs, with slight errors otherwise.}, + file = {Staa07.pdf:pdf\\Staa07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {17126065}, + month = {2}, + gsid = {6032541118265568544}, + gscites = {72}, + ss_id = {6c776183e47ff076b5de754dfd1f3942e1aecbac}, + all_ss_ids = {['6c776183e47ff076b5de754dfd1f3942e1aecbac']}, +} + +@inproceedings{Stan18, + author = {Kai Standvoss and Tanja Crijns and Lisa Goerke and Demian Janssen and Simon Kern and Timo van Niedek and Joris van Vugt and Natali Alfonso Burgos and Emma J. Gerritse and Justin Mol and Dion van de Vooren and Mohsen Ghafoorian and Thomas L.A. van den Heuvel and Rashindra Manniesing}, + title = {Cerebral Microbleed Detection in Traumatic Brain Injury Patients using {3D} Convolutational Neural Networks}, + booktitle = MI, + year = {2018}, + volume = {10575}, + series = SPIE, + doi = {10.1117/12.2294016}, + abstract = {The number and location of cerebral microbleeds (CMBs) in patients with traumatic brain injury (TBI) is important to determine the severity of trauma and may hold prognostic value for patient outcome. However, manual assessment is subjective and time-consuming due to the resemblance of CMBs to blood vessels, the possible presence of imaging artifacts, and the typical heterogeneity of trauma imaging data. In this work, we present a computer aided detection system based on 3D convolutional neural networks for detecting CMBs in 3D susceptibility weighted images. Network architectures with varying depth were evaluated. Data augmentation techniques were employed to improve the networks' generalization ability and selective sampling was implemented to handle class imbalance. The predictions of the models were clustered using a connected component analysis. The system was trained on ten annotated scans and evaluated on an independent test set of eight scans. Despite this limited data set, the system reached a sensitivity of 0.87 at 16.75 false positives per scan (2.5 false positives per CMB), outperforming related work on CMB detection in TBI patients.}, + file = {:pdf/Stan18.pdf:PDF}, + optnote = {DIAG}, + month = {2}, + ss_id = {a5535bb713b3cf4546242a3434877c95ee02b02b}, + all_ss_ids = {['a5535bb713b3cf4546242a3434877c95ee02b02b']}, + gscites = {11}, +} + +@article{Star09, + author = {M. Staring and J. P. W. Pluim and B. J. de Hoop and S. Klein and B. van Ginneken and H. Gietema and G. Nossent and C. M. Schaefer-Prokop and S. van de Vorst and M. 
Prokop}, + title = {Image Subtraction Facilitates Assessment of Volume and Density Change in Ground-Glass Opacities in Chest {CT}}, + journal = IR, + year = {2009}, + volume = {44}, + pages = {61--66}, + doi = {10.1097/RLI.0b013e318197fcb7}, + abstract = {{O}bjectives: {T}o study the impact of image subtraction of registered images on the detection of change in pulmonary ground-glass nodules identified on chest {CT}. {M}aterials and {M}ethods: {A} cohort of 33 individuals (25 men, 8 women; age range 51-75 years) with 37 focal ground-glass opacities ({GGO}) were recruited from a lung cancer screening trial. {F}or every participant, 1 to 3 follow-up scans were available (total number of pairs, 84). {P}airs of scans of the same nodule were registered nonrigidly and then subtracted to enhance differences in size and density. {F}our observers rated size and density change of the {GGO} between pairs of scans by visual comparison alone and with additional availability of a subtraction image and indicated their confidence. {A}n independent experienced chest radiologist served as an arbiter having all reader data, clinical data, and follow-up examinations available. {N}odule pairs for which the arbiter could not establish definite progression, regression, or stability were excluded from further evaluation. {T}his left 59 and 58 pairs for evaluation of size and density change, respectively. {W}eighted kappa statistics were used to assess interobserver agreement and agreement with the arbiter. {S}tatistical significance was tested with a z-test. {R}esults: {W}hen the subtraction image was available, the average interobserver improved from 0.52 to 0.66 for size change and from 0.47 to 0.57 for density change. {A}verage agreement with the arbiter improved from 0.61 to 0.76 for size change and from 0.53 to 0.64 for density change. {T}he effect was more pronounced when observer confidence without the subtraction image was low: agreement improved from 0.26 to 0.57 and from 0.19 to 0.47 in those cases. {C}onclusions: {I}mage subtraction improves the evaluation of subtle changes in pulmonary ground-glass opacities and decreases interobserver variability.}, + file = {Star09.pdf:pdf\\Star09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {19104438}, + month = {2}, + gsid = {16017624786647148270}, + gscites = {18}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/80400}, + ss_id = {d64d150155c9e1995efae28446625f1dfced6031}, + all_ss_ids = {['d64d150155c9e1995efae28446625f1dfced6031']}, +} + +@inproceedings{Stav96, + author = {M. Stavridi and B. van Ginneken and J. J. Koenderink}, + title = {Surface {BRDF} and texture of bricks}, + booktitle = {Three-Dimensional and Unconventional Imaging for Industrial Inspection and Metrology}, + year = {1996}, + volume = {2599}, + series = SPIE, + pages = {406-417}, + doi = {10.1117/12.230398}, + abstract = {{W}e analyze material properties underlying visual appearance, such as surface bidirectional reflection distribution function ({BRDF}) and texture. {W}e perform gonioradiometric measurements on bricks and fit the data to sets of models of specular and diffuse reflectance on rough surfaces in order to describe the composite reflection mechanisms, of the surfaces under study. 
{W}e also acquire images and perform image texture statistical discrimination techniques to determine the textural differences in the surface appearance, resulting from the variation of illumination and viewing.}, + file = {:pdf\\Stav96.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {1}, + gsid = {16094171393473022824}, + gscites = {4}, +} + +@article{Stav97, + author = {M. Stavridi and B. van Ginneken and J. J. Koenderink}, + title = {Surface bidirectional reflection distribution function and the texture of bricks and tiles}, + journal = APOPT, + year = {1997}, + volume = {36}, + pages = {3717-3725}, + doi = {10.1364/AO.36.003717}, + abstract = {{W}e study and analyze properties underlying the visual appearance of materials such as the surface bidirectional reflection distribution function and texture. {T}he spatial distribution of scattered light in relation to the incident light determines the surface appearance and can be partly specified by the bidirectional reflection distribution function, which is defined as the directionally dependent ratio of radiance to irradiance. {W}e perform gonioradiometric measurements on samples of bricks and tiles. {T}o describe the reflection mechanisms in the surfaces under study, we combine models of specular and diffuse reflectance from rough surfaces and fit them to the experimental data. {W}e also collect images and determine the textural differences in the surface appearance, resulting from the variation in the illumination direction and the viewing directions.}, + file = {Stav97.pdf:pdf\\Stav97.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {16}, + pmid = {18253397}, + month = {6}, + gsid = {3106951890713888296}, + gscites = {15}, +} + +@article{Stee17, + author = {Steens, Stefan C A and Bekers, Elise M and Weijs, Willem L J and Litjens, Geert J S and Veltien, Andor and Maat, Arie and van den Broek, Guido B and van der Laak, Jeroen A W M and Futterer, Jurgen J and van der Kaa, Christina A Hulsbergen and Merkx, Matthias A W and Takes, Robert P}, + title = {Evaluation of tongue squamous cell carcinoma resection margins using ex-vivo MR.}, + volume = {12}, + issue = {5}, + pages = {821-828}, + doi = {10.1007/s11548-017-1524-6}, + abstract = {Purpose of this feasibility study was (1) to evaluate whether application of ex-vivo 7T MR of the resected tongue specimen containing squamous cell carcinoma may provide information on the resection margin status and (2) to evaluate the research and developmental issues that have to be solved for this technique to have the beneficial impact on clinical outcome that we expect: better oncologic and functional outcomes, better quality of life, and lower costs. We performed a non-blinded validation of ex-vivo 7T MR to detect the tongue squamous cell carcinoma and resection margin in 10 fresh tongue specimens using histopathology as gold standard. In six of seven specimens with a histopathologically determined invasion depth of the tumor of [Formula: see text] mm, the tumor could be recognized on MR, with a resection margin within a 2 mm range as compared to histopathology. In three specimens with an invasion depth of [Formula: see text] mm, the tumor was not visible on MR. Technical limitations mainly included scan time, image resolution, and the fact that we used a less available small-bore 7T MR machine. 
Ex-vivo 7T probably will have a low negative predictive value but a high positive predictive value, meaning that in tumors thicker than a few millimeters we expect to be able to predict whether the resection margin is too small. A randomized controlled trial needs to be performed to show our hypothesis: better oncologic and functional outcomes, better quality of life, and lower costs.}, + file = {:pdf/Stee17.pdf:PDF}, + journal = IJCARS, + month = {may}, + optnote = {RADIOLOGY}, + pmid = {28130702}, + year = {2017}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/174271}, + ss_id = {17e7607acf9b2aeda7b32e369df4a5ea2f903194}, + all_ss_ids = {['17e7607acf9b2aeda7b32e369df4a5ea2f903194']}, + gscites = {21}, +} + +@conference{Steg09, + author = {D. F. Stegeman and G. A. van Elswijk and W. J. M. van de Ven and B. U. Kleine}, + title = {The three frequencies: the alpha-motoneuron pool as transmitter of rhythmicities in cortico-spinal motor drive}, + booktitle = {International workshop and conference on human reflexes: wiring and firing of motoneurons}, + year = {2009}, + abstract = {In the study of corticospinal communication, the conceptual mechanism called communication through coherence (CTC, Fries, 2005) gets increasing support by experimental evidence. The concept comprises the assumption that neuronal groups communicate through coherent oscillatory activity in specific frequency bands roughly in the range between 10 - 80 Hz, divided in alpha-, the beta- and the gamma-band. Especially when corticospinal interaction gives support to the CTC principle, this is regarded as strong evidence (Van Elswijk, 2007). An interesting question for the corticospinal tract is how centrally evoked oscillatory phenomena are translated by the alpha-motoneuron pool into motoneuron activity. Thereby, the properties of the alpha-motoneuron, especially its long afterhyperpolarized (AHP) phase, are utmost relevant in signal transmission. A number of authors have already looked into such spinal transmission and motor neuron firing patterns (e.g. Farina et al., 2004; Meyers et al., 2007; Williams and Baker, 2009). Three different types frequencies or better frequency bands play a role in the discussion. First, there is a specific alpha-, beta-, or gamma-frequency band in which the corticospinal interaction apparently occurs (the 1st frequency). Since frequencies in these bands are in the order of the firing frequencies of motor neurons (the 2nd frequency), attempts are made to interpret the couplings in the 1st frequency directly from this 2nd one. We will elucidate that for a proper understanding these have to be separated conceptually. And then the frequency content of the involved electrophysiological signals (EEG, MEG, EMG; the 3rd frequency) plays a role. They lead for instance to a regular question whether and why the EMG signal should be rectified before a coherence analysis is done, a question hardly ever posed for the EEG signal. By using a model for corticospinal transmission (Kleine et al., 2001, Matthews. 1996), we investigated the effectiveness and the properties of oscillatory central drive transmission to the muscle, its relation with the firing patterns of the alpha-motoneurons, the mutual interaction of those firing patterns, and the consequences for especially the surface EMG signal. In this attempt, we clearly distinguish the three frequency bands involved from the beginning to avoid confusion.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Steg10, + author = {D. F. Stegeman and W. J. 
M. van de Ven and G. A. van Elswijk and R. Oostenveld and B. U. Kleine}, + title = {The alpha-motoneuron pool as transmitter of rhythmicities in cortical motor drive}, + journal = CLINNP, + year = {2010}, + volume = {121}, + pages = {1633--1642}, + doi = {10.1016/j.clinph.2010.03.052}, + abstract = {OBJECTIVE: Investigate the effectiveness and frequency dependence of central drive transmission via the alpha-motoneuron pool to the muscle. METHODS: We describe a model for the simulation of alpha-motoneuron firing and the EMG signal as response to central drive input. The transfer in the frequency domain is investigated. Coherence between stochastical central input and EMG is also evaluated. RESULTS: The transmission of central rhythmicities to the EMG signal relates to the spectral content of the latter. Coherence between central input to the alpha-motoneuron pool and the EMG signal is significant whereby the coupling strength hardly depends on the frequency in a range from 1 to 100 Hz. Common central input to pairs of alpha-motoneurons strongly increases the coherence levels. The often-used rectification of the EMG signal introduces a clear frequency dependence. CONCLUSIONS: Oscillatory phenomena are strongly transmitted via the alpha-motoneuron pool. The motoneuron firing frequencies do play a role in the transmission gain, but do not influence the coherence levels. Rectification of the EMG signal enhances the transmission gain, but lowers coherence and introduces a strong frequency dependency. We think that it should be avoided. SIGNIFICANCE: Our findings show that rhythmicities are translated into alpha-motoneuron activity without strong non-linearities.}, + file = {Steg10.pdf:pdf/Steg10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {10}, + pmid = {20434397}, + month = {10}, +} + +@conference{Stei14a, + author = {A. Steiner and F. Mhimbira and van den Hombergh, J. and P. Clowes and C. Mangu and van Ginneken, B. and Hoelscher, M. and K. Reither}, + title = {Computer-aided diagnosis of X-rays in a screening for pulmonary tuberculosis of a prison population in Tanzania}, + booktitle = {45th World Conference on Lung Health}, + year = {2014}, + abstract = {Background: Recent studies have shown that computer-aided diagnosis (CAD) of chest X-rays (CXR) allows the detection of pulmonary tuberculosis (TB) with a performance similar to that of clinical officers. This study investigated the performance of the new CAD4TB system in a Tanzanian prison. Design/Methods: Between August 2013 and June 2014, all inmates of the Ukonga Prison, Dar es Salaam, are screened for TB. X-rays are classified as "normal", "abnormal, not suggestive of TB", "abnormal, consistent with TB", and "abnormal, highly suggestive of active TB" by an assistant medical officer with two years of training in radiology (reader A) and processed by the software CAD4TB v3.07 (Diagnostic Image Analysis Group, Nijmegen University, The Netherlands). A subset of 517 consecutive images was re-evaluated by reader A, as well as by an independent clinical officer (reader B), and by a TB expert. The CXR findings of the TB expert, based on a case definition which included X-ray results either "consistent with TB" or "highly suggestive of active TB", determined the radiological reference that was used for CAD4TB and the two other readers. Results: On the 466 negative and 51 positive images, readers A and B performed with a sensitivity of 52.9% and 43.1%, and a specificity of 92.4% and 77.5%, respectively. 
CAD4TB performed significantly better than reader B, but not as good as reader A (AUC 0.751, e.g. sensitivity 52.9% and specificity 83.5% for a defined threshold). The intra-reader agreement of reader A was good (Cohen's kappa 0.80), but the agreement between A and B was low (kappa 0.36). Although all readers, including CAD4TB, analyzed every CXR in under two minutes, human readers delayed 10% of images by more than 20 hours during the screening. Conclusion: This first evaluation of CAD4TB in a real-world screening situation showed, in line with previous studies, that CAD4TB performs better than a clinical officer (B), but not as good as a more experienced X-ray reader (A). Moreover, the overall time to detection is kept predictably short, which is an important criterion in multistage screening algorithms.}, + optnote = {DIAG}, +} + +@article{Stei15, + author = {Steiner, A. and Mangu, C. and {van den Hombergh}, J. and {van Deutekom}, H. and {van Ginneken}, B. and Clowes, P. and Mhimbira, F. and Mfinanga, S. and Rachow, A. and Reither, K. and Hoelscher, M.}, + title = {Screening for pulmonary tuberculosis in a {T}anzanian prison and computer-aided interpretation of chest {X}-rays}, + journal = PHA, + year = {2015}, + volume = {5}, + number = {4}, + month = {12}, + pages = {249--254}, + doi = {10.5588/pha.15.0037}, + url = {http://dx.doi.org/10.5588/pha.15.0037}, + abstract = {Tanzania is a high-burden country for tuberculosis (TB), and prisoners are a high-risk group that should be screened actively, as recommended by the World Health Organization. Screening algorithms, starting with chest X-rays (CXRs), can detect asymptomatic cases, but depend on experienced readers, who are scarce in the penitentiary setting. Recent studies with patients seeking health care for TB-related symptoms showed good diagnostic performance of the computer software CAD4TB. To assess the potential of computer-assisted screening using CAD4TB in a predominantly asymptomatic prison population. Cross-sectional study. CAD4TB and seven health care professionals reading CXRs in local tuberculosis wards evaluated a set of 511 CXRs from the Ukonga prison in Dar es Salaam. Performance was compared using a radiological reference. Two readers performed significantly better than CAD4TB, three were comparable, and two performed significantly worse (area under the curve 0.75 in receiver operating characteristics analysis). On a superset of 1321 CXRs, CAD4TB successfully interpreted >99\%, with a predictably short time to detection, while 160 (12.2\%) reports were delayed by over 24 h with conventional CXR reading. CAD4TB reliably evaluates CXRs from a mostly asymptomatic prison population, with a diagnostic performance inferior to that of expert readers but comparable to local readers.}, + file = {Stei15.pdf:pdf\\Stei15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26767179}, + gsid = {16224410692512843779}, + gscites = {15}, + ss_id = {f00068fa78dee1aba549dda6d6dca018287bae61}, + all_ss_ids = {['f00068fa78dee1aba549dda6d6dca018287bae61']}, +} + +@article{Stoe08, + author = {B. C. Stoel and D. G. Parr and E. M. Bakker and H. Putter and J. Stolk and H. A. Gietema and A. M. Schilham and B. van Ginneken and R. J. van Klaveren and J. W. J. Lammers and M. 
Prokop}, + title = {Can the extent of low-attenuation areas on {CT} scans really demonstrate changes in the severity of emphysema?}, + journal = Radiology, + year = {2008}, + volume = {247}, + pages = {293--4; author reply 294}, + doi = {10.1148/radiol.2471071608}, + file = {Stoe08.pdf:pdf\\Stoe08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {18372475}, + month = {4}, + gsid = {8941581572812805403}, + gscites = {11}, + ss_id = {399a236e0f31ec8a941285aae48be979bcd875ee}, + all_ss_ids = {['399a236e0f31ec8a941285aae48be979bcd875ee']}, +} + +@mastersthesis{Stoe19, + author = {Emiel Stoelinga}, + title = {Extracting biomarkers from hematoxylin-eosin stained histopathological images of lung cancer}, + abstract = {In this thesis the technique of deep learning was applied to the field of digital pathology, more specifically lung cancer, to extract several different biomarkers. Tertiary lymphoid structures (TLS) have been found to indicate a positive patient prognosis, especially in combination with germinal centers (GC). Therefore, a VGG16-like network was trained to detect TLS and GC in histopathological slides of lung squamous cell carcinoma with F1 scores on the pixel level of 0.922 and 0.802 respectively. Performance on a different held-out test set on the object level was 0.640 and 0.500 for TLS and GC respectively. + Treatment differs per growth pattern of lung adenocarcinoma and variability between pathologists in the assessment of lung adenocarcinoma exists. Therefore, a similar VGG16-like network was trained to segment growth patterns of adenocarcinoma in slides of lung tissue with F1 scores on the pixel level of 0.891, 0.524, 0.812 and 0.954 for solid adenocarcinoma, acinar adenocarcinoma, micropapillary adenocarcinoma and non-tumor tissue respectively. + Because the previous system was only trained on sparsely annotated data and consequently did not encounter neighbouring growth patterns of lung adenocarcinoma, a method with generative adversarial networks to generate fake densely annotated realistic looking image patches from sparsely annotated data was examined and a comparison between three types of models was made.}, + file = {Stoe19.pdf:pdf\\Stoe19.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + year = {2019}, + journal = {Master thesis}, +} + +@conference{Stoi17a, + author = {Laura Stoilescu and Marnix C. Maas and Henkjan Huisman}, + title = {Feasibility of multireference tissue normalization of T2-weighted prostate {MRI}}, + booktitle = {European Society for Magnetic Resonance in Medicine and Biology}, + year = {2017}, + abstract = {Purpose / Introduction + Prostate MRI finds 18% more clinically significant prostate cancer while avoiding 27% biopsies [1]. Reproducibility for multi-parametric T2+DWI+DCE + prostate MRI (mpMRI) is moderate [2] even though a PIRADS reading standard is available [3]. Quantification could help improve reproducibility, which to + some extent works for ADC. Scanner provided T2 maps are no solution as it leads to a different texture, lower spatial resolution and increased scan time. + We have previously developed a method for normalizing T2-weighted images [4]. The normalized value achieved a diagnostic accuracy (AUC) of 0.85 + over 0.64 for the raw T2-weighted values. That method required a separate proton density weighted sequence, an exact knowledge of the sequence + model and one reference tissue region. 
We propose a new method using multiple reference tissues that does not require an additional sequence, nor + detailed knowledge about the sequence model. The recent development of deep learning makes it feasible to segment multiple reference tissues. The + hypothesis is that the reference tissues allow building a patient specific model to normalize the T2-weighted prostate MR images for quantitative use. + Subjects and Methods + To test the hypothesis we manually delineated reference tissues and tumor lesions in mpMRI studies of prostate cancer patients. All lesions were + interpreted by expert radiologists and assigned a PIRADS score. The normalized T2 was then validated for its ability to discriminate PIRADS 2-3 from 4-5 + classes. Regions of interest (ROI) were drawn in four distinct tissue types in fifty T2-weighted images from regular multiparametric prostate MRI (mpMRI). + The four reference tissue types were: obturator internus muscle, body fat, femoral head, bladder lumen. Four average ROI signals were computed per + patient. Each reference tissue was assigned a fixed reference value (T2 relaxation found in literature). Per patient, a smooth spline model was fitted to the + (average, reference) pairs. The estimated spline model was then inverted to map patients' raw T2-weighted image scalar values to normalized values. The + effect of the normalization was determined by computing and comparing the diagnostic accuracy using ROC analysis. + Results + The area under the ROC (AUC) was significantly higher (p<0.05) in normalized T2. + + Discussion / Conclusion + The significant improvement of the diagnostic accuracy demonstrates the potential of our normalization method for the quantitative interpretation of T2-weighted prostate MRI. The results were similar to our previous method. The method still requires manual delineation of multiple reference tissues, + however, we will develop deep learning segmentation methods to automate the method and enable regular clinical use. + References + 1. Ahmed, Hashim U., et al. "Diagnostic accuracy of multi-parametric MRI and TRUS biopsy in prostate cancer (PROMIS): a paired validating confirmatory + study." The Lancet 389.10071 (2017): 815-822. + 2. Rosenkrantz, Andrew B., et al. "Interobserver reproducibility of the PI-RADS version 2 lexicon: a multicenter study of six experienced prostate + radiologists." Radiology 280.3 (2016): 793-804. + 3. Barentsz JO, et al. Synopsis of the PI-RADS v2 Guidelines for Multiparametric Prostate Magnetic Resonance Imaging and Recommendations for Use. + Eur. Urol. 2016;69(1):41-49. + 4. Vos, Pieter C., et al. "Computer-assisted analysis of peripheral zone prostate lesions using T2-weighted and dynamic contrast-enhanced T1-weighted + MRI." Physics Med. & Biol. 55.6 (2010): 1719}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Stoi17b, + author = {Laura Stoilescu and Henkjan Huisman}, + title = {Feasibility of multireference tissue normalization of T2-weighted prostate {MRI}}, + booktitle = RSNA, + year = {2017}, + abstract = {PURPOSE + To explore a novel multireference tissue normalization method applied to T2-weighted prostate MRI. + METHOD AND MATERIALS + Assuming the availability of a set of distinct reference tissue segmentations, the hypothesis is that it allows computing a patient specific sequence model + that can normalize MRI. 
The normalization should produce similar scalar values in the same reference regions for different patients/scanners/sequences + and interpolate in between reference values for other tissue areas. Regions of interest (ROI) were drawn in four distinct tissue types in a cohort of sixty-five T2-weighted images from regular multiparametric prostate MRI (mpMRI). The four reference tissue types were: skeletal muscle, body fat, femur + head, bladder lumen. Four average ROI signals were computed per patient. Each reference tissue was assigned a fixed reference value (t2 relaxation + found in literature). Per patient, a smooth sequence model was fitted to the (average, reference) pairs. The estimated sequence model was then + inverted to map patients' raw T2-weighted image scalar values to normalized values. To test the method, the effect of normalization on observed + variance and tissue discriminability was analyzed. A leave-one-out experiment was performed in which for each ROI its normalized value was computed + using the sequence model estimate using the three remaining reference ROIs. The difference between original T2-weighted and normalized scalar MRI + was analyzed by means of variability and ROC analysis. + RESULTS + Multireference tissue normalization significantly (p<0.05) decreased variability and increased the area under the ROC curve for discriminating each + reference tissue combination. The ROC curves in the figure show the effect of the normalization (T2n) on the discrimination between body fat and + femur head tissue. + CONCLUSION + Semiautomatic multireference tissue normalization shows reduced interpatient variability and may allow better quantitative discrimination between + tissue types. + CLINICAL RELEVANCE/APPLICATION + Multireference tissue T2-weighted MRI normalization seems feasible. In combination with automatic segmentation, this could be providing clinical + quantitative imaging support to mpMRI diagnosis of prostate cancer. This result motivates us to continue to explore the ability of this novel method to + help detect and discriminate prostate cancer in mpMRI}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Stou01, + author = {M. J. Stoutjesdijk and J. O. Barentsz}, + title = {Prophylactic mastectomy in carriers of BRCA mutations}, + journal = NEJM, + year = {2001}, + volume = {345}, + pages = {1499; author reply 1499--1499.}, + doi = {10.1056/NEJM200111153452014}, + file = {Stou01.pdf:pdf\\Stou01.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {20}, + pmid = {11794206}, + month = {11}, +} + +@article{Stou01a, + author = {M. J. Stoutjesdijk and C. Boetes and G. J. Jager and L. Beex and P. Bult and J. H. Hendriks and R. J. Laheij and L. Massuger and L. E. van Die and T. Wobbes and J. O. Barentsz}, + title = {Magnetic resonance imaging and mammography in women with a hereditary risk of breast cancer}, + journal = JNCI, + year = {2001}, + volume = {93}, + pages = {1095--1102}, + abstract = {Although breast cancer screening is recommended to start at a younger age for women with a hereditary risk of breast cancer, the sensitivity of mammography for these women is reduced. We compared magnetic resonance imaging (MRI) with mammography to determine which is more sensitive and whether MRI could play a role in the early detection of breast cancer for these women. We constructed a retrospective cohort of all breast MRI and mammography surveillance reports made in our department from November 1994 to February 2001. 
All of the 179 women in the cohort had received biannual palpation in addition to annual imaging by MRI, mammography, or both. The 258 MRI images and the 262 mammograms were classified with the use of the BI-RADS (i.e., Breast Imaging Reporting and Data System) scoring system, which has five categories to indicate the level of suspicion of a lesion. Receiver operator characteristic curves were generated for MRI and mammography, and the area under each curve (AUC) was assessed for the entire cohort of 179 women and for a subset of 75 women who had received both an MRI and a mammographic examination within a 4-month period. All statistical tests were two-sided. In the cohort of 179 women, we detected 13 breast cancers. Seven cancers were not revealed by mammography, but all were detected by MRI. For the entire cohort, the AUC for mammography was 0.74 (95\% confidence interval [CI] = 0.68 to 0.79), and the AUC for MRI was 0.99 (95\% CI = 0.98 to 1.0). For the subset of women who had both examinations, the AUC for mammography was 0.70 (95\% CI = 0.60 to 0.80), and the AUC for MRI was 0.98 (95\% CI = 0.95 to 1.0). MRI was more accurate than mammography in annual breast cancer surveillance of women with a hereditary risk of breast cancer. Larger prospective studies to examine the role of MRI in screening programs are justified.}, + file = {Stou01a.pdf:pdf\\Stou01a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {14}, + pmid = {11459871}, + month = {7}, +} + +@article{Stou05, + author = {Mark J Stoutjesdijk and Jurgen J F\"utterer and Carla Boetes and Lya E van Die and Gerrit Jager and Jelle O Barentsz}, + title = {Variability in the description of morphologic and contrast enhancement characteristics of breast lesions on magnetic resonance imaging}, + journal = IR, + year = {2005}, + volume = {40}, + pages = {355--362}, + abstract = {The objective of this study was to evaluate the interobserver variability in reporting descriptive kinetic and morphologic enhancement features at breast magnetic resonance imaging. Four observers evaluated 103 lesions, 49 malignant and 54 benign, proven by histopathology. They used standardized terminology with the following characteristics: "early enhancement kinetics" and "late enhancement kinetics" in curves from both reader-determined and preset regions of interest (ROIs), "enhancement pattern," "shape," "margin," "internal enhancement," and a final assessment score. Agreement was calculated using the kappa statistic. Differences in agreement were calculated using Fisher exact test. kappa was 0.27 for both early and late enhancement; preset ROIs improved kappa to 0.47 and 0.67, respectively (odds ratios, 1.7 and 4.5). kappa was 0.45 for pattern, 0.42 for shape, 0.26 for margin, 0.25 for internal enhancement, and 0.28 for final assessment. There was considerable variability in the use of most generally accepted terms. The preparation of ROIs was a major source of variability in the interpretation of enhancement curves.}, + file = {Stou05.pdf:pdf\\Stou05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {15905722}, +} + +@article{Stou07, + author = {M. J. Stoutjesdijk and J. Veltman and H. Huisman and N. Karssemeijer and J. O. Barentsz and J. G. Blickman and C. 
Boetes}, + title = {Automated analysis of contrast enhancement in breast {MRI} lesions using mean shift clustering for {ROI} selection}, + journal = JMRI, + year = {2007}, + volume = {26}, + pages = {606--614}, + doi = {10.1002/jmri.21026}, + abstract = {{PURPOSE}: {T}o evaluate a new method for automated determination of a region of interest ({ROI}) for the analysis of contrast enhancement in breast {MRI}. {MATERIALS} {AND} {METHODS}: {M}ean shift multidimensional clustering ({MS}-{MDC}) was employed to divide 92 lesions into several spatially contiguous clusters each, based on multiple enhancement parameters. {T}he {ROI}s were defined as the clusters with the highest probability of malignancy. {T}he performance of enhancement analysis within these {ROI}s was estimated using the area under the receiver operator characteristic curve ({AUC}), and compared against a radiologist's final assessment and a classifier using histogram analysis ({HA}). {F}or {HA}, the first, second, and third quartiles were evaluated. {RESULTS}: {MS}-{MDC} resulted in {AUC} = 0.88 with a 95\% confidence interval ({CI}) of 0.81-0.95. {T}he {AUC} for the radiologist's assessment was 0.93 (95\%{CI} = 0.87-0.97). {B}est {HA} performance was found using the first quartile, with {AUC} = 0.79 (95\%{CI} = 0.69-0.88). {T}here was no significant difference between {MS}-{MDC} and the radiologist ({P} = 0.40). {T}he improvement of {MS}-{MDC} over {HA} was significant ({P} = 0.018). {CONCLUSION}: {M}ean shift clustering followed by automated selection of the most suspicious cluster resulted in accurate {ROI}s in breast {MRI} lesions.}, + file = {Stou07.pdf:pdf\\Stou07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {17729367}, + gsid = {14001259981424088431}, + gscites = {41}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/52499}, + ss_id = {35b0e9227e6a3141518aa6791d233272d003ce3a}, + all_ss_ids = {['35b0e9227e6a3141518aa6791d233272d003ce3a']}, +} + +@phdthesis{Stou11, + author = {M. J. Stoutjesdijk}, + title = {Automated analysis of contrast enhancement in magnetic resonance imaging of the breast}, + year = {2011}, + url = {http://repository.ubn.ru.nl/handle/2066/95664}, + abstract = {This work covers research on the application of breast MRI and on computer aided diagnosis (CAD), aimed at improving the accuracy of the imaging interpretation. First, a study is described that was designed to answer whether magnetic resonance imaging (MRI) can be used to periodically examine women with a hereditary increased risk of breast cancer. MRI appeared more accurate than conventional mammography, when used in annual breast cancer surveillance of women with an increased hereditary risk of breast cancer. The thesis then describes the observer variability in reporting lesion features, including an approach to improve it. Surprisingly, a considerable variability was found in the use of most generally accepted terms. The preparation of a region of interest for analysis of contrast enhancement turned out to be a major source of variability in the interpretation of enhancement curves. Technological advances made it possible to run both very fast imaging series and slower, high-definition series, within the same breast MRI study. Such a hybrid set of series was used in a clinical study performed by us to determine if slow, fast or a combination of series would result in the best diagnostic performance. We concluded that a combination of series yielded the best results. 
Next is the description of a method for automated analysis of contrast enhancement in breast MRI (mean shift clustering followed by iterative selection of the region of interest). Finally, an improvement of our CAD method is described. This thesis describes that: 1) Breast MRI can be used for periodical surveillance of women with hereditary increased risk of breast cancer; 2) Breast MRI suffers from limited specificity of breast MRI, partly caused by the inter-observer variability in the placement of the region of interest for pharmacokinetic analysis; 3) Our CAD method is a feasible technique to automatically place the region of interest and to obtain a probability of malignancy, especially if expanded with pharmacokinetic modeling.}, + copromotor = {H. J. Huisman}, + file = {Stou11.pdf:pdf\\Stou11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {J. O. Barentsz, N. Karssemeijer and C. Boetes}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Stou12, + author = {Stoutjesdijk, Mark J. and Zijp, Miranda and Boetes, Carla and Karssemeijer, Nico and Barentsz, Jelle O. and Huisman, Henkjan}, + title = {Computer aided analysis of breast {MRI} enhancement kinetics using mean shift clustering and multifeature iterative region of interest selection}, + journal = JMRI, + year = {2012}, + volume = {36}, + pages = {1104-1112}, + doi = {10.1002/jmri.23746}, + abstract = {PURPOSE: To evaluate automatic characterization of a breast MR lesion by its spatially coherent region of interest (ROI). MATERIALS AND METHODS: The method delineated 247 enhancing lesions using Otsu thresholding after manually placing a sphere. Mean Shift Clustering subdivided each volume, based on features including pharmacokinetic parameters. An iteratively trained classifier to predict the most suspicious ROI (IsR) was used, to predict the malignancy likelihood of each lesion. Performance was evaluated using receiver operator characteristic (ROC) analysis, and compared with a previous prototype. IsR was compared with noniterative training. The effect of adding BI-RADS morphology (from a radiologist) to the classifier was investigated. RESULTS: The area under the ROC curve (AUC) was 0.83 (95\% confidence interval [CI] of 0.77-0.88), and was 0.75 (95\%CI = 0.68-0.81; P = 0.029) without pharmacokinetic features. IsR performed better than conventional selection, based on one feature (AUC 0.75, 95\%CI = 0.68-0.81; P = 0.035). With morphology, the AUC was 0.84 (95\%CI = 0.78-0.88) versus 0.82 without (P = 0.40). CONCLUSION: Breast lesions can be characterized by their most suspicious, contiguous ROI using multi-feature clustering and iterative training. Characterization was improved by including pharmacokinetic modeling, while in our experiments, including morphology did not improve characterization. J. Magn. Reson. Imaging 2012;. (c) 2012 Wiley Periodicals, Inc.}, + file = {Stou12.pdf:pdf\\Stou12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {22786883}, + month = {7}, + gsid = {3993348269768594312}, + gscites = {8}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110666}, + ss_id = {e556afa60fd6588be8120f74d3a270930cb9869f}, + all_ss_ids = {['e556afa60fd6588be8120f74d3a270930cb9869f']}, +} + +@book{Stoy18, + author = {Danail Stoyanov and Zeike Taylor and Bernhard Kainz and Gabriel Maicas and Reinhard Beichel and Anne Martel and Lena Maier-Hein and Bhatia Kanwal and Tom Vercauteren and Oktay Ozan and Gustavo Carneiro and Andrew P. 
Bradley and Jacinto Nascimento and Hang Min and Matthew S. Brown and Colin Jacobs and Bianca Lassen-Schmidt and Kensaku Mori and Jens Petersen and Raul San Jose Estepar and Alexander Schmidt-Richberg and Catarina Veiga}, + title = {Image Analysis for Moving Organ, Breast and Thoracic Images}, + year = {2018}, + volume = {11040}, + series = LNCS, + publisher = {Springer}, + doi = {10.1007/978-3-030-00946-5}, + url = {https://www.springer.com/gp/book/9783030009458}, + abstract = {This book constitutes the refereed joint proceedings of the Third International Workshop on Reconstruction and Analysis of Moving Body Organs, RAMBO 2018, the Fourth International Workshop on Breast Image Analysis, BIA 2018, and the First International Workshop on Thoracic Image Analysis, TIA 2018, held in conjunction with the 21st International Conference on Medical Imaging and Computer-Assisted Intervention, MICCAI 2018, in Granada, Spain, in September 2018.}, + file = {Stoy18.pdf:pdf/Stoy18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Stud20, + author = {L. Studer and J. Bokhorst and I. Zlobec and A. Lugli and A. Fischer and F. Ciompi and J. van der Laak and I. Nagtegaal and H. Dawson}, + title = {Validation of computer-assisted tumour-bud and T-cell detection in pT1 colorectal cancer}, + journal = {European Congress of pathology}, + year = {2020}, + optnote = {DIAG, RADIOLOGY}, + abstract = {Background & objectives: Tumour budding, and T-cells are robust prognostic biomarkers in colorectal cancer. A combined analysis is complex and can be greatly expedited and automated using deep learning. The implementation of computer-based analysis in diagnostics is challenging and necessitates extensive validation. + + Methods: Randomly selected (n=61) double-stained immunohistochemical slides (AE1-AE3 pancytokeratin for tumour buds and CD8 for cytotoxic T-cells) from our pT1 cohort from 3 different institutions were used to validate the deep learning algorithms for tumour budding and CD8 T-cell detection developed by the International Budding Consortium Computational Pathology Group. Staining and scanning were performed in a single laboratory. + + Results: In the visually identified tumour budding hotspot (0.785 mm2), tumour buds were manually annotated, and the output of the T-cell algorithm manually corrected by a single observer. For budding, 645 out of the 1'306 buds were correctly identified by the algorithm. Recall and precision were 49.4% and 61.4%, respectively. For the T-cells, 89.3% were correctly detected (from a total of 16'296). The recall was 90.3% and the precision was 87.3%. Reasons for misclassified T-cells included staining intensity, suboptimal tissue recognition and slide artifacts. + + Conclusion: Our preliminary data demonstrates satisfactory results for T-cell detection. Automated budding detection is more difficult, as inter-observer variability of bud calling is high among experts. 
These issues merit consideration when developing reliable deep learning algorithms examining the tumour/host interface.}, +} + +@inproceedings{Stud22, + author = {Linda Studer and John-Melle Bokhorst and Francesco Ciompi and Andreas Fischer and Heather Dawson}, + title = {Budding-T-cell score is a potential predictor for more aggressive treatment in pT1 colorectal cancers}, + journal = {Proceedings of the ECDP 2022 18th European Congress on Digital Pathology}, + year = {2022}, + optnote = {DIAG, RADIOLOGY}, + file = {Stud22.pdf:pdf/Stud22.pdf:PDF}, + abstract = {Introduction + As pT1 colorectal cancers (CRC) tend to be overtreated, we investigate the previously proposed BTS (budding-T-cell-score = (#tumor-buds+1)/(#T-cells+1)) as a predictive marker to assess patients' need for resection. BTS was shown to be a better predictor of survival and other clinical factors than individual scoring. + + Materials and Methods + We consider hotspots annotated by a pathologist according to the ITBCC guidelines on double-stained (AE1-AE3 pan-cytokeratin and CD8+) WSI from our pT1 CRC cohort (N=573). Within hotspots, tumor-buds and T-cells are automatically detected using convolutional neural networks and counted. The patients are divided into two groups based on their need for resection (no: N0 / follow-up without recurrence; yes: N1 / follow-up with recurrence). The dataset is imbalanced (89.2%/10.8%). To predict the patient group, we train a support-vector machine with data-balancing using the tumor-buds or T-cell counts individually, together, and just the BTS. We report the weighted accuracy, and sensitivity and specificity for the "yes" group. + + Results + The highest weighted accuracy (62.8+-6.5%) and precision (17.6+-3.7%) are achieved using the tumor-buds count. Using the BTS achieves a sensitivity of 98.3+-2.9%, which outperforms the other models by more than 30%. + + Conclusion + We show that combined assessment of tumor-buds and T-cells has the potential to serve as a predictive marker for the need of resection in pT1 cancers. However, there is still much room for improvement, as the low specificity still leads to overtreatment. We aim to address this in future work by also considering the spatial relationship of tumor-buds and T-cells and other predictive factors of nodal metastasis.}, +} + +@article{Stur19, + author = {Sturm, Bart and Creytens, David and Cook, Martin G and Smits, Jan and van Dijk, Marcory C R F and Eijken, Erik and Kurpershoek, Eline and Kusters-Vandevelde, Heidi V N and Ooms, Ariadne H A G and Wauters, Carla and Blokx, Willeke A M and van der Laak, Jeroen A W M}, + title = {Validation of Whole-slide Digitally Imaged Melanocytic Lesions: Does Z-Stack Scanning Improve Diagnostic Accuracy?}, + journal = JPATINF, + year = {2019}, + volume = {10}, + pages = {6}, + doi = {10.4103/jpi.jpi_46_18}, + url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6415522/}, + abstract = {Accurate diagnosis of melanocytic lesions is challenging, even for expert pathologists. Nowadays, whole-slide imaging (WSI) is used for routine clinical pathology diagnosis in several laboratories. One of the limitations of WSI, as it is most often used, is the lack of a multiplanar focusing option. In this study, we aim to establish the diagnostic accuracy of WSI for melanocytic lesions and investigate the potential accuracy increase of z-stack scanning. Z-stack enables pathologists to use a software focus adjustment, comparable to the fine-focus knob of a conventional light microscope. 
Melanocytic lesions (n = 102) were selected from our pathology archives: 35 nevi, 5 spitzoid tumors of unknown malignant potential, and 62 malignant melanomas, including 10 nevoid melanomas. All slides were scanned at a magnification comparable to use of a x40 objective, in z-stack mode. A ground truth diagnosis was established on the glass slides by four academic dermatopathologists with a special interest in the diagnosis of melanoma. Six nonacademic surgical pathologists subspecialized in dermatopathology examined the cases by WSI. An expert consensus diagnosis was achieved in 99 (97%) of cases. Concordance rates between surgical pathologists and the ground truth varied between 75% and 90%, excluding nevoid melanoma cases. Concordance rates of nevoid melanoma varied between 10% and 80%. Pathologists used the software focusing option in 7%-28% of cases, which in 1 case of nevoid melanoma resulted in correcting a misdiagnosis after finding a dermal mitosis. Diagnostic accuracy of melanocytic lesions based on glass slides and WSI is comparable with previous publications. A large variability in diagnostic accuracy of nevoid melanoma does exist. Our results show that z-stack scanning, in general, does not increase the diagnostic accuracy of melanocytic lesions.}, + file = {Stur19.pdf:pdf\\Stur19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30972225}, + gsid = {13384271727437089166}, + gscites = {4}, + ss_id = {9ff29e9553f0d93ef599c5afa90f311cc907d507}, + all_ss_ids = {['9ff29e9553f0d93ef599c5afa90f311cc907d507']}, +} + +@article{Stög17, + author = {St\"{o}ger, Lauran and Schaefer-Prokop, Cornelia and Geurts, Bram H.J.}, + title = {Imaging of nontraumatic thoracic emergencies}, + doi = {10.1097/mcp.0000000000000355}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1097/MCP.0000000000000355}, + file = {Stog17.pdf:pdf\Stog17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Current Opinion in Pulmonary Medicine}, + citation-count = {0}, + automatic = {yes}, + pages = {184-192}, + volume = {23}, +} + +@article{Sunoqrot22a, + author = {Mohammed R. S. Sunoqrot and Anindo Saha and Matin Hosseinzadeh and Mattijs Elschot and Henkjan Huisman}, + title = {Artificial Intelligence for Prostate MRI: Open Datasets, Available Applications, and Grand Challenges}, + journal = {European Radiology Experimental}, + pages = {35}, + year = {2022}, + doi = {10.1186/s41747-022-00288-8}, + url = {https://eurradiolexp.springeropen.com/articles/10.1186/s41747-022-00288-8}, + abstract = {Artificial intelligence (AI) for prostate magnetic resonance imaging (MRI) is starting to play a clinical role for prostate cancer (PCa) patients. AI-assisted reading is feasible, allowing workflow reduction. A total of 3,369 multi-vendor prostate MRI cases are available in open datasets, acquired from 2003 to 2021 in Europe or USA at 3 T (n = 3,018; 89.6%) or 1.5 T (n = 296; 8.8%), 346 cases scanned with endorectal coil (10.3%), 3,023 (89.7%) with phased-array surface coils; 412 collected for anatomical segmentation tasks, 3,096 for PCa detection/classification; for 2,240 cases lesions delineation is available and 56 cases have matching histopathologic images; for 2,620 cases the PSA level is provided; the total size of all open datasets amounts to approximately 253 GB. Of note, quality of annotations provided per dataset highly differ and attention must be paid when using these datasets (e.g., data overlap). Seven grand challenges and commercial applications from eleven vendors are here considered. 
Few small studies provided prospective validation. More work is needed, in particular validation on large-scale multi-institutional, well-curated public datasets to test general applicability. Moreover, AI needs to be explored for clinical stages other than detection/characterization (e.g., follow-up, prognosis, interventions, and focal treatment).}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {8c739636acd9cc63a1290e73bb34cf49b605d60d}, + all_ss_ids = {['8c739636acd9cc63a1290e73bb34cf49b605d60d']}, + gscites = {17}, +} + +@article{Suzu17, + author = {Suzuki, Yuriko and Fujima, Noriyuki and Ogino, Tetsuo and Meakin, James Alastair and Suwa, Akira and Sugimori, Hiroyuki and Van Cauteren, Marc and van Osch, Matthias J. P.}, + title = {Acceleration of ASL-based time-resolved MR angiography by acquisition of control and labeled images in the same shot (ACTRESS)}, + doi = {10.1002/mrm.26667}, + year = {2017}, + abstract = {Purpose: Noncontrast 4D-MR-angiography (MRA) using arterial spin labeling (ASL) is beneficial because high spatial and temporal resolution can be achieved. However, ASL requires acquisition of labeled and control images for each phase. The purpose of this study is to present a new accelerated 4D-MRA approach that requires only a single control acquisition, achieving similar image quality in approximately half the scan time. Methods: In a multi-phase Look-Locker sequence, the first phase was used as the control image and the labeling pulse was applied before the second phase. By acquiring the control and labeled images within a single Look-Locker cycle, 4D-MRA was generated in nearly half the scan time of conventional ASL. However, this approach potentially could be more sensitive to off-resonance and magnetization transfer (MT) effects. To counter this, careful optimizations of the labeling pulse were performed by Bloch simulations. In in-vivo studies arterial visualization was compared between the new and conventional ASL approaches. Results: Optimization of the labeling pulse successfully minimized off-resonance effects. Qualitative assessment showed that residual MT effects did not degrade visualization of the peripheral arteries. Conclusion: This study demonstrated that the proposed approach achieved similar image quality as conventional ASL-MRA approaches in just over half the scan time. Magn Reson Med 79:224-233, 2018. (c) 2017 The Authors Magnetic Resonance in Medicine published by Wiley Periodicals, Inc. on behalf of International Society for Magnetic Resonance in Medicine. 
This is an open access article under the terms of the Creative Commons Attribution NonCommercial License, which permits use, distribution and reproduction in any medium, provided the original work is properly cited and is not used for commercial purposes.}, + url = {http://dx.doi.org/10.1002/mrm.26667}, + file = {Suzu17.pdf:pdf\Suzu17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Magnetic Resonance in Medicine}, + citation-count = {7}, + automatic = {yes}, + pages = {224-233}, + volume = {79}, +} + +@inproceedings{Swid18, + author = {Swiderska-Chadaj, Zaneta and Pinckaers, Hans and van Rijthoven, Mart and Balkenhol, Maschenka and Melnikova, Margarita and Geessink, Oscar and Manson, Quirine and Litjens, Geert and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Convolutional Neural Networks for Lymphocyte detection in Immunohistochemically Stained Whole-Slide Images}, + booktitle = MIDL, + year = {2018}, + url = {https://openreview.net/forum?id=rk0xLisiM}, + abstract = {Recent advances in cancer immunotherapy have boosted the interest in the role played by the immune system in cancer treatment. In particular, the presence of tumor-infiltrating lymphocytes (TILs) have become a central research topic in oncology and pathology. Consequently, a method to automatically detect and quantify immune cells is of great interest. In this paper, we present a comparison of different deep learning (DL) techniques for the detection of lymphocytes in immunohistochemically stained (CD3 and CD8) slides of breast, prostate and colon cancer. The compared methods cover the state-of-the-art in object localization, classification and segmentation: Locality Sensitive Method (LSM), U-net, You Only Look Once (YOLO) and fully-convolutional networks (FCNN). A dataset with 109,841 annotated cells from 58 whole-slide images was used for this study. Overall, U-net and YOLO achieved the highest results, with an F1-score of 0.78 in regular tissue areas. U-net approach was more robust to biological and staining variability and could also handle staining and tissue artifacts.}, + file = {Swid18.pdf:pdf\\Swid18.pdf:PDF}, + optnote = {DIAG}, + gsid = {196320442164707556}, + gscites = {12}, + ss_id = {1bbafc8586925a89fb6ebe0941a9cb7e95de452b}, + all_ss_ids = {['1bbafc8586925a89fb6ebe0941a9cb7e95de452b']}, +} + +@article{Swid19, + author = {Swiderska-Chadaj, Zaneta and Pinckaers, Hans and van Rijthoven, Mart and Balkenhol, Maschenka and Melnikova, Margarita and Geessink, Oscar and Manson, Quirine and Sherman, Mark and Polonia, Antonio and Parry, Jeremy and Abubakar, Mustapha and Litjens, Geert and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Learning to detect lymphocytes in immunohistochemistry with deep learning}, + journal = MIA, + year = {2019}, + volume = {58}, + month = {8}, + pages = {101547}, + doi = {10.1016/j.media.2019.101547}, + url = {https://linkinghub.elsevier.com/retrieve/pii/S1361-8415(19)30082-9}, + abstract = {The immune system is of critical importance in the development of cancer. The evasion of destruction by the immune system is one of the emerging hallmarks of cancer. We have built a dataset of 171,166 manually annotated CD3 and CD8 cells, which we used to train deep learning algorithms for automatic detection of lymphocytes in histopathology images to better quantify immune response. 
Moreover, we investigate the effectiveness of four deep learning based methods when different subcompartments of the whole-slide image are considered: normal tissue areas, areas with immune cell clusters, and areas containing artifacts. We have compared the proposed methods in breast, colon and prostate cancer tissue slides collected from nine different medical centers. Finally, we report the results of an observer study on lymphocyte quantification, which involved four pathologists from different medical centers, and compare their performance with the automatic detection. The results give insights on the applicability of the proposed methods for clinical use. U-Net obtained the highest performance with an F1-score of 0.78 and the highest agreement with manual evaluation (k=0.72), whereas the average pathologists agreement with reference standard was k=0.64. The test set and the automatic evaluation procedure are publicly available at lyon19.grand-challenge.org.}, + file = {Swid19.pdf:pdf\\Swid19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31476576}, + gsid = {6264470711036776042}, + gscites = {92}, + ss_id = {ddf3d225ccaeb05a912051c554b38f722f365a1b}, + all_ss_ids = {['ddf3d225ccaeb05a912051c554b38f722f365a1b']}, +} + +@inproceedings{Swid20, + author = {Swiderska-Chadaj, Zaneta and Hebeda, Konnie and van den Brand, Michiel and Litjens, Geert}, + title = {Predicting MYC translocation in HE specimens of diffuse large B-cell lymphoma through deep learning}, + booktitle = MI, + volume = {11320}, + pages = {1132010}, + doi = {10.1117/12.2549650}, + series = {SPIE}, + abstract = {Diffuse large B-cell lymphoma (DLBCL) is the most common type of B-cell lymphoma. It is characterized by a heterogeneous morphology, genetic changes and clinical behavior. A small specific subgroup of DLBCL, harbouring a MYC gene translocation is associated with worse patient prognosis and outcome. Typically, the MYC translocation is assessed with a molecular test (FISH), that is expensive and time-consuming. Our hypothesis is that genetic changes, such as translocations could be visible as changes in the morphology of an HE-stained specimen. However, it has not proven possible to use morphological criteria for the detection of a MYC translocation in the diagnostic setting due to lack of specificity. + In this paper, we apply a deep learning model to automate detection of the MYC translocations in DLBCL based on HE-stained specimens. The proposed method works at the whole-slide level and was developed based on a multicenter data cohort of 91 patients. All specimens were stained with HE, and the MYC translocation was confirmed using fluorescence in situ hybridization (FISH). The system was evaluated on an additional 66 patients, and obtained AUROC of 0.83 and accuracy of 0.77. The proposed method presents proof of a concept giving insights in the applicability of deep learning methods for detection of a genetic changes in DLBCL. In future work we will evaluate our algorithm for automatic pre-screen of DLBCL specimens to obviate FISH analysis in a large number of patients.}, + file = {:pdf/Swid20.pdf:PDF}, + optnote = {DIAG}, + year = {2020}, + ss_id = {a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97}, + all_ss_ids = {['a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97']}, + gscites = {2}, +} + +@inproceedings{Swid20a, + author = {Swiderska-Chadaj, Zaneta and Nurzynska, Karolina and Bartlomiej, Grala and Grunberg, Katrien and van der Woude, Lieke and Looijen-Salamon, Monika and Walts, Ann E. 
and Markiewicz, Tomasz and Ciompi, Francesco and Gertych, Arkadiusz}, + title = {A deep learning approach to assess the predominant tumor growth pattern in whole-slide images of lung adenocarcinoma}, + booktitle = MI, + year = {2020}, + volume = {11320}, + series = SPIE, + month = {3}, + pages = {113200D}, + doi = {10.1117/12.2549742}, + abstract = {When diagnosing and reporting lung adenocarcinoma (LAC), pathologists currently include an assessment of histologic tumor growth patterns because the predominant growth pattern has been reported to impact prognosis. However, the subjective nature of manual slide evaluation contributes to suboptimal inter-pathologist variability in tumor growth pattern assessment. We applied a deep learning approach to identify and automatically delineate areas of four tumor growth patterns (solid, acinar, micropapillary, and cribriform) and non-tumor areas in whole-slide images (WSI) from resected LAC specimens. We trained a DenseNet model using patches from 109 slides collected at two institutions. The model was tested using 56 WSIs including 20 that were collected at a third institution. Using the same slide set, the concordance between the DenseNet model and an experienced pathologist (blinded to the DenseNet results) in determining the predominant tumor growth pattern was substantial (kappa score = 0.603). Using a subset of 36 test slides that were manually annotated for tumor growth patterns, we also measured the F1-score for each growth pattern: 0.95 (solid), 0.78 (acinar), 0.76 (micropapillary), 0.28 (cribriform) and 0.97 (non-tumor). Our results suggest that DenseNet assessment of WSIs with solid, acinar, and micropapillary predominant tumor growth is more robust than for the WSIs with predominant cribriform growth which are less frequently encountered.}, + file = {Swid20a.pdf:pdf\\Swid20a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {424bfbd0fcda2f8e5985acf173a5111af22f553d}, + all_ss_ids = {['424bfbd0fcda2f8e5985acf173a5111af22f553d']}, + gscites = {4}, +} + +@article{Swid20b, + author = {Swiderska-Chadaj, Zaneta and de Bel, Thomas and Blanchet, Lionel and Baidoshvili, Alexi and Vossen, Dirk and van der Laak, Jeroen and Litjens, Geert}, + title = {Impact of rescanning and normalization on convolutional neural network performance in multi-center, whole-slide classification of prostate cancer}, + journal = SCIREP, + year = {2020}, + volume = {10}, + issue = {1}, + month = sep, + pages = {14398}, + doi = {10.1038/s41598-020-71420-0}, + abstract = {Algorithms can improve the objectivity and efficiency of histopathologic slide analysis. In this paper, we investigated the impact of scanning systems (scanners) and cycle-GAN-based normalization on algorithm performance, by comparing different deep learning models to automatically detect prostate cancer in whole-slide images. Specifically, we compare U-Net, DenseNet and EfficientNet. Models were developed on a multi-center cohort with 582 WSIs and subsequently evaluated on two independent test sets including 85 and 50 WSIs, respectively, to show the robustness of the proposed method to differing staining protocols and scanner types. We also investigated the application of normalization as a pre-processing step by two techniques, the whole-slide image color standardizer (WSICS) algorithm, and a cycle-GAN based method. For the two independent datasets we obtained an AUC of 0.92 and 0.83 respectively. After rescanning the AUC improves to 0.91/0.88 and after style normalization to 0.98/0.97. 
In the future our algorithm could be used to automatically pre-screen prostate biopsies to alleviate the workload of pathologists.}, + file = {Swid20b.pdf:pdf\\Swid20b.pdf:PDF}, + optnote = {DIAG}, + pmid = {32873856}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/225966}, + ss_id = {ec43e78642d133ac0b19192ca26d0fcb0ba14a99}, + all_ss_ids = {['ec43e78642d133ac0b19192ca26d0fcb0ba14a99']}, + gscites = {39}, +} + +@article{Swid20c, + author = {Swiderska-Chadaj, Zaneta and Hebeda, Konnie M. and van den Brand, Michiel and Litjens, Geert}, + title = {Artificial intelligence to detect MYC translocation in slides of diffuse large B-cell lymphoma}, + journal = VIRA, + year = {2020}, + doi = {10.1007/s00428-020-02931-4}, + pmid = {32979109}, + abstract = {In patients with suspected lymphoma, the tissue biopsy provides lymphoma confirmation, classification, and prognostic factors, including genetic changes. We developed a deep learning algorithm to detect MYC rearrangement in scanned histological slides of diffuse large B-cell lymphoma. The H&E-stained slides of 287 cases from 11 hospitals were used for training and evaluation. The overall sensitivity to detect MYC rearrangement was 0.93 and the specificity 0.52, showing that prediction of MYC translocation based on morphology alone was possible in 93% of MYC-rearranged cases. This would allow a simple and fast prescreening, saving approximately 34% of genetic tests with the current algorithm.}, + file = {Swid20c.pdf:pdf\\Swid20c.pdf:PDF}, + optnote = {DIAG}, + ss_id = {0a79db68b14b74dfd7637ed5b8a0a7aa2956e3ea}, + all_ss_ids = {['0a79db68b14b74dfd7637ed5b8a0a7aa2956e3ea']}, + gscites = {14}, +} + +@inproceedings{Swid20d, + author = {Swiderska-Chadaj, Zaneta and Stoelinga, Emiel and Gertych, Arkadiusz and Ciompi, Francesco}, + title = {Multi-Patch Blending improves lung cancer growth pattern segmentation in whole-slide images}, + booktitle = CPEE, + year = {2020}, + doi = {10.1109/CPEE50798.2020.9238710}, + abstract = {In this study, we introduce a technique to generate synthetic histologic image data by blending parts of different images into a new image patch. The proposed approach, which we call multi-patch blending (MPB), crops parts of two histologic images of tumor growth patterns annotated with single but different labels and pastes them into a newly created image patch comprising areas with two different annotations. A Cycle-GAN model is employed in MPB to smooth out transitions between the pasted image crops and make the output image patch look realistic. The goal of implementing the MPB is to support the task of semantic segmentation of lung adenocarcinoma growth patterns in whole-slide images (WSI). We used MPB to increase the number of training patches extracted from a set of 18 WSIs with sparse annotations. De facto, MPB was implemented as a novel data augmentation strategy. U-Net trained with MPB-generated patches achieved 13% higher F1-score than the U-Net trained with original (sparsely annotated) patches in a 4-class semantic segmentation task.}, + file = {Swid20d.pdf:pdf\\Swid20d.pdf:PDF}, + optnote = {DIAG}, + ss_id = {71cda147901a9122f82bb855f7e91c74e44eabbf}, + all_ss_ids = {['71cda147901a9122f82bb855f7e91c74e44eabbf']}, + gscites = {1}, +} + +@article{Swil23, + author = {Swillens, Julie E. M. and Nagtegaal, Iris D. and Engels, Sam and Lugli, Alessandro and Hermens, Rosella P. M. G. and van der Laak, Jeroen A. W. 
M.}, + title = {Pathologists' first opinions on barriers and facilitators of computational pathology adoption in oncological pathology: an international study}, + doi = {10.1038/s41388-023-02797-1}, + year = {2023}, + abstract = {Computational pathology (CPath) algorithms detect, segment or classify cancer in whole slide images, approaching or even exceeding the accuracy of pathologists. Challenges have to be overcome before these algorithms can be used in practice. We therefore aim to explore international perspectives on the future role of CPath in oncological pathology by focusing on opinions and first experiences regarding barriers and facilitators. We conducted an international explorative eSurvey and semi-structured interviews with pathologists utilizing an implementation framework to classify potential influencing factors. The eSurvey results showed remarkable variation in opinions regarding attitude, understandability and validation of CPath. Interview results showed that barriers focused on the quality of available evidence, while most facilitators concerned strengths of CPath. A lack of consensus was present for multiple factors, such as the determination of sufficient validation using CPath, the preferred function of CPath within the digital workflow and the timing of CPath introduction in pathology education. The diversity in opinions illustrates variety in influencing factors in CPath adoption. A next step would be to quantitatively determine important factors for adoption and initiate validation studies. Both should include clear case descriptions and be conducted among a more homogenous panel of pathologists based on sub specialization.}, + url = {http://dx.doi.org/10.1038/s41388-023-02797-1}, + file = {Swil23.pdf:pdf\Swil23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Oncogene}, + citation-count = {0}, + automatic = {yes}, + pages = {2816-2827}, + volume = {42}, +} + +@article{Takx14, + author = {Takx, Richard A P. and Vliegenthart, Rozemarijn and Mohamed Hoesein, Firdaus A A and I{\v{s}}gum, Ivana and de Koning, Harry J. and Mali, Willem P Th M. and van der Aalst, Carlijn M. and Zanen, Pieter and Lammers, Jan-Willem J. and Groen, Harry J M. and van Rikxoort, Eva M. and Schmidt, Michael and van Ginneken, Bram and Oudkerk, Matthijs and Leiner, Tim and de Jong, Pim A.}, + title = {Pulmonary function and {CT} biomarkers as risk factors for cardiovascular events in male lung cancer screening participants: the NELSON study}, + journal = ER, + year = {2015}, + volume = {25}, + pages = {65-71}, + doi = {10.1007/s00330-014-3384-6}, + abstract = {The objective of this study was to investigate the association of spirometry and pulmonary CT biomarkers with cardiovascular events. In this lung cancer screening trial 3,080 male participants without a prior cardiovascular event were analysed. Fatal and non-fatal cardiovascular events were included. Spirometry included forced expiratory volume measured in units of one-second percent predicted (FEV1\%predicted) and FEV1 divided by forced vital capacity (FVC; FEV1/FVC). CT examinations were quantified for coronary artery calcium volume, pulmonary emphysema (perc15) and bronchial wall thickness (pi10). Data were analysed via a Cox proportional hazard analysis, net reclassification improvement (NRI) and C-indices. 184 participants experienced a cardiovascular event during a median follow-up of 2.9 years. 
Age, pack-years and smoking status adjusted hazard ratios were 0.992 (95 \% confidence interval (CI) 0.985-0.999) for FEV1\%predicted, 1.000 (95\%CI 0.986-1.015) for FEV1/FVC, 1.014 (95\%CI 1.005-1.023) for perc15 per 10 HU, and 1.269 (95\%CI 1.024-1.573) for pi10 per 1 mm. The incremental C-index (<0.015) and NRI (<2.8 \%) were minimal. Coronary artery calcium volume had a hazard ratio of 1.046 (95\%CI 1.034-1.058) per 100 mm(3), an increase in C-index of 0.076 and an NRI of 16.9 \% (PAC/a,!aEURdeg=3 mm in mean diameter were analysed using the CIRRUS Lung Screening Workstation (Radboud University Medical Center, Nijmegen, the Netherlands). In the NLST sample, nodules with cancer had been matched on size to nodules without cancer. Results: Both CAD-based mean diameter and volume models showed excellent discrimination and calibration, with similar areas under the receiver-operating-characteristic curves of 0.947. The two CAD models had similar predictive performance to the radiologist-based model. In NLST validation data, the CAD Mean Diameter and Volume models also demonstrated excellent discrimination: AUC's 0.810 and 0.821, respectively. These performance statistics are similar to the PanCan Malignancy Probability Model in these data using radiologist measured maximum diameter. Conclusion: Either CAD-based nodule diameter or volume can be used to assist in predicting nodule malignancy risk.}, + file = {Tamm18.pdf:pdf\\Tamm18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30368011}, + month = {2}, + gsid = {13773357066499407544}, + gscites = {34}, + ss_id = {e6a0940e90b9a661ff2accfa5e8eefe56fd35263}, + all_ss_ids = {['e6a0940e90b9a661ff2accfa5e8eefe56fd35263']}, +} + +@inproceedings{Tan11, + author = {T. Tan and H.J. Huisman and B. Platel and A. Grivignee and R. Mus and N. Karssemeijer}, + title = {Classification of Breast Lesions in Automated {3D} Breast Ultrasound}, + booktitle = MI, + year = {2011}, + volume = {7963}, + series = SPIE, + pages = {79630X}, + doi = {10.1117/12.877924}, + abstract = {In this paper we investigated classification of malignant and benign lesions in automated 3D breast ultrasound (ABUS). As a new imaging modality, ABUS overcomes the drawbacks of 2D hand-held ultrasound (US) such as its operator dependence and limited capability in visualizing the breast in 3D. The classification method we present includes a 3D lesion segmentation stage based on dynamic programming, which effectively deals with limited visibility of lesion boundaries due to shadowing and speckle. A novel aspect of ABUS imaging, in which the breast is compressed by means of a dedicated membrane, is the presence of spiculation in coronal planes perpendicular to the transducer. Spiculation patterns, or architectural distortion, are characteristic for malignant lesions. Therefore, we compute a spiculation measure in coronal planes and combine this with more traditional US features related to lesion shape, margin, posterior acoustic behavior, and echo pattern. However, in our work the latter features are defined in 3D. Classification experiments were performed with a dataset of 40 lesions including 20 cancers. Linear discriminant analysis (LDA) was used in combination with leaveone- patient-out and feature selection in each training cycle. We found that spiculation and margin contrast were the most discriminative features and that these features were most often chosen during feature selection. 
An Az value of 0.86 was obtained by merging all features, while an Az value of 0.91 was obtained by feature selection.}, + file = {Tan11.pdf:pdf/Tan11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {15037460857605703476}, + gscites = {8}, + ss_id = {2704781e2ff16f7f31bd97e4d393e685b9ab0875}, + all_ss_ids = {['2704781e2ff16f7f31bd97e4d393e685b9ab0875']}, +} + +@inproceedings{Tan11c, + author = {T. Tan and B. Platel and H. Huisman and N. Karssemeijer}, + title = {Chest wall segmentation in automated 3{D} breast ultrasound using a cylinder model}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2011}, + file = {Tan11c.pdf:pdf\\Tan11c.pdf:PDF}, + optnote = {DIAG}, +} + +@inproceedings{Tan11d, + author = {T. Tan and B. Platel and T. Twellmann and G. van Schie and R. Mus and A. Grivegnee and L. Tabar and N. Karssemeijer}, + title = {Computer aided interpretation of lesions in automated 3{D} breast ultrasound}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2011}, + file = {Tan11d.pdf:pdf\\Tan11d.pdf:PDF}, + optnote = {DIAG}, +} + +@inproceedings{Tan12, + author = {Tao Tan and Bram Platel and Roel Mus and Nico Karssemeijer}, + title = {Detection of Breast Cancer in Automated {3D} Breast Ultrasound}, + booktitle = MI, + year = {2012}, + volume = {8315}, + series = SPIE, + pages = {831505-1-831505-8}, + doi = {10.1117/12.911068}, + abstract = {Automated 3D breast ultrasound (ABUS) is a novel imaging modality, in which motorized scans of the breasts are made with a wide transducer through a membrane under modest compression. The technology has gained high interest and may become widely used in screening of dense breasts, where sensitivity of mammography is poor. ABUS has a high sensitivity for detecting solid breast lesions. However, reading ABUS images is time consuming, and subtle abnormalities may be missed. Therefore, we are developing a computer aided detection (CAD) system to help reduce reading time and errors. In the multi-stage system we propose, segmentations of the breast and nipple are performed, providing landmarks for the detection algorithm. Subsequently, voxel features characterizing coronal spiculation patterns, blobness, contrast, and locations with respect to landmarks are extracted. Using an ensemble of classifiers, a likelihood map indicating potential malignancies is computed. Local maxima in the likelihood map are determined using a local maxima detector and form a set of candidate lesions in each view. These candidates are further processed in a second detection stage, which includes region segmentation, feature extraction and a final classification. Region segmentation is performed using a 3D spiral-scanning dynamic programming method. Region features include descriptors of shape, acoustic behavior and texture. Performance was determined using a 78-patient dataset with 93 images, including 50 malignant lesions. We used 10-fold cross-validation. Using FROC analysis we found that the system obtains a lesion sensitivity of 60\% and 70\% at 2 and 4 false positives per image respectively.}, + file = {:./pdf/Tan12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {1527121090592760430}, + gscites = {6}, + ss_id = {443e44a373875423637e9b519b451031635f2d35}, + all_ss_ids = {['443e44a373875423637e9b519b451031635f2d35']}, +} + +@article{Tan12a, + author = {Tan, T. and Platel, B. and Huisman, H. and S\'{a}nchez, C. I. and Mus, R. 
and Karssemeijer, N.}, + title = {Computer Aided Lesion Diagnosis in Automated {3D} Breast Ultrasound Using Coronal Spiculation}, + journal = TMI, + year = {2012}, + volume = {31}, + pages = {1034--1042}, + doi = {10.1109/TMI.2012.2184549}, + abstract = {A computer-aided diagnosis (CAD) system for the classification of lesions as malignant or benign in automated 3D breast ultrasound (ABUS) images, is presented. Lesions are automatically segmented when a seed point is provided, using dynamic programming in combination with a spiral scanning technique. A novel aspect of ABUS imaging is the presence of spiculation patterns in coronal planes perpendicular to the transducer. Spiculation patterns are characteristic for malignant lesions. Therefore, we compute spiculation features and combine them with features related to echotexture, echogenicity, shape, posterior acoustic behavior and margins. Classification experiments were performed using a support vector machine (SVM) classifier and evaluation was done with leave-one-patient-out cross-validation. Receiver Operator Characteristic (ROC) analysis was used to determine performance of the system on a dataset of 201 lesions. We found that spiculation was among the most discriminative features. Using all features, the area under the ROC curve (Az) was 0.93, which was significantly higher than the performance without spiculation features (Az=0.90, p=0.02). On a subset of 88 cases, classification performance of CAD (Az=0.90) was comparable to the average performance of 10 readers (Az=0.87).}, + file = {:/pdf/Tan12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {22271831}, + month = {5}, + gsid = {920988454317757286}, + gscites = {75}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110664}, + ss_id = {f3b57b52566e70cc625e741ef1b2a388175f989d}, + all_ss_ids = {['f3b57b52566e70cc625e741ef1b2a388175f989d']}, +} + +@inproceedings{Tan13, + author = {Tao Tan and Bram Platel and Michael Hicks and Ritse M. Mann and Nico Karssemeijer}, + title = {Finding Lesion Correspondences in Different Views of Automated {3D} Breast Ultrasound}, + booktitle = MI, + year = {2013}, + series = SPIE, + doi = {10.1117/12.2007475}, + abstract = {Screening with automated 3D breast ultrasound (ABUS) is gaining popularity. However, the acquisition of multiple views required to cover an entire breast makes radiologic reading time-consuming. Linking lesions across views can facilitate the reading process. In this paper, we propose a method to automatically predict the position of a lesion in the target ABUS views, given the location of the lesion in a source ABUS view. We combine features describing the lesion location with respect to the nipple, the transducer and the chestwall, with features describing lesion properties such as intensity, spiculation, blobness, contrast and lesion likelihood. By using a grid search strategy, the location of the lesion was predicted in the target view. Our method achieved an error of 15.64 mm+-16.13 mm. 
The error is small enough to help locate the lesion with minor additional interaction.}, + file = {:./pdf/Tan13.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {2}, + gsid = {13915316277612875833}, + gscites = {4}, + ss_id = {0a0094567a4b50f0274c729c55d272608d1cdafc}, + all_ss_ids = {['0a0094567a4b50f0274c729c55d272608d1cdafc']}, +} + +@article{Tan13a, + author = {Tan, Tao and Platel, Bram and Mus, Roel and Tabar, Laszlo and Mann, Ritse and Karssemeijer, Nico}, + title = {Computer-aided Detection of Cancer in Automated 3D Breast Ultrasound}, + journal = TMI, + year = {2013}, + volume = {32}, + pages = {1698-1706}, + doi = {10.1109/TMI.2013.2263389}, + abstract = {Automated 3D breast ultrasound (ABUS) has gained a lot of interest and may become widely used in screening of dense breasts, where sensitivity of mammography is poor. However, reading ABUS images is time consuming, and subtle abnormalities may be missed. Therefore, we are developing a computer aided detection (CAD) system to help reduce reading time and prevent errors. In the multi-stage system we propose, segmentations of the breast, the nipple and the chestwall are performed, providing landmarks for the detection algorithm. Subsequently, voxel features characterizing coronal spiculation patterns, blobness, contrast, and depth are extracted. Using an ensemble of neural network classifiers, a likelihood map indicating potential abnormality is computed. Local maxima in the likelihood map are determined and form a set of candidates in each image. These candidates are further processed in a second detection stage, which includes region segmentation, feature extraction and a final classification. On region level, classification experiments were performed using different classifiers including an ensemble of neural networks, a support vector machine, a k-nearest neighbors, a linear discriminant, and a gentle boost classifier. Performance was determined using a dataset of 238 patients with 348 images (views), including 169 malignant and 154 benign lesions. Using free response receiver operating characteristic (FROC) analysis, the system obtains a view-based sensitivity of 64\% at 1 false positives per image using an ensemble of neural network classifiers.}, + file = {Tan13a.pdf:pdf\\Tan13a.pdf:PDF}, + optnote = {DIAG}, + pmid = {23693128}, + month = {9}, + gsid = {7682914345881547785}, + gscites = {93}, + ss_id = {8e7a787fc98d409a3f6c63826e53c60210894c19}, + all_ss_ids = {['8e7a787fc98d409a3f6c63826e53c60210894c19']}, +} + +@article{Tan13c, + author = {T. Tan and B. Platel and R. M. Mann and H. Huisman and N. Karssemeijer}, + title = {Chest Wall Segmentation in Automated {3D} Breast Ultrasound Scans}, + journal = MIA, + year = {2013}, + volume = {17}, + pages = {1273--1281}, + doi = {10.1016/j.media.2012.11.005}, + abstract = {In this paper, we present an automatic method to segment the chest wall in automated 3D breast ultrasound images. Determining the location of the chest wall in automated 3D breast ultrasound images is necessary in computer-aided detection systems to remove automatically detected cancer candidates beyond the chest wall and it can be of great help for inter- and intra-modal image registration. We show that the visible part of the chest wall in an automated 3D breast ultrasound image can be accurately modeled by a cylinder. We fit the surface of our cylinder model to a set of automatically detected rib-surface points. 
The detection of the rib-surface points is done by a classifier using features representing local image intensity patterns and presence of rib shadows. Due to attenuation of the ultrasound signal, a clear shadow is visible behind the ribs. Evaluation of our segmentation method is done by computing the distance of manually annotated rib points to the surface of the automatically detected chest wall. We examined the performance on images obtained with the two most common 3D breast ultrasound devices in the market. In a dataset of 142 images, the average mean distance of the annotated points to the segmented chest wall was 5.59+-3.08 mm.}, + file = {Tan13c.pdf:pdf\\Tan13c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {23273891}, + month = {12}, + gsid = {17157262863361426204}, + gscites = {19}, + ss_id = {534510f570af42f0affc5967dbcaefeb25948aa8}, + all_ss_ids = {['534510f570af42f0affc5967dbcaefeb25948aa8']}, +} + +@inproceedings{Tan13d, + author = {T. Tan and B. Eiben and B. Platel and J. van Zelst and L. Han and T. Mertzanidou and S. Johnsen and J. Hipwell and R.M. Mann and D. Hawkes and N. Karssemeijer}, + title = {Registration of automated 3{D} breast ultrasound views}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2013}, + file = {Tan13d.pdf:pdf\\Tan13d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Tan13e, + author = {Tan, Tao and Platel, Bram and Twellmann, Thorsten and van Schie, Guido and Mus, Roel and Grivegn\'{e}e, Andr\'{e} and Mann, Ritse M. and Karssemeijer, Nico}, + title = {Evaluation of the Effect of Computer-Aided Classification of Benign and Malignant Lesions on Reader Performance in Automated Three-dimensional Breast Ultrasound}, + journal = AR, + year = {2013}, + volume = {20}, + pages = {1381--1388}, + doi = {10.1016/j.acra.2013.07.013}, + abstract = {To investigate the effect of a newly developed computer-aided diagnosis (CAD) system on reader interpretation of breast lesions in automated three-dimensional (3D) breast ultrasound. A CAD system was developed to differentiate malignant lesions from benign lesions including automated lesion segmentation in three dimensions; extraction of lesion features such as spiculation, margin contrast, and posterior acoustic behavior; and a classification stage. Eighty-eight patients with breast lesions were included for an observer study: 47 lesions were malignant and 41 were benign. Eleven readers (seven radiologists and four residents) read the cases with and without CAD. We compared the performance of readers with and without CAD using receiver operating characteristic (ROC) analysis. The CAD system had an area under the ROC curve (AUC) of 0.92 for discriminating benign and malignant lesions, whereas the unaided reader AUC ranged from 0.77 to 0.92. 
Mean performance of inexperienced readers improved when CAD was used (AUC = 0.85 versus 0.90; P = .007), whereas mean performance of experienced readers did not change with CAD (AUC = 0.89). By using the CAD system for classification of lesions in automated 3D breast ultrasound, which on its own performed as good as the best readers, the performance of inexperienced readers improved while that of experienced readers remained unaffected.}, + file = {Tan13e.pdf:pdf\\Tan13e.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {24119350}, + month = {11}, + gsid = {15955819716225519808}, + gscites = {34}, + ss_id = {8139e856e3807c839ad70161f5e815552a083a64}, + all_ss_ids = {['8139e856e3807c839ad70161f5e815552a083a64']}, +} + +@phdthesis{Tan14, + author = {Tao Tan}, + title = {Automated 3D Breast Ultrasound Image Analysis}, + year = {2014}, + url = {http://repository.ubn.ru.nl/handle/2066/121931}, + copromotor = {B. Platel and R. M. Mann}, + file = {Tan14.pdf:pdf\\Tan14.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Tan15, + author = {Tan, Tao and Mordang, Jan-Jurre and van Zelst, Jan and Grivegn\'{e}e, Andr\'{e} and Gubern-M\'{e}rida, Albert and Melendez, Jaime and Mann, Ritse M and Zhang, Wei and Platel, Bram and Karssemeijer, Nico}, + title = {Computer-aided detection of breast cancers using Haar-like features in automated 3D breast ultrasound}, + journal = MP, + year = {2015}, + volume = {42}, + pages = {1498-1504}, + doi = {10.1118/1.4914162}, + abstract = {Automated 3D breast ultrasound (ABUS) has gained interest in breast imaging. Especially for screening women with dense breasts, ABUS appears to be beneficial. However, since the amount of data generated is large, the risk of oversight errors is substantial. Computer aided detection (CADe) may be used as a second reader to prevent oversight errors. When CADe is used in this fashion, it is essential that small cancers are detected, while the number of false positive findings should remain acceptable. In this work, the authors improve their previously developed CADe system in the initial candidate detection stage. The authors use a large number of 2D Haar-like features to differentiate lesion structures from false positives. Using a cascade of GentleBoost classifiers that combines these features, a likelihood score, highly specific for small cancers, can be efficiently computed. The likelihood scores are added to the previously developed voxel features to improve detection. The method was tested in a dataset of 414 ABUS volumes with 211 cancers. Cancers had a mean size of 14.72 mm. Free-response receiver operating characteristic analysis was performed to evaluate the performance of the algorithm with and without using the aforementioned Haar-like feature likelihood scores. After the initial detection stage, the number of missed cancer was reduced by 18.8% after adding Haar-like feature likelihood scores. 
The proposed technique significantly improves our previously developed CADe system in the initial candidate detection stage.}, + file = {Tan15.pdf:pdf\\Tan15.pdf:PDF}, + optnote = {DIAG}, + number = {7}, + pmid = {25832040}, + month = {3}, + gsid = {16482522359454166873}, + gscites = {35}, + ss_id = {61d67f082bb776ac3a897dd2109bcb61d3660e7b}, + all_ss_ids = {['61d67f082bb776ac3a897dd2109bcb61d3660e7b']}, +} + +@article{Tan16, + author = {Tan, Tao and Gubern-M\'{e}rida, Albert and Borelli, Cristina and Manniesing, Rashindra and {van Zelst}, Jan and Wang, Lei and Zhang, Wei and Platel, Bram and Mann, Ritse M. and Karssemeijer, Nico}, + title = {Segmentation of malignant lesions in {3D} breast ultrasound using a depth-dependent model}, + journal = MP, + year = {2016}, + volume = {43}, + number = {7}, + pages = {4074-4084}, + doi = {10.1118/1.4953206}, + url = {http://scitation.aip.org/content/aapm/journal/medphys/43/7/10.1118/1.4953206}, + abstract = {Purpose: Automated 3D breast ultrasound (ABUS) has been proposed as a complementary screening + modality to mammography for early detection of breast cancers. To facilitate the interpretation + of ABUS images, automated diagnosis and detection techniques are being developed, in which + malignant lesion segmentation plays an important role. However, automated segmentation of cancer + in ABUS is challenging since lesion edges might not be well defined. In this study, the authors aim + at developing an automated segmentation method for malignant lesions in ABUS that is robust to + ill-defined cancer edges and posterior shadowing. + + Methods: A segmentation method using depth-guided dynamic programming based on spiral scanning + is proposed. The method automatically adjusts aggressiveness of the segmentation according + to the position of the voxels relative to the lesion center. Segmentation is more aggressive in the + upper part of the lesion (close to the transducer) than at the bottom (far away from the transducer), + where posterior shadowing is usually visible. The authors used Dice similarity coefficient (Dice) + for evaluation. The proposed method is compared to existing state of the art approaches such as + graph cut, level set, and smart opening and an existing dynamic programming method without depth + dependence. + + Results: In a dataset of 78 cancers, our proposed segmentation method achieved a mean Dice of + 0.73+-0.14. The method outperforms an existing dynamic programming method (0.70+-0.16) on this + task (p = 0.03) and it is also significantly (p < 0.001) better than graph cut (0.66+-0.18), level set + based approach (0.63+-0.20) and smart opening (0.65+-0.12). + + Conclusions: The proposed depth-guided dynamic programming method achieves accurate breast + malignant lesion segmentation results in automated breast ultrasound.}, + file = {Tan16.pdf:pdf\\Tan16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {27370126}, + month = {6}, + gsid = {1950157939420291392}, + gscites = {15}, + ss_id = {16018e9f637a1353bcf8136c4643cea5d0de8bbd}, + all_ss_ids = {['16018e9f637a1353bcf8136c4643cea5d0de8bbd']}, +} + +@article{Tanc10, + author = {E. Tanck and J. C. W. Deenen and H. J. Huisman and J. G. Kooloos and H. Huizenga and N. 
Verdonschot}, + title = {An anatomically shaped lower body model for {CT} scanning of cadaver femurs}, + journal = PMB, + year = {2010}, + volume = {55}, + pages = {N57--N62}, + doi = {10.1088/0031-9155/55/2/N03}, + abstract = {{B}one specific, {CT}-based finite element ({FE}) analyses have great potential to accurately predict the fracture risk of deteriorated bones. {H}owever, it has been shown that differences exist between {FE}-models of femora scanned in a water basin or scanned in situ within the human body, as caused by differences in measured bone mineral densities ({BMD}). {I}n this study we hypothesized that these differences can be reduced by re-creating the patient {CT}-conditions by using an anatomically shaped physical model of the lower body. {BMD} distributions were obtained from four different femora that were scanned under three conditions: (1) in situ within the cadaver body, (2) in a water basin and (3) in the body model. {T}he {BMD} of the three scanning protocols were compared at two locations: proximally, in the trabecular bone of the femoral head, and in the cortical bone of the femoral shaft. {P}roximally, no significant differences in {BMD} were found between the in situ scans and the scans in the body model, whereas the densities from the water basin scans were on average 10.8\% lower than in situ. {I}n the femoral shaft the differences between the three scanning protocols were insignificant. {I}n conclusion, the body model better approached the in situ situation than a water basin. {F}uture studies can use this body model to mimic patient situations and to develop protocols to improve the performance of the {FE}-models in actual patients.}, + file = {Tanc10.pdf:pdf/Tanc10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {20023325}, + month = {12}, + gsid = {684371408799077124}, + gscites = {4}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88094}, + ss_id = {d1a97815a9fa5d25ea4b55acb51cbc582c35407e}, + all_ss_ids = {['d1a97815a9fa5d25ea4b55acb51cbc582c35407e']}, +} + +@inproceedings{Tann11a, + author = {Christine Tanner and Guido van Schie and Nico Karssemeijer and Gabor Szekely}, + title = {Matching Regions for Mammographic Views: Comparison and Compensation for Deformations}, + booktitle = {{MICCAI} {W}orkshop: {B}reast {I}mage {A}nalysis}, + year = {2011}, + abstract = {Matching features on MLO and CC mammograms has shown to improve performance of computer aided detection systems. False positive rates can be lowered by reducing the search space during matching. Various methods have been proposed to define the search space. These do either not explicitly compensate for the breast deformation or simulate the whole process. In this study, we investigated the benefit of extending the common approaches by some basic compensations for breast deformations and compare several methods. Performance was tested on 50 pairs of mammograms with corresponding masses annotated by a radiologist. No clear advantage of straight strips or annular bands was observed for the common methods. Extensions to remove the pectoral muscle by shearing, compensating for gravity on MLO views by shearing and a volume-preserving transformation to get similar breast shapes in both views worked well when breast shapes were elliptical (mean (maximum) error of 5.3 (18.6) mm for 25 pairs). 
Using this strategy for elliptical breast shapes and straight strips after pectoral shearing for the rest resulted in lowest mean errors (6.7 mm) for the whole dataset.}, + file = {Tann11a.pdf:pdf/Tann11a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {18022156499077991752}, + gscites = {1}, +} + +@article{Tell18, + author = {Tellez, David and Balkenhol, Maschenka and Otte-Holler, Irene and van de Loo, Rob and Vogels, Rob and Bult, Peter and Wauters, Carla and Vreuls, Willem and Mol, Suzanne and Karssemeijer, Nico and Litjens, Geert and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Whole-Slide Mitosis Detection in H\&E Breast Histology Using PHH3 as a Reference to Train Distilled Stain-Invariant Convolutional Networks}, + journal = TMI, + year = {2018}, + volume = {37}, + number = {9}, + pages = {2126 - 2136}, + doi = {10.1109/TMI.2018.2820199}, + abstract = {Manual counting of mitotic tumor cells in tissue sections constitutes one of the strongest prognostic markers for breast cancer. This procedure, however, is time-consuming and error-prone. We developed a method to automatically detect mitotic figures in breast cancer tissue sections based on convolutional neural networks (CNNs). Application of CNNs to hematoxylin and eosin (H&E) stained histological tissue sections is hampered by: (1) noisy and expensive reference standards established by pathologists, (2) lack of generalization due to staining variation across laboratories, and (3) high computational requirements needed to process gigapixel whole-slide images (WSIs). In this paper, we present a method to train and evaluate CNNs to specifically solve these issues in the context of mitosis detection in breast cancer WSIs. First, by combining image analysis of mitotic activity in phosphohistone-H3 (PHH3) restained slides and registration, we built a reference standard for mitosis detection in entire H&E WSIs requiring minimal manual annotation effort. Second, we designed a data augmentation strategy that creates diverse and realistic H&E stain variations by modifying the hematoxylin and eosin color channels directly. Using it during training combined with network ensembling resulted in a stain invariant mitosis detector. Third, we applied knowledge distillation to reduce the computational requirements of the mitosis detection ensemble with a negligible loss of performance. The system was trained in a single-center cohort and evaluated in an independent multicenter cohort from The Cancer Genome Atlas on the three tasks of the Tumor Proliferation Assessment Challenge (TUPAC). We obtained a performance within the top-3 best methods for most of the tasks of the challenge.}, + file = {:pdf/Tell18.pdf:PDF}, + optnote = {DIAG}, + pmid = {29994086}, + month = {9}, + gsid = {664853976239237267}, + gscites = {188}, + ss_id = {d188bdc40ffea984ce94b88b21fd5d3d3d9972d3}, + all_ss_ids = {['d188bdc40ffea984ce94b88b21fd5d3d3d9972d3']}, +} + +@inproceedings{Tell18a, + author = {Tellez, D. and Balkenhol, M. and Karssemeijer, N. and Litjens, G. and van der Laak, J. and Ciompi, F.}, + title = {{H\&E stain augmentation improves generalization of convolutional networks for histopathological mitosis detection}}, + booktitle = MI, + year = {2018}, + volume = {10581}, + series = SPIE, + doi = {10.1117/12.2293048}, + abstract = {The number of mitotic figures per tumor area observed in hematoxylin and eosin (H and E) histological tissue sections under light microscopy is an important biomarker for breast cancer prognosis. 
Whole-slide imaging and computational pathology have enabled the development of automatic mitosis detection algorithms based on convolutional neural networks (CNNs). These models can suffer from high generalization error, i.e. trained networks often underperform on datasets originating from pathology laboratories different than the one that provided the training data, mainly due to the presence of inter-laboratory stain variations. We propose a novel data augmentation strategy that exploits the properties of the H and E color space to simulate a broad range of realistic H and E stain variations. To our best knowledge, this is the first time that data augmentation is performed directly in the H and E color space, instead of RGB. The proposed technique uses color deconvolution to transform RGB images into the H and E color space, modifies the H and E color channels stochastically, and projects them back to RGB space. We trained a CNN-based mitosis detector on homogeneous data from a single institution, and tested its performance on an external, multicenter cohort that contained a wide range of unseen H and E stain variations. We compared CNNs trained with and without the proposed augmentation strategy and observed a significant improvement in performance and robustness to unseen stain variations when the new color augmentation technique was included. In essence, we have shown that CNNs can be made robust to inter-lab stain variation by incorporating extensive stain augmentation techniques.}, + file = {Tell18a.pdf:pdf/Tell18b.pdf:PDF}, + optnote = {DIAG}, + month = {3}, + ss_id = {6f55cc94e6a7b1e7f54b77aecf6b57773378fc21}, + all_ss_ids = {['6f55cc94e6a7b1e7f54b77aecf6b57773378fc21']}, + gscites = {38}, +} + +@inproceedings{Tell18b, + author = {Tellez, D. and van der Laak, J. and Ciompi, F.}, + title = {{Gigapixel Whole-Slide Image Classification Using Unsupervised Image Compression And Contrastive Training}}, + booktitle = MIDL, + year = {2018}, + url = {https://openreview.net/forum?id=Hk2YYqssf}, + abstract = {{We propose a novel two-step methodology for entire whole-slide image (WSI) classification. First, all tissue patches in a WSI are mapped into vector embeddings using an encoder trained in an unsupervised fashion. The spatial arrangement of these embeddings is maintained with respect to the tissue patches, forming a stack of 2D feature maps representing the WSI. Second, a convolutional neural network is trained on these compact representations to predict weak labels associated with entire WSIs. We investigated several unsupervised schemes to train the encoder model: convolutional autoencoders (CAE), variational autoencoders (VAE), and a novel approach based on contrastive training. We validated the proposed methodology by predicting the existence of tumor metastasis at WSI-level using the Camelyon16 dataset. Our experimental results showed that the proposed methodology can be used to predict weak labels from entire WSIs. 
Furthermore, the novel contrastive encoder proved to be superior to the CAE and VAE approaches.}}, + file = {:pdf/Tell18b.pdf:PDF}, + optnote = {DIAG}, + gsid = {13709089197553633779}, + gscites = {10}, + ss_id = {eebf6be2e906c922ee045c946b230b99dd9c86e0}, + all_ss_ids = {['eebf6be2e906c922ee045c946b230b99dd9c86e0']}, +} + +@article{Tell19a, + author = {Tellez, David and Litjens, Geert and B\'{a}ndi, P\'{e}ter and Bulten, Wouter and Bokhorst, John-Melle and Ciompi, Francesco and van der Laak, Jeroen}, + title = {Quantifying the effects of data augmentation and stain color normalization in convolutional neural networks for computational pathology}, + journal = MIA, + year = {2019}, + volume = {58}, + month = {8}, + pages = {101544}, + doi = {10.1016/j.media.2019.101544}, + url = {https://www.sciencedirect.com/science/article/pii/S1361841519300799}, + abstract = {Stain variation is a phenomenon observed when distinct pathology laboratories stain tissue slides that exhibit similar but not identical color appearance. Due to this color shift between laboratories, convolutional neural networks (CNNs) trained with images from one lab often underperform on unseen images from the other lab. Several techniques have been proposed to reduce the generalization error, mainly grouped into two categories: stain color augmentation and stain color normalization. The former simulates a wide variety of realistic stain variations during training, producing stain-invariant CNNs. The latter aims to match training and test color distributions in order to reduce stain variation. For the first time, we compared some of these techniques and quantified their effect on CNN classification performance using a heterogeneous dataset of hematoxylin and eosin histopathology images from 4 organs and 9 pathology laboratories. Additionally, we propose a novel unsupervised method to perform stain color normalization using a neural network. Based on our experimental results, we provide practical guidelines on how to use stain color augmentation and stain color normalization in future computational pathology applications.}, + file = {Tell19a.pdf:pdf\\Tell19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31466046}, + gsid = {9638664310854063699}, + gscites = {341}, + ss_id = {35ee6606ec99b5bf282a0c5f400edbd16a6e22d9}, + all_ss_ids = {['35ee6606ec99b5bf282a0c5f400edbd16a6e22d9', 'd6e975989345b69f539dbb8f22cb3437f5cc5039']}, +} + +@inproceedings{Tell20, + author = {Tellez, David and Hoppener, Diederik and Verhoef, Cornelis and Grunhagen, Dirk and Nierop, Pieter and Drozdzal, Michal and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Extending Unsupervised Neural Image Compression With Supervised Multitask Learning}, + booktitle = MIDL, + year = {2020}, + abstract = {We focus on the problem of training convolutional neural networks on gigapixel histopathology images to predict image-level targets. For this purpose, we extend Neural Image Compression (NIC), an image compression framework that reduces the dimensionality of these images using an encoder network trained unsupervisedly. We propose to train this encoder using supervised multitask learning (MTL) instead. We applied the proposed MTL NIC to two histopathology datasets and three tasks. First, we obtained state-of-the-art results in the Tumor Proliferation Assessment Challenge of 2016 (TUPAC16). Second, we successfully classified histopathological growth patterns in images with colorectal liver metastasis (CLM). 
Third, we predicted patient risk of death by learning directly from overall survival in the same CLM data. Our experimental results suggest that the representations learned by the MTL objective are: (1) highly specific, due to the supervised training signal, and (2) transferable, since the same features perform well across different tasks. Additionally, we trained multiple encoders with different training objectives, e.g. unsupervised and variants of MTL, and observed a positive correlation between the number of tasks in MTL and the system performance on the TUPAC16 dataset.}, + file = {Tell20.pdf:pdf\\Tell20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + ss_id = {6441d36a0af25f29abc3a21a012daf1ea9d28594}, + all_ss_ids = {['6441d36a0af25f29abc3a21a012daf1ea9d28594']}, + gscites = {18}, +} + +@article{Tell21, + author = {Tellez, David and Litjens, Geert and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Neural Image Compression for Gigapixel Histopathology Image Analysis.}, + doi = {10.1109/TPAMI.2019.2936841}, + issue = {2}, + pages = {567--578}, + volume = {43}, + abstract = {We propose Neural Image Compression (NIC), a two-step method to build convolutional neural networks for gigapixel image analysis solely using weak image-level labels. First, gigapixel images are compressed using a neural network trained in an unsupervised fashion, retaining high-level information while suppressing pixel-level noise. Second, a convolutional neural network (CNN) is trained on these compressed image representations to predict image-level labels, avoiding the need for fine-grained manual annotations. We compared several encoding strategies, namely reconstruction error minimization, contrastive training and adversarial feature learning, and evaluated NIC on a synthetic task and two public histopathology datasets. We found that NIC can exploit visual cues associated with image-level labels successfully, integrating both global and local visual information. Furthermore, we visualized the regions of the input gigapixel images where the CNN attended to, and confirmed that they overlapped with annotations from human experts.}, + file = {:pdf/Tell21.pdf:PDF}, + journal = TPAMI, + month = feb, + optnote = {DIAG, RADIOLOGY}, + pmid = {31442971}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/230096}, + ss_id = {fed7c0d369d8991f6b76a661dedd0d1768a402d2}, + all_ss_ids = {['44454c9090606d0332272ff38df6c87eac15f5f7', 'fed7c0d369d8991f6b76a661dedd0d1768a402d2']}, + gscites = {154}, +} + +@phdthesis{Tell21a, + author = {David Tellez}, + title = {Advancing computational pathology with deep learning: from patches to gigapixel image-level classification}, + url = {https://repository.ubn.ru.nl/handle/2066/233752}, + abstract = {The main focus of this work is to investigate novel deep learning based methodologies to improve breast cancer prognostic tools within the context of Computational Pathology. This research can be divided into three key blocks: + 1. Fundamental challenges in Computational Pathology. We address some of the issues that arise when developing deep learning based models across applications and organs. First, scaling the generation of pixel-level annotated data (Chapter 2). Second, addressing intra- and inter-center stain variation (Chapters 2 and 3). Third, developing accurate and fast models to process entire whole-slide images (Chapters 2 and 4). + 2. 
Automating the core component of breast cancer grading: performing mitosis detection at scale, that is, processing thousands of unseen multicenter entire whole-slide images, while deriving actionable insights for pathologists (Chapter 2). + 3. Performing whole-slide image classification. We propose a method that enables feeding entire whole-slide images to a single deep learning based model, targeting patient-level labels and outcome data such as overall survival (Chapters 4 and 5).}, + copromotor = {F. Ciompi and G. Litjens}, + file = {Tell21a.pdf:pdf\\Tell21a.pdf:PDF}, + optnote = {DIAG}, + promotor = {J.A.W.M. van der Laak and N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + year = {2021}, + journal = {PhD thesis}, +} + +@conference{Teus13, + author = {M. M. Teussink and M. J. J. P. van Grinsven and B. Cense and C. B. Hoyng and J. Klevering and T. Theelen}, + title = {Functional optical coherence tomography with a commercial device - a pilot study}, + booktitle = ARVO, + year = {2013}, + abstract = {Purpose: To evaluate reproducibility and reliability of measurements of visually evoked retinal intrinsic optical signals {(IOS)} in humans by a commercially available spectral-domain {OCT}. Methods: We measured visually evoked {IOS} in 16 healthy volunteers with a {S}pectralis {OCT}, extended by a custom made optical stimulator. After 20 minutes of dark adaptation, a 3 mm2 area of the temporal perimacula was stimulated with a white flash of 0.1 seconds duration and 5.9 log scot. tld. luminance. A single-line {OCT} scan was recorded on-line before (baseline), during and after stimulation. From this {OCT} time-series we extracted {IOS} by calculating changes in reflectivity from baseline after application of the light stimulus. Custom software was used to compare reflectivity changes in the stimulated area to the non-stimulated area. To improve the signal-to-noise ratio, we averaged the results of three single {IOS} recordings of each eye. Results: {IOS} of the stimulated area could repeatedly be recorded with reproducible results. Axial motion of the {OCT} scans during the measurements caused substantial noise which could significantly be reduced by averaging. Conclusions: We were able to reproducibly record visually evoked {IOS} with the {S}pectralis {OCT} coupled to an optical stimulator. Due to the high noise level, averaging of multiple measurements is necessary to clearly identify and analyze retinal signals. {IOS} imaging may be improved by three-dimensional eye tracking, since axial motion artifacts seem to be of significant influence.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Teus15, + author = {Michel M. Teussink and Barry Cense and Mark J. J. P. van Grinsven and B. Jeroen Klevering and Carel B. Hoyng and Thomas Theelen}, + title = {Impact of motion-associated noise on intrinsic optical signal imaging in humans with optical coherence tomography}, + journal = BOE, + year = {2015}, + volume = {6}, + pages = {1632--1647}, + doi = {10.1364/BOE.6.001632}, + url = {http://www.opticsinfobase.org/boe/abstract.cfm?URI=boe-6-5-1632}, + abstract = {A growing body of evidence suggests that phototransduction can be studied in the human eye in vivo by imaging of fast intrinsic optical signals (IOS). There is consensus concerning the limiting influence of motion-associated imaging noise on the reproducibility of IOS-measurements, especially in those employing spectral-domain optical coherence tomography (SD-OCT).
However, no study to date has conducted a comprehensive analysis of this noise in the context of IOS-imaging. In this study, we discuss biophysical correlates of IOS, and we address motion-associated imaging noise by providing correctional post-processing methods. In order to avoid cross-talk of adjacent IOS of opposite signal polarity, cellular resolution and stability of imaging to the level of individual cones is likely needed. The optical Stiles-Crawford effect can be a source of significant IOS-imaging noise if alignment with the peak of the Stiles-Crawford function cannot be maintained. Therefore, complete head stabilization by implementation of a bite-bar may be critical to maintain a constant pupil entry position of the OCT beam. Due to depth-dependent sensitivity fall-off, heartbeat and breathing associated axial movements can cause tissue reflectivity to vary by 29\% over time, although known methods can be implemented to null these effects. Substantial variations in reflectivity can be caused by variable illumination due to changes in the beam pupil entry position and angle, which can be reduced by an adaptive algorithm based on slope-fitting of optical attenuation in the choriocapillary lamina.}, + file = {Teus15.pdf:pdf\\Teus15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {26137369}, + publisher = {OSA}, + month = {4}, +} + +@article{Teus15a, + author = {Teussink, Michel M. and Breukink, Myrte B. and van Grinsven, Mark J J P. and Hoyng, Carel B. and Klevering, B Jeroen and Boon, Camiel J F. and de Jong, Eiko K. and Theelen, Thomas}, + title = {OCT Angiography Compared to Fluorescein and Indocyanine Green Angiography in Chronic Central Serous Chorioretinopathy}, + journal = IOVS, + year = {2015}, + volume = {56}, + pages = {5229--5237}, + doi = {10.1167/iovs.15-17140}, + abstract = {Abnormal choroidal blood flow is considered important in the pathogenesis of chronic central serous chorioretinopathy (CSC). Optical coherence tomography (OCT) angiography can image ocular blood cell flow and could thus provide novel insights in disease mechanisms of CSC. We evaluated depth-resolved flow in chronic CSC by OCT angiography compared to fluorescein angiography (FA) and indocyanine green angiography (ICGA). Eighteen eyes with chronic CSC, and six healthy controls, were included. Two human observers annotated areas of staining, hypofluorescence, and hotspots on FA and ICGA, and areas of abnormal flow on OCT angiography. Interobserver agreement in annotating OCT angiography and FA/ICGA was measured by Jaccard indices (JIs). We assessed colocation of flow abnormalities and subretinal fluid visible on OCT, and the distance between hotspots on ICGA from flow abnormalities. Abnormal areas were most frequently annotated in late-phase ICGA and choriocapillary OCT angiography, with moderately high (median JI, 0.74) and moderate (median JI, 0.52) interobserver agreement, respectively. Abnormalities on late-phase ICGA and FA colocated with those on OCT angiography. Aberrant choriocapillary OCT angiography presented as foci of reduced flow surrounded by hyperperfused areas. Hotspots on ICGA were located near hypoperfused spots on OCT angiography (mean distance, 168 {\textmu}m). Areas with current or former subretinal fluid were colocated with flow abnormalities. On OCT angiography, chronic CSC showed irregular choriocapillary flow patterns, corresponding to ICGA abnormalities.
These results suggest focal choriocapillary ischemia with surrounding hyperperfusion that may lead to subretinal fluid leakage.}, + file = {Teus15a.pdf:pdf\\Teus15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {9}, + pmid = {26244299}, + month = {8}, +} + +@article{Teuw14, + author = {Teuwen, J.}, + title = {A note on Gaussian maximal function}, + journal = INDAMAT, + year = {2015}, + volume = {26}, + number = {1}, + month = {1}, + pages = {106--112}, + doi = {10.1016/j.indag.2014.07.017}, + url = {http://linkinghub.elsevier.com/retrieve/pii/S0019357714000718}, + abstract = {This note presents a proof that the non-tangential maximal function of the Ornstein-Uhlenbeck semigroup is bounded pointwise by the Gaussian Hardy-Littlewood maximal function. In particular this entails an extension on a result by Pineda and Urbina [1] who proved a similar result for a 'truncated' version with fixed parameters of the non-tangential maximal function. We actually obtain boundedness of the maximal function on non-tangential cones of arbitrary aperture. {\textcopyright} 2014 Royal Dutch Mathematical Society (KWG).}, + file = {:pdf/Teuw15.pdf:PDF}, + optnote = {DIAG}, +} + +@article{Teuw16b, + author = {Teuwen, J.}, + title = {{On the integral kernels of derivatives of the Ornstein-Uhlenbeck semigroup}}, + journal = IDAQPRT, + year = {2016}, + volume = {19}, + number = {04}, + month = {12}, + pages = {1650030}, + doi = {10.1142/S0219025716500302}, + url = {http://www.worldscientific.com/doi/abs/10.1142/S0219025716500302}, + file = {:pdf/Teuw16b.pdf:PDF}, + optnote = {DIAG}, +} + +@conference{Teuw17b, + author = {Jonas Teuwen and Michel Kallenberg and Albert Gubern-Merida and Alejandro Rodriguez-Ruiz and Nico Karssemeijer and Ritse Mann}, + title = {Automated pre-selection of mammograms without abnormalities using deep learning}, + booktitle = RSNA, + year = {2017}, + abstract = {PURPOSE + In this study we evaluated the potential of a computer system to select exams with low likelihood of + containing cancer. + + METHOD AND MATERIALS + We collected a representative set of 1649 referrals with different screening outcome from the Dutch + breast cancer screening. The dataset comprised 489 true positives (TP) exams and 1160 false + positive (FP) exams. In addition, we collected 1000 true negative (TN) exams from the same + screening population. All exams were automatically analyzed with Transpara v1.2.0 (ScreenPoint + Medical, Nijmegen, The Netherlands). Transpara uses deep learning algorithms to, based on + soft-tissue lesions and calcifications findings, categorize every mammogram on a 10-point scale. This + computerized score represents the likelihood that a cancer is present in the exam at hand, where 10 + represents the highest likelihood that a cancer is present. It is defined in such a way that, in a + screening setting, the number of mammograms in each category is roughly equal. + + In this study, we determined the distribution of the computerized cancer likelihood scores for the TP, + FP and TN exams. In particular we quantified for each category the fraction of cases with a cancer + likelihood score below or equal to 5, including about 50% of the mammograms. Additionally we + evaluated the positive predictive value (PPV) of referrals in each likelihood category. + + RESULTS + 5.11% of the TPs, 20.3% of the FPs and 45.0% of the TNs were assigned to the likelihood categories + 1 to 5. 
This corresponds to 0.7 cancers per 1000 in the group with score 1-5 and 11.2 per 1000 with a + score higher than 5, based on the cancer detection rate of 6.5/1000 in the Dutch screening program. + The PPV was 8.00%, 8.14%, and 44.9% for cancer likelihood scores 1, 5 and 10, respectively. + + CONCLUSION + Automated identification of a fraction of screening mammograms that most likely are normal is + feasible.}, + optnote = {DIAG}, +} + +@article{Teuw18, + author = {Jonas Teuwen and Paul Urbach}, + title = {On Maximum Focused Electric Energy in Bounded Regions}, + journal = {arXiv:1801.02450}, + year = {2018}, + abstract = {A general method is presented for determining the maximum electric energy in a bounded region of optical fields with given time-averaged flux of electromagnetic energy. Time-harmonic fields are considered whose plane wave expansion consists of propagating plane waves only, i.e., evanescent waves are excluded. The bounded region can be quite general: it can consist of finitely many points, or be a curve, a curved surface or a bounded volume. The optimum optical field is eigenfield corresponding to the maximum eigenvalue of a compact linear integral operator which depends on the bounded region. It is explained how these optimum fields can be realized by focusing appropriate pupil fields. The special case that the region is a circular disc perpendicular to the direction of optical axis is investigated by numerical simulations.}, + optnote = {DIAG}, + month = {1}, + ss_id = {da42a50fd33cb07f6419f0cd1004b2eda9bdc06a}, + all_ss_ids = {['da42a50fd33cb07f6419f0cd1004b2eda9bdc06a']}, + gscites = {0}, +} + +@article{Thag23, + author = {Thagaard, Jeppe and Broeckx, Glenn and Page, David B and Jahangir, Chowdhury Arif and Verbandt, Sara and Kos, Zuzana and Gupta, Rajarsi and Khiroya, Reena and Abduljabbar, Khalid and Acosta Haab, Gabriela and Acs, Balazs and Akturk, Guray and Almeida, Jonas S and Alvarado-Cabrero, Isabel and Amgad, Mohamed and Azmoudeh-Ardalan, Farid and Badve, Sunil and Baharun, Nurkhairul Bariyah and Balslev, Eva and Bellolio, Enrique R and Bheemaraju, Vydehi and Blenman, Kim RM and Botinelly Mendon\c{c}a Fujimoto, Luciana and Bouchmaa, Najat and Burgues, Octavio and Chardas, Alexandros and Chon U Cheang, Maggie and Ciompi, Francesco and Cooper, Lee AD and Coosemans, An and Corredor, Germ\'{a}n and Dahl, Anders B and Dantas Portela, Flavio Luis and Deman, Frederik and Demaria, Sandra and Dor\'{e} Hansen, Johan and Dudgeon, Sarah N and Ebstrup, Thomas and Elghazawy, Mahmoud and Fernandez-Mart\'{i}n, Claudio and Fox, Stephen B and Gallagher, William M and Giltnane, Jennifer M and Gnjatic, Sacha and Gonzalez-Ericsson, Paula I and Grigoriadis, Anita and Halama, Niels and Hanna, Matthew G and Harbhajanka, Aparna and Hart, Steven N and Hartman, Johan and Hauberg, S\oren and Hewitt, Stephen and Hida, Akira I and Horlings, Hugo M and Husain, Zaheed and Hytopoulos, Evangelos and Irshad, Sheeba and Janssen, Emiel AM and Kahila, Mohamed and Kataoka, Tatsuki R and Kawaguchi, Kosuke and Kharidehal, Durga and Khramtsov, Andrey I and Kiraz, Umay and Kirtani, Pawan and Kodach, Liudmila L and Korski, Konstanty and Kov\'{a}cs, Anik\'{o} and Laenkholm, Anne-Vibeke and Lang-Schwarz, Corinna and Larsimont, Denis and Lennerz, Jochen K and Lerousseau, Marvin and Li, Xiaoxian and Ly, Amy and Madabhushi, Anant and Maley, Sai K and Manur Narasimhamurthy, Vidya and Marks, Douglas K and McDonald, Elizabeth S and Mehrotra, Ravi and Michiels, Stefan and Minhas, Fayyaz ul Amir Afsar and Mittal, 
Shachi and Moore, David A and Mushtaq, Shamim and Nighat, Hussain and Papathomas, Thomas and Penault-Llorca, Frederique and Perera, Rashindrie D and Pinard, Christopher J and Pinto-Cardenas, Juan Carlos and Pruneri, Giancarlo and Pusztai, Lajos and Rahman, Arman and Rajpoot, Nasir Mahmood and Rapoport, Bernardo Leon and Rau, Tilman T and Reis-Filho, Jorge S and Ribeiro, Joana M and Rimm, David and Roslind, Anne and Vincent-Salomon, Anne and Salto-Tellez, Manuel and Saltz, Joel and Sayed, Shahin and Scott, Ely and Siziopikou, Kalliopi P and Sotiriou, Christos and Stenzinger, Albrecht and Sughayer, Maher A and Sur, Daniel and Fineberg, Susan and Symmans, Fraser and Tanaka, Sunao and Taxter, Timothy and Tejpar, Sabine and Teuwen, Jonas and Thompson, E Aubrey and Tramm, Trine and Tran, William T and van der Laak, Jeroen and van Diest, Paul J and Verghese, Gregory E and Viale, Giuseppe and Vieth, Michael and Wahab, Noorul and Walter, Thomas and Waumans, Yannick and Wen, Hannah Y and Yang, Wentao and Yuan, Yinyin and Zin, Reena Md and Adams, Sylvia and Bartlett, John and Loibl, Sibylle and Denkert, Carsten and Savas, Peter and Loi, Sherene and Salgado, Roberto and Specht Stovgaard, Elisabeth}, + title = {Pitfalls in machine learning-based assessment of tumor-infiltrating lymphocytes in breast cancer: A report of the International Immuno-Oncology Biomarker Working Group on Breast Cancer}, + doi = {10.1002/path.6155}, + year = {2023}, + abstract = {AbstractThe clinical significance of the tumor-immune interaction in breast cancer is now established, and tumor-infiltrating lymphocytes (TILs) have emerged as predictive and prognostic biomarkers for patients with triple-negative (estrogen receptor, progesterone receptor, and HER2-negative) breast cancer and HER2-positive breast cancer. How computational assessments of TILs might complement manual TIL assessment in trial and daily practices is currently debated. Recent efforts to use machine learning (ML) to automatically evaluate TILs have shown promising results. We review state-of-the-art approaches and identify pitfalls and challenges of automated TIL evaluation by studying the root cause of ML discordances in comparison to manual TIL quantification. We categorize our findings into four main topics: (1) technical slide issues, (2) ML and image analysis aspects, (3) data challenges, and (4) validation issues. The main reason for discordant assessments is the inclusion of false-positive areas or cells identified by performance on certain tissue patterns or design choices in the computational implementation. To aid the adoption of ML for TIL assessment, we provide an in-depth discussion of ML and image analysis, including validation issues that need to be considered before reliable computational reporting of TILs can be incorporated into the trial and routine clinical management of patients with triple-negative breast cancer. (c) 2023 The Authors. 
The Journal of Pathology published by John Wiley & Sons Ltd on behalf of The Pathological Society of Great Britain and Ireland.}, + url = {http://dx.doi.org/10.1002/path.6155}, + file = {Thag23.pdf:pdf\Thag23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {The Journal of Pathology}, + citation-count = {2}, + automatic = {yes}, + pages = {498-513}, + volume = {260}, + ss_id = {13a37cf9771c8fbabc261e13c8a246346244f4ca}, + all_ss_ids = {['13a37cf9771c8fbabc261e13c8a246346244f4ca']}, + gscites = {2}, +} + +@article{Than22, + author = {Thannhauser, Jos and Nas, Joris and van der Sluijs, Koen and Zwart, Hans and de Boer, Menko-Jan and van Royen, Niels and Bonnes, Judith and Brouwer, Marc}, + title = {Pilot study on VF-waveform based algorithms for early detection of acute myocardial infarction during out-of-hospital cardiac arrest}, + doi = {10.1016/j.resuscitation.2022.03.025}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.resuscitation.2022.03.025}, + file = {Than22.pdf:pdf\Than22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Resuscitation}, + citation-count = {2}, + automatic = {yes}, + pages = {62-67}, + volume = {174}, +} + +@article{Thee20, + author = {Thee, Eric F. and Luttikhuizen, Daniel T. and Lemij, Hans G. and Verbraak, Frank D. and S\'{a}nchez, Clara I. and Klaver, Caroline C.W.}, + title = {Artificial intelligence for eye care}, + url = {https://www.ntvg.nl/artikelen/artificiele-intelligentie-de-oogzorg}, + abstract = {Technological developments in ophthalmic imaging and artificial intelligence (AI) create new possibilities for diagnostics in eye care. AI has already been applied in ophthalmic diabetes care. AI-systems currently detect diabetic retinopathy in general practice with a high sensitivity and specificity. AI-systems for the screening, monitoring and treatment of age-related macular degeneration and glaucoma are promising and are still being developed. AI-algorithms, however, only perform tasks for which they have been specifically trained and highly depend on the data and reference-standard that were used to train the system in identifying a certain abnormality or disease. How the data and the gold standard were established and determined, influences the performance of the algorithm. Furthermore, interpretability of deep learning algorithms is still an ongoing issue. By highlighting on images the areas that were critical for the decision of the algorithm, users can gain more insight into how algorithms come to a particular result.}, + journal = NTVG, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, + month = {9}, + ss_id = {95d2ec56339169d30a774899c6f5b93540a3ce0f}, + all_ss_ids = {['95d2ec56339169d30a774899c6f5b93540a3ce0f']}, + gscites = {0}, +} + +@article{Thij23, + author = {Thijssen, Linda C.P. and de Rooij, Maarten and Barentsz, Jelle O. and Huisman, Henkjan J.}, + title = {Radiomics based automated quality assessment for T2W prostate MR images}, + doi = {10.1016/j.ejrad.2023.110928}, + year = {2023}, + abstract = {Purpose: The guidelines for prostate cancer recommend the use of MRI in the prostate cancer pathway. Due to the variability in prostate MR image quality, the reliability of this technique in the detection of prostate cancer is highly variable in clinical practice. This leads to the need for an objective and automated assessment of image quality to ensure an adequate acquisition and hereby to improve the reliability of MRI. 
The aim of this study is to investigate the feasibility of Blind/referenceless image spatial quality evaluator (Brisque) and radiomics in automated image quality assessment of T2-weighted (T2W) images. Method: Anonymized axial T2W images from 140 patients were scored for quality using a five-point Likert scale (low, suboptimal, acceptable, good, very good quality) in consensus by two readers. Images were dichotomized into clinically acceptable (very good, good and acceptable quality images) and clinically unacceptable (low and suboptimal quality images) in order to train and verify the model. Radiomics and Brisque features were extracted from a central cuboid volume including the prostate. A reduced feature set was used to fit a Linear Discriminant Analysis (LDA) model to predict image quality. Two hundred times repeated 5-fold cross-validation was used to train the model and test performance by assessing the classification accuracy, the discrimination accuracy as receiver operating curve - area under curve (ROC-AUC), and by generating confusion matrices. Results: Thirty-four images were classified as clinically unacceptable and 106 were classified as clinically acceptable. The accuracy of the independent test set (mean +- standard deviation) was 85.4 +- 5.5%. The ROC-AUC was 0.856 (0.851 - 0.861) (mean; 95% confidence interval). Conclusions: Radiomics AI can automatically detect a significant portion of T2W images of suboptimal image quality. This can help improve image quality at the time of acquisition, thus reducing repeat scans and improving diagnostic accuracy.}, + url = {http://dx.doi.org/10.1016/j.ejrad.2023.110928}, + file = {Thij23.pdf:pdf\Thij23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + citation-count = {0}, + automatic = {yes}, + ss_id = {59545ee081348d9f15f20f3e3c8d64e67039d262}, + all_ss_ids = {['59545ee081348d9f15f20f3e3c8d64e67039d262']}, + gscites = {0}, +} + +@inproceedings{Timp02, + author = {S. Timp and N. Karssemeijer and J. Hendriks}, + title = {Analysis of changes in masses using contrast and size measures}, + booktitle = {IWDM '02: Proceedings of the 6th international workshop on Digital Mammography}, + year = {2002}, + publisher = {Springer-Verlag}, + doi = {10.1007/978-3-642-59327-7_58}, + pages = {240--242}, + optnote = {DIAG, RADIOLOGY}, + gsid = {17461913310296744924}, + gscites = {11}, + ss_id = {0dba24303682aab3795c1493f5b639bd38d4358d}, + all_ss_ids = {['0dba24303682aab3795c1493f5b639bd38d4358d']}, +} + +@article{Timp04, + author = {Timp, Sheila and Karssemeijer, Nico}, + title = {A new {2D} segmentation method based on dynamic programming applied to computer aided detection in mammography}, + journal = MP, + year = {2004}, + volume = {31}, + pages = {958--971}, + doi = {10.1118/1.1688039}, + abstract = {{M}ass segmentation plays a crucial role in computer-aided diagnosis ({CAD}) systems for classification of suspicious regions as normal, benign, or malignant. {I}n this article we present a robust and automated segmentation technique - based on dynamic programming - to segment mass lesions from surrounding tissue. {I}n addition, we propose an efficient algorithm to guarantee resulting contours to be closed. {T}he segmentation method based on dynamic programming was quantitatively compared with two other automated segmentation methods (region growing and the discrete contour model) on a dataset of 1210 masses. {F}or each mass an overlap criterion was calculated to determine the similarity with manual segmentation.
{T}he mean overlap percentage for dynamic programming was 0.69, for the other two methods 0.60 and 0.59, respectively. {T}he difference in overlap percentage was statistically significant. {T}o study the influence of the segmentation method on the performance of a {CAD} system two additional experiments were carried out. {T}he first experiment studied the detection performance of the {CAD} system for the different segmentation methods. {F}ree-response receiver operating characteristics analysis showed that the detection performance was nearly identical for the three segmentation methods. {I}n the second experiment the ability of the classifier to discriminate between malignant and benign lesions was studied. {F}or region based evaluation the area {A}z under the receiver operating characteristics curve was 0.74 for dynamic programming, 0.72 for the discrete contour model, and 0.67 for region growing. {T}he difference in {A}z values obtained by the dynamic programming method and region growing was statistically significant. {T}he differences between other methods were not significant.}, + file = {Timp04.pdf:pdf\\Timp04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {15191279}, + month = {4}, + gsid = {1462730622144634090}, + gscites = {220}, + ss_id = {475acaf8c5b6f148dd4faabc6a31d7a10605f748}, + all_ss_ids = {['475acaf8c5b6f148dd4faabc6a31d7a10605f748']}, +} + +@article{Timp05, + author = {S. Timp and S. van Engeland and N. Karssemeijer}, + title = {A regional registration method to find corresponding mass lesions in temporal mammogram pairs}, + journal = MP, + year = {2005}, + volume = {32}, + pages = {2629--2638}, + doi = {10.1118/1.1984323}, + abstract = {{I}n this paper we develop an automatic regional registration method to find corresponding masses on prior and current mammograms. {T}he method contains three steps. {I}n the first, we globally align both images. {T}hen, for each mass lesion on the current view, we define a search area on the prior view, which is likely to contain the same mass lesion. {T}hird, at each location in this search area we calculate a registration measure to quantify how well this location matches the mass lesion on the current view. {F}inally we select the best location. {T}o determine the performance of our method we compare it to several other registration methods. {O}n a dataset of 389 temporal mass pairs our method correctly links 82% of prior and current mass lesions, whereas other methods achieve at most 72%.}, + file = {Timp05.pdf:pdf\\Timp05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {8}, + pmid = {16193793}, + month = {7}, + gsid = {2725768412742731883}, + gscites = {31}, + ss_id = {d41140f0a6e06d1c94715dcd3ce6c4a9cda8ef48}, + all_ss_ids = {['d41140f0a6e06d1c94715dcd3ce6c4a9cda8ef48']}, +} + +@article{Timp06, + author = {S. Timp and N. Karssemeijer}, + title = {Interval change analysis to improve computer aided detection in mammography}, + journal = MIA, + year = {2006}, + volume = {10}, + pages = {82--95}, + doi = {10.1016/j.media.2005.03.007}, + abstract = {{W}e are developing computer aided diagnosis ({CAD}) techniques to study interval changes between two consecutive mammographic screening rounds. {W}e have previously developed methods for the detection of malignant masses based on features extracted from single mammographic views. {T}he goal of the present work was to improve our detection method by including temporal information in the {CAD} program. 
{T}oward this goal, we have developed a regional registration technique. {T}his technique links a suspicious location on the current mammogram with a corresponding location on the prior mammogram. {T}he novelty of our method is that the search for correspondence is done in feature space. {T}his has the advantage that very small lesions and architectural distortions may be found as well. {F}ollowing the linking process several features are calculated for the current and prior region. {T}emporal features are obtained by combining the feature values from both regions. {W}e evaluated the detection performance with and without the use of temporal features on a data set containing 2873 temporal film pairs from 938 patients. {T}here were 589 cases in which the current mammogram contained exactly one malignant mass. {C}ross validation was used to partition the data set into a train set and a test set. {T}he train set was used for feature selection and classifier training, the test set for classifier evaluation. {FROC} (free response operating characteristic) analysis showed an improvement in detection performance with the use of temporal features.}, + file = {Timp06.pdf:pdf\\Timp06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {15996893}, + month = {2}, + gsid = {7132504051186922195}, + gscites = {59}, + ss_id = {0cac166d49f3f7193b4ef0ba591bc1155aa0a762}, + all_ss_ids = {['0cac166d49f3f7193b4ef0ba591bc1155aa0a762']}, +} + +@phdthesis{Timp06a, + author = {S. Timp}, + title = {Analysis of Temporal Mammogram Pairs to Detect and Characterise Mass Lesions}, + year = {2006}, + url = {http://repository.ubn.ru.nl/handle/2066/50630}, + abstract = {This preparatory chapter provides some background material and literature required for this thesis. For further reading we suggest one of the following books: Vainio & Bianchini (2002); Homer (1997); Underwood (1992); Friedrich & Sickles (2000). This chapter is organised as follows. Section 1.1 to 1.5 give general information about breast cancer, about screening programmes to detect breast cancer, and about modalities that are used to image the breast. In Section 1.6 we describe the use of computer aided detection and diagnosis (CAD) systems and review some important studies that evaluate potential benefits of using CAD. At the moment multi view CAD systems are being developed that include information from multiple views. Section 1.7 summarises recent advances in this field. In this thesis we focus on the design of a multi view CAD system that incorporates information about temporal changes that take place between two consecutive screening rounds. Section 1.8 shortly discusses the objective for this approach. Section 1.9 clarifies definitions and nomenclature used in this thesis. Finally, in Section 1.10, we present an overview of this thesis.}, + copromotor = {N. Karssemeijer}, + file = {Timp06a.pdf:pdf/Timp06a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {C. C. A. M. Gielen}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Timp07, + author = {S. Timp and C. Varela and N. Karssemeijer}, + title = {Temporal change analysis for characterization of mass lesions in mammography}, + journal = TMI, + year = {2007}, + volume = {26}, + pages = {945--953}, + doi = {10.1109/TMI.2007.897392}, + abstract = {{I}n this paper, we present a fully automated computer-aided diagnosis ({CAD}) program to detect temporal changes in mammographic masses between two consecutive screening rounds.
{T}he goal of this work was to improve the characterization of mass lesions by adding information about the tumor behavior over time. {T}owards this goal we previously developed a regional registration technique that finds for each mass lesion on the current view a location on the prior view where the mass was most likely to develop. {F}or the task of interval change analysis, we designed two kinds of temporal features: difference features and similarity features. {D}ifference features indicate the (relative) change in feature values determined on prior and current views. {T}hese features may be especially useful for lesions that are visible on both views. {S}imilarity features measure whether two regions are comparable in appearance and may be useful for lesions that are visible on the prior view as well as for newly developing lesions. {W}e evaluated the classification performance with and without the use of temporal features on a dataset consisting of 465 temporal mammogram pairs, 238 benign, and 227 malignant. {W}e used cross validation to partition the dataset into a training set and a test set. {T}he training set was used to train a support vector machine classifier and the test set to evaluate the classifier. {T}he average {A}(z) value (area under the receiver operating characteristic curve) for classifying each lesion was 0.74 without temporal features and 0.77 with the use of temporal features. {T}he improvement obtained by adding temporal features was statistically significant ({P} = 0.005). {I}n particular, similarity features contributed to this improvement. {F}urthermore, we found that the improvement was comparable for masses that were visible and for masses that were not visible on the prior view. {T}hese results show that the use of temporal features is an effective approach to improve the characterization of masses.}, + file = {Timp07.pdf:pdf\\Timp07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {17649908}, + month = {7}, + gsid = {1885078493541058117}, + gscites = {90}, + ss_id = {6151c62ea8b3f95b4b4545807cc7b0c9794e9fbf}, + all_ss_ids = {['6151c62ea8b3f95b4b4545807cc7b0c9794e9fbf']}, +} + +@article{Timp10, + author = {Sheila Timp and Celia Varela and Nico Karssemeijer}, + title = {Computer-aided diagnosis with temporal analysis to improve radiologists' interpretation of mammographic mass lesions}, + journal = TITB, + year = {2010}, + volume = {14}, + pages = {803--808}, + doi = {10.1109/TITB.2010.2043296}, + abstract = {The purpose of this study was to evaluate the effect of independent reading with computer-aided diagnosis (CAD) and independent double reading on radiologists' performance to characterize mass lesions on serial mammograms. Six radiologists rated 198 cases, 99 benign and 99 malignant. For each case, the mammograms from two consecutive screening rounds were available. The mass was visible on the prior view in 40\% of the cases. Independently, a CAD program also rated each mass lesion making use of information from prior and current views. The following reading situations were compared: single reading, independent reading with CAD, and independent double reading. Independent reading with CAD was implemented by averaging the scaled ratings from each radiologist and the scaled CAD scores. We implemented independent double reading by averaging the scaled scores from two radiologists. Results were evaluated using receiver-operating characteristic (ROC) methodology and multiple reader multiple case analysis.
The average performance, measured as the area under the ROC curve (A(z) value), was 0.80 for the single-reading mode. For independent double reading, the average performance improved to 0.81. This improvement was not significant. For independent interpretation with CAD, the average performance significantly increased to 0.83 (P < 0.05). We conclude that CAD technology with temporal analysis has the potential to help radiologists with the task of discriminating between benign and malignant masses.}, + file = {Timp10.pdf:pdf/Timp10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {20403792}, + month = {5}, + gsid = {15819462963198853544}, + gscites = {35}, + ss_id = {01bb3eaa558de011fe6cf83f854ed64208cf6785}, + all_ss_ids = {['01bb3eaa558de011fe6cf83f854ed64208cf6785']}, +} + +@article{Toga23, + author = {Togawa, Riku and Pfob, Andr\'{e} and B\"{u}sch, Christopher and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Duda, Volker and Fastner, Sarah and Goncalo, Manuela and Gomez, Christina and Gruber, Ines and Hahn, Markus and Hennigs, Andr\'{e} and Kapetas, Panagiotis and Nees, Juliane and Ohlinger, Ralf and Riedel, Fabian and Rutten, Matthieu and Sch\"{a}fgen, Benedikt and Stieber, Anne and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Rauch, Geraldine and Heil, J\"{o}rg and Barr, Richard and Golatta, Michael}, + title = {Potential of Lesion-to-Fat Elasticity Ratio Measured by Shear Wave Elastography to Reduce Benign Biopsies in BI-RADS 4 Breast Lesions}, + doi = {10.1002/jum.16192}, + year = {2023}, + abstract = {Objectives: We evaluated whether lesion-to-fat ratio measured by shear wave elastography in patients with Breast Imaging Reporting and Data System (BI-RADS) 3 or 4 lesions has the potential to further refine the assessment of B-mode ultrasound alone in breast cancer diagnostics. Methods: This was a secondary analysis of an international diagnostic multicenter trial (NCT02638935). Data from 1288 women with breast lesions categorized as BI-RADS 3 and 4a-c by conventional B-mode ultrasound were analyzed, whereby the focus was placed on differentiating lesions categorized as BI-RADS 3 and BI-RADS 4a. All women underwent shear wave elastography and histopathologic evaluation functioning as reference standard. Reduction of benign biopsies as well as the number of missed malignancies after reclassification using lesion-to-fat ratio measured by shear wave elastography were evaluated. Results: Breast cancer was diagnosed in 368 (28.6%) of 1288 lesions. The assessment with conventional B-mode ultrasound resulted in 53.8% (495 of 1288) pathologically benign lesions categorized as BI-RADS 4 and therefore false positives as well as in 1.39% (6 of 431) undetected malignancies categorized as BI-RADS 3. Additional lesion-to-fat ratio in BI-RADS 4a lesions with a cutoff value of 1.85 resulted in 30.11% biopsies of benign lesions which correspond to a reduction of 44.04% of false positives. Conclusions: Adding lesion-to-fat ratio measured by shear wave elastography to conventional B-mode ultrasound in BI-RADS 4a breast lesions could help reduce the number of benign biopsies by 44.04%.
At the same time, however, 1.98% of malignancies were missed, which would still be in line with American College of Radiology BI-RADS 3 definition of <2% of undetected malignancies.}, + url = {http://dx.doi.org/10.1002/jum.16192}, + file = {Toga23.pdf:pdf\Toga23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Ultrasound in Medicine}, + citation-count = {0}, + automatic = {yes}, + pages = {1729-1736}, + volume = {42}, +} + +@conference{Tom22a, + author = {Perik, Tom and Hermans, John and Huisman, Henkjan}, + booktitle = ECR, + title = {AI-assisted analysis of CT perfusion to predict response in patients with pancreatic adenocarcinoma}, + abstract = {CT perfusion (CTP) shows potential for predicting treatment response in patients with pancreatic ductal adenocarcinoma (PDAC). However, current pharmacokinetic models are difficult to use in clinical decision-making as they do not always accurately reflect changes in perfusion. Visual changes in the time-intensity curve (TIC) are not translated in perfusion parameters. We developed a kinetic model-independent method to analyze time-intensity curves, based on experiences in dynamic-contrast-enhanced MRI in prostate cancer. Methods or Background: Initial data (n=12) from a prospective study evaluating chemotherapy response in patients with PDAC. CTP was performed at baseline and after 3 months. A bolus-timing optimized scan protocol with 23 perfusion images was used. The tumor section with the largest diameter was free-hand annotated at baseline and matching follow-up section to create TICs. Our method used a trilinear fit, separating the static phase, enhancement phase, and wash-out phase. Linear discriminant analysis (LDA) was trained to predict response based on the curve changes after treatment. One-sided T-test was used to test the statistical differences between groups. Results or Findings: Using this method we could discriminate responders (n=4) from non-responders (n=8), classified with RECIST. Our curve fit showed that after treatment maximum enhancement increased by 42% in responders and 7% in non-responders (p=0.02). Enhancement slope increased with 140% in responders and 3% in non-responders (p=0.06). Changes in static enhancement and TTP did not significantly differ. Linear discriminant analysis with all four features classified treatment response with 92% accuracy. Conclusion: We developed an AI-assisted tool with a robust, kinetic model-independent method for TIC analysis in CTP resulting in perfusion features that distinguish between responders and non-responders in PDAC after chemotherapy. Limitations: Early results of a feasibility study, future work will include more patients and comparison with pharmacokinetic models.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@inproceedings{Trom12, + author = {Christopher Tromans and Guido van Schie and Nico Karssemeijer and Michael Brady}, + title = {A Hypothesis-Test Framework for Quantitative Lesion Detection and Diagnosis}, + booktitle = {IWDM '12: Proceedings of the 11th International Workshop on Breast Imaging}, + year = {2012}, + volume = {7361}, + series = LNCS, + pages = {458--465}, + doi = {10.1007/978-3-642-31271-7_59}, + abstract = {A method is presented which quantifies the radiodensity of lesions in projection images, providing a diagnostic indicator to better inform the decisions of both human readers and computer algorithms.
The models of image formation underlying the Standard Attenuation Rate (SAR) are used to facilitate the forward simulation of the appearance of a lesion in a breast. By forming hypotheses, informed from measurements on the acquired image, virtual 3D scenes are constructed which predict the size, position and radiodensity of a suspect lesion and the surrounding breast tissue. Comparisons between simulations of this scene, and the acquired image enable both the refinement of the hypothesis, and the assessment of the likelihood of the hypothesis being correct. In the event of a high likelihood of correctness, the hypothesised lesion informs diagnosis. The application of the method to a patient image containing a cyst shows it has an attenuation corresponding to water (SAR 1.246), and an invasive carcinoma which is considerably denser at SAR 2.27. Thus the technique yields a quantitative radiodensity measure for discrimination in diagnostic decision making.}, + file = {Trom12.pdf:pdf\\Trom12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {14231121447187167510}, + gscites = {4}, + ss_id = {6c9c323e06155fb6d4c3adff3ae733a45c55a4e6}, + all_ss_ids = {['6c9c323e06155fb6d4c3adff3ae733a45c55a4e6']}, +} + +@article{Turn21, + author = {Turner, Oliver C and Knight, Brian and Zuraw, Aleksandra and Litjens, Geert and Rudmann, Daniel G}, + title = {Mini Review: The Last Mile-Opportunities and Challenges for Machine Learning in Digital Toxicologic Pathology.}, + doi = {10.1177/0192623321990375}, + issue = {4}, + pages = {714--719}, + volume = {49}, + abstract = {The 2019 manuscript by the Special Interest Group on Digital Pathology and Image Analysis of the Society of Toxicologic pathology suggested that a synergism between artificial intelligence (AI) and machine learning (ML) technologies and digital toxicologic pathology would improve the daily workflow and future impact of toxicologic pathologists globally. Now 2 years later, the authors of this review consider whether, in their opinion, there is any evidence that supports that thesis. Specifically, we consider the opportunities and challenges for applying ML (the study of computer algorithms that are able to learn from example data and extrapolate the learned information to unseen data) algorithms in toxicologic pathology and how regulatory bodies are navigating this rapidly evolving field. Although we see similarities with the "Last Mile" metaphor, the weight of evidence suggests that toxicologic pathologists should approach ML with an equal dose of skepticism and enthusiasm. There are increasing opportunities for impact in our field that leave the authors cautiously excited and optimistic. Toxicologic pathologists have the opportunity to critically evaluate ML applications with a "call-to-arms" mentality. Why should we be late adopters? There is ample evidence to encourage engagement, growth, and leadership in this field.}, + file = {:pdf/Turn21.pdf:PDF}, + journal = TP, + month = jun, + pmid = {33590805}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235783}, + ss_id = {ffa308f1ec81d2c2e1e75103af34ba450c33ee25}, + all_ss_ids = {['ffa308f1ec81d2c2e1e75103af34ba450c33ee25']}, + gscites = {4}, +} + +@article{Twil21, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235582}, +} + +@article{Twil21a, + author = {Twilt, Jasper J. and van Leeuwen, Kicky G. and Huisman, Henkjan J. and F\"{u}tterer, Jurgen J. 
and de Rooij, Maarten}, + title = {Artificial Intelligence Based Algorithms for Prostate Cancer Classification and Detection on Magnetic Resonance Imaging: A Narrative Review}, + doi = {10.3390/diagnostics11060959}, + year = {2021}, + abstract = {Due to the upfront role of magnetic resonance imaging (MRI) for prostate cancer (PCa) diagnosis, a multitude of artificial intelligence (AI) applications have been suggested to aid in the diagnosis and detection of PCa. In this review, we provide an overview of the current field, including studies between 2018 and February 2021, describing AI algorithms for (1) lesion classification and (2) lesion detection for PCa. Our evaluation of 59 included studies showed that most research has been conducted for the task of PCa lesion classification (66%) followed by PCa lesion detection (34%). Studies showed large heterogeneity in cohort sizes, ranging between 18 and 499 patients (median = 162), combined with different approaches for performance validation. Furthermore, 85% of the studies reported on the stand-alone diagnostic accuracy, whereas 15% demonstrated the impact of AI on diagnostic thinking efficacy, indicating limited proof for the clinical utility of PCa AI applications. In order to introduce AI within the clinical workflow of PCa assessment, robustness and generalizability of AI applications need to be further validated utilizing external validation and clinical workflow experiments.}, + url = {http://dx.doi.org/10.3390/diagnostics11060959}, + file = {Twil21a.pdf:pdf\Twil21a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostics}, + citation-count = {34}, + automatic = {yes}, + pages = {959}, + volume = {11}, +} + +@conference{Twil23a, + author = {Jasper Twilt and Anindo Saha and Joeran S. Bosma and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Henkjan Huisman and Maarten de Rooij}, + title = {Diagnostic Value of Dynamic Contrast-Enhanced MRI for the Detection of Clinically Significant Prostate Cancer in a Multi-Reader Study: Preliminary Results from the PI-CAI Consortium}, + booktitle = ECR, + year = {2023}, + abstract = {PURPOSE: The study aimed to compare the detection performance of ISUP grade group (GG) >= 2 using bi-parametric (bp)MRI, and multi-parametric (mp)MRI (which includes dynamic contrast IV administration), in an international reader study. METHODS: This retrospective reader study uses a cohort of 400 prostate mpMRI exams acquired between 2012 and 2021 at three Dutch centres and one Norwegian centre. Patients suspected of GG>=2 cancers and without prior prostate treatment or prior GG>=2 findings in an MRI-first approach are included. 65 radiologists (18 countries, 43 centres) with 1-23 years (median: 8) of prostate reading experience were enlisted. Readers and cases are divided into blocks of 100 cases. For each case, readers assessed bpMRI and mpMRI in sequence to mimic the clinical routine. Suspected GG>=2 cancer findings were assigned a PI-RADS 3-5 score. Additionally, a patient-level suspicion score (0-100) of harbouring GG>=2 was indicated. Multi-reader multi-case (MRMC) analysis was used to compare the patient-level added value of mpMRI. RESULTS: Preliminary results from the first 14 readers with 2-15 years (median: 9) of experience indicate that overall, there is little improvement in GG>=2 detections between bpMRI and mpMRI readings with AUROCs of 0.857 (95% CI: 0.83, 0.89) and 0.860 (95% CI: 0.83, 0.89), respectively.
For individual readers, absolute differences in AUROC ranged between 0.00-0.03 (95% CI: 0.00, 0.01). CONCLUSION: MRI assessments in bpMRI had similar GG>=2 detections to mpMRI assessments at a per-case level. Multivariable influencers such as experience, workflow, image quality and protocol familiarity must be evaluated. LIMITATIONS: Preliminary results are limited by the sample size. mpMRI readings of the original data were used to guide histologic verification. 13 out of 14 readers had high expertise as per 2020 ESUR/ESUI consensus statements.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Twil23b, + author = {Jasper Twilt and Anindo Saha and Joeran S. Bosma and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Henkjan Huisman and Maarten de Rooij}, + title = {EAU Plenary Gamechanging Session - Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: Preliminary Results from the PI-CAI Challenge}, + booktitle = EAU, + year = {2023}, + abstract = {PURPOSE: The PI-CAI (Prostate Imaging: Cancer AI) Challenge is an international study, with over 10,000 carefully-curated prostate MRI exams to validate modern AI algorithms and estimate radiologists' performance at csPCa detection and diagnosis. PI-CAI primarily consists of two sub-studies: an AI study (Grand Challenge) and a reader study. In the end PI-CAI will benchmark state-of-the-art AI algorithms developed in the Grand Challenge, against prostate radiologists participating in the reader study. The aim is to present the study design and share the preliminary results of both sub-studies. METHODS: For the AI study, an annotated multi-center, multi-vendor dataset of 1500 bpMRI exams (including their basic clinical and acquisition variables) was made publicly available for all participating AI teams and the research community at large. Teams used this dataset to develop AI models, and at the end of this open development phase, all algorithms were ranked, based on their performance on a hidden testing cohort of 1000 unseen scans. In the ongoing closed testing phase, organizers will retrain the top-ranking 5 AI algorithms using a larger dataset of 9107 bpMRI scans (including additional training scans from a private dataset). Finally, their performance will be re-evaluated on the hidden testing cohort. For the reader study, 59 international prostate radiologists assessed a subset of 400 scans from the hidden testing cohort. Readers and cases were divided into blocks of 100 cases. For each case, readers assessed bpMRI and mpMRI in sequence to mimic clinical routine. Suspected GG>=2 cancer findings were assigned a PI-RADS 3-5 score. Additionally, a patient-level suspicion score (0-100) of harboring GG>=2 was indicated. Multi-reader multi-case (MRMC) analysis was used to compare the patient-level added value of mpMRI. RESULTS: From the AI study, the ranked results from the open development phase will be presented. From the reader study, preliminary results from the first 14 readers will be presented. Readers with 2-15 years (median: 9) of experience indicate that overall, there is little improvement in GG>=2 detections between bpMRI and mpMRI readings with AUROCs of 0.857 (95% CI: 0.83, 0.89) and 0.860 (95% CI: 0.83, 0.89), respectively. For individual readers, absolute differences in AUROC ranged between 0.00-0.03 (95% CI: 0.00, 0.01). CONCLUSION: The top 5 results from the open development phase of the AI study of the PI-CAI Challenge will be presented. 
Preliminary results from the PI-CAI reader study show that bpMRI had similar GG>=2 detection to mpMRI assessments at a per-case level. Multivariable influencers such as experience, workflow, image quality and protocol familiarity need to be evaluated. LIMITATIONS: Preliminary results are limited by the sample size. mpMRI readings of the original data were used to guide histologic verification. 13 out of 14 readers had high expertise as per 2020 ESUR/ESUI consensus statements.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Uden15, + author = {I. van Uden and H.M. van der Holst and A.M. Tuladhar and A.G.W. van Norden and K. de Laat and R. Arntz and M. Ghafoorian and B. Platel and J.A.H.R. Claassen and R.P.C. Kessels and E.J. van Dijk and F.E. de Leeuw}, + title = {Both {WMH} increase and decline increase the risk of incident dementia; a prospective cohort study: the RUN DMC study}, + booktitle = {European Stroke Organization}, + year = {2015}, + optnote = {DIAG}, +} + +@article{Uffm04, + author = {Uffmann, Martin and Schaefer-Prokop, Cornelia and Neitzel, Ulrich and Weber, Michael and Herold, Christian J. and Prokop, Mathias}, + title = {Skeletal applications for flat-panel versus storage-phosphor radiography: effect of exposure on detection of low-contrast details}, + journal = Radiology, + year = {2004}, + volume = {231}, + pages = {506--514}, + doi = {10.1148/radiol.2312021662}, + abstract = {To compare exposure requirements for similar detection performance with flat-panel detectors and the most recent generation of storage-phosphor plates in the simulated scatter of typical skeletal radiographic examinations.A contrast-detail test object was covered with varying thicknesses of acrylic to simulate skeletal exposure conditions in the wrist, knee, and pelvis. Three series were obtained with increasing thicknesses of a simulated soft-tissue layer (5, 10, and 20 cm) and increasing tube voltage (50, 70, and 90 kVp). A fourth series was obtained with exposure conditions adapted to the phantom instructions (75 kVp). Images were acquired with a flat-panel detector (cesium iodide scintillator) and storage-phosphor plates at five exposure levels (speed class range, 100-1,600). Five readers evaluated 84 images to determine the threshold contrast of 12 lesion diameters (range, 0.25-11.1 mm). Statistical significance of differences between the two digital systems was assessed with two-way analysis of variance.A linear relationship was found between the number of detected lesions and the logarithm of exposure (R(2) > 0.98 for all series). On average, the flat-panel system required 45\% less exposure than did the phosphor plates when 20-cm-thick acrylic was superimposed on the test object. Differences in exposure requirements were smaller with decreasing thicknesses of simulated soft-tissue layers and lower tube voltages (39\% at 10 cm and 70 kVp, and 17\% at 5 cm and 50 kVp). All differences were statistically significant.Flat-panel radiography provides improved contrast detectability and a potential for exposure reduction compared with those with storage-phosphor radiography. The best performance was achieved with conditions comparable to those for radiography of the trunk and lowest for conditions that simulate radiography of the extremities.}, + optnote = {DIAG}, + number = {2}, + pmid = {15128995}, + month = {5}, + gsid = {3414762767467135895}, + gscites = {33}, +} + +@article{Uffm04a, + author = {Uffmann, M. 
and Schaefer-Prokop, C.}, + title = {[Radiological diagnostics of Hodgkin- and non-Hodgkin lymphomas of the thorax]}, + journal = Radiologe, + year = {2004}, + volume = {44}, + pages = {444--456}, + doi = {10.1007/s00117-004-1059-z}, + abstract = {Malignant lymphomas belong to the most important malignant diseases in western countries with an increasing incidence of Non-Hodgkin lymphoma. The thorax is the location of primary manifestation especially in patients with Hodgkin's disease. Progression of disease and therapy associated complications are frequently located in the chest. Based on morphological imaging criteria the two types of lymphoma cannot be differentiated, helpful for differentiation is, however, the way of disease spread. Primary and secondary thoracic lymphoma represent a diagnostic challenge in radiology: the patterns are variable in radiography as well as in computed tomography and alter under therapy. Radiological studies, especially CT, are an integral part of the staging process. MRI is considered advantageous for chest wall disease. PET as functional imaging technique has its proven role for staging of high grade lymphomas, the combination of functional and morphological information provided by PET-CT will become the first diagnostic standard in the future.}, + optnote = {DIAG}, + number = {5}, + pmid = {15114475}, + month = {5}, +} + +@article{Uffm05, + author = {Uffmann, Martin and Prokop, Mathias and Kupper, Walter and Mang, Thomas and Fiedler, Volker and Schaefer-Prokop, Cornelia}, + title = {Soft-copy reading of digital chest radiographs: effect of ambient light and automatic optimization of monitor luminance}, + journal = IR, + year = {2005}, + volume = {40}, + pages = {180--185}, + abstract = {We sought to evaluate whether soft-copy reading of simulated pulmonary chest lesions is influenced by ambient light and automatic optimization of cathode ray tube (CRT) monitor luminance.Four types of simulated lesions (nodules, lines, micronodules, and patchy opacities) were superimposed over an anthropomorphic chest phantom. Lesion detection with soft-copy reading was assessed using a high-contrast grayscale 2K CRT monitor under the following conditions: (1) subdued lighting (<50 lux); (2) normal lighting conditions (450 lux) without, and (3) with a sensitivity modulation to automatically adjust the CRT luminance to the increased amount of ambient light. Reading data were analyzed according to receiver operating curve. Significance of differences was tested using an analysis of variance for repeated measures.Ambient room light of 450 lux did not significantly influence the detection of nodules and patchy opacities. However, bright ambient light significantly decreased detection of micronodules (0.60 vs. 0.74) and lines (0.52 vs. 0.66) relative to subdued lighting conditions. Automatic luminance adjustment could compensate the effect of ambient light for the micronodules (0.77) but not for the lines (0.53).Bright ambient light significantly decreases detection of small low-contrast structures. 
This may be partially but not completely compensated by an automatic luminance adaptation.}, + file = {Uffm05.pdf:pdf\\Uffm05.pdf:PDF}, + optnote = {CXR, DIAG, RADIOLOGY}, + number = {3}, + pmid = {15714093}, + gsid = {6142198956091043481}, + gscites = {40}, +} + +@article{Uffm05a, + author = {Uffmann, Martin and Prokop, Mathias and Eisenhuber, Edith and Fuchsj\"ager, Michael and Weber, Michael and Schaefer-Prokop, Cornelia}, + title = {Computed radiography and direct radiography: influence of acquisition dose on the detection of simulated lung lesions}, + journal = IR, + year = {2005}, + volume = {40}, + pages = {249--256}, + abstract = {We sought to evaluate the performance of dual-readout and single-readout computed radiography compared with direct radiography for detecting subtle lung abnormalities with a standard and a low-dose technique.Posteroanterior radiographs of an anthropomorphic chest phantom were obtained with a single-readout storage phosphor radiography system (CRS, pixel size 200 microm), a dual-readout storage phosphor radiography system (CRD, pixel size 100 microm), and a direct detector (DR, pixel size 143 microm) at dose levels of 400 and 800 speed. Ten templates were superimposed to project 4 types of lesions over low- and high-attenuation areas, simulating nodules, micronodules, lines, and patchy opacities. Six radiologists evaluated 60 hard-copy images for the presence or absence of lesions. Statistical significance of differences was evaluated using receiver operating characteristic analysis and analysis of variance.For both low- and high-attenuation areas, CRD (Az = 0.85 and 0.66) was superior to CRS (Az = 0.75 and 0.58) for overall performance and all lesion subtypes (P < 0.05). DR (Az = 0.87 and 0.67) performed slightly better than CRD, being significant only for the detection of micronodules. Acquisition dose significantly affected only the detection of lines and micronodules, whereas the detection of nodules and patchy opacities was not significantly different with reduced exposure, regardless of the system used.The dual-readout CR system significantly outperformed the single-readout CR and almost equaled the performance of DR. Dose reduction was more critical for small-sized lesions (micronodules, lines) than for nodular or patchy opacifications and affected mainly the lesions in high attenuation areas.}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {15829821}, + gsid = {2634154385315352846}, + gscites = {25}, +} + +@article{Uffm05b, + author = {Uffmann, Martin and Neitzel, Ulrich and Prokop, Mathias and Kabalan, Nahla and Weber, Michael and Herold, Christian J. and Schaefer-Prokop, Cornelia}, + title = {Flat-panel-detector chest radiography: effect of tube voltage on image quality}, + journal = Radiology, + year = {2005}, + volume = {235}, + pages = {642--650}, + doi = {10.1148/radiol.2352031730}, + abstract = {To compare the visibility of anatomic structures in direct-detector chest radiographs acquired with different tube voltages at equal effective doses to the patient.The study protocol was approved by the institutional internal review board, and written informed consent was obtained from all patients. Posteroanterior chest radiographs of 48 consecutively selected patients were obtained at 90, 121, and 150 kVp by using a flat-panel-detector unit that was based on cesium iodide technology and automated exposure control. Monte Carlo simulations were used to verify that the effective dose for all kilovoltage settings was equal. 
Five radiologists subjectively and independently rated the delineation of anatomic structures on hard-copy images by using a five-point scale. They also ranked image quality in a blinded side-by-side comparison. Average ranking scores were compared by using one-way analysis of variance with repeated measures. Data were analyzed for the entire patient group and for two patient subgroups that were formed according to body mass index (BMI).The visibility scores of most anatomic structures were significantly superior with the 90-kVp images (mean score, 3.11), followed by the 121-kVp (mean score, 2.95) and 150-kVp images (mean score, 2.80). Differences did not reach significance (P > .05) only for the delineation of the peripheral vessels, the heart contours, and the carina. This was also true for the subgroup of patients (n = 24) with a BMI greater than and the subgroup of patients (n = 24) with a BMI less than the mean BMI (26.9 kg/m(2)). At side-by-side comparison, the readers rated 90-kVp images as having superior image quality in the majority of image triplets; the percentage of 90-kVp images rated as "first choice" ranged from 60\% (29 of 48 patients) to 90\% (43 of 48 patients), with a median of 88\% (42 of 48 patients), among the readers.Delineation of most anatomic structures and overall image quality were ranked superior in digital radiographs acquired with lower kilovoltage at a constant effective patient dose.}, + optnote = {CXR, RADIOLOGY, TB}, + number = {2}, + pmid = {15858104}, + month = {5}, + gsid = {11265685017709774673}, + gscites = {68}, +} + +@article{Uffm08, + author = {Uffmann, M. and Schaefer-Prokop, C. and Neitzel, U.}, + title = {[Balance of required dose and image quality in digital radiography]}, + journal = Radiologe, + year = {2008}, + volume = {48}, + pages = {249--257}, + doi = {10.1007/s00117-008-1617-x}, + abstract = {Projection radiography is in an advanced stage of progressive transition from conventional screen-film imaging to digital image acquisition modalities. The radiographic technique, including examination parameters such as tube voltage, tube current and filtration has frequently been adopted from screen-film technology. Digital systems, however, are characterized by their flexibility as the dose can be reduced at the expense of image quality and vice versa. The imaging parameters need to be individually optimized according to the best performance of a system. The traditional means of dose adjustment, such as positioning and collimation, are as valid for digital techniques as they were for conventional techniques. Digital techniques increasingly offer options for dose reduction. At the same time there is a risk to accidentally substantially increase patient dose due to the lack of visual control. Therefore, the implementation of dose indicators and dose monitoring is mandatory for digital radiography. 
The use of image quality classes according to the dose requirements of given clinical indications is a further step towards modern radiation protection.}, + optnote = {DIAG}, + number = {3}, + pmid = {18259724}, + month = {2}, +} + +@article{Uffm09, + author = {Uffmann, Martin and Schaefer-Prokop, Cornelia}, + title = {Digital radiography: the balance between image quality and required radiation dose}, + journal = EJR, + year = {2009}, + volume = {72}, + pages = {202--208}, + doi = {10.1016/j.ejrad.2009.05.060}, + abstract = {Although the transition from conventional screen-film imaging to digital image acquisition has been almost completed during the last couple of years, examination parameters, such as tube voltage, tube current, and filtration, have been adopted from screen-film technology without further adjustments. Digital systems, however, are characterised by their flexibility: the acquisition dose can be reduced at the expense of image quality and vice versa. The imaging parameters must be optimised according to the best performance of a particular system. The traditional means of dose containment, such as positioning and collimation, are as valid for digital techniques as they were for conventional techniques. Digital techniques increasingly offer options for dose reduction. At the same time, there is a risk of substantially increasing the patient dose, possibly unawares, due to the lack of visual control. Therefore, implementation of dose indicators and dose monitoring is mandatory for digital radiography. The use of image quality classes according to the dose requirements of given clinical indications is a further step toward modern radiation protection.}, + optnote = {DIAG}, + number = {2}, + pmid = {19628349}, + month = {11}, +} + +@mastersthesis{Vala19, + author = {Valacchi, Lorenzo}, + title = {Analysis and endotracheal tube detection in chest x-rays using deep learning}, + abstract = {The following work focuses on the development of two deep learning models applied to chest x-rays. The first model, Imagesorter, provides a solution for sorting chest x-ray images where metadata is not available or is unreliable. This is frequently the case when accessing large collections of radiographs and can result in very time-consuming procedures to obtain reliable data. Specifically, the algorithm returns four properties of the image: the type of image presented, rotation (whether the image is rotated), inversion (whether the grayscale level of the radiograph is inverted) and orientation (whether a lateral chest x-ray is mirrored). Nearly 30,000 radiographs were gathered and used to train, validate and test a deep convolutional neural network. For this purpose, a ResNet50 network pretrained on ImageNet and finetuned on the chest x-ray dataset was used. Moreover, the network architecture was modified to return all four features at once. The model achieved very good results on the test set and can be considered a valid tool to efficiently explore and sort large x-ray collections. The second model, Endotracheal-Tube, detects the presence of an endotracheal tube in a chest x-ray. Many automated methods require gathering chest x-rays where an endotracheal tube is present. The presented algorithm can help gather reliable data from large collections in a short amount of time. A large dataset was created for the project and a preprocessing method to crop a square area of the image where the tube lies is presented.
Four models are trained, validated and tested on the same dataset to determine which performs best. In the end, an InceptionV3 network pretrained on ImageNet and finetuned on the dataset achieved the best results (AUC = 0.993). + Both projects are part of OpenCXR, an open source library developed by the Chest X-Ray teams at the Diagnostic Image Analysis Group at the Radboud University Medical Center, Nijmegen, The Netherlands.}, + file = {Vala19.pdf:pdf\\Vala19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + school = {Universita di Siena}, + year = {2019}, + journal = {Master thesis}, +} + +@conference{Valk19, + author = {Dyon Valkenburg and Esmee Runhart and Bart Liefers and Stanley Lambertus and S\'{a}nchez, Clara I. and Cremers, Frans P. and Bax, Nathalie M and Hoyng, Carel C B}, + booktitle = ARVO, + title = {Familial discordance in disease phenotype in siblings with Stargardt disease}, + abstract = {Purpose: + To investigate intersibling discordance of the Stargardt disease (STGD1) phenotype. + + Methods: + We performed a retrospective cohort study among siblings with genetically confirmed STGD1 and at least one available fundus autofluorescence (FAF) image of both eyes. We compared age of onset within families using the youngest patient as the reference and a predetermined threshold value of 10 years for significant differences. Disease duration was matched to investigate differences in best-corrected visual acuity, and we determined and compared the survival time for reaching severe visual impairment (SVI) (<20/200 Snellen or > 1.3 Logarithm of the Minimal Angle of Resolution (LogMAR)). Central retinal atrophy surface area was quantified and compared by two independent graders using the semi-automated EyeNED software. Additionally, both graders performed qualitative assessment of FAF patterns to identify phenotypic differences and commonalities. Main outcome measures included differences in age of onset, best-corrected visual acuity (BCVA), time to develop legal blindness, FAF atrophy surface area and autofluorescence patterns. + + Results: + Significant differences in age of onset were present in 5/17 families, ranging from 13 to 39 years. BCVA was matched in 12/17 families and the median difference was 0.41 (0 - 1.10) LogMAR for the right and 0.41 (0 - 1.08) LogMAR for the left eye, and we found extreme differences in five families ranging from 0.58 to 1.1 LogMAR. The median age at which patients developed SVI was 14 years. We observed significant differences in time to develop SVI in three out of 12 families with matched survival times, ranging from 14 to 29 years. Median central retinal atrophy surface area was 11.38 mm2 in the right (range 1.98 - 44.78 mm2) and 10.59 mm2 in the left (range 1.61 - 40.59 mm2) eyes and was highly comparable between siblings, with the exception of family one. Qualitative FAF phenotypes were comparable in all sibling pairs. + + Conclusions: + Phenotypic discordance between siblings with STGD1 disease carrying the same ABCA4 variants is a prevalent phenomenon. Functional outcomes can differ substantially despite highly comparable FAF phenotypes, which complicates sibling-based prognosis.
While environmental factors are likely to modify the disease course, the relatively young median age at which patients develop SVI indicates an important role for genetic factors as disease modifiers.}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, +} + +@article{Valk19a, + author = {Valkenburg, Dyon and Runhart, Esmee H and Bax, Nathalie M and Liefers, Bart and Lambertus, Stanley L and S\'{a}nchez, Clara I. and Cremers, Frans PM and Hoyng, Carel B}, + title = {Highly variable disease courses in siblings with {Stargardt} disease}, + journal = Ophthalmology, + year = {2019}, + volume = {126}, + issue = {12}, + month = {7}, + pages = {1712-1721}, + doi = {10.1016/j.ophtha.2019.07.010}, + url = {https://www.sciencedirect.com/science/article/pii/S0161642019306578?via%3Dihub}, + abstract = {Purpose + To investigate intersibling phenotypic concordance in Stargardt disease (STGD1). + + Design + Retrospective cohort study. + + Participants + Siblings with genetically confirmed STGD1 and at least 1 available fundus autofluorescence (FAF) image of both eyes. + + Methods + We compared age at onset within families. Disease duration was matched to investigate differences in best-corrected visual acuity (BCVA) and compared the survival time for reaching severe visual impairment (<20/200 Snellen or >1.0 logarithm of the minimum angle of resolution [logMAR]). Central retinal atrophy area was quantified independently by 2 experienced graders using semiautomated software and compared between siblings. Both graders performed qualitative assessment of FAF and spectral-domain (SD) OCT images to identify phenotypic differences. + + Main Outcome Measures + Differences in age at onset, disease duration-matched BCVA, time to severe visual impairment development, FAF atrophy area, FAF patterns, and genotypes. + + Results + Substantial differences in age at onset were present in 5 of 17 families, ranging from 13 to 39 years. Median BCVA at baseline was 0.60 logMAR (range, -0.20 to 2.30 logMAR; Snellen equivalent, 20/80 [range, 20/12-hand movements]) in the right eye and 0.50 logMAR (range, -0.20 to 2.30 logMAR; Snellen equivalent, 20/63 [range, 20/12-hand movements]) in the left eye. Disease duration-matched BCVA was investigated in 12 of 17 families, and the median difference was 0.41 logMAR (range, 0.00-1.10 logMAR) for the right eye and 0.41 logMAR (range, 0.00-1.08 logMAR) for the left eye. We observed notable differences in time to severe visual impairment development in 7 families, ranging from 1 to 29 years. Median central retinal atrophy area was 11.38 mm2 in the right eye (range, 1.98-44.78 mm2) and 10.59 mm2 in the left eye (range, 1.61-40.59 mm2) and highly comparable between siblings. Similarly, qualitative FAF and SD OCT phenotypes were highly comparable between siblings. + + Conclusions + Phenotypic discordance between siblings with STGD1 carrying the same ABCA4 variants is a prevalent phenomenon. Although the FAF phenotypes are highly comparable between siblings, functional outcomes differ substantially. This complicates both sibling-based prognosis and genotype-phenotype correlations and has important implications for patient care and management.}, + file = {Valk19a.pdf:pdf\\Valk19a.pdf:PDF}, + optnote = {DIAG}, + pmid = {31522899}, + ss_id = {edb057ef58b113686d38a747196234aa2409d517}, + all_ss_ids = {['edb057ef58b113686d38a747196234aa2409d517']}, + gscites = {16}, +} + +@inproceedings{Vare03, + author = {C. Varela and J. M. Muller and N.
Karssemeijer}, + title = {Mammographic mass characterization using sharpness and lobulation measures}, + booktitle = MI, + year = {2003}, + volume = {5032}, + series = SPIE, + pages = {120-129}, + doi = {10.1117/12.480161}, + abstract = {For radiologists lesion margin appearance is of high importance when classifying breast masses as malignant or benign lesions. In this study, we developed different measures to characterize the margin of a lesion. Towards this goal, we developed a series of algorithms to quantify the degree of sharpness and lobulation of a mass margin. Besides, to estimate spiculation of a margin, features previously developed for mass detection were used. Images selected from the publicly available data set "Digital Database for Screening Mammography" were used for development and evaluation of these algorithms. The data set consisted of 777 images corresponding to 382 patients. To extract lesions from the mammograms a segmentation algorithm based on dynamic programming was used. Features were extracted for each lesion. A k-nearest neighbor algorithm was used in combination with a leave-one-out procedure to select the best features for classification purposes. Classification accuracy was evaluated using the area Az under the receiver operating characteristic curve. The average test Az value for the task of classifying masses on a single mammographic view was 0.79. In a case-based evaluation we obtained an Az value of 0.84.}, + file = {Vare03.pdf:pdf\\Vare03.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {1721350618346932046}, + gscites = {2}, + ss_id = {25296bf46f2f414dcb6a595df5cbd178db33760f}, + all_ss_ids = {['25296bf46f2f414dcb6a595df5cbd178db33760f']}, +} + +@article{Vare05, + author = {C. Varela and N. Karssemeijer and J. H. C. L. Hendriks and R. Holland}, + title = {Use of prior mammograms in the classification of benign and malignant masses}, + journal = EJR, + year = {2005}, + volume = {56}, + pages = {248--255}, + doi = {10.1016/j.ejrad.2005.04.007}, + abstract = {{T}he purpose of this study was to determine the importance of using prior mammograms for classification of benign and malignant masses. {F}ive radiologists and one resident classified mass lesions in 198 mammograms obtained from a population-based screening program. {C}ases were interpreted twice, once without and once with comparison of previous mammograms, in a sequential reading order using soft copy image display. {T}he radiologists' performances in classifying benign and malignant masses without and with previous mammograms were evaluated with receiver operating characteristic ({ROC}) analysis. {T}he statistical significance of the difference in performances was calculated using analysis of variance. {T}he use of prior mammograms improved the classification performance of all participants in the study. {T}he mean area under the {ROC} curve of the readers increased from 0.763 to 0.796. {T}his difference in performance was statistically significant ({P}=0.008).}, + file = {Vare05.pdf:pdf\\Vare05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {15890483}, + month = {11}, + gsid = {13242018771544321665}, + gscites = {52}, + ss_id = {dd41e6d7a1923e0b947c4a7368c9a23b22570ec1}, + all_ss_ids = {['dd41e6d7a1923e0b947c4a7368c9a23b22570ec1']}, +} + +@article{Vare06, + author = {C. Varela and S. Timp and N.
Karssemeijer}, + title = {Use of border information in the classification of mammographic masses}, + journal = PMB, + year = {2006}, + volume = {51}, + pages = {425--441}, + doi = {10.1088/0031-9155/51/2/016}, + abstract = {{W}e are developing a new method to characterize the margin of a mammographic mass lesion to improve the classification of benign and malignant masses. {T}owards this goal, we designed features that measure the degree of sharpness and microlobulation of mass margins. {W}e calculated these features in a border region of the mass defined as a thin band along the mass contour. {T}he importance of these features in the classification of benign and malignant masses was studied in relation to existing features used for mammographic mass detection. {F}eatures were divided into three groups, each representing a different mass segment: the interior region of a mass, the border and the outer area. {T}he interior and the outer area of a mass were characterized using contrast and spiculation measures. {C}lassification was done in two steps. {F}irst, features representing each of the three mass segments were merged into a neural network classifier resulting in a single regional classification score for each segment. {S}econdly, a classifier combined the three single scores into a final output to discriminate between benign and malignant lesions. {W}e compared the classification performance of each regional classifier and the combined classifier on a data set of 1076 biopsy proved masses (590 malignant and 486 benign) from 481 women included in the {D}igital {D}atabase for {S}creening {M}ammography. {R}eceiver operating characteristic ({ROC}) analysis was used to evaluate the accuracy of the classifiers. {T}he area under the {ROC} curve ({A}(z)) was 0.69 for the interior mass segment, 0.76 for the border segment and 0.75 for the outer mass segment. {T}he performance of the combined classifier was 0.81 for image-based and 0.83 for case-based evaluation. {T}hese results show that the combination of information from different mass segments is an effective approach for computer-aided characterization of mammographic masses. {A}n advantage of this approach is that it allows the assessment of the contribution of regions rather than individual features. {R}esults suggest that the border and the outer areas contained the most valuable information for discrimination between benign and malignant masses.}, + file = {Vare06.pdf:pdf\\Vare06.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {2}, + pmid = {16394348}, + month = {1}, + gsid = {11079177198401411462}, + gscites = {103}, + ss_id = {35ad1f356614dfd1fa89f11720f00e25546eb996}, + all_ss_ids = {['35ad1f356614dfd1fa89f11720f00e25546eb996']}, +} + +@article{Veld00, + author = {W. J. Veldkamp and N. Karssemeijer and J. D. Otten and J. H. Hendriks}, + title = {Automated classification of clustered microcalcifications into malignant and benign types}, + journal = MP, + year = {2000}, + volume = {27}, + pages = {2600--2608}, + doi = {10.1118/1.1318221}, + abstract = {{T}he objectives in this study were to design and test a fully automated method for classification of microcalcification clusters into malignant and benign types, and to compare the method's performance with that of radiologists. {A} novel aspect of the approach is that the relative location and orientation of clusters inside the breast was taken into account for feature calculation. 
{F}urthermore, correspondence of location of clusters in mediolateral oblique ({MLO}) and cranio-caudal ({CC}) views, was used in feature calculation and in final classification. {I}nitially, microcalcifications were automatically detected by using a statistical method based on {B}ayesian techniques and a {M}arkov random field model. {T}o determine malignancy or benignancy of a cluster, a method based on two classification steps was developed. {I}n the first step, classification of clusters was performed and in the second step a patient based classification was done. {A} total of 16 features was used in the study. {T}o identify meaningful features, a feature selection was applied, using the area under the receiver operating characteristic ({ROC}) curve ({A}z value) as a criterion. {F}or classification the k-nearest-neighbor method was used in a leave-one-patient-out procedure. {A} database of 192 mammograms with 280 true positive detected microcalcification clusters was used for evaluation of the method. {T}he set consisted of cases that were selected for diagnostic work up during a 4 year period of screening in the {N}ijmegen region ({T}he {N}etherlands). {B}ecause of the high positive predictive value in the screening program (50%), this set did not contain obvious benign cases. {T}he method's best patient-based performance on this set corresponded with {A}z = 0.83, using nine features. {A} subset of the data set, containing mammograms from 90 patients, was used for comparing the computer results to radiologists' performance. {T}en radiologists read these cases on a light-box and assessed the probability of malignancy for each patient. {A}ll participants had experience in clinical mammography and participated in our observer study during the last 2 days of a 2-week training session leading to screening mammography certification. {R}esults on the subset showed that the method's performance ({A}z = 0.83) was considerably higher than that of the radiologists ({A}z = 0.63).}, + file = {Veld00.pdf:pdf\\Veld00.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {11}, + pmid = {11128313}, + month = {11}, + gsid = {13216335598903608465}, + gscites = {133}, + ss_id = {0d8168e66dd34b4039f31cc5f47482262c0be316}, + all_ss_ids = {['0d8168e66dd34b4039f31cc5f47482262c0be316']}, +} + +@article{Veld00a, + author = {W. J. Veldkamp and N. Karssemeijer}, + title = {Normalization of local contrast in mammograms}, + journal = TMI, + year = {2000}, + volume = {19}, + pages = {731--738}, + doi = {10.1109/42.875197}, + file = {Veld00a.pdf:pdf\\Veld00a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {7}, + pmid = {11055788}, + month = {7}, + gsid = {5530446128328601250}, + gscites = {88}, + ss_id = {a89e968c04e1a53c00ade6242c95151f7b0306a1}, + all_ss_ids = {['a89e968c04e1a53c00ade6242c95151f7b0306a1']}, +} + +@phdthesis{Veld00b, + author = {W. J. H. Veldkamp}, + title = {Computer Aided Characterization of Microcalcification Clusters in Mammograms}, + year = {2000}, + url = {http://repository.ubn.ru.nl/handle/2066/18902}, + abstract = {To detect breast cancer as early as possible and hence to increase the survival chance, breast cancer screening programs have been introduced in a number of countries. One of the important radiologic tasks in the Dutch screening program is detection and characterization of microcalcification clusters. Clustered microcalcification may indicate breast cancer at an early stage, but 80% of the clusters encountered in the female breast are due to benign processes. 
This thesis describes methods for automated detection and characterization of microcalcification clusters. It is hoped that detection and characterization methods as described in this thesis can improve the diagnostic performance of radiologists in screening mammography.}, + copromotor = {N. Karssemeijer and J. H. C. L. Hendriks}, + file = {Veld00b.pdf:pdf\\Veld00b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {C. C. A. M. Gielen}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@inproceedings{Veld99a, + author = {Wouter J. H. Veldkamp and Nico Karssemeijer}, + title = {Improved method for detection of microcalcification clusters in digital mammograms}, + booktitle = MI, + year = {1999}, + volume = {3661}, + series = SPIE, + pages = {512-522}, + doi = {10.1117/12.348607}, + url = {http://link.aip.org/link/?PSI/3661/512/1}, + abstract = {In this study it is shown that the performance of a statistical method for detection of microcalcification clusters in digital mammograms can be improved substantially by using a second step of classification. During this second step, detected clusters are automatically classified into true positive and false positive detected clusters. For classification the k-nearest neighbor method was used in a leave-one-patient-out procedure. The sensitivity level of the method was adjusted both in the first detection step and in the second classification step. The Mahalanobis distance was used as a criterion in the sequential forward selection procedure for selection of features. This primary feature selection method was combined with a classification performance criterion for the final feature selection. By applying the initial detection at various levels of sensitivity, various sets of false and true positive detected clusters were created. At each of these sets the classification can be performed. Results show that the overall best FROC performance after secondary classification is obtained by varying sensitivity levels in both the first and second step. Furthermore, it was shown that performing a new feature selection for each different set of false and true positives is essential. A large database of 245 digitized mammograms with 341 clusters was used for evaluation of the method.}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {12269146113897155166}, + gscites = {30}, + ss_id = {c928564a62aecc7fa44d7df8351dddb4205f5a2b}, + all_ss_ids = {['c928564a62aecc7fa44d7df8351dddb4205f5a2b']}, +} + +@article{Vele21, + author = {Velema, M S and Canu, L and Dekkers, T and Hermus, A R M M and Timmers, H J L M and Schultze Kool, L J and Groenewoud, H J M M and Jacobs, C and Deinum, J and SPARTACUS Investigators}, + title = {Volumetric evaluation of CT images of adrenal glands in primary aldosteronism.}, + journal = {Journal of endocrinological investigation}, + year = {2021}, + volume = {44}, + number = {11}, + pages = {2359--2366}, + doi = {10.1007/s40618-021-01540-5}, + abstract = {To investigate whether adrenal volumetry provides better agreement with adrenal vein sampling (AVS) than conventional CT for subtyping PA. Furthermore, we evaluated whether the size of this contralateral adrenal was a prognostic factor for clinical outcome after unilateral adrenalectomy. We retrospectively analyzed volumes of both adrenal glands of the 180 CT-scans (88/180 with unilateral and 92/180 with bilateral disease) of the patients with PA included in the SPARTACUS trial of which 85 also had undergone an AVS.
In addition, we examined CT-scans of 20 healthy individuals to compare adrenal volumes with published normal values. Adrenal volume was higher for the left than the right adrenal (mean and SD: 6.49 +- 2.77 ml versus 5.25 +- 1.87 ml for the right adrenal; p < 0.001). Concordance between volumetry and AVS in subtyping was 58.8%, versus 51.8% between conventional CT results and AVS (p = NS). The volumes of the contralateral adrenals in the patients with unilateral disease (right 4.78 +- 1.37 ml; left 6.00 +- 2.73 ml) were higher than those of healthy controls reported in the literature (right 3.62 +- 1.23 ml p < 0.001; left 4.84 +- 1.67 ml p = 0.02). In a multivariable analysis the contralateral volume was not associated with biochemical or clinical success, nor with the defined daily doses of antihypertensive agents at 1 year follow-up. Volumetry of the adrenal glands is not superior to current assessment of adrenal size by CT for subtyping patients with PA. Furthermore, in patients with unilateral disease the size of the contralateral adrenal is enlarged but its size is not associated with outcome.}, + file = {Vele21.pdf:pdf\\Vele21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33666874}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/244324}, +} + +@inproceedings{Veli07, + author = {M. Velikova and N. de Carvalho Ferreira and P. Lucas}, + title = {Bayesian {N}etwork {D}ecomposition for {M}odeling {B}reast {C}ancer {D}etection}, + booktitle = {Proceedings of the 11th Conference on Artificial Intelligence in Medicine}, + year = {2007}, + volume = {4594}, + series = LNAI, + doi = {10.1007/978-3-540-73599-1_47}, + abstract = {The automated differentiation between benign and malignant abnormalities is a difficult problem in the breast cancer domain. While previous studies consider a single Bayesian network approach, in this paper we propose a novel perspective based on Bayesian network decomposition. We consider three methods that allow for different (levels of) network topological or structural decomposition. Through examples, we demonstrate some advantages of Bayesian network decomposition for the problem at hand: (i) natural and more intuitive representation of breast abnormalities and their features (ii) compact representation and efficient manipulation of large conditional probability tables, and (iii) a possible improvement in the knowledge acquisition and representation processes.}, + file = {Veli07.pdf:pdf/Veli07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Veli08, + author = {Velikova, Marina and Lucas, Peter J. F. and Ferreira, Nivea and Samulski, Maurice and Karssemeijer, Nico}, + title = {A decision support system for breast cancer detection in screening programs}, + booktitle = {Proceeding of the 2008 conference on ECAI 2008}, + year = {2008}, + publisher = {IOS Press}, + pages = {658--662}, + abstract = {{T}he goal of breast cancer screening programs is to detect cancers at an early (preclinical) stage, by using periodic mammographic examinations in asymptomatic women. {I}n evaluating cases, mammographers insist on reading multiple images (at least two) of each breast as a cancerous lesion tends to be observed in different breast projections (views). {M}ost computer-aided detection ({CAD}) systems, on the other hand, only analyze single views independently, and thus fail to account for the interaction between the views. 
{I}n this paper, we propose a {B}ayesian framework for exploiting multi-view dependencies between the suspected regions detected by a single-view {CAD} system. {T}he results from experiments with real-life data show that our approach outperforms the single-view {CAD} system in distinguishing between normal and abnormal cases. {S}uch a system can support screening radiologists to improve the evaluation of breast cancer cases.}, + optnote = {DIAG, RADIOLOGY}, + gsid = {18112098624718871737}, + gscites = {15}, + ss_id = {284f37574c118db59ec67a3b1580faa4b9eb6c7e}, + all_ss_ids = {['284f37574c118db59ec67a3b1580faa4b9eb6c7e']}, +} + +@inproceedings{Veli08a, + author = {Velikova, Marina and Daniels, Hennie and Samulski, Maurice}, + title = {Partially {M}onotone {N}etworks {A}pplied to {B}reast {C}ancer {D}etection on {M}ammograms}, + booktitle = {ICANN '08: Proceedings of the 18th international conference on Artificial Neural Networks, Part I}, + year = {2008}, + publisher = {Springer-Verlag}, + pages = {917--926}, + doi = {10.1007/978-3-540-87536-9_94}, + abstract = {{I}n many prediction problems it is known that the response variable depends monotonically on most of the explanatory variables but not on all. {O}ften such partially monotone problems cannot be accurately solved by unconstrained methods such as standard neural networks. {I}n this paper we propose so-called {MIN}-{MAX} networks that are partially monotone by construction. {W}e prove that this type of network has the uniform approximation property, which is a generalization of the result by {S}ill on totally monotone networks. {I}n a case study on breast cancer detection on mammograms we show that enforcing partial monotonicity constraints in {MIN}-{MAX} networks leads to models that not only comply with the domain knowledge but also outperform standard neural networks in terms of accuracy, especially if the data set is relatively small.}, + file = {Veli08a.pdf:pdf\\Veli08a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Veli08d, + author = {Velikova, Marina and Samulski, Maurice and Karssemeijer, Nico and Lucas, Peter}, + title = {Toward {E}xpert {K}nowledge {R}epresentation for {A}utomatic {B}reast {C}ancer {D}etection}, + booktitle = {AIMSA '08: Proceedings of the 13th international conference on Artificial Intelligence}, + year = {2008}, + publisher = {Springer-Verlag}, + pages = {333--344}, + doi = {10.1007/978-3-540-85776-1_28}, + abstract = {{I}n reading mammograms, radiologists judge for the presence of a lesion by comparing at least two breast projections (views) as a lesion is to be observed in both of them. {M}ost computer-aided detection ({CAD}) systems, on the other hand, treat single views independently and thus they fail to account for the interaction between the breast views. {F}ollowing the radiologist's practice, in this paper, we develop a {B}ayesian network framework for automatic multi-view mammographic analysis based on causal independence models and the regions detected as suspicious by a single-view {CAD} system. {W}e have implemented two versions of the framework based on different definitions of multi-view correspondences. {T}he proposed approach is evaluated and compared against the single-view {CAD} system in an experimental study with real-life data.
{T}he results show that using expert knowledge helps to increase the cancer detection rate at a patient level.}, + file = {Veli08d.pdf:pdf\\Veli08d.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {2016380398303743237}, + gscites = {6}, + ss_id = {77d011099d6238973e9650c55120bb45da1606c9}, + all_ss_ids = {['77d011099d6238973e9650c55120bb45da1606c9']}, +} + +@article{Veli09, + author = {M. Velikova and M. Samulski and P. J. F. Lucas and N. Karssemeijer}, + title = {Improved mammographic {CAD} performance using multi-view information: a {B}ayesian network framework}, + journal = PMB, + year = {2009}, + volume = {54}, + pages = {1131--1147}, + doi = {10.1088/0031-9155/54/5/003}, + abstract = {{M}ammographic reading by radiologists requires the comparison of at least two breast projections (views) for the detection and the diagnosis of breast abnormalities. {D}espite their reported potential to support radiologists, most mammographic computer-aided detection ({CAD}) systems have a major limitation: as opposed to the radiologist's practice, computerized systems analyze each view independently. {T}o tackle this problem, in this paper, we propose a {B}ayesian network framework for multi-view mammographic analysis, with main focus on breast cancer detection at a patient level. {W}e use causal-independence models and context modeling over the whole breast represented as links between the regions detected by a single-view {CAD} system in the two breast projections. {T}he proposed approach is implemented and tested with screening mammograms for 1063 cases of whom 385 had breast cancer. {T}he single-view {CAD} system is used as a benchmark method for comparison. {T}he results show that our multi-view modeling leads to significantly better performance in discriminating between normal and cancerous patients. {W}e also demonstrate the potential of our multi-view system for selecting the most suspicious cases.}, + file = {Veli09.pdf:pdf\\Veli09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {19174596}, + month = {1}, + gsid = {17762538587165435446}, + gscites = {62}, + ss_id = {035397ac5e8e55ade890c76064260a11bccd20a2}, + all_ss_ids = {['035397ac5e8e55ade890c76064260a11bccd20a2']}, +} + +@inproceedings{Veli09a, + author = {Velikova, Marina and Samulski, Maurice and Lucas, Peter J. and Karssemeijer, Nico}, + title = {Causal {P}robabilistic {M}odelling for {T}wo-{V}iew {M}ammographic {A}nalysis}, + booktitle = {AIME '09: Proceedings of the 12th Conference on Artificial Intelligence in Medicine}, + year = {2009}, + publisher = {Springer-Verlag}, + pages = {395--404}, + doi = {10.1007/978-3-642-02976-9_56}, + abstract = {{M}ammographic analysis is a difficult task due to the complexity of image interpretation. {T}his results in diagnostic uncertainty, thus provoking the need for assistance by computer decision-making tools. {P}robabilistic modelling based on {B}ayesian networks is among the suitable tools, as it allows for the formalization of the uncertainty about parameters, models, and predictions in a statistical manner, yet such that available background knowledge about characteristics of the domain can be taken into account. {I}n this paper, we investigate a specific class of {B}ayesian networks--causal independence models--for exploring the dependencies between two breast image views. {T}he proposed method is based on a multi-stage scheme incorporating domain knowledge and information obtained from two computer-aided detection systems. 
{T}he experiments with actual mammographic data demonstrate the potential of the proposed two-view probabilistic system for supporting radiologists in detecting breast cancer, both at a location and a patient level.}, + file = {Veli09a.pdf:pdf\\Veli09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {16954029226551321177}, + gscites = {3}, + ss_id = {eeb232556baa984bf39d678b2619e58d47ee700d}, + all_ss_ids = {['eeb232556baa984bf39d678b2619e58d47ee700d']}, +} + +@inbook{Veli10, + author = {Velikova, M. and Ferreira, N. and Samulski, M. and Lucas, P. and Karssemeijer, N.}, + title = {An Advanced Probabilistic Framework for Assisting Screening Mammogram Interpretation}, + booktitle = {{C}omputational {I}ntelligence in {H}ealthcare 4}, + year = {2010}, + volume = {309}, + series = {{S}tudies in {C}omputational {I}ntelligence}, + publisher = {Springer Berlin / Heidelberg}, + pages = {371--395}, + doi = {10.1007/978-3-642-14464-6_17}, + url = {http://www.springer.com/engineering/book/978-3-642-14463-9}, + abstract = {Breast cancer is the most common form of cancer among women world-wide. One in nine women will be diagnosed with a form of breast cancer in her lifetime. In an effort to diagnose cancer at an early stage, screening programs have been introduced by using periodic mammographic examinations in asymptomatic women. In evaluating screening cases, radiologists are usually presented with two mammographic images of each breast as a cancerous lesion tends to be observed in different breast projections (views). Most computer-aided detection (CAD) systems, on the other hand, only analyse single views independently, and thus fail to account for the interaction between the views, and the breast cancer detection can be obscured due to the lack of consistency in lesion marking. This limits the usability and the trust in the performance of such systems. In this chapter, we propose a unified Bayesian network framework for exploiting multi-view dependencies between the suspicious regions detected by a single-view CAD system. The framework is based on a multi-stage scheme, which models the way radiologists interpret mammograms, at four different levels: region, view, breast and case. At each level, we combine all available image information for the patient obtained from a single-view CAD system using a special class of Bayesian networks--causal independence models. The results from experiments with actual screening data of 1063 cases, from which 383 were cancerous, show that our approach outperforms the single-view CAD system in distinguishing between normal and abnormal cases. This is a promising step towards the development of automated systems that can provide a valuable "second opinion" to the screening radiologists for improved evaluation of breast cancer cases.}, + file = {Veli10.pdf:pdf/Veli10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + gsid = {11048587563929834311}, + gscites = {1}, +} + +@inproceedings{Veli10a, + author = {M. Velikova and P. J. F. Lucas and N. Karssemeijer}, + title = {Using local context information to improve automatic mammographic mass detection}, + booktitle = SHTI, + year = {2010}, + volume = {160}, + series = {MEDINFO 2010 - Proceedings of the 13th World Congress on Medical Informatics}, + pages = {1291--1295}, + doi = {10.3233/978-1-60750-588-4-1291}, + abstract = {Despite their promising application, current Computer-Aided Detection (CAD) systems face difficulties, especially in the detection of malignant masses - a major mammographic sign for breast cancer.
One of the main problems is the large number of false positives prompted, which is a critical issue in screening programs where the number of normal cases is considerably large. A crucial determinant for this problem is the dependence of the CAD output on the single pixel-based locations initially detected. To refine the initial detection step, in this paper, we propose a novel approach by considering the context information between the neighbouring pixel features and classes for every initially detected suspicious location. Our modelling scheme is based on the Conditional Random Field technique and the mammographic features extracted by image processing techniques. In experimental study, we demonstrated the practical application of the approach and we compared its performance to that of a previously developed CAD system. The results demonstrated the superiority of the context modelling in terms of significantly improved accuracy without increase in computation efforts.}, + file = {Veli10a.pdf:pdf/Veli10a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {20841893}, + gsid = {9105855608591043142}, + gscites = {1}, + ss_id = {5afc31c4d231f5f54b5ee029573b80ee72ead116}, + all_ss_ids = {['5afc31c4d231f5f54b5ee029573b80ee72ead116']}, +} + +@article{Veli12, + author = {Velikova, Marina and Lucas, Peter J F. and Samulski, Maurice and Karssemeijer, Nico}, + title = {A probabilistic framework for image information fusion with an application to mammographic analysis}, + journal = MIA, + year = {2012}, + volume = {16}, + pages = {865--875}, + doi = {10.1016/j.media.2012.01.003}, + abstract = {The recent increased interest in information fusion methods for solving complex problem, such as in image analysis, is motivated by the wish to better exploit the multitude of information, available from different sources, to enhance decision-making. In this paper, we propose a novel method, that advances the state of the art of fusing image information from different views, based on a special class of probabilistic graphical models, called causal independence models. The strength of this method is its ability to systematically and naturally capture uncertain domain knowledge, while performing information fusion in a computationally efficient way. We examine the value of the method for mammographic analysis and demonstrate its advantages in terms of explicit knowledge representation and accuracy (increase of at least 6.3\% and 5.2\% of true positive detection rates at 5\% and 10\% false positive rates) in comparison with previous single-view and multi-view systems, and benchmark fusion methods such as na\"{i}ve Bayes and logistic regression.}, + file = {Veli12.pdf:pdf\\Veli12.pdf:PDF}, + optnote = {DIAG}, + pmid = {22326491}, + month = {5}, + gsid = {10983236706673950383}, + gscites = {34}, + ss_id = {6e1eac7fb957bdd979dca2a234a9e02ee4656245}, + all_ss_ids = {['6e1eac7fb957bdd979dca2a234a9e02ee4656245']}, +} + +@article{Veli13, + author = {Velikova, Marina and Lucas, Peter J F. and Samulski, Maurice and Karssemeijer, Nico}, + title = {On the interplay of machine learning and background knowledge in image interpretation by Bayesian Networks}, + journal = AIM, + year = {2013}, + volume = {57}, + pages = {73--86}, + doi = {10.1016/j.artmed.2012.12.004}, + abstract = {OBJECTIVES: To obtain a balanced view on the role and place of expert knowledge and learning methods in building Bayesian networks for medical image interpretation.
METHODS AND MATERIALS: The interpretation of mammograms was selected as the example medical image interpretation problem. Medical image interpretation has its own common standards and procedures. The impact of these on two complementary methods for Bayesian network construction was explored. Firstly, methods for the discretisation of continuous features were investigated, yielding multinomial distributions that were compared to the original Gaussian probabilistic parameters of the network. Secondly, the structure of a manually constructed Bayesian network was tested by structure learning from image data. The image data used for the research came from screening mammographic examinations of 795 patients, of whom 344 were cancerous. RESULTS: The experimental results show that there is an interesting interplay of machine learning results and background knowledge in medical image interpretation. Networks with discretised data lead to better classification performance (increase in the detected cancers of up to 11.7\%), easier interpretation, and a better fit to the data in comparison to the expert-based Bayesian network with Gaussian probabilistic parameters. Gaussian probability distributions are often used in medical image interpretation because of the continuous nature of many of the image features. The structures learnt supported many of the expert-originated relationships but also revealed some novel relationships between the mammographic features. Using discretised features and performing structure learning on the mammographic data has further improved the cancer detection performance of up to 17\% compared to the manually constructed Bayesian network model. CONCLUSION: Finding the right balance between expert knowledge and data-derived knowledge, both at the level of network structure and parameters, is key to using Bayesian networks for medical image interpretation. A balanced approach to building Bayesian networks for image interpretation yields more accurate and understandable Bayesian network models.}, + file = {Veli13.pdf:pdf\\Veli13.pdf:PDF}, + optnote = {DIAG}, + pmid = {23395008}, + month = {1}, + gsid = {15041417062850503864}, + gscites = {45}, + ss_id = {73de8d539bce0fed3f1d23153b169b00f35b4861}, + all_ss_ids = {['73de8d539bce0fed3f1d23153b169b00f35b4861']}, +} + +@article{Velt08, + author = {J. Veltman and M. Stoutjesdijk and R. Mann and H. J. Huisman and J. O. Barentsz and J. G. Blickman and C. Boetes}, + title = {Contrast-enhanced magnetic resonance imaging of the breast: the value of pharmacokinetic parameters derived from fast dynamic imaging during initial enhancement in classifying lesions}, + journal = ER, + year = {2008}, + volume = {18}, + pages = {1123--1133}, + doi = {10.1007/s00330-008-0870-8}, + abstract = {{T}he value of pharmacokinetic parameters derived from fast dynamic imaging during initial enhancement in characterizing breast lesions on magnetic resonance imaging ({MRI}) was evaluated. {S}ixty-eight malignant and 34 benign lesions were included. {I}n the scanning protocol, high temporal resolution imaging was combined with high spatial resolution imaging. {T}he high temporal resolution images were recorded every 4.1 s during initial enhancement (fast dynamic analysis). {T}he high spatial resolution images were recorded at a temporal resolution of 86 s (slow dynamic analysis). {I}n the fast dynamic evaluation pharmacokinetic parameters ({K}(trans), {V}(e) and k(ep)) were evaluated. 
{I}n the slow dynamic analysis, each lesion was scored according to the {BI}-{RADS} classification. {T}wo readers evaluated all data prospectively. {ROC} and multivariate analysis were performed. {T}he slow dynamic analysis resulted in an {AUC} of 0.85 and 0.83, respectively. {T}he fast dynamic analysis resulted in an {AUC} of 0.83 in both readers. {T}he combination of both the slow and fast dynamic analyses resulted in a significant improvement of diagnostic performance with an {AUC} of 0.93 and 0.90 ({P} = 0.02). {T}he increased diagnostic performance found when combining both methods demonstrates the additional value of our method in further improving the diagnostic performance of breast {MRI}.}, + file = {Velt08.pdf:pdf\\Velt08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {18270714}, + month = {2}, + gsid = {8693224903597491185}, + gscites = {70}, +} + +@inproceedings{Velz19, + author = {Sanne van Velzen and Majd Zreik and Nikolas Lessmann and Max A. Viergever and Pim A. de Jong and Helena M. Verkooijen and Ivana I{\v{s}}gum}, + title = {Direct prediction of cardiovascular mortality from low-dose chest {CT} using deep learning}, + booktitle = MI, + year = {2019}, + series = SPIE, + publisher = {{SPIE}}, + month = {mar}, + doi = {10.1117/12.2512400}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Velz19a, + author = {van Velzen, Sanne G. M. and Terry, James G. and de Vos, Bob D. and Lessmann, Nikolas and Nair, Sangeeta and Correa, Adolfo and Verkooijen, Helena M. and Carr, John J. and Isgum, Ivana}, + title = {Automatic prediction of coronary heart disease events using coronary and thoracic aorta calcium among african americans in the {Jackson Heart Study}}, + booktitle = RSNA, + year = {2019}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Velz19c, + author = {van Velzen, Sanne G. M. and Lessmann, Nikolas and Emaus, Marleen and van den Bongard, H. J. G. D. and Verkooijen, Helena M. and Isgum, Ivana}, + title = {Deep learning for calcium scoring in radiotherapy treatment planning {CT} scans in breast cancer patients}, + booktitle = RSNA, + year = {2019}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Velz20, + author = {Sanne G. M. van Velzen and Nikolas Lessmann and Birgitta K. Velthuis and Ingrid E. M. Bank and Desiree H. J. G. van den Bongard and Tim Leiner and Pim A. de Jong and Wouter B. Veldhuis and Adolfo Correa and James G. Terry and John Jeffrey Carr and Max A. Viergever and Helena M. Verkooijen and Ivana I{\v{s}}gum}, + title = {Deep learning for automatic calcium scoring in {CT}: Validation using multiple cardiac {CT} and chest {CT} protocols}, + journal = Radiology, + year = {2020}, + volume = {295}, + number = {1}, + pages = {66-79}, + doi = {10.1148/radiol.2020191621}, + abstract = {Background: Although several deep learning (DL) calcium scoring methods have achieved excellent performance for specific CT protocols, their performance in a range of CT examination types is unknown. Purpose: To evaluate the performance of a DL method for automatic calcium scoring across a wide range of CT examination types and to investigate whether the method can adapt to different types of CT examinations when representative images are added to the existing training data set. 
Materials and Methods: The study included 7240 participants who underwent various types of nonenhanced CT examinations that included the heart: coronary artery calcium (CAC) scoring CT, diagnostic CT of the chest, PET attenuation correction CT, radiation therapy treatment planning CT, CAC screening CT, and low-dose CT of the chest. CAC and thoracic aorta calcification (TAC) were quantified using a convolutional neural network trained with (a) 1181 low-dose chest CT examinations (baseline), (b) a small set of examinations of the respective type supplemented to the baseline (data specific), and (c) a combination of examinations of all available types (combined). Supplemental training sets contained 199-568 CT images depending on the calcium burden of each population. The DL algorithm performance was evaluated with intraclass correlation coefficients (ICCs) between DL and manual (Agatston) CAC and (volume) TAC scoring and with linearly weighted k values for cardiovascular risk categories (Agatston score; cardiovascular disease risk categories: 0, 1-10, 11-100, 101-400, >400). Results: At baseline, the DL algorithm yielded ICCs of 0.79-0.97 for CAC and 0.66-0.98 for TAC across the range of different types of CT examinations. ICCs improved to 0.84-0.99 (CAC) and 0.92-0.99 (TAC) for CT protocol-specific training and to 0.85-0.99 (CAC) and 0.96-0.99 (TAC) for combined training. For assignment of cardiovascular disease risk category, the k value for all test CT scans was 0.90 (95% confidence interval [CI]: 0.89, 0.91) for the baseline training. It increased to 0.92 (95% CI: 0.91, 0.93) for both data-specific and combined training. Conclusion: A deep learning calcium scoring algorithm for quantification of coronary and thoracic calcium was robust, despite substantial differences in CT protocol and variations in subject population. Augmenting the algorithm training with CT protocol-specific images further improved algorithm performance.}, + file = {Velz20.pdf:pdf\\Velz20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {32043947}, + month = {4}, + gsid = {5150322759857162575}, + gscites = {121}, + ss_id = {13c0cf335efd48efcb2f35187c5392e4a6e213b5}, + all_ss_ids = {['13c0cf335efd48efcb2f35187c5392e4a6e213b5']}, +} + +@conference{Ven10b, + author = {van de Ven, W. J. M. and Kleine, B. U. and van Elswijk, G. A. and Oostenveld, R. and Stegeman, D. F.}, + title = {The alpha-motoneuron pool as transmitter of rhythmicities in cortical motor drive}, + booktitle = {International Society of Electrophysiology and Kinesiology}, + year = {2010}, + abstract = {AIM: Corticospinal interaction gives support to the conceptual mechanism called communication through coherence, which is the assumption that neuronal groups communicate through coherent oscillatory activity. In this respect it is interesting how centrally evoked oscillatory phenomena are translated into EMG activity. We investigated the transfer properties of central drive transmission via the alpha-motoneuron pool to the muscle. METHODS: A model based on that of P. Matthews (J Physiol. 1996;492:597-628) was constructed (Figure 1). This model was used for the simulation of alpha-motoneuron firing patterns and the EMG signal as response to central drive input. Short-term synchrony was introduced by assuming a common input to each pair of alpha-motoneurons. The cortical input was modulated to investigate the transfer through the alpha-motoneuron pool in the frequency domain. Coherence between stochastical central input and EMG signal is also evaluated. 
Furthermore, the effect due to the often-used EMG rectification is investigated. RESULTS: Modulated cortical input is transferred with only a limited level of non-linearity. The alpha-motoneuron firing frequencies do play a role in the frequency distribution of the amplitude spectrum. However, no preference over proportionality in the region of the firing frequencies was found. Coherence analysis between the summed central input to the alpha-motoneuron pool and the EMG signal is large whereby the coupling strength does not depend on frequency in a range from 1 to 100 Hz. Common central input to pairs of alpha-motoneurons strongly increases the coherence levels as well as the amplitudes in the frequency spectrum. Rectification of the EMG signal introduces a clear frequency dependence. Especially the motoneuron firing frequency range is emphasized. CONCLUSION: Centrally evoked oscillatory phenomena are strongly transmitted via the alpha-motoneuron pool. The motoneuron firing frequencies do play a role in the transmission gain, but do not influence the coherence levels. Rectification of the EMG signal enhances the transmission gain, but lowers the coherence and introduces a strong frequency dependency. Because of its non-linearity, we think that rectification should better be avoided.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Ven10c, + author = {van de Ven, W. J. M. and van Basten, J. P. and de Kruif, B. J. and Dunias, P. and Wieringa, F. P.}, + title = {[Broad spectrum in vivo diffuse spectrometry - An exploration for improvements to the distinction of the neurovascular bundle during robot prostatectomy (RALP)]}, + booktitle = {Nederlands Tijdschrift voor Urologie}, + year = {2010}, + volume = {6}, + pages = {159}, + abstract = {Introduction: During radical prostatectomy it is important to be able to visually distinguish the neurovascular bundle (NVB) in order to prevent damage to it. The human eye perceives wavelengths of 400 - 680 nm, whereas a much broader spectral range can be captured electronically. We investigated whether there are measurable differences between the prostate and the NVB in this broader spectral range that could be used to make the tissue types more clearly visible to the surgeon. Because few optical spectra outside the visible range are known for the internal organs, a custom measurement setup was developed. Materials and methods: During 19 robot-assisted laparoscopic prostatectomies<=8 mm measured in at least one direction. We considered a Gleason score >=7 being csPCa. Descriptive statistics with 95% confidence intervals (CI) were used to determine any differences. + RESULTS: + We included 51 patients with FGB (59 PI-RADS 4 and 41% PI-RADS 5) and 227 patients with MRGB (34 PI-RADS 4 and 66% PI-RADS 5). Included patients had a median age of 69 years (IQR, 65-72) and a median PSA level of 11.0 ng/ml (IQR, 7.4-15.1) and a median age of 67 years (IQR, 61-70), the median PSA 12.8 ng/ml (IQR, 9.1-19.0) within the FGB and the MRGB group, respectively. Detection rates of csPCA did not differ significantly between FGB and MRGB, 49 vs. 61%, respectively. + CONCLUSION: + We did not detect significant differences between FGB and MRGB in the detection of csPCa. The differences in detection ratios between both biopsy techniques are narrow with an increasing lesion size.
This study warrants further studies to optimize selection of best biopsy modality.}, + file = {Vend17c.pdf:pdf\\Vend17c.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + pmid = {28871396}, + month = {9}, + gsid = {9029962091723191841}, + gscites = {36}, + ss_id = {b51d0edf0370b89b246e7e67f58640ffebd2e224}, + all_ss_ids = {['b51d0edf0370b89b246e7e67f58640ffebd2e224']}, +} + +@article{Vend18, + author = {Venderink, Wulphert and de Rooij, Maarten and Sedelaar, Michiel J. and Huisman, Henkjan J. and Futterer, Jurgen J.}, + title = {Elastic versus rigid image registration in {MRI-TRUS} fusion prostate biopsy: a systematic review and meta-analysis}, + doi = {10.1016/j.euf.2016.07.003}, + pages = {219-227}, + volume = {4}, + abstract = {CONTEXT: The main difference between the available magnetic resonance imaging-transrectal ultrasound (MRI-TRUS) fusion platforms for prostate biopsy is the method of image registration being either rigid or elastic. As elastic registration compensates for possible deformation caused by the introduction of an ultrasound probe for example, it is expected that it would perform better than rigid registration. + OBJECTIVE: The aim of this meta-analysis is to compare rigid with elastic registration by calculating the detection odds ratio (OR) for both subgroups. The detection OR is defined as the ratio of the odds of detecting clinically significant prostate cancer (csPCa) by MRI-TRUS fusion biopsy compared with systematic TRUS biopsy. Secondary objectives were the OR for any PCa and the OR after pooling both registration techniques. + EVIDENCE ACQUISITION: The electronic databases PubMed, Embase, and Cochrane were systematically searched for relevant studies according to the Preferred Reporting Items for Systematic Review and Meta-analysis Statement. Studies comparing MRI-TRUS fusion and systematic TRUS-guided biopsies in the same patient were included. The quality assessment of included studies was performed using the Quality Assessment of Diagnostic Accuracy Studies version 2. + EVIDENCE SYNTHESIS: Eleven papers describing elastic and 10 describing rigid registration were included. Meta-analysis showed an OR of csPCa for elastic and rigid registration of 1.45 (95% confidence interval [CI]: 1.21-1.73, p<0.0001) and 1.40 (95% CI: 1.13-1.75, p=0.002), respectively. No significant difference was seen between the subgroups (p=0.83). Pooling subgroups resulted in an OR of 1.43 (95% CI: 1.25-1.63, p<0.00001). + CONCLUSIONS: No significant difference was identified between rigid and elastic registration for MRI-TRUS fusion-guided biopsy in the detection of csPCa; however, both techniques detected more csPCa than TRUS-guided biopsy alone. 
+ PATIENT SUMMARY: We did not identify any significant differences in prostate cancer detection between two distinct magnetic resonance imaging-transrectal ultrasound fusion systems which vary in their method of compensating for prostate deformation.}, + file = {:pdf/Vend18.pdf:PDF}, + journal = EUF, + optnote = {DIAG, MAGIC, RADIOLOGY}, + pmid = {28753777}, + year = {2018}, + month = {3}, + all_ss_ids = {['acaba0aa6a8a55104ae2cb09f093aa30621ffb47', '974e8ae2bfa594ba157da2794a3254d12eb7bf26', '2a846d2cba5a8f0db089e7f2af275a535b15d82f']}, + gscites = {54}, +} + +@inproceedings{Vend23, + author = {Vendittelli, Pierpaolo and Bokhorst, John-Melle and Smeets, Esther Markus and Kryklyva, Valentyna and Brosens, Lodewijk and Verbeke, Caroline and Litjens, Geert}, + booktitle = MIDL, + title = {Automatic quantification of {TSR} as a prognostic marker for pancreatic cancer.}, + url = {https://openreview.net/forum?id=Dtz_iaUpGc}, + abstract = {The current diagnostic and outcome prediction methods for pancreatic cancer lack prognostic power. As such, identifying novel biomarkers using machine learning has become of increasing interest. In this study, we introduce a novel method for estimating the tumor-stroma ratio (TSR) in whole slide images (WSIs) of pancreatic tissue and assess its potential as a prognostic biomarker. A multi-step strategy for estimating TSR is proposed, including epithelium segmentation based on an immunohistochemical reference standard, a coarse pancreatic cancer segmentation, and a post-processing pipeline for TSR quantification. The resultant segmentation models are validated on external test sets using the Dice coefficient, and additionally, the TSR's potential as a prognostic factor is assessed using survival analysis, resulting in a C-index of 0.61.}, + file = {:pdf/Vend23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2023}, +} + +@conference{Venh15a, + author = {Freerk G. Venhuizen and Myrte B. Breukink and Bram van Ginneken and Mark J. J. P. van Grinsven and Bart Bloemen and Carel B. Hoyng and Thomas Theelen and Camiel J. F. Boon and Clara I. S\'{a}nchez}, + title = {Automated Quantification of Subretinal Fluid in Central Serous Chorioretinopathy in {3D} Optical Coherence Tomography Images}, + booktitle = ARVO, + year = {2015}, + abstract = {Purpose: Central serous chorioretinopathy ({CSC}) is an ocular disorder characterized by serous retinal detachment and associated with fluid accumulation beneath the retina. Obtaining accurate measures on the size and volume of the fluid deposit may be an important biomarker to assess disease progression and treatment outcome. We developed a system for automatic volumetric quantification of subretinal fluid in optical coherence tomography ({OCT}). Methods: OCT images obtained from 15 patients with varying presence of subretinal fluid were selected from the clinic. A 3D region growing based algorithm was developed to segment the fluid after selecting an arbitrary seed point located in the fluid deposit. The obtained total volume, and the extent of the segmented fluid volume were compared to manual delineations made by two experienced human graders. Results: A high intra-class correlation coefficient ({ICC}) value (0.997) was obtained when comparing the fluid volume calculated by the proposed method with the volume delineated by the two graders. 
Similarly, the spatial overlap agreement on the obtained volumes, measured with the Dice similarity coefficient ({DC}), between the manual delineations and the software output was high ({DC}=0.87) and comparable to the overlap agreement between observers' delineations ({DC}=0.85). In addition, the quantification time was reduced substantially by a factor of 5 compared to manual assessment. The quantified values obtained by the algorithm were shown to be highly reproducible, obtaining a {DC} value of 0.99 and an ICC value of 0.98 when varying the seed point used for initializing the algorithm. Conclusions: An image analysis algorithm for the automatic quantification of subretinal fluid in {OCT} images of {CSC} patients was developed. The proposed algorithm is able to accurately quantify the extent of fluid deposits in a fast and reproducible manner, allowing accurate assessment of disease progression and treatment outcome.}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {a11db962a303ac6cb6ff4216659189c2ed378c21}, + gscites = {0}, +} + +@inproceedings{Venh15b, + author = {Freerk G. Venhuizen and Bram van Ginneken and Bart Bloemen and Mark J. J. P. van Grinsven and Rick Philipsen and Carel Hoyng and Thomas Theelen and Clara I. S\'{a}nchez}, + title = {Automated Age-Related Macular Degeneration Classification in {OCT} using Unsupervised Feature Learning}, + booktitle = MI, + year = {2015}, + volume = {9414}, + series = SPIE, + doi = {10.1117/12.2081521}, + abstract = {Age-related Macular Degeneration (AMD) is a common eye disorder with high prevalence in elderly people. The disease mainly affects the central part of the retina, and could ultimately lead to permanent vision loss. Optical Coherence Tomography (OCT) is becoming the standard imaging modality in diagnosis of AMD and the assessment of its progression. However, the evaluation of the obtained volumetric scan is time consuming, expensive and the signs of early AMD are easy to miss. In this paper we propose a classification method to automatically distinguish AMD patients from healthy subjects with high accuracy. The method is based on an unsupervised feature learning approach, and processes the complete image without the need for an accurate pre-segmentation of the retina. The method can be divided in two steps: an unsupervised clustering stage that extracts a set of small descriptive image patches from the training data, and a supervised training stage that uses these patches to create a patch occurrence histogram for every image on which a random forest classifier is trained. Experiments using 384 volume scans show that the proposed method is capable of identifying AMD patients with high accuracy, obtaining an area under the Receiver Operating Curve of 0.984. Our method allows for a quick and reliable assessment of the presence of AMD pathology in OCT volume scans without the need for accurate layer segmentation algorithms.}, + file = {:pdf\\Venh15b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {94141I}, + month = {3}, + gsid = {6530553375448056275}, + gscites = {72}, + ss_id = {ae6efa478f3ecc99f2273effad86f7a8a78a333a}, + all_ss_ids = {['ae6efa478f3ecc99f2273effad86f7a8a78a333a']}, +} + +@conference{Venh15c, + author = {Freerk G. Venhuizen and Mark J.J.P. van Grinsven and Carel B. Hoyng and Thomas Theelen and Bram van Ginneken and Clara I.
S\'{a}nchez}, + title = {Vendor Independent Cyst Segmentation in Retinal {SD-OCT} Volumes using a Combination of Multiple Scale Convolutional Neural Networks}, + booktitle = MICCAI, + year = {2015}, + abstract = {Major causes of blindness in developed countries are retinal vascular diseases, essentially neovascular age-related macular degeneration ({AMD}), retinal vein occlusion ({RVO}) and diabetic maculopathy ({DMP}). Automated computer-aided detection and diagnosis~({CAD}) systems capable of detecting, classifying and quantifying the characteristics of the pathology associated with these retinal diseases is highly beneficial in the diagnosis, treatment prediction and the assessment of treatment progression. Among others, the presence of retinal cysts are an important biomarker in AMD and RVO, thus their detection and segmentation is beneficial to clinical disease analysis. Optical Coherence Tomography ({OCT}) imaging has the ability to visualize and analyze the morphology of the retina, as well as its abnormalities. Due to the technological advances in OCT imaging with regard to imaging speed, image quality, and functional analysis, OCT is rapidly becoming one of the main imaging modalities used in clinical practice, and for the quantification and analysis of disease-specific biomarkers such as cyst volume. In this work we propose a fully automatic CAD system for retinal cyst segmentation in OCT volume scans. The system is capable of detecting cysts in OCT volumes acquired with OCT scanners from different vendors, for which the amount of noise, image quality and contrast varies strongly.}, + file = {venh15a.pdf:pdf\\venh15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {1d3c69edf9e573412de0c758b3db1b8f9996f2c1}, + gscites = {13}, +} + +@conference{Venh16a, + author = {Freerk G. Venhuizen and Mark J. J. P. van Grinsven and Bram van Ginneken and Carel B. Hoyng and Thomas Theelen and Clara I. S\'{a}nchez}, + booktitle = ARVO, + title = {Fully automated quantification of intraretinal cysts in 3D optical coherence tomography}, + abstract = {Purpose: In age-related macular degeneration (AMD) the presence of intraretinal cysts is an important prognostic biomarker. Optical coherence tomography imaging has shown to be capable of accurately visualizing and characterizing the three-dimensional shape and extent of cysts. The detection and quantification of intraretinal cysts is highly beneficial for the prediction of treatment outcome and the assessment of the treatment progression. To aid the clinician with quantified information regarding cysts, we developed a fully automated system capable of detecting and delineating intraretinal cysts in 3D optical coherence tomography images.Methods: For this study a total of 30 OCT volumes acquired using four different brand OCT scanners were provided by the OPTIMA cyst segmentation challenge, containing a wide variety of retinal cysts together with manual cyst delineations. A pixel classifier based on a multiscale convolutional neural network (CNN) was developed to predict if an image pixel belongs to a cyst or to the background by considering a small neighborhood around the pixel of interest. The CNN follows a two stage approach, where in the first stage, multiple CNN's are used in parallel to obtain a segmentation at different image scales. In the second stage, the individual segmentations are merged, combining local information obtained with the lower scale network with contextual information obtained from the higher scale networks. 
After providing the neural network with enough training samples, the network can automatically detect and segment cysts in OCT volumes. The obtained segmentations were compared to manual delineations made by two experienced human graders. Results: The spatial overlap agreement on the obtained volumes, measured by the Dice similarity coefficient, between the manual delineations and the software output was 0.55, which is substantial considering the difficulty of the task. In addition, the quantification time was reduced dramatically, and takes only a few seconds for a complete OCT volume. Conclusions: An image analysis algorithm for the automatic quantification of intraretinal cysts in OCT images was developed. The proposed algorithm is able to detect and quantify the three dimensional shape and extent of cysts in a fast and reproducible manner, allowing accurate assessment of disease progression and treatment outcome.}, + optnote = {DIAG, RADIOLOGY}, + year = {2016}, + all_ss_ids = {a1b328c04b54decb972bf83458f9df4ab5608af3}, + gscites = {11}, +} + +@conference{Venh17, + author = {Freerk G. Venhuizen and Samuel Schaffhauser and Vivian Schreur and Bart Liefers and Bram van Ginneken and Carel B. Hoyng and Thomas Theelen and Eiko K. de Jong and Clara I. S\'{a}nchez}, + booktitle = ARVO, + title = {Fully automated detection of hyperreflective foci in optical coherence tomography}, + abstract = {Purpose: Diabetic macular edema ({DME}) is a retinal disorder characterized by a buildup of cystoidal fluid in the retina. + The typical treatment consists of monthly intravitreal anti vascular endothelial growth factor (anti-VEGF) injections. + However, the efficacy of this treatment varies strongly. + Recent studies have indicated that the presence and number of hyperreflective foci can possibly be considered a prognostic biomarker for treatment response in {DME}. + As the detection of foci is difficult and time-consuming manual foci quantification seems infeasible. + We therefore developed a fully automated system capable of detecting and quantifying foci in optical coherence tomography ({OCT}) images. + Methods: + 119 fovea centered B-scans obtained from 49 patients with {DME} were selected from a clinical database. + The data was divided in a training set of 96 {B}-scans from 40 patients, and a test set containing 23 {B}-scans from 9 patients. + A convolutional neural network ({CNN}) was developed to predict if an image pixel belongs to a hyperreflective focus by considering a small neighborhood around the pixel of interest. + The {CNN} consists of 7 convolutional layers and 2 max pooling layers. + After providing the system with enough training samples, the network automatically detects pixels with a high probability of being part of a hyperreflective focus. + Connected detections are considered as a single detection. + The obtained results were compared to manual annotations made by two experienced human graders in consensus for the central 3 mm surrounding the fovea. + Hyperreflective foci were only annotated in the layers ranging from the inner plexiform layer ({IPL}) to the outer nuclear layer ({ONL}) as manual detection is challenging in the other layers. + When a detection is overlapping with an annotated focus it is considered a true positive, otherwise it is counted as a false positive. + + Results: + + In the independent test set a sensitivity of 0.83 was obtained. + At this level of sensitivity, an average of 8.3 false positives per {B}-scan were detected. 
+ False positives were mainly caused by detections outside the selected range ({IPL} to {ONL}) and misdetections by the graders. + + Conclusions: + + An image analysis algorithm for the automatic detection and quantification of hyperreflective foci in {OCT} {B}-scans was developed. + The experiments show promising results to obtain quantitative foci based biomarkers that can be used for the prediction of treatment response in {DME}.}, + optnote = {DIAG, RADIOLOGY}, + year = {2017}, +} + +@article{Venh17a, + author = {Freerk G. Venhuizen and Bram van Ginneken and Freekje van Asten and Mark J.J.P van Grinsven and Sascha Fauser and Carel B Hoyng and Thomas Theelen and Clara I. S\'{a}nchez}, + title = {Automated Staging of Age-Related Macular Degeneration Using Optical Coherence Tomography}, + journal = IOVS, + year = {2017}, + volume = {58}, + number = {4}, + pages = {2318-2328}, + doi = {10.1167/iovs.16-20541}, + abstract = {Purpose: To evaluate a machine learning algorithm that automatically grades age-related macular degeneration (AMD) severity stages from optical coherence tomography (OCT) scans. + Methods: A total of 3265 {OCT} scans from 1016 patients with either no signs of {AMD} or with signs of early, intermediate, or advanced {AMD} were randomly selected from a large European multicenter database. + A machine learning system was developed to automatically grade unseen {OCT} scans into different {AMD} severity stages without requiring retinal layer segmentation. + The ability of the system to identify high-risk {AMD} stages and to assign the correct severity stage was determined by using receiver operator characteristic ({ROC}) analysis and {C}ohen's Kappa statistics, respectively. + The results were compared to those of two human observers. + Reproducibility was assessed in an independent, publicly available data set of 384 {OCT} scans. + Results: + The system achieved an area under the {ROC} curve of 0.980 with a sensitivity of 98.2% at a specificity of 91.2%. + This compares favorably with the performance of human observers who achieved sensitivities of 97.0% and 99.4% at specificities of 89.7% and 87.2%, respectively. + A good level of agreement with the reference was obtained (Kappa = 0.713) and was in concordance with the human observers (Kappa = 0.775 and Kappa = 0.755, respectively). + Conclusions: + A machine learning system capable of automatically grading {OCT} scans into {AMD} severity stages was developed and showed similar performance as human observers. + The proposed automatic system allows for a quick and reliable grading of large quantities of {OCT} scans, which could increase the efficiency of large-scale AMD studies and pave the way for {AMD} screening using {OCT}.}, + file = {Venh17a.pdf:pdf\\Venh17a.pdf:PDF}, + optnote = {DIAG}, + pmid = {28437528}, + month = {4}, + gsid = {8934997778421574924}, + gscites = {89}, + ss_id = {e29513095896df886ebef0e1c8f8e40b6990cd70}, + all_ss_ids = {['e29513095896df886ebef0e1c8f8e40b6990cd70']}, +} + +@article{Venh17b, + author = {Freerk G. Venhuizen and Bram van Ginneken and Bart Liefers and Vivian Schreur and Mark J.J.P. van Grinsven and Sascha Fauser and Carel B. Hoyng and Thomas Theelen and Clara I.
S\'{a}nchez}, + title = {Robust Total Retina Thickness Segmentation in Optical Coherence Tomography Images using Convolutional Neural Networks}, + journal = BOE, + year = {2017}, + volume = {8}, + number = {7}, + pages = {3292-3316}, + doi = {10.1364/BOE.8.003292}, + abstract = {We developed a fully automated system using a convolutional neural network ({CNN}) for total retina segmentation in optical coherence tomography ({OCT}) that is robust to the presence of severe retinal pathology. + A generalized U-net network architecture was introduced to include the large context needed to account for large retinal changes. + The proposed algorithm outperformed qualitative and quantitatively two available algorithms. + The algorithm accurately estimated macular thickness with an error of 14.0 +- 22.1 micrometer, substantially lower than the error obtained using the other algorithms (42.9 +- 22.1 micrometer and 27.1 +- 69.3 micrometer, respectively). + These results highlighted the proposed algorithm's capability of modeling the wide variability in retinal appearance and obtained a robust and reliable retina segmentation even in severe pathological cases.}, + file = {Venh17b.pdf:pdf\\Venh17b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28717568}, + month = {6}, + gsid = {14231747073311085579}, + gscites = {112}, + ss_id = {855320ff1a3bda22505a8f9da2c24f56c5ee7712}, + all_ss_ids = {['855320ff1a3bda22505a8f9da2c24f56c5ee7712']}, +} + +@article{Venh18, + author = {Venhuizen, F. G. and van Ginneken, B. and Liefers, B. and van Asten, F. and Schreur, V. and Fauser, S. and Hoyng, C. B. and Theelen, T. and S\'{a}nchez, C. I.}, + title = {A Deep Learning Approach for Detection and Quantification of Intraretinal Cystoid Fluid in Multivendor Optical Coherence Tomography}, + journal = BOE, + year = {2018}, + volume = {9}, + number = {4}, + pages = {1545-1569}, + doi = {10.1364/BOE.9.001545}, + abstract = {We developed a deep learning algorithm for the automatic segmentation and quantification of intraretinal cystoid fluid (IRC) in spectral domain optical coherence tomography (SD-OCT) volumes independent of the device used for acquisition. A cascade of neural networks was introduced to include prior information on the retinal anatomy, boosting performance significantly. The proposed algorithm approached human performance reaching an overall Dice coefficient of 0.754 +- 0.136 and an intraclass correlation coefficient of 0.936, for the task of IRC segmentation and quantification, respectively. The proposed method allows for fast quantitative IRC volume measurements that can be used to improve patient care, reduce costs, and allow fast and reliable analysis in large population studies.}, + file = {:pdf/Venh18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29675301}, + month = {3}, + ss_id = {c4c99492376b43d7120067f806aa2d000fbc0b2f}, + all_ss_ids = {['374f4a7676183c95f901e655f2caf170cdd9ec9d', 'c4c99492376b43d7120067f806aa2d000fbc0b2f']}, + gscites = {112}, +} + +@phdthesis{Venh19, + author = {Freerk Venhuizen}, + title = {Machine Learning for Quantification of Age-Related Macular Degeneration Imaging Biomarkers in Optical Coherence Tomography}, + url = {https://repository.ubn.ru.nl/handle/2066/212465}, + abstract = {Age related macular degeneration (AMD) is the most important cause of severe vision loss in the elderly. With the rapid growth of the aging population, the estimated number of people suffering from AMD is 196 million in 2020. 
As AMD is a disease that requires frequent treatment, it causes a substantial burden on the patient and on the health care system. We therefore need to work towards more efficient treatment procedures, treatment monitoring, and diagnostic tools. In this thesis an effort is made to improve patient care and reduce the burden on the health care system by providing tools aimed at automating various tasks in the field of ophthalmology.}, + copromotor = {C. S\'{a}nchez, T. Theelen}, + file = {Venh19.pdf:pdf\Venh19.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken, C. Hoyng}, + school = {Radboud University, Nijmegen, The Netherlands}, + year = {2019}, + journal = {PhD thesis}, +} + +@article{Veni04, + author = {T. Veninga and H. Huisman and R. W. M. van der Maazen and H. Huizenga}, + title = {Clinical validation of the normalized mutual information method for registration of {CT} and {MR} images in radiotherapy of brain tumors}, + journal = JACMP, + year = {2004}, + volume = {5}, + pages = {66--79}, + abstract = {{I}mage registration integrates information of different imaging modalities and has the potential to improve target volume determination in radiotherapy planning. {T}his paper describes the implementation and validation of a 3{D} fully automated registration procedure in the process of radiotherapy treatment planning of brain tumors. 15 {P}atients with various brain tumors received {CT} and {MR} brain imaging before the start of radiotherapy. {T}he normalized mutual information ({NMI}) method was used for image registration. {R}egistration accuracy was estimated by performing statistical analysis of coordinate differences between {CT} and {MR} anatomical landmarks along the x-, y- and z-axes. {S}econd, a visual validation protocol was developed to validate the quality of individual registration solutions and this protocol was tested in a series of 36 {CT}-{MR} registration procedures with intentionally applied registration errors. {T}he mean coordinate differences between {CT} and {MR} landmarks along the x- and y-axes were in general within 0.5 mm. {T}he mean coordinate differences along the z-axis were within 1.0 mm, which is of the same magnitude as the applied slice thickness in scanning. {S}econd, the detection of intentionally applied registration errors by employment of a standardized visual validation protocol resulted in low false-negative and low false-positive rates. {A}pplication of the {NMI} method for the brain results in excellent automatic registration accuracy and the method has been incorporated in daily routine within our institute. {A} standardized validation protocol is proposed that ensures the quality of individual registrations by detecting registration errors with high sensitivity and specificity. 
{T}his protocol is proposed for the validation of other linear registration methods.}, + file = {Veni04.pdf:pdf\\Veni04.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {15753941}, + month = {6}, + gsid = {9566812802239413729}, + gscites = {53}, + ss_id = {62f44c20139e5638ca3f9b67bb446ebc8c4bac25}, + all_ss_ids = {['62f44c20139e5638ca3f9b67bb446ebc8c4bac25']}, +} + +@conference{Venk20, + author = {Venkadesh, Kiran Vaidhya and Setio, Arnaud Arindra Adiyoso and Saghir, Zaigham and van Ginneken, Bram and Jacobs, Colin}, + booktitle = RSNA, + title = {Deep Learning for Lung Nodule Malignancy Prediction: Comparison With Clinicians and the Brock Model on an Independent Dataset From a Large Lung Screening Trial}, + abstract = {PURPOSE: The majority of studies on automated lung nodule malignancy prediction utilize subjective labels provided by radiologists instead of using a histopathological reference standard. The aim of this study was to investigate the performance of a deep learning system that was trained using subjective labels from LIDC-IDRI by testing it on two independent datasets of nodules from the Danish Lung Cancer Screening Trial (DLCST) with histopathological proof or follow-up over a period of at least 2 years, and comparing performance with a panel of 11 clinicians and the clinically established Brock risk model. METHOD AND MATERIALS: We considered nodules annotated by at least 3 out of 4 radiologists from the LIDC-IDRI dataset. The malignancy ratings were averaged and indeterminate nodules were excluded resulting in 680 nodules (352 benign and 328 malignant) for development. We trained a deep learning system based on 2D multi-view CNN and 3D extension of VGGNet on the development set. We tested the system on two sets of nodules from DLCST. The first set, dataset A, consisted of 62 cancers and 120 random benign nodules and the second set, dataset B, consisted of the same 62 cancers and a size-matched group of 118 benign nodules. A group of 11 clinicians, consisting of 4 radiologists, 5 radiology residents, and 2 pulmonologists, were tasked with grading the nodules on a continuous scale from 0 to 100. Finally, the Brock risk model was also applied to all nodules. RESULTS: On dataset A, the deep learning system produced an AUC of 0.941, which is better than the average clinician (0.892, p = 0.02) and comparable to the Brock model (0.924, p = 0.35). On dataset B, the system produced an AUC of 0.737, which is comparable to the Brock model (0.70, p = 0.268) but worse than the average clinician (0.80, p = 0.034). CONCLUSION: The deep learning system trained with subjective labels performed comparably with humans and the Brock model but showed certain vulnerabilities when presented with large benign nodules. It is important to recognize the challenges involved in classifying indeterminate lung nodules and we think the field would benefit from publicly available datasets with a reference standard set by histopathological proof or follow-up.}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, +} + +@article{Venk21, + author = {Venkadesh, Kiran Vaidhya and Setio, Arnaud A. A. and Schreuder, Anton and Scholten, Ernst T. and Chung, Kaman and W Wille, Mathilde M. 
and Saghir, Zaigham and van Ginneken, Bram and Prokop, Mathias and Jacobs, Colin}, + journal = Radiology, + title = {Deep Learning for Malignancy Risk Estimation of Pulmonary Nodules Detected at Low-Dose Screening {CT}.}, + doi = {10.1148/radiol.2021204433}, + issue = {2}, + pages = {438--447}, + volume = {300}, + abstract = {Background Accurate estimation of the malignancy risk of pulmonary nodules at chest CT is crucial for optimizing management in lung cancer screening. Purpose To develop and validate a deep learning (DL) algorithm for malignancy risk estimation of pulmonary nodules detected at screening CT. Materials and Methods In this retrospective study, the DL algorithm was developed with 16 077 nodules (1249 malignant) collected between 2002 and 2004 from the National Lung Screening Trial. External validation was performed in the following three cohorts collected between 2004 and 2010 from the Danish Lung Cancer Screening Trial: a full cohort containing all 883 nodules (65 malignant) and two cancer-enriched cohorts with size matching (175 nodules, 59 malignant) and without size matching (177 nodules, 59 malignant) of benign nodules selected at random. Algorithm performance was measured by using the area under the receiver operating characteristic curve (AUC) and compared with that of the Pan-Canadian Early Detection of Lung Cancer (PanCan) model in the full cohort and a group of 11 clinicians composed of four thoracic radiologists, five radiology residents, and two pulmonologists in the cancer-enriched cohorts. Results The DL algorithm significantly outperformed the PanCan model in the full cohort (AUC, 0.93 [95% CI: 0.89, 0.96] vs 0.90 [95% CI: 0.86, 0.93]; P = .046). The algorithm performed comparably to thoracic radiologists in cancer-enriched cohorts with both random benign nodules (AUC, 0.96 [95% CI: 0.93, 0.99] vs 0.90 [95% CI: 0.81, 0.98]; P = .11) and size-matched benign nodules (AUC, 0.86 [95% CI: 0.80, 0.91] vs 0.82 [95% CI: 0.74, 0.89]; P = .26). Conclusion The deep learning algorithm showed excellent performance, comparable to thoracic radiologists, for malignancy risk estimation of pulmonary nodules detected at screening CT. This algorithm has the potential to provide reliable and reproducible malignancy risk scores for clinicians, which may help optimize management in lung cancer screening. (c) RSNA, 2021 See also the editorial by Tammemagi in this issue.}, + algorithm = {https://grand-challenge.org/algorithms/pulmonary-nodule-malignancy-prediction/}, + file = {:pdf/Venk21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34003056}, + year = {2021}, + ss_id = {80ec09e2cb8244cab245f1108496af731d160ebf}, + all_ss_ids = {['80ec09e2cb8244cab245f1108496af731d160ebf']}, + gscites = {51}, +} + +@conference{Venk21a, + author = {Venkadesh, Kiran Vaidhya and Schreuder, Anton and Scholten, Ernst Th. and Atkar-Khattra, Sukhinder and Mayo, John R. and Saghir, Zaigham and Wille, Mathilde M. W. and van Ginneken, Bram and Lam, Stephen and Prokop, Mathias and Jacobs, Colin}, + booktitle = RSNA, + title = {Integration Of A Deep Learning Algorithm Into The Clinically Established PanCan Model For Malignancy Risk Estimation Of Screen-detected Pulmonary Nodules In First Screening CT}, + abstract = {PURPOSE: To quantify the added value of integrating a deep learning algorithm (DLA)'s output to the existing Pan-Canadian Early Detection of Lung Cancer Study (PanCan) models for estimating malignancy risk of screen-detected pulmonary nodules.
+ METHODS AND MATERIALS: Our DLA was trained on a cohort of 14,828 benign and 1,249 malignant nodules from the National Lung Screening Trial. In the present study, we derived a new multivariable logistic regression model on the PanCan data that included the DLA risk score and the original variables from the PanCan model 2b except for "nodule type" and "spiculation" as these are already encoded in the DLA risk score. The new model was externally validated on baseline nodules from the Danish Lung Cancer Screening Trial (DLCST). For comparison, the performances of the existing PanCan model 2b and of our DLA stand-alone were also calculated. + RESULTS: 6024 benign and 86 malignant nodules from the PanCan data were included as the development set, and 818 benign and 34 malignant nodules from the Danish Lung Cancer Screening Trial (DLCST) were included as the validation set. The area under the receiver operating characteristic curve (AUC) for the DLA, PanCan model 2b, and the new model in the PanCan cohort were 0.944 (95% confidence interval = 0.917 - 0.968), 0.941 (0.908 - 0.969), and 0.944 (0.909 - 0.975), respectively. In the DLCST cohort, the AUCs were 0.917 (0.851 - 0.968), 0.896 (0.841 - 0.944), and 0.927 (0.878 - 0.969), respectively. + CONCLUSIONS: Using our DLA risk score to derive a new multivariable logistic regression model on the PanCan data does not appear to significantly improve the predictive performance in high-risk screening participants, but may serve as a replacement for the "nodule type" and "spiculation" parameters that are known to have substantial interobserver variability. + CLINICAL RELEVANCE / APPLICATION: Our DLA has a comparable nodule malignancy risk estimation performance to the PanCan models. This may help to make the computation of nodule risk scores easier and less subjective.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@conference{Venk22, + author = {Venkadesh, Kiran Vaidhya and Aleef, Tajwar Abrar and Schreuder, Anton and Scholten, Ernst Th. and van Ginneken, Bram and Prokop, Mathias and Jacobs, Colin}, + booktitle = ECR, + title = {Deep learning for estimating pulmonary nodule malignancy risk using prior CT examinations in lung cancer screening}, + abstract = {Purpose or Learning Objective: Nodule size, morphology, and growth are important factors for accurately estimating nodule malignancy risk in lung cancer screening CT examinations. In this work, we aimed to develop a deep learning (DL) algorithm that uses a current and a prior CT examination to estimate the malignancy risk of pulmonary nodules. + + Methods or Background: We developed a dual time-point DL algorithm by stacking the nodules from the current and prior CT examinations in the input channels of convolutional neural networks. We used 3,011 nodules (286 malignant) and 994 nodules (73 malignant) as development and hold-out test cohorts from the National Lung Screening Trial, respectively. The reference standard was set by histopathologic confirmation or CT follow-up of more than two years. We compared the performance of the algorithm against PanCan model 2b and a previously published single time-point DL algorithm that only processed a single CT examination. We used the area under the receiver operating characteristic curve (AUC) to measure discrimination performance and a standard permutation test with 10,000 random permutations to compute p-values. + + Results or Findings: The dual time-point DL algorithm achieved an AUC of 0.94 (95% CI: 0.91 - 0.97) on the hold-out test cohort. 
The algorithm outperformed the single time-point DL algorithm and the PanCan model, which had AUCs of 0.92 (95% CI: 0.89 - 0.95; p = 0.055) and 0.88 (95% CI: 0.85 - 0.91; p < 0.001), respectively. + + Conclusion: Deep learning algorithms using current and prior CT examinations have the potential to accurately estimate the malignancy risk of pulmonary nodules. + + Limitations: External validation is needed on other screening datasets to generate further evidence. + + Ethics committee approval: Institutional review board approval was obtained at each of the 33 centers involved in the NLST. + + Funding for this study: Research grant from MeVis Medical Solutions AG.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@article{Venk23, + author = {Venkadesh, Kiran Vaidhya and Aleef, Tajwar Abrar and Scholten, Ernst T. and Saghir, Zaigham and Silva, Mario and Sverzellati, Nicola and Pastorino, Ugo and van Ginneken, Bram and Prokop, Mathias and Jacobs, Colin}, + title = {Prior CT Improves Deep Learning for Malignancy Risk Estimation of Screening-detected Pulmonary Nodules}, + doi = {10.1148/radiol.223308}, + url = {http://dx.doi.org/10.1148/radiol.223308}, + volume = {308}, + number = {2}, + algorithm = {https://grand-challenge.org/algorithms/temporal-nodule-analysis/}, + abstract = {Background + Prior chest CT provides valuable temporal information (eg, changes in nodule size or appearance) to accurately estimate malignancy risk. + + Purpose + To develop a deep learning (DL) algorithm that uses a current and prior low-dose CT examination to estimate 3-year malignancy risk of pulmonary nodules. + + Materials and Methods + In this retrospective study, the algorithm was trained using National Lung Screening Trial data (collected from 2002 to 2004), wherein patients were imaged at most 2 years apart, and evaluated with two external test sets from the Danish Lung Cancer Screening Trial (DLCST) and the Multicentric Italian Lung Detection Trial (MILD), collected in 2004-2010 and 2005-2014, respectively. Performance was evaluated using area under the receiver operating characteristic curve (AUC) on cancer-enriched subsets with size-matched benign nodules imaged 1 and 2 years apart from DLCST and MILD, respectively. The algorithm was compared with a validated DL algorithm that only processed a single CT examination and the Pan-Canadian Early Lung Cancer Detection Study (PanCan) model. + + Results + The training set included 10 508 nodules (422 malignant) in 4902 trial participants (mean age, 64 years +- 5 [SD]; 2778 men). The size-matched external test sets included 129 nodules (43 malignant) and 126 nodules (42 malignant). The algorithm achieved AUCs of 0.91 (95% CI: 0.85, 0.97) and 0.94 (95% CI: 0.89, 0.98). It significantly outperformed the DL algorithm that only processed a single CT examination (AUC, 0.85 [95% CI: 0.78, 0.92; P = .002]; and AUC, 0.89 [95% CI: 0.84, 0.95; P = .01]) and the PanCan model (AUC, 0.64 [95% CI: 0.53, 0.74; P < .001]; and AUC, 0.63 [95% CI: 0.52, 0.74; P < .001]). 
+ + Conclusion + A DL algorithm using current and prior low-dose CT examinations was more effective at estimating 3-year malignancy risk of pulmonary nodules than established models that only use a single CT examination.}, + citation-count = {0}, + file = {Venk23.pdf:pdf\Venk23.pdf:PDF}, + journal = {Radiology}, + pages = {e223308}, + pmid = {37526548}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {f9dd350b17a4b0bce9beef6c372a375315361aad}, + all_ss_ids = {['f9dd350b17a4b0bce9beef6c372a375315361aad']}, + gscites = {1}, +} + +@conference{Vent20, + author = {de Vente, Coen and van Grinsven, Mark and De Zanet, Sandro and Mosinska, Agata and Sznitman, Raphael and Klaver, Caroline and S\'{a}nchez, Clara I.}, + booktitle = ARVO, + title = {Estimating Uncertainty of Deep Neural Networks for Age-related Macular Degeneration Grading using Optical Coherence Tomography}, + abstract = {Purpose: Deep convolutional neural networks (CNNs) are increasingly being used for eye disease screening and diagnosis. Especially the best performing variants, however, are generally overconfident in their predictions. For usefulness in clinical practice and increasing clinicians' trust on the estimated diagnosis, well-calibrated uncertainty estimates are necessary. We present a method for providing confidence scores of CNNs for age-related macular degeneration (AMD) grading in optical coherence tomography (OCT). + + + Methods: 1,264 OCT volumes from 633 patients from the European Genetic Database (EUGENDA) were graded as one of five stages of AMD (No AMD, Early AMD, Intermediate AMD, Advanced AMD: GA, and Advanced AMD: CNV). Ten different 3D DenseNet-121 models that take a full OCT volume as input were used to predict the corresponding AMD stage. These networks were all trained on the same dataset. However, each of these networks were initialized differently. The class with the maximum average softmax output of these models was used as the final prediction. The confidence measure was the normalized average softmax output for that class. + + Results: The algorithm achieved an area under the Receiver Operating Characteristic of 0.9785 and a quadratic-weighted kappa score of 0.8935. The mean uncertainty, calculated as 1 - the mean confidence score, for incorrect predictions was 1.9 times as high as the mean uncertainty for correct predictions. When only using the probability output of a single network, this ratio was 1.4. Another measure for uncertainty estimation performance is the Expected Calibration Error (ECE), where a lower value is better. When comparing the method to the probability output of a single network, the ECE improved from 0.0971 to 0.0324. Figure 1 shows examples of both confident and unconfident predictions. + + Conclusions: We present a method for improving uncertainty estimation for AMD grading in OCT, by combining the output of multiple individually trained CNNs. This increased reliability of system confidences can contribute to building trust in CNNs for retinal disease screening. Furthermore, this technique is a first step towards selective prediction in retinal disease screening, where only cases with high uncertainty predictions need to be referred for expert evaluation.}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, + month = {6}, + all_ss_ids = {fc12f80e0fe56243c26f628d311577507f34b39c}, + gscites = {2}, +} + +@article{Vent21, + author = {Coen de Vente and Luuk H. Boulogne and Kiran Vaidhya Venkadesh and Cheryl Sital and Nikolas Lessmann and Colin Jacobs and Clara I. 
S\'{a}nchez and Bram van Ginneken}, + title = {Automated COVID-19 Grading with Convolutional Neural Networks in Computed Tomography Scans: A Systematic Comparison}, + journal = TAI, + year = {2022}, + volume = {3}, + number = {2}, + pages = {129-138}, + pmid = {35582210}, + doi = {10.1109/TAI.2021.3115093}, + abstract = {Amidst the ongoing pandemic, the assessment of computed tomography (CT) images for COVID-19 presence can exceed the workload capacity of radiologists. Several studies addressed this issue by automating COVID-19 classification and grading from CT images with convolutional neural networks (CNNs). Many of these studies reported initial results of algorithms that were assembled from commonly used components. However, the choice of the components of these algorithms was often pragmatic rather than systematic and systems were not compared to each other across papers in a fair manner. We systematically investigated the effectiveness of using 3D CNNs instead of 2D CNNs for seven commonly used architectures, including DenseNet, Inception, and ResNet variants. For the architecture that performed best, we furthermore investigated the effect of initializing the network with pre-trained weights, providing automatically computed lesion maps as additional network input, and predicting a continuous instead of a categorical output. A 3D DenseNet-201 with these components achieved an area under the receiver operating characteristic curve (AUC) of 0.930 on our test set of 105 CT scans and an AUC of 0.919 on a publicly available set of 742 CT scans, a substantial improvement in comparison with a previously published 2D CNN. Predicting a continuous instead of a categorical output contributed the least to improving the model performance. This paper provides insights into the performance benefits of various components for COVID-19 classification and grading systems. We have created a challenge on grand-challenge.org to allow for a fair comparison between the results of this and future research.}, + file = {Vent21.pdf:pdf\\Vent21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/251973}, + all_ss_ids = {['41f386d38567e38132525cad9bdc7da1ad6e8f1c', '441d6e8ba2cc2314d7d44425a158c25e27f9cb96']}, + gscites = {16}, +} + +@conference{Vent21a, + author = {de Vente, Coen and Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and van Grinsven, Mark and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.}, + booktitle = ARVO, + url = {https://iovs.arvojournals.org/article.aspx?articleid=2775505}, + title = {Making AI Transferable Across OCT Scanners from Different Vendors}, + abstract = {Purpose: Deep neural networks (DNNs) for optical coherence tomography (OCT) classification have been proven to work well on images from scanners that were used during training. However, since the appearance of OCT scans can differ greatly between vendors, these DNNs often fail when they are applied to scans from different manufacturers. We propose a DNN architecture for age-related macular degeneration (AMD) grading that maintains performance on OCTs from vendors not included during training. + + Methods: 2,598 and 680 Heidelberg Spectralis OCT scans from the European Genetic Database were used for development and testing, respectively. We tested transferability with 339 AMD-enriched Topcon OCTs from the Rotterdam Study. AMD severity classification was determined manually in accordance with the Cologne Image Reading Center and Laboratory and Rotterdam Classification, respectively. 
Classifications were harmonized for the evaluation of the DNNs. The proposed DNN considers each B-scan separately using a 2D ResNet-18, and internally combines the intermediate outputs related to each B-scan using a multiple instance learning approach. Even though the proposed DNN provides both B-scan level and OCT-volume level decisions, the architecture is trained end-to-end using only full volume gradings. This specific architecture makes our method robust to the variability of scanning protocols across vendors, as it is invariant to B-scan spacing. We compare this approach to a baseline that classifies the full OCT scan directly using a 3D ResNet-18. + + Results: The quadratic weighted kappa (QWK) for the baseline method dropped from 0.852 on the Heidelberg Spectralis dataset to 0.523 on the Topcon dataset. This QWK drop was smaller (p = 0.001) for our approach, which dropped from 0.849 to 0.717. The difference in area under the Receiver Operating Characteristic (AUC) drop was also smaller (p < 0.001) for our approach (0.969 to 0.906, -6.5%) than for the baseline method (0.971 to 0.806, -17.0%). + + Conclusions: We present a DNN for AMD classification on OCT scans that transfers well to scans from vendors that were not used for development. This alleviates the need for retraining on data from these scanner types, which is an expensive process in terms of data acquisition, model development, and human annotation time. Furthermore, this increases the applicability of AI for OCT classification in broader scopes than the settings in which they were developed.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@article{Vent23, + author = {De Vente, Coen and Vermeer, Koenraad A. and Jaccard, Nicolas and Wang, He and Sun, Hongyi and Khader, Firas and Truhn, Daniel and Aimyshev, Temirgali and Zhanibekuly, Yerkebulan and Le, Tien-Dung and Galdran, Adrian and Ballester, Miguel \'{A}ngel Gonz\'{a}lez and Carneiro, Gustavo and Devika, R G and Hrishikesh, P S and Puthussery, Densen and Liu, Hong and Yang, Zekang and Kondo, Satoshi and Kasai, Satoshi and Wang, Edward and Durvasula, Ashritha and Heras, J\'{o}nathan and Zapata, Miguel \'{A}ngel and Ara\'{u}jo, Teresa and Aresta, Guilherme and Bogunovi\'{c}, Hrvoje and Arikan, Mustafa and Lee, Yeong Chan and Cho, Hyun Bin and Choi, Yoon Ho and Qayyum, Abdul and Razzak, Imran and Van Ginneken, Bram and Lemij, Hans G. and S\'{a}nchez, Clara I.}, + title = {AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge}, + doi = {10.1109/tmi.2023.3313786}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/TMI.2023.3313786}, + file = {Vent23.pdf:pdf\Vent23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {IEEE Transactions on Medical Imaging}, + citation-count = {0}, + automatic = {yes}, + pages = {1-1}, + ss_id = {7981606bf8110ec6cc64baa22d694096f7862939}, + all_ss_ids = {['7981606bf8110ec6cc64baa22d694096f7862939']}, + gscites = {6}, +} + +@mastersthesis{Verb21, + author = {Jeroen Verboom}, + title = {Deep Learning for Fracture Detection in the Radius and Ulna on Conventional Radiographs}, + abstract = {This work gives a compartmentalized overview of a fracture detection tool to detect and localize fractures in the radius and ulna on conventional radiographs using deep learning. 
+ This contrasts earlier studies in that it proposes a more efficient object detector, demonstrates the generalizability of fracture detection models to data from a different hospital, and employs more advanced class activation mapping methods for fracture localization. + Both RadboudUMC and the Jeroen Bosch Ziekenhuis provided data to create a multi-institutional dataset. + The two data sources enabled me to demonstrate how fracture detection classifiers trained on data from only one institution perform significantly worse when tested on data from another institution. + Moreover, this study demonstrates a more efficient bone localization method that yields adequate performance to be used for cropping regions of interest, and a newer fracture localization method (ScoreCAM) that outperforms its predecessors in terms of highlighting less redundant information. + I conclude that the algorithms presented in this work show the potential to be incorporated in a clinically usable fracture detection tool. + However, more research needs to be conducted using multi-institutional data for training fracture detection classifiers.}, + file = {Verb21.pdf:pdf/Verb21.pdf:PDF}, + optnote = {DIAG}, + school = {Tilburg University}, + year = {2021}, + journal = {Master thesis}, +} + +@inproceedings{Verm21, + author = {Vermazeren, Jeroen and van Eekelen, Leander and Meesters, Luca and Looijen-Salamon, Monika and Vos, Shoko and Munari, Enrico and Mercan, Caner and Ciompi, Francesco}, + booktitle = MIDL, + title = {muPEN: Multi-class PseudoEdgeNet for PD-L1 assessment}, + url = {https://openreview.net/forum?id=rHAiz2pnxkB}, + abstract = {In this paper, we take the recently presented PseudoEdgeNet model to the level of multi-class cell segmentation in histopathology images solely trained with point annotations. We tailor its loss function to the challenges of multi-class segmentation and equip it with an additional false positive loss term. We evaluate it on the assessment of tumor and immune cells in PD-L1 stained lung cancer histopathology images, and compare it with YOLOv5.}, + optnote = {DIAG, PATHOLOGY}, + year = {2021}, +} + +@article{Vers20, + author = {Versteegh, VE and Welvaart, WN and Oberink-Gustafsson, EEM and Lindenholz, A and Staaks, GHA and Schaefer-Prokop, CM}, + title = {Non-traumatic complications of a solitary rib osteochondroma; an unusual cause of hemoptysis and pneumothorax}, + doi = {10.1259/bjrcr.20200015}, + year = {2020}, + abstract = { Osteochondromas are a very common and usually asymptomatic entity which may originate anywhere in the appendicular and axial skeleton. However, the ribs are a rare site of origin and here they may prove symptomatic for mechanical reasons. In this case report, we describe an unusual case of a symptomatic osteochondroma of the rib secondary to its location and unique shape, ultimately requiring surgical intervention. }, + url = {http://dx.doi.org/10.1259/bjrcr.20200015}, + file = {Vers20.pdf:pdf\Vers20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {BJR|case reports}, + citation-count = {1}, + automatic = {yes}, + pages = {20200015}, + volume = {6}, +} + +@article{Veta18, + author = {Mitko Veta and Yujing J. Heng and Nikolas Stathonikos and Babak Ehteshami Bejnordi and Francisco Beca and Thomas Wollmann and Karl Rohr and Manan A. 
Shah and Dayong Wang and Mikael Rousson and Martin Hedlund and David Tellez and Francesco Ciompi and Erwan Zerhouni and David Lanyi and Matheus Viana and Vassili Kovalev and Vitali Liauchuk and Hady Ahmady Phoulady and Talha Qaiser and Simon Graham and Nasir Rajpoot and Erik Sjoblom and Jesper Molin and Kyunghyun Paeng and Sangheum Hwang and Sunggyun Park and Zhipeng Jia and Eric I-Chao Chang and Yan Xu and Andrew H. Beck and Paul J. van Diest and Josien P. W. Pluim}, + title = {Predicting breast tumor proliferation from whole-slide images: the {TUPAC16} challenge}, + journal = MIA, + year = {2019}, + volume = {54}, + number = {5}, + pages = {111-121}, + doi = {10.1016/j.media.2019.02.012}, + abstract = {Tumor proliferation is an important biomarker indicative of the prognosis of breast cancer patients. Assessment of tumor proliferation in a clinical setting is a highly subjective and labor-intensive task. Previous efforts to automate tumor proliferation assessment by image analysis only focused on mitosis detection in predefined tumor regions. However, in a real-world scenario, automatic mitosis detection should be performed in whole-slide images (WSIs) and an automatic method should be able to produce a tumor proliferation score given a WSI as input. To address this, we organized the TUmor Proliferation Assessment Challenge 2016 (TUPAC16) on prediction of tumor proliferation scores from WSIs. The challenge dataset consisted of 500 training and 321 testing breast cancer histopathology WSIs. In order to ensure fair and independent evaluation, only the ground truth for the training dataset was provided to the challenge participants. The first task of the challenge was to predict mitotic scores, i.e., to reproduce the manual method of assessing tumor proliferation by a pathologist. The second task was to predict the gene expression based PAM50 proliferation scores from the WSI. The best performing automatic method for the first task achieved a quadratic-weighted Cohen's kappa score of k = 0.567, 95% CI [0.464, 0.671] between the predicted scores and the ground truth. For the second task, the predictions of the top method had a Spearman's correlation coefficient of r = 0.617, 95% CI [0.581 0.651] with the ground truth. This was the first comparison study that investigated tumor proliferation assessment from WSIs. The achieved results are promising given the difficulty of the tasks and weakly-labeled nature of the ground truth. However, further research is needed to improve the practical utility of image analysis methods for this task.}, + file = {Veta18.pdf:pdf\\Veta18.pdf:PDF}, + optnote = {DIAG}, + pmid = {30861443}, + month = {5}, + gsid = {11612591462765985317}, + gscites = {205}, + ss_id = {c377304af802f1ea67c6d696806a69f516a4472f}, + all_ss_ids = {['c377304af802f1ea67c6d696806a69f516a4472f']}, +} + +@inproceedings{Vijv16, + author = {Koen Vijverberg and Mohsen Ghafoorian and Inge W M van Uden and Frank-Erik de Leeuw and Bram Platel and Tom Heskes}, + title = {A single-layer network unsupervised feature learning method for white matter hyperintensity segmentation}, + booktitle = MI, + year = {2016}, + series = SPIE, + doi = {10.1117/12.2216409}, + url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?articleid=2507199}, + abstract = {Cerebral small vessel disease (SVD) is a disorder frequently found among the old people and is associated with deterioration in cognitive performance, parkinsonism, motor and mood impairments. 
White matter hyperintensities (WMH) as well as lacunes, microbleeds and subcortical brain atrophy are part of the spectrum of image findings, related to SVD. Accurate segmentation of WMHs is important for prognosis and diagnosis of multiple neurological disorders such as MS and SVD. Almost all of the published (semi-)automated WMH detection models employ multiple complex hand-crafted features, which require in-depth domain knowledge. In this paper we propose to apply a single-layer network unsupervised feature learning (USFL) method to avoid hand-crafted features, but rather to automatically learn a more efficient set of features. Experimental results show that a computer aided detection system with a USFL system outperforms a hand-crafted approach. Moreover, since the two feature sets have complementary properties, a hybrid system that makes use of both hand-crafted and unsupervised learned features, shows a significant performance boost compared to each system separately, getting close to the performance of an independent human expert.}, + file = {:pdf\\Vijv16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {1469412038484421444}, + gscites = {3}, + ss_id = {259805d3206e98af32a66eaf8fc0dfe73f595c4d}, + all_ss_ids = {['259805d3206e98af32a66eaf8fc0dfe73f595c4d']}, +} + +@article{Vina22, + author = {Vinayahalingam, Shankeeth and van Nistelrooij, Niels and van Ginneken, Bram and Bressem, Keno and Troltzsch, Daniel and Heiland, Max and Fl\"{u}gge, Tabea and Gaudin, Robert}, + title = {Detection of mandibular fractures on panoramic radiographs using deep learning.}, + doi = {10.1038/s41598-022-23445-w}, + issue = {1}, + number = {1}, + pages = {19596}, + volume = {12}, + abstract = {Mandibular fractures are among the most frequent facial traumas in oral and maxillofacial surgery, accounting for 57% of cases. An accurate diagnosis and appropriate treatment plan are vital in achieving optimal re-establishment of occlusion, function and facial aesthetics. This study aims to detect mandibular fractures on panoramic radiographs (PR) automatically. 1624 PR with fractures were manually annotated and labelled as a reference. A deep learning approach based on Faster R-CNN and Swin-Transformer was trained and validated on 1640 PR with and without fractures. Subsequently, the trained algorithm was applied to a test set consisting of 149 PR with and 171 PR without fractures. The detection accuracy and the area-under-the-curve (AUC) were calculated. The proposed method achieved an F1 score of 0.947 and an AUC of 0.977. 
Deep learning-based assistance of clinicians may reduce the misdiagnosis and hence the severe complications.}, + file = {Vina22.pdf:pdf\\Vina22.pdf:PDF}, + journal = {Scientific reports}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36379971}, + publisher = {Springer Science and Business Media {LLC}}, + year = {2022}, + ss_id = {6df1dd59f69d05d00e0b41b3220781b614584362}, + all_ss_ids = {['6df1dd59f69d05d00e0b41b3220781b614584362']}, + gscites = {3}, +} + +@article{Vina23, + author = {Vinayahalingam, Shankeeth and Kempers, Steven and Schoep, Julian and Hsu, Tzu-Ming Harry and Moin, David Anssari and van Ginneken, Bram and Fl\"{u}gge, Tabea and Hanisch, Marcel and Xi, Tong}, + title = {Intra-oral scan segmentation using deep learning}, + doi = {10.1186/s12903-023-03362-8}, + url = {http://dx.doi.org/10.1186/s12903-023-03362-8}, + volume = {23}, + abstract = {Abstract + Objective + Intra-oral scans and gypsum cast scans (OS) are widely used in orthodontics, prosthetics, implantology, and orthognathic surgery to plan patient-specific treatments, which require teeth segmentations with high accuracy and resolution. Manual teeth segmentation, the gold standard up until now, is time-consuming, tedious, and observer-dependent. This study aims to develop an automated teeth segmentation and labeling system using deep learning. + + Material and methods + As a reference, 1750 OS were manually segmented and labeled. A deep-learning approach based on PointCNN and 3D U-net in combination with a rule-based heuristic algorithm and a combinatorial search algorithm was trained and validated on 1400 OS. Subsequently, the trained algorithm was applied to a test set consisting of 350 OS. The intersection over union (IoU), as a measure of accuracy, was calculated to quantify the degree of similarity between the annotated ground truth and the model predictions. + + Results + The model achieved accurate teeth segmentations with a mean IoU score of 0.915. The FDI labels of the teeth were predicted with a mean accuracy of 0.894. The optical inspection showed excellent position agreements between the automatically and manually segmented teeth components. Minor flaws were mostly seen at the edges. + + Conclusion + The proposed method forms a promising foundation for time-effective and observer-independent teeth segmentation and labeling on intra-oral scans. + + Clinical significance + Deep learning may assist clinicians in virtual treatment planning in orthodontics, prosthetics, implantology, and orthognathic surgery. The impact of using such models in clinical practice should be explored.}, + citation-count = {0}, + file = {Vina23.pdf:pdf\Vina23.pdf:PDF}, + journal = {BMC Oral Health}, + optnote = {DIAG, RADIOLOGY}, + pmid = {37670290}, + year = {2023}, +} + +@article{Vink88, + author = {P. Vink and N. Karssemeijer}, + title = {Low back muscle activity and pelvic rotation during walking}, + journal = AE, + year = {1988}, + volume = {178}, + pages = {455--460}, + doi = {10.1007/BF00306052}, + abstract = {Gait variables, pelvic rotations in the frontal and sagittal plane and RA-EMG (rectified and averaged EMG) of the three columns of the intrinsic lumbar back muscles (= ILBM) were recorded simultaneously during 48 succeeding strides of 11 subjects on a treadmill. Bilateral activity is found in all parts of the ILBM during the double support phase. After right heel strike the right ILBM (and after left heel strike the left) show in most cases more activity than the contralateral ILBM. 
This is especially so in the intermediate and lateral columns, which consist mainly of the longissimus thoracis and the iliocostalis lumborum muscle and less so in the medial column, made up mainly by the multifidus and spinalis muscle. This difference is probably due to the difference in moment arm for the two directions. Pelvic rotations are described, but no evident relationships between pelvic rotations in the different planes and ILBM-activity could be seen, probably because the changes in the position of the torso are of more importance.}, + file = {Vink88.pdf:pdf/Vink88.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {5}, + pmid = {3177896}, + month = {8}, + ss_id = {8f9a31c0e676328f5beb217a9d0a17773eb663ab}, + all_ss_ids = {['8f9a31c0e676328f5beb217a9d0a17773eb663ab']}, + gscites = {19}, +} + +@article{Viss12, + author = {Visser, Roelant and Veldkamp, Wouter J H. and Beijerinck, David and Bun, Petra A M. and Deurenberg, Jan J M. and Imhof-Tas, Mechli W. and Schuur, Klaas H. and Snoeren, Miranda M. and den Heeten, Gerard J. and Karssemeijer, Nico and Broeders, Mireille J M.}, + title = {Increase in perceived case suspiciousness due to local contrast optimisation in digital screening mammography}, + journal = ER, + year = {2012}, + volume = {22}, + pages = {908--914}, + doi = {10.1007/s00330-011-2320-2}, + abstract = {To determine the influence of local contrast optimisation on diagnostic accuracy and perceived suspiciousness of digital screening mammograms. Data were collected from a screening region in the Netherlands and consisted of 263 digital screening cases (153 recalled, 110 normal). Each case was available twice, once processed with a tissue equalisation (TE) algorithm and once with local contrast optimisation (PV). All cases had digitised previous mammograms. For both algorithms, the probability of malignancy of each finding was scored independently by six screening radiologists. Perceived case suspiciousness was defined as the highest probability of malignancy of all findings of a radiologist within a case. Differences in diagnostic accuracy of the processing algorithms were analysed by comparing the areas under the receiver operating characteristic curves (A(z)). Differences in perceived case suspiciousness were analysed using sign tests. There was no significant difference in A(z) (TE: 0.909, PV 0.917, P = 0.46). For all radiologists, perceived case suspiciousness using PV was higher than using TE more often than vice versa (ratio: 1.14-2.12). This was significant (P < 0.0083) for four radiologists. Optimisation of local contrast by image processing may increase perceived case suspiciousness, while diagnostic accuracy may remain similar. Variations among different image processing algorithms for digital screening mammography are large. Current algorithms still aim for optimal local contrast with a low dynamic range. Although optimisation of contrast may increase sensitivity, diagnostic accuracy is probably unchanged. 
Increased local contrast may render both normal and abnormal structures more conspicuous.}, + file = {Viss12.pdf:pdf\\Viss12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {4}, + pmid = {22071778}, + month = {11}, + gsid = {8488911132953566154}, + gscites = {19}, + ss_id = {cebcb3fcae6f033310f2b02000109d815f58ce4f}, + all_ss_ids = {['cebcb3fcae6f033310f2b02000109d815f58ce4f']}, +} + +@article{Vlie22, + author = {Rozemarijn Vliegenthart and Andreas Fouras and Colin Jacobs and Nickolas Papanikolaou}, + title = {Innovations in thoracic imaging: CT, radiomics, AI and x-ray velocimetry}, + journal = Respirology, + year = {2022}, + doi = {https://doi.org/10.1111/resp.14344}, + file = {Vlie22.pdf:pdf\\Vlie22.pdf:PDF}, + url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/resp.14344}, + abstract = {Abstract In recent years, pulmonary imaging has seen enormous progress, with the introduction, validation and implementation of new hardware and software. There is a general trend from mere visual evaluation of radiological images to quantification of abnormalities and biomarkers, and assessment of 'non visual' markers that contribute to establishing diagnosis or prognosis. Important catalysts to these developments in thoracic imaging include new indications (like computed tomography [CT] lung cancer screening) and the COVID-19 pandemic. This review focuses on developments in CT, radiomics, artificial intelligence (AI) and x-ray velocimetry for imaging of the lungs. Recent developments in CT include the potential for ultra-low-dose CT imaging for lung nodules, and the advent of a new generation of CT systems based on photon-counting detector technology. Radiomics has demonstrated potential towards predictive and prognostic tasks particularly in lung cancer, previously not achievable by visual inspection by radiologists, exploiting high dimensional patterns (mostly texture related) on medical imaging data. Deep learning technology has revolutionized the field of AI and as a result, performance of AI algorithms is approaching human performance for an increasing number of specific tasks. X-ray velocimetry integrates x-ray (fluoroscopic) imaging with unique image processing to produce quantitative four dimensional measurement of lung tissue motion, and accurate calculations of lung ventilation.}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35965430}, + volume = {27}, + number = {10}, + pages = {818--833}, + ss_id = {08c1b02b22c53480dcf792b8346a5ae416a7bc00}, + all_ss_ids = {['08c1b02b22c53480dcf792b8346a5ae416a7bc00']}, + gscites = {12}, +} + +@article{Voge05, + author = {W. V. Vogel and J. A. van Dalen and H. Huisman and W. J. G. Oyen and N. Karssemeijer}, + title = {Sliced alternating {DICOM} series: convenient visualisation of image fusion on {PACS}}, + journal = EJNMMI, + year = {2005}, + volume = {32}, + pages = {247--248}, + doi = {10.1007/s00259-004-1711-x}, + file = {Voge05.pdf:pdf\\Voge05.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TRACER}, + number = {2}, + pmid = {15690224}, + month = {11}, + gsid = {7276742739793309998}, + gscites = {6}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/47875}, + ss_id = {f215d3cfc9eca1506102addb38c3556bbfbcebfb}, + all_ss_ids = {['f215d3cfc9eca1506102addb38c3556bbfbcebfb']}, +} + +@article{Voge07, + author = {W. V. Vogel and J. A. van Dalen and B. Wiering and H. Huisman and F. H. M. Corstens and T. J. M. Ruers and W. J. G. 
Oyen}, + title = {Evaluation of image registration in {PET}/{CT} of the liver and recommendations for optimized imaging}, + journal = JNM, + year = {2007}, + volume = {48}, + pages = {910--919}, + doi = {10.2967/jnumed.107.041517}, + abstract = {{M}ultimodality {PET}/{CT} of the liver can be performed with an integrated (hybrid) {PET}/{CT} scanner or with software fusion of dedicated {PET} and {CT}. {A}ccurate anatomic correlation and good image quality of both modalities are important prerequisites, regardless of the applied method. {R}egistration accuracy is influenced by breathing motion differences on {PET} and {CT}, which may also have impact on (attenuation correction-related) artifacts, especially in the upper abdomen. {T}he impact of these issues was evaluated for both hybrid {PET}/{CT} and software fusion, focused on imaging of the liver. {METHODS}: {T}hirty patients underwent hybrid {PET}/{CT}, 20 with {CT} during expiration breath-hold ({EB}) and 10 with {CT} during free breathing ({FB}). {T}en additional patients underwent software fusion of dedicated {PET} and dedicated expiration breath-hold {CT} ({SF}). {T}he image registration accuracy was evaluated at the location of liver borders on {CT} and uncorrected {PET} images and at the location of liver lesions. {A}ttenuation-correction artifacts were evaluated by comparison of liver borders on uncorrected and attenuation-corrected {PET} images. {CT} images were evaluated for the presence of breathing artifacts. {RESULTS}: {I}n {EB}, 40\% of patients had an absolute registration error of the diaphragm in the craniocaudal direction of >1 cm (range, -16 to 44 mm), and 45\% of lesions were mispositioned >1 cm. {I}n 50\% of cases, attenuation-correction artifacts caused a deformation of the liver dome on {PET} of >1 cm. {P}oor compliance to breath-hold instructions caused {CT} artifacts in 55\% of cases. {I}n {FB}, 30\% had registration errors of >1 cm (range, -4 to 16 mm) and {PET} artifacts were less extensive, but all {CT} images had breathing artifacts. {A}s {SF} allows independent alignment of {PET} and {CT}, no registration errors or artifacts of >1 cm of the diaphragm occurred. {CONCLUSION}: {H}ybrid {PET}/{CT} of the liver may have significant registration errors and artifacts related to breathing motion. {T}he extent of these issues depends on the selected breathing protocol and the speed of the {CT} scanner. {N}o protocol or scanner can guarantee perfect image fusion. {O}n the basis of these findings, recommendations were formulated with regard to scanner requirements, breathing protocols, and reporting.}, + file = {Voge07.pdf:pdf\\Voge07.pdf:PDF}, + optnote = {DIAG, RADIOLOGY, TRACER}, + number = {6}, + pmid = {17504865}, + month = {6}, + gsid = {5484873770485079748}, + gscites = {45}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/51475}, + ss_id = {3abdd710e114c17d1575d5f3342cf6a941dc9264}, + all_ss_ids = {['3abdd710e114c17d1575d5f3342cf6a941dc9264']}, +} + +@inproceedings{Vos07, + author = {Vos, P.C. and Hambrock, T. and F\"utterer, J.J. and Hulsbergen-van De Kaa, C.A. and Barentsz, J.O. 
and Huisman, H.J.}, + title = {Effect of calibration on computerized analysis of prostate lesions using quantitative dynamic contrast-enhanced magnetic resonance imaging}, + booktitle = MICAD, + year = {2007}, + volume = {6514}, + series = SPIE, + publisher = {SPIE}, + optnote = {DIAG, RADIOLOGY}, + gsid = {13280836963282463277}, + gscites = {7}, + ss_id = {795bde2ea7cf38546ce966acc3899fbef2bdeeb9}, + all_ss_ids = {['795bde2ea7cf38546ce966acc3899fbef2bdeeb9']}, +} + +@article{Vos08, + author = {P. C. Vos and T. Hambrock and C. A. Hulsbergen-van de Kaa and J. J. F\"utterer and J. O. Barentsz and H. J. Huisman}, + title = {Computerized analysis of prostate lesions in the peripheral zone using dynamic contrast enhanced {MRI}}, + journal = MP, + year = {2008}, + volume = {35}, + pages = {888--899}, + doi = {10.1118/1.2836419}, + abstract = {{A} novel automated computerized scheme has been developed for determining a likelihood measure of malignancy for cancer suspicious regions in the prostate based on dynamic contrast-enhanced magnetic resonance imaging ({MRI}) ({DCE}-{MRI}) images. {O}ur database consisted of 34 consecutive patients with histologically proven adenocarcinoma in the peripheral zone of the prostate. {B}oth carcinoma and non-malignant tissue were annotated in consensus on {MR} images by a radiologist and a researcher using whole mount step-section histopathology as standard of reference. {T}he annotations were used as regions of interest ({ROI}s). {A} feature set comprising pharmacokinetic parameters and a {T}1 estimate was extracted from the {ROI}s to train a support vector machine as classifier. {T}he output of the classifier was used as a measure of likelihood of malignancy. {D}iagnostic performance of the scheme was evaluated using the area under the {ROC} curve. {T}he diagnostic accuracy obtained for differentiating prostate cancer from non-malignant disorders in the peripheral zone was 0.83 (0.75-0.92). {T}his suggests that it is feasible to develop a computer aided diagnosis system capable of characterizing prostate cancer in the peripheral zone based on {DCE}-{MRI}.}, + file = {Vos08.pdf:pdf\\Vos08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {3}, + pmid = {18404925}, + month = {2}, + gsid = {13212291345759536000}, + gscites = {104}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/71452}, + ss_id = {f83088af99113dbba4978eea89b86d853b6e6a4c}, + all_ss_ids = {['f83088af99113dbba4978eea89b86d853b6e6a4c']}, +} + +@inproceedings{Vos08a, + author = {Vos, P. C. and Hambrock, T. and Barentsz, J. O. and Huisman, H. J.}, + title = {Combining T2-weighted with dynamic {MR} images for computerized classification of prostate lesions}, + booktitle = MICAD, + year = {2008}, + volume = {6915}, + series = SPIE, + publisher = {SPIE}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {10763355599207768493}, + gscites = {12}, + ss_id = {4a7d40986eb610dcc4ad156951d40518370e8682}, + all_ss_ids = {['4a7d40986eb610dcc4ad156951d40518370e8682']}, +} + +@inproceedings{Vos09, + author = {P. C. Vos and T. Hambrock and J. O. Barentsz and H. J. Huisman}, + title = {Automated calibration for computerized analysis of prostate lesions using pharmacokinetic magnetic resonance images}, + booktitle = MICCAI, + year = {2009}, + volume = {12}, + series = LNCS, + pages = {836--843}, + abstract = {{T}he feasibility of an automated calibration method for estimating the arterial input function when calculating pharmacokinetic parameters from {D}ynamic {C}ontrast {E}nhanced {MRI} is shown. 
{I}n a previous study, it was demonstrated that the computer aided diagnoses ({CAD}x) system performs optimal when per patient calibration was used, but required manual annotation of reference tissue. {I}n this study we propose a fully automated segmentation method that tackles this limitation and tested the method with our {CAD}x system when discriminating prostate cancer from benign areas in the peripheral zone. {A} method was developed to automatically segment normal peripheral zone tissue ({PZ}). {C}ontext based segmentation using the {O}tsu histogram based threshold selection method and by {H}essian based blob detection, was developed to automatically select {PZ} as reference tissue for the per patient calibration. {I}n 38 consecutive patients carcinoma, benign and normal tissue were annotated on {MR} images by a radiologist and a researcher using whole mount step-section histopathology as standard of reference. {A} feature set comprising pharmacokinetic parameters was computed for each {ROI} and used to train a support vector machine ({SVM}) as classifier. {I}n total 42 malignant, 29 benign and 37 normal regions were annotated. {T}he diagnostic accuracy obtained for differentiating malignant from benign lesions using a conventional general patient plasma profile showed an accuracy of 0.65 (0.54-0.76). {U}sing the automated segmentation per patient calibration method the diagnostic value improved to 0.80 (0.71-0.88), whereas the manual segmentation per patient calibration showed a diagnostic performance of 0.80 (0.70-0.90). {T}hese results show that an automated per-patient calibration is feasible, a significant better discriminating performance compared to the conventional fixed calibration was obtained and the diagnostic accuracy is similar to using manual per-patient calibration.}, + file = {Vos09.pdf:pdf\\Vos09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {20426189}, + gsid = {16369511798735227224}, + gscites = {15}, + ss_id = {fb2365989a762a4e4bf749a126266d6a1cc1de68}, + all_ss_ids = {['fb2365989a762a4e4bf749a126266d6a1cc1de68']}, +} + +@article{Vos10, + author = {P. C. Vos and T. Hambrock and J. O. Barentsz and H. J. Huisman}, + title = {Computer-assisted analysis of peripheral zone prostate lesions using {T2-weighted} and dynamic contrast enhanced {T1-weighted MRI}}, + journal = PMB, + year = {2010}, + volume = {55}, + pages = {1719--1734}, + doi = {10.1088/0031-9155/55/6/012}, + abstract = {{I}n this study, computer-assisted analysis of prostate lesions was researched by combining information from two different magnetic resonance ({MR}) modalities: {T}2-weighted ({T}2-w) and dynamic contrast-enhanced ({DCE}) {T}1-w images. {T}wo issues arise when incorporating {T}2-w images in a computer-aided diagnosis ({CAD}x) system: {T}2-w values are position as well as sequence dependent and images can be misaligned due to patient movement during the acquisition. {A} method was developed that computes {T}2 estimates from a {T}2-w and proton density value and a known sequence model. {A} mutual information registration strategy was implemented to correct for patient movement. {G}lobal motion is modelled by an affine transformation, while local motion is described by a volume preserving non-rigid deformation based on {B}-splines. {T}he additional value to the discriminating performance of a {DCE} {T}1-w-based {CAD}x system was evaluated using bootstrapped {ROC} analysis. {T}2 estimates were successfully computed in 29 patients. 
{T}2 values were extracted and added to the {CAD}x system from 39 malignant, 19 benign and 29 normal annotated regions. {T}2 values alone achieved a diagnostic accuracy of 0.85 (0.77-0.92) and showed a significantly improved discriminating performance of 0.89 (0.81-0.95), when combined with {DCE} {T}1-w features. {I}n conclusion, the study demonstrated a simple {T}2 estimation method that has a diagnostic performance such that it complements a {DCE} {T}1-w-based {CAD}x system in discriminating malignant lesions from normal and benign regions. {A}dditionally, the {T}2 estimate is beneficial to visual inspection due to the removed coil profile and fixed window and level settings.}, + file = {Vos10.pdf:pdf\\Vos10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {20197602}, + month = {3}, + gsid = {8022713700082473019}, + gscites = {116}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/89494}, + ss_id = {c27e6795e38902084b5b4d113ff6fff3927c1873}, + all_ss_ids = {['c27e6795e38902084b5b4d113ff6fff3927c1873']}, +} + +@conference{Vos10a, + author = {P. C. Vos and J J F\"utterer and N. Karssemeijer and J O Barentsz and H. Huisman}, + title = {Computer-assisted Diagnosis of Prostate Cancer with Multimodal {3T} {MR} Imaging}, + booktitle = RSNA, + year = {2010}, + abstract = {PURPOSE/AIM 1. Demonstrate our in-house developed clinical diagnostic workstation. 2. Present multi-parameter (T2-w, DWI, DCE-MRI and MRS) MRI data. 3. Demonstrate prostate cancer interpretation with histopathologic correlation. 4. Train the radiologist in using the CAD system. 5. Investigate the effect of CAD on the discriminative performance of the radiologist. CONTENT ORGANIZATION The workstation enables joint analysis of anatomic, functional, metabolic MR imaging, CAD and structured reporting. Training cases are presented to instruct the radiologist how to use the system, how to interpret MR images and provide correlation with whole-mount step-section histology tumor maps. The radiologist can diagnose a number of test cases and use CAD as a 2nd opinion. The discriminative performance of the radiologist is presented along with added value of CAD. SUMMARY The clinical value of Multi-Modal MR imaging for diagnosing prostate cancer is demonstrated to the radiologist. Certain imaging features (alone or in combination) MRI can help to differentiate prostate cancer from benign cases such as prostatitis or benign prostatic hyperplasia (BPH), but requires experience. The potential benefit of using CAD as a 2nd opinion is demonstrated in a clinical workflow setting.}, + optnote = {DIAG, RADIOLOGY}, +} + +@phdthesis{Vos11, + author = {Pieter Vos}, + title = {Computer Aided Diagnosis of Prostate Cancer with Magnetic Resonance Imaging}, + year = {2011}, + url = {http://repository.ubn.ru.nl/handle/2066/91387}, + abstract = {Prostate cancer is the most commonly diagnosed cancer among men and remains the second leading cause of cancer death in men. In 2010, more than 217,000 men in the United States (US) were diagnosed with prostate cancer. The American Cancer Society estimated that approximately 32,000 men died from the disease in the US in 2010. In Europe, more than 338,000 males were diagnosed with prostate cancer in 2008 and almost 71,000 men died because of prostate cancer. The growth of the population and, more importantly, the aging population is a major cause of the high number of prostate cancer cases and will contribute to an increase in cancer burden. 
For that reason, there is a ongoing debate whether screening for prostate cancer should be performed. Screening can help find cancers in an early stage when they are more easily cured. An important trial to determine the effect of screening for breast cancer was performed between 1977 and 1984 in Sweden. The trial showed that after seven years of follow up a reduction of 31\% in breast cancer mortality was achieved when screening was applied. This led to the introduction of breast cancer screening in most western countries. Recently, several studies have been performed that looked at whether prostate cancer screening with the prostate-specific antigen (PSA) blood test saves lives. For example, the European Randomized Study of Screening for Prostate Cancer (ERSPC) has shown significant reductions in PCa mortality in an intention-to-screen analysis. The reduction in mortality comes, however, at the price of over-diagnosis and over-treatment. In the study of Schr\"oder et al. the authors specifically warn that, in order to prevent one death from prostate cancer, 1410 men would need to be screened and 48 additional cases of prostate cancer would need to be treated. Hence, controversy still exists regarding the effectiveness of prostate cancer screening. The ongoing debate is essentially a public demand for a more reliable, non-invasive method that has a sufficiently high specificity in detecting prostate cancer. Magnetic resonance imaging (MRI) has evolved this decade to a competitive imaging modality for the localization of prostate cancer. The non-invasive nature and ability to provide structural, functional and metabolic information in a single examination makes the technique suitable to improve specificity when screening for prostate cancer. Many studies showed that multiparametric MRI, consisting of high resolution 3D T2-weighted sequences, 3D dynamic contrast enhanced MRI, 3D diffusion weighted imaging or spectroscopic imaging, leads to a sufficiently high accuracy for prostate cancer detection. Unfortunately, multiparametric MRI analysis requires a high level of expertise, suffers from observer variability and is a labor intensive procedure. For that reason the technique is considered cost inefficient and, as a result, has not been implemented in a screening environment. Computer aided diagnosis (CAD) can be of benefit to improve the consistency and accuracy of interpreting radiographic images by the radiologist. Additionally, it can speed up the reading time considerably. CAD research has been successfully pursued in other diagnostic areas such as mammography, CT chest, CT colonography as well as retinal imaging. However, published literature on prostate CAD research is still relatively immature. The motivation of this thesis was therefore to research state of the art CAD methods that can assist in a better diagnosis of prostate cancer, reduce the observer variability and be of benefit to a more efficient workflow for the radiologist.}, + copromotor = {H. Huisman}, + file = {Vos11.pdf:pdf/Vos11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {N. Karssemeijer and J. O. Barentsz}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@conference{Vos12, + author = {Vos, Eline and Litjens, G. 
and Kobus, Thiele and Hambrock, Thomas and Hulsbergen van de Kaa, Christina and Huisman, Henkjan and Scheenen, Tom}, + title = {Dynamic contrast enhanced {MR} imaging for the assessment of prostate cancer aggressiveness at 3{T}}, + booktitle = ISMRM, + year = {2012}, + file = {Vos12.pdf:pdf\\Vos12.pdf:PDF}, + optnote = {BioMR, DIAG, RADIOLOGY}, +} + +@article{Vos12a, + author = {Vos, P. C. and Barentsz, J. O. and Karssemeijer, N. and Huisman, H. J.}, + title = {Automatic computer-aided detection of prostate cancer based on multiparametric magnetic resonance image analysis}, + journal = PMB, + year = {2012}, + volume = {57}, + pages = {1527--1542}, + doi = {10.1088/0031-9155/57/6/1527}, + abstract = {In this paper, a fully automatic computer-aided detection (CAD) method is proposed for the detection of prostate cancer. The CAD method consists of multiple sequential steps in order to detect locations that are suspicious for prostate cancer. In the initial stage, a voxel classification is performed using a Hessian-based blob detection algorithm at multiple scales on an apparent diffusion coefficient map. Next, a parametric multi-object segmentation method is applied and the resulting segmentation is used as a mask to restrict the candidate detection to the prostate. The remaining candidates are characterized by performing histogram analysis on multiparametric MR images. The resulting feature set is summarized into a malignancy likelihood by a supervised classifier in a two-stage classification approach. The detection performance for prostate cancer was tested on a screening population of 200 consecutive patients and evaluated using the free response operating characteristic methodology. The results show that the CAD method obtained sensitivities of 0.41, 0.65 and 0.74 at false positive (FP) levels of 1, 3 and 5 per patient, respectively. In conclusion, this study showed that it is feasible to automatically detect prostate cancer at a FP rate lower than systematic biopsy. The CAD method may assist the radiologist to detect prostate cancer locations and could potentially guide biopsy towards the most aggressive part of the tumour.}, + file = {Vos12a.pdf:pdf\\Vos12a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + number = {6}, + pmid = {22391091}, + month = {3}, + gsid = {13455779673289424103}, + gscites = {119}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/110343}, + ss_id = {aa4b53329efa2fa1bc2aae5ab34baa945945125b}, + all_ss_ids = {['aa4b53329efa2fa1bc2aae5ab34baa945945125b']}, +} + +@article{Vos13, + author = {Vos, Eline K and Litjens, G. 
and Kobus, Thiele and Hambrock, Thomas and Kaa, Christina A and Barentsz, Jelle O and Huisman, Henkjan and Scheenen, Tom WJ}, + title = {Assessment of Prostate Cancer Aggressiveness Using Dynamic Contrast-enhanced Magnetic Resonance Imaging at {3T}}, + journal = EU, + year = {2013}, + volume = {64}, + pages = {448-455}, + url = {https://www.sciencedirect.com/science/article/pii/S0302283813005137?via%3Dihub}, + file = {Vos13.pdf:pdf\\Vos13.pdf:PDF}, + optnote = {BioMR, DIAG, RADIOLOGY}, + pmid = {23751135}, + month = {9}, + gsid = {115460116805624360}, + gscites = {162}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/117989}, + ss_id = {7c673c88257a140be530d9f6503e4737c06d8a7a}, + all_ss_ids = {['7c673c88257a140be530d9f6503e4737c06d8a7a']}, +} + +@conference{Vos14c, + author = {Eline Vos and Thiele Kobus and Geert Litjens and Thomas Hambrock and Christina Hulsbergen - van de Kaa and Marnix Maas and Tom Scheenen}, + title = {Multiparametric {MR} imaging for the assessment of prostate cancer aggressiveness at 3 {T}esla}, + booktitle = ISMRM, + year = {2014}, + optnote = {BioMR, DIAG, RADIOLOGY}, +} + +@article{Vos15, + author = {Vos, Eline K. and Kobus, Thiele and Litjens, Geert J S. and Hambrock, Thomas and Hulsbergen-van de Kaa, Christina A. and Barentsz, Jelle O. and Maas, Marnix C. and Scheenen, Tom W J.}, + title = {Multiparametric Magnetic Resonance Imaging for Discriminating Low-Grade From High-Grade Prostate Cancer}, + volume = {50}, + pages = {490-497}, + doi = {10.1097/RLI.0000000000000157}, + abstract = {The aim of this study was to determine and validate the optimal combination of parameters derived from 3-T diffusion-weighted imaging, dynamic contrast-enhanced imaging, and magnetic resonance (MR) spectroscopic imaging for discriminating low-grade from high-grade prostate cancer (PCa).The study was approved by the institutional review board, and the need for informed consent was waived. Ninety-four patients with PCa who had undergone multiparametric MR imaging (MRI) before prostatectomy were included. Cancer was indicated on T2-weighted images, blinded to any functional data, with prostatectomy specimens as the reference standard. Tumors were classified as low grade or high grade based on Gleason score; peripheral zone (PZ) and transition zone (TZ) tumors were analyzed separately. In a development set (43 patients), the optimal combination of multiparametric MRI parameters was determined using logistic regression modeling. Subsequently, this combination was evaluated in a separate validation set (51 patients).In the PZ, the 25th percentile of apparent diffusion coefficient (ADC) derived from diffusion-weighted imaging and washout (WO25) derived from dynamic contrast-enhanced MRI offered the optimal combination of parameters. In the TZ, WO25 and the choline over spermine + creatine ratio (C/SC) derived from MR spectroscopic imaging showed the highest discriminating performance. Using the models built with the development set, 48 (74\%) of 65 cancer lesions were classified correctly in the validation set.Multiparametric MRI is a useful tool for the discrimination between low-grade and high-grade PCa and performs better than any individual functional parameter in both the PZ and TZ. The 25th percentile of ADC + WO25 offered the optimal combination in the PZ, and the choline over spermine + creatine ratio + WO25 offered the optimal combination in the TZ. 
The ADC parameter has no additional value for the assessment of PCa aggressiveness in the TZ.}, + file = {Vos15.pdf:pdf\\Vos15.pdf:PDF}, + journal = IR, + month = {Aug.}, + optnote = {ProstateCancer, RADIOLOGY}, + pmid = {25867656}, + year = {2015}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/153448}, +} + +@article{Vos19, + author = {de Vos, Bob D. and Wolterink, Jelmer M. and Leiner, Tim and de Jong, Pim A. and Lessmann, Nikolas and Isgum, Ivana}, + title = {Direct automatic coronary calcium scoring in cardiac and chest {CT}}, + journal = TMI, + year = {2019}, + volume = {38}, + pages = {2127--38}, + doi = {10.1109/TMI.2019.2899534}, + optnote = {DIAG, RADIOLOGY}, + file = {Vos19.pdf:pdf\\Vos19.pdf:PDF}, + ss_id = {825003ce25765c8b21815246ebf739eafcce5621}, + all_ss_ids = {['825003ce25765c8b21815246ebf739eafcce5621']}, + gscites = {72}, +} + +@article{Vos21, + author = {de Vos, Bob D. and Lessmann, Nikolas and de Jong, Pim A. and Isgum, Ivana}, + title = {Deep Learning-Quantified Calcium Scores for Automatic Cardiovascular Mortality Prediction at Lung Screening Low-Dose CT}, + journal = {Radiology: Cardiothoracic Imaging}, + volume = {3}, + number = {2}, + pages = {e190219}, + year = {2021}, + doi = {10.1148/ryct.2021190219}, + pmid = {33969304}, + abstract = {Purpose: To examine the prognostic value of location-specific arterial calcification quantities at lung screening low-dose CT for the prediction of cardiovascular disease (CVD) mortality. Materials and Methods: This retrospective study included 5564 participants who underwent low-dose CT from the National Lung Screening Trial between August 2002 and April 2004, who were followed until December 2009. A deep learning network was trained to quantify six types of vascular calcification: thoracic aorta calcification (TAC); aortic and mitral valve calcification; and coronary artery calcification (CAC) of the left main, the left anterior descending, and the right coronary artery. TAC and CAC were determined in six evenly distributed slabs spatially aligned among chest CT images. CVD mortality prediction was performed with multivariable logistic regression using least absolute shrinkage and selection operator. The methods were compared with semiautomatic baseline prediction using self-reported participant characteristics, such as age, history of smoking, and history of illness. Statistical significance between the prediction models was tested using the nonparametric DeLong test. Results: The prediction model was trained with data from 4451 participants (median age, 61 years; 37.9\% women) and then tested on data from 1113 participants (median age, 61 years; 37.9\% women). The prediction model using calcium scores achieved a C statistic of 0.74 (95\% CI: 0.69, 0.79), and it outperformed the baseline model using only participant characteristics (C statistic, 0.69; P = .049). Best results were obtained when combining all variables (C statistic, 0.76; P < .001). Conclusion: Five-year CVD mortality prediction using automatically extracted image-based features is feasible at lung screening low-dose CT.}, + file = {Vos21.pdf:pdf\\Vos21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/235733}, + ss_id = {f29eb9082f4fd1b632eeea5f668671e6ca685d29}, + all_ss_ids = {['f29eb9082f4fd1b632eeea5f668671e6ca685d29']}, + gscites = {5}, +} + +@conference{Vree15, + author = {S. Vreemann and A. Gubern-M\'{e}rida and S. Lardenoije and B. Platel and N. Karssemeijer and R.M. 
Mann}, + title = {A critical audit of a breast {MRI} screening programme for intermediate and high risk patients in clinical practice}, + booktitle = ECR, + year = {2015}, + abstract = {Purpose: Breast MRI is used to screen for breast cancer in women at elevated risk. However, contrary to common practice in mammography screening, no structural quality assessment of MRI screening is performed. The purpose of this audit was to evaluate the visibility of MR screen-detected cancers on prior MR examinations. Methods and Materials: Patient files from all women who were diagnosed with breast cancer after a positive MRI screen between 2003 and 2013 were reviewed. We selected all cases where a previous MR examination was performed between 6 and 24 months before detection. This yielded 42 cancers (3 lobular-, 38 ductal carcinomas, 1 secretory carcinoma) in 40 women (1 bilateral, mean age 50 years +- 9.7 years). The diagnostic and prior MRI studies were evaluated side-by-side in consensus by two dedicated breast radiologists. The visibility of the cancers on the prior scans was rated as: Visible (BIRADS 4/5), Minimal sign (BIRADS 2/3), and Occult (BIRADS 1). Results: The mean interval between the prior and diagnostic MRI was 11.9 months +- 3.3 months. All 42 lesions were identified on the diagnostic scans. Eleven lesions (26%) were visible in the prior MRI and should have already been recalled. Thirteen lesions (31%) showed a minimal sign in the prior MRI. Only 18 lesions (43%) were completely occult. Conclusion: In 26% of cases, evident lesions were missed in prior MRI screening exams. To prevent these errors structural double reading of MR screening examinations may be indicated.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Vree15a, + author = {Vreemann, S and Gubern-M\'{e}rida, A. and Lardenoije, S. and Platel, B. and Karssemeijer, N and Mann, R.}, + title = {Breast cancers not detected by MRI in a high and intermediate risk screening program}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE + The purpose of this study was to evaluate the visibility of MR screen detected cancers on prior MR examinations in a population with an elevated risk for breast cancer. + + METHOD AND MATERIALS + An IRB approved, retrospective review of patient files from women screened with breast MRI between 2003 and 2013 was conducted at our academic center. We selected all cases detected in MRI with a prior negative MR examination performed between 6 and 24 months before a cancer was revealed (mean: 12.8 +- 3.7 months). This yielded 43 cancers (3 invasive lobular-, 33 invasive ductal carcinomas, 5 ductal carcinoma in situ and 2 others) in 41 patients (age: 49 +- 9.8 years, 21 BRCA patients). The MR scans where the cancers were detected (diagnostic MR scan) and the prior MR scans were evaluated side-by-side in consensus by two dedicated breast radiologists. The visibility of the cancers on prior scans was rated as: visible (BIRADS 4/5), minimal sign (BIRADS 2/3), or invisible (BIRADS 1). Chi-square tests were used to test the correlation between patient and cancer characteristics, image quality (IQ), background parenchymal enhancement (BPE), and visibility of the tumor in the prior MR scan. + + RESULTS + All lesions were retrospectively evident on the diagnostic MR scan. Review of the prior examinations of the 43 cancers detected in follow-up rounds revealed that 11 lesions (26%) were visible in the prior MRI and should have been recalled at the time of this scan. 
15 lesions (35%) showed a minimal sign in the prior MRI. Only 17 lesions (40%) were completely invisible. High grade, ER negative, and PR negative tumors were more often invisible in the prior scan (p=0.016, p=0.005, and p=0.002). Moreover, tumors in BRCA patients were more likely to be invisible in the prior scan, than in non-BRCA carriers (p=0.025). IQ and BPE were not significantly related to the visibility of tumors in the prior scan. + + CONCLUSION + About 26% of the breast cancers could have been recalled earlier and only 40% of the breast cancers were invisible in retrospect. + + CLINICAL RELEVANCE/APPLICATION + To prevent screening errors regular auditing of clinical practice is indicated. Moreover, like in mammography, structural double reading of MRI screening examinations may be recommended.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Vree15b, + author = {Vreemann, S and Gubern-M\'{e}rida, A. and Lardenoije, S. and Platel, B. and Karssemeijer, N and Mann, R.}, + title = {Longitudinal results of a breast MRI screening program for patients at high and intermediate risk; does BRCA status matter?}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE + Breast cancer screening in women at elevated risk is performed with yearly MRI and mammography. This includes women with BRCA mutations and women at elevated risk for other causes (mainly family history). The purpose of this study was to assess differences between BRCA mutation carriers and non-BRCA patients in a longitudinal MRI screening program in terms of recall rate, positive predictive value, and detection. + + METHOD AND MATERIALS + An IRB approved, retrospective review of patient files from women screened with breast MRI between 2003 and 2013 was performed at our academic center. We analysed 9.504 screening MR examinations in 2843 women (age: 45 +- 12.09 years), including 761 BRCA patients, and 2082 non-BRCA patients. Recall rate (RR), positive predictive value (PPV), and cancer detection rate (CDR) were evaluated for first round examinations and follow-up examinations separately. BRCA patients were compared with non-BRCA patients. Chi-square tests were used to determine statistical significance. + + RESULTS + The RR for BRCA patients in the first round of screening was 86.07 per 1000 examinations and 52.58 per 1000 examinations in non-BRCA patients (p<0.001). The PPV for BRCA patients in the first round of screening was found to be 0.44, compared to 0.50 in non-BRCA patients (p=0.013). The CDR was 38.25 per 1000 examinations for BRCA patients and 26.53 per 1000 examinations for non-BRCA patients (p<0.001). In follow up, the RR was found to be 24.92 per 1000 examinations for BRCA patients and 22.81 per 1000 examinations for non-BRCA patients (p<0.001). The PPV was 0.46 for BRCA patients and 0.21 for non-BRCA patients (p<0.001). CDR was 11.42 per 1000 examinations for BRCA patients and 4.86 per 1000 examinations for non-BRCA patients (p<0.001). + + CONCLUSION + RR and CDR are high for all patients in the first round. RR and CDR significantly decreased in follow-up rounds (p<0.001). PPV remained at an acceptable level for both patient groups, and remains particularly high in BRCA carriers. RR, PPV, and CDR differed significantly between BRCA and non-BRCA patients in both first and follow up rounds. + + CLINICAL RELEVANCE/APPLICATION + These results underline that MRI is an excellent tool for screening high risk patients. 
Cancer detection is very high in the first round in all patients, but remains high only in BRCA carriers in follow up rounds.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Vree16, + author = {Vreemann, S and Gubern-M\'{e}rida, A. and Lardenoije, S. and Karssemeijer, N and Mann, R.M.}, + title = {The performance of MRI screening in the detection of breast cancer in an intermediate and high risk screening program}, + booktitle = ISMRM, + year = {2016}, + abstract = {Synopsis: Women at increased risk for breast cancer require annual mammography and MRI. The purpose of this study is to evaluate cancers detected in MRI screening and assess the visibility on prior MRI-examinations. MRI-scans of breast cancers detected in our MRI screening program were re-evaluated and lesions on the diagnostic MRI and prior MRI were scored according to Breast Imaging Reporting and Data (BI-RADS) MR-lexicon. The visibility of the lesions on the prior MRI was rated as visible, minimal sign and invisible. Our results show that almost one third of the breast cancers should have been recalled based on consensus review. + Purpose: + Breast cancer is a main cause of cancer death, especially in women at increased risk for breast cancer. This risk is defined as a cumulative lifetime risk of more than 20%, and can be as high as 57% at the age of 70 in BRCA1-carriers.1 Screening with only mammography is insufficient in these women and supplemental annual breast MRI is currently required.2 In mammography screening it is regular practice to evaluate breast cancers detected in incident screening rounds and cancers detected in between screening rounds (interval cancers), and assess whether these cancers could have been detected earlier.3,4 This is rare for MRI screening. The purpose of this study is to evaluate breast cancers detected in an intermediate and high risk screening program, and assess the visibility of these cancers on prior MRI examinations. To detect possible causes for non-recall, we investigated imaging, patient, and cancer characteristics. + Methods: + This retrospective study was approved by our local institutional board and the requirement for informed consent was waived. We collected all breast MRI screening examinations in the period from January 2003 - January 2014. To find all malignant lesions in this population, corresponding patient files were linked to the Netherlands Cancer Registry (NCR). For each patient with breast cancer detected on an MRI-screen or interval cancer, we identified whether another MRI-screen in 6 - 24 months before cancer detection was available (prior MRI). These MRI-scans were re-evaluated together with the MRI-scan in which cancer was detected (diagnostic MRI) in consensus by two radiologists with 8 and 12 years of experience. The review was performed on an in-house developed breast MRI workstation, which provided T1-weighted images for all time points for both current and prior DCE-MRI, subtraction images, and their maximum intensity projection. Images were motion corrected using an algorithm described in Gubern-M\'{e}rida et al.5 No T2-weighted images or diffusion-weighted images were shown. On the diagnostic MRI morphological and enhancement characteristics of the known cancer were scored according to the Breast Imaging Reporting and Data (BI-RADS) MR-lexicon.6 In addition, background parenchymal enhancement (BPE) was scored as minimal (<25%), mild (25-50%), moderate (50-75%) or marked (>75%), and image quality (IQ) was scored as perfect, sufficient or bad.
Thereafter, the prior MRI was analyzed. The visibility of the lesion, previously identified in the diagnostic MRI, was rated as visible (BI-RADS 4/5), minimally visible (BI-RADS 2/3), or invisible (BI-RADS 1) (Fig.1). In lesions classified as visible or minimally visible morphology and enhancement characteristics were scored. Pearson's chi-square tests were used to test if imaging, patient, and cancer characteristics affect the visibility of the tumor on the prior MRI. Statistics were performed in SPSS. + Results: + From January 2003 - January 2014, 10120 MRI-examinations were performed in 2797 women, including 807 BRCA-mutation carriers. In total, 153 cancers were found. For 69 screen-detected tumors a prior MRI was available (36 tumors in patients with a BRCA mutation). In retrospect, 20 (29%) tumors were visible on the prior MRI, 26 (38%) showed a minimal sign, and 23 (33%) were invisible. Furthermore, prior MRIs were also available for 12 interval cancers (6 tumors in patients with a BRCA mutation); 3 (25%) were visible, 4 (33%) showed a minimal sign, and 5 (33%) were not visible on the prior MRI. Tumors in BRCA patients, small tumors, tumors of high grade and hormone-negative tumors were more likely to be invisible on the prior MRI (p<0.001, p=0.039, p<0.001, p<0.001, respectively). The lack of detection of lesions scored as visible on the prior MRI was not related to BPE or IQ. + Discussion: + A successful MRI screening program is based on the balance between the early detection and the false positive findings that result in unnecessary biopsies and anxiety. This might explain why not all visible lesions get recalled. However, in our study we show that almost one third of cancers were already visible on the prior MRI scan in retrospect and should have been recalled according to our consensus review. This fraction was similar for screen detected and interval cancers. A possible reason for the non-recall could be that the visible lesions were already present at an earlier time point and were regarded stable over time. Non-recall was not related to BPE or IQ. + Conclusion: + It was seen that 28% of breast cancers should have been recalled earlier based on consensus review. Only 35% was completely invisible in retrospect. This indicates that even highly specialized breast cancer screening programs can still be improved and that regular evaluation of screening practice is essential. + References: + 1.S. Chen et al. Meta-analysis of BRCA1 and BRCA2 penetrance. JCO (2007), 25(11):1329-33 + 2.D. Saslow et al. American Cancer Society guidelines for breast screening with MRI as an adjunct to mammography. CA (2007), 57:75-89 + 3.D.M. Ikeda et al. Analysis of 172 subtle findings on prior normal mammograms in women with breast cancer detected at follow-up screening. Radiology (2003), 226(2):494-503 + 4.A.J. Maxwell et al. A study of breast cancers detected in the incident round of the UK NHS Breast Screening Programme: the importance of early detection and treatment of ductal carcinoma in situ. Breast (2001), 10(5):392-8 + 5.A. Gubern-Merida et al. Automated localization of breast cancer in DCE-MRI. Med Imag Anal (2015),20(1):265-74 + 6.BI-RADS Atlas, 5th ed 2013 + + Acknowledgements: European Union's 7FP (Grant 601040)}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Vree16a, + author = {Vreemann, S and Gubern-M\'{e}rida, A. and Lardenoije, S.
and Karssemeijer, N and Mann, R.}, + title = {Differences between cancers detected in prophylactic mastectomy specimen, screen detected cancers and true interval cancers in women participating in an intermediate and high risk screening program}, + booktitle = EBCC, + year = {2016}, + abstract = {Background: Intensive screening with annual mammography and MRI is offered to women at high risk for the development of breast cancer. Although most cancers are screen detected, screening does not prevent breast cancers from occurring and some are still detected between screening rounds (true interval cancers). Consequently, some women opt for prophylactic mastectomy rather than intensive screening since this reduces the incidence of breast cancer. Unfortunately, detection of cancer in a prophylactic mastectomy specimen (incident cancers) is not a rare occurrence. It is unsure whether these cancers should be considered as interval cancers. This study evaluates the prognostic factors of cancers stratified by the mode of tumor detection in these women. + Material and methods: Review of our intermediate and high risk screening program from 2003 to 2013 identified 177 cancers. Of these, 136 were detected in screening, 15 cancers were true interval carcinomas detected due to symptoms, and 26 cancers were detected in prophylactic mastectomy specimens. Patient- and cancer characteristics (invasive versus in-situ disease, grade, pT-stage, age, menopausal state, cancer receptor status and pN-stage) between these three groups were compared using a Pearson's chi-square test for categorical variables or one-way ANOVA for continuous variables. + Results: The fraction of invasive disease was 8/26 (30.8%), 109/136 (80.1%) and 15/15 (100%) for cancers in prophylactic mastectomy specimens, screen detected cancers and interval cancers, respectively (p<0.001). The fraction of cancers larger than two centimeters was 1/26 (3.8%), 24/136 (17.6%) and 3/15 (20.0%), respectively. A similar increase was observed for the overall pT-stage (p<0.001). Moreover, tumor grade was higher in true interval cancers than in cancers detected in prophylactic mastectomy specimens (p=0.001). Most cancers were node negative (p=0.233). There were no significant differences in patient age, menopausal state, cancer receptor status, and pN-stage between true interval cancers and prophylactic mastectomy specimens. + Conclusions: True interval cancers are more often invasive, generally larger, and commonly of higher grade than screen detected cancers or cancers in prophylactic mastectomy specimens. The prognosis of cancers detected in prophylactic mastectomy specimens is particularly good as most of these lesions are in situ cancers only. Therefore, these incident cancers should not be regarded as interval cancers.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Vree16b, + author = {Vreemann, S and Gubern-M\'{e}rida, A. and Lardenoije, S. and Karssemeijer, N and Mann, R.}, + title = {Prognostic factors of interval carcinomas occurring in an intermediate and high risk breast cancer screening program}, + booktitle = ECR, + year = {2016}, + abstract = {Purpose: Women at increased risk for breast cancer are screened with annual MRI and mammography. However, + despite this intensive surveillance interval cancers still occur. The purpose of this study is to evaluate prognostic + factors of interval carcinomas and compare these to prognostic factors of screen detected cancers. 
+ Methods and Materials: In a review of our intermediate and high risk screening program from 2003 to 2013, 170 + cancers in 159 women were identified. Of these, 14 cancers were true interval carcinomas presenting with symptoms, + and 132 were detected in screening. Twenty-four further cancers were detected in prophylactic mastectomy + specimens, and were excluded from this study. Patient- and cancer characteristics of screen detected cancers and + interval cancers were compared using a Pearson's chi-squared test for categorical variables and a Student's t-test for + continuous variables. + Results: Interval cancers occurred in younger patients (p=0.001), had a higher pT-stage (p=0.046), and were more + often ER-negative and PR-negative (p=0.002, and p=0.002, respectively). Tumor grade appeared worse in interval + carcinomas and were more often invasive, but this did not reach statistical significance (p=0.062, and p=0.063, + respectively). HER2-status was not significantly different. Fortunately, no difference was observed in pN-stage or + presence of metastatic disease. + Conclusion: Interval cancers occurring in women participating in intensive surveillance programs are of more + aggressive nature than screen detected cancers. However, our results suggest that interval cancers are detected when + the disease is local. This still results in a relatively good prognosis for patients with interval cancer.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Vree16c, + author = {Vreemann, S and Gubern-M\'{e}rida, A. and Borelli, C. and Karssemeijer, N and Mann, R.}, + title = {Background Parenchymal Enhancement as a predictor of breast cancer grade: a pilot study}, + booktitle = ECR, + year = {2016}, + abstract = {Purpose: Breast MRI background parenchymal enhancement (BPE) has been identified as a risk factor for breast + cancer and has been associated to certain tumor characteristics. However, it is not known whether its presence is + related to tumor aggressiveness in high risk screening patients. The purpose of this study is to evaluate this + association between BPE and tumor grade in high risk screen detected breast cancers. + Methods and Materials: Review of our intermediate and high risk screening program from 2003-2013 identified MRI scans + of 80 cancers in 79 patients (48 ± 9.8 years) with biopsy proven unilateral cancer and no previous breast cancer. + The level of BPE in the contralateral breast was scored as minimal, mild, moderate, and marked by two readers (one + 5th year resident (R1) and one experienced radiologist (R2)). Odds ratios (OR) were calculated for grade in relation to + BPE. Observer variability was computed using kappa statistics. + Results: A significant association was found between tumor grade and level of BPE in the contralateral breast for both + readers (the OR for high grade tumor was 0.394 (p=0.007) for R1 and 0.310 (p=0.002) for R2). After adjusting for + significant factors, the OR for high grade cancers was 0.924 for R1 and 2.066 for R2. Kappa value for BPE + assessment between readers was K=0.592. + Conclusion: Lower BPE might be associated to higher tumor grade, when only evaluating BPE. However, our results + suggest that other factors play a major role in this association.
This limits the usefulness of BPE as a parameter for + therapy stratification.}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Vree17, + author = {Vreemann, Suzan and Rodriguez-Ruiz, Alejandro and Nickel, Dominik and Heacock, Laura and Appelman, Linda and van Zelst, Jan and Karssemeijer, Nico and Weiland, Elisabeth and Maas, Marnix and Moy, Linda and Kiefer, Berthold and Mann, Ritse M}, + title = {Compressed Sensing for Breast MRI: Resolving the Trade-Off Between Spatial and Temporal Resolution}, + journal = IR, + year = {2017}, + volume = {52}, + issue = {10}, + month = {10}, + pages = {574--582}, + doi = {10.1097/RLI.0000000000000384}, + abstract = {Ultrafast dynamic contrast-enhanced magnetic resonance imaging of the breast enables assessment of the contrast inflow dynamics while providing images with diagnostic spatial resolution. However, the slice thickness of common ultrafast techniques still prevents multiplanar reconstruction. In addition, some temporal blurring of the enhancement characteristics occurs in case view-sharing is used. We evaluate a prototype compressed-sensing volume-interpolated breath-hold examination (CS-VIBE) sequence for ultrafast breast MRI that improves through plane spatial resolution and avoids temporal blurring while maintaining an ultrafast temporal resolution (less than 5 seconds per volume). Image quality (IQ) of the new sequence is compared with an ultrafast view-sharing sequence (time-resolved angiography with interleaved stochastic trajectories [TWIST]), and assessment of lesion morphology is compared with a regular T1-weighted 3D Dixon sequence (VIBE-DIXON) with an acquisition time of 91 seconds. From April 2016 to October 2016, 30 women were scanned with the CS-VIBE sequence, replacing the routine ultrafast TWIST sequence in a hybrid breast MRI protocol. The need for informed consent was waived. All MRI scans were performed on a 3T MAGNETOM Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Two reader studies were conducted involving 5 readers. In the first study, overall IQ of CS-VIBE and TWIST in the axial plane was independently rated for 23 women for whom prior MRI examinations with TWIST were available. In addition, the presence of several types of artifacts was rated on a 5-point scale. The second study was conducted in women (n = 16) with lesions. In total, characteristics of 31 lesions (5 malignant and 26 benign) were described independently for CS-VIBE and VIBE-DIXON, according to the BI-RADS MRI-lexicon. In addition, a lesion conspicuity score was given. Using CS-VIBE, a much higher through-plane spatial resolution was achieved in the same acquisition time as with TWIST, without affecting in-plane IQ (P = 0.260). Time-resolved angiography with interleaved stochastic trajectories showed slightly more motion artifacts and infolding and ghosting artifacts compared with CS-VIBE, whereas CS-VIBE showed more breathing and pulsation artifacts. For morphologic assessment, intrareader agreement between CS-VIBE and the more time-consuming VIBE-DIXON was slight to almost perfect, and generally higher than interreader agreement. Mean sensitivity (84.0% and 92.0% for CS-VIBE and VIBE-DIXON, P = 0.500) and specificity (60.0% and 55.4% for CS-VIBE and VIBE-DIXON, P = 0.327) were comparable for both sequences. 
Compressed-sensing volume-interpolated breath-hold examination allows an increase of the through-plane spatial resolution of ultrafast dynamic contrast-enhanced magnetic resonance imaging compared with TWIST at a comparable in-plane IQ. Morphological assessment of lesions using CS-VIBE is comparable to VIBE-DIXON, which takes 18 times longer. Consequently, CS-VIBE enables 3D evaluation of breast lesions in ultrafast breast MRI.}, + file = {Vree17.pdf:pdf\\Vree17.pdf:PDF}, + optnote = {DIAG}, + pmid = {28463932}, + gsid = {8986280950670397521}, + gscites = {39}, + ss_id = {f0dff8ace53e7e0c2bd64dd5f18996eedf10e4cb}, + all_ss_ids = {['f0dff8ace53e7e0c2bd64dd5f18996eedf10e4cb']}, +} + +@article{Vree18, + author = {Vreemann, Suzan and Gubern-M\'{e}rida, Albert and Borelli, Cristina and Bult, Peter and Karssemeijer, Nico and Mann, Ritse M}, + title = {The correlation of background parenchymal enhancement in the contralateral breast with patient and tumor characteristics of {MRI-screen} detected breast cancers}, + journal = PLOSONE, + year = {2018}, + volume = {13}, + issue = {1}, + pages = {e0191399}, + doi = {10.1371/journal.pone.0191399}, + abstract = {Higher background parenchymal enhancement (BPE) could be used for stratification of MRI screening programs since it might be related to a higher breast cancer risk. Therefore, the purpose of this study is to correlate BPE to patient and tumor characteristics in women with unilateral MRI-screen detected breast cancer who participated in an intermediate and high risk screening program. As BPE in the affected breast may be difficult to discern from enhancing cancer, we assumed that BPE in the contralateral breast is a representative measure for BPE in women with unilateral breast cancer. This retrospective study was approved by our local institutional board and a waiver for consent was granted. MR-examinations of women with unilateral breast cancers screen-detected on breast MRI were evaluated by two readers. BPE in the contralateral breast was rated according to BI-RADS. Univariate analyses were performed to study associations. Observer variability was computed. Analysis included 77 breast cancers in 76 patients (age: 48+-9.8 years), including 62 invasive and 15 pure ductal carcinoma in-situ cases. A negative association between BPE and tumor grade (p<=0.016) and a positive association with progesterone status (p<=0.021) was found. The correlation was stronger when only considering invasive disease. Inter-reader agreement was substantial. Lower BPE in the contralateral breast in women with unilateral breast cancer might be associated to higher tumor grade and progesterone receptor negativity. Great care should be taken using BPE for stratification of patients to tailored screening programs.}, + file = {Vree18.pdf:pdf\\Vree18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {29351560}, + month = {1}, + gsid = {5339776334232974796}, + gscites = {17}, + ss_id = {6492ea59487a07b4a2c380daeebebb26a6ff8bc2}, + all_ss_ids = {['6492ea59487a07b4a2c380daeebebb26a6ff8bc2']}, +} + +@phdthesis{Vree18a, + author = {Suzan Vreemann}, + title = {Breast MRI for screening: evaluation of clinical practice and future perspectives}, + year = {2018}, + url = {https://repository.ubn.ru.nl/handle/2066/200483}, + abstract = {Breast cancer is one of the leading causes of cancer death among women. In the Netherlands, one out of 6.6 women faces the diagnosis of breast cancer during her life. The survival chances are increased by the early detection of breast cancer. 
To ensure early detection, breast cancer screening was introduced in the Netherlands, in which women from the age of 50 are invited for a biennial mammography. Unfortunately, mammography has its limitations, causing that breast parenchyma can obscure cancer. In addition, research has already proven that high breast density is an independent risk factor for breast cancer. Other significant risk factors are personal history of breast cancer, radiation to the chest at a young age, extensive family history of breast cancer, and a proven BRCA gene mutation.}, + copromotor = {A. Gubern-M\'{e}rida and R. Mann}, + file = {:pdf/Vree18a.pdf:PDF;:png/publications/thumbs/Vree18a.png:PNG image;:png/publications/Vree18a.png:PNG image}, + optnote = {DIAG}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + journal = {PhD thesis}, +} + +@article{Vree18b, + author = {Vreemann, S and Gubern-Merida, A and Lardenoije, S and Bult, P and Karssemeijer, N and Pinker, K and Mann, R M}, + title = {The frequency of missed breast cancers in women participating in a high-risk MRI screening program}, + journal = BRECRT, + year = {2018}, + volume = {169}, + issue = {2}, + month = {6}, + pages = {323--331}, + doi = {10.1007/s10549-018-4688-z}, + abstract = {To evaluate the frequency of missed cancers on breast MRI in women participating in a high-risk screening program. Patient files from women who participated in an increased risk mammography and MRI screening program (2003-2014) were coupled to the Dutch National Cancer Registry. For each cancer detected, we determined whether an MRI scan was available (0-24 months before cancer detection), which was reported to be negative. These negative MRI scans were in consensus re-evaluated by two dedicated breast radiologists, with knowledge of the cancer location. Cancers were scored as invisible, minimal sign, or visible. Additionally, BI-RADS scores, background parenchymal enhancement, and image quality (IQ; perfect, sufficient, bad) were determined. Results were stratified by detection mode (mammography, MRI, interval cancers, or cancers in prophylactic mastectomies) and patient characteristics (presence of BRCA mutation, age, menopausal state). Negative prior MRI scans were available for 131 breast cancers. Overall 31% of cancers were visible at the initially negative MRI scan and 34% of cancers showed a minimal sign. The presence of a BRCA mutation strongly reduced the likelihood of visible findings in the last negative MRI (19 vs. 46%, P < 0.001). Less than perfect IQ increased the likelihood of visible findings and minimal signs in the negative MRI (P = 0.021). This study shows that almost one-third of cancers detected in a high-risk screening program are already visible at the last negative MRI scan, and even more in women without BRCA mutations. 
Regular auditing and double reading for breast MRI screening is warranted.}, + file = {Vree18b.pdf:pdf\\Vree18b.pdf:PDF}, + optnote = {DIAG}, + pmid = {29383629}, + gsid = {7496291364902253050}, + gscites = {24}, + ss_id = {cd47d4eae40407bb844a7322d417cb2fe667e6eb}, + all_ss_ids = {['cd47d4eae40407bb844a7322d417cb2fe667e6eb']}, +} + +@article{Vree18c, + author = {Vreemann, Suzan and van Zelst, Jan C M and Schlooz-Vries, Margrethe and Bult, Peter and Hoogerbrugge, Nicoline and Karssemeijer, Nico and Gubern-Merida, Albert and Mann, Ritse M}, + title = {The added value of mammography in different age-groups of women with and without BRCA mutation screened with breast MRI}, + journal = BRECR, + year = {2018}, + volume = {20}, + issue = {1}, + month = {8}, + pages = {84}, + doi = {10.1186/s13058-018-1019-6}, + abstract = {Breast magnetic resonance imaging (MRI) is the most sensitive imaging method for breast cancer detection and is therefore offered as a screening technique to women at increased risk of developing breast cancer. However, mammography is currently added from the age of 30 without proven benefits. The purpose of this study is to investigate the added cancer detection of mammography when breast MRI is available, focusing on the value in women with and without BRCA mutation, and in the age groups above and below 50 years. This retrospective single-center study evaluated 6553 screening rounds in 2026 women at increased risk of breast cancer (1 January 2003 to 1 January 2014). Risk category (BRCA mutation versus others at increased risk of breast cancer), age at examination, recall, biopsy, and histopathological diagnosis were recorded. Cancer yield, false positive recall rate (FPR), and false positive biopsy rate (FPB) were calculated using generalized estimating equations for separate age categories (< 40, 40-50, 50-60, >= 60 years). Numbers of screens needed to detect an additional breast cancer with mammography (NSN) were calculated for the subgroups. Of a total of 125 screen-detected breast cancers, 112 were detected by MRI and 66 by mammography: 13 cancers were solely detected by mammography, including 8 cases of ductal carcinoma in situ. In BRCA mutation carriers, 3 of 61 cancers were detected only on mammography, while in other women 10 of 64 cases were detected with mammography alone. While 77% of mammography-detected-only cancers were detected in women >= 50 years of age, mammography also added more to the FPR in these women. Below 50 years the number of mammographic examinations needed to find an MRI-occult cancer was 1427. Mammography is of limited added value in terms of cancer detection when breast MRI is available for women of all ages who are at increased risk. 
While the benefit appears slightly larger in women over 50 years of age without BRCA mutation, there is also a substantial increase in false positive findings in these women.}, + file = {Vree18c.pdf:pdf\\Vree18c.pdf:PDF}, + optnote = {DIAG}, + pmid = {30075794}, + gsid = {10033587502840045193}, + gscites = {34}, + ss_id = {7c3b43419293d766b7b6a69c4606d43cbbc13cd2}, + all_ss_ids = {['7c3b43419293d766b7b6a69c4606d43cbbc13cd2']}, +} + +@article{Vree18d, + author = {Vreemann, Suzan and Gubern-Merida, Albert and Schlooz-Vries, Margrethe S and Bult, Peter and van Gils, Carla H and Hoogerbrugge, Nicoline and Karssemeijer, Nico and Mann, Ritse M}, + title = {Influence of Risk Category and Screening Round on the Performance of an MR Imaging and Mammography Screening Program in Carriers of the BRCA Mutation and Other Women at Increased Risk}, + journal = Radiology, + year = {2018}, + volume = {286}, + issue = {2}, + month = {2}, + pages = {443--451}, + doi = {10.1148/radiol.2017170458}, + abstract = {Purpose To evaluate the real-life performance of a breast cancer screening program for women with different categories of increased breast cancer risk with multiple follow-up rounds in an academic hospital with a large screening population. Materials and Methods Screening examinations (magnetic resonance [MR] imaging and mammography) for women at increased breast cancer risk (January 1, 2003, to January 1, 2014) were evaluated. Risk category, age, recall for workup of screening-detected abnormalities, biopsy, and histopathologic diagnosis were recorded. Recall rate, biopsy rate, positive predictive value of recall, positive predictive value of biopsy, cancer detection rate, sensitivity, and specificity were calculated for first and follow-up rounds. Results There were 8818 MR and 6245 mammographic examinations performed in 2463 women. Documented were 170 cancers; of these, there were 129 screening-detected cancers, 16 interval cancers, and 25 cancers discovered at prophylactic mastectomy. Overall sensitivity was 75.9% including the cancers discovered at prophylactic mastectomy (95% confidence interval: 69.5%, 82.4%) and 90.0% excluding those cancers (95% confidence interval: 83.3%, 93.7%). Sensitivity was lowest for carriers of the BRCA1 mutation (66.1% and 81.3% when including and not including cancers in prophylactic mastectomy specimens, respectively). Specificity was higher at follow-up (96.5%; 95% confidence interval: 96.0%, 96.9%) than in first rounds (85.1%; 95% confidence interval: 83.4%, 86.5%) and was high for both MR imaging (97.1%; 95% confidence interval: 96.7%, 97.5%) and mammography (98.7%; 95% confidence interval: 98.3%, 99.0%). Positive predictive value of recall and positive predictive value of biopsy were lowest in women who had only a family history of breast cancer. Conclusion Screening performance was dependent on risk category. Sensitivity was lowest in carriers of the BRCA1 mutation. The specificity of high-risk breast screening improved at follow-up rounds. RSNA, 2017 Online supplemental material is available for this article.}, + file = {Vree18d.pdf:pdf\\Vree18d.pdf:PDF}, + optnote = {DIAG}, + pmid = {29040037}, + gsid = {4123505121536739933}, + gscites = {44}, + ss_id = {37dd5df4c173aac442ea358dd6135100e21c4451}, + all_ss_ids = {['37dd5df4c173aac442ea358dd6135100e21c4451']}, +} + +@article{Vree19, + author = {Vreemann, Suzan and Dalmis, Mehmet U. and Bult, Peter and Karssemeijer, Nico and Broeders, Mireille J. M. 
and Gubern-M\'{e}rida, Albert and Mann, Ritse M.}, + title = {Amount of fibroglandular tissue FGT and background parenchymal enhancement BPE in relation to breast cancer risk and false positives in a breast MRI screening program}, + doi = {10.1007/s00330-019-06020-2}, + year = {2019}, + abstract = {The purpose of this study is to evaluate the predictive value of the amount of fibroglandular tissue (FGT) and background parenchymal enhancement (BPE), measured at baseline on breast MRI, for breast cancer development and risk of false-positive findings in women at increased risk for breast cancer. Negative baseline MRI scans of 1533 women participating in a screening program for women at increased risk for breast cancer between January 1, 2003, and January 1, 2014, were selected. Automated tools based on deep learning were used to obtain quantitative measures of FGT and BPE. Logistic regression using forward selection was used to assess relationships between FGT, BPE, cancer detection, false-positive recall, and false-positive biopsy. Sixty cancers were detected in follow-up. FGT was only associated to short-term cancer risk; BPE was not associated with cancer risk. High FGT and BPE did lead to more false-positive recalls at baseline (OR 1.259, p = 0.050, and OR 1.475, p = 0.003) and to more frequent false-positive biopsies at baseline (OR 1.315, p = 0.049, and OR 1.807, p = 0.002), but were not predictive for false-positive findings in subsequent screening rounds. FGT and BPE, measured on baseline MRI, are not predictive for overall breast cancer development in women at increased risk. High FGT and BPE lead to more false-positive findings at baseline. * Amount of fibroglandular tissue is only predictive for short-term breast cancer risk in women at increased risk. * Background parenchymal enhancement measured on baseline MRI is not predictive for breast cancer development in women at increased risk. * High amount of fibroglandular tissue and background parenchymal enhancement lead to more false-positive findings at baseline MRI.}, + url = {http://dx.doi.org/10.1007/s00330-019-06020-2}, + file = {Vree19.pdf:pdf\Vree19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + citation-count = {23}, + automatic = {yes}, + pages = {4678-4690}, + volume = {29}, +} + +@inproceedings{Vug18, + author = {van Vugt, Joris and Marchiori, Elena and Mann, Ritse and Gubern-Merida, Albert and Moriakov, Nikita and Teuwen, Jonas}, + booktitle = MI, + title = {Vendor-independent soft tissue lesion detection using weakly supervised and unsupervised adversarial domain adaptation}, + doi = {10.1117/12.2512940}, + series = SPIE, + abstract = {Computer-aided detection aims to improve breast cancer screening programs by helping radiologists to evaluate digital mammography (DM) exams. DM exams are generated by devices from different vendors, with diverse characteristics between and even within vendors. Physical properties of these devices and postprocessing of the images can greatly influence the resulting mammogram. This results in the fact that a deep learning model trained on data from one vendor cannot readily be applied to data from another vendor. This paper investigates the use of tailored transfer learning methods based on adversarial learning to tackle this problem. We consider a database of DM exams (mostly bilateral and two views) generated by Hologic and Siemens vendors. 
We analyze two transfer learning settings: 1) unsupervised transfer, where Hologic data with soft lesion annotation at pixel level and Siemens unlabelled data are used to annotate images in the latter data; 2) weak supervised transfer, where exam level labels for images from the Siemens mammograph are available. We propose tailored variants of recent state-of-the-art methods for transfer learning which take into account the class imbalance and incorporate knowledge provided by the annotations at exam level. Results of experiments indicate the beneficial effect of transfer learning in both transfer settings. Notably, at 0.02 false positives per image, we achieve a sensitivity of 0.37, compared to 0.30 of a baseline with no transfer. Results indicate that using exam level annotations gives an additional increase in sensitivity.}, + file = {:pdf/Vugt19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, + month = {3}, + ss_id = {bbb3afacdbf1275e48ff46f21d9ac37574f281a6}, + all_ss_ids = {['bbb3afacdbf1275e48ff46f21d9ac37574f281a6']}, + gscites = {0}, +} + +@inproceedings{Vuka08, + author = {D. Vukadinovic and T. {van Walsum} and R. Manniesing and A. {van der Lugt} and T.T. {de Weert} and W.J. Niessen}, + title = {AdaBoost Classification for Model Based Segmentation of the Outer Vessel Wall}, + booktitle = MI, + year = {2008}, + volume = {6914}, + series = SPIE, + pages = {691418-1 -- 691418-8}, + doi = {10.1117/12.770232}, + abstract = {A novel 2D slice based fully automatic method for model based segmentation of the outer vessel wall of the common carotid artery in CTA data set is introduced. The method utilizes a lumen segmentation and AdaBoost, a fast and robust machine learning algorithm, to initially classify (mark) regions outside and inside the vessel wall using the distance from the lumen and intensity profiles sampled radially from the gravity center of the lumen. A similar method using the distance from the lumen and the image intensity as features is used to classify calcium regions. Subsequently, an ellipse shaped deformable model is fitted to the classification result. The method achieves smaller detection error than the inter observer variability, and the method is robust against variation of the training data sets.}, + file = {Vuka08.pdf:pdf\\Vuka08.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, +} + +@inproceedings{Vuka09, + author = {Vukadinovic, D. and van Walsum, T. and Rozie, S. and de Weert, T. and Manniesing, R. and van der Lugt, A. and Niessen, W.}, + title = {Carotid artery segmentation and plaque quantification in {CTA}}, + booktitle = {Proc. IEEE Int. Symp. Biomedical Imaging: From Nano to Macro ISBI '09}, + year = {2009}, + pages = {835--838}, + doi = {10.1109/ISBI.2009.5193182}, + abstract = {A novel, slice-based, semi-automatic method for plaque segmentation and quantification in CTA of carotid arteries is introduced. The method starts with semi-automatic, levelset based, lumen segmentation initialized with three points. Pixel based GentleBoost classification is used to segment the inner and outer vessel wall region using distance from the lumen, intensity and Gaussian derivatives as features. 3D calcified regions located within the vessel wall are segmented using a similar set of features and the same classification method. Subsequently, an ellipse-shaped deformable model is fitted using the inner-outer vessel wall and calcium classification, and plaque components within the wall are characterized using HU ranges. 
The method is quantitatively evaluated on 5 carotid arteries. Vessel and plaque segmentation are compared to the interobserver variability. Furthermore, correlation of slice-based plaque component quantification with the ground truth values is determined. The accuracy of our method is comparable to the interobserver variability.}, + file = {Vuka09.pdf:pdf\\Vuka09.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {6}, + gsid = {16630912988899317295}, + gscites = {11}, +} + +@conference{Vuka09a, + author = {D. Vukadinovic and T. {van Walsum} and R. Manniesing and S. Rozie and T. T. {de Weert} and A. {van der Lugt} and W. J. Niessen}, + title = {Automated Segmentation of Atherosclerotic Plaque in {MDCT} Angiography of the Carotid Artery}, + booktitle = {2nd Dutch Conference on Biomedical Engineering}, + year = {2009}, + pages = {56}, + abstract = {There is large interest in automated carotid artery plaque analysis, as there is increasing evidence that atherosclerotic plaque volume and composition are related to acute cerebrovascular events. In this study, a novel semi-automatic method was developed to segment the plaque in the common carotid artery. First the carotid lumen is semi-automatically segmented using a level set approach initialized with three seed points [1]. Then calcified regions located within the vessel are automatically detected with the GentleBoost framework using a number of descriptive features. In the next step, voxels are classified as being inside or outside vessel with the GentleBoost framework with a similar set of features [2]. Finally, a 2D ellipse shape deformable model is fitted to the combined result of the calcium and vessel wall classification. The ellipses are fitted in a way that automatically detected calcified regions are positioned inside each ellipse, at the same time the sum of intensities along the ellipse is minimized. The method was trained on 40 CTA datasets, and tested on 60 data sets. For 20 datasets from the test set, second observer manual segmentations were available. The evaluation included comparison between the automated and manual segmentation and a comparison with interobserver variability. Automated segmentation revealed a vessel volume of 897 ± 722 mm3, while difference between manual and automated segmentation was 3.1 ± 116 mm3 (p=0.92), the similarity index (SI) was 91% and the coefficient of variation (CoV) is 13%. This was better than the interobserver variability with a difference of -20 ± 118 mm3 (p=0.46), an SI of 92% and a CoV of 15%. Automated segmentation revealed a plaque volume (vessel volume - lumen volume) of 393 ± 295 mm3, while difference between manual and automated segmentation was -4.7 ± 116 mm3 (p=0.75), the SI was 78% and the CoV is 29%. This was better than the interobserver variability with a difference of -37 ± 121 mm3 (p=0.19), an SI of 80% and a CoV of 33%. We conclude that automated segmentation of the atherosclerotic plaque is robust and accurate when compared to the manual interobserver variability. REFERENCES [1] Manniesing R, Viergever MA, Niessen WJ. "Vessel Axis Tracking Using Topology Constrained Surface Evolution," IEEE Transactions on Medical Imaging, 26(3), 309-316 (2007) [2] J. Friedman, T. Hastie, R. Tibshirani, "Additive Logistic Regression: A Statistical View of Boosting", The Annals of Statistics, vol.
28, pp.337-407, 2000}, + file = {Vuka09a.pdf:pdf\\Vuka09a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Vuka10, + author = {Danijela Vukadinovic and Theo van Walsum and Rashindra Manniesing and Sietske Rozie and Reinhard Hameeteman and Thomas T de Weert and Aad van der Lugt and Wiro J Niessen}, + title = {Segmentation of the outer vessel wall of the common carotid artery in {CTA}}, + journal = TMI, + year = {2010}, + volume = {29}, + pages = {65--76}, + doi = {10.1109/TMI.2009.2025702}, + abstract = {A novel method is presented for carotid artery vessel wall segmentation in computed tomography angiography (CTA) data. First the carotid lumen is semi-automatically segmented using a level set approach initialized with three seed points. Subsequently, calcium regions located within the vessel wall are automatically detected and classified using multiple features in a GentleBoost framework. Calcium regions segmentation is used to improve localization of the outer vessel wall because it is an easier task than direct outer vessel wall segmentation. In a third step, pixels outside the lumen area are classified as vessel wall or background, using the same GentleBoost framework with a different set of image features. Finally, a 2-D ellipse shape deformable model is fitted to a cost image derived from both the calcium and vessel wall classifications. The method has been validated on a dataset of 60 CTA images. The experimental results show that the accuracy of the method is comparable to the interobserver variability.}, + file = {Vuka10.pdf:pdf\\Vuka10.pdf:PDF;Vuka10.png:png\\Vuka10.png:PNG image}, + optnote = {DIAG, RADIOLOGY}, + number = {1}, + pmid = {19556191}, + month = {1}, + gsid = {3053704105909674321}, + gscites = {51}, + ss_id = {4ea221e633c584c7abce455e17071d4d95b5fa9c}, + all_ss_ids = {['4ea221e633c584c7abce455e17071d4d95b5fa9c']}, +} + +@inproceedings{Vuka11, + author = {D. Vukadinovic and T. {van Walsum} and R. Manniesing and S. Rozie and A. {van der Lugt} and W.J. Niessen}, + title = {Region Based Level Set Segmentation of the Outer Wall of the Carotid Bifurcation in {CTA}}, + booktitle = MI, + year = {2011}, + volume = {7962}, + series = SPIE, + pages = {79623P-1 -- 79623P-8}, + doi = {10.1117/12.878114}, + abstract = {This paper presents a level set based method for segmenting the outer vessel wall and plaque components of the carotid artery in CTA. The method employs a GentleBoost classification framework that classifies pixels as calcified region or not, and inside or outside the vessel wall. The combined result of both classifications is used to construct a speed function for level set based segmentation of the outer vessel wall; the segmented lumen is used to initialize the level set. The method has been optimized on 20 datasets and evaluated on 80 datasets for which manually annotated data was available as reference. 
The average Dice similarity of the outer vessel wall segmentation was 92%, which compares favorably to previous methods.}, + file = {Vuka11.pdf:pdf/Vuka11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {3}, + gsid = {13919107544468215275}, + gscites = {6}, + ss_id = {c072c0566f86cc9bbd967741584f0b5f2d70f0d4}, + all_ss_ids = {['c072c0566f86cc9bbd967741584f0b5f2d70f0d4']}, +} + +@article{Vuka12, + author = {Danijela Vukadinovic and Sietske Rozie and Marjon van Gils and Theo van Walsum and Rashindra Manniesing and Aad van der Lugt and Wiro J Niessen}, + title = {Automated versus manual segmentation of atherosclerotic carotid plaque volume and components in {CTA}: associations with cardiovascular risk factors}, + journal = IJCVI, + year = {2012}, + volume = {28}, + pages = {877--887}, + doi = {10.1007/s10554-011-9890-6}, + abstract = {The purpose of this study was to validate automated atherosclerotic plaque measurements in carotid arteries from CT angiography (CTA). We present an automated method (three initialization points are required) to measure plaque components within the carotid vessel wall in CTA. Plaque components (calcifications, fibrous tissue, lipids) are determined by different ranges of Hounsfield Unit values within the vessel wall. On CTA scans of 40 symptomatic patients with atherosclerotic plaque in the carotid artery automatically segmented plaque volume, calcified, fibrous and lipid percentages were 0.97 ± 0.51 cm3, 10 ± 11%, 63 ± 10% and 25 ± 5%; while manual measurements by first observer were 0.95 ± 0.60 cm3, 14 ± 16%, 63 ± 13% and 21 ± 9%, respectively and manual measurement by second observer were 1.05 ± 0.75 cm3, 11 ± 12%, 61 ± 11% and 27 ± 10%. In 90 datasets, significant associations were found between age, gender, hypercholesterolemia, diabetes, smoking and previous cerebrovascular disease and plaque features. For both automated and manual measurements, significant associations were found between: age and calcium and fibrous tissue percentage; gender and plaque volume and lipid percentage; diabetes and calcium, smoking and plaque volume; previous cerebrovascular disease and plaque volume. Significant associations found only by the automated method were between age and plaque volume, hypercholesterolemia and plaque volume and diabetes and fibrous tissue percentage. Significant association found only by the manual method was between previous cerebrovascular disease and percentage of fibrous tissue. Automated analysis of plaque composition in the carotid arteries is comparable with the manual analysis and has the potential to replace it.}, + file = {Vuka12.pdf:pdf\\Vuka12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {21614484}, + month = {5}, + gsid = {4492406858408739423}, + gscites = {28}, + ss_id = {3e95abdc1bec8622e45fc2258e27f31820754c7b}, + all_ss_ids = {['3e95abdc1bec8622e45fc2258e27f31820754c7b']}, +} + +@mastersthesis{Vyaw22, + author = {Sanyog Vyawahare}, + title = {Automated Cephalometric Analysis on Lateral Headplates for Orthodontic Diagnosis}, + abstract = {Cephalometric analysis is the process of placing anatomical landmark points on lateral headplate X-ray scans. Placing these points with precision is crucial as the angle and distance measurements required during treatment are calculated using these points. This project aimed to automate the process of cephalometric analysis on lateral headplates X-ray scans.
An object detection model followed by a heatmap regression was proposed to find the landmark locations. In total, 1000 images were acquired from the Radboud UMC, Department of Dentistry. Experiments with different network architecture, bounding box sizes, anchor optimisation methods, augmentations, and heatmap regression strategies were performed to converge the model resulting in low loss and high average precision and recall score. The best performing model was Cascade RCNN with a pre-trained ResNet101 as the backbone, without the additional heatmap regressor unit. The final network achieved a mean radial error (MRE) of 1.12 with a standard deviation (SD) of 0.81 and success detection rate (SDR) of 85.76%. The average precision with intersection over union (IOU) ranging 0.5:0.9 and step size increase of 0.05 was 0.857. The automated cephalometric analysis model will save time, provide objective results, and help the orthodontists and surgeons in initial screening and mid-treatment assessment.}, + file = {Vyaw22.pdf:pdf/Vyaw22.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2022}, + journal = {Master thesis}, +} + +@mastersthesis{Vyaw23, + author = {Vyawahare, Sanyog and Venkadesh, Kiran Vaidhya and Jacobs, Colin}, + title = {Automated segmentation of subsolid pulmonary nodules in CT scans using deep learning}, + abstract = {Lung cancer is the second most diagnosed cancer and is the leading cause of cancer-related deaths globally. A pulmonary nodule can turn into cancer, leading to fatal outcomes if left undetected. Compared to other types of pulmonary nodules, subsolid nodules (SSN) pose a higher risk of malignancy. Subsolid nodules can be categorized into two subtypes: ground-glass opacities (GGOs) or part-solid nodules (PSNs). The assessment of SSNs by physicians on cancer risk and the stage is highly dependent on the size and volume of the nodule. Therefore accurate segmentations are crucial for volumetric calculations when dealing with SSNs. Currently, semi-automated methods are deployed to segment the boundaries of SSNs. This requires a radiologist's manual inputs and fine-tuning and could lead to sub-optimal results. Furthermore, there is no study to date which focuses on evaluating the performance of deep learning in SSNs. Over the past decade, deep learning has made significant strides in medical imaging segmentation, and networks like nnUNet have demonstrated great potential in adapting to new datasets. In this research, nnUNet was used to build a fully-automated segmentation model. However, the successful application of the model requires a high-quality dataset with accurate annotations, particularly for the segmentation of SSNs, which has been an area of limited research. To address this, our research focused on creating a carefully curated dataset with annotations provided by an experienced radiologist using a dedicated lung screening workstation. The model achieved a Dice similarity coefficient of 83.3% for the GGOs and 77.6% & 76% for non-solid and solid core respectively for the PSNs on an external validation dataset. The model provides satisfactory segmentation results in a minimal time, without any external input. It is able to learn the behaviour of the semi-automated method to produce similar segmentation. The model has shown promising potential to generate accurate and objective segmentation without human input for the subsolid nodules.
The proposed model acts as a good benchmark in the segmentation of subsolid pulmonary nodules.}, + file = {Vyaw23.pdf:pdf\\Vyaw23.pdf:PDF}, + journal = {Master thesis}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2023}, +} + +@article{Waal15, + author = {{van der Waal}, Dani{\"{e}}lle and Emaus, Marleen J. and Bakker, Marije F. and {den Heeten}, Gerard J. and Karssemeijer, Nico and Pijnappel, Ruud M. and Veldhuis, Wouter B. and Verbeek, Andr{\'{e}} L M. and {van Gils}, Carla H. and Broeders, Mireille J M.}, + title = {Geographic variation in volumetric breast density between screening regions in the Netherlands}, + journal = ER, + year = {2015}, + volume = {25}, + number = {11}, + month = {11}, + pages = {3328--3337}, + doi = {10.1007/s00330-015-3742-z}, + url = {http://dx.doi.org/10.1007/s00330-015-3742-z}, + abstract = {Objectives: Differences in breast density between populations may explain part of the variation in regional breast cancer screening performance. This study aimed to determine whether regional differences in breast density distribution are present in the Dutch screening population. Methods: As part of the DENSE trial, mammographic density was measured using a fully-automated volumetric method. The regions in our study were based on the geographic coverage of 14 reading units representing a large part of the Netherlands. General linear models were used. Results: Four hundred eighty-five thousand and twenty-one screening participants with a median age of 60 years were included (2013-2014). The proportion of women with heterogeneously or extremely dense breasts ranged from 32.5 % to 45.7 % between regions. Mean percent dense volume varied between 6.51 % (95 % confidence interval [CI]: 6.46-6.55) and 7.68 % (95 % CI: 7.66-7.71). Age differences could not explain the variation. Socio-economic status (SES) was positively associated with volumetric density in all analyses (low SES: 6.95 % vs. high SES: 7.63 %; p-trend<0.0001), whereas a potential association between urbanisation and breast density only became apparent after SES adjustment. Conclusion: There appears to be geographic variation in mammographic density in the Netherlands, emphasizing the importance of including breast density as parameter in the evaluation of screening performance.}, + file = {Waal15.pdf:pdf\\Waal15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {26134996}, + gsid = {87862472582705823}, + gscites = {17}, + ss_id = {1ed7b8605b403640ecb3443ac8097d8cabf60fe0}, + all_ss_ids = {['1ed7b8605b403640ecb3443ac8097d8cabf60fe0']}, +} + +@article{Wal14a, + author = {van der Wal, S and Vaneker, M and Steegers, M and van Berkum B and Kox, M and van der Laak, J and van der Hoeven, J and Vissers, K and Scheffer, G J}, + title = {Lidocaine increases the anti-inflammatory cytokine IL-10 following mechanical ventilation in healthy mice}, + journal = ACTANASCA, + year = {2014}, + volume = {59}, + month = {8}, + pages = {47-55}, + doi = {10.1111/aas.12417}, + abstract = {Background: Mechanical ventilation (MV) induces an inflammatory response that may result in (acute) lung injury. Lidocaine, an amide local anesthetic, has anti-inflammatory properties in vitro and in vivo, possibly due to an attenuation of pro-inflammatory cytokines, intracellular adhesion molecule-1 (ICAM-1), and reduction of neutrophils influx. We hypothesized an attenuation of MV-induced inflammatory response with intravenously administered lidocaine.
Methods: Lidocaine (Lido) (2, 4, and 8 mg/kg/h) was intravenously administered during 4 h of MV with a tidal volume of 8 ml/kg, positive end expiratory pressure 1.5 cmH2O and FiO2 0.4. We used one ventilated control (CON) group receiving vehicle. After MV, mice were euthanized, and lungs and blood were immediately harvested, and cytokine levels and ICAM-1 levels were measured in plasma and lung homogenates. Pulmonary neutrophils influx was determined in LEDER-stained slices of lungs. Anesthetic need was determined by painful hind paw stimulation. Results: Lidocaine-treated animals (Lido 2, 4 and 8 mg/kg/h) showed higher interleukin (IL)-10 plasma levels compared to control animals. Lidocaine treatment with 8 mg/kg/h (Lido 8) resulted in higher IL-10 in lung homogenates. No differences were observed in pro-inflammatory cytokines, ICAM-1, and pulmonary influx between the different ventilated groups. Conclusions: Intravenously administered lidocaine increases levels of plasma IL-10 with infusion from 2, 4, and 8 mg/kg/h and pulmonary levels of IL-10 with 8 mg/kg/h in a murine mechanical ventilation model. Intravenously administered lidocaine appears to reduce anesthetic need in mice.}, + file = {Wal14a.pdf:pdf\\Wal14a.pdf:PDF}, + optnote = {DIAG}, + gsid = {16264230633057199082}, + gscites = {22}, +} + +@conference{Wals00, + author = {Th. van Walsum and B. van Ginneken and J. J. Staal and E. J. Vonken and M. A. Viergever}, + title = {Image{X}plorer ({iX}), an integrated {W}indows {NT} programming environment for rapid development of medical imaging applications}, + booktitle = ECR, + year = {2000}, + pages = {166}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wand15, + author = {Wanders, J.O.P. and Holland, K. and Veldhuis, W.B. and Mann, R.M. and Peeters, P.H.M. and van Gils, C.H. and Karssemeijer, N}, + title = {Effect of volumetric mammographic density on performance of a breast cancer screening program using full-field digital mammography}, + booktitle = ECR, + year = {2015}, + abstract = {Purpose: We examined to what extent mammographic density affects screening performance when using full field digital mammography (FFDM). + Methods and Materials: We collected a consecutive series of 69,874 FFDM examinations (2003-2009) from one screening unit of the Dutch biennial screening program (50-75 years). Volumetric mammographic density was automatically assessed with Volpara version 1.5.0 (Matakina, New Zealand). Recall and breast cancer detection information was obtained from the screening registration system. Interval cancers were identified through linkage with the Netherlands Cancer Registry. Within four density categories, comparable to ACR breast density categories, we determined screening performance measures and linear trends with a Chi Square linear trend test. + Results: 19.7% of the examinations was categorised as density category 1 ('almost entirely fatty'), 43.1% as category 2, 29.4% as category 3 and 7.7% as category 4 ('extremely dense'). In total 421 screen-detected and 150 interval tumours were identified. Cancer detection rates were 3.7‰, 6.4‰, 6.6‰ and 6.3‰ in categories 1 to 4 respectively (p=0.005). Interval cancer rates increased with increasing density categories: 0.7‰, 1.9‰, 3.0‰ and 4.5‰, respectively (p<0.001). As a result, the sensitivity (proportion of screen-detected tumours of screen-detected and interval tumours) was lower in higher density categories: 85.0%, 77.6%, 69.0% and 58.6% respectively (p<0.001).
The number of false positives was higher in women with dense breasts: 11.4\textperthousand{}, 14.1\textperthousand{}, 18.3\textperthousand{} and 28.6\textperthousand{} for categories 1 to 4, respectively (p< 0.001). + Conclusion: Also when FFDM is used in breast cancer screening, higher interval cancer and false-positive rates are observed in women with mammographically dense breasts.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wand15a, + author = {Wanders, Johanna O P and Holland, Katharina and Peeters, Petra H M and Karssemeijer, Nico and van Gils, Carla H}, + title = {Combined effect of dense and nondense breast volume on breast cancer risk}, + booktitle = {7th International Workshop on Breast Densitometry and Cancer Risk Assessment}, + year = {2015}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wand15b, + author = {Wanders, Johanna O P and Holland, Katharina and Peeters, Petra H M and Karssemeijer, Nico and van Gils, Carla H}, + title = {Volumetric breast density and the risk of screen detected and interval breast cancer}, + booktitle = {7th International Workshop on Breast Densitometry and Cancer Risk Assessment}, + year = {2015}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wand15c, + author = {Wanders, Johanna O P and Holland, Katharina and Veldhuis, Wouter B and Mann, Ritse M and Peeters, Petra H M and van Gils, Carla H and Karssemeijer, Nico}, + title = {Effect of volumetric mammographic density on performance of a breast cancer screening program using full-field digital mammography}, + booktitle = {7th International Workshop on Breast Densitometry and Cancer Risk Assessment}, + year = {2015}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wand16, + author = {Wanders, J. and Holland, K. and Peeters, P. and Karssemeijer, N. and {van Gils}, C.}, + title = {Volumetric Breast Density And The Risk Of Screen-Detected And Interval Breast Cancer}, + booktitle = {Annual conference of the International Agency for Research on Cancer}, + year = {2016}, + abstract = {Purpose: In light of breast density legislation and discussions about supplemental screening it is important to know not only one's risk of breast cancer, but particularly the risk of a tumor that is not detected through mammographic screening. We investigated the relationship between volumetric breast density and the risk of screen-detected and interval cancer within a digital mammography (DM) screening program. + Methods: Mammographic density was automatically assessed with Volpara version 1.5.0 (Matakina, New Zealand) on the first available digital mammogram of 43,211 women (50-75 years) participating in the Dutch biennial breast cancer screening program (2003-2009). Screen-detected and interval breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. We estimated risks of screen-detected and interval cancers in relation to breast density using multinomial logistic regression analysis (adjusted for age). No other confounders were available in this routine screening database. + Results: 413 screen-detected and 150 interval tumors were identified. Screen-detected breast cancer risk was significantly higher in the higher breast density categories compared to the lowest (OR: 1.65, 95% CI: 1.21-2.24, OR: 1.78, 95% CI: 1.29-2.47, OR: 1.69, 95% CI: 1.08-2.63, for density categories 2 to 4 respectively compared to 1).
Interval cancer risk increased with increasing breast density (OR: 2.45, 95% CI: 1.20-4.99, OR: 5.24, 95% CI: 2.59-10.59 and OR: 6.86, 95% CI: 3.12-15.11, for density categories 2 to 4 respectively compared to 1). The relationship with interval cancers was statistically significantly stronger than with screen-detected cancers (p<0.01) for density categories 3 and 4. + Conclusions: Although higher breast density is related to a higher risk of a screen-detected breast cancer, it is particularly strongly related to the risk of a breast cancer that is not detected through mammographic screening (interval cancer).}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Wand17, + author = {Wanders, Johanna O P and Holland, Katharina and Veldhuis, Wouter B and Mann, Ritse M and Pijnappel, Ruud M and Peeters, Petra H M and van Gils, Carla H and Karssemeijer, Nico}, + title = {Volumetric breast density affects performance of digital screening mammography}, + journal = BRECRT, + year = {2017}, + volume = {162}, + number = {1}, + pages = {95--103}, + doi = {10.1007/s10549-016-4090-7}, + url = {https://doi.org/10.1007%2Fs10549-016-4090-7}, + abstract = {To determine to what extent automatically measured volumetric mammographic density influences screening performance when using digital mammography (DM). We collected a consecutive series of 111,898 DM examinations (2003-2011) from one screening unit of the Dutch biennial screening program (age 50-75 years). Volumetric mammographic density was automatically assessed using Volpara. We determined screening performance measures for four density categories comparable to the American College of Radiology (ACR) breast density categories. Of all the examinations, 21.6% were categorized as density category 1 ('almost entirely fatty') and 41.5, 28.9, and 8.0% as category 2-4 ('extremely dense'), respectively. We identified 667 screen-detected and 234 interval cancers. Interval cancer rates were 0.7, 1.9, 2.9, and 4.4 \textperthousand and false positive rates were 11.2, 15.1, 18.2, and 23.8 \textperthousand for categories 1-4, respectively (both p-trend < 0.001). The screening sensitivity, calculated as the proportion of screen-detected among the total of screen-detected and interval tumors, was lower in higher density categories: 85.7, 77.6, 69.5, and 61.0% for categories 1-4, respectively (p-trend < 0.001). Volumetric mammographic density, automatically measured on digital mammograms, impacts screening performance measures along the same patterns as established with ACR breast density categories. Since measuring breast density fully automatically has much higher reproducibility than visual assessment, this automatic method could help with implementing density-based supplemental screening.}, + file = {:pdf/Wand17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28012087}, + month = {12}, + gsid = {1629763110736326637}, + gscites = {102}, + ss_id = {f450e015384d3f06fb1b1885536cdec1d634c785}, + all_ss_ids = {['f450e015384d3f06fb1b1885536cdec1d634c785']}, +} + +@article{Wand17a, + author = {Wanders, Johanna O.P. and Holland, Katharina and Karssemeijer, Nico and Peeters, Petra H.M. and Veldhuis, Wouter B. and Mann, Ritse M. 
and {van Gils}, Carla H.}, + title = {The effect of volumetric breast density on the risk of screen-detected and interval breast cancers: a cohort study}, + journal = BRECR, + year = {2017}, + volume = {19}, + number = {1}, + pages = {67}, + doi = {10.1186/s13058-017-0859-9}, + abstract = {Background In the light of the breast density legislation in the USA, it is important to know a woman's breast cancer risk, but particularly her risk of a tumor that is not detected through mammographic screening (interval cancer). Therefore, we examined the associations of automatically measured volumetric breast density with screen-detected and interval cancer risk, separately. + Methods Volumetric breast measures were assessed automatically using Volpara version 1.5.0 (Matakina, New Zealand) for the first available digital mammography (DM) examination of 52,814 women (age 50-75 years) participating in the Dutch biennial breast cancer screening program between 2003 and 2011. Breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. We excluded all screen-detected breast cancers diagnosed as a result of the first digital screening examination. During a median follow-up period of 4.2 (IQR 2.0-6.2) years, 523 women were diagnosed with breast cancer of which 299 were screen-detected and 224 were interval breast cancers. The associations between volumetric breast measures and breast cancer risk were determined using Cox proportional hazards analyses. + Results Percentage dense volume was found to be positively associated with both interval and screen-detected breast cancers (hazard ratio (HR) 8.37 (95% CI 4.34-16.17) and HR 1.39 (95% CI 0.82-2.36), respectively, for Volpara density grade category (VDG) 4 compared to VDG1 (p for heterogeneity < 0.001)). Dense volume (DV) was also found to be positively associated with both interval and screen-detected breast cancers (HR 4.92 (95% CI 2.98-8.12) and HR 2.30 (95% CI 1.39-3.80), respectively, for VDG-like category (C)4 compared to C1 (p for heterogeneity = 0.041)). The association between percentage dense volume categories and interval breast cancer risk (HR 8.37) was not significantly stronger than the association between absolute dense volume categories and interval breast cancer risk (HR 4.92). + Conclusions Our results suggest that both absolute dense volume and percentage dense volume are strong markers of breast cancer risk, but that they are even stronger markers for predicting the occurrence of tumors that are not detected during mammography breast cancer screening.}, + file = {:pdf/Wand17a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28583146}, + month = {6}, + gsid = {897339558877213208}, + gscites = {54}, + all_ss_ids = {['10123038b80006871abc7396810bed15bfa29387', 'd677e08922a1c646ef739f06bae679cc795ba362']}, +} + +@article{Wand18, + author = {Wanders, Johanna O. P. and van Gils, Carla H. and Karssemeijer, Nico and Holland, Katharina and Kallenberg, Michiel and Peeters, Petra H. M. and Nielsen, Mads and Lillholm, Martin}, + title = {The combined effect of mammographic texture and density on breast cancer risk: a cohort study}, + doi = {10.1186/s13058-018-0961-7}, + year = {2018}, + abstract = {Texture patterns have been shown to improve breast cancer risk segregation in addition to area-based mammographic density. The additional value of texture pattern scores on top of volumetric mammographic density measures in a large screening cohort has never been studied. 
Volumetric mammographic density and texture pattern scores were assessed automatically for the first available digital mammography (DM) screening examination of 51,400 women (50-75 years of age) participating in the Dutch biennial breast cancer screening program between 2003 and 2011. The texture assessment method was developed in a previous study and validated in the current study. Breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. All screen-detected breast cancers diagnosed at the first available digital screening examination were excluded. During a median follow-up period of 4.2 (interquartile range (IQR) 2.0-6.2) years, 301 women were diagnosed with breast cancer. The associations between texture pattern scores, volumetric breast density measures and breast cancer risk were determined using Cox proportional hazard analyses. Discriminatory performance was assessed using c-indices. The median age of the women at the time of the first available digital mammography examination was 56 years (IQR 51-63). Texture pattern scores were positively associated with breast cancer risk (hazard ratio (HR) 3.16 (95% CI 2.16-4.62) (p value for trend <0.001), for quartile (Q) 4 compared to Q1). The c-index of texture was 0.61 (95% CI 0.57-0.64). Dense volume and percentage dense volume showed positive associations with breast cancer risk (HR 1.85 (95% CI 1.32-2.59) (p value for trend <0.001) and HR 2.17 (95% CI 1.51-3.12) (p value for trend <0.001), respectively, for Q4 compared to Q1). When adding texture measures to models with dense volume or percentage dense volume, c-indices increased from 0.56 (95% CI 0.53-0.59) to 0.62 (95% CI 0.58-0.65) (p < 0.001) and from 0.58 (95% CI 0.54-0.61) to 0.60 (95% CI 0.57-0.63) (p = 0.054), respectively. Deep-learning-based texture pattern scores, measured automatically on digital mammograms, are associated with breast cancer risk, independently of volumetric mammographic density, and augment the capacity to discriminate between future breast cancer and non-breast cancer cases.}, + url = {http://dx.doi.org/10.1186/s13058-018-0961-7}, + file = {Wand18.pdf:pdf\Wand18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Breast Cancer Research}, + citation-count = {27}, + automatic = {yes}, + volume = {20}, +} + +@article{Wand22, + author = {Wanders, Alexander J. T. and Mees, Willem and Bun, Petra A.M. and Janssen, Natasja and Rodr\'{i}guez-Ruiz, Alejandro and Dalm\i\c{s}, Mehmet Ufuk and Karssemeijer, Nico and van Gils, Carla H. and Sechopoulos, Ioannis and Mann, Ritse M. 
and van Rooden, Cornelis Jan}, + title = {Interval Cancer Detection Using a Neural Network and Breast Density in Women with Negative Screening Mammograms}, + doi = {10.1148/radiol.210832}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.210832}, + file = {Wand22.pdf:pdf\Wand22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {24}, + automatic = {yes}, + pages = {269-275}, + volume = {303}, +} + +@conference{Wang12, + author = {Lei Wang and Bram Platel and Tatyana Ivanovskaya and Markus Harz and Horst Hahn}, + title = {Fully automatic breast segmentation in 3{D} breast {MRI}}, + booktitle = ISBI, + year = {2012}, + abstract = {In computer-aided diagnosis of breast MRI, a precise segmentation of the breast is often required as a fundamental step to facilitate further diagnostic tasks, e.g., breast density measurement, lesion detection and automatic reporting. In this work, a fully automatic method dedicated to breast segmentation is proposed, which comprises four major steps: sheet-like structures enhancement, pectoralis muscle boundary segmentation, breast-air boundary segmentation and breast extraction. To validate the proposed method, the segmented breast boundaries of 84 breast MR images, acquired in five different sites with variant imaging protocols, were compared to the manual segmentation. An average distance of 2.56mm with a standard deviation of 3.26mm was achieved.}, + file = {Wang12.pdf:pdf/Wang12.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + month = {5}, + gsid = {8233055784581323373}, + gscites = {50}, +} + +@inproceedings{Wang15, + author = {Lei Wang and Jan Strehlow and Jan Ruehaak and Florian Weiler and Yago Diez and Albert Gubern-M\'{e}rida and Susanne Diekmann and Hendrik Laue and Horst K. Hahn}, + booktitle = MI, + title = {A fast alignment method for breast {MRI} follow-up studies using automated breast segmentation and current-prior registration}, + doi = {10.1117/12.2082700}, + pages = {941334-941334-8}, + series = SPIE, + volume = {9413}, + abstract = {In breast cancer screening for high-risk women, follow-up magnetic resonance images (MRI) are acquired with a time interval ranging from several months up to a few years. Prior MRI studies may provide additional clinical value when examining the current one and thus have the potential to increase sensitivity and specificity of screening. To build a spatial correlation between suspicious findings in both current and prior studies, a reliable alignment method between follow-up studies is desirable. However, long time interval, different scanners and imaging protocols, and varying breast compression can result in a large deformation, which challenges the registration process. In this work, we present a fast and robust spatial alignment framework, which combines automated breast segmentation and current-prior registration techniques in a multi-level fashion. First, fully automatic breast segmentation is applied to extract the breast masks that are used to obtain an initial affine transform. Then, a non-rigid registration algorithm using normalized gradient fields as similarity measure together with curvature regularization is applied. A total of 29 subjects and 58 breast MR images were collected for performance assessment. 
To evaluate the global registration accuracy, the volume overlap and boundary surface distance metrics are calculated, resulting in an average Dice Similarity Coefficient (DSC) of 0.96 and root mean square distance (RMSD) of 1.64 mm. In addition, to measure local registration accuracy, for each subject a radiologist annotated 10 pairs of markers in the current and prior studies representing corresponding anatomical locations. The average distance error of marker pairs dropped from 67.37 mm to 10.86 mm after applying registration.}, + file = {Wang15.pdf:pdf\\Wang15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2015}, + month = {3}, +} + +@conference{Wang15a, + author = {L. Wang and A. Gubern-M\'{e}rida and O. Diaz and Y. Diez and R. M. Mann and S. Diekmann and F. Zohrer and H. Laue and J. Schwaab}, + title = {Automated detection of motion in breast {DCE-MRI} to assess study quality and prevent unnecessary call-backs}, + booktitle = {ECR}, + year = {2015}, + doi = {10.1594/ecr2015/C-1845}, + file = {Wang15a.pdf:pdf\\Wang15a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wang15b, + author = {L. Wang and A. Gubern-M\'{e}rida and S. D. Diekmann and R. M. Mann and H. O. Laue and H. K. Hahn}, + title = {Automatic Spatial Linking of Lesions in Breast MRI Follow-up Images}, + booktitle = RSNA, + year = {2015}, + abstract = {PURPOSE Automatically linking the lesions detected in breast MRI follow-up examinations is required for the development of a computer-aided diagnosis (CAD) system to quantify characteristic changes of the lesions. In this work, we develop a registration-based method that enables automatic linking of lesions detected in breast MRI follow-up studies. METHOD AND MATERIALS From 51 subjects participating in a MRI screening program, we collected 102 dynamic contrast enhanced MRI (DCE-MRI) images, forming 51 pairs of follow-up studies. Current and prior examinations were acquired in different scanners with a time interval of one year, using transversal and coronal views, respectively. One experienced radiologist manually placed 71 pairs of markers, indicating the center locations of 71 pairs of lesions found in both current and prior studies. Automatic lesion linking is achieved by registering current and prior MRI examinations. First, a motion correction algorithm is performed on both current and prior DCE-MRI. Then, fully automatic breast segmentation is applied on the current and prior pre-contrast images to extract breast masks, which are used to obtain an initial affine transform. Then, a non-rigid registration algorithm using normalized gradient fields as similarity measure together with curvature regularization is exploited to register the current and prior pre-contrast images. Since the follow-up scans may have inconsistent field of views, the registration only focuses on the segmented breast regions to enforce the alignment accuracy in breast areas, such that non-breast regions will not attract and influence the registration process. RESULTS Based on the deformation fields obtained by registration, markers labeling the lesions in the current image were transformed to the prior image frame, where the distance between the transformed markers and the markers originally labeled in prior images was computed. The average distance error was 9.6 ± 9.3 mm. CONCLUSION The proposed system is potentially applicable to automatically link the lesions detected in a CAD system to investigate the characteristic changes.
CLINICAL RELEVANCE/APPLICATION Visual assessment and comparison of characteristic change of the lesions in breast DCE-MRI follow-up exams is time consuming, and computer-aided lesion comparison may increase clinical effectiveness.}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wiel10, + author = {P.A. Wielopolski and P. Ciet and G.P. Krestin and M.H. Lequin and H. Tiddens and R. Manniesing}, + title = {Automated airway lumen segmentation and characterization for use in patients with tracheomalacya: a feasibility study}, + booktitle = ISMRM, + year = {2010}, + abstract = {Introduction: Tracheomalacia (TM) refers to a weakness of the trachea, frequently due to reduction and/or atrophy of the longitudinal elastic fibers of the pars membranacea, or impaired cartilage integrity, such that the airway is softer and more susceptible to collapse. Various degrees of tracheal collapse, and therefore airway obstruction, can result from this narrowing. Diagnosis of TM includes history and physical examination, e.g. expiratory manoeuvre and cough. Pulmonary function tests include the determination of flow limitations during expiration. However, endoscopy is the essential and invaluable tool and remains the gold standard method for evaluating the airways. From the imaging perspective, conventional radiographs have had a lower sensitivity (62%), and are used in conjunction with endoscopy. A CT-scan is the initial radiologic test in cases of suspect TM. MRI is another imaging possibility for evaluating central airway abnormalities, however, not often used because of severe drawbacks in an area with large magnetic susceptibility gradients, poor signal homogeneity and prone to low spatial resolution and motion artifacts. The majority of papers on diagnosis of TM considers imaging during end-inspiration and end-expiration. Nonetheless, more recently, some authors have demonstrated the importance of dynamic CINE acquisitions, indicating that dynamic-MRI studies during coughing may facilitate the evaluation of the collapsibility of the trachea in patients with TM. Purpose: The purpose of this work was to provide: first, a suitable acquisition scenario including static and dynamic 3D MRI sequences with sufficient temporal and spatial resolution to provide good morphological information and visualization of dynamic events in the central airways and, secondly, to provide the means for an automatic analysis program suitable to segment the airway lumen and a dynamic evaluation of cross-sectional areas of the central airways down to the 2nd generation branching. Materials and Methods: 10 healthy adult volunteers between 18 and 50 years of age were recruited as pilot group to optimize image acquisition for the static and dynamic portions of the MRI examination at 1.5T. Volunteers were trained to perform spirometry controlled breathing manoeuvres using a MRI compatible spirometer. Each subject was instructed additionally to perform forced expiration and cough maneuvers. 'Static' 13-second breath-hold scans covering the entire thoracic region were acquired at end-inspiration and end-expiration using a 3D rf-spoiled gradient echo sequence with TR/TE=1.2/0.5 ms, flip angle 2°, sagittal volume acquisition with isotropic (2.8)³ mm³ voxels. 'Dynamic'
scans were performed with the same scan parameters but covering only the central thorax (1/3 volume) with a temporal resolution of 500 ms per volume using the TRICKS (time resolved imaging of contrast kinetics) platform and accelerated imaging options. In-house developed software for segmentation and analysis was used. To initiate the time-domain analysis 3 seeds were placed corresponding to the beginning of the trachea and ends of the left and right primary bronchi to produce a centerline. The lumen is then segmented and a surface created to produce a unique reference frame to ease the time analysis (Figure 1). A cross-sectional analysis can then be performed to determine stenosis and distensibility parameters. Likewise, longitudinal and geometrical analyses (e.g., bifurcation angles and planarity) are generated. Results and Discussion: The software tracks the level of the branching automatically and provides a uniquely defined origin per data set thus enabling time comparisons in the same individual and across healthy and patients with TM. The analysis is completely automated (except for three seed points for lumen), providing as output any lumen based parameters that are desired and/or are clinically relevant. With optimized parameter settings the method successfully tracked the central airway paths in all volunteers. Conclusions: The results show that + robust and accurate segmentation of the airways is feasible with the acquired MRI datasets. This work is highly relevant for clinical research and practice: automated lumen segmentation in patients with TM (or other related disease of the airways) is the first step for automatic grading of airway malignancy.}, + file = {Wiel10.pdf:pdf\\Wiel10.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Wiel11, + author = {P.A. Wielopolski and P. Ciet and R. Manniesing and S. Lever and M. Lequin and G. Krestin and H.A. Tiddens}, + title = {Automated Airway Lumen Segmentation and Characterization in Patients with Tracheomalacia: a Feasibility Study}, + booktitle = ISMRM, + year = {2011}, + abstract = {Tracheomalacia is an excessive narrowing of the intra-thoracic part of the trachea such that the airway is softer and more susceptible to collapse. We demonstrate a suitable acquisition scenario using static and dynamic 3D MRI sequences with sufficient temporal and spatial resolution to provide good morphological information and visualization of dynamic events in the central airways. We evaluate a segmentation and dynamic analysis tool to compute the cross-sectional areas of the central airways down to the 2nd generation branching and detect airway narrowing during the respiratory manoeuvres. We conclude that cine-MRI is a feasible non-invasive radiation free alternative for bronchoscopy.}, + file = {Wiel11.pdf:pdf\\Wiel11.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, +} + +@inproceedings{Wild21, + author = {de Wilde, Bram and ten Broek, Richard PG and Huisman, Henkjan}, + booktitle = MIDL, + title = {Cine-MRI detection of abdominal adhesions with spatio-temporal deep learning}, + url = {https://arxiv.org/abs/2106.08094}, + abstract = {Adhesions are an important cause of chronic pain following abdominal surgery. Recent developments in abdominal cine-MRI have enabled the non-invasive diagnosis of adhesions. Adhesions are identified on cine-MRI by the absence of sliding motion during movement. Diagnosis and mapping of adhesions improves the management of patients with pain.
Detection of abdominal adhesions on cine-MRI is challenging from both a radiological and deep learning perspective. We focus on classifying presence or absence of adhesions in sagittal abdominal cine-MRI series. We experimented with spatio-temporal deep learning architectures centered around a ConvGRU architecture. A hybrid architecture comprising a ResNet followed by a ConvGRU model allows to classify a whole time-series. Compared to a stand-alone ResNet with a two time-point (inspiration/expiration) input, we show an increase in classification performance (AUROC) from 0.74 to 0.83 (p<0.05). Our full temporal classification approach adds only a small amount (5%) of parameters to the entire architecture, which may be useful for other medical imaging problems with a temporal dimension.}, + file = {:pdf/Wild21.pdf:PDF}, + optnote = {DIAG, INPRESS, RADIOLOGY}, + year = {2021}, + ss_id = {5b84c0f6d94934c501de0301707c50be97897122}, + all_ss_ids = {['5b84c0f6d94934c501de0301707c50be97897122']}, + gscites = {1}, +} + +@article{Wild23a, + author = {de Wilde, Bram and Joosten, Frank and Venderink, Wulphert and Davidse, Mirjam E. J. and Geurts, Juliette and Kruijt, Hanneke and Vermeulen, Afke and Martens, Bibi and Schyns, Maxime V. P. and Huige, Josephine C. B. M. and de Boer, Myrte C. and Tonino, Bart A. R. and Zandvoort, Herman J. A. and Lammert, Kirsti and Parviainen, Helka and Vuorinen, Aino-Maija and Syvaranta, Suvi and Vogels, Ruben R. M. and Prins, Wiesje and Coppola, Andrea and Bossa, Nancy and ten Broek, Richard P. G. and Huisman, Henkjan}, + title = {Inter-and Intra-Observer Variability and the Effect of Experience in Cine-MRI for Adhesion Detection}, + doi = {https://doi.org/10.3390/jimaging9030055}, + number = {3}, + pages = {55}, + volume = {9}, + journal = {Journal of Imaging}, + optnote = {DIAG, RADIOLOGY}, + publisher = {MDPI}, + year = {2023}, + ss_id = {74612633c1b07655ed9a081d1540deb22a257430}, + all_ss_ids = {['74612633c1b07655ed9a081d1540deb22a257430']}, + gscites = {1}, +} + +@article{Wild23b, + author = {de Wilde, Bram and Saha, Anindo and ten Broek, Richard PG and Huisman, Henkjan}, + title = {Medical diffusion on a budget: textual inversion for medical image generation}, + journal = {arXiv:2303.13430}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {41579777836a07a1bd8b4d8593fcda7983b68e67}, + all_ss_ids = {['41579777836a07a1bd8b4d8593fcda7983b68e67']}, + gscites = {1}, +} + +@article{Wild23c, + author = {van den Beukel, Bastiaan A. W. and de Wilde, Bram and Joosten, Frank and van Goor, Harry and Venderink, Wulphert and Huisman, Henkjan J. and ten Broek, Richard P. G.}, + title = {Quantifiable Measures of Abdominal Wall Motion for Quality Assessment of Cine-MRI Slices in Detection of Abdominal Adhesions}, + doi = {10.3390/jimaging9050092}, + number = {5}, + volume = {9}, + journal = {Journal of Imaging}, + optnote = {DIAG, RADIOLOGY}, + publisher = {MDPI}, + year = {2023}, + ss_id = {31c44bb386353af1a2dc76bd533eff7433424c45}, + all_ss_ids = {['31c44bb386353af1a2dc76bd533eff7433424c45']}, + gscites = {0}, +} + +@article{Wink15a, + author = {Winkler Wille, Mathilde M. and van Riel, Sarah J. and Saghir, Zaigham and Dirksen, Asger and Pedersen, Jesper Holst and Jacobs, Colin and Thomsen, Laura Hohw\"u and Scholten, Ernst Th and Skovgaard, Lene T. 
and van Ginneken, Bram}, + title = {Predictive Accuracy of the {PanCan} Lung Cancer Risk Prediction Model - External Validation based on {CT} from the {Danish} {Lung} {Cancer} {Screening} {Trial}}, + journal = ER, + year = {2015}, + volume = {25}, + pages = {3093--3099}, + doi = {10.1007/s00330-015-3689-0}, + abstract = {Lung cancer risk models should be externally validated to test generalizability and clinical usefulness. The Danish Lung Cancer Screening Trial (DLCST) is a population-based prospective cohort study, used to assess the discriminative performances of the PanCan models. From the DLCST database, 1,152 nodules from 718 participants were included. Parsimonious and full PanCan risk prediction models were applied to DLCST data, and also coefficients of the model were recalculated using DLCST data. Receiver operating characteristics (ROC) curves and area under the curve (AUC) were used to evaluate risk discrimination. AUCs of 0.826-0.870 were found for DLCST data based on PanCan risk prediction models. In the DLCST, age and family history were significant predictors (p = 0.001 and p = 0.013). Female sex was not confirmed to be associated with higher risk of lung cancer; in fact opposing effects of sex were observed in the two cohorts. Thus, female sex appeared to lower the risk (p = 0.047 and p = 0.040) in the DLCST. High risk discrimination was validated in the DLCST cohort, mainly determined by nodule size. Age and family history of lung cancer were significant predictors and could be included in the parsimonious model. Sex appears to be a less useful predictor. • High accuracy in logistic modelling for lung cancer risk stratification of nodules. • Lung cancer risk prediction is primarily based on size of pulmonary nodules. • Nodule spiculation, age and family history of lung cancer are significant predictors. • Sex does not appear to be a useful risk predictor.}, + file = {Wink15a.pdf:pdf\\Wink15a.pdf:PDF}, + optnote = {DIAG}, + pmid = {25764091}, + month = {3}, + gsid = {5188005511580764305}, + gscites = {65}, + ss_id = {8b3995db48fa7ab9539e3aaa72eb71cba8e68802}, + all_ss_ids = {['8b3995db48fa7ab9539e3aaa72eb71cba8e68802']}, +} + +@mastersthesis{Wink19, + author = {Jim Winkens}, + title = {Out-of-distribution detection for computational pathology with multi-head ensembles}, + year = {2019}, + abstract = {Distribution shift is a common phenomenon in real-life safety-critical situations that is detrimental to the performance of current deep learning models. Constructing a principled method to detect such a shift is critical to building safe and predictable automated image analysis pipelines for medical imaging. In this work, we interpret the problem of out-of-distribution detection for computational pathology in an epistemic uncertainty estimation setting. Given the difficulty of obtaining a sufficiently multi-modal predictive distribution for uncertainty estimation, we present a multiple heads topology in CNNs as a highly diverse ensembling method. We empirically prove that the method exhibits greater representational diversity than various popular ensembling methods, such as MC dropout and Deep Ensembles. The fast gradient sign method is repurposed and we show that it separates the softmax scores of in-distribution samples and out-of-distribution samples.
We identify the challenges for this task in the domain of computational pathology and extensively demonstrate the effectiveness of the proposed method on two clinically relevant tasks in this field.}, + file = {:pdf/Wink19.pdf:PDF}, + optnote = {DIAG}, + school = {University of Amsterdam}, + journal = {Master thesis}, +} + +@article{Wink21, + author = {David J. Winkel and Angela Tong and Bin Lou and Ali Kamen and Dorin Comaniciu and Jonathan A. Disselhorst and Alejandro Rodr{\'{\i}}guez-Ruiz and Henkjan Huisman and Dieter Szolar and Ivan Shabunin and Moon Hyung Choi and Pengyi Xing and Tobias Penzkofer and Robert Grimm and Heinrich von Busch and Daniel T. Boll}, + title = {A Novel Deep Learning Based Computer-Aided Diagnosis System Improves the Accuracy and Efficiency of Radiologists in Reading Biparametric Magnetic Resonance Images of the Prostate}, + journal = IR, + year = {2021}, + volume = {Publish Ahead of Print}, + month = {mar}, + doi = {10.1097/rli.0000000000000780}, + url = {https://doi.org/10.1097%2Frli.0000000000000780}, + publisher = {Ovid Technologies (Wolters Kluwer Health)}, + ss_id = {24ecb4a2f6f46d782edc6a04517480b78d896ac1}, + all_ss_ids = {['24ecb4a2f6f46d782edc6a04517480b78d896ac1']}, + gscites = {18}, +} + +@article{Wink21a, + author = {van Winkel, Suzanne L. and Rodr\'{i}guez-Ruiz, Alejandro and Appelman, Linda and Gubern-M\'{e}rida, Albert and Karssemeijer, Nico and Teuwen, Jonas and Wanders, Alexander J. T. and Sechopoulos, Ioannis and Mann, Ritse M.}, + title = {Impact of artificial intelligence support on accuracy and reading time in breast tomosynthesis image interpretation: a multi-reader multi-case study}, + doi = {10.1007/s00330-021-07992-w}, + year = {2021}, + abstract = {Abstract + Objectives + Digital breast tomosynthesis (DBT) increases sensitivity of mammography and is increasingly implemented in breast cancer screening. However, the large volume of images increases the risk of reading errors and reading time. This study aims to investigate whether the accuracy of breast radiologists reading wide-angle DBT increases with the aid of an artificial intelligence (AI) support system. Also, the impact on reading time was assessed and the stand-alone performance of the AI system in the detection of malignancies was compared to the average radiologist. + + Methods + A multi-reader multi-case study was performed with 240 bilateral DBT exams (71 breasts with cancer lesions, 70 breasts with benign findings, 339 normal breasts). Exams were interpreted by 18 radiologists, with and without AI support, providing cancer suspicion scores per breast. Using AI support, radiologists were shown examination-based and region-based cancer likelihood scores. Area under the receiver operating characteristic curve (AUC) and reading time per exam were compared between reading conditions using mixed-models analysis of variance. + + Results + On average, the AUC was higher using AI support (0.863 vs 0.833; p = 0.0025). Using AI support, reading time per DBT exam was reduced (p < 0.001) from 41 (95% CI = 39-42 s) to 36 s (95% CI = 35-37 s). The AUC of the stand-alone AI system was non-inferior to the AUC of the average radiologist (+0.007, p = 0.8115). + + Conclusions + Radiologists improved their cancer detection and reduced reading time when evaluating DBT examinations using an AI reading support system.
+ + Key Points + * Radiologists improved their cancer detection accuracy in digital breast tomosynthesis (DBT) when using an AI system for support, while simultaneously reducing reading time. + * The stand-alone breast cancer detection performance of an AI system is non-inferior to the average performance of radiologists for reading digital breast tomosynthesis exams. + * The use of an AI support system could make advanced and more reliable imaging techniques more accessible and could allow for more cost-effective breast screening programs with DBT. + }, + url = {http://dx.doi.org/10.1007/s00330-021-07992-w}, + file = {Wink21a.pdf:pdf\Wink21a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + citation-count = {33}, + automatic = {yes}, + pages = {8682-8691}, + volume = {31}, +} + +@article{Witt10, + author = {Rianne Wittenberg and Joost F Peters and Jeroen J Sonnemans and Mathias Prokop and Cornelia M Schaefer-Prokop}, + title = {Computer-assisted detection of pulmonary embolism: evaluation of pulmonary {CT} angiograms performed in an on-call setting}, + journal = ER, + year = {2010}, + volume = {20}, + pages = {801--806}, + doi = {10.1007/s00330-009-1628-7}, + abstract = {The purpose of the study was to assess the stand-alone performance of computer-assisted detection (CAD) for evaluation of pulmonary CT angiograms (CTPA) performed in an on-call setting.In this institutional review board-approved study, we retrospectively included 292 consecutive CTPA performed during night shifts and weekends over a period of 16 months. Original reports were compared with a dedicated CAD system for pulmonary emboli (PE). A reference standard for the presence of PE was established using independent evaluation by two readers and consultation of a third experienced radiologist in discordant cases.Original reports had described 225 negative studies and 67 positive studies for PE. CAD found PE in seven patients originally reported as negative but identified by independent evaluation: emboli were located in segmental (n = 2) and subsegmental arteries (n = 5). The negative predictive value (NPV) of the CAD algorithm was 92\% (44/48). On average there were 4.7 false positives (FP) per examination (median 2, range 0-42). In 72\% of studies or=10 FP.CAD identified small emboli originally missed under clinical conditions and found 93\% of the isolated subsegmental emboli. On average there were 4.7 FP per examination.}, + file = {Witt10.pdf:pdf\\Witt10.pdf:PDF}, + optnote = {DIAG}, + number = {4}, + pmid = {19862534}, + month = {10}, + gsid = {17426830576290188674}, + gscites = {32}, +} + +@article{Witt11, + author = {Rianne Wittenberg and Joost F Peters and Jeroen J Sonnemans and Shandra Bipat and Mathias Prokop and Cornelia M Schaefer-Prokop}, + title = {Impact of image quality on the performance of computer-aided detection of pulmonary embolism}, + journal = AJR, + year = {2011}, + volume = {196}, + pages = {95--101}, + doi = {10.2214/AJR.09.4165}, + abstract = {The purpose of this article is to assess the relationship between CT image quality and the number and type of false-positive (FP) findings found by a prototype computer-aided detection (CAD) algorithm for automatic detection of pulmonary embolism (PE).This retrospective study included 278 subjects (138 men and 140 women; mean age, 57 years; range, 18-88 years) who underwent consecutive CT pulmonary angiographies performed during off hours. Twenty-four percent (68/278) of studies were reported as positive for PE. 
CAD findings were classified as true-positive or FP by two independent readers and, in cases of discordance, by a third radiologist. Each FP result was classified according to underlying cause. The degree of vascular enhancement, image noise, motion artifacts, overall quality, and presence of underlying lung disease were rated on a 4- or 5-point scale. Chi-square tests and t tests were used to test significance of differences.The mean number of FP CAD findings was 4.7 (median, 2) per examination. Most were caused by veins (30\% [389/1,298]) or airspace consolidations (22\% [286/1,298]). There was a significant positive association between the number of FP findings and image noise, motion artifacts, low vascular enhancement, low overall quality, and the extent of underlying disease. On a per-embolism basis, sensitivity decreased from 70.6\% (214/303) for scans with zero to five FP findings, to 62.3\% (33/53) for scans with six to 10 FP findings, to 60\% (12/20) for scans with more than 10 FP findings.There is a strong association between CT image quality and the number of FP findings indicated by a CAD algorithm for the detection of PE.}, + file = {Witt11.pdf:pdf\\Witt11.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {21178052}, + month = {1}, + gsid = {9265698792924388926}, + gscites = {24}, + ss_id = {afda5ff4f4b03cd95f1f52ac8886397e46b5300d}, + all_ss_ids = {['afda5ff4f4b03cd95f1f52ac8886397e46b5300d']}, +} + +@article{Witt12, + author = {Wittenberg, Rianne and Berger, Ferco H. and Peters, Joost F. and Weber, Michael and van Hoorn, Francois and Beenen, Ludo F M. and van Doorn, Martine M A C. and van Schuppen, Joost and Zijlstra, Ijsbrand A J. and Prokop, Mathias and Schaefer-Prokop, Cornelia M.}, + title = {Acute pulmonary embolism: effect of a computer-assisted detection prototype on diagnosis--an observer study}, + journal = Radiology, + year = {2012}, + volume = {262}, + pages = {305--313}, + doi = {10.1148/radiol.11110372}, + abstract = {PURPOSE: To assess the effect of a computer-assisted detection (CAD) prototype on observer performance for detection of acute pulmonary embolism (PE) with computed tomographic (CT) pulmonary angiography. MATERIALS AND METHODS: In this institutional review board-approved retrospective study, six observers with varying experience evaluated 158 PE-negative and 51 PE-positive CT pulmonary angiographic studies (mean age, 57 years; 111 women, 98 men) obtained consecutively during nights and weekends. Observers were asked to determine the presence of PE and to rank their diagnostic confidence without CAD and subsequently with CAD within a single reading session. Reading time was separately measured for both readings. Reader data were compared with an independent standard established by two readers, with a third in case of discordant results. Statistical evaluation was performed on a per-patient basis by using logistic regression for repeated measurements and Pearson correlation. RESULTS: With CAD, there was a significant increase in readers' sensitivity (P = .014) without loss of specificity (P = .853) on a per-patient basis. CAD assisted the readers in correcting an initial false-negative diagnosis in 15 cases, with the most proximal embolus at the segmental level in four cases and at the subsegmental level in 11 cases. In eight cases, readers accepted false-positive CAD candidate lesions on scans negative for PE, and in one case, a reader dismissed a true-positive finding. Reading time was extended by a mean of 22 seconds with the use of CAD. 
CONCLUSION: At the expense of increased reading time, CAD has the potential to increase reader sensitivity for detecting segmental and subsegmental PE without significant loss of specificity. © RSNA, 2012.}, + file = {Witt12.pdf:pdf\\Witt12.pdf:PDF}, + optnote = {DIAG}, + number = {1}, + pmid = {22190659}, + month = {1}, + gsid = {10444596397988543040}, + gscites = {41}, + ss_id = {2f3bebbb947b31dc3f81d4ac5f7e1414e3172f25}, + all_ss_ids = {['2f3bebbb947b31dc3f81d4ac5f7e1414e3172f25']}, +} + +@article{Witt12a, + author = {Wittenberg, R. and van Vliet, J. W. and Ghaye, B. and Peters, J. F. and Schaefer-Prokop, C. M. and Coche, E.}, + title = {Comparison of automated 4-chamber cardiac views versus axial views for measuring right ventricular enlargement in patients with suspected pulmonary embolism}, + journal = EJR, + year = {2012}, + volume = {81}, + pages = {218--222}, + doi = {10.1016/j.ejrad.2011.01.041}, + abstract = {Compare the right ventricle to left ventricle (RV/LV) diameter ratio obtained from axial pulmonary CT angiograms (CTPA) with those derived from automatically generated 4-chamber (4-CH) reformats in patients with suspected pulmonary embolism (PE). In this institutional review board-approved study we included 120 consecutive non ECG-gated CTPA from 3 institutions (mean age 60 ± 16 years; 71 women). Twenty 64-slice CTPA with PE and 20 without PE were selected per institution. For each patient the RV/LV diameter ratio was obtained from both axial CTPA images and automatically generated 4-CH reformats. Measurements were performed twice in two separated sessions by 2 experienced radiologists and 2 residents. The differences between the measurements on both views were evaluated. The 4-CH view was successfully obtained in 113 patients. The mean axial and 4-CH diameter ratios were comparable for three of the four readers (p=0.56, p=0.13, p=0.08). Although the mean diameters (1.0 and 1.03 respectively) for one resident were significantly different (p=0.013), the difference of 0.03 seems negligible in clinical routine. Three readers achieved equally high intra-reader agreements with both measurements (ICCs of 0.94, 0.95 and 0.96), while one reader showed a different variability with ICCs of 0.96 for the axial view and 0.91 for the 4-CH view. The inter-reader agreement was equally high for both measurement types with ICCs of 0.95 and 0.94, respectively. In patients with suspected PE, RV/LV diameters ratio can be measured with the same reproducibility and accuracy using an automatically generated 4-CH view compared to the axial view.}, + file = {Witt12a.pdf:pdf\\Witt12a.pdf:PDF}, + optnote = {DIAG}, + number = {2}, + pmid = {21315530}, + month = {2}, + ss_id = {2f10f440accedc9e5cac27b58f606939d41b9c0b}, + all_ss_ids = {['2f10f440accedc9e5cac27b58f606939d41b9c0b']}, + gscites = {17}, +} + +@article{Witt12b, + author = {Wittenberg, R. and Peters, J. F. and Weber, M. and Lely, R. J. and Cobben, L P J. and Prokop, M. and Schaefer-Prokop, C. M.}, + title = {Stand-alone performance of a computer-assisted detection prototype for detection of acute pulmonary embolism: a multi-institutional comparison}, + journal = BJR, + year = {2012}, + volume = {85}, + pages = {758--764}, + doi = {10.1259/bjr/26769569}, + abstract = {Objective: To assess whether the performance of a computer-assisted detection (CAD) algorithm for acute pulmonary embolism (PE) differs in pulmonary CT angiographies acquired at various institutions.
Methods: In this retrospective study, we included 40 consecutive scans with and 40 without PE from 3 institutions (n=240) using 64-slice scanners made by different manufacturers (General Electric; Philips; Siemens). CAD markers were classified as true or false positive (FP) using independent evaluation by two readers and consultation of a third chest radiologist in discordant cases. Image quality parameters were subjectively scored using 4/5-point scales. Image noise and vascular enhancement were measured. Statistical analysis was done to correlate image quality of the three institutions with CAD stand-alone performance. Results: Patient groups were comparable with respect to age (p=0.22), accompanying lung disease (p=0.12) and inpatient/outpatient ratio (p=0.67). The sensitivity was 100\% (34/34), 97\% (37/38) and 92\% (33/36), and the specificity was 18\% (8/44), 15\% (6/41) and 13\% (5/39). Neither significantly differed between the institutions (p=0.21 and p=0.820, respectively). The mean number of FP findings (4.5, 6.2 and 3.7) significantly varied (p=0.02 and p=0.03), but median numbers (2, 3 and 3) were comparable. Image quality parameters were significantly associated with the number of FP findings (p<0.05) but not with sensitivity. After correcting for noise and vascular enhancement, the number of FPs did not significantly differ between the three institutions (p=0.43). Conclusions: CAD stand-alone performance is independent of scanner type but strongly related to image quality and thus scanning protocols.}, + file = {Witt12b.pdf:pdf\\Witt12b.pdf:PDF}, + optnote = {DIAG}, + number = {1014}, + pmid = {22167514}, + month = {6}, + gsid = {10609484598656160943}, + gscites = {14}, + ss_id = {55531229b8103969912f6d12933d5c2b30c616e2}, + all_ss_ids = {['55531229b8103969912f6d12933d5c2b30c616e2']}, +} + +@phdthesis{Witt12c, + author = {R. Wittenberg}, + title = {Computer-Assisted Detection of Acute Pulmonary Embolism}, + year = {2012}, + url = {http://igitur-archive.library.uu.nl/dissertations/2012-0229-201217/UUindex.html}, + abstract = {Pulmonary embolism (PE) is a common and often potentially life-threatening disease. Severe morbidity and mortality can be prevented if PE is diagnosed and treated in time. CT pulmonary angiography (CTPA) has emerged as an effective method for diagnosis of PE, but interpreting a CTPA is complicated by the size of the image data set, the various PE look-alikes and also human factors such as experience and attention span. In this thesis, we therefore evaluated the performance of a computer-assisted detection (CAD) prototype for the detection of pulmonary embolism (PE) to help radiologists detect PE. The first part of this thesis summarises the latest technical development of CT scanners, the new challenges for protocol design associated with these new techniques and the options offered by new processing and evaluation tools. In the second part the standalone performance of a CAD prototype is described in 278 CTPA scans that had been consecutively obtained during night and weekend shifts. The CAD performance was compared with a reference standard obtained by consensus of three readers and with the reports made after acquisition of the clinical scans. The results suggested that CAD can help to find subtle isolated segmental and subsegmental emboli and to exclude PE. Furthermore, we found that image quality parameters and thus scanning protocols affect the standalone performance more than scanner type, using 240 CTPA scans in 3 different hospitals.
In the third part of this thesis we tested if CAD has indeed an impact on the performance of readers. Per study set up we used 6 readers with varying radiological experience to analyse the effect of CAD when CAD was used as a second or as a concurrent reader. Using CAD as a second reader in 209 CTPA scans, we showed an increase in readers' confidence and sensitivity without loss of specificity. Similarly as seen in the second part, we found the greatest impact of CAD on the detection of small segmental and subsegmental emboli. However, these results came with an increase of reading time, which is undesirable in clinical practice. When we tested CAD as a concurrent reader in 196 CTPA scans, we found that readers' sensitivity improved or remained at the same level and readers' confidence increased compared to the readings without CAD, but with a significant shorter reading time. In summary, CAD has the potential to aid readers to find patients with isolated segmental and/or subsegmental emboli, to reassure readers in excluding PE and to decrease reading time. However, before making CAD available for broader application in clinical practice, its impact on readers' decisions and patient outcome needs to be studied in a larger prospective trial.}, + copromotor = {C. M. Schaefer-Prokop}, + file = {Witt12c.pdf:pdf/Witt12c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + promotor = {W. M. Prokop and J. S. Lam\'{e}ris}, + school = {Utrecht University}, + journal = {PhD thesis}, +} + +@article{Xie20, + author = {Xie, Weiyi and Jacobs, Colin and Charbonnier, Jean-Paul and van Ginneken, Bram}, + title = {Relational Modeling for Robust and Efficient Pulmonary Lobe Segmentation in {CT} Scans}, + doi = {10.1109/TMI.2020.2995108}, + issue = {8}, + pages = {2664--2675}, + volume = {39}, + abstract = {Pulmonary lobe segmentation in computed tomography scans is essential for regional assessment of pulmonary diseases. Recent works based on convolution neural networks have achieved good performance for this task. However, they are still limited in capturing structured relationships due to the nature of convolution. The shape of the pulmonary lobes affect each other and their borders relate to the appearance of other structures, such as vessels, airways, and the pleural wall. We argue that such structural relationships play a critical role in the accurate delineation of pulmonary lobes when the lungs are affected by diseases such as COVID-19 or COPD. In this paper, we propose a relational approach (RTSU-Net) that leverages structured relationships by introducing a novel non-local neural network module. The proposed module learns both visual and geometric relationships among all convolution features to produce self-attention weights. With a limited amount of training data available from COVID- 19 subjects, we initially train and validate RTSU-Net on a cohort of 5000 subjects from the COPDGene study (4000 for training and 1000 for evaluation). Using models pre-trained on COPDGene, we apply transfer learning to retrain and evaluate RTSU-Net on 470 COVID-19 suspects (370 for retraining and 100 for evaluation). 
Experimental results show that RTSU-Net outperforms three baselines and performs robustly on cases with severe lung infection due to COVID-19.}, + file = {Xie20.pdf:pdf\\Xie20.pdf:PDF}, + journal = TMI, + optnote = {DIAG, RADIOLOGY}, + algorithm = {https://grand-challenge.org/algorithms/pulmonary-lobe-segmentation/}, + pmid = {32730216}, + year = {2020}, + gsid = {13494064903459246916}, + gscites = {91}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/221569}, + all_ss_ids = {['32a8259a781ea7255621b0cbdeecfa8d03d7e0bd', 'a114f0f6dbeb11d47d0736a1ece21cbf4be27fb5', '2e571724830cb8ca6e8dbe9cc1f92fdcfc517ec4']}, +} + +@inproceedings{Xie21, + author = {Xie, Weiyi and Jacobs, Colin and van Ginneken, Bram}, + booktitle = MIDL, + year = {2021}, + title = {Deep Clustering Activation Maps for Emphysema Subtyping}, + url = {https://openreview.net/pdf?id=pOFGaVQeXAk}, + file = {Xie21.pdf:pdf\\Xie21.pdf:PDF}, + optnote = {DIAG, INPRESS, RADIOLOGY}, + ss_id = {29595323a6789fec907a39c4de1ad56b75c74630}, + all_ss_ids = {['29595323a6789fec907a39c4de1ad56b75c74630']}, + gscites = {0}, +} + +@article{Xie21a, + author = {Xie, Weiyi and Jacobs, Colin and van Ginneken, Bram}, + title = {Dense regression activation maps for lesion segmentation in {CT} scans of {COVID-19} patients}, + journal = {arXiv:2105.11748}, + year = {2021}, + abstract = {Automatic lesion segmentation on thoracic CT enables rapid quantitative analysis of lung involvement in COVID-19 infections. However, obtaining a large amount of voxel-level annotations for training segmentation networks is prohibitively expensive. Therefore, we propose a weakly-supervised segmentation method based on dense regression activation maps (dRAMs). Most weakly-supervised segmentation approaches exploit class activation maps (CAMs) to localize objects. However, because CAMs were trained for classification, they do not align precisely with the object segmentations. Instead, we produce high-resolution activation maps using dense features from a segmentation network that was trained to estimate a per-lobe lesion percentage. In this way, the network can exploit knowledge regarding the required lesion volume. In addition, we propose an attention neural network module to refine dRAMs, optimized together with the main regression task. We evaluated our algorithm on 90 subjects. Results show our method achieved 70.2\% Dice coefficient, substantially outperforming the CAM-based baseline at 48.6\%.}, + optnote = {DIAG}, +} + +@article{Xie23, + author = {Xie, Weiyi and Jacobs, Colin and Charbonnier, Jean-Paul and van Ginneken, Bram}, + title = {Dense regression activation maps for lesion segmentation in CT scans of COVID-19 patients}, + doi = {10.1016/j.media.2023.102771}, + pages = {102771}, + abstract = {Automatic lesion segmentation on thoracic CT enables rapid quantitative analysis of lung involvement in COVID-19 infections. However, obtaining a large amount of voxel-level annotations for training segmentation networks is prohibitively expensive. Therefore, we propose a weakly-supervised segmentation method based on dense regression activation maps (dRAMs). Most weakly-supervised segmentation approaches exploit class activation maps (CAMs) to localize objects. However, because CAMs were trained for classification, they do not align precisely with the object segmentations. Instead, we produce high-resolution activation maps using dense features from a segmentation network that was trained to estimate a per-lobe lesion percentage. 
In this way, the network can exploit knowledge regarding the required lesion volume. In addition, we propose an attention neural network module to refine dRAMs, optimized together with the main regression task. We evaluated our algorithm on 90 subjects. Results show our method achieved 70.2% Dice coefficient, substantially outperforming the CAM-based baseline at 48.6%. We published our source code at https://github.com/DIAGNijmegen/bodyct-dram.}, + file = {Xie23.pdf:pdf\\Xie23.pdf:PDF}, + journal = MIA, + optnote = {DIAG}, + pmid = {36848720}, + code = {https://github.com/DIAGNijmegen/bodyct-dram}, + year = {2023}, + volume = {86}, + ss_id = {b0255ad2c81689cfd14ec8573780400a5fbf7b99}, + all_ss_ids = {['9a749224752f101c29f177f941bb5c967855db27', 'b0255ad2c81689cfd14ec8573780400a5fbf7b99']}, + gscites = {2}, +} + +@phdthesis{Xie23a, + author = {Weiyi Xie}, + title = {Deep Learning for Treatment Planning in Chronic Obstructive Pulmonary Diseases}, + url = {https://repository.ubn.ru.nl/bitstream/handle/2066/294845/294845.pdf}, + abstract = {In Chapter 1, we introduced chronic obstructive pulmonary disease (COPD) and gave background information on COPD diagnosis and treatment planning. We described the role of quantitative CT analysis in COPD treatment planning. Furthermore, we provided a short history of image analysis, from applying low-level image processing to deep learning-based CT analysis, explaining the reason behind deep learning prosperity on the road to being data-driven. + In Chapter 2, we presented a novel method using relational two-stage convolution neural networks for segmenting pulmonary lobes in CT images. The proposed method uses a non-local neural network to capture visual and geometric correspondence between high-level convolution features, which represents the relationships between objects and object parts. Our results demonstrate that learning feature correspondence improves the lobe segmentation performance substantially compared with the baseline on the COPD and the COVID-19 data set. + In Chapter 3, we presented a method for labeling segmental airways given a segmented airway tree. First, we train a convolution neural network to extract features for representing airway branches. Then, these features are iteratively enriched in a graph neural network by collecting information from neighbors, where the graph is based on the airway tree connectivity. Furthermore, we leverage positional information in our graph neural network, where the position of each branch is encoded by its topological distance to a set of anchor branches. As a result, the learned features are structure- and position-aware, contributing to substantially improved branch classification results compared with methods that use only convolution features or standard graph neural networks. + In Chapter 4, we proposed a novel weakly-supervised segmentation framework trained end-to-end, using only image-level supervision. We show that this approach can produce high-resolution segmentation maps without voxel-level annotations. The proposed method substantially outperforms other weakly-supervised methods, although a gap with the fully-supervised performance remains. Our method trained a segmentation network to predict per-image lesion percentage. We made this training possible by proposing an interval regression loss, given only the upper and lower bound of the target percentage, not the exact percentage as supervision. Furthermore, we stabilized the regression training using equivariant regularization.
In the refinement process, we proposed an attention neural network module that updated activation maps in one location using nearby activations, acting similar to random walkers, and seeded regional growth in standard post-processing pipelines, yet ours is trained end-to-end. + In Chapter 5, we expanded on the method outlined in Chapter 4 to predict emphysema subtypes. Our proposed algorithm generates high-resolution emphysema segmentation maps to aid in interpreting the prediction process, offering an improved model interpretability compared to the baseline. To predict both subtypes together, we employ the overlapping loss to ensure that each voxel is only assigned to one subtype (centrilobular or paraseptal). We also use low-attenuation areas in the lung (LAA-950) as visual cues in regression training, providing the network with localized information. Our approach generates categorical visual scores, estimated emphysema percentages, and high-resolution segmentation maps for both centrilobular and paraseptal subtypes, making it more versatile than the baseline approach. + Finally, in Chapter 6, we reflected on this thesis's main findings and contributions. We also analyzed the advances and impact in the field and the existing limitations of the proposed methods. Additionally, we provided a future outlook for research opportunities in the field of deep learning for medical image analysis.}, + copromotor = {C. Jacobs}, + file = {Xie23a.pdf:pdf\\Xie23a.pdf:PDF}, + optnote = {DIAG}, + promotor = {B. van Ginneken}, + school = {Radboud University, Nijmegen}, + year = {2023}, + journal = {PhD thesis}, +} + +@article{Xie23b, + author = {Weiyi Xie and Colin Jacobs and Jean-Paul Charbonnier and Dirk Jan Slebos and Bram van Ginneken}, + title = {Emphysema subtyping on thoracic computed tomography scans using deep neural networks}, + journal = {Scientific Reports}, + volume = {13}, + year = {2023}, + doi = {10.1038/s41598-023-40116-6}, + pages = {14147}, + ss_id = {3c7c167e0619911a29ce6082372100fbbc5ca7af}, + all_ss_ids = {['3c7c167e0619911a29ce6082372100fbbc5ca7af']}, + gscites = {0}, +} + +@article{Yaka10, + author = {Yakar, Derya and Hambrock, Thomas and Huisman, Henkjan and Hulsbergen-van de Kaa, Christina A. and van Lin, Emile and Vergunst, Henk and Hoeks, Caroline M A. and van Oort, Inge M. and Witjes, J Alfred and Barentsz, Jelle O. and F\"utterer, Jurgen J.}, + title = {Feasibility of {3T} dynamic contrast-enhanced magnetic resonance-guided biopsy in localizing local recurrence of prostate cancer after external beam radiation therapy}, + journal = IR, + year = {2010}, + volume = {45}, + pages = {121--125}, + doi = {10.1097/RLI.0b013e3181c7bcda}, + abstract = {The objective of this study was to assess the feasibility of the combination of magnetic resonance (MR)-guided biopsy (MRGB) and diagnostic 3T MR imaging in the localization of local recurrence of prostate cancer (PCa) after external beam radiation therapy (EBRT). Twenty-four consecutive men with biochemical failure suspected of local recurrence after initial EBRT were enrolled prospectively in this study. All patients underwent a diagnostic 3T MR examination of the prostate. T2-weighted and dynamic contrast-enhanced MR images (DCE-MRI) were acquired. Two radiologists evaluated the MR images in consensus for tumor suspicious regions (TSRs) for local recurrence. Subsequently, these TSRs were biopsied under MR-guidance and histopathologically evaluated for the presence of recurrent PCa.
Descriptive statistical analysis was applied. Tissue sampling was successful in all patients and all TSRs. The positive predictive value on a per patient basis was 75\% (15/20) and on a per TSR basis 68\% (26/38). The median number of biopsies taken per patient was 3, and the duration of an MRGB session was 31 minutes. No intervention-related complications occurred. The combination of MRGB and diagnostic MR imaging of the prostate was a feasible technique to localize PCa recurrence after EBRT using a low number of cores in a clinically acceptable time.}, + file = {Yaka10.pdf:pdf\\Yaka10.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + number = {3}, + pmid = {20065860}, + month = {3}, + gsid = {511205715423392373}, + gscites = {66}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/88584}, + ss_id = {45bced603af6c4b11db85018291705262a98768f}, + all_ss_ids = {['45bced603af6c4b11db85018291705262a98768f']}, +} + +@article{Yaka11c, + author = {Yakar, Derya and Heijmink, Stijn W T P J. and Hulsbergen-van de Kaa, Christina A. and Huisman, Henkjan and Barentsz, Jelle O. and F\"utterer, Jurgen J. and Scheenen, Tom W J.}, + title = {Initial results of 3-dimensional {1H}-magnetic resonance spectroscopic imaging in the localization of prostate cancer at 3 {Tesla}: should we use an endorectal coil?}, + journal = IR, + year = {2011}, + volume = {46}, + pages = {301--306}, + doi = {10.1097/RLI.0b013e3182007503}, + abstract = {PURPOSE: The purpose of this study was to compare the diagnostic performance of 3 Tesla, 3-dimensional (3D) magnetic resonance spectroscopic imaging (MRSI) in the localization of prostate cancer (PCa) with and without the use of an endorectal coil (ERC). MATERIALS AND METHODS: Our prospective study was approved by the institutional review board, and written informed consent was obtained from all patients. Between October 2004 and January 2006, 18 patients with histologically proven PCa on biopsy and scheduled for radical prostatectomy were included and underwent 3D-MRSI with and without an ERC. The prostate was divided into 14 regions of interest (ROIs). Four readers independently rated (on a 5-point scale) their confidence that cancer was present in each of these ROIs. These findings were correlated with whole-mount prostatectomy specimens. Areas under the receiver-operating characteristic curve were determined. A difference with a P < 0.05 was considered significant. RESULTS: A total of 504 ROIs were rated for the presence and absence of PCa. Localization of PCa with MRSI with the use of an ERC had a significantly higher area under the receiver-operating characteristic curve (0.68) than MRSI without the use of an ERC (0.63) (P = 0.015).
CONCLUSION: The use of an ERC in 3D MRSI in localizing PCa at 3 Tesla slightly but significantly increased the localization performance compared with not using an ERC.}, + file = {Yaka11c.pdf:pdf\\Yaka11c.pdf:PDF}, + optnote = {BioMR, DIAG, MAGIC, RADIOLOGY}, + number = {5}, + pmid = {21217527}, + month = {5}, + gsid = {11125890377300683611}, + gscites = {30}, + ss_id = {51c17b6d2b1fcd46fef6ceaa5569e39a951cb8a6}, + all_ss_ids = {['51c17b6d2b1fcd46fef6ceaa5569e39a951cb8a6']}, +} + +@article{Yaka12, + author = {Derya Yakar and Oscar A Debats and Joyce G R Bomers and Martijn G Schouten and Pieter C Vos and Emile van Lin and Jurgen J F\"utterer and Jelle O Barentsz}, + title = {Predictive value of {MRI} in the localization, staging, volume estimation, assessment of aggressiveness, and guidance of radiotherapy and biopsies in prostate cancer}, + journal = JMRI, + year = {2012}, + volume = {35}, + pages = {20--31}, + doi = {10.1002/jmri.22790}, + abstract = {Multiparametric magnetic resonance imaging (MRI) has the potential of being the ideal prostate cancer (PCa) assessment tool. Information gathered with multiparametric MRI can serve therapy choice, guidance of interventions, and treatments. The purpose of this review is to discuss the potential role of multiparametric MRI in focal therapy with respect to patient selection and directing (robot-guided) biopsies and intensity-modulated radiation therapy (IMRT). Multiparametric MRI is a versatile and promising technique. It appears to be the best available imaging technique at the moment in localizing, staging (primary as well as recurrent disease, and local as well as distant disease), determining aggressiveness, and volume of PCa. However, larger study populations in multicenter settings have to confirm these promising results. 
However, before such studies can be performed more research is needed in order to achieve standardized imaging protocols.}, + file = {Yaka12.pdf:pdf\\Yaka12.pdf:PDF}, + optnote = {DIAG, MAGIC, RADIOLOGY}, + number = {1}, + pmid = {22174000}, + month = {12}, +} + +@article{Youn21, + author = {Youn, Seo Yeon and Choi, Moon Hyung and Kim, Dong Hwan and Lee, Young Joon and Huisman, Henkjan and Johnson, Evan and Penzkofer, Tobias and Shabunin, Ivan and Winkel, David Jean and Xing, Pengyi and Szolar, Dieter and Grimm, Robert and von Busch, Heinrich and Son, Yohan and Lou, Bin and Kamen, Ali}, + title = {Detection and PI-RADS classification of focal lesions in prostate MRI: Performance comparison between a deep learning-based algorithm (DLA) and radiologists with various levels of experience}, + doi = {10.1016/j.ejrad.2021.109894}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2021.109894}, + file = {Youn21.pdf:pdf\Youn21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + citation-count = {17}, + automatic = {yes}, + pages = {109894}, + volume = {142}, +} + +@article{Yous17, + author = {Yousaf-Khan, Uraujh and van der Aalst, Carlijn and de Jong, Pim A and Heuvelmans, Marjolein and Scholten, Ernst and Walter, Joan and Nackaerts, Kristiaan and Groen, Harry and Vliegenthart, Rozemarijn and Ten Haaf, Kevin and Oudkerk, Matthijs and de Koning, Harry}, + title = {Risk stratification based on screening history: the NELSON lung cancer screening study}, + journal = Thorax, + year = {2017}, + volume = {72}, + issue = {9}, + pages = {819--824}, + doi = {10.1136/thoraxjnl-2016-209892}, + abstract = {Debate about the optimal lung cancer screening strategy is ongoing. In this study, previous screening history of the Dutch-Belgian Lung Cancer Screening trial (NELSON) is investigated on if it predicts the screening outcome (test result and lung cancer risk) of the final screening round. 15 792 participants were randomised (1:1) of which 7900 randomised into a screening group. CT screening took place at baseline, and after 1, 2 and 2.5 years. Initially, three screening outcomes were possible: negative, indeterminate or positive scan result. Probability for screening outcome in the fourth round was calculated for subgroups of participants. Based on results of the first three rounds, three subgroups were identified: (1) those with exclusively negative results (n=3856; 73.0%); (2) those with >=1 indeterminate result, but never a positive result (n=1342; 25.5%); and (3) with >=1 positive result (n=81; 1.5%). Group 1 had the highest probability for having a negative scan result in round 4 (97.2% vs 94.8% and 90.1%, respectively, p<0.001), and the lowest risk for detecting lung cancer in round 4 (0.6% vs 1.6%, p=0.001). 'Smoked pack-years' and 'screening history' significantly predicted the fourth round test result. The third round results implied that the risk for detecting lung cancer (after an interval of 2.5 years) was 0.6% for those with negative results compared with 3.7% of those with indeterminate results. Previous CT lung cancer screening results provides an opportunity for further risk stratifications of those who undergo lung cancer screening. Results, ISRCTN63545820.}, + file = {Yous17.pdf:pdf\\Yous17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28360223}, + month = {3}, +} + +@article{Yous21, + author = {Yousif, Mustafa and van Diest, Paul J. 
and Laurinavicius, Arvydas and Rimm, David and van der Laak, Jeroen and Madabhushi, Anant and Schnitt, Stuart and Pantanowitz, Liron}, + title = {Artificial intelligence applied to breast pathology}, + doi = {10.1007/s00428-021-03213-3}, + year = {2021}, + abstract = {The convergence of digital pathology and computer vision is increasingly enabling computers to perform tasks performed by humans. As a result, artificial intelligence (AI) is having an astoundingly positive effect on the field of pathology, including breast pathology. Research using machine learning and the development of algorithms that learn patterns from labeled digital data based on "deep learning" neural networks and feature-engineered approaches to analyze histology images have recently provided promising results. Thus far, image analysis and more complex AI-based tools have demonstrated excellent success performing tasks such as the quantification of breast biomarkers and Ki67, mitosis detection, lymph node metastasis recognition, tissue segmentation for diagnosing breast carcinoma, prognostication, computational assessment of tumor-infiltrating lymphocytes, and prediction of molecular expression as well as treatment response and benefit of therapy from routine H&E images. This review critically examines the literature regarding these applications of AI in the area of breast pathology.}, + url = {http://dx.doi.org/10.1007/s00428-021-03213-3}, + file = {Yous21.pdf:pdf\Yous21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Virchows Archiv}, + citation-count = {13}, + automatic = {yes}, + pages = {191-209}, + volume = {480}, +} + +@inproceedings{Yu20, + author = {Xin Yu and Bin Lou and Bibo Shi and David Winkel and Nacim Arrahmane and Mamadou Diallo and Tongbai Meng and Heinrich von Busch and Robert Grimm and Berthold Kiefer and Dorin Comaniciu and Ali Kamen and Henkjan Huisman and Andrew Rosenkrantz and Tobias Penzkofer and Ivan Shabunin and Moon Hyung Choi and Qingsong Yang and Dieter Szolar}, + title = {False Positive Reduction Using Multiscale Contextual Features for Prostate Cancer Detection in Multi-Parametric {MRI} Scans}, + booktitle = ISBI, + year = {2020}, + publisher = {{IEEE}}, + month = {apr}, + doi = {10.1109/isbi45749.2020.9098338}, + abstract = {Prostate cancer (PCa) is the most prevalent and one of the leading causes of cancer death among men. Multi-parametric MRI (mp-MRI) is a prominent diagnostic scan, which could help in avoiding unnecessary biopsies for men screened for PCa. Artificial intelligence (AI) systems could help radiologists to be more accurate and consistent in diagnosing clinically significant cancer from mp-MRI scans. Lack of specificity has been identified recently as one of weak points of such assistance systems. In this paper, we propose a novel false positive reduction network to be added to the overall detection system to further analyze lesion candidates. The new network utilizes multiscale 2D image stacks of these candidates to discriminate between true and false positive detections. We trained and validated our network on a dataset with 2170 cases from seven different institutions and tested it on a separate independent dataset with 243 cases. 
With the proposed model, we achieved area under curve (AUC) of 0.876 on discriminating between true and false positive detected lesions and improved the AUC from 0.825 to 0.867 on overall identification of clinically significant cases.}, + ss_id = {af4eb854f4bb71a4007bdea42ccd6c79b2c70984}, + all_ss_ids = {['af4eb854f4bb71a4007bdea42ccd6c79b2c70984']}, + gscites = {24}, +} + +@conference{Zaid14, + author = {Zaidi, A. and Khalid, N. and Philipsen, R. and van Ginneken, B. and Khowaja, S. and Khan, A.}, + title = {Symptomatic screening and computer-aided radiography for active-case finding of tuberculosis: a prediction model for {TB} case detection}, + booktitle = {45th World Conference on Lung Health}, + year = {2014}, + abstract = {Background: Scale-up of rapid tuberculosis (TB) diagnostics through GeneXpert MTB/Rif has supported active-case finding for TB in high burden countries and is being utilized to increase case-notification as part of the TB Reach initiative. However, the high cost per test necessitates investigation of screening approaches that can better rationalize the use of GeneXpert. The aim of this study was to investigate predictive accuracy and validation of a prediction model based on symptomatic screening and computer-aided detection (CAD) radiography compared with GeneXpert MTB/Rif for TB case detection in a high TB burden setting. Methods: Screening for TB was carried out at private Family Practitioner clinics in three low-income towns of Karachi, Pakistan. Suspects for TB were identified on the basis of the presence of cough, fever, hemoptysis, weight loss and night-sweats and were referred for chest X-ray (CXR). All CXRs were analyzed by CAD4TB v3.07 (Diagnostic Image Analysis Group, Nijmegen, The Netherlands), a CAD system developed for TB diagnosis. This system computes an abnormality score (0-100) by analyzing the shape, symmetry and texture of the lung fields. GeneXpert testing was carried out on all cases where good quality sputum samples could be obtained. Results: 324 consecutive cases referred for CXR and with sputum samples were recruited into the study. Prediction models were constructed using logistic regressions with TB detection as a binary outcome variable and sequentially adding CAD4TB scores, demographics and symptomatic screening as explanatory variables. The final prediction model was constructed using backwards stepwise Akaike's Information Criteria multiple logistic regression and included CAD4TB scores (OR 1.08, 95% CI: 1.04-1.12), cough >2 weeks (OR 3.10, 95% CI: 1.09-5.51), age (OR 0.97, 95% CI: 0.95-0.98) and gender (OR 1.01, 95% CI: 0.73-1.38). The area under receiver operator curve (AUC) for the model was 0.87 (95% CI: 0.83-0.91). The AUC of a split-set cross validation model for assessing internal validation was 0.84 (95% CI: 0.78-0.89). The model was appropriately calibrated (Hosmer-Lemeshow chi-square 8.99, p-value 0.45). Conclusion: Combining CAD4TB scores with patient demographics and symptomatic screening data offers high predictive accuracy for TB.
Multi-center studies are required for external validation of the model in order to provide appropriate evidence for its use in screening in high TB burden settings.}, + optnote = {DIAG}, +} + +@article{Zaid18, + author = {Zaidi, Syed Mohammad Asad and Habib, Shifa Salman and van Ginneken, Bram and Ferrand, Rashida Abbas and Creswell, Jacob and Khowaja, Saira and Khan, Aamir}, + title = {Evaluation of the diagnostic accuracy of Computer-Aided Detection of tuberculosis on Chest radiography among private sector patients in {P}akistan}, + journal = NATSCIREP, + year = {2018}, + volume = {8}, + issue = {1}, + month = {8}, + pages = {12339}, + doi = {10.1038/s41598-018-30810-1}, + abstract = {The introduction of digital CXR with automated computer-aided interpretation has given impetus to the role of CXR in TB screening, particularly in low resource, high-burden settings. The aim of this study was to evaluate the diagnostic accuracy of CAD4TB as a screening tool, implemented in the private sector in Karachi, Pakistan. This study analyzed retrospective data from CAD4TB and Xpert MTB/RIF testing carried out at two private TB treatment and diagnostic centers in Karachi. Sensitivity, specificity, and potential Xperts saved were computed and the receiver operator characteristic curves were constructed for four different models of CAD4TB. A total of 6,845 individuals with presumptive TB were enrolled in the study, 15.2% of which had an MTB+ve result on Xpert. A high sensitivity (range 65.8-97.3%) and NPV (range 93.1-98.4%) were recorded for CAD4TB. The area under the ROC curve (AUC) for CAD4TB was 0.79. CAD4TB with patient demographics (age and gender) gave an AUC of 0.83. CAD4TB offered high diagnostic accuracy. In low resource settings, CAD4TB, as a triage tool, could minimize use of Xpert. Using CAD4TB in combination with age and gender data enhanced the performance of the software. Variations in demographic information generate different individual risk probabilities for the same CAD4TB scores.}, + file = {Zaid18.pdf:pdf\\Zaid18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {30120345}, + gsid = {14712131535144362806}, + gscites = {45}, + all_ss_ids = {['ebb0d21c567909e31be5d43d72c8b3020f2708c9', 'fd2239f855b044b6ebe9fac5e0e01951bc9eddb4']}, +} + +@inproceedings{Zanj18, + author = {Zanjani, Farhad Ghazvinian and Zinger, Svitlana and Bejnordi, Babak Ehteshami and van der Laak, Jeroen A W M and de With, Peter H. N.}, + title = {Stain normalization of histopathology images using generative adversarial networks}, + doi = {10.1109/isbi.2018.8363641}, + year = {2018}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/ISBI.2018.8363641}, + file = {Zanj18.pdf:pdf\Zanj18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)}, + citation-count = {70}, + automatic = {yes}, +} + +@conference{Zeel19, + author = {Harm van Zeeland and James Meakin and Bart Liefers and Cristina Gonz\'{a}lez-Gonzalo and Akshayaa Vaidyanathan and Bram van Ginneken and Klaver, Caroline C W and S\'{a}nchez, Clara I.}, + booktitle = ARVO, + title = {{EyeNED} workstation: Development of a multi-modal vendor-independent application for annotation, spatial alignment and analysis of retinal images}, + abstract = {Purpose: + Researchers and specialists in the field of ophthalmology currently rely on suboptimal vendor-specific software solutions for viewing and annotating retinal images.
Our goal was to develop a fully-featured vendor-independent application that allows researchers and specialists to visualize multi-modal retinal images, perform spatial alignment and annotations, and review outputs of artificial intelligence (AI) algorithms. + + Methods: + The application consists of a web-based front-end that allows users to analyze baseline and follow-up images in a multi-modal viewer. It communicates with a back-end interface for grader authentication, loading and storing of images and annotation data. Several types of annotation techniques are available, ranging from image-level classification to point-based and region-based lesion-level annotations. + + The user can select color fundus (CF) images, optical coherence tomography (OCT) volumes, infrared (IR) and autofluorescence (AF) images to be shown simultaneously in the viewer. Spatial alignment of the different modalities can be performed using an integrated affine registration method by clicking on corresponding landmarks, after which a synchronized cursor will appear. After several graders have annotated lesions, the application can be used to compare these and create a consensus grading. + + Results: + The application was used by graders and researchers in the EyeNED research group. Region-based annotations of geographic atrophy were made for 313 studies containing 488 CF images and 68 OCT images; and of drusen in 100 OCT b-scans. Semi-automatic annotation of the area of central retinal atrophy in Stargardt disease was performed for 67 AF images. Point-based annotation was carried out on lesions in 50 CF images of diabetic retinopathy patients. The multimodal viewing and localisation of lesions was perceived as particularly helpful in the grading of lesions and consensus discussions. + + Conclusions: + A software solution has been developed to assist researchers and specialists to view and annotate retinal images. The application was successfully used for annotating lesions in various imaging modalities, facilitating the grading of images in large studies and the collection of annotations for AI solutions.}, + optnote = {DIAG, RADIOLOGY}, + year = {2019}, + gsid = {5177328248453722349}, + gscites = {1}, +} + +@conference{Zels12, + author = {J.C.M. van Zelst and R.D.M. Mus and T. Tan and N. Karssemeijer and R.M. Mann}, + title = {Feasibility of automated 3D breast ultrasound scanning in screening of women with high risk}, + booktitle = ECR, + year = {2013}, + abstract = {Purpose: Automated 3D-breast ultrasound (ABVS) is investigated as a six-monthly addition to annual breast cancer screening with MRI+mammography (MM) in high-risk women (LTR > 50%). ABVS, an inexpensive radiation-free technique, allows more frequent screening and temporal comparison. This study assesses effects of additional ABVS examinations at baseline. Methods and Materials: The study population consists of 234 women in whom ABVS and MM were performed on the same day. All ABVS and MM examinations were read by one of 4 breast radiologists. The recall rate (RR), biopsy rate (BR), cancer detection rate (CDR), sensitivity and specificity of ABVS and MM screening were analysed. Results: Based upon MM, 28 patients were recalled for further examination (RR=12%). With ABVS 12 of these patients were also recalled, as well as 17 other women. Consequently, the RR increased to 45/234. Biopsies were deemed necessary in 21 patients after MM and increased to 26 with ABVS added, an increase from 9% to 11%.
17 additional ABVS findings were resolved with targeted ultrasound. In total 4 cancers were found by MM (CDR 1.7%, sensitivity 100%, specificity 89%). Two of these cancers were also detected by ABVS (CDR 0.9%, sensitivity 50%, specificity 88%). The two missed cancers were retrospectively visible, but misinterpreted due to post-operative scarring. Conclusion: Adding ABVS to high-risk MM screening increased RR and BR at baseline. Whether these negative effects are reduced when radiologists gain more experience and whether they are balanced by earlier detection of breast cancer due to the six-month interval of ABVS remain to be determined.}, + optnote = {DIAG}, + gsid = {13883022505501021105}, + gscites = {2}, +} + +@conference{Zels13, + author = {Jan van Zelst and Tao Tan and Bram Platel and Nico Karssemeijer and Ritse Mann}, + title = {Evaluation of spiculation and retraction patterns in coronal reconstructions in 3{D} Automated Breast Ultrasound (ABUS) improve differentiation between benign and malignant breast lesions}, + booktitle = RSNA, + year = {2013}, + abstract = {PURPOSE To investigate the value of coronal reconstructions of transversal 3D ABUS data in differentiation of benign from malignant breast lesions. METHOD AND MATERIALS This study received a waiver by the local ethics committee. For this reader study we obtained 96 3D ABUS cases with 37 malignant and 59 benign ultrasound guided core biopsied breast lesions. The localization of the lesions was done by a physician with 1 year 3D ABUS experience based on the primary radiology reports and biopsy results. The 3D ABUS view in which the lesion was best visible, was presented to 3 experienced breast radiologists with 2 years experience with 3D ABUS. They were first asked to detect and classify the most suspicious lesion in the view using the BIRADS lexicon and scoring system. A likelihood of malignancy (LOM) score between 0 and 100 was also requested. Thereafter the coronal reconstruction was shown and readers were asked to reassess the lesion. Spiculation and retraction in the coronal plane were scored on a five point scale (Spiculation and Retraction Severity Index (SRSI)). Subsequently LOM and BI-RADS scores could be adjusted. Az values for differentiation of benign from malignant lesions based on LOM scores were computed with DBM MRMC method. Pearson's correlation coefficient between the reassessed LOM on the coronal reconstructions and SRSI was calculated for all readers. RESULTS Three readers respectively pointed out 92%, 97% and 92% of 37 cancers as the most suspicious lesion in the 3D ABUS acquisition. After evaluation of the coronal reconstructions, detected malignant lesions were not downgraded to BI-RADS 2, however, 7 benign lesions were downgraded from BI-RADS 3 to BI-RADS 2. After reevaluation the discrimination between benign and malignant lesions significantly improved from 0.84 (95% CI 0.76-0.92) to 0.87 (95% CI 0.80-0.94) (p=0.02). SRSI scores correlated significantly with the reassessed LOM scores for all readers, r = 0.85, 0.75 and 0.75, respectively (p<0.001). CONCLUSION Coronal reconstructions in 3D ABUS significantly improve the differentiation between benign and malignant breast lesions, by providing new visual information on breast cancer spiculation and retraction.
CLINICAL RELEVANCE/APPLICATION Better differentiation between malignant and benign breast lesions may reduce false positive biopsies in evaluation of automated breast ultrasound}, + optnote = {DIAG, RADIOLOGY}, +} + +@article{Zels15, + author = {Van Zelst, Jan CM and Platel, Bram and Karssemeijer, Nico and Mann, Ritse M}, + title = {Multiplanar reconstructions of 3D automated breast ultrasound improve lesion differentiation by radiologists}, + journal = AR, + year = {2015}, + volume = {22}, + number = {12}, + pages = {1489--1496}, + doi = {10.1016/j.acra.2015.08.006}, + abstract = {RATIONALE AND OBJECTIVES: + To investigate the value of multiplanar reconstructions (MPRs) of automated three-dimensional (3D) breast ultrasound (ABUS) compared to transverse evaluation only, in differentiation of benign and malignant breast lesions. + + MATERIALS AND METHODS: + Five breast radiologists evaluated ABUS scans of 96 female patients with biopsy-proven abnormalities (36 malignant and 60 benign). They classified the most suspicious lesion based on the breast imaging reporting and data system (BI-RADS) lexicon using the transverse scans only. A likelihood-of-malignancy (LOM) score (0-100) and a BI-RADS final assessment were assigned. Thereafter, the MPR was provided and readers scored the cases again. In addition, they rated the presence of spiculation and retraction in the coronal plane on a five-point scale called Spiculation and Retraction Severity Index (SRSI). Reader performance was analyzed with receiver-operating characteristics analysis. + + RESULTS: + The area under the curve increased from 0.82 to 0.87 (P = .01) after readers were shown the reconstructed planes. The SRSI scores are highly correlated (Spearman's r) with the final LOM scores (range, r = 0.808-0.872) and DLOM scores (range, r = 0.525-0.836). Readers downgraded 3%-18% of the biopsied benign lesions to BI-RADS 2 after MPR evaluation. Inter-reader agreement for SRSI was substantial (intraclass correlation coefficient, 0.617). Inter-reader agreement of the BI-RADS final assessment improved from 0.367 to 0.536 after MPRs were read. + + CONCLUSIONS: + Full 3D evaluation of ABUS using MPR improves differentiation of breast lesions in comparison to evaluating only transverse planes. Results suggest that the added value of MPR might be related to visualization of spiculation and retraction patterns in the coronal reconstructions.}, + file = {Zels15.pdf:pdf\\Zels15.pdf:PDF}, + optnote = {DIAG}, + publisher = {Elsevier}, + month = {12}, + gsid = {5747981011507824307}, + gscites = {34}, + ss_id = {ff84e1c5bae04497d875a331da0120f8a337d226}, + all_ss_ids = {['ff84e1c5bae04497d875a331da0120f8a337d226']}, +} + +@article{Zels17a, + author = {van Zelst, Jan C M and Balkenhol, Maschenka and Tan, Tao and Rutten, Matthieu and Imhof-Tas, Mechli and Bult, Peter and Karssemeijer, Nico and Mann, Ritse M}, + title = {Sonographic Phenotypes of Molecular Subtypes of Invasive Ductal Cancer in Automated 3-D Breast Ultrasound}, + journal = UMB, + year = {2017}, + volume = {43}, + issue = {9}, + month = {9}, + pages = {1820--1828}, + doi = {10.1016/j.ultrasmedbio.2017.03.019}, + abstract = {Our aim was to investigate whether Breast Imaging Reporting and Data System-Ultrasound (BI-RADS-US) lexicon descriptors can be used as imaging biomarkers to differentiate molecular subtypes (MS) of invasive ductal carcinoma (IDC) in automated breast ultrasound (ABUS). 
We included 125 IDCs diagnosed between 2010 and 2014 and imaged with ABUS at two institutes retrospectively. IDCs were classified as luminal A or B, HER2 enriched or triple negative based on reports of histopathologic analysis of surgical specimens. Two breast radiologists characterized all IDCs using the BI-RADS-US lexicon and specific ABUS features. Univariate and multivariate analyses were performed. A multinomial logistic regression model was built to predict the MSs from the imaging characteristics. BI-RADS-US descriptor margins and the retraction phenomenon are significantly associated with MSs (both p < 0.001) in both univariate and multivariate analysis. Posterior acoustic features and spiculation pattern severity were only significantly associated in univariate analysis (p < 0.001). Luminal A IDCs tend to have more prominent retraction patterns than luminal B IDCs. HER2-enriched and triple-negative IDCs present significantly less retraction than the luminal subtypes. The mean accuracy of MS prediction was 0.406. Overall, several BI-RADS-US descriptors and the coronal retraction phenomenon and spiculation pattern are associated with MSs, but prediction of MSs on ABUS is limited.}, + file = {Zels17a.pdf:pdf\\Zels17a.pdf:PDF}, + optnote = {DIAG}, + pmid = {28576620}, + gsid = {9326086829770411776}, + gscites = {9}, + ss_id = {deda0fc0508e72e9cece3d27b74b0776e593d055}, + all_ss_ids = {['deda0fc0508e72e9cece3d27b74b0776e593d055']}, +} + +@article{Zels17b, + author = {van Zelst, Jan C M and Mus, Roel D M and Woldringh, Gwendolyn and Rutten, Matthieu J C M and Bult, Peter and Vreemann, Suzan and de Jong, Mathijn and Karssemeijer, Nico and Hoogerbrugge, Nicoline and Mann, Ritse M}, + title = {Surveillance of Women with the {BRCA1} or {BRCA2} Mutation by Using Biannual Automated Breast {US}, {MR} Imaging, and Mammography}, + journal = Radiology, + year = {2017}, + volume = {285}, + issue = {2}, + month = {11}, + pages = {376--388}, + doi = {10.1148/radiol.2017161218}, + abstract = {Purpose To evaluate a multimodal surveillance regimen including yearly full-field digital (FFD) mammography, dynamic contrast agent-enhanced (DCE) magnetic resonance (MR) imaging, and biannual automated breast (AB) ultrasonography (US) in women with BRCA1 and BRCA2 mutations. Materials and Methods This prospective multicenter trial enrolled 296 carriers of the BRCA mutation (153 BRCA1 and 128 BRCA2 carriers, and 15 women with first-degree untested relatives) between September 2010 and November 2012, with follow-up until November 2015. Participants underwent 2 years of intensified surveillance including biannual AB US, and routine yearly DCE MR imaging and FFD mammography. The surveillance performance for each modality and possible combinations were determined. Results Breast cancer was screening-detected in 16 women (age range, 33-58 years). Three interval cancers were detected by self-examination, all in carriers of the BRCA1 mutation under age 43 years. One cancer was detected in a carrier of the BRCA1 mutation with a palpable abnormality in the contralateral breast. One incidental breast cancer was detected in a prophylactic mastectomy specimen. 
Respectively, sensitivity of DCE MR imaging, FFD mammography, and AB US was 68.1% (14 of 21; 95% confidence interval [CI]: 42.9%, 85.8%), 37.2% (eight of 21; 95% CI: 19.8%, 58.7%), and 32.1% (seven of 21; 95% CI: 16.1%, 53.8%); specificity was 95.0% (643 of 682; 95% CI: 92.7%, 96.5%), 98.1% (638 of 652; 95% CI: 96.7%, 98.9%), and 95.1% (1030 of 1088; 95% CI: 93.5%, 96.3%); cancer detection rate was 2.0% (14 of 702), 1.2% (eight of 671), and 1.0% (seven of 711) per 100 women-years; and positive predictive value was 25.2% (14 of 54), 33.7% (nine of 23), and 9.5% (seven of 68). DCE MR imaging and FFD mammography combined yielded the highest sensitivity of 76.3% (16 of 21; 95% CI: 53.8%, 89.9%) and specificity of 93.6% (643 of 691; 95% CI: 91.3%, 95.3%). AB US did not depict additional cancers. FFD mammography yielded no additional cancers in women younger than 43 years, the mean age at diagnosis. In carriers of the BRCA2 mutation, sensitivity of FFD mammography with DCE MR imaging surveillance was 90.9% (10 of 11; 95% CI: 72.7%, 100%) and 60.0% (six of 10; 95% CI: 30.0%, 90.0%) in carriers of the BRCA1 mutation because of the high interval cancer rate in carriers of the BRCA1 mutation. Conclusion AB US may not be of added value to yearly FFD mammography and DCE MR imaging surveillance of carriers of the BRCA mutation. Study results suggest that carriers of the BRCA mutation younger than 40 years may not benefit from FFD mammography surveillance in addition to DCE MR imaging. (c) RSNA, 2017 Online supplemental material is available for this article.}, + file = {Zels17b.pdf:pdf\\Zels17b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {28609204}, + gsid = {3304914079633955853}, + gscites = {51}, + ss_id = {c2da52941ea09be5f3a56087d52454da941e4fb5}, + all_ss_ids = {['c2da52941ea09be5f3a56087d52454da941e4fb5']}, +} + +@article{Zels17c, + author = {van Zelst, J.C.M. and Tan, T. and Platel, B. and de Jong, M. and Steenbakkers, A. and Mourits, M. and Grivegnee, A. and Borelli, C. and Karssemeijer, N. and Mann, R.M.}, + title = {Improved cancer detection in automated breast ultrasound by radiologists using Computer Aided Detection}, + doi = {10.1016/j.ejrad.2017.01.021}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2017.01.021}, + file = {Zels17c.pdf:pdf\Zels17c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + citation-count = {42}, + automatic = {yes}, + pages = {54-59}, + volume = {89}, +} + +@article{Zels18, + author = {van Zelst, Jan C M and Tan, Tao and Clauser, Paola and Domingo, Angels and Dorrius, Monique D and Drieling, Daniel and Golatta, Michael and Gras, Francisca and de Jong, Mathijn and Pijnappel, Ruud and Rutten, Matthieu J C M and Karssemeijer, Nico and Mann, Ritse M}, + title = {Dedicated computer-aided detection software for automated {3D} breast ultrasound; an efficient tool for the radiologist in supplemental screening of women with dense breasts}, + journal = ER, + year = {2018}, + volume = {28}, + issue = {7}, + month = {7}, + pages = {2996--3006}, + doi = {10.1007/s00330-017-5280-3}, + abstract = {To determine the effect of computer-aided-detection (CAD) software for automated breast ultrasound (ABUS) on reading time (RT) and performance in screening for breast cancer. 
Unilateral ABUS examinations of 120 women with dense breasts were randomly selected from a multi-institutional archive of cases including 30 malignant (20/30 mammography-occult), 30 benign, and 60 normal cases with histopathological verification or >= 2 years of negative follow-up. Eight radiologists read once with (CAD-ABUS) and once without CAD (ABUS) with > 8 weeks between reading sessions. Readers provided a BI-RADS score and a level of suspiciousness (0-100). RT, sensitivity, specificity, PPV and area under the curve (AUC) were compared. Average RT was significantly shorter using CAD-ABUS (133.4 s/case, 95% CI 129.2-137.6) compared with ABUS (158.3 s/case, 95% CI 153.0-163.3) (p < 0.001). Sensitivity was 0.84 for CAD-ABUS (95% CI 0.79-0.89) and ABUS (95% CI 0.78-0.88) (p = 0.90). Three out of eight readers showed significantly higher specificity using CAD. Pooled specificity (0.71, 95% CI 0.68-0.75 vs. 0.67, 95% CI 0.64-0.70, p = 0.08) and PPV (0.50, 95% CI 0.45-0.55 vs. 0.44, 95% CI 0.39-0.49, p = 0.07) were higher in CAD-ABUS vs. ABUS, respectively, albeit not significantly. Pooled AUC for CAD-ABUS was comparable with ABUS (0.82 vs. 0.83, p = 0.53, respectively). CAD software for ABUS may decrease the time needed to screen for breast cancer without compromising the screening performance of radiologists. * ABUS with CAD software may speed up reading time without compromising radiologists' accuracy. * CAD software for ABUS might prevent non-detection of malignant breast lesions by radiologists. * Radiologists reading ABUS with CAD software might improve their specificity without losing sensitivity.}, + file = {:pdf/Zels18.pdf:PDF;:Zels18 - Dedicated computer-aided detection software for automated 3D breast ultrasound\; an efficient tool for the radiologist in supplemental screening of women with dense breasts..pdf:PDF}, + optnote = {DIAG}, + pmid = {29417251}, + gsid = {14669183418477071058}, + gscites = {51}, + ss_id = {c6b1c8590269a5789095eca5f28d49413045fd82}, + all_ss_ids = {['c6b1c8590269a5789095eca5f28d49413045fd82']}, +} + +@article{Zels18a, + author = {van Zelst, Jan C M and Vreemann, Suzan and Witt, Hans-Joerg and Gubern-Merida, Albert and Dorrius, Monique D and Duvivier, Katya and Lardenoije-Broker, Susanne and Lobbes, Marc B I and Loo, Claudette and Veldhuis, Wouter and Veltman, Jeroen and Drieling, Daniel and Karssemeijer, Nico and Mann, Ritse M}, + title = {Multireader Study on the Diagnostic Accuracy of Ultrafast Breast Magnetic Resonance Imaging for Breast Cancer Screening}, + journal = IR, + year = {2018}, + volume = {53}, + number = {10}, + month = {10}, + pages = {579-586}, + doi = {10.1097/RLI.0000000000000494}, + abstract = {Breast cancer screening using magnetic resonance imaging (MRI) has limited accessibility due to high costs of breast MRI. Ultrafast dynamic contrast-enhanced breast MRI can be acquired within 2 minutes. We aimed to assess whether screening performance of breast radiologist using an ultrafast breast MRI-only screening protocol is as good as performance using a full multiparametric diagnostic MRI protocol (FDP). The institutional review board approved this study, and waived the need for informed consent. Between January 2012 and June 2014, 1791 consecutive breast cancer screening examinations from 954 women with a lifetime risk of more than 20% were prospectively collected. 
All women were scanned using a 3 T protocol interleaving ultrafast breast MRI acquisitions in a full multiparametric diagnostic MRI protocol consisting of standard dynamic contrast-enhanced sequences, diffusion-weighted imaging, and T2-weighted imaging. Subsequently, a case set was created including all biopsied screen-detected lesions in this period (31 malignant and 54 benign) and 116 randomly selected normal cases with more than 2 years of follow-up. Prior examinations were included when available. Seven dedicated breast radiologists read all 201 examinations and 153 available priors once using the FDP and once using ultrafast breast MRI only in 2 counterbalanced and crossed-over reading sessions. For reading the FDP versus ultrafast breast MRI alone, sensitivity was 0.86 (95% confidence interval [CI], 0.81-0.90) versus 0.84 (95% CI, 0.78-0.88) (P = 0.50), specificity was 0.76 (95% CI, 0.74-0.79) versus 0.82 (95% CI, 0.79-0.84) (P = 0.002), positive predictive value was 0.40 (95% CI, 0.36-0.45) versus 0.45 (95% CI, 0.41-0.50) (P = 0.14), and area under the receiver operating characteristics curve was 0.89 (95% CI, 0.82-0.96) versus 0.89 (95% CI, 0.82-0.96) (P = 0.83). Ultrafast breast MRI reading was 22.8% faster than reading FDP (P < 0.001). Interreader agreement is significantly better for ultrafast breast MRI (k = 0.730; 95% CI, 0.699-0.761) than for the FDP (k = 0.665; 95% CI, 0.633-0.696). Breast MRI screening using only an ultrafast breast MRI protocol is noninferior to screening with an FDP and may result in significantly higher screening specificity and shorter reading time.}, + file = {:pdf/Zels18a.pdf:PDF}, + optnote = {DIAG}, + pmid = {29944483}, + gsid = {14278207744774406949}, + gscites = {44}, + ss_id = {87b0a4836bd2d2caeaea950037ba2bdc9a2443b1}, + all_ss_ids = {['87b0a4836bd2d2caeaea950037ba2bdc9a2443b1']}, +} + +@article{Zels18b, + author = {van Zelst, Jan C M and Mann, Ritse M}, + title = {Automated Three-dimensional Breast {US} for Screening: Technique, Artifacts, and Lesion Characterization}, + journal = {Radiographics}, + year = {2018}, + volume = {38}, + issue = {3}, + pages = {663--683}, + doi = {10.1148/rg.2018170162}, + file = {:pdf/Zels18b.pdf:PDF}, + abstract = {Automated breast (AB) ultrasonography (US) scanners have recently been brought to market for breast imaging. AB US devices use mechanically driven wide linear-array transducers that can image whole-breast US volumes in three dimensions. AB US is proposed for screening as a supplemental modality to mammography in women with dense breasts and overcomes important limitations of whole-breast US using handheld devices, such as operator dependence and limited reproducibility. A literature review of supplemental whole-breast US for screening was performed, which showed that both AB US and handheld US allow detection of mammographically negative early-stage invasive breast cancers but also increase the false-positive recall rate. Technicians with limited training can perform AB US; nevertheless, there is a learning curve for acquiring optimal images. Proper acquisition technique may allow avoidance of common artifacts that could impair interpretation of AB US results. Regardless, interpretation of AB US results can be challenging. This article reviews the US appearance of common benign and malignant lesions and presents examples of false-positive and false-negative AB US results. In situ breast cancers are rarely detected with supplemental whole-breast US. 
The most discriminating feature that separates AB US from handheld US is the retraction phenomenon on coronal reformatted images. The retraction phenomenon is rarely seen with benign findings but accompanies almost all breast cancers. In conclusion, women with dense breasts may benefit from supplemental AB US examinations. Understanding the pitfalls in acquisition technique and lesion interpretation, both of which can lead to false-positive recalls, might reduce the potential harm of performing supplemental AB US. Online supplemental material is available for this article. RSNA, 2018.}, + optnote = {DIAG}, + pmid = {29624482}, + month = {5}, +} + +@phdthesis{Zels19, + author = {Jan van Zelst}, + title = {Automated 3D breast ultrasound: Advances in breast cancer detection, diagnosis and screening}, + url = {https://repository.ubn.ru.nl/handle/2066/205657}, + abstract = {Automated whole breast ultrasound systems were already available in the 1980's but the technique was not viable for clinical use at that time. The newest generation of automated breast ultrasound systems such as GE Invenia ABUS, Siemens Acuson S2000 ABVS and Hitachi's Sofia are equipped with high-end hardware and software that allows these systems to be easily implemented into breast care clinics.}, + copromotor = {R. Mann, B. Platel}, + file = {:pdf/Zels19.pdf:PDF;:png/publications/thumbs/Zels19.png:PNG image}, + optnote = {DIAG}, + promotor = {N. Karssemeijer}, + school = {Radboud University, Nijmegen}, + year = {2019}, + journal = {PhD thesis}, +} + +@article{Zels19a, + author = {van Zelst, Jan CM and Tan, Tao and Mann, Ritse M and Karssemeijer, Nico}, + title = {Validation of radiologists' findings by computer-aided detection (CAD) software in breast cancer detection with automated 3D breast ultrasound: a concept study in implementation of artificial intelligence software}, + journal = ACTR, + year = {2020}, + volume = {61}, + issue = {3}, + month = {7}, + pages = {312-320}, + doi = {10.1177/0284185119858051}, + abstract = {Background: Computer-aided detection software for automated breast ultrasound has been shown to have potential in improving the accuracy of radiologists. Alternative ways of implementing computer-aided detection, such as independent validation or preselecting suspicious cases, might also improve radiologists' accuracy. + Purpose: To investigate the effect of using computer-aided detection software to improve the performance of radiologists by validating findings reported by radiologists during screening with automated breast ultrasound. Material and Methods: Unilateral automated breast ultrasound exams were performed in 120 women with dense breasts that included 60 randomly selected normal exams, 30 exams with benign lesions, and 30 malignant cases (20 mammography-negative). Eight radiologists were instructed to detect breast cancer and rate lesions using BI-RADS and level-of-suspiciousness scores. Computer-aided detection software was used to check the validity of radiologists' findings. Findings found negative by computer-aided detection were not included in the readers' performance analysis; however, the nature of these findings were further analyzed. The area under the curve and the partial area under the curve for an interval in the range of 80%-100% specificity before and after validation of computer-aided detection were compared. Sensitivity was computed for all readers at a simulation of 90% specificity.
Results: Partial AUC improved significantly from 0.126 (95% confidence interval [CI] = 0.098-0.153) to 0.142 (95% CI = 0.115-0.169) (P = 0.037) after computer-aided detection rejected mostly benign lesions and normal tissue scored BI-RADS 3 or 4. The full areas under the curve (0.823 vs. 0.833, respectively) were not significantly different (P = 0.743). Four cancers detected by readers were completely missed by computer-aided detection and four other cancers were detected by both readers and computer-aided detection but falsely rejected due to technical limitations of our implementation of computer-aided detection validation. In this study, validation of computer-aided detection discarded 42.6% of findings that were scored BI-RADS >=3 by the radiologists, of which 85.5% were non-malignant findings. Conclusion: Validation of radiologists' findings using computer-aided detection software for automated breast ultrasound has the potential to improve the performance of radiologists. Validation of computer-aided detection might be an efficient tool for double-reading strategies by limiting the number of discordant cases needed to be double-read.}, + file = {Zels19a.pdf:pdf\\Zels19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {31324132}, + ss_id = {f0d2a9258dcc592bee57f97fe523c1a0c1af5856}, + all_ss_ids = {['f0d2a9258dcc592bee57f97fe523c1a0c1af5856']}, + gscites = {14}, +} + +@article{Zhou20, + author = {S. Kevin Zhou and Hayit Greenspan and Christos Davatzikos and James S. Duncan and Bram van Ginneken and Anant Madabhushi and Jerry L. Prince and Daniel Rueckert and Ronald M. Summers}, + title = {A review of deep learning in medical imaging: Image traits, technology trends, case studies with progress highlights, and future promises}, + journal = {arXiv:2008.09104}, + year = {2020}, + abstract = {Since its renaissance, deep learning has been widely used in various medical imaging tasks and has achieved remarkable success in many medical imaging applications, thereby propelling us into the so-called artificial intelligence (AI) era. It is known that the success of AI is mostly attributed to the availability of big data with annotations for a single task and the advances in high performance computing. However, medical imaging presents unique challenges that confront deep learning approaches. In this survey paper, we first highlight both clinical needs and technical challenges in medical imaging and describe how emerging trends in deep learning are addressing these issues. We cover the topics of network architecture, sparse and noisy labels, federated learning, interpretability, uncertainty quantification, etc. Then, we present several case studies that are commonly found in clinical practice, including digital pathology and chest, brain, cardiovascular, and abdominal imaging. Rather than presenting an exhaustive literature survey, we instead describe some prominent research highlights related to these case study applications. We conclude with a discussion and presentation of promising future directions.}, + file = {:http\://arxiv.org/pdf/2008.09104v1:PDF}, + optnote = {DIAG}, + ss_id = {4043785dacd1c04ed93ec1c08ecf779f4e1717fc}, + all_ss_ids = {['4043785dacd1c04ed93ec1c08ecf779f4e1717fc']}, + gscites = {334}, +} + +@conference{Zrei17, + author = {Zreik, M. and Lessmann, N. and van Hamersvelt, R. and Wolterink, J. and Voskuil, M. and Viergever, M. A. and Leiner, T.
and Isgum, I.}, + booktitle = RSNA, + title = {Deep learning analysis of the left ventricular myocardium in cardiac {CT} images enables detection of functionally significant coronary artery stenosis regardless of coronary anatomy}, + optnote = {DIAG}, + year = {2017}, +} + +@article{Zrei18, + author = {Majd Zreik and Nikolas Lessmann and Robbert W. van Hamersvelt and Jelmer M. Wolterink and Michiel Voskuil and Max A. Viergever and Tim Leiner and Ivana I{\v{s}}gum}, + title = {Deep learning analysis of the myocardium in coronary {CT} angiography for identification of patients with functionally significant coronary artery stenosis}, + journal = MIA, + year = {2018}, + volume = {44}, + pages = {72--85}, + doi = {10.1016/j.media.2017.11.008}, + optnote = {DIAG}, + file = {Zrei18.pdf:pdf\\Zrei18.pdf:PDF}, + abstract = {In patients with coronary artery stenoses of intermediate severity, the functional significance needs to be determined. Fractional flow reserve (FFR) measurement, performed during invasive coronary angiography (ICA), is most often used in clinical practice. To reduce the number of ICA procedures, we present a method for automatic identification of patients with functionally significant coronary artery stenoses, employing deep learning analysis of the left ventricle (LV) myocardium in rest coronary CT angiography (CCTA). The study includes consecutively acquired CCTA scans of 166 patients who underwent invasive FFR measurements. To identify patients with a functionally significant coronary artery stenosis, analysis is performed in several stages. First, the LV myocardium is segmented using a multiscale convolutional neural network (CNN). To characterize the segmented LV myocardium, it is subsequently encoded using unsupervised convolutional autoencoder (CAE). As ischemic changes are expected to appear locally, the LV myocardium is divided into a number of spatially connected clusters, and statistics of the encodings are computed as features. Thereafter, patients are classified according to the presence of functionally significant stenosis using an SVM classifier based on the extracted features. Quantitative evaluation of LV myocardium segmentation in 20 images resulted in an average Dice coefficient of 0.91 and an average mean absolute distance between the segmented and reference LV boundaries of 0.7 mm. Twenty CCTA images were used to train the LV myocardium encoder. Classification of patients was evaluated in the remaining 126 CCTA scans in 50 10-fold cross-validation experiments and resulted in an area under the receiver operating characteristic curve of 0.74+-0.02. At sensitivity levels 0.60, 0.70 and 0.80, the corresponding specificity was 0.77, 0.71 and 0.59, respectively. The results demonstrate that automatic analysis of the LV myocardium in a single CCTA scan acquired at rest, without assessment of the anatomy of the coronary arteries, can be used to identify patients with functionally significant coronary artery stenosis. This might reduce the number of patients undergoing unnecessary invasive FFR measurements.}, +} + +@conference{deVos2017, + author = {de Vos, B. D. and Lessmann, N. and de Jong, P. A. and Viergever, M. A. and Isgum, I.}, + title = {Direct coronary artery calcium scoring in low-dose chest {CT} using deep learning analysis}, + booktitle = RSNA, + year = {2017}, + optnote = {DIAG}, +}