diff --git a/Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb b/Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb index 00f09d4..d3897a2 100644 --- a/Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb +++ b/Colab_notebooks/StarDist_2D_ZeroCostDL4Mic.ipynb @@ -70,7 +70,7 @@ "\n", "*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.\n", "\n", - "*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here. \n", + "*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here.\n", "\n", "**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n", "\n", @@ -115,7 +115,7 @@ " - **Quality control dataset**\n", " - Images of nuclei\n", " - img_1.tif, img_2.tif\n", - " - Masks \n", + " - Masks\n", " - img_1.tif, img_2.tif\n", " - **Data to be predicted**\n", " - **Results**\n", @@ -149,7 +149,7 @@ "source": [ "## **1.1. Install key dependencies**\n", "---\n", - " " + "" ] }, { @@ -179,8 +179,6 @@ "# The [bioimageio] extras are installed for bioimageio model support\n", "!pip install 'bioimageio.core>=0.5,<0.6'\n", "\n", - "# Package for improving STARDIST performance using GPU acceleration\n", - "!pip install -q gputools\n", "\n", "# Package for downloading files from the web\n", "!pip install -q wget\n", @@ -238,7 +236,7 @@ "source": [ "## **1.3. Load key dependencies**\n", "---\n", - " " + "" ] }, { @@ -252,13 +250,13 @@ "source": [ "from __future__ import print_function, unicode_literals, absolute_import, division\n", "#@markdown ##Load key dependencies\n", - "Notebook_version = '1.20.1'\n", + "Notebook_version = '1.20.2'\n", "Network = 'StarDist 2D'\n", "\n", "from builtins import any as b_any\n", "\n", "def get_requirements_path():\n", - " # Store requirements file in 'contents' directory \n", + " # Store requirements file in 'contents' directory\n", " current_dir = os.getcwd()\n", " dir_count = current_dir.count('/') - 1\n", " path = '../' * (dir_count) + 'requirements.txt'\n", @@ -278,14 +276,14 @@ " !pip freeze > $path\n", "\n", " # Get minimum requirements file\n", - " df = pd.read_csv(path) \n", + " df = pd.read_csv(path)\n", " mod_list = [m.split('.')[0] for m in after if not m in before]\n", " req_list_temp = df.values.tolist()\n", " req_list = [x[0] for x in req_list_temp]\n", "\n", " # Replace with package name and handle cases where import name is different to module name\n", " mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n", - " mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list] \n", + " mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]\n", " filtered_list = filter_files(req_list, mod_replace_list)\n", " print(filtered_list)\n", "\n", @@ -342,7 +340,7 @@ "from matplotlib import pyplot as plt\n", "import urllib\n", "import os, random\n", - "import shutil \n", + "import shutil\n", "import zipfile\n", "from tifffile import imread, imsave, imwrite\n", "import time\n", @@ -363,7 +361,7 @@ "from astropy.visualization import simple_norm\n", "from skimage import img_as_float32, img_as_ubyte, img_as_float\n", "from skimage.util import img_as_ubyte\n", - "from tqdm import tqdm \n", + "from 
tqdm import tqdm\n", "import cv2\n", "from fpdf import FPDF, HTMLMixin\n", "from datetime import datetime\n", @@ -421,16 +419,16 @@ " pdf = MyFPDF()\n", " pdf.add_page()\n", " pdf.set_right_margin(-1)\n", - " pdf.set_font(\"Arial\", size = 11, style='B') \n", + " pdf.set_font(\"Arial\", size = 11, style='B')\n", "\n", " day = datetime.now()\n", " datetime_str = str(day)[0:10]\n", "\n", " Header = 'Training report for '+Network+' model ('+model_name+')\\nDate: '+datetime_str\n", - " pdf.multi_cell(180, 5, txt = Header, align = 'L') \n", + " pdf.multi_cell(180, 5, txt = Header, align = 'L')\n", " pdf.ln(1)\n", - " \n", - " # add another cell \n", + "\n", + " # add another cell\n", " if trained:\n", " training_time = \"Training time: \"+str(hour)+ \"hour(s) \"+str(mins)+\"min(s) \"+str(round(sec))+\"sec(s)\"\n", " pdf.cell(190, 5, txt = training_time, ln = 1, align='L')\n", @@ -488,7 +486,7 @@ " pdf.set_font('')\n", " if augmentation:\n", " aug_text = 'The dataset was augmented by a factor of '+str(Multiply_dataset_by)\n", - " \n", + "\n", " else:\n", " aug_text = 'No augmentation was used for training.'\n", " pdf.multi_cell(190, 5, txt=aug_text, align='L')\n", @@ -501,7 +499,7 @@ " pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')\n", " pdf.cell(200, 5, txt='The following parameters were used for training:')\n", " pdf.ln(1)\n", - " html = \"\"\" \n", + " html = \"\"\"\n", " \n", " \n", " \n", @@ -595,7 +593,7 @@ " pdf = MyFPDF()\n", " pdf.add_page()\n", " pdf.set_right_margin(-1)\n", - " pdf.set_font(\"Arial\", size = 11, style='B') \n", + " pdf.set_font(\"Arial\", size = 11, style='B')\n", "\n", " Network = 'Stardist 2D'\n", "\n", @@ -603,7 +601,7 @@ " datetime_str = str(day)[0:10]\n", "\n", " Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\\nDate: '+datetime_str\n", - " pdf.multi_cell(180, 5, txt = Header, align = 'L') \n", + " pdf.multi_cell(180, 5, txt = Header, align = 'L')\n", " pdf.ln(1)\n", "\n", " all_packages = ''\n", @@ -713,7 +711,7 @@ " \"\"\".format(str(i),str(round(float(PvGT_IoU),3)),fp,tp,fn,str(round(float(precision),3)),str(round(float(recall),3)),str(round(float(acc),3)),str(round(float(f1),3)),n_true,n_pred,str(round(float(mean_true),3)),str(round(float(mean_matched),3)),str(round(float(panoptic),3)))\n", " html = html+cells\n", " html = html+\"\"\"
</body></table>
\"\"\"\n", - " \n", + "\n", " pdf.write_html(html)\n", "\n", " pdf.ln(1)\n", @@ -797,8 +795,8 @@ "#@markdown ##Run this cell to check if you have GPU access\n", "import tensorflow as tf\n", "if tf.test.gpu_device_name()=='':\n", - " print('You do not have GPU access.') \n", - " print('Did you change your runtime ?') \n", + " print('You do not have GPU access.')\n", + " print('Did you change your runtime ?')\n", " print('If the runtime setting is correct then Google did not allocate a GPU for your session')\n", " print('Expect slow performance. To access GPU try reconnecting later')\n", "\n", @@ -847,7 +845,7 @@ "id": "xyjdhq0Zt5TF" }, "source": [ - "** If you cannot see your files, reactivate your session by connecting to your hosted runtime.** \n", + "** If you cannot see your files, reactivate your session by connecting to your hosted runtime.**\n", "\n", "\n", "\"Example
Connect to a hosted runtime.
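Reviewer note: the screenshot above assumes Google Drive was already mounted in section 1.2. For readers of the diff, that mount is the standard Colab call; a minimal sketch, not part of this changeset:

```python
# Standard Colab Drive mount relied on by section 1.2 (path is the notebook's default)
from google.colab import drive
drive.mount('/content/gdrive')
```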
" @@ -884,7 +882,7 @@ }, "outputs": [], "source": [ - "#@markdown ##Download a test dataset \n", + "#@markdown ##Download a test dataset\n", "\n", "\n", "import requests\n", @@ -917,7 +915,7 @@ "source": [ "## **3.2. Setting main training parameters**\n", "---\n", - " " + "" ] }, { @@ -948,9 +946,9 @@ "\n", "**`patch_size`:** Input the size of the patches use to train StarDist 2D (length of a side). The value should be smaller or equal to the dimensions of the image. Make the patch size as large as possible and divisible by 8. **Default value: dimension of the training images** \n", "\n", - "**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10** \n", + "**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during the training. **Default value: 10**\n", "\n", - "**`n_rays`:** Set number of rays (corners) used for StarDist (for instance, a square has 4 corners). **Default value: 32** \n", + "**`n_rays`:** Set number of rays (corners) used for StarDist (for instance, a square has 4 corners). **Default value: 32**\n", "\n", "**`grid_parameter`:** increase this number if the cells/nuclei are very large or decrease it if they are very small. **Default value: 2**\n", "\n", @@ -971,17 +969,17 @@ }, "outputs": [], "source": [ - "#@markdown ###Path to training images: \n", + "#@markdown ###Path to training images:\n", "Training_source = '/data/Stardist/Training - Images' #@param {type:\"string\"}\n", "\n", "Training_target = '/data/Stardist/Training - Masks' #@param {type:\"string\"}\n", "\n", "\n", "#@markdown ###Name of the model and path to model folder:\n", - "model_name = '' #@param {type:\"string\"}\n", + "model_name = 'test' #@param {type:\"string\"}\n", "\n", "model_path = '' #@param {type:\"string\"}\n", - "#trained_model = model_path \n", + "#trained_model = model_path\n", "\n", "\n", "#@markdown ### Other parameters for training:\n", @@ -1001,7 +999,7 @@ "grid_parameter = 2#@param [1, 2, 4, 8, 16, 32] {type:\"raw\"}\n", "initial_learning_rate = 0.0001 #@param {type:\"number\"}\n", "\n", - "if (Use_Default_Advanced_Parameters): \n", + "if (Use_Default_Advanced_Parameters):\n", " print(\"Default advanced parameters enabled\")\n", " batch_size = 2\n", " n_rays = 32\n", @@ -1035,10 +1033,10 @@ "# If default parameters, patch size is the same as image size\n", "if (Use_Default_Advanced_Parameters):\n", " patch_size = min(Image_Y, Image_X)\n", - " \n", + "\n", "#Hyperparameters failsafes\n", "\n", - "# Here we check that patch_size is smaller than the smallest xy dimension of the image \n", + "# Here we check that patch_size is smaller than the smallest xy dimension of the image\n", "\n", "if patch_size > min(Image_Y, Image_X):\n", " patch_size = min(Image_Y, Image_X)\n", @@ -1106,7 +1104,7 @@ "Data augmentation is performed here via random rotations, flips, and intensity changes.\n", "\n", "\n", - " **However, data augmentation is not a magic solution and may also introduce issues. Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.** " + " **However, data augmentation is not a magic solution and may also introduce issues. 
Therefore, we recommend that you train your network with and without augmentation, and use the QC section to validate that it improves overall performances.**" ] }, { @@ -1127,17 +1125,17 @@ "Multiply_dataset_by = 4 #@param {type:\"slider\", min:1, max:10, step:1}\n", "\n", "\n", - "def random_fliprot(img, mask): \n", + "def random_fliprot(img, mask):\n", " assert img.ndim >= mask.ndim\n", " axes = tuple(range(mask.ndim))\n", " perm = tuple(np.random.permutation(axes))\n", - " img = img.transpose(perm + tuple(range(mask.ndim, img.ndim))) \n", - " mask = mask.transpose(perm) \n", - " for ax in axes: \n", + " img = img.transpose(perm + tuple(range(mask.ndim, img.ndim)))\n", + " mask = mask.transpose(perm)\n", + " for ax in axes:\n", " if np.random.rand() > 0.5:\n", " img = np.flip(img, axis=ax)\n", " mask = np.flip(mask, axis=ax)\n", - " return img, mask \n", + " return img, mask\n", "\n", "def random_intensity_change(img):\n", " img = img*np.random.uniform(0.6,2) + np.random.uniform(-0.2,0.2)\n", @@ -1165,7 +1163,7 @@ "\n", "if not Use_Data_augmentation:\n", " augmenter = None\n", - " print(bcolors.WARNING+\"Data augmentation disabled\") \n", + " print(bcolors.WARNING+\"Data augmentation disabled\")\n", "\n", "\n" ] @@ -1179,11 +1177,11 @@ "\n", "## **3.4. Using weights from a pre-trained model as initial weights**\n", "---\n", - " Here, you can set the path to a pre-trained model or the ID of a model from the [BioImage Model Zoo](https://bioimage.io/) (*e.g.*, 10.5281/zenodo.5749843) from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a StarDist model**. \n", + " Here, you can set the path to a pre-trained model or the ID of a model from the [BioImage Model Zoo](https://bioimage.io/) (*e.g.*, 10.5281/zenodo.5749843) from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a StarDist model**.\n", "\n", " This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n", "\n", - " In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. " + " In order to continue training from the point where the pre-trained model left off, it is adviseable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used." 
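Reviewer note: a minimal sketch of how the weights selected in this section are consumed once training starts in section 4. `conf`, `model_name`, `model_path`, `Use_pretrained_model`, and `h5_file_path` are the notebook's own variables; this mirrors, not replaces, the code in the diff below:

```python
# Sketch: start training from the pretrained weights chosen above
model = StarDist2D(conf, name=model_name, basedir=model_path)
if Use_pretrained_model:
    model.load_weights(h5_file_path)  # weights_best.h5 or weights_last.h5
```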
] }, { @@ -1219,7 +1217,7 @@ " %rm -rf $h5_file_path\n", " model_imported = import_bioimageio(bioimageio_model, h5_file_path)\n", " del model_imported\n", - " biomodel = load_resource_description(os.path.join(h5_file_path, \"bioimageio\", \"rdf.yaml\")) \n", + " biomodel = load_resource_description(os.path.join(h5_file_path, \"bioimageio\", \"rdf.yaml\"))\n", " h5_file_path = os.path.join(h5_file_path, \"bioimageio\", biomodel.config['stardist']['weights'])\n", "\n", "# --------------------- Download the Demo 2D model provided in the Stardist 2D github ------------------------\n", @@ -1233,7 +1231,7 @@ " os.makedirs(pretrained_model_path)\n", " wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/config.json\", pretrained_model_path)\n", " wget.download(\"https://github.com/mpicbg-csbd/stardist/raw/master/models/examples/2D_demo/thresholds.json\", pretrained_model_path)\n", - " wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_best.h5?raw=true\", pretrained_model_path) \n", + " wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_best.h5?raw=true\", pretrained_model_path)\n", " wget.download(\"https://github.com/mpicbg-csbd/stardist/blob/master/models/examples/2D_demo/weights_last.h5?raw=true\", pretrained_model_path)\n", " h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".h5\")\n", "\n", @@ -1243,16 +1241,16 @@ " print(\"Downloading the 2D_versatile_fluo_from_Stardist_Fiji\")\n", " pretrained_model_name = \"2D_versatile_fluo\"\n", " pretrained_model_path = base_path + \"/\" + pretrained_model_name\n", - " \n", + "\n", " if os.path.exists(pretrained_model_path):\n", " shutil.rmtree(pretrained_model_path)\n", " os.makedirs(pretrained_model_path)\n", - " \n", + "\n", " wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_fluo.zip\", pretrained_model_path)\n", - " \n", + "\n", " with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_fluo.zip\", 'r') as zip_ref:\n", " zip_ref.extractall(pretrained_model_path)\n", - " \n", + "\n", " h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n", "\n", "# --------------------- Download the Versatile (H&E nuclei)_fluo_from_Stardist_Fiji ------------------------\n", @@ -1261,16 +1259,16 @@ " print(\"Downloading the Versatile_H&E_nuclei from_Stardist_Fiji\")\n", " pretrained_model_name = \"2D_versatile_he\"\n", " pretrained_model_path = base_path + \"/\" + pretrained_model_name\n", - " \n", + "\n", " if os.path.exists(pretrained_model_path):\n", " shutil.rmtree(pretrained_model_path)\n", " os.makedirs(pretrained_model_path)\n", - " \n", + "\n", " wget.download(\"https://cloud.mpi-cbg.de/index.php/s/1k5Zcy7PpFWRb0Q/download?path=/versatile&files=2D_versatile_he.zip\", pretrained_model_path)\n", - " \n", + "\n", " with zipfile.ZipFile(pretrained_model_path+\"/2D_versatile_he.zip\", 'r') as zip_ref:\n", " zip_ref.extractall(pretrained_model_path)\n", - " \n", + "\n", " h5_file_path = os.path.join(pretrained_model_path, \"weights_best.h5\")\n", "\n", "\n", @@ -1279,13 +1277,13 @@ "\n", "\n", "# --------------------- Check the model exist ------------------------\n", - "# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n", + "# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled,\n", " if not 
os.path.exists(h5_file_path):\n", " print(bcolors.WARNING+'WARNING: weights_last.h5 pretrained model does not exist' + W)\n", " Use_pretrained_model = False\n", "\n", - " \n", - "# If the model path contains a pretrain model, we load the training rate, \n", + "\n", + "# If the model path contains a pretrain model, we load the training rate,\n", " if os.path.exists(h5_file_path):\n", "#Here we check if the learning rate can be loaded from the quality control folder\n", " if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n", @@ -1293,7 +1291,7 @@ " with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n", " csvRead = pd.read_csv(csvfile, sep=',')\n", " #print(csvRead)\n", - " \n", + "\n", " if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n", " print(\"pretrained network learning rate found\")\n", " #find the last learning rate\n", @@ -1382,8 +1380,8 @@ "mask_images_tiff=Training_target_dir+\"/*.tif\"\n", "\n", "# this funtion imports training images and masks and sorts them suitable for the network\n", - "X = sorted(glob(training_images_tiff)) \n", - "Y = sorted(glob(mask_images_tiff)) \n", + "X = sorted(glob(training_images_tiff))\n", + "Y = sorted(glob(mask_images_tiff))\n", "\n", "# assert -funtion check that X and Y really have images. If not this cell raises an error\n", "assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))\n", @@ -1403,24 +1401,24 @@ "\n", "if not Use_Data_augmentation:\n", " augmenter = None\n", - " \n", + "\n", "\n", "#Normalize images and fill small label holes.\n", - "axis_norm = (0,1) \n", + "axis_norm = (0,1)\n", "if n_channel == 1:\n", " # normalize channels independently\n", " print(\"Normalizing image channels independently\")\n", "if n_channel > 1:\n", " # normalize channels independently\n", " # axis_norm = (0,1,2) # normalize channels jointly\n", - " print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently')) \n", + " print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n", " sys.stdout.flush()\n", "\n", "X = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\n", "Y = [fill_label_holes(y) for y in tqdm(Y)]\n", "\n", - "#Here we split the your training dataset into training images (90 %) and validation images (10 %). \n", - "#It is advisable to use 10 % of your training dataset for validation. This ensures the truthfull validation error value. If only few validation images are used network may choose too easy or too challenging images for validation. \n", + "#Here we split the your training dataset into training images (90 %) and validation images (10 %).\n", + "#It is advisable to use 10 % of your training dataset for validation. This ensures the truthfull validation error value. 
If only few validation images are used network may choose too easy or too challenging images for validation.\n", "# split training data (images and masks) into training images and validation images.\n", "assert len(X) > 1, \"not enough training data\"\n", "rng = np.random.RandomState(42)\n", @@ -1428,7 +1426,7 @@ "n_val = max(1, int(round(percentage * len(ind))))\n", "ind_train, ind_val = ind[:-n_val], ind[-n_val:]\n", "X_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\n", - "X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \n", + "X_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train]\n", "print('number of images: %3d' % len(X))\n", "print('- training: %3d' % len(X_trn))\n", "print('- validation: %3d' % len(X_val))\n", @@ -1439,7 +1437,7 @@ "\n", "#Here we ensure that our network has a minimal number of steps\n", "\n", - "if (Use_Default_Advanced_Parameters) or (number_of_steps == 0): \n", + "if (Use_Default_Advanced_Parameters) or (number_of_steps == 0):\n", " # number_of_steps= (int(len(X)/batch_size)+1)\n", " number_of_steps = int(Image_X*Image_Y/(patch_size*patch_size))*(int(len(X)/batch_size)+1)\n", " if (Use_Data_augmentation):\n", @@ -1453,7 +1451,7 @@ " if Weights_choice == \"last\":\n", " initial_learning_rate = lastLearningRate\n", "\n", - " if Weights_choice == \"best\": \n", + " if Weights_choice == \"best\":\n", " initial_learning_rate = bestLearningRate\n", "# --------------------- ---------------------- ------------------------\n", "\n", @@ -1473,7 +1471,7 @@ "model = StarDist2D(conf, name=model_name, basedir=model_path)\n", "\n", "# --------------------- Using pretrained model ------------------------\n", - "# Load the pretrained weights \n", + "# Load the pretrained weights\n", "if Use_pretrained_model:\n", " try:\n", " model.load_weights(h5_file_path)\n", @@ -1538,15 +1536,15 @@ "\n", "print(\"Done\")\n", "\n", - "# convert the history.history dict to a pandas DataFrame: \n", - "lossData = pd.DataFrame(history.history) \n", + "# convert the history.history dict to a pandas DataFrame:\n", + "lossData = pd.DataFrame(history.history)\n", "\n", "if os.path.exists(model_path+\"/\"+model_name+\"/Quality Control\"):\n", " shutil.rmtree(model_path+\"/\"+model_name+\"/Quality Control\")\n", "\n", "os.makedirs(model_path+\"/\"+model_name+\"/Quality Control\")\n", "\n", - "# The training evaluation.csv is saved (overwrites the Files if needed). 
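Reviewer note: this `training_evaluation.csv` is the file section 3.4 reads back when resuming training. A sketch of the round-trip, assuming the ZeroCostDL4Mic column name used above ('learning rate') and the standard Keras 'val_loss' history key:

```python
import pandas as pd

# Re-load the learning-rate history written to training_evaluation.csv
csvRead = pd.read_csv(lossDataCSVpath)
lastLearningRate = csvRead['learning rate'].iloc[-1]                        # Weights_choice == "last"
bestLearningRate = csvRead['learning rate'][csvRead['val_loss'].idxmin()]   # Weights_choice == "best"
```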
\n", + "# The training evaluation.csv is saved (overwrites the Files if needed).\n", "lossDataCSVpath = model_path+'/'+model_name+'/Quality Control/training_evaluation.csv'\n", "with open(lossDataCSVpath, 'w') as f:\n", " writer = csv.writer(f)\n", @@ -1558,8 +1556,8 @@ "\n", "# Displaying the time elapsed for training\n", "dt = time.time() - start\n", - "mins, sec = divmod(dt, 60) \n", - "hour, mins = divmod(mins, 60) \n", + "mins, sec = divmod(dt, 60)\n", + "hour, mins = divmod(mins, 60)\n", "print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n", "\n", "# model.export_TF()\n", @@ -1609,14 +1607,14 @@ "QC_model_name = os.path.basename(QC_model_folder)\n", "QC_model_path = os.path.dirname(QC_model_folder)\n", "\n", - "if (Use_the_current_trained_model): \n", + "if (Use_the_current_trained_model):\n", " QC_model_name = model_name\n", " QC_model_path = model_path\n", "\n", "full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'\n", "if os.path.exists(full_QC_model_path):\n", " print(\"The \"+QC_model_name+\" network will be evaluated\")\n", - "else: \n", + "else:\n", " print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!')\n", " print('Please make sure you provide a valid model path and model name before proceeding further.')\n" ] @@ -1697,11 +1695,11 @@ "---\n", "This section will calculate the Intersection over Union score for all the images provided in the Source_QC_folder and Target_QC_folder ! The result for one of the image will also be displayed.\n", "\n", - "The **Intersection over Union** (IuO) metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei. \n", + "The **Intersection over Union** (IuO) metric is a method that can be used to quantify the percent overlap between the target mask and your prediction output. **Therefore, the closer to 1, the better the performance.** This metric can be used to assess the quality of your model to accurately predict nuclei.\n", "\n", "Here, the IuO is both calculated over the whole image and on a per-object basis. The value displayed below is the IuO value calculated over the entire image. The IuO value calculated on a per-object basis is used to calculate the other metrics displayed.\n", "\n", - "“n_true” refers to the number of objects present in the ground truth image. “n_pred” refers to the number of objects present in the predicted image. \n", + "“n_true” refers to the number of objects present in the ground truth image. “n_pred” refers to the number of objects present in the predicted image.\n", "\n", "When a segmented object has an IuO value above 0.5 (compared to the corresponding ground truth), it is then considered a true positive. The number of “**true positives**” is available in the table below. The number of “false positive” is then defined as “**false positive**” = “n_pred” - “true positive”. 
The number of “false negative” is defined as “false negative” = “n_true” - “true positive”.\n", "\n", @@ -1724,7 +1722,7 @@ "#@markdown ##Choose the folders that contain your Quality Control dataset\n", "\n", "from stardist.matching import matching\n", - "from stardist.plot import render_label, render_label_pred \n", + "from stardist.plot import render_label, render_label_pred\n", "\n", "Source_QC_folder = '/data/Stardist/Test - Images' #@param{type:\"string\"}\n", "Target_QC_folder = '/data/Stardist/Test - Masks' #@param{type:\"string\"}\n", @@ -1754,7 +1752,7 @@ "n_channel = 1 if Z[0].ndim == 2 else Z[0].shape[-1]\n", "\n", "print('Number of test dataset found in the folder: '+str(len(Z)))\n", - " \n", + "\n", "#Normalize images.\n", "\n", "if n_channel == 1:\n", @@ -1763,17 +1761,17 @@ "\n", "if n_channel > 1:\n", " axis_norm = (0,1,2) # normalize channels jointly\n", - " print(\"Normalizing image channels jointly\") \n", + " print(\"Normalizing image channels jointly\")\n", "\n", "model = StarDist2D(None, name=QC_model_name, basedir=QC_model_path)\n", "\n", "names = [os.path.basename(f) for f in sorted(glob(Source_QC_folder_tif))]\n", "\n", - " \n", + "\n", "# modify the names to suitable form: path_images/image_numberX.tif\n", - " \n", + "\n", "lenght_of_Z = len(Z)\n", - " \n", + "\n", "for i in range(lenght_of_Z):\n", " img = normalize(Z[i], 1,99.8, axis=axis_norm)\n", " labels, polygons = model.predict_instances(img)\n", @@ -1784,12 +1782,12 @@ "\n", "with open(QC_model_path+\"/\"+QC_model_name+\"/Quality Control/Quality_Control for \"+QC_model_name+\".csv\", \"w\", newline='') as file:\n", " writer = csv.writer(file, delimiter=\",\")\n", - " writer.writerow([\"image\",\"Prediction v. GT Intersection over Union\", \"false positive\", \"true positive\", \"false negative\", \"precision\", \"recall\", \"accuracy\", \"f1 score\", \"n_true\", \"n_pred\", \"mean_true_score\", \"mean_matched_score\", \"panoptic_quality\"]) \n", + " writer.writerow([\"image\",\"Prediction v. GT Intersection over Union\", \"false positive\", \"true positive\", \"false negative\", \"precision\", \"recall\", \"accuracy\", \"f1 score\", \"n_true\", \"n_pred\", \"mean_true_score\", \"mean_matched_score\", \"panoptic_quality\"])\n", "\n", "# define the images\n", "\n", " for n in os.listdir(Source_QC_folder):\n", - " \n", + "\n", " if not os.path.isdir(os.path.join(Source_QC_folder,n)):\n", " print('Running QC on: '+n)\n", " test_input = imread(os.path.join(Source_QC_folder,n))\n", @@ -1797,7 +1795,7 @@ " test_ground_truth_image = imread(os.path.join(Target_QC_folder, n))\n", "\n", " # Calculate the matching (with IoU threshold `thresh`) and all metrics\n", - " \n", + "\n", " stats = matching(test_ground_truth_image, test_prediction, thresh=0.5)\n", "\n", " #Convert pixel values to 0 or 255\n", @@ -1828,7 +1826,7 @@ "print('--------------------------------------------------------------')\n", "@interact\n", "def show_QC_results(file = os.listdir(Source_QC_folder)):\n", - " \n", + "\n", "\n", " plt.figure(figsize=(25,5))\n", " if n_channel > 1:\n", @@ -1844,7 +1842,7 @@ " target_image_mask = np.empty_like(target_image)\n", " target_image_mask[target_image > 0] = 255\n", " target_image_mask[target_image == 0] = 0\n", - " \n", + "\n", " prediction_mask = np.empty_like(prediction)\n", " prediction_mask[prediction > 0] = 255\n", " prediction_mask[prediction == 0] = 0\n", @@ -1896,11 +1894,11 @@ "source": [ "## **5.3. 
Export your model into the BioImage Model Zoo format**\n", "---\n", - "This section exports the model into the [BioImage Model Zoo](https://bioimage.io/#/) format so it can be used directly with deepImageJ or Ilastik. The new files will be stored in the model folder specified at the beginning of Section 5. \n", + "This section exports the model into the [BioImage Model Zoo](https://bioimage.io/#/) format so it can be used directly with deepImageJ or Ilastik. The new files will be stored in the model folder specified at the beginning of Section 5.\n", "\n", "Once the cell is executed, you will find a new zip file with the name specified in `trained_model_name.bioimage.io.model`.\n", "\n", - "To use it with deepImageJ, download it and install it suing DeepImageJ Install Model > Install from a local file. \n", + "To use it with deepImageJ, download it and install it suing DeepImageJ Install Model > Install from a local file.\n", "\n", "To try the model in ImageJ, go to Plugins > DeepImageJ > DeepImageJ Run, choose this model from the list and click on Test Model.\n", "\n", @@ -1928,8 +1926,8 @@ "Trained_model_authors_affiliation = \"[Affiliation 1, Affiliation 2, Affiliation 3]\" #@param {type:\"string\"}\n", "Trained_model_description = \"\" #@param {type:\"string\"}\n", "Trained_model_license = 'MIT'#@param {type:\"string\"}\n", - "Trained_model_references = [ \"Uwe Schmidt et al. MICCAI 2018\", \"Martin Weigert et al. WACV 2020\", \"Lucas von Chamier et al. biorXiv 2020\"] \n", - "Trained_model_DOI = [\"https://doi.org/10.1007/978-3-030-00934-2_30\", \"https://doi.org/10.1109/WACV45572.2020.9093435\", \"https://doi.org/10.1101/2020.03.20.000133\"] \n", + "Trained_model_references = [ \"Uwe Schmidt et al. MICCAI 2018\", \"Martin Weigert et al. WACV 2020\", \"Lucas von Chamier et al. biorXiv 2020\"]\n", + "Trained_model_DOI = [\"https://doi.org/10.1007/978-3-030-00934-2_30\", \"https://doi.org/10.1109/WACV45572.2020.9093435\", \"https://doi.org/10.1101/2020.03.20.000133\"]\n", "\n", "#@markdown ##Introduce the pixel size (in microns) of the image provided as an example of the model processing:\n", "# information about the example image\n", @@ -2030,7 +2028,7 @@ "In stardist the following results can be exported:\n", "- Region of interest (ROI) that can be opened in ImageJ / Fiji. The ROI are saved inside of a .zip file in your choosen result folder. To open the ROI in Fiji, just drag and drop the zip file !**\n", "- The predicted mask images\n", - "- A CSV file that contains the coordinate the centre of each detected nuclei (single image only). 
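Reviewer note: in outline, the per-image flow the next cell implements (illustrative file names; `model` is the StarDist2D instance configured in this section):

```python
from csbdeep.utils import normalize
from tifffile import imread, imwrite
from stardist import export_imagej_rois

img = normalize(imread('example.tif'), 1, 99.8, axis=(0, 1))  # percentile normalization
labels, polygons = model.predict_instances(img)
imwrite('example_mask.tif', labels)                    # predicted label mask
export_imagej_rois('example_rois', polygons['coord'])  # ROI .zip, drag and drop into Fiji
```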
\n", + "- A CSV file that contains the coordinate the centre of each detected nuclei (single image only).\n", "\n" ] }, @@ -2060,7 +2058,7 @@ "\n", "# model name and path\n", "#@markdown ###Do you want to use the current trained model?\n", - "Use_the_current_trained_model = False #@param {type:\"boolean\"}\n", + "Use_the_current_trained_model = True #@param {type:\"boolean\"}\n", "\n", "#@markdown ###If not, please provide the path to the model folder:\n", "\n", @@ -2070,7 +2068,7 @@ "Prediction_model_name = os.path.basename(Prediction_model_folder)\n", "Prediction_model_path = os.path.dirname(Prediction_model_folder)\n", "\n", - "if (Use_the_current_trained_model): \n", + "if (Use_the_current_trained_model):\n", " print(\"Using current trained network\")\n", " Prediction_model_name = model_name\n", " Prediction_model_path = model_path\n", @@ -2094,7 +2092,7 @@ " X = sorted(glob(Data_folder))\n", " X = list(map(imread,X))\n", " n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]\n", - " \n", + "\n", " # axis_norm = (0,1,2) # normalize channels jointly\n", " if n_channel == 1:\n", " axis_norm = (0,1) # normalize channels independently\n", @@ -2102,12 +2100,12 @@ "\n", " if n_channel > 1:\n", " axis_norm = (0,1,2) # normalize channels jointly\n", - " print(\"Normalizing image channels jointly\") \n", - " sys.stdout.flush() \n", - " \n", + " print(\"Normalizing image channels jointly\")\n", + " sys.stdout.flush()\n", + "\n", " model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n", - " \n", - " names = [os.path.basename(f) for f in sorted(glob(Data_folder))] \n", + "\n", + " names = [os.path.basename(f) for f in sorted(glob(Data_folder))]\n", " Nuclei_number = []\n", "\n", " # modify the names to suitable form: path_images/image_numberX.tif\n", @@ -2117,35 +2115,35 @@ " FILEnames.append(m)\n", "\n", " # Create a list of name with no extension\n", - " \n", + "\n", " name_no_extension=[]\n", " for n in names:\n", " name_no_extension.append(os.path.splitext(n)[0])\n", - " \n", + "\n", " # Save all ROIs and masks into results folder\n", - " \n", + "\n", " for i in range(len(X)):\n", - " \n", + "\n", " img = normalize(X[i], 1,99.8, axis = axis_norm)\n", " labels, polygons = model.predict_instances(img)\n", - " \n", + "\n", " os.chdir(Results_folder)\n", " imwrite(FILEnames[i], labels)\n", - " export_imagej_rois(name_no_extension[i], polygons['coord']) \n", + " export_imagej_rois(name_no_extension[i], polygons['coord'])\n", " Nuclei_centre_coordinate = polygons['points']\n", " my_df2 = pd.DataFrame(Nuclei_centre_coordinate)\n", " my_df2.columns =['Y', 'X']\n", - " \n", + "\n", " my_df2.to_csv(Results_folder+'/'+name_no_extension[i]+'_Nuclei_centre.csv', index=False, header=True)\n", "\n", " Nuclei_array = polygons['coord']\n", " Nuclei_array2 = [names[i], Nuclei_array.shape[0]]\n", - " Nuclei_number.append(Nuclei_array2) \n", + " Nuclei_number.append(Nuclei_array2)\n", "\n", " my_df = pd.DataFrame(Nuclei_number)\n", " my_df2.columns =['Frame number', 'Number of objects']\n", " my_df.to_csv(Results_folder+'/Nuclei_count.csv', index=False, header=False)\n", - " \n", + "\n", "# One example is displayed\n", "\n", " print(\"One example image is displayed bellow:\")\n", @@ -2163,10 +2161,10 @@ " lbl_cmap = random_label_cmap()\n", "\n", " # normalize channels independently\n", - " axis_norm = (0,1) \n", - " \n", + " axis_norm = (0,1)\n", + "\n", " model = StarDist2D(None, name = Prediction_model_name, basedir = Prediction_model_path)\n", - " \n", + "\n", " for image in 
os.listdir(Data_folder):\n", " print(\"Performing prediction on: \"+image)\n", "\n", @@ -2175,24 +2173,24 @@ "\n", " timelapse = imread(Data_folder+\"/\"+image)\n", "\n", - " short_name = os.path.splitext(image) \n", - " \n", + " short_name = os.path.splitext(image)\n", + "\n", " timelapse = normalize(timelapse, 1,99.8, axis=(0,)+tuple(1+np.array(axis_norm)))\n", - " \n", "\n", - " if Region_of_interests: \n", - " polygons = [model.predict_instances(frame)[1]['coord'] for frame in tqdm(timelapse)] \n", - " export_imagej_rois(Results_folder+\"/\"+str(short_name[0]), polygons, compression=ZIP_DEFLATED) \n", + "\n", + " if Region_of_interests:\n", + " polygons = [model.predict_instances(frame)[1]['coord'] for frame in tqdm(timelapse)]\n", + " export_imagej_rois(Results_folder+\"/\"+str(short_name[0]), polygons, compression=ZIP_DEFLATED)\n", "\n", "\n", "# Analyse each time points one after the other\n", - " \n", + "\n", " n_timepoint = timelapse.shape[0]\n", " prediction_stack = np.zeros((n_timepoint, timelapse.shape[1], timelapse.shape[2]))\n", - " \n", + "\n", " for t in range(n_timepoint):\n", " img_t = timelapse[t]\n", - " labels, polygons = model.predict_instances(img_t) \n", + " labels, polygons = model.predict_instances(img_t)\n", " prediction_stack[t] = labels\n", " Nuclei_array = polygons['coord']\n", " Nuclei_array2 = [str(t), Nuclei_array.shape[0]]\n", @@ -2205,7 +2203,7 @@ "# Export a csv file containing the number of nuclei detected at each frame\n", " my_df = pd.DataFrame(Number_of_nuclei_list)\n", " my_df.to_csv(Results_folder+'/'+str(short_name[0])+'_Nuclei_number.csv', index=False, header=False)\n", - " os.chdir(Results_folder) \n", + " os.chdir(Results_folder)\n", " imsave(str(short_name[0])+\".tif\", prediction_stack, compression ='zlib')\n", "\n", " # Object detected vs frame number\n", @@ -2219,9 +2217,9 @@ " plt.show()\n", " del prediction_stack\n", " del timelapse\n", - " \n", "\n", - "print(\"Predictions completed\") " + "\n", + "print(\"Predictions completed\")" ] }, { @@ -2284,7 +2282,7 @@ "Prediction_model_path = os.path.dirname(Prediction_model_folder)\n", "\n", "\n", - "#@markdown #####To analyse very large image, your images need to be divided into blocks. Each blocks will then be processed independently and re-assembled to generate the final image. \n", + "#@markdown #####To analyse very large image, your images need to be divided into blocks. Each blocks will then be processed independently and re-assembled to generate the final image.\n", "#@markdown #####Here you can choose the dimension of the block.\n", "\n", "block_size_Y = 1024#@param {type:\"number\"}\n", @@ -2294,18 +2292,18 @@ "#@markdown #####Here you can the amount of overlap between each block.\n", "min_overlap = 50#@param {type:\"number\"}\n", "\n", - "#@markdown #####To analyse large blocks, your blocks need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final block. \n", + "#@markdown #####To analyse large blocks, your blocks need to be divided into tiles. Each tile will then be processed independently and re-assembled to generate the final block.\n", "\n", "n_tiles_Y = 1#@param {type:\"number\"}\n", "n_tiles_X = 1#@param {type:\"number\"}\n", "\n", "\n", - "#@markdown ###What outputs would you like to generate? 
\n", + "#@markdown ###What outputs would you like to generate?\n", "Mask_images = True #@param {type:\"boolean\"}\n", "\n", "Region_of_interests = True #@param {type:\"boolean\"}\n", "\n", - "if (Use_the_current_trained_model): \n", + "if (Use_the_current_trained_model):\n", " print(\"Using current trained network\")\n", " Prediction_model_name = model_name\n", " Prediction_model_path = model_path\n", @@ -2354,7 +2352,7 @@ " short_name = os.path.splitext(image)\n", "\n", " n_channel = 1 if X.ndim == 2 else X.shape[-1]\n", - " \n", + "\n", " # axis_norm = (0,1,2) # normalize channels jointly\n", " if n_channel == 1:\n", " axis_norm = (0,1) # normalize channels independently\n", @@ -2370,24 +2368,24 @@ " axes=\"YXC\"\n", " block_size = (block_size_Y, block_size_X, 3)\n", " n_tiles = (n_tiles_Y, n_tiles_X, 1)\n", - " min_overlap = (min_overlap, min_overlap, 0) \n", + " min_overlap = (min_overlap, min_overlap, 0)\n", " sys.stdout.flush()\n", - " \n", + "\n", " zarr.save_array(str(Temp_folder+\"/image.zarr\"), X)\n", " del X\n", " img = zarr.open(str(Temp_folder+\"/image.zarr\"), mode='r')\n", - " \n", + "\n", " labels = zarr.open(str(Temp_folder+\"/labels.zarr\"), mode='w', shape=img.shape[:3], chunks=img.chunks[:3], dtype=np.int32)\n", - " \n", - " \n", + "\n", + "\n", " labels, polygons = model.predict_instances_big(img, axes=axes, block_size=block_size, min_overlap=min_overlap, context=None,\n", " normalizer=normalizer, show_progress=True, n_tiles=n_tiles)\n", - " \n", + "\n", "# Save the predicted mask in the result folder\n", " os.chdir(Results_folder)\n", " if Mask_images:\n", " imsave(str(short_name[0])+\".tif\", labels)\n", - " if Region_of_interests: \n", + " if Region_of_interests:\n", " export_imagej_rois(str(short_name[0])+'labels_roi.zip', polygons['coord'], compression=ZIP_DEFLATED)\n", "\n", " del img\n", @@ -2395,11 +2393,11 @@ "\n", "# Displaying the time elapsed for training\n", "dt = time.time() - start\n", - "mins, sec = divmod(dt, 60) \n", - "hour, mins = divmod(mins, 60) \n", + "mins, sec = divmod(dt, 60)\n", + "hour, mins = divmod(mins, 60)\n", "print(\"Time elapsed:\",hour, \"hour(s)\",mins,\"min(s)\",round(sec),\"sec(s)\")\n", "\n", - "# One example image \n", + "# One example image\n", "\n", "fig, (a,b) = plt.subplots(1,2, figsize=(20,20))\n", "a.imshow(labels[::8,::8], cmap='tab20b')\n", @@ -2485,8 +2483,7 @@ "metadata": { "accelerator": "GPU", "colab": { - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "display_name": "Python 3", @@ -2508,4 +2505,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file