From 1b92c7af34785bc841b2d511059d21c3370e1ce7 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 14 Dec 2023 15:44:10 +1100 Subject: [PATCH 01/15] Test for colab --- examples/AutoSegmentation.ipynb | 907 ++++++++++++++++---------------- 1 file changed, 455 insertions(+), 452 deletions(-) diff --git a/examples/AutoSegmentation.ipynb b/examples/AutoSegmentation.ipynb index e501496..bbb1237 100644 --- a/examples/AutoSegmentation.ipynb +++ b/examples/AutoSegmentation.ipynb @@ -1,453 +1,456 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Auto-segmentation Inference & Analysis\n", - "\n", - "A common task when working the medical imaging data, it to run an auto-segmentation model\n", - "(inference) on the images in your dataset. If you have manual definitions of the same structures\n", - "available in your dataset, you will typically want to compare the auto and the manual\n", - "segmentations, computing metrics and produce plots and visualisation.\n", - "\n", - "This example notebook will guide you through the process of performing model inference and analysis\n", - "of those structures. We will use a single atlas-based segmentation model for demonstration\n", - "purposes." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "try:\n", - " from pydicer import PyDicer\n", - "except ImportError:\n", - " !pip install pydicer\n", - " from pydicer import PyDicer\n", - "\n", - "import os\n", - "import logging\n", - "\n", - "from pathlib import Path\n", - "\n", - "import SimpleITK as sitk\n", - "\n", - "from platipy.imaging.registration.utils import apply_transform\n", - "from platipy.imaging.registration.linear import linear_registration\n", - "from platipy.imaging.registration.deformable import fast_symmetric_forces_demons_registration\n", - "\n", - "from pydicer.utils import fetch_converted_test_data\n", - "\n", - "from pydicer.generate.segmentation import segment_image, read_all_segmentation_logs\n", - "\n", - "from pydicer.analyse.compare import (\n", - " compute_contour_similarity_metrics,\n", - " get_all_similarity_metrics_for_dataset,\n", - " prepare_similarity_metric_analysis\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Setup PyDicer\n", - "\n", - "For this example, we will use the LCTSC test data which has already been converted using PyDicer.\n", - "We also initialise our PyDicer object." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "working_directory = fetch_converted_test_data(\"./lctsc_autoseg\", dataset=\"LCTSC\")\n", - "pydicer = PyDicer(working_directory)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prepare Atlas\n", - "\n", - "Since we will use a single atlas-based segmentation model, we must\n", - "split our data, selecting one case as our `atlas` and the remaining cases as the `validation` set.\n", - "We use the PyDicer `dataset preparation` module to split our dataset." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df = pydicer.read_converted_data()\n", - "\n", - "# Specify the patient ID to use as the atlas case\n", - "atlas_dataset = \"atlas\"\n", - "atlas_case = \"LCTSC-Train-S1-001\"\n", - "df_atlas = df[df.patient_id==atlas_case]\n", - "\n", - "# And the remaining cases will make up our validation set\n", - "validation_dataset = \"validation\"\n", - "df_validation = df[df.patient_id!=atlas_case]\n", - "\n", - "# Use the dataset preparation module to prepare these two data subsets\n", - "pydicer.dataset.prepare_from_dataframe(atlas_dataset, df_atlas)\n", - "pydicer.dataset.prepare_from_dataframe(validation_dataset, df_validation)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define Segmentation Function\n", - "\n", - "Now that our `atlas` and `validation` sets are ready, we will define our function which will run\n", - "our simple single atlas-based auto-segmentation model for us. This example uses the [atlas-based\n", - "segmentation tools available in PlatiPy](https://pyplati.github.io/platipy/_examples/atlas_segmentation.html#).\n", - "\n", - "In a real-world scenario, you will have your own segmentation model you wish to apply. You should\n", - "integrate this model into such a function which accepts an image and returns a dictionary of\n", - "structures.\n", - "\n", - "To get started, we recommend you try running the [`TotalSegmentator`](https://github.com/wasserth/TotalSegmentator) \n", - "model on your CT data. PyDicer already has a function ready which runs this model, check out\n", - "`run_total_segmentator`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def single_atlas_segmentation(img):\n", - " \"\"\"Segment an image using a single atlas case\n", - "\n", - " Args:\n", - " img (SimpleITK.Image): The SimpleITK image to segment.\n", - "\n", - " Returns:\n", - " dict: The segmented structure dictionary\n", - " \"\"\"\n", - "\n", - " # Load the atlas case image\n", - " atlas_img_row = df_atlas[df_atlas.modality==\"CT\"].iloc[0]\n", - " atlas_img = sitk.ReadImage(str(Path(atlas_img_row.path).joinpath(\"CT.nii.gz\")))\n", - "\n", - " # Load the atlas case structures\n", - " atlas_structures = {}\n", - " atlas_struct_row = df_atlas[df_atlas.modality==\"RTSTRUCT\"].iloc[0]\n", - " for struct_path in Path(atlas_struct_row.path).glob(\"*.nii.gz\"):\n", - " struct_name = struct_path.name.replace(\".nii.gz\", \"\")\n", - " atlas_structures[struct_name] = sitk.ReadImage(str(struct_path))\n", - "\n", - " # Use a simple linear (rigid) registration to align the input image with the atlas image\n", - " img_ct_atlas_reg_linear, tfm_linear = linear_registration(\n", - " fixed_image = img,\n", - " moving_image = atlas_img,\n", - " reg_method='similarity',\n", - " metric='mean_squares',\n", - " optimiser='gradient_descent',\n", - " shrink_factors=[4, 2],\n", - " smooth_sigmas=[2, 0],\n", - " sampling_rate=1.0,\n", - " number_of_iterations=50,\n", - " )\n", - "\n", - " # Perform a fast deformable registration\n", - " img_ct_atlas_reg_dir, tfm_dir, dvf = fast_symmetric_forces_demons_registration(\n", - " img,\n", - " img_ct_atlas_reg_linear,\n", - " ncores=4,\n", - " isotropic_resample=True,\n", - " resolution_staging=[4],\n", - " iteration_staging=[20],\n", - " )\n", - "\n", - " # Combine the two transforms\n", - " tfm_combined = sitk.CompositeTransform((tfm_linear, tfm_dir))\n", - "\n", - " # 
Apply the transform to the atlas structures\n", - " auto_segmentations = {}\n", - " for s in atlas_structures:\n", - " auto_segmentations[s] = apply_transform(\n", - " atlas_structures[s],\n", - " reference_image=img,\n", - " transform=tfm_combined\n", - " )\n", - "\n", - " return auto_segmentations" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Run Auto-segmentation\n", - "\n", - "The `segment_dataset` function will run over all images in our dataset and will pass the images to\n", - "a function we define for segmentation. We pass in the name of our `validation_dataset` so that only\n", - "the images in this dataset will be segmented." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "segment_id = \"atlas\" # Used to generate the ID of the resulting auto-segmented structure sets\n", - "\n", - "pydicer.segment_dataset(segment_id, single_atlas_segmentation, dataset_name=validation_dataset)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can use PyDicer's `visualisation` module to produce snapshots of the auto-segmentations\n", - "produced." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pydicer.visualise.visualise(force=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Read Segmentation Logs\n", - "\n", - "After running the auto-segmentation on across the dataset, we can fetch the logs to confirm that\n", - "everything went well. This will also let us inspect the runtime of the segmentation. In case\n", - "something went wrong, we can use these logs to help debug the issue." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Read the segmentation log DataFrame\n", - "df_logs = read_all_segmentation_logs(working_directory)\n", - "df_logs" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Use some Pandas magic to produce some stats on the segmentation runtime\n", - "df_success = df_logs[df_logs.success_flag]\n", - "agg_stats = [\"mean\", \"std\", \"max\", \"min\", \"count\"]\n", - "df_success[[\"segment_id\", \"total_time_seconds\"]].groupby(\"segment_id\").agg(agg_stats)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Auto-segmentation Analysis\n", - "\n", - "Now that our auto-segmentation model has been run on our `validation` set, we can compare these\n", - "structures to the manual structures available on this dataset. PyDicer provides functionality to\n", - "compute similarity metrics, but we must first prepare a DataFrame containing our auto structure\n", - "sets (`df_target`) and a separate DataFrame with our manual structure sets (`df_reference`)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df = pydicer.read_converted_data(dataset_name=validation_dataset)\n", - "df_structs = df[df.modality==\"RTSTRUCT\"]\n", - "\n", - "df_reference = df_structs[~df_structs.hashed_uid.str.startswith(f\"atlas_\")]\n", - "df_target = df_structs[df_structs.hashed_uid.str.startswith(f\"atlas_\")]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df_reference" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "df_target" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Compute Similarity \n", - "\n", - "We use the `compute_contour_similarity_metrics` function to compute the metrics comparing our\n", - "target structures to our reference structures.\n", - "\n", - "We can specify which metrics we want to compute, in this example we compute the Dice Similarity\n", - "Coefficient (DSC), Hausdorff Distance, Mean Surface Distance and the Surface DSC.\n", - "\n", - "> Structure names must match exactly, so we use a structure name mapping to standardise our\n", - "> structure names prior to computing the similarity metrics." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Add our structure name mapping\n", - "mapping_id = \"nnunet_lctsc\"\n", - "mapping = {\n", - " \"Esophagus\": [],\n", - " \"Heart\": [],\n", - " \"Lung_L\": [\"L_Lung\", \"Lung_Left\"],\n", - " \"Lung_R\": [\"Lung_Right\"],\n", - " \"SpinalCord\": [\"SC\"],\n", - "}\n", - "pydicer.add_structure_name_mapping(mapping, mapping_id)\n", - "\n", - "# Specify the metrics we want to compute\n", - "compute_metrics = [\"DSC\", \"hausdorffDistance\", \"meanSurfaceDistance\", \"surfaceDSC\"]\n", - "\n", - "# Compute the similarity metrics\n", - "compute_contour_similarity_metrics(\n", - " df_target,\n", - " df_reference,\n", - " segment_id,\n", - " compute_metrics=compute_metrics,\n", - " mapping_id=mapping_id\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Fetch the similarity metrics\n", - "\n", - "Here we fetch the metrics computed and output some stats. Note that if a segmentation fails, \n", - "surface metrics will return NaN and will be excluded from these stats." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Fetch the similarity metrics\n", - "df_metrics = get_all_similarity_metrics_for_dataset(\n", - " working_directory,\n", - " dataset_name=validation_dataset,\n", - " structure_mapping_id=mapping_id\n", - ")\n", - "\n", - "# Aggregate the stats using Pandas\n", - "df_metrics[\n", - " [\"segment_id\", \"structure\", \"metric\", \"value\"]\n", - " ].groupby(\n", - " [\"segment_id\", \"structure\", \"metric\"]\n", - " ).agg(\n", - " [\"mean\", \"std\", \"min\", \"max\", \"count\"]\n", - " )\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Perform Analysis\n", - "\n", - "There are various plots and visualisations you may wish to produce following computation of\n", - "similarity metrics. The `prepare_similarity_metric_analysis` will generate several useful plots\n", - "which will serve as a useful starting point when analysing your auto-segmentation results.\n", - "\n", - "Plots are generated in a directory you provide. 
In this example, plots and tables (`.csv`) are\n", - "output in the `testdata_lctsc/analysis/atlas` directory. Run the following cell, then navigate\n", - "to that directory to explore the results." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "analysis_output_directory = working_directory.joinpath(\n", - " \"analysis\",\n", - " segment_id\n", - ")\n", - "analysis_output_directory.mkdir(parents=True, exist_ok=True)\n", - "\n", - "prepare_similarity_metric_analysis(\n", - " working_directory,\n", - " analysis_output_directory=analysis_output_directory,\n", - " dataset_name=validation_dataset,\n", - " structure_mapping_id=mapping_id\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "interpreter": { - "hash": "814af119db7f8f2860617be3dcd1d37c560587d11c65bd58c45b1679d3ee6ea4" - }, - "kernelspec": { - "display_name": "Python 3.8.0 64-bit ('pydicer': pyenv)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.0" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Auto-segmentation Inference & Analysis\n", + "\n", + "\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/AutoSegmentation.ipynb)\n", + "\n", + "A common task when working with medical imaging data is to run an auto-segmentation model\n", + "(inference) on the images in your dataset. If you have manual definitions of the same structures\n", + "available in your dataset, you will typically want to compare the auto and the manual\n", + "segmentations, computing metrics and producing plots and visualisations.\n", + "\n", + "This example notebook will guide you through the process of performing model inference and analysis\n", + "of those structures. We will use a single atlas-based segmentation model for demonstration\n", + "purposes."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " from pydicer import PyDicer\n", + "except ImportError:\n", + " !pip install pydicer\n", + " from pydicer import PyDicer\n", + "\n", + "import os\n", + "import logging\n", + "\n", + "from pathlib import Path\n", + "\n", + "import SimpleITK as sitk\n", + "\n", + "from platipy.imaging.registration.utils import apply_transform\n", + "from platipy.imaging.registration.linear import linear_registration\n", + "from platipy.imaging.registration.deformable import fast_symmetric_forces_demons_registration\n", + "\n", + "from pydicer.utils import fetch_converted_test_data\n", + "\n", + "from pydicer.generate.segmentation import segment_image, read_all_segmentation_logs\n", + "\n", + "from pydicer.analyse.compare import (\n", + " compute_contour_similarity_metrics,\n", + " get_all_similarity_metrics_for_dataset,\n", + " prepare_similarity_metric_analysis\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup PyDicer\n", + "\n", + "For this example, we will use the LCTSC test data which has already been converted using PyDicer.\n", + "We also initialise our PyDicer object." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "working_directory = fetch_converted_test_data(\"./lctsc_autoseg\", dataset=\"LCTSC\")\n", + "pydicer = PyDicer(working_directory)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prepare Atlas\n", + "\n", + "Since we will use a single atlas-based segmentation model, we must\n", + "split our data, selecting one case as our `atlas` and the remaining cases as the `validation` set.\n", + "We use the PyDicer `dataset preparation` module to split our dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = pydicer.read_converted_data()\n", + "\n", + "# Specify the patient ID to use as the atlas case\n", + "atlas_dataset = \"atlas\"\n", + "atlas_case = \"LCTSC-Train-S1-001\"\n", + "df_atlas = df[df.patient_id==atlas_case]\n", + "\n", + "# And the remaining cases will make up our validation set\n", + "validation_dataset = \"validation\"\n", + "df_validation = df[df.patient_id!=atlas_case]\n", + "\n", + "# Use the dataset preparation module to prepare these two data subsets\n", + "pydicer.dataset.prepare_from_dataframe(atlas_dataset, df_atlas)\n", + "pydicer.dataset.prepare_from_dataframe(validation_dataset, df_validation)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define Segmentation Function\n", + "\n", + "Now that our `atlas` and `validation` sets are ready, we will define our function which will run\n", + "our simple single atlas-based auto-segmentation model for us. This example uses the [atlas-based\n", + "segmentation tools available in PlatiPy](https://pyplati.github.io/platipy/_examples/atlas_segmentation.html#).\n", + "\n", + "In a real-world scenario, you will have your own segmentation model you wish to apply. You should\n", + "integrate this model into such a function which accepts an image and returns a dictionary of\n", + "structures.\n", + "\n", + "To get started, we recommend you try running the [TotalSegmentator](https://github.com/wasserth/TotalSegmentator) \n", + "model on your CT data. 
PyDicer already has a function ready which runs this model, check out\n", + "[run_total_segmentator](https://australiancancerdatanetwork.github.io/pydicer/generate.html#pydicer.generate.models.run_total_segmentator)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def single_atlas_segmentation(img):\n", + " \"\"\"Segment an image using a single atlas case\n", + "\n", + " Args:\n", + " img (SimpleITK.Image): The SimpleITK image to segment.\n", + "\n", + " Returns:\n", + " dict: The segmented structure dictionary\n", + " \"\"\"\n", + "\n", + " # Load the atlas case image\n", + " atlas_img_row = df_atlas[df_atlas.modality==\"CT\"].iloc[0]\n", + " atlas_img = sitk.ReadImage(str(Path(atlas_img_row.path).joinpath(\"CT.nii.gz\")))\n", + "\n", + " # Load the atlas case structures\n", + " atlas_structures = {}\n", + " atlas_struct_row = df_atlas[df_atlas.modality==\"RTSTRUCT\"].iloc[0]\n", + " for struct_path in Path(atlas_struct_row.path).glob(\"*.nii.gz\"):\n", + " struct_name = struct_path.name.replace(\".nii.gz\", \"\")\n", + " atlas_structures[struct_name] = sitk.ReadImage(str(struct_path))\n", + "\n", + " # Use a simple linear (rigid) registration to align the input image with the atlas image\n", + " img_ct_atlas_reg_linear, tfm_linear = linear_registration(\n", + " fixed_image = img,\n", + " moving_image = atlas_img,\n", + " reg_method='similarity',\n", + " metric='mean_squares',\n", + " optimiser='gradient_descent',\n", + " shrink_factors=[4, 2],\n", + " smooth_sigmas=[2, 0],\n", + " sampling_rate=1.0,\n", + " number_of_iterations=50,\n", + " )\n", + "\n", + " # Perform a fast deformable registration\n", + " img_ct_atlas_reg_dir, tfm_dir, dvf = fast_symmetric_forces_demons_registration(\n", + " img,\n", + " img_ct_atlas_reg_linear,\n", + " ncores=4,\n", + " isotropic_resample=True,\n", + " resolution_staging=[4],\n", + " iteration_staging=[20],\n", + " )\n", + "\n", + " # Combine the two transforms\n", + " tfm_combined = sitk.CompositeTransform((tfm_linear, tfm_dir))\n", + "\n", + " # Apply the transform to the atlas structures\n", + " auto_segmentations = {}\n", + " for s in atlas_structures:\n", + " auto_segmentations[s] = apply_transform(\n", + " atlas_structures[s],\n", + " reference_image=img,\n", + " transform=tfm_combined\n", + " )\n", + "\n", + " return auto_segmentations" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run Auto-segmentation\n", + "\n", + "The `segment_dataset` function will run over all images in our dataset and will pass the images to\n", + "a function we define for segmentation. We pass in the name of our `validation_dataset` so that only\n", + "the images in this dataset will be segmented." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "segment_id = \"atlas\" # Used to generate the ID of the resulting auto-segmented structure sets\n", + "\n", + "pydicer.segment_dataset(segment_id, single_atlas_segmentation, dataset_name=validation_dataset)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can use PyDicer's `visualisation` module to produce snapshots of the auto-segmentations\n", + "produced." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pydicer.visualise.visualise(force=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Read Segmentation Logs\n", + "\n", + "After running the auto-segmentation across the dataset, we can fetch the logs to confirm that\n", + "everything went well. This will also let us inspect the runtime of the segmentation. In case\n", + "something went wrong, we can use these logs to help debug the issue." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Read the segmentation log DataFrame\n", + "df_logs = read_all_segmentation_logs(working_directory)\n", + "df_logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use some Pandas magic to produce some stats on the segmentation runtime\n", + "df_success = df_logs[df_logs.success_flag]\n", + "agg_stats = [\"mean\", \"std\", \"max\", \"min\", \"count\"]\n", + "df_success[[\"segment_id\", \"total_time_seconds\"]].groupby(\"segment_id\").agg(agg_stats)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Auto-segmentation Analysis\n", + "\n", + "Now that our auto-segmentation model has been run on our `validation` set, we can compare these\n", + "structures to the manual structures available on this dataset. PyDicer provides functionality to\n", + "compute similarity metrics, but we must first prepare a DataFrame containing our auto structure\n", + "sets (`df_target`) and a separate DataFrame with our manual structure sets (`df_reference`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df = pydicer.read_converted_data(dataset_name=validation_dataset)\n", + "df_structs = df[df.modality==\"RTSTRUCT\"]\n", + "\n", + "df_reference = df_structs[~df_structs.hashed_uid.str.startswith(\"atlas_\")]\n", + "df_target = df_structs[df_structs.hashed_uid.str.startswith(\"atlas_\")]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df_reference" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "df_target" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Compute Similarity \n", + "\n", + "We use the `compute_contour_similarity_metrics` function to compute the metrics comparing our\n", + "target structures to our reference structures.\n", + "\n", + "We can specify which metrics we want to compute; in this example we compute the Dice Similarity\n", + "Coefficient (DSC), Hausdorff Distance, Mean Surface Distance and the Surface DSC.\n", + "\n", + "> Structure names must match exactly, so we use a structure name mapping to standardise our\n", + "> structure names prior to computing the similarity metrics."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Add our structure name mapping\n", + "mapping_id = \"nnunet_lctsc\"\n", + "mapping = {\n", + " \"Esophagus\": [],\n", + " \"Heart\": [],\n", + " \"Lung_L\": [\"L_Lung\", \"Lung_Left\"],\n", + " \"Lung_R\": [\"Lung_Right\"],\n", + " \"SpinalCord\": [\"SC\"],\n", + "}\n", + "pydicer.add_structure_name_mapping(mapping, mapping_id)\n", + "\n", + "# Specify the metrics we want to compute\n", + "compute_metrics = [\"DSC\", \"hausdorffDistance\", \"meanSurfaceDistance\", \"surfaceDSC\"]\n", + "\n", + "# Compute the similarity metrics\n", + "compute_contour_similarity_metrics(\n", + " df_target,\n", + " df_reference,\n", + " segment_id,\n", + " compute_metrics=compute_metrics,\n", + " mapping_id=mapping_id\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Fetch the similarity metrics\n", + "\n", + "Here we fetch the computed metrics and output some stats. Note that if a segmentation fails, \n", + "surface metrics will return NaN and will be excluded from these stats." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Fetch the similarity metrics\n", + "df_metrics = get_all_similarity_metrics_for_dataset(\n", + " working_directory,\n", + " dataset_name=validation_dataset,\n", + " structure_mapping_id=mapping_id\n", + ")\n", + "\n", + "# Aggregate the stats using Pandas\n", + "df_metrics[\n", + " [\"segment_id\", \"structure\", \"metric\", \"value\"]\n", + " ].groupby(\n", + " [\"segment_id\", \"structure\", \"metric\"]\n", + " ).agg(\n", + " [\"mean\", \"std\", \"min\", \"max\", \"count\"]\n", + " )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Perform Analysis\n", + "\n", + "There are various plots and visualisations you may wish to produce following computation of\n", + "similarity metrics. The `prepare_similarity_metric_analysis` function will generate several plots\n", + "which will serve as a useful starting point when analysing your auto-segmentation results.\n", + "\n", + "Plots are generated in a directory you provide. In this example, plots and tables (`.csv`) are\n", + "output in the `testdata_lctsc/analysis/atlas` directory. Run the following cell, then navigate\n", + "to that directory to explore the results."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "analysis_output_directory = working_directory.joinpath(\n", + " \"analysis\",\n", + " segment_id\n", + ")\n", + "analysis_output_directory.mkdir(parents=True, exist_ok=True)\n", + "\n", + "prepare_similarity_metric_analysis(\n", + " working_directory,\n", + " analysis_output_directory=analysis_output_directory,\n", + " dataset_name=validation_dataset,\n", + " structure_mapping_id=mapping_id\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "814af119db7f8f2860617be3dcd1d37c560587d11c65bd58c45b1679d3ee6ea4" + }, + "kernelspec": { + "display_name": "Python 3.8", + "language": "python", + "name": "py38" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.0" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file From 1db285c789a235e1337cce14703ff22dcedbc7ef Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 14 Dec 2023 15:49:13 +1100 Subject: [PATCH 02/15] Update kernel name --- examples/AutoSegmentation.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/AutoSegmentation.ipynb b/examples/AutoSegmentation.ipynb index bbb1237..91e0e5f 100644 --- a/examples/AutoSegmentation.ipynb +++ b/examples/AutoSegmentation.ipynb @@ -435,7 +435,7 @@ "kernelspec": { "display_name": "Python 3.8", "language": "python", - "name": "py38" + "name": "python38" }, "language_info": { "codemirror_mode": { From d72c12107f74d05f1b83444e68a5b8d88aadaaa9 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 14 Dec 2023 15:50:29 +1100 Subject: [PATCH 03/15] Update kern --- examples/AutoSegmentation.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/AutoSegmentation.ipynb b/examples/AutoSegmentation.ipynb index 91e0e5f..67d85a5 100644 --- a/examples/AutoSegmentation.ipynb +++ b/examples/AutoSegmentation.ipynb @@ -7,7 +7,7 @@ "# Auto-segmentation Inference & Analysis\n", "\n", "\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/AutoSegmentation.ipynb)\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/finalise-docs/examples/AutoSegmentation.ipynb)\n", "\n", "A common task when working with medical imaging data is to run an auto-segmentation model\n", "(inference) on the images in your dataset. 
If you have manual definitions of the same structures\n", From c9871e22c1f79631c3c46e6be05a78405249a607 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 14 Dec 2023 16:11:23 +1100 Subject: [PATCH 04/15] Attempt loosening pyradiomics version requirement --- poetry.lock | 135 ++++++++++++++++++++++++++----------------------- pyproject.toml | 2 +- 2 files changed, 74 insertions(+), 63 deletions(-) diff --git a/poetry.lock b/poetry.lock index e5f9052..6315047 100644 --- a/poetry.lock +++ b/poetry.lock @@ -197,18 +197,17 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte [[package]] name = "babel" -version = "2.13.1" +version = "2.14.0" description = "Internationalization utilities" optional = false python-versions = ">=3.7" files = [ - {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"}, - {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"}, + {file = "Babel-2.14.0-py3-none-any.whl", hash = "sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287"}, + {file = "Babel-2.14.0.tar.gz", hash = "sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363"}, ] [package.dependencies] pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} -setuptools = {version = "*", markers = "python_version >= \"3.12\""} [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] @@ -525,6 +524,7 @@ files = [ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"}, {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"}, {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"}, + {file = "contourpy-1.1.0-cp310-cp310-win32.whl", hash = "sha256:9b2dd2ca3ac561aceef4c7c13ba654aaa404cf885b187427760d7f7d4c57cff8"}, {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"}, {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"}, {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"}, @@ -533,6 +533,7 @@ files = [ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"}, {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"}, {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"}, + {file = "contourpy-1.1.0-cp311-cp311-win32.whl", hash = "sha256:edb989d31065b1acef3828a3688f88b2abb799a7db891c9e282df5ec7e46221b"}, {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"}, {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"}, {file = 
"contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"}, @@ -541,6 +542,7 @@ files = [ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"}, {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"}, {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"}, + {file = "contourpy-1.1.0-cp38-cp38-win32.whl", hash = "sha256:108dfb5b3e731046a96c60bdc46a1a0ebee0760418951abecbe0fc07b5b93b27"}, {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"}, {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"}, {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"}, @@ -549,6 +551,7 @@ files = [ {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"}, {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"}, {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"}, + {file = "contourpy-1.1.0-cp39-cp39-win32.whl", hash = "sha256:71551f9520f008b2950bef5f16b0e3587506ef4f23c734b71ffb7b89f8721999"}, {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"}, {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"}, {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"}, @@ -965,13 +968,13 @@ files = [ [[package]] name = "imageio" -version = "2.33.0" +version = "2.33.1" description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." optional = false python-versions = ">=3.8" files = [ - {file = "imageio-2.33.0-py3-none-any.whl", hash = "sha256:d580d6576d0ae39c459a444a23f6f61fe72123a3df2264f5fce8c87784a4be2e"}, - {file = "imageio-2.33.0.tar.gz", hash = "sha256:39999d05eb500089e60be467dd7d618f56e142229b44c3961c2b420eeb538d7e"}, + {file = "imageio-2.33.1-py3-none-any.whl", hash = "sha256:c5094c48ccf6b2e6da8b4061cd95e1209380afafcbeae4a4e280938cce227e1d"}, + {file = "imageio-2.33.1.tar.gz", hash = "sha256:78722d40b137bd98f5ec7312119f8aea9ad2049f76f434748eb306b6937cc1ce"}, ] [package.dependencies] @@ -1153,20 +1156,17 @@ arrow = ">=0.15.0" [[package]] name = "isort" -version = "5.12.0" +version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" files = [ - {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"}, - {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"}, + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, ] [package.extras] -colors = ["colorama (>=0.4.3)"] -pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] -plugins = ["setuptools"] -requirements-deprecated-finder = ["pip-api", "pipreqs"] +colors = ["colorama (>=0.4.6)"] [[package]] name = "jedi" @@ -1342,13 +1342,13 @@ test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "p [[package]] name = "jupyter-server" -version = "2.12.0" +version = "2.12.1" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_server-2.12.0-py3-none-any.whl", hash = "sha256:3482912efa4387bb1edc23ba60531796aff3b6d6a6e93a5810f5719e2bdb48b7"}, - {file = "jupyter_server-2.12.0.tar.gz", hash = "sha256:9fa74ed3bb931cf33f42b3d9046e2788328ec9e6dcc59d48aa3e0910a491e3e4"}, + {file = "jupyter_server-2.12.1-py3-none-any.whl", hash = "sha256:fd030dd7be1ca572e4598203f718df6630c12bd28a599d7f1791c4d7938e1010"}, + {file = "jupyter_server-2.12.1.tar.gz", hash = "sha256:dc77b7dcc5fc0547acba2b2844f01798008667201eea27c6319ff9257d700a6d"}, ] [package.dependencies] @@ -1378,13 +1378,13 @@ test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-sc [[package]] name = "jupyter-server-terminals" -version = "0.4.4" +version = "0.5.0" description = "A Jupyter Server Extension Providing Terminals." 
optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_server_terminals-0.4.4-py3-none-any.whl", hash = "sha256:75779164661cec02a8758a5311e18bb8eb70c4e86c6b699403100f1585a12a36"}, - {file = "jupyter_server_terminals-0.4.4.tar.gz", hash = "sha256:57ab779797c25a7ba68e97bcfb5d7740f2b5e8a83b5e8102b10438041a7eac5d"}, + {file = "jupyter_server_terminals-0.5.0-py3-none-any.whl", hash = "sha256:2fc0692c883bfd891f4fba0c4b4a684a37234b0ba472f2e97ed0a3888f46e1e4"}, + {file = "jupyter_server_terminals-0.5.0.tar.gz", hash = "sha256:ebcd68c9afbf98a480a533e6f3266354336e645536953b7abcc7bdeebc0154a3"}, ] [package.dependencies] @@ -1392,8 +1392,8 @@ pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} terminado = ">=0.8.3" [package.extras] -docs = ["jinja2", "jupyter-server", "mistune (<3.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] -test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] +docs = ["jinja2", "jupyter-server", "mistune (<4.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] +test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] [[package]] name = "jupyterlab-pygments" @@ -2269,13 +2269,13 @@ testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathspec" -version = "0.11.2" +version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"}, - {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"}, + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, ] [[package]] @@ -2468,13 +2468,13 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.41" +version = "3.0.43" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.41-py3-none-any.whl", hash = "sha256:f36fe301fafb7470e86aaf90f036eef600a3210be4decf461a5b1ca8403d3cb2"}, - {file = "prompt_toolkit-3.0.41.tar.gz", hash = "sha256:941367d97fc815548822aa26c2a269fdc4eb21e9ec05fc5d447cf09bad5d75f0"}, + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, ] [package.dependencies] @@ -2650,19 +2650,23 @@ tests = ["pyfakefs", "pytest", "sqlalchemy"] [[package]] name = "pyorthanc" -version = "1.15.0" +version = "1.16.0" description = "Orthanc REST API python wrapper with additional utilities" optional = false python-versions = ">=3.8,<4.0" files = [ - {file = "pyorthanc-1.15.0-py3-none-any.whl", hash = "sha256:491ef164ad3905dafd5670caecf2cb5ef66479c5849bbdcfc78a75598e9e108b"}, - {file = "pyorthanc-1.15.0.tar.gz", hash = 
"sha256:d4f1ea8d00b4cb59c9f262375173069079d2a49bae170b432b62ce9eba0255a4"}, + {file = "pyorthanc-1.16.0-py3-none-any.whl", hash = "sha256:bf215ff241ea113b465f86bf69a71762ceeade5b3bc312f9ebaf615763b419e2"}, + {file = "pyorthanc-1.16.0.tar.gz", hash = "sha256:b4ced0d43d0b9a3d39b5e0fe1f0f728451f901a5dc6f4fbda93c1238c465b313"}, ] [package.dependencies] httpx = ">=0.24.1,<1.0.0" pydicom = ">=2.3.0,<3.0.0" +[package.extras] +all = ["tqdm (>=4.66.1,<5.0.0)"] +progress = ["tqdm (>=4.66.1,<5.0.0)"] + [[package]] name = "pyparsing" version = "3.1.1" @@ -2850,6 +2854,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2857,8 +2862,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2875,6 +2887,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2882,6 +2895,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2994,13 +3008,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "referencing" -version = "0.31.1" +version = "0.32.0" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.31.1-py3-none-any.whl", hash = "sha256:c19c4d006f1757e3dd75c4f784d38f8698d87b649c54f9ace14e5e8c9667c01d"}, - {file = "referencing-0.31.1.tar.gz", hash = "sha256:81a1471c68c9d5e3831c30ad1dd9815c45b558e596653db751a2bfdd17b3b9ec"}, + {file = "referencing-0.32.0-py3-none-any.whl", hash = "sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99"}, + {file = "referencing-0.32.0.tar.gz", hash = "sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161"}, ] [package.dependencies] @@ -3205,37 +3219,51 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = 
"ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", 
hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win32.whl", hash = "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win32.whl", hash = "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win32.whl", hash = "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-win_amd64.whl", hash = "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15"}, {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, @@ -3268,7 +3296,6 @@ files = [ {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ef5d8d1099317b7b315b530348cbfa68ab8ce32459de3c074d204166951025c"}, {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b1e96c59cab640ca5c5b22c501524cfaf34cbe0cb51ba73bd9a9ede3fb6e1d"}, {file = "scikit_image-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:9cffcddd2a5594c0a06de2ae3e1e25d662745a26f94fda31520593669677c010"}, - {file = "scikit_image-0.21.0.tar.gz", hash = "sha256:b33e823c54e6f11873ea390ee49ef832b82b9f70752c8759efd09d5a4e3d87f0"}, ] [package.dependencies] @@ -3413,22 +3440,6 @@ nativelib = ["pyobjc-framework-Cocoa", "pywin32"] objc = ["pyobjc-framework-Cocoa"] win32 = ["pywin32"] -[[package]] -name = "setuptools" -version = "69.0.2" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, - {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", 
"packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - [[package]] name = "simpleitk" version = "2.3.1" @@ -3877,13 +3888,13 @@ files = [ [[package]] name = "typing-extensions" -version = "4.8.0" +version = "4.9.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, ] [[package]] @@ -4077,4 +4088,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "c442777592786263e5690e48bf4f4b0e9f3acb5ef2bff7ab2aef75e413bd2cb9" +content-hash = "7be4818635f947e98bed201a13f3b9b2193aeae2ba9cb7858d2a1ed704f3b005" diff --git a/pyproject.toml b/pyproject.toml index 039da22..4194b09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,7 +25,7 @@ argparse = ">=1.4.0" seaborn = "^0.12.0" tqdm = "^4.55.1" scikit-learn = "^1.2.2" -pyradiomics = ">=3.1.0" +pyradiomics = ">=3.0.1" [tool.poetry.group.dev.dependencies] pylint = "^2.13.5" From 5eee7b8c9b0d0810f89e27b80b2efbabf9da7424 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 14 Dec 2023 16:16:08 +1100 Subject: [PATCH 05/15] Add 3.10 back into to github action test grid --- .github/workflows/pull-request.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index 3ff9189..fc894d2 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -18,9 +18,8 @@ jobs: strategy: fail-fast: false matrix: - # python-version: ["3.8", "3.9", "3.10"] - python-version: ["3.8", "3.9"] - poetry-version: [1.3.2] + python-version: ["3.8", "3.9", "3.10"] + poetry-version: [1.7.1] # os: [ubuntu-20.04, macos-latest, windows-latest] steps: - uses: actions/checkout@v2 From f70a258ff5debfd641086fe6877f67d29003ddb6 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 14 Dec 2023 16:27:12 +1100 Subject: [PATCH 06/15] Take 3.10 back out of test grid --- .github/workflows/pull-request.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index fc894d2..cf1bde9 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -18,7 +18,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10"] + python-version: ["3.8", "3.9"] poetry-version: [1.7.1] # os: [ubuntu-20.04, macos-latest, windows-latest] steps: From 11e2d42a3e17a184090a0819a7a0b228bb11268a Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Sat, 16 Dec 2023 13:32:53 +1100 Subject: [PATCH 07/15] Link example notebooks to Colab --- examples/AutoSegmentation.ipynb | 11 +++++++---- examples/Configuration.ipynb | 2 ++ examples/ConvertingData.ipynb | 2 ++ examples/DatasetPreparation.ipynb | 6 ++++-- examples/DoseMetrics.ipynb | 2 ++ examples/GettingStarted.ipynb | 2 ++ examples/ObjectGeneration.ipynb | 2 ++ 
examples/Radiomics.ipynb | 2 ++
 examples/VisualiseData.ipynb | 2 ++
 examples/WorkingWithData.ipynb | 4 +++-
 examples/WorkingWithStructures.ipynb | 2 ++
 examples/nnUNet.ipynb | 11 ++++++++---
 12 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/examples/AutoSegmentation.ipynb b/examples/AutoSegmentation.ipynb
index 67d85a5..9f77740 100644
--- a/examples/AutoSegmentation.ipynb
+++ b/examples/AutoSegmentation.ipynb
@@ -6,8 +6,7 @@
 "source": [
 "# Auto-segmentation Inference & Analysis\n",
 "\n",
- "\n",
- "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/finalise-docs/examples/AutoSegmentation.ipynb)\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/AutoSegmentation.ipynb)\n",
 "\n",
 "A common task when working with medical imaging data is to run an auto-segmentation model\n",
 "(inference) on the images in your dataset. If you have manual definitions of the same structures\n",
@@ -16,7 +15,11 @@
 "\n",
 "This example notebook will guide you through the process of performing model inference and analysis\n",
 "of those structures. We will use a single atlas-based segmentation model for demonstration\n",
- "purposes."
+ "purposes.\n",
+ "\n",
+ "> Warning: The auto-segmentation results produced by the example in this notebook are poor. The\n",
+ "> atlas-based segmentation function is optimised for runtime and is purely provided to demonstrate how\n",
+ "> to run and analyse an auto-segmentation model."
 ]
 },
 {
@@ -435,7 +438,7 @@
 "kernelspec": {
 "display_name": "Python 3.8",
 "language": "python",
- "name": "python38"
+ "name": "python"
 },
 "language_info": {
 "codemirror_mode": {
diff --git a/examples/Configuration.ipynb b/examples/Configuration.ipynb
index 0559bd6..bfa833f 100644
--- a/examples/Configuration.ipynb
+++ b/examples/Configuration.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Configuration\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/Configuration.ipynb)\n",
+ "\n",
 "PyDicer provides various options which you may configure to change the behaviour of the tool."
 ]
 },
diff --git a/examples/ConvertingData.ipynb b/examples/ConvertingData.ipynb
index 1b3513c..9de8e49 100644
--- a/examples/ConvertingData.ipynb
+++ b/examples/ConvertingData.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Converting Data\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/ConvertingData.ipynb)\n",
+ "\n",
 "In this example, the preprocessing and conversion of DICOM data is demonstrated. These are\n",
 "essential first steps before data can be analysed using PyDicer."
]
diff --git a/examples/DatasetPreparation.ipynb b/examples/DatasetPreparation.ipynb
index 7cc81c7..f725397 100644
--- a/examples/DatasetPreparation.ipynb
+++ b/examples/DatasetPreparation.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Dataset Preparation\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/DatasetPreparation.ipynb)\n",
+ "\n",
 "When working with real-world DICOM datasets, you will often need to tackle the task of cleaning the\n",
 "dataset. Often you will have several image series, structure set and even dose grids for each\n",
 "patient. However you typically want to select one relevant DICOM object in each category.\n",
@@ -98,7 +100,7 @@
 "Here we use the `dataset preparation` module to extract the latest dose grid by date. We refer to\n",
 "this subset of data as `dose_project`.\n",
 "\n",
- "We use the built-in data extraction function, named [`rt_latest_dose`](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_dose)."
+ "We use the built-in data extraction function, named [rt_latest_dose](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_dose)."
 ]
 },
 {
@@ -167,7 +169,7 @@
 "\n",
 "In this example, we not only select the latest structure set by date, but we specify the\n",
 "`StudyDescription` values of the DICOM metadata of the data objects we want to select. To achieve\n",
- "this, we use the built-in [`rt_latest_struct`](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_struct) function.\n",
+ "this, we use the built-in [rt_latest_struct](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_struct) function.\n",
 "\n",
 "Observe the output of the following cell and explore the `testdata_hnscc` directory. We now have\n",
 "one structure set and the linked image for each patient."
diff --git a/examples/DoseMetrics.ipynb b/examples/DoseMetrics.ipynb
index e364024..050641c 100644
--- a/examples/DoseMetrics.ipynb
+++ b/examples/DoseMetrics.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Dose Metrics\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/DoseMetrics.ipynb)\n",
+ "\n",
 "In this example notebook we will compute Dose Volume Histograms (DVH) for our `RTDOSE` objects\n",
 "across structures found in `RTSTRUCT` objects in our dataset. We use\n",
 "[HNSCC](https://wiki.cancerimagingarchive.net/display/Public/HNSCC) data from the Cancer Imaging\n",
 "Archive which has already been converted using PyDicer for demonstration purposes."
 ]
 },
diff --git a/examples/GettingStarted.ipynb b/examples/GettingStarted.ipynb
index 1ccf807..2a09cfe 100644
--- a/examples/GettingStarted.ipynb
+++ b/examples/GettingStarted.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Getting Started\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/GettingStarted.ipynb)\n",
+ "\n",
 "This notebook provides a basic example to run the PyDicer pipeline using some test data."
]
},
diff --git a/examples/ObjectGeneration.ipynb b/examples/ObjectGeneration.ipynb
index 6f3d84b..a61ee80 100644
--- a/examples/ObjectGeneration.ipynb
+++ b/examples/ObjectGeneration.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Generating Data Objects\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/ObjectGeneration.ipynb)\n",
+ "\n",
 "While PyDicer primarily deals with converting data objects from DICOM, there are instances where\n",
 "you may want to generate a new data object and have it integrated into your PyDicer dataset.\n",
 "\n",
diff --git a/examples/Radiomics.ipynb b/examples/Radiomics.ipynb
index 29b121a..ccddc69 100644
--- a/examples/Radiomics.ipynb
+++ b/examples/Radiomics.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Compute Radiomics\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/Radiomics.ipynb)\n",
+ "\n",
 "In this example notebook we use [PyRadiomics](https://github.com/AIM-Harvard/pyradiomics) to\n",
 "compute various types of radiomics features. We use some\n",
 "[LCTSC](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=24284539) data from the\n",
diff --git a/examples/VisualiseData.ipynb b/examples/VisualiseData.ipynb
index d139f5f..c74a36e 100644
--- a/examples/VisualiseData.ipynb
+++ b/examples/VisualiseData.ipynb
@@ -6,6 +6,8 @@
 "source": [
 "# Visualise Data\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/VisualiseData.ipynb)\n",
+ "\n",
 "PyDicer's `visualise` module will produce cross-sections of data objects and store them\n",
 "in `.png` format within the data object directory. This is particularly useful for fast inspection\n",
 "of the data to ensure that nothing has gone wrong during conversion.\n",
diff --git a/examples/WorkingWithData.ipynb b/examples/WorkingWithData.ipynb
index aad36f3..815af68 100644
--- a/examples/WorkingWithData.ipynb
+++ b/examples/WorkingWithData.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Working with Data\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/WorkingWithData.ipynb)\n",
+ "\n",
 "Here we present some useful tips & tricks to help when working with data which has been converted\n",
 "using PyDicer. As you will see, working with data in PyDicer is heavily oriented around DataFrames\n",
 "provided by the Pandas library. If you aren't familiar with Pandas, we recommend working through \n",
@@ -136,7 +138,7 @@
 "Here we load only the `StudyDate`, `PatientSex` and `Manufacturer`.\n",
 "\n",
 "> Tip: These tags are defined by the DICOM standard, and we use `pydicom` to load this metadata. In\n",
- "> fact, the metadata returned is a `pydicom` Dataset. Check out the [`pydicom` documentation](https://pydicom.github.io/pydicom/dev/old/pydicom_user_guide.html) for more information."
+ "> fact, the metadata returned is a `pydicom` Dataset. Check out the [pydicom documentation](https://pydicom.github.io/pydicom/dev/old/pydicom_user_guide.html) for more information."
]
},
{
diff --git a/examples/WorkingWithStructures.ipynb b/examples/WorkingWithStructures.ipynb
index 70ee45f..19ebb85 100644
--- a/examples/WorkingWithStructures.ipynb
+++ b/examples/WorkingWithStructures.ipynb
@@ -7,6 +7,8 @@
 "source": [
 "# Working with Structures\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/WorkingWithStructures.ipynb)\n",
+ "\n",
 "In PyDicer, structure sets which have been converted from the DICOM `RTSTRUCT` modality are stored\n",
 "within the directory structure. One common issue when working with real-world datasets is that\n",
 "structure names are often inconsistent, requiring standardisation of names prior to analysing data.\n",
diff --git a/examples/nnUNet.ipynb b/examples/nnUNet.ipynb
index f966785..def2b74 100644
--- a/examples/nnUNet.ipynb
+++ b/examples/nnUNet.ipynb
@@ -6,6 +6,8 @@
 "source": [
 "# nnUNet Data Preparation\n",
 "\n",
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/nnUNet.ipynb)\n",
+ "\n",
 "The [nnUNet](https://github.com/MIC-DKFZ/nnUNet) is a self-configuring method for deep\n",
 "learning-based biomedical image segmentation. However it does require data to be formatted in a\n",
 "specific way on the file system. In this notebook, we demonstrate some useful functionality to\n",
@@ -157,7 +159,7 @@
 "DataFrame to confirm that this is the case.\n",
 "\n",
 "If your dataset isn't yet in such a state, you can use the \n",
- "[`dataset preparation`](https://australiancancerdatanetwork.github.io/pydicer/_examples/DatasetPreparation.html)\n",
+ "[dataset preparation](https://australiancancerdatanetwork.github.io/pydicer/_examples/DatasetPreparation.html)\n",
 "module in PyDicer to prepare a subset of data. Once the dataset is prepared, pass the\n",
 "`dataset_name` argument when creating the `NNUNetDataset` object above."
 ]
@@ -199,7 +201,7 @@
 "\n",
 "Here we randomly split our dataset into a training and testing set. You can specify the\n",
 "`training_cases` and `testing_cases` to use in the `split_dataset` function. If these aren't\n",
- "supplied, the [`train_test_split`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)\n",
+ "supplied, the [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html)\n",
 "function from `sklearn` will be used. You can pass keyword arguments to this function via the\n",
 "`split_dataset` function."
] @@ -326,7 +328,10 @@ "]\n", "\n", "script_path = nnunet.generate_training_scripts(script_header=script_header)\n", - "print(f\"Training script ready in: {script_path}\")" + "print(f\"Training script ready in: {script_path}\")\n", + "\n", + "# Set the logging verbosity back to NOTSET\n", + "pydicer.set_verbosity(logging.NOTSET)" ] }, { From 18690f893fc5eba4412c5c75193f4f117dc47751 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Sun, 17 Dec 2023 16:37:07 +1100 Subject: [PATCH 08/15] Increase version to 0.2.0 --- CITATION.cff | 12 ++++++------ docs/conf.py | 10 +++++++--- pydicer/__init__.py | 15 +++++++++++++++ pyproject.toml | 2 +- 4 files changed, 29 insertions(+), 10 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index 3251236..2d94e14 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -12,14 +12,14 @@ authors: family-names: Chlap email: phillip.chlap@unsw.edu.au affiliation: University of New South Wales - orcid: 'https://orcid.org/0000-0002-6517-8745' + orcid: "https://orcid.org/0000-0002-6517-8745" - given-names: Daniel family-names: Al Mouiee affiliation: Ingham Institute - given-names: Robert N. family-names: Finnegan affiliation: University of Sydney - orcid: 'https://orcid.org/0000-0003-4728-8462' + orcid: "https://orcid.org/0000-0003-4728-8462" - given-names: Xinyi family-names: Cui affiliation: University of New South Wales @@ -34,12 +34,12 @@ authors: - given-names: Lois family-names: Holloway affiliation: University of New South Wales -repository-code: 'https://github.com/AustralianCancerDataNetwork/pydicer' -url: 'https://australiancancerdatanetwork.github.io/pydicer/' +repository-code: "https://github.com/AustralianCancerDataNetwork/pydicer" +url: "https://australiancancerdatanetwork.github.io/pydicer/" keywords: - medical imaging - DICOM - radiotherapy license: Apache-2.0 -version: 0.1.0 -date-released: '2023-06-04' +version: 0.2.0 +date-released: "2023-06-04" diff --git a/docs/conf.py b/docs/conf.py index d74d07e..761ece0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -29,7 +29,7 @@ author = "Ingham Medical Physics" # The full version, including alpha/beta/rc tags -release = "0.1.0" +release = "0.2.0" # -- General configuration --------------------------------------------------- @@ -85,9 +85,13 @@ def setup(app): shutil.copy("../examples/VisualiseData.ipynb", "_examples/VisualiseData.ipynb") shutil.copy("../examples/DoseMetrics.ipynb", "_examples/DoseMetrics.ipynb") shutil.copy("../examples/Radiomics.ipynb", "_examples/Radiomics.ipynb") -shutil.copy("../examples/DatasetPreparation.ipynb", "_examples/DatasetPreparation.ipynb") +shutil.copy( + "../examples/DatasetPreparation.ipynb", "_examples/DatasetPreparation.ipynb" +) shutil.copy("../examples/WorkingWithData.ipynb", "_examples/WorkingWithData.ipynb") -shutil.copy("../examples/WorkingWithStructures.ipynb", "_examples/WorkingWithStructures.ipynb") +shutil.copy( + "../examples/WorkingWithStructures.ipynb", "_examples/WorkingWithStructures.ipynb" +) shutil.copy("../examples/Configuration.ipynb", "_examples/Configuration.ipynb") shutil.copy("../examples/ObjectGeneration.ipynb", "_examples/ObjectGeneration.ipynb") shutil.copy("../examples/AutoSegmentation.ipynb", "_examples/AutoSegmentation.ipynb") diff --git a/pydicer/__init__.py b/pydicer/__init__.py index eccc21c..013d3f6 100644 --- a/pydicer/__init__.py +++ b/pydicer/__init__.py @@ -1 +1,16 @@ from .tool import PyDicer + +__project__ = "pydicer" +__version__ = "0.2.0" +__keywords__ = [ + "medical imaging", + "visualisation", + "conversion", + "DICOM", + 
"radiotherapy", + "image analysis", +] +__author__ = "Ingham Medical Physics" +__author_email__ = "phillip.chlap@unsw.edu.au" +__url__ = "https://australiancancerdatanetwork.github.io/pydicer/" +__platforms__ = "ALL" diff --git a/pyproject.toml b/pyproject.toml index 4194b09..bd530c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "pydicer" -version = "0.1.0" +version = "0.2.0" description = "PYthon Dicom Image ConvertER" authors = ["Ingham Medical Physics"] license = "Apache License 2.0" From 9d64aa77f004fc4fdbdbe2bef989fdea919fb212 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Wed, 20 Dec 2023 11:11:30 +1100 Subject: [PATCH 09/15] Link documentation examples --- docs/analyse.rst | 3 +++ examples/AutoSegmentation.ipynb | 23 +++++++++++------------ examples/Configuration.ipynb | 9 ++++----- examples/ConvertingData.ipynb | 8 ++++---- examples/DatasetPreparation.ipynb | 17 +++++++---------- examples/DoseMetrics.ipynb | 8 ++++---- examples/GettingStarted.ipynb | 22 +++++++++++++--------- examples/ObjectGeneration.ipynb | 6 +++--- examples/Radiomics.ipynb | 2 +- examples/VisualiseData.ipynb | 8 ++++---- examples/WorkingWithData.ipynb | 6 +++--- examples/WorkingWithStructures.ipynb | 14 +++++++------- 12 files changed, 64 insertions(+), 62 deletions(-) diff --git a/docs/analyse.rst b/docs/analyse.rst index 9564e86..9c9f854 100644 --- a/docs/analyse.rst +++ b/docs/analyse.rst @@ -4,3 +4,6 @@ Analyse .. automodule:: pydicer.analyse.data :members: + +.. automodule:: pydicer.analyse.compare + :members: diff --git a/examples/AutoSegmentation.ipynb b/examples/AutoSegmentation.ipynb index 9f77740..51701d4 100644 --- a/examples/AutoSegmentation.ipynb +++ b/examples/AutoSegmentation.ipynb @@ -82,9 +82,8 @@ "source": [ "## Prepare Atlas\n", "\n", - "Since we will use a single atlas-based segmentation model, we must\n", - "split our data, selecting one case as our `atlas` and the remaining cases as the `validation` set.\n", - "We use the PyDicer `dataset preparation` module to split our dataset." + "Since we will use a single atlas-based segmentation model, we must split our data, selecting one\n", + "case as our `atlas` and the remaining cases as the `validation` set. We use the PyDicer [dataset preparation module](https://australiancancerdatanetwork.github.io/pydicer/_examples/DatasetPreparation.html) to create these subsets of data." ] }, { @@ -199,7 +198,7 @@ "source": [ "## Run Auto-segmentation\n", "\n", - "The `segment_dataset` function will run over all images in our dataset and will pass the images to\n", + "The [segment_dataset](https://australiancancerdatanetwork.github.io/pydicer/generate.html#pydicer.generate.segmentation.segment_dataset) function will run over all images in our dataset and will pass the images to\n", "a function we define for segmentation. We pass in the name of our `validation_dataset` so that only\n", "the images in this dataset will be segmented." ] @@ -219,8 +218,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can use PyDicer's `visualisation` module to produce snapshots of the auto-segmentations\n", - "produced." + "We can use PyDicer's [visualisation module](https://australiancancerdatanetwork.github.io/pydicer/_examples/VisualiseData.html) to produce snapshots of the auto-segmentations produced." ] }, { @@ -239,8 +237,9 @@ "## Read Segmentation Logs\n", "\n", "After running the auto-segmentation on across the dataset, we can fetch the logs to confirm that\n", - "everything went well. 
This will also let us inspect the runtime of the segmentation. In case\n",
- "something went wrong, we can use these logs to help debug the issue."
+ "everything went well using the [read_all_segmentation_logs](https://australiancancerdatanetwork.github.io/pydicer/generate.html#pydicer.generate.segmentation.read_all_segmentation_logs) function.\n",
+ "This will also let us inspect the runtime of the segmentation. In case something went wrong, we can\n",
+ "use these logs to help debug the issue."
 ]
 },
 {
@@ -315,13 +314,13 @@
 "source": [
 "### Compute Similarity \n",
 "\n",
- "We use the `compute_contour_similarity_metrics` function to compute the metrics comparing our\n",
+ "We use the [compute_contour_similarity_metrics](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.compare.compute_contour_similarity_metrics) function to compute the metrics comparing our\n",
 "target structures to our reference structures.\n",
 "\n",
 "We can specify which metrics we want to compute, in this example we compute the Dice Similarity\n",
 "Coefficient (DSC), Hausdorff Distance, Mean Surface Distance and the Surface DSC.\n",
 "\n",
- "> Structure names must match exactly, so we use a structure name mapping to standardise our\n",
+ "> Structure names must match exactly, so we use a [structure name mapping](https://australiancancerdatanetwork.github.io/pydicer/_examples/WorkingWithStructures.html#Add-Structure-Name-Mapping) to standardise our\n",
 "> structure names prior to computing the similarity metrics."
 ]
 },
@@ -395,7 +394,7 @@
 "### Perform Analysis\n",
 "\n",
 "There are various plots and visualisations you may wish to produce following computation of\n",
- "similarity metrics. The `prepare_similarity_metric_analysis` will generate several useful plots\n",
+ "similarity metrics. The [prepare_similarity_metric_analysis](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.compare.prepare_similarity_metric_analysis) function will generate several useful plots\n",
 "which will serve as a useful starting point when analysing your auto-segmentation results.\n",
 "\n",
 "Plots are generated in a directory you provide. In this example, plots and tables (`.csv`) are\n",
@@ -456,4 +455,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 2
-}
\ No newline at end of file
+}
diff --git a/examples/Configuration.ipynb b/examples/Configuration.ipynb
index bfa833f..09bb45e 100644
--- a/examples/Configuration.ipynb
+++ b/examples/Configuration.ipynb
@@ -39,8 +39,7 @@
 "source": [
 "## Getting and Setting Options\n",
 "\n",
- "Use the `get_config` and `set_config` functions of the `config` module to get and set configuration\n",
- "options respectively."
+ "Use the `get_config` and `set_config` functions of the [config module](https://australiancancerdatanetwork.github.io/pydicer/config.html) to get and set configuration options respectively."
 ]
 },
 {
@@ -67,7 +66,7 @@
 "\n",
 "### Logging Verbosity\n",
 "\n",
- "Level of output for standard out. Value indicates the Python built-in log level. A value of 0\n",
+ "Level of output for standard out. Value indicates the [Python built-in log level](https://docs.python.org/3/library/logging.html#logging-levels). A value of 0\n",
 "(not set) will display the progress bar.
Logs of all levels are available in the .pydicer directory.\n", "\n", "Valid options are: `[logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR]`" @@ -88,7 +87,7 @@ "source": [ "### Frame of Reference Fallback Linkage\n", "\n", - "Determine whether to fallback on linking objects via their Frame of Reference if no more stable\n", + "Determine whether to fallback on linking objects via their [Frame of Reference UID](https://dicom.innolitics.com/ciods/ct-image/frame-of-reference/00200052) if no more stable\n", "link exists.\n", "\n", "Valid options are: `True` or `False`" @@ -174,7 +173,7 @@ "### Generate NRRD\n", "\n", "Whether or not to generate an additional NRRD file when converting RTSTRUCT. This allows loading\n", - "easily into 3D slicer, but it takes up more disk space and takes time to generate the file.\n", + "easily into [3D slicer](https://www.slicer.org/), but it takes up more disk space and takes time to generate the file.\n", "\n", "Valid options are: `True` or `False`" ] diff --git a/examples/ConvertingData.ipynb b/examples/ConvertingData.ipynb index 9de8e49..74664c8 100644 --- a/examples/ConvertingData.ipynb +++ b/examples/ConvertingData.ipynb @@ -36,7 +36,7 @@ "source": [ "## Setup PyDicer\n", "\n", - "As in the `Getting Started` example, we must first define a working directory for our dataset. We\n", + "As in the [Getting Started example](https://australiancancerdatanetwork.github.io/pydicer/_examples/GettingStarted.html), we must first define a working directory for our dataset. We\n", "also create a `PyDicer` object." ] }, @@ -56,7 +56,7 @@ "source": [ "## Fetch some data\n", "\n", - "A TestInput class is provided in pydicer to download some sample data to work with. Several other\n", + "A [TestInput class](https://australiancancerdatanetwork.github.io/pydicer/input.html#pydicer.input.test.TestInput) is provided in pydicer to download some sample data to work with. Several other\n", "input classes exist if you'd like to retrieve DICOM data for conversion from somewhere else. See \n", "the [docs for information](https://australiancancerdatanetwork.github.io/pydicer/html/input.html)\n", "on how the PyDicer input classes work.\n", @@ -85,7 +85,7 @@ "source": [ "## Preprocess\n", "\n", - "With some DICOM data ready to work with, we must first use the PyDicer `preprocess` module. This\n", + "With some DICOM data ready to work with, we must first use the PyDicer [preprocess module](https://australiancancerdatanetwork.github.io/pydicer/preprocess.html). This\n", "module will crawl over all DICOM data available and will index all information required for\n", "conversion of the data." ] @@ -132,7 +132,7 @@ "look inside the `working/data` directory to see how the converted data is being stored.\n", "\n", "Notice the `converted.csv` file stored for each patient. This tracks each converted data object.\n", - "This will be loaded as a Pandas DataFrame for use throughout PyDicer.\n" + "This will be loaded as a [Pandas DataFrame](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) for use throughout PyDicer.\n" ] }, { diff --git a/examples/DatasetPreparation.ipynb b/examples/DatasetPreparation.ipynb index f725397..c6f5834 100644 --- a/examples/DatasetPreparation.ipynb +++ b/examples/DatasetPreparation.ipynb @@ -13,19 +13,17 @@ "dataset. Often you will have several image series, structure set and even dose grids for each\n", "patient. 
However you typically want to select one relevant DICOM object in each category.\n", "\n", - "To help solve this, PyDicer provides a `dataset preparation` module which can be used to extract\n", + "To help solve this, PyDicer provides a [dataset preparation module](https://australiancancerdatanetwork.github.io/pydicer/dataset.html) which can be used to extract\n", "a subset of data from your overall set. Two example use cases where this might be useful are:\n", "\n", - "**Analysing dose to structures for a radiotherapy treatment**\n", - "You will want to extract the dose grid which was calculated from the plan used to treat the\n", + "- **Analysing dose to structures for a radiotherapy treatment**: You will want to extract the dose grid which was calculated from the plan used to treat the\n", "patient, as well as the linked structure set and planning CT image.\n", "\n", - "**Validating an Auto-segmentation tool**\n", - "A structure set may have been prepared for the purposes of validation and saved off with a specific\n", + "- **Validating an Auto-segmentation tool**: A structure set may have been prepared for the purposes of validation and saved off with a specific\n", "`SeriesDescription`. You select the latest structure set with that description as well as the\n", "linked image series to perform the auto-segmentation validation.\n", "\n", - "As you will see in the examples below, you can provide your own logic to extract subsets of data\n", + "As you will see in the examples below, you can also provide your own logic to extract subsets of data\n", "using PyDicer." ] }, @@ -74,7 +72,7 @@ "source": [ "## Explore data\n", "\n", - "When we use the `read_converted_data` function, by default it will return all data which has been\n", + "When we use the [read_converted_data](https://australiancancerdatanetwork.github.io/pydicer/utils.html#pydicer.utils.read_converted_data) function, by default it will return all data which has been\n", "converted and is stored in the `testdata_hnscc/data` directory.\n", "\n", "Let's use this function and output the entire DataFrame of converted data to see what we have\n", @@ -97,8 +95,7 @@ "source": [ "## Prepare dose data\n", "\n", - "Here we use the `dataset preparation` module to extract the latest dose grid by date. We refer to\n", - "this subset of data as `dose_project`.\n", + "Here we use the [dataset preparation module](https://australiancancerdatanetwork.github.io/pydicer/dataset.html) to extract the latest dose grid by date along with the linked structure sets and planning image series. We refer to this subset of data as `dose_project`.\n", "\n", "We use the built in data extraction function, named [rt_latest_dose](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_dose)." ] @@ -169,7 +166,7 @@ "\n", "In this example, we not only select the latest structure set by date, but we specify the\n", "`StudyDescription` values of the DICOM metadata of the data objects we want to select. 
To achieve\n",
- "this, we use the built-in [rt_latest_struct](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_struct) function.\n",
+ "this, we use the built-in [rt_latest_struct](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#pydicer.dataset.functions.rt_latest_struct) function which will also extract the image series linked to the structure set selected.\n",
 "\n",
 "Observe the output of the following cell and explore the `testdata_hnscc` directory. We now have\n",
 "one structure set and the linked image for each patient."
diff --git a/examples/DoseMetrics.ipynb b/examples/DoseMetrics.ipynb
index 050641c..6abb4ba 100644
--- a/examples/DoseMetrics.ipynb
+++ b/examples/DoseMetrics.ipynb
@@ -9,7 +9,7 @@
 "\n",
 "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/DoseMetrics.ipynb)\n",
 "\n",
- "In this example notebook we will compute Dose Volume Histograms (DVH) for our `RTDOSE` objects\n",
+ "In this example notebook we will compute [Dose Volume Histograms (DVH)](https://pyplati.github.io/platipy/dose.html#module-platipy.imaging.dose.dvh) for our `RTDOSE` objects\n",
 "across structures found in `RTSTRUCT` objects in our dataset. We use\n",
 "[HNSCC](https://wiki.cancerimagingarchive.net/display/Public/HNSCC) data from the Cancer Imaging\n",
 "Archive which has already been converted using PyDicer for demonstration purposes."
@@ -76,7 +76,7 @@
 "## Compute DVH\n",
 "\n",
 "Before we can extract dose metrics, we must compute Dose Volume Histograms for all dose objects and\n",
- "structure sets."
+ "structure sets. This is done using the [compute_dvh](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.data.AnalyseData.compute_dvh) function."
 ]
 },
 {
@@ -100,7 +100,7 @@
 "stores the raw DVH values.\n",
 "\n",
 "The DVHs for this dataset can be loaded into a pandas DataFrame with the\n",
- "`get_all_dvhs_for_dataset` function."
+ "[get_all_dvhs_for_dataset](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.data.AnalyseData.get_all_dvhs_for_dataset) function."
 ]
 },
 {
@@ -119,7 +119,7 @@
 "source": [
 "## Compute Dose Metrics\n",
 "\n",
- "The `compute_dose_metrics` function in the `analyse` module can compute **D**, **V** and **Dcc**\n",
+ "The [compute_dose_metrics](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.data.AnalyseData.compute_dose_metrics) function in the `analyse` module can compute **D**, **V** and **Dcc**\n",
 "metrics. Specify the points at which to compute those values. For example, the following cell\n",
 "computes the **D95**, **D50**, **V5** and **Dcc10**."
 ]
 },
diff --git a/examples/GettingStarted.ipynb b/examples/GettingStarted.ipynb
index 2a09cfe..3ffe77d 100644
--- a/examples/GettingStarted.ipynb
+++ b/examples/GettingStarted.ipynb
@@ -54,7 +54,7 @@
 "source": [
 "## Create a PyDicer object\n",
 "\n",
- "The PyDicer class provides all functionality to run the pipeline and work with the data stored and\n",
+ "The [PyDicer class](https://australiancancerdatanetwork.github.io/pydicer/tool.html) provides all functionality to run the pipeline and work with the data stored and\n",
 "converted in your project directory"
 ]
 },
@@ -73,7 +73,7 @@
 "source": [
 "## Fetch some data\n",
 "\n",
- "A TestInput class is provided in pydicer to download some sample data to work with.
Several other\n", + "A [TestInput class](https://australiancancerdatanetwork.github.io/pydicer/input.html#pydicer.input.test.TestInput) is provided in pydicer to download some sample data to work with. Several other\n", "input classes exist if you'd like to retrieve DICOM data for conversion from somewhere else, [see \n", "the docs for information on how these work](https://australiancancerdatanetwork.github.io/pydicer/html/input.html)." ] @@ -99,11 +99,11 @@ "## Run the pipeline\n", "\n", "The function runs the entire PyDicer pipeline on the test DICOM data. This includes:\n", - "- Preprocessing the DICOM data (data which can't be handled or is corrupt will be placed in Quarantine)\n", - "- Convert the data to Nifti format (see the output in the `data` directory)\n", - "- Visualise the data (png files will be placed alongside the converted Nifti files)\n", - "- Compute Radiomics features (Results are stored in a csv alongside the converted structures)\n", - "- Compute Dose Volume Histograms (results are stored alongside converted dose data)\n", + "- [Preprocessing](https://australiancancerdatanetwork.github.io/pydicer/preprocess.html) the DICOM data (data which can't be handled or is corrupt will be placed in Quarantine)\n", + "- [Convert](https://australiancancerdatanetwork.github.io/pydicer/convert.html) the data to Nifti format (see the output in the `data` directory)\n", + "- [Visualise](https://australiancancerdatanetwork.github.io/pydicer/visualise.html) the data (png files will be placed alongside the converted Nifti files)\n", + "- [Compute Radiomics features](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.data.AnalyseData.compute_radiomics) (Results are stored in a csv alongside the converted structures)\n", + "- [Compute Dose Volume Histograms](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.data.AnalyseData.compute_dvh) (results are stored alongside converted dose data)\n", "\n", "> Note that the entire Pipeline can be quite time consuming to run. Depending on your project's\n", "> dataset you will likely want to run only portions of the pipeline with finer control over each\n", @@ -130,7 +130,9 @@ "but here we use a somewhat common approach of extracting the latest structure set for each patient\n", "and the image linked to that.\n", "\n", - "The resulting dataset is stored in a folder with your dataset name (`clean` for this example).\n" + "The resulting dataset is stored in a folder with your dataset name (`clean` for this example).\n", + "\n", + "See the [dataset preparation example](https://australiancancerdatanetwork.github.io/pydicer/_examples/DatasetPreparation.html) for a more detailed description on how this works.\n" ] }, { @@ -149,7 +151,9 @@ "## Analyse the dataset\n", "\n", "The pipeline computes first-order radiomics features by default, as well as dose volume histograms.\n", - "Here we can extract out the results easily into a Pandas DataFrame for analysis." + "Here we can extract out the results easily into a Pandas DataFrame for analysis.\n", + "\n", + "Check out the [Compute Radiomics](https://australiancancerdatanetwork.github.io/pydicer/_examples/Radiomics.html) and the [Dose Metrics](https://australiancancerdatanetwork.github.io/pydicer/_examples/DoseMetrics.html) examples for further details on how to use these functions." 
]
},
{
diff --git a/examples/ObjectGeneration.ipynb b/examples/ObjectGeneration.ipynb
index a61ee80..be55e75 100644
--- a/examples/ObjectGeneration.ipynb
+++ b/examples/ObjectGeneration.ipynb
@@ -138,7 +138,7 @@
 "- Load the CT image using SimpleITK, and apply a threshold to segment bones\n",
 "- Save the segmented bones as a new structure set object\n",
 "\n",
- "> Note: This specific functionality is supported by the auto-segmentation inference module. If you\n",
+ "> Note: This specific functionality is supported by the [auto-segmentation inference module](https://australiancancerdatanetwork.github.io/pydicer/generate.html#module-pydicer.generate.segmentation). If you\n",
 "> are using this to generate auto-segmentations it is recommended you use that functionality."
 ]
 },
@@ -201,7 +201,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "And we can also run the visualise module. Use the `force=False` flag to ensure that only the newly\n",
+ "And we can also run the [visualise module](https://australiancancerdatanetwork.github.io/pydicer/visualise.html). Use the `force=False` flag to ensure that only the newly\n",
 "generated objects are visualised"
 ]
 },
@@ -271,7 +271,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "Now we can visualise the images and produce snapshots once more. Find the sharpen images in the\n",
+ "Now we can visualise the images and produce snapshots once more. Find the sharpened images in the\n",
 "working directory. Can you see the difference between the sharpened CT and the original?"
 ]
 },
diff --git a/examples/Radiomics.ipynb b/examples/Radiomics.ipynb
index ccddc69..6f0b897 100644
--- a/examples/Radiomics.ipynb
+++ b/examples/Radiomics.ipynb
@@ -96,7 +96,7 @@
 "source": [
 "## Fetch computed Radiomics\n",
 "\n",
- "Use the `get_all_computed_radiomics_for_dataset` function to fetch all radiomics features computed\n",
+ "Use the [get_all_computed_radiomics_for_dataset](https://australiancancerdatanetwork.github.io/pydicer/analyse.html#pydicer.analyse.data.AnalyseData.get_all_computed_radiomics_for_dataset) function to fetch all radiomics features computed\n",
 "in the last step.\n",
 "\n",
 "The `.head()` function on a Pandas DataFrame outputs the first 5 rows for inspection."
 ]
diff --git a/examples/VisualiseData.ipynb b/examples/VisualiseData.ipynb
index c74a36e..1db6834 100644
--- a/examples/VisualiseData.ipynb
+++ b/examples/VisualiseData.ipynb
@@ -8,12 +8,12 @@
 "\n",
 "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/AustralianCancerDataNetwork/pydicer/blob/main/examples/VisualiseData.ipynb)\n",
 "\n",
- "PyDicer's `visualise` module will produce cross-sections of data objects and store them\n",
+ "PyDicer's [visualise module](https://australiancancerdatanetwork.github.io/pydicer/visualise.html) will produce cross-sections of data objects and store them\n",
 "in `.png` format within the data object directory. This is particularly useful for fast inspection\n",
 "of the data to ensure that nothing has gone wrong during conversion.\n",
 "\n",
- "The `visualise` module can be run at any time after conversion. If you are using advanced features\n",
- "of PyDicer, such as `auto-segmentation inference` and `object generation`, you can run the\n",
+ "The visualise module can be run at any time after conversion.
If you are using advanced features\n", + "of PyDicer, such as [auto-segmentation inference](https://australiancancerdatanetwork.github.io/pydicer/_examples/AutoSegmentation.html) and [object generation](https://australiancancerdatanetwork.github.io/pydicer/_examples/ObjectGeneration.html), you can run the\n", "visualise module following the generation of the new data objects to produce the cross-section\n", "`.png` files." ] @@ -62,7 +62,7 @@ "source": [ "## Visualise Data\n", "\n", - "We simply call the `visualise()` function of the `visualise` module to produce the cross-sections.\n", + "We simply call the [visualise](https://australiancancerdatanetwork.github.io/pydicer/visualise.html#pydicer.visualise.data.VisualiseData.visualise) function of the `visualise` module to produce the cross-sections.\n", "\n", "Inspect some of the data object directories in `testdata_hnscc/data` and look for the `.png`\n", "cross-sections. The `{hashed_uid}` in files named `vis_{hashed_uid}.png` refers to a UID hash\n", diff --git a/examples/WorkingWithData.ipynb b/examples/WorkingWithData.ipynb index 815af68..e70b9cf 100644 --- a/examples/WorkingWithData.ipynb +++ b/examples/WorkingWithData.ipynb @@ -64,7 +64,7 @@ "source": [ "## Read Converted Data\n", "\n", - "To obtain a DataFrame of the converted data, use the `read_converted_data` function." + "To obtain a DataFrame of the converted data, use the [read_converted_data](https://australiancancerdatanetwork.github.io/pydicer/utils.html#pydicer.utils.read_converted_data) function." ] }, { @@ -110,7 +110,7 @@ "## Loading Object Metadata\n", "\n", "The metadata from the DICOM headers is stored by PyDicer and can be easily loaded using the\n", - "`load_object_metadata` function. Simply pass a row from the converted DataFrame into this function\n", + "[load_object_metadata](https://australiancancerdatanetwork.github.io/pydicer/utils.html#pydicer.utils.load_object_metadata) function. Simply pass a row from the converted DataFrame into this function\n", "to load the metadata for that object." ] }, @@ -179,7 +179,7 @@ "\n", "There are several DICOM header tags which could define the date of an object. The DICOM standard\n", "doesn't require all of these to be set within the metadata. PyDicer provides the \n", - "`determine_dcm_datetime` function to extract the date from the DICOM header." + "[determine_dcm_datetime](https://australiancancerdatanetwork.github.io/pydicer/utils.html#pydicer.utils.determine_dcm_datetime) function to extract the date from the DICOM header." ] }, { diff --git a/examples/WorkingWithStructures.ipynb b/examples/WorkingWithStructures.ipynb index 19ebb85..5c71798 100644 --- a/examples/WorkingWithStructures.ipynb +++ b/examples/WorkingWithStructures.ipynb @@ -13,12 +13,12 @@ "within the directory structure. 
One common issue when working with real-world datasets is that\n",
 "structure names are often inconsistent, requiring standardisation of names prior to analysing data.\n",
 "\n",
- "In PyDicer, structure name standardisation is achieved by defining structure mapping dictionaries\n",
+ "In PyDicer, structure name standardisation is achieved by [defining structure mapping dictionaries](https://australiancancerdatanetwork.github.io/pydicer/utils.html#pydicer.utils.add_structure_name_mapping)\n",
 "which can be stored globally (applied to all structure sets) or locally (specific mapping per\n",
 "structure set or per patient).\n",
 "\n",
 "In this guide we present some examples of how to define such structure name mappings and will\n",
- "introduce the `StructureSet` class which simplifies loading and working with structure objects."
+ "introduce the [StructureSet class](https://australiancancerdatanetwork.github.io/pydicer/dataset.html#module-pydicer.dataset.structureset) which simplifies loading and working with structure objects."
 ]
 },
 {
@@ -132,7 +132,7 @@
 "Structure name mappings are defined as Python dictionaries, with the standardised structure name\n",
 "as the key, and the value a list of name variations which should map to the standardised name.\n",
 "\n",
- "Use the `add_structure_name_mapping` function to add a mapping. A `mapping_id` may be supplied to refer\n",
+ "Use the [add_structure_name_mapping](https://australiancancerdatanetwork.github.io/pydicer/utils.html#pydicer.utils.add_structure_name_mapping) function to add a mapping. A `mapping_id` may be supplied to refer\n",
 "to different mappings. If no `mapping_id` is supplied, a default mapping id is used.\n",
 "\n",
 "If a `structure_set_row` or `patient_id` is supplied, then the mapping will be stored at the\n",
@@ -282,10 +282,10 @@
 "## Using Mappings in PyDicer\n",
 "\n",
 "Once mappings are defined, these can be used when you:\n",
- "- Compute Dose Metrics\n",
- "- Fetch Radiomics Features\n",
- "- Analyse Auto-segmentations\n",
- "- Prepare data for nnUNet training\n",
+ "- [Compute Dose Metrics](https://australiancancerdatanetwork.github.io/pydicer/_examples/DoseMetrics.html)\n",
+ "- [Fetch Radiomics Features](https://australiancancerdatanetwork.github.io/pydicer/_examples/Radiomics.html)\n",
+ "- [Analyse Auto-segmentations](https://australiancancerdatanetwork.github.io/pydicer/_examples/AutoSegmentation.html)\n",
+ "- [Prepare data for nnUNet training](https://australiancancerdatanetwork.github.io/pydicer/_examples/nnUNet.html)\n",
 "\n",
 "Check out the documentation for those modules to see where you can supply your `mapping_id` to have\n",
 "the structure set standardisation applied. If you have used the default `mapping_id`, the\n",

From 54d88152712ec567928abab62f16ceabc7cddb12 Mon Sep 17 00:00:00 2001
From: Phillip Chlap
Date: Wed, 20 Dec 2023 11:42:48 +1100
Subject: [PATCH 10/15] Update README

---
 README.md | 55 ++++++++++++++++++++++------------------
 docs/_static/custom.css | 8 +++---
 2 files changed, 33 insertions(+), 30 deletions(-)

diff --git a/README.md b/README.md
index 1eb91a0..7d5d5cf 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,13 @@
 # PyDicer: PYthon Dicom Image ConvertER

-Welcome to pydicer, a tool to ease the process of converting DICOM data objects into a format typically used for research purposes. In addition to data conversion, functionality is provided to help analyse the data such as the computing radiomic features or radiotherapy dose metrics.
PyDicer uses the NIfTI format to store data is a well defined file system structure. Tracking of these data objects in CSV files, also stored on the file system, provides an easy and flexible way to work with the converted data in your research.
+Welcome to pydicer, a tool to ease the process of converting Radiotherapy DICOM data objects into a format typically used for research purposes. In addition to data conversion, functionality is provided to help analyse the data. This includes computing radiomic features, radiotherapy dose metrics and auto-segmentation metrics. PyDicer uses the NIfTI format to store data in a well-defined file system structure. Tracking of these data objects in CSV files, also stored on the file system, provides an easy and flexible way to work with the converted data in your research.
+
+The [PyDicer documentation](https://australiancancerdatanetwork.github.io/pydicer/index.html) provides several examples and guides to help you get started with the tool. Here are a few **PyDicer principles** to keep in mind as you get started:
+
+- The [working directory structure](https://australiancancerdatanetwork.github.io/pydicer/index.html#directory-structure) is standardised and generalisable for use with any DICOM dataset.
+- Use [Pandas DataFrames](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html) to work with converted data objects.
+- [SimpleITK](https://simpleitk.readthedocs.io/en/master/) and [PlatiPy](https://pyplati.github.io/platipy/) are used under the hood for the image conversion, visualisation and analysis tasks.
+- Always inspect visualisations, plots and metrics produced by PyDicer in your working directory. Remember, PyDicer is a research tool so only use it for research purposes and expect the unexpected!

 ## Installation

@@ -11,9 +18,20 @@ environment using `pip`:
 ```
 pip install pydicer
 ```
+## Supported Modalities
+
+PyDicer currently supports converting and analysing the following DICOM modalities:
+
+- CT
+- MR
+- PT (Experimental)
+- RTSTRUCT
+- RTPLAN (Not converted since this only consists of metadata)
+- RTDOSE
+
 ## Directory Structure

-pydicer will place converted and intermediate files into a specific directory structure. Within the configured working directory `[working]`, the following directories will be generated:
+PyDicer will place converted and intermediate files into a specific directory structure. Visualisations, computed metrics and plots are also stored alongside the converted data objects. Within the configured working directory `[working]`, the following directories will be generated:

 - `[working]/data`: Directory in which converted data will be placed
 - `[working]/quarantine`: Files which couldn't be preprocessed or converted will be placed in here for you to investigate further
@@ -66,31 +84,16 @@ pydicer.run_pipeline()

 ## Contributing

-PyDicer is a research tool and adding to its functionality is encouraged. All GitHub Pull Requests
-are welcome. We do ask that you abide by our code of conduct and follow our coding standards.
-
-### Coding standards
-
-Code in pydicer must conform to Python's PEP-8 standards to ensure consistent formatting between contributors. To ensure this, pylint is used to check code conforms to these standards before a Pull Request can be merged. You can run pylint from the command line using the following command:
+PyDicer is an open-source tool and contributions are welcome! Here are some ways you might consider contributing to the project:

-```bash
-pylint pydicer
-```
+- Reporting issues on GitHub.
+- Correcting/extending the documentation. +- Contributing a bug fix or extending some functionality. +- Providing functionality to support additional DICOM modalities. +- Giving the PyDicer project a star on GitHub. -```bash -pylint pydicer -``` +For more information, see the [Contributing documentation](https://australiancancerdatanetwork.github.io/pydicer/contributing.html). -But a better idea is to ensure you are using a Python IDE which supports linting (such as [VSCode](https://code.visualstudio.com/docs/python/linting) or PyCharm). Make sure you resolve all suggestions from pylint before submitting your pull request. - -If you're new to using pylint, you may like to [read this guide](https://docs.pylint.org/en/v2.11.1/tutorial.html). - -### Automated tests - -A test suite is included in pydicer which ensures that code contributed to the repository functions as expected and continues to function as further development takes place. Any code submitted via a pull request should include appropriate automated tests for the new code. - -pytest is used as a testing library. Running the tests from the command line is really easy: - -```bash -pytest -``` +## Authors -Add your tests to the appropriate file in the `tests/` directory. See the [pytest documention](https://docs.pytest.org/en/6.2.x/getting-started.html) for more information. -or check out the [Getting Started Example](https://australiancancerdatanetwork.github.io/pydicer/_examples/GettingStarted.html). -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/australiancancerdatanetwork/pydicer/blob/main/examples/GettingStarted.ipynb) +PyDicer was developed by the [Ingham Medical Physics team](https://www.unsw.edu.au/medicine-health/our-schools/clinical-medicine/research-impact/research-groups/cancer/ingham-medical-physics) in South-Western Sydney. It's development is part of the [Australian Cancer Data Network](https://australian-cancer-data.network/) supported by the [Australian Research Data Commons](https://ardc.edu.au/). 
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
index a11c442..a876179 100644
--- a/docs/_static/custom.css
+++ b/docs/_static/custom.css
@@ -1,16 +1,16 @@
-table.dataframe {
+table {
 color: var(--color-foreground-primary) !important;
 }

-table.dataframe thead {
+table thead {
 background-color: var(--color-card-marginals-background) !important;
 }

-table.dataframe tr:nth-child(odd) {
+table tr:nth-child(odd) {
 background: var(--color-background-border) !important;
 }

-table.dataframe tr:nth-child(even) {
+table tr:nth-child(even) {
 background: var(--color-card-marginals-background) !important;
 }

From a608ad6d8bcda879a53fec826e2bac36ab9d1ae3 Mon Sep 17 00:00:00 2001
From: Phillip Chlap
Date: Wed, 20 Dec 2023 11:46:16 +1100
Subject: [PATCH 11/15] Update release date in citation file

---
 CITATION.cff | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CITATION.cff b/CITATION.cff
index 2d94e14..1716c1d 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -42,4 +42,4 @@ keywords:
 - radiotherapy
 license: Apache-2.0
 version: 0.2.0
-date-released: "2023-06-04"
+date-released: "2023-12-20"

From 9843e1c25f505a04b74e96d6f2ff7e5960eef01f Mon Sep 17 00:00:00 2001
From: Phillip Chlap
Date: Wed, 20 Dec 2023 11:46:48 +1100
Subject: [PATCH 12/15] Change casing of tool name

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 7d5d5cf..dccccaf 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # PyDicer: PYthon Dicom Image ConvertER

-Welcome to pydicer, a tool to ease the process of converting Radiotherapy DICOM data objects into a format typically used for research purposes. In addition to data conversion, functionality is provided to help analyse the data. This includes computing radiomic features, radiotherapy dose metrics and auto-segmentation metrics. PyDicer uses the NIfTI format to store data in a well-defined file system structure. Tracking of these data objects in CSV files, also stored on the file system, provides an easy and flexible way to work with the converted data in your research.
+Welcome to PyDicer, a tool to ease the process of converting Radiotherapy DICOM data objects into a format typically used for research purposes. In addition to data conversion, functionality is provided to help analyse the data. This includes computing radiomic features, radiotherapy dose metrics and auto-segmentation metrics. PyDicer uses the NIfTI format to store data in a well-defined file system structure. Tracking of these data objects in CSV files, also stored on the file system, provides an easy and flexible way to work with the converted data in your research.

 The [PyDicer documentation](https://australiancancerdatanetwork.github.io/pydicer/index.html) provides several examples and guides to help you get started with the tool. Here are a few **PyDicer principles** to keep in mind as you get started:

From 4950b462032db09def25eca5cc7cac125d300d29 Mon Sep 17 00:00:00 2001
From: Phillip Chlap
Date: Wed, 20 Dec 2023 11:51:04 +1100
Subject: [PATCH 13/15] Add link to pydicer github

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index dccccaf..0b2f08f 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,7 @@ PyDicer is an open-source tool and contributions are welcome! Here are some ways

 - Correcting/extending the documentation.
 - Contributing a bug fix or extending some functionality.
 - Providing functionality to support additional DICOM modalities.
-- Giving the PyDicer project a star on GitHub. +- Giving the [PyDicer project](https://github.com/AustralianCancerDataNetwork/pydicer) a star on GitHub. For more information, see the [Contributing documentation](https://australiancancerdatanetwork.github.io/pydicer/contributing.html). From 8833fcef0d18e7c4535e154bec53218229c8010a Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 21 Dec 2023 07:13:44 +1100 Subject: [PATCH 14/15] Remove version from docs --- docs/conf.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 761ece0..fce8db2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -28,9 +28,6 @@ copyright = f"{year}, Ingham Medical Physics" author = "Ingham Medical Physics" -# The full version, including alpha/beta/rc tags -release = "0.2.0" - # -- General configuration --------------------------------------------------- From f72146d1e402ab0a6e7a7b3de5846718796d55d3 Mon Sep 17 00:00:00 2001 From: Phillip Chlap Date: Thu, 21 Dec 2023 10:51:13 +1100 Subject: [PATCH 15/15] Add emails to author list --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0b2f08f..b47cbce 100644 --- a/README.md +++ b/README.md @@ -96,4 +96,7 @@ For more information, see the [Contributing documentation](https://australiancan ## Authors -PyDicer was developed by the [Ingham Medical Physics team](https://www.unsw.edu.au/medicine-health/our-schools/clinical-medicine/research-impact/research-groups/cancer/ingham-medical-physics) in South-Western Sydney. It's development is part of the [Australian Cancer Data Network](https://australian-cancer-data.network/) supported by the [Australian Research Data Commons](https://ardc.edu.au/). +PyDicer was developed by the [Ingham Medical Physics team](https://www.unsw.edu.au/medicine-health/our-schools/clinical-medicine/research-impact/research-groups/cancer/ingham-medical-physics) in South-Western Sydney. It was developed as part of the [Australian Cancer Data Network](https://australian-cancer-data.network/) supported by the [Australian Research Data Commons](https://ardc.edu.au/). + +- **Phillip Chlap** - [phillip.chlap@unsw.edu.au](phillip.chlap@unsw.edu.au) +- **Daniel Al Mouiee** - [d.almouiee@unsw.edu.au](d.almouiee@unsw.edu.au)