From 68fd2011d875e39919b2e1d1ec10e7269ddee5b6 Mon Sep 17 00:00:00 2001 From: lorenzo Date: Tue, 8 Oct 2024 17:08:14 +0200 Subject: [PATCH] first masking roi implementation --- docs/notebooks/image.ipynb | 42 +++++++++---------- docs/notebooks/processing.ipynb | 27 ++++++++----- pyproject.toml | 1 + src/ngio/core/image_handler.py | 51 ++++++++++++++++++++++++ src/ngio/core/image_like_handler.py | 32 +++++++-------- src/ngio/core/label_handler.py | 51 +++++++++++++++++++----- src/ngio/core/ngff_image.py | 12 ++++-- src/ngio/tables/v1/masking_roi_tables.py | 11 ++++- tests/core/test_image_handler.py | 11 +++-- tests/core/test_image_like_handler.py | 15 ++++--- tests/core/test_label_handler.py | 8 ++-- tests/core/test_ngff_image.py | 8 +++- 12 files changed, 185 insertions(+), 84 deletions(-) diff --git a/docs/notebooks/image.ipynb b/docs/notebooks/image.ipynb index 78335ab..6ff48bf 100644 --- a/docs/notebooks/image.ipynb +++ b/docs/notebooks/image.ipynb @@ -72,7 +72,7 @@ "array = image.array\n", "print(f\"{array}\")\n", "\n", - "dask_array = image.dask_array\n", + "dask_array = image.on_disk_dask_array\n", "dask_array" ] }, @@ -80,9 +80,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note, directly accessing the `.array` or `.dask_array` attributes will load the image as stored in the file.\n", + "Note, directly accessing the `.on_disk_array` or `.on_disk_dask_array` attributes will load the image as stored in the file.\n", "\n", - "Since in principle the images can have different axes order. A safer way to access the image data is to use the `.get_data()` method, which will return the image data in canonical order (TCZYX)." + "Since in principle the images can have different axes order. A safer way to access the image data is to use the `.array` method, which will return the image data in canonical order (TCZYX)." ] }, { @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "image_numpy = image.get_data(c=0, x=slice(0, 250), y=slice(0, 250), preserve_dimensions=False, mode=\"numpy\")\n", + "image_numpy = image.array(c=0, x=slice(0, 250), y=slice(0, 250), preserve_dimensions=False, mode=\"numpy\")\n", "\n", "print(f\"{image_numpy.shape=}\")" ] @@ -115,7 +115,7 @@ "roi = roi_table.get_roi(\"FOV_1\")\n", "print(f\"{roi=}\")\n", "\n", - "image_roi_1 = image.get_data_from_roi(roi=roi, c=0, preserve_dimensions=True, mode=\"dask\")\n", + "image_roi_1 = image.array_from_roi(roi=roi, c=0, preserve_dimensions=True, mode=\"dask\")\n", "image_roi_1" ] }, @@ -138,10 +138,10 @@ "print(f\"{image_2.pixel_size=}\")\n", "\n", "# Get roi for higher resolution image\n", - "image_1_roi_1 = image.get_data_from_roi(roi=roi, c=0, preserve_dimensions=False)\n", + "image_1_roi_1 = image.array_from_roi(roi=roi, c=0, preserve_dimensions=False)\n", "\n", "# Get roi for lower resolution image\n", - "image_2_roi_1 = image_2.get_data_from_roi(roi=roi, c=0, preserve_dimensions=False)\n", + "image_2_roi_1 = image_2.array_from_roi(roi=roi, c=0, preserve_dimensions=False)\n", "\n", "# Plot the two images side by side\n", "fig, axs = plt.subplots(1, 2, figsize=(10, 5))\n", @@ -156,7 +156,7 @@ "source": [ "# Writing Images\n", "\n", - "Similarly to the `.get_data()` we can use the `.set_data()` (or `set_data_roi`) method to write part of an image to disk." + "Similarly to the `.array()` we can use the `.set_array()` (or `set_array_from_roi`) method to write part of an image to disk." 
] }, { @@ -168,25 +168,25 @@ "import numpy as np\n", "\n", "# Get a small slice of the image\n", - "small_slice = image.get_data(x=slice(1000, 2000), y=slice(1000, 2000))\n", + "small_slice = image.array(x=slice(1000, 2000), y=slice(1000, 2000))\n", "\n", "# Set the sample slice to zeros\n", "zeros_slice = np.zeros_like(small_slice)\n", - "image.set_data(patch=zeros_slice, x=slice(1000, 2000), y=slice(1000, 2000))\n", + "image.set_array(patch=zeros_slice, x=slice(1000, 2000), y=slice(1000, 2000))\n", "\n", "\n", "# Load the image from disk and show the edited image\n", "nuclei = ngff_image.label.get(\"nuclei\")\n", "fig, axs = plt.subplots(1, 2, figsize=(10, 5))\n", - "axs[0].imshow(image.array[0, 0], cmap=\"gray\")\n", - "axs[1].imshow(nuclei.array[0])\n", + "axs[0].imshow(image.on_disk_array[0, 0], cmap=\"gray\")\n", + "axs[1].imshow(nuclei.on_disk_array[0])\n", "for ax in axs:\n", " ax.axis(\"off\")\n", "plt.tight_layout()\n", "plt.show()\n", "\n", "# Add back the original slice to the image\n", - "image.set_data(patch=small_slice, x=slice(1000, 2000), y=slice(1000, 2000))" + "image.set_array(patch=small_slice, x=slice(1000, 2000), y=slice(1000, 2000))" ] }, { @@ -207,13 +207,13 @@ "# Create a a new label object and set it to a simple segmentation\n", "new_label = ngff_image.label.derive(\"new_label\", overwrite=True)\n", "\n", - "simple_segmentation = image.array[0] > 100\n", - "new_label.array[...] = simple_segmentation\n", + "simple_segmentation = image.on_disk_array[0] > 100\n", + "new_label.on_disk_array[...] = simple_segmentation\n", "\n", "# make a subplot with two image show side by side\n", "fig, axs = plt.subplots(1, 2, figsize=(10, 5))\n", - "axs[0].imshow(image.array[0, 0], cmap=\"gray\")\n", - "axs[1].imshow(new_label.array[0], cmap=\"gray\")\n", + "axs[0].imshow(image.on_disk_array[0, 0], cmap=\"gray\")\n", + "axs[1].imshow(new_label.on_disk_array[0], cmap=\"gray\")\n", "for ax in axs:\n", " ax.axis(\"off\")\n", "plt.tight_layout()\n", @@ -240,12 +240,12 @@ "label_0 = ngff_image.label.get(\"new_label\", path=\"0\")\n", "label_2 = ngff_image.label.get(\"new_label\", path=\"2\")\n", "\n", - "label_before_consolidation = label_2.array[...]\n", + "label_before_consolidation = label_2.on_disk_array[...]\n", "\n", "# Consolidate the label\n", "label_0.consolidate()\n", "\n", - "label_after_consolidation = label_2.array[...]\n", + "label_after_consolidation = label_2.on_disk_array[...]\n", "\n", "\n", "# make a subplot with two image show side by side\n", @@ -285,7 +285,7 @@ "# Create a table with random features for each nuclei in each ROI\n", "list_of_records = []\n", "for roi in roi_table.list_rois:\n", - " nuclei_in_roi = nuclei.get_data_from_roi(roi, mode='numpy')\n", + " nuclei_in_roi = nuclei.array_from_roi(roi, mode='numpy')\n", " for nuclei_id in np.unique(nuclei_in_roi)[1:]:\n", " list_of_records.append(\n", " {\"label\": nuclei_id,\n", @@ -333,7 +333,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/docs/notebooks/processing.ipynb b/docs/notebooks/processing.ipynb index a11f4ba..831a487 100644 --- a/docs/notebooks/processing.ipynb +++ b/docs/notebooks/processing.ipynb @@ -91,15 +91,15 @@ "# - set the data in the MIP image\n", "for roi in roi_table.list_rois:\n", " print(f\" - Processing ROI {roi.infos.get(\"field_index\")}\")\n", - " patch = source_image.get_data_from_roi(roi)\n", + " patch = source_image.array_from_roi(roi)\n", " mip_patch = 
patch.max(axis=1, keepdims=True)\n", - " mip_image.set_data_from_roi(patch=mip_patch, roi=roi)\n", + " mip_image.set_array_from_roi(patch=mip_patch, roi=roi)\n", " \n", "print(\"MIP image saved\")\n", "\n", "plt.figure(figsize=(5, 5))\n", "plt.title(\"Mip\")\n", - "plt.imshow(mip_image.array[0, 0, :, :], cmap=\"gray\")\n", + "plt.imshow(mip_image.on_disk_array[0, 0, :, :], cmap=\"gray\")\n", "plt.axis('off')\n", "plt.tight_layout()\n", "plt.show()\n" @@ -124,12 +124,12 @@ "# Get the MIP image at a lower resolution\n", "mip_image_2 = mip_ngff.get_image(path=\"2\")\n", "\n", - "image_before_consolidation = mip_image_2.get_data(c=0, z=0)\n", + "image_before_consolidation = mip_image_2.array(c=0, z=0)\n", "\n", "# Consolidate the pyramid\n", "mip_image.consolidate()\n", "\n", - "image_after_consolidation = mip_image_2.get_data(c=0, z=0)\n", + "image_after_consolidation = mip_image_2.array(c=0, z=0)\n", "\n", "fig, axs = plt.subplots(2, 1, figsize=(10, 5))\n", "axs[0].set_title(\"Before consolidation\")\n", @@ -256,13 +256,13 @@ "max_label = 0\n", "for roi in roi_table.list_rois:\n", " print(f\" - Processing ROI {roi.infos.get(\"field_index\")}\")\n", - " patch = source_image.get_data_from_roi(roi, c=dapi_idx)\n", + " patch = source_image.array_from_roi(roi, c=dapi_idx)\n", " segmentation = otsu_threshold_segmentation(patch, max_label)\n", "\n", " # Add the max label of the previous segmentation to avoid overlapping labels\n", " max_label = segmentation.max()\n", "\n", - " nuclei_image.set_data_from_roi(patch=segmentation, roi=roi)\n", + " nuclei_image.set_array_from_roi(patch=segmentation, roi=roi)\n", "\n", "# Consolidate the segmentation image\n", "nuclei_image.consolidate()\n", @@ -270,14 +270,21 @@ "print(\"Segmentation image saved\")\n", "fig, axs = plt.subplots(2, 1, figsize=(10, 5))\n", "axs[0].set_title(\"MIP\")\n", - "axs[0].imshow(source_image.array[0, 0], cmap=\"gray\")\n", + "axs[0].imshow(source_image.on_disk_array[0, 0], cmap=\"gray\")\n", "axs[1].set_title(\"Nuclei segmentation\")\n", - "axs[1].imshow(nuclei_image.array[0], cmap=rand_cmap, interpolation='nearest')\n", + "axs[1].imshow(nuclei_image.on_disk_array[0], cmap=rand_cmap, interpolation='nearest')\n", "for ax in axs:\n", " ax.axis('off')\n", "plt.tight_layout()\n", "plt.show()\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -296,7 +303,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/pyproject.toml b/pyproject.toml index 7f5265a..bad0aff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ dependencies = [ "aiohttp", "dask[array]", "dask[distributed]", + "dask-image", ] # https://peps.python.org/pep-0621/#dependencies-optional-dependencies diff --git a/src/ngio/core/image_handler.py b/src/ngio/core/image_handler.py index 9044cf4..ce48203 100644 --- a/src/ngio/core/image_handler.py +++ b/src/ngio/core/image_handler.py @@ -1,6 +1,13 @@ """A module to handle OME-NGFF images stored in Zarr format.""" +from typing import Literal + +import dask.array as da +import numpy as np + +from ngio._common_types import ArrayLike from ngio.core.image_like_handler import ImageLike +from ngio.core.roi import WorldCooROI from ngio.io import StoreOrGroup from ngio.ngff_meta.fractal_image_meta import ImageMeta, PixelSize @@ -22,6 +29,7 @@ def __init__( highest_resolution: bool = False, strict: bool = True, cache: bool = True, + 
label_group=None, ) -> None: """Initialize the the Image Object. @@ -47,6 +55,7 @@ def __init__( meta_mode="image", cache=cache, ) + self._label_group = label_group @property def metadata(self) -> ImageMeta: @@ -65,3 +74,45 @@ def get_channel_idx( ) -> int: """Return the index of the channel.""" return self.metadata.get_channel_idx(label=label, wavelength_id=wavelength_id) + + def masked_array( + self, + roi: WorldCooROI, + t: int | slice | None = None, + c: int | slice | None = None, + mask_mode: Literal["bbox", "mask"] = "bbox", + mode: Literal["numpy"] = "numpy", + preserve_dimensions: bool = False, + ) -> ArrayLike: + """Return the image data from a region of interest (ROI). + + Args: + roi (WorldCooROI): The region of interest. + t (int | slice | None): The time index or slice. + c (int | slice | None): The channel index or slice. + mask_mode (str): Masking mode + mode (str): The mode to return the data. + preserve_dimensions (bool): Whether to preserve the dimensions of the data. + """ + data_pipe = self._build_roi_pipe( + roi=roi, t=t, c=c, preserve_dimensions=preserve_dimensions + ) + + if mask_mode == "bbox": + return self._get_pipe(data_pipe=data_pipe, mode=mode) + + label = self._label_group.get( + roi.infos["label_name"], pixel_size=self.pixel_size + ) + + mask = label.mask( + roi, + t=t, + mode=mode, + ) + array = self._get_pipe(data_pipe=data_pipe, mode=mode) + if mode == "numpy": + return_array = np.where(mask, array, 0) + else: + return_array = da.where(mask, array, 0) + return return_array diff --git a/src/ngio/core/image_like_handler.py b/src/ngio/core/image_like_handler.py index 81488bc..d5f6ff3 100644 --- a/src/ngio/core/image_like_handler.py +++ b/src/ngio/core/image_like_handler.py @@ -171,14 +171,14 @@ def pixel_size(self) -> PixelSize: # Method to get the data of the image @property - def array(self) -> zarr.Array: + def on_disk_array(self) -> zarr.Array: """Return the image data as a Zarr array.""" return self._array @property - def dask_array(self) -> da.core.Array: + def on_disk_dask_array(self) -> da.core.Array: """Return the image data as a Dask array.""" - return da.from_zarr(self.array) + return da.from_zarr(self.on_disk_array) @property def dimensions(self) -> Dimensions: @@ -211,9 +211,9 @@ def _get_pipe( ) -> ArrayLike: """Return the data transform pipe.""" if mode == "numpy": - return data_pipe.get(data=self.array) + return data_pipe.get(data=self.on_disk_array) elif mode == "dask": - return data_pipe.get(data=self.dask_array) + return data_pipe.get(data=self.on_disk_dask_array) else: raise ValueError(f"Invalid mode {mode}") @@ -224,13 +224,13 @@ def _set_pipe( ) -> None: """Set the data transform pipe.""" if isinstance(patch, np.ndarray): - data_pipe.set(data=self.array, patch=patch) + data_pipe.set(data=self.on_disk_array, patch=patch) elif isinstance(patch, (da.core.Array, Delayed)): # noqa: UP038 if self._dask_lock is None: - return data_pipe.set(data=self.array, patch=patch) + return data_pipe.set(data=self.on_disk_array, patch=patch) - array = self.array + array = self.on_disk_array with self._dask_lock: data_pipe.set(data=array, patch=patch) else: @@ -282,7 +282,7 @@ def _build_naive_pipe( ) return DataTransformPipe(slicer=slicer) - def get_data_from_roi( + def array_from_roi( self, roi: WorldCooROI, t: int | slice | None = None, @@ -305,7 +305,7 @@ def get_data_from_roi( return_array = self._get_pipe(data_pipe=data_pipe, mode=mode) return return_array - def set_data_from_roi( + def set_array_from_roi( self, roi: WorldCooROI, patch: ArrayLike, @@ 
-327,7 +327,7 @@ def set_data_from_roi( ) self._set_pipe(data_pipe=data_pipe, patch=patch) - def get_data( + def array( self, x: int | slice | None = None, y: int | slice | None = None, @@ -353,7 +353,7 @@ def get_data( ) return self._get_pipe(data_pipe=data_pipe, mode=mode) - def set_data( + def set_array( self, patch: ArrayLike, x: int | slice | None = None, @@ -420,12 +420,12 @@ def consolidate(self, order: int = 1) -> None: partial_zoom = partial(zoom, zoom=zoom_factors, order=order) out_image = da.map_blocks( partial_zoom, - source_image.dask_array, + source_image.on_disk_dask_array, # dtype = source_image.dask_array, - chunks=target_image.array.chunks, + chunks=target_image.on_disk_array.chunks, ) - out_image = out_image.astype(target_image.array.dtype) + out_image = out_image.astype(target_image.on_disk_array.dtype) # da.to_zarr(out_image, target_image.array, overwrite=True) - target_image.array[...] = out_image.compute() + target_image.on_disk_array[...] = out_image.compute() # compute the transformation processed_paths.append(target_image) diff --git a/src/ngio/core/label_handler.py b/src/ngio/core/label_handler.py index f500979..caa1982 100644 --- a/src/ngio/core/label_handler.py +++ b/src/ngio/core/label_handler.py @@ -61,7 +61,7 @@ def metadata(self) -> LabelMeta: """Return the metadata of the image.""" return super().metadata - def get_data_from_roi( + def array_from_roi( self, roi: WorldCooROI, t: int | slice | None = None, @@ -76,7 +76,7 @@ def get_data_from_roi( mode (str): The mode to return the data. preserve_dimensions (bool): Whether to preserve the dimensions of the data. """ - return super().get_data_from_roi( + return super().array_from_roi( roi=roi, t=t, c=None, mode=mode, preserve_dimensions=preserve_dimensions ) @@ -99,7 +99,7 @@ def set_data_from_roi( patch=patch, roi=roi, t=t, c=None, preserve_dimensions=preserve_dimensions ) - def get_data( + def array( self, x: int | slice | None = None, y: int | slice | None = None, @@ -118,7 +118,7 @@ def get_data( mode (str): The mode to return the data. preserve_dimensions (bool): Whether to preserve the dimensions of the data. """ - return super().get_data( + return super().array( x=x, y=y, z=z, @@ -128,7 +128,7 @@ def get_data( preserve_dimensions=preserve_dimensions, ) - def set_data( + def set_array( self, patch: ArrayLike, x: int | slice | None = None, @@ -147,7 +147,7 @@ def set_data( t (int | slice | None): The time index or slice. preserve_dimensions (bool): Whether to preserve the dimensions of the data. """ - return super().set_data( + return super().set_array( patch=patch, x=x, y=y, @@ -157,6 +157,35 @@ def set_data( preserve_dimensions=preserve_dimensions, ) + def mask( + self, + roi: WorldCooROI, + t: int | slice | None = None, + mode: Literal["numpy"] = "numpy", + preserve_dimensions: bool = False, + ) -> ArrayLike: + """Return the image data from a region of interest (ROI). + + Args: + roi (WorldCooROI): The region of interest. + t (int | slice | None): The time index or slice. + c (int | slice | None): The channel index or slice. + mask_mode (str): Masking mode + mode (str): The mode to return the data. + preserve_dimensions (bool): Whether to preserve the dimensions of the data. + """ + mask = self.array_from_roi( + roi=roi, t=t, mode=mode, preserve_dimensions=preserve_dimensions + ) + + label = roi.infos.get("label", None) + if label is None: + raise ValueError( + "Label not found in the ROI. Please provide a valid ROI Object." 
+ ) + mask = mask == label + return mask + def consolidate(self) -> None: """Consolidate the label group. @@ -250,7 +279,7 @@ def derive( label_0 = self.get(list_of_labels[0]) metadata = label_0.metadata on_disk_shape = label_0.on_disk_shape - chunks = label_0.array.chunks + chunks = label_0.on_disk_array.chunks dataset = label_0.dataset else: label_0 = self._image_ref @@ -262,12 +291,12 @@ def derive( + label_0.on_disk_shape[channel_index + 1 :] ) chunks = ( - label_0.array.chunks[:channel_index] - + label_0.array.chunks[channel_index + 1 :] + label_0.on_disk_array.chunks[:channel_index] + + label_0.on_disk_array.chunks[channel_index + 1 :] ) else: on_disk_shape = label_0.on_disk_shape - chunks = label_0.array.chunks + chunks = label_0.on_disk_array.chunks metadata = metadata.remove_axis("c") dataset = metadata.get_highest_resolution_dataset() @@ -276,7 +305,7 @@ def derive( "store": new_label_group, "shape": on_disk_shape, "chunks": chunks, - "dtype": label_0.array.dtype, + "dtype": label_0.on_disk_array.dtype, "on_disk_axis": dataset.on_disk_axes_names, "pixel_sizes": dataset.pixel_size, "xy_scaling_factor": metadata.xy_scaling_factor, diff --git a/src/ngio/core/ngff_image.py b/src/ngio/core/ngff_image.py index d57bf98..66ad0e6 100644 --- a/src/ngio/core/ngff_image.py +++ b/src/ngio/core/ngff_image.py @@ -121,14 +121,18 @@ def get_image( path=path, pixel_size=pixel_size, highest_resolution=highest_resolution, + label_group=LabelGroup(self.group, image_ref=None), ) def _update_omero_window(self) -> None: """Update the OMERO window.""" meta = self.image_meta image = self.get_image(highest_resolution=True) - max_dtype = np.iinfo(image.array.dtype).max - start, end = image.dask_array.min().compute(), image.dask_array.max().compute() + max_dtype = np.iinfo(image.on_disk_array.dtype).max + start, end = ( + image.on_disk_dask_array.min().compute(), + image.on_disk_dask_array.max().compute(), + ) channel_list = meta.omero.channels @@ -169,8 +173,8 @@ def derive_new_image( default_kwargs = { "store": store, "shape": image_0.on_disk_shape, - "chunks": image_0.array.chunks, - "dtype": image_0.array.dtype, + "chunks": image_0.on_disk_array.chunks, + "dtype": image_0.on_disk_array.dtype, "on_disk_axis": image_0.dataset.on_disk_axes_names, "pixel_sizes": image_0.pixel_size, "xy_scaling_factor": self.image_meta.xy_scaling_factor, diff --git a/src/ngio/tables/v1/masking_roi_tables.py b/src/ngio/tables/v1/masking_roi_tables.py index 4ccbb42..5dcf25f 100644 --- a/src/ngio/tables/v1/masking_roi_tables.py +++ b/src/ngio/tables/v1/masking_roi_tables.py @@ -134,10 +134,13 @@ def from_label(self, label: Label, overwrite: bool = False) -> None: def get_roi(self, label: int) -> WorldCooROI: """Get an ROI from the table.""" if label not in self.list_labels: - raise ValueError(f"Field index {label} not found in the table.") + raise ValueError(f"Label {label} not found in the table.") table_df = self.table.loc[label] + region_path = self.meta.region["path"] + label_name = region_path.split("/")[-1] + roi = WorldCooROI( x=table_df.loc["x_micrometer"], y=table_df.loc["y_micrometer"], @@ -146,7 +149,11 @@ def get_roi(self, label: int) -> WorldCooROI: y_length=table_df.loc["len_y_micrometer"], z_length=table_df.loc["len_z_micrometer"], unit="micrometer", - infos={"label": label, "label_image": self.meta.region["path"]}, + infos={ + "label": label, + "label_image": region_path, + "label_name": label_name, + }, ) return roi diff --git a/tests/core/test_image_handler.py b/tests/core/test_image_handler.py index 
6e4c9c0..7c7d276 100644 --- a/tests/core/test_image_handler.py +++ b/tests/core/test_image_handler.py @@ -11,16 +11,15 @@ def test_ngff_image(self, ome_zarr_image_v04_path): assert image_handler.get_channel_idx(wavelength_id="A01_C01") == 0 shape = image_handler.shape - assert image_handler.get_data(c=0).shape == shape[1:] + assert image_handler.array(c=0).shape == shape[1:] assert ( - image_handler.get_data(c=0, preserve_dimensions=True).shape - == (1,) + shape[1:] + image_handler.array(c=0, preserve_dimensions=True).shape == (1,) + shape[1:] ) - image_handler.set_data(patch=np.ones((3, 10, 256, 256), dtype=np.uint16)) - assert image_handler.get_data(c=0, t=0, z=0, x=0, y=0) == 1 + image_handler.set_array(patch=np.ones((3, 10, 256, 256), dtype=np.uint16)) + assert image_handler.array(c=0, t=0, z=0, x=0, y=0) == 1 image_handler.consolidate(order=0) image_handler_1 = Image(store=ome_zarr_image_v04_path, path="1") - assert image_handler_1.get_data(c=0, t=0, z=0, x=0, y=0) == 1 + assert image_handler_1.array(c=0, t=0, z=0, x=0, y=0) == 1 diff --git a/tests/core/test_image_like_handler.py b/tests/core/test_image_like_handler.py index a71d2db..33fb99e 100644 --- a/tests/core/test_image_like_handler.py +++ b/tests/core/test_image_like_handler.py @@ -21,22 +21,21 @@ def test_ngff_image(self, ome_zarr_image_v04_path): assert not image_handler.dimensions.is_time_series() assert image_handler.dimensions.has_multiple_channels() - assert image_handler.array.shape == shape - assert image_handler.dask_array.shape == shape + assert image_handler.on_disk_array.shape == shape + assert image_handler.on_disk_dask_array.shape == shape - assert image_handler.get_data(c=0).shape == shape[1:] + assert image_handler.array(c=0).shape == shape[1:] assert ( - image_handler.get_data(c=0, preserve_dimensions=True).shape - == (1,) + shape[1:] + image_handler.array(c=0, preserve_dimensions=True).shape == (1,) + shape[1:] ) - image_handler.set_data(patch=np.ones((3, 10, 256, 256), dtype=np.uint16)) - assert image_handler.get_data(c=0, t=0, z=0, x=0, y=0) == 1 + image_handler.set_array(patch=np.ones((3, 10, 256, 256), dtype=np.uint16)) + assert image_handler.array(c=0, t=0, z=0, x=0, y=0) == 1 image_handler.consolidate(order=0) image_handler_1 = ImageLike(store=ome_zarr_image_v04_path, path="1") - assert image_handler_1.get_data(c=0, t=0, z=0, x=0, y=0) == 1 + assert image_handler_1.array(c=0, t=0, z=0, x=0, y=0) == 1 @pytest.mark.skip("Not implemented yet") def test_ngff_image_from_pixel_size(self, ome_zarr_image_v04_path): diff --git a/tests/core/test_label_handler.py b/tests/core/test_label_handler.py index 1ec2950..6e27501 100644 --- a/tests/core/test_label_handler.py +++ b/tests/core/test_label_handler.py @@ -14,12 +14,12 @@ def test_label_image(self, ome_zarr_image_v04_path): assert label_handler.shape == image_handler.shape[1:] shape = label_handler.shape - assert label_handler.get_data(mode="dask").shape == shape + assert label_handler.array(mode="dask").shape == shape - label_handler.set_data(patch=da.ones((10, 256, 256), dtype=np.uint16)) - assert label_handler.get_data(t=0, z=0, x=0, y=0) == 1 + label_handler.set_array(patch=da.ones((10, 256, 256), dtype=np.uint16)) + assert label_handler.array(t=0, z=0, x=0, y=0) == 1 label_handler.consolidate() label_handler_1 = ngff_image.label.get(name="label") - assert label_handler_1.get_data(t=0, z=0, x=0, y=0) == 1 + assert label_handler_1.array(t=0, z=0, x=0, y=0) == 1 diff --git a/tests/core/test_ngff_image.py b/tests/core/test_ngff_image.py index b136207..591b1ea 
100644 --- a/tests/core/test_ngff_image.py +++ b/tests/core/test_ngff_image.py @@ -21,5 +21,9 @@ def test_ngff_image(self, ome_zarr_image_v04_path): assert new_image_handler.channel_labels == image_handler.channel_labels assert new_image_handler.shape == image_handler.shape assert new_image_handler.pixel_size.zyx == image_handler.pixel_size.zyx - assert new_image_handler.array.shape == image_handler.array.shape - assert new_image_handler.array.chunks == image_handler.array.chunks + assert ( + new_image_handler.on_disk_array.shape == image_handler.on_disk_array.shape + ) + assert ( + new_image_handler.on_disk_array.chunks == image_handler.on_disk_array.chunks + )
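
Usage sketch (reviewer note, not part of the patch): the new masking-ROI path introduced here is `Image.masked_array`, backed by `Label.mask` and the `label_name` info that `MaskingROITableV1.get_roi` now attaches to each ROI. The store path, the `NgffImage(...)` constructor call, and the hand-built `WorldCooROI` below are illustrative assumptions; in practice the ROI would come from a masking ROI table via `get_roi(label)`.

    import numpy as np

    from ngio.core.ngff_image import NgffImage
    from ngio.core.roi import WorldCooROI

    # Illustrative store path and constructor usage (not shown in this patch).
    ngff_image = NgffImage("path/to/image.zarr")

    # get_image() now forwards the label group, so masked_array() can resolve
    # the label image referenced by a masking ROI.
    image = ngff_image.get_image(highest_resolution=True)

    # Hand-built ROI for illustration; a real one would come from a masking
    # ROI table, whose get_roi(label) now fills in "label" and "label_name".
    roi = WorldCooROI(
        x=0.0, y=0.0, z=0.0,
        x_length=50.0, y_length=50.0, z_length=1.0,
        unit="micrometer",
        infos={"label": 10, "label_image": "../labels/nuclei", "label_name": "nuclei"},
    )

    # mask_mode="bbox" returns the plain bounding-box crop (same data as array_from_roi).
    bbox_patch = image.masked_array(roi=roi, c=0, mask_mode="bbox")

    # mask_mode="mask" additionally zeroes every pixel whose label value differs
    # from roi.infos["label"], using Label.mask() under the hood.
    masked_patch = image.masked_array(roi=roi, c=0, mask_mode="mask")

    assert masked_patch.shape == bbox_patch.shape
    assert np.count_nonzero(masked_patch) <= np.count_nonzero(bbox_patch)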
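
For completeness, what `Label.mask()` itself returns: a boolean array over the ROI's bounding box, true where the label image equals `roi.infos["label"]`. The sketch below reuses `ngff_image` and `roi` from above and assumes a label image named "nuclei", accessed through the label group accessor already used in the notebooks.

    # Label handler behind the masking ROI (name "nuclei" is illustrative).
    nuclei = ngff_image.label.get("nuclei")

    bool_mask = nuclei.mask(roi)              # boolean mask over the ROI bbox
    label_crop = nuclei.array_from_roi(roi)   # integer label values in the same bbox
    assert (bool_mask == (label_crop == roi.infos["label"])).all()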
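
Finally, a companion sketch of the renamed ImageLike accessors this patch rolls out across the notebooks and tests: `get_data`/`set_data` become `array`/`set_array`, `get_data_from_roi`/`set_data_from_roi` become `array_from_roi`/`set_array_from_roi`, and the raw Zarr/Dask views move to `on_disk_array`/`on_disk_dask_array`. `image` and `roi` are assumed to be the objects from the first sketch; the slice bounds are arbitrary.

    import numpy as np

    # Canonical-order (TCZYX) read, replacing the old get_data().
    patch = image.array(x=slice(0, 256), y=slice(0, 256), mode="numpy")

    # Write a patch back, replacing the old set_data(); then restore the data.
    image.set_array(patch=np.zeros_like(patch), x=slice(0, 256), y=slice(0, 256))
    image.set_array(patch=patch, x=slice(0, 256), y=slice(0, 256))

    # ROI-based read/write, replacing get_data_from_roi()/set_data_from_roi().
    crop = image.array_from_roi(roi=roi, mode="numpy")
    image.set_array_from_roi(patch=crop, roi=roi)

    # Raw on-disk views keep whatever axis order is stored in the Zarr group.
    zarr_view = image.on_disk_array        # zarr.Array
    dask_view = image.on_disk_dask_array   # dask.array.Array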