Skip to content

Commit

Permalink
Merge branch 'icechunk-append' of github.com:zarr-developers/virtualizarr into icechunk-append
Browse files Browse the repository at this point in the history
  • Loading branch information
abarciauskas-bgse committed Dec 5, 2024
2 parents 84a4d01 + 258c92f commit 299f580
Show file tree
Hide file tree
Showing 5 changed files with 14 additions and 14 deletions.
2 changes: 1 addition & 1 deletion ci/upstream.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,6 @@ dependencies:
- fsspec
- pip
- pip:
- icechunk==0.1.0a7 # Installs zarr v3 as dependency
- icechunk>=0.1.0a7 # Installs zarr v3 as dependency
# - git+https://github.com/fsspec/kerchunk@main # kerchunk is currently incompatible with zarr-python v3 (https://github.com/fsspec/kerchunk/pull/516)
- imagecodecs-numcodecs==2024.6.1
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ hdf_reader = [
"numcodecs"
]
icechunk = [
"icechunk==0.1.0a7",
"icechunk>=0.1.0a7",
]
test = [
"codecov",
Expand Down
2 changes: 1 addition & 1 deletion virtualizarr/accessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def to_icechunk(
Any variables backed by ManifestArray objects will be written as virtual references, any other variables will be loaded into memory before their binary chunk data is written into the store.
If `append_dim` is provided, the virtual dataset will be appended on the append_dim axis to the existing IcechunkStore.
If `append_dim` is provided, the virtual dataset will be appended to the existing IcechunkStore along the `append_dim` dimension.
Parameters
----------
Expand Down
16 changes: 8 additions & 8 deletions virtualizarr/readers/hdf/hdf.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,11 +28,11 @@

h5py = soft_import("h5py", "For reading hdf files", strict=False)
if h5py:
Dataset = h5py.Dataset
Group = h5py.Group
Dataset = h5py.Dataset # type: ignore
Group = h5py.Group # type: ignore
else:
Dataset = dict()
Group = dict()
Dataset = dict() # type: ignore
Group = dict() # type: ignore


class HDFVirtualBackend(VirtualBackend):
Expand Down Expand Up @@ -183,14 +183,14 @@ def _dataset_dims(dataset: Dataset) -> Union[List[str], List[None]]:
rank = len(dataset.shape)
if rank:
for n in range(rank):
num_scales = len(dataset.dims[n])
num_scales = len(dataset.dims[n]) # type: ignore
if num_scales == 1:
dims.append(dataset.dims[n][0].name[1:])
dims.append(dataset.dims[n][0].name[1:]) # type: ignore
elif h5py.h5ds.is_scale(dataset.id):
dims.append(dataset.name[1:])
elif num_scales > 1:
raise ValueError(
f"{dataset.name}: {len(dataset.dims[n])} "
f"{dataset.name}: {len(dataset.dims[n])} " # type: ignore
f"dimension scales attached to dimension #{n}"
)
elif num_scales == 0:
Expand Down Expand Up @@ -287,7 +287,7 @@ def _dataset_to_variable(path: str, dataset: Dataset) -> Optional[Variable]:
fill_value = fill_value.item()
filters = [codec.get_config() for codec in codecs]
zarray = ZArray(
chunks=chunks,
chunks=chunks, # type: ignore
compressor=None,
dtype=dtype,
fill_value=fill_value,
Expand Down
6 changes: 3 additions & 3 deletions virtualizarr/tests/test_writers/test_icechunk.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from itertools import product
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Tuple, cast
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, cast

import pytest

Expand Down Expand Up @@ -338,8 +338,8 @@ def icechunk_storage(tmpdir) -> "StorageConfig":

def generate_chunk_manifest(
netcdf4_file: str,
shape: Tuple[int, ...],
chunks: Tuple[int, ...],
shape: tuple[int, ...],
chunks: tuple[int, ...],
offset=6144,
length=48,
) -> ChunkManifest:
Expand Down

0 comments on commit 299f580

Please sign in to comment.