diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index cf5f956e..d657617b 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -9,7 +9,6 @@ dependencies:
 - c-compiler
 - click
 - cmake>=3.26.4,!=3.30.0
-- cuda-python>=11.7.1,<12.0a0
 - cuda-version=11.8
 - cudatoolkit
 - cupy>=12.0.0
diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml
index f4bd88a1..a193f5cd 100644
--- a/conda/environments/all_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-125_arch-x86_64.yaml
@@ -11,7 +11,6 @@ dependencies:
 - cmake>=3.26.4,!=3.30.0
 - cuda-cudart-dev
 - cuda-nvcc
-- cuda-python>=12.0,<13.0a0
 - cuda-version=12.5
 - cupy>=12.0.0
 - cxx-compiler
diff --git a/dependencies.yaml b/dependencies.yaml
index 67efbf60..62e2ddef 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -322,13 +322,3 @@ dependencies:
         packages:
           # Already added to requirements via docs. This is for tests.
           - numpydoc>=1.5
-    specific:
-      - output_types: [conda, requirements, pyproject]
-        matrices:
-          - matrix: {cuda: "12.*"}
-            packages:
-              - cuda-python>=12.0,<13.0a0
-          - matrix: {cuda: "11.*"}
-            packages: &test_cuda_python_cu11
-              - cuda-python>=11.7.1,<12.0a0
-          - {matrix: null, packages: *test_cuda_python_cu11}
diff --git a/python/cucim/pyproject.toml b/python/cucim/pyproject.toml
index b8d1121b..25248b3b 100644
--- a/python/cucim/pyproject.toml
+++ b/python/cucim/pyproject.toml
@@ -57,7 +57,6 @@ Tracker = "https://github.com/rapidsai/cucim/issues"
 
 [project.optional-dependencies]
 test = [
-    "cuda-python>=11.7.1,<12.0a0",
     "imagecodecs>=2021.6.8; platform_machine=='x86_64'",
     "matplotlib",
     "numpydoc>=1.5",
diff --git a/python/cucim/tests/performance/clara/test_read_region_memory_usage.py b/python/cucim/tests/performance/clara/test_read_region_memory_usage.py
index 33a5c3e0..9b17b154 100644
--- a/python/cucim/tests/performance/clara/test_read_region_memory_usage.py
+++ b/python/cucim/tests/performance/clara/test_read_region_memory_usage.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 
-import cuda.cudart
+import cupy as cp
 import pytest
 
 from ...util.io import open_image_cucim
@@ -25,14 +25,13 @@ def test_read_region_cuda_memleak(testimg_tiff_stripe_4096x4096_256_jpeg):
 
     def get_used_gpu_memory_mib():
        """Get the used GPU memory in MiB."""
-        status, free, total = cuda.cudart.cudaMemGetInfo()
-        if status != cuda.cudart.cudaError_t.cudaSuccess:
-            raise RuntimeError("Failed to get GPU memory info.")
+        dev = cp.cuda.Device()
+        free, total = dev.mem_info
         memory_used = (total - free) / (2**20)
         return memory_used
 
-    status, num_gpus = cuda.cudart.cudaGetDeviceCount()
-    if status != cuda.cudart.cudaError_t.cudaSuccess or num_gpus == 0:
+    num_gpus = cp.cuda.runtime.getDeviceCount()
+    if num_gpus == 0:
         pytest.skip("No gpu available")
 
     img = open_image_cucim(testimg_tiff_stripe_4096x4096_256_jpeg)
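
For context, here is a minimal standalone sketch (not part of the patch) of the CuPy calls that take over from the removed `cuda-python` runtime API. It assumes CuPy is installed with a working CUDA driver; unlike `cuda.cudart`, the CuPy wrappers raise `cupy.cuda.runtime.CUDARuntimeError` on failure rather than returning a status code, so no manual status check is needed.

```python
# Sketch only: CuPy equivalents of the removed cuda-python calls.
import cupy as cp

# Replaces cudaGetDeviceCount(); raises CUDARuntimeError if the
# CUDA driver/runtime is unavailable.
num_gpus = cp.cuda.runtime.getDeviceCount()
if num_gpus == 0:
    raise SystemExit("No GPU available")

# Replaces cudaMemGetInfo(); mem_info returns (free, total) in bytes
# for the current device.
free, total = cp.cuda.Device().mem_info
print(f"Used GPU memory: {(total - free) / 2**20:.1f} MiB")
```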