Handle dpnp functions and tests to run on CUDA devices #2075

Open
wants to merge 30 commits into master from tests_cuda
Commits (30)
b6bd08a  Testing is adapted for cuda devices (npolina4, Sep 20, 2024)
5f1b083  Apply fallback to numpy for all unsupported functions on cuda device. (npolina4, Sep 24, 2024)
fe8fe11  update tests (npolina4, Sep 25, 2024)
0c64722  Applied review comments (npolina4, Sep 26, 2024)
4ad814c  Merge branch 'master' into tests_cuda (npolina4, Oct 22, 2024)
6e2c3c7  Update test_indexing.py (npolina4, Oct 25, 2024)
4e3c87c  Update test_solve.py (npolina4, Oct 25, 2024)
94a418e  Update test_histogram.py (npolina4, Oct 25, 2024)
1461e81  Update test_histogram.py (npolina4, Oct 25, 2024)
47f51e3  Merge branch 'master' into tests_cuda (npolina4, Oct 25, 2024)
2fca5f7  Merge commit '7bfe0c8eec481452dcdd07d99eaf01373769ab5b' into tests_cuda (vlad-perevezentsev, Nov 14, 2024)
519008e  Update skipped_tests_cuda.tbl (vlad-perevezentsev, Nov 14, 2024)
a38949d  Apply fallback to numpy for TestRational and test_copy_multigpu (vlad-perevezentsev, Nov 14, 2024)
139c784  Address remarks (vlad-perevezentsev, Nov 14, 2024)
f7e3778  Merge master into tests_cuda (vlad-perevezentsev, Nov 21, 2024)
42f20fc  Merge master into tests_cuda (vlad-perevezentsev, Nov 27, 2024)
351e12d  Use dpctl.select_default_device() in is_cuda_backend() func (vlad-perevezentsev, Nov 29, 2024)
590dbc8  Raise NotImplementedError in unsupported functions on CUDA (vlad-perevezentsev, Nov 29, 2024)
cc48533  Implement is_cuda_device() func for tests in helper.py (vlad-perevezentsev, Nov 29, 2024)
645c6d9  Skipped tests for unsupported functions on CUDA (vlad-perevezentsev, Nov 29, 2024)
6f688fe  Update test_arithmetic.py (vlad-perevezentsev, Nov 29, 2024)
b217cc5  Handle TestSpacing to run on CUDA (vlad-perevezentsev, Nov 29, 2024)
5179c06  Update fft tests to run on CUDA (vlad-perevezentsev, Nov 29, 2024)
f1c5eaf  Update linalg tests to run on CUDA (vlad-perevezentsev, Dec 2, 2024)
0a79e66  Avoid using dpnp.random in cupy tests on CUDA (vlad-perevezentsev, Dec 2, 2024)
e3e9afe  Remove previously added fixtures for unsupported funcs on CUDA (vlad-perevezentsev, Dec 2, 2024)
f015b88  Merge master into tests_cuda (vlad-perevezentsev, Dec 2, 2024)
58fba44  Fix logic for modf() (vlad-perevezentsev, Dec 2, 2024)
c995da4  Use fallback on numpy for TestChoose (vlad-perevezentsev, Dec 2, 2024)
a0219b3  Merge master into tests_cuda (vlad-perevezentsev, Dec 2, 2024)
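The test-side commits above (cc48533, 645c6d9) add a helper and a skip pattern for CUDA. A hedged sketch of what such a skip might look like; ``is_cuda_device`` is only named in the commit messages, so its import path and exact signature below are assumptions:

    import pytest
    import dpnp

    # Assumed helper from tests/helper.py: returns True when the default
    # SYCL device uses the CUDA backend (signature is an assumption).
    from helper import is_cuda_device

    @pytest.mark.skipif(
        is_cuda_device(),
        reason="modf is not supported on the CUDA backend yet",
    )
    def test_modf_basic():
        frac, integral = dpnp.modf(dpnp.array([1.5, 2.25]))
        assert frac.shape == integral.shape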
35 changes: 35 additions & 0 deletions dpnp/dpnp_iface.py
@@ -68,6 +68,7 @@
"get_normalized_queue_device",
"get_result_array",
"get_usm_ndarray",
"is_cuda_backend",
"get_usm_ndarray_or_scalar",
"is_supported_array_or_scalar",
"is_supported_array_type",
@@ -736,6 +737,40 @@ def get_usm_ndarray_or_scalar(a):
return a if dpnp.isscalar(a) else get_usm_ndarray(a)


def is_cuda_backend(obj=None):
"""
Checks whether the object has a CUDA backend.

Parameters
----------
obj : {Device, SyclDevice, SyclQueue, dpnp.ndarray, usm_ndarray, None},
optional
An input object with a ``sycl_device`` property to check the device backend.
If `obj` is ``None``, the backend of the default device is checked.
Default: ``None``.

Returns
-------
out : bool
Return ``True`` if the object has a CUDA backend, otherwise ``False``.

"""

if obj is None:
sycl_device = dpctl.select_default_device()
elif isinstance(obj, dpctl.SyclDevice):
sycl_device = obj
else:
sycl_device = getattr(obj, "sycl_device", None)
if (
sycl_device is not None
and sycl_device.backend == dpctl.backend_type.cuda
):
return True
return False


def is_supported_array_or_scalar(a):
"""
Return ``True`` if `a` is a scalar or an array of either
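For reference, a minimal usage sketch of the new ``dpnp.is_cuda_backend`` helper introduced above (not part of the diff; the array and device objects are illustrative):

    import dpctl
    import dpnp

    # With no argument, the backend of the default-selected device is checked.
    on_cuda = dpnp.is_cuda_backend()

    # Any object exposing a ``sycl_device`` attribute can be passed,
    # e.g. a dpnp.ndarray allocated on the current device.
    x = dpnp.arange(10)
    print(dpnp.is_cuda_backend(x))

    # A SyclDevice can also be passed directly.
    dev = dpctl.select_default_device()
    print(dpnp.is_cuda_backend(dev))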
6 changes: 6 additions & 0 deletions dpnp/dpnp_iface_indexing.py
@@ -127,6 +127,7 @@ def choose(x1, choices, out=None, mode="raise"):
:obj:`dpnp.take_along_axis` : Preferable if choices is an array.

"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)

choices_list = []
@@ -136,6 +137,11 @@
)

if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if any(not desc for desc in choices_list):
pass
elif out is not None:
4 changes: 4 additions & 0 deletions dpnp/dpnp_iface_libmath.py
@@ -82,6 +82,10 @@ def erf(in_array1):
in_array1, copy_when_strides=False, copy_when_nondefault_queue=False
)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)
return dpnp_erf(x1_desc).get_pyobj()

result = create_output_descriptor_py(
10 changes: 8 additions & 2 deletions dpnp/dpnp_iface_mathematical.py
@@ -2949,8 +2949,14 @@
"""

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc and not kwargs:
return dpnp_modf(x1_desc)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if not kwargs:
return dpnp_modf(x1_desc)

return call_origin(numpy.modf, x1, **kwargs)

5 changes: 5 additions & 0 deletions dpnp/dpnp_iface_sorting.py
@@ -187,6 +187,11 @@ def partition(x1, kth, axis=-1, kind="introselect", order=None):

x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
if x1_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if not isinstance(kth, int):
pass
elif x1_desc.ndim == 0:
7 changes: 7 additions & 0 deletions dpnp/dpnp_iface_statistics.py
@@ -484,6 +484,13 @@ def correlate(x1, x2, mode="valid"):
x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)
x2_desc = dpnp.get_dpnp_descriptor(x2, copy_when_nondefault_queue=False)
if x1_desc and x2_desc:
if dpnp.is_cuda_backend(x1_desc.get_array()) or dpnp.is_cuda_backend(
x2_desc.get_array()
):
raise NotImplementedError(
"Running on CUDA is currently not supported"
)

if x1_desc.size != x2_desc.size or x1_desc.size == 0:
pass
elif x1_desc.shape != x2_desc.shape:
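Taken together, these hunks make the legacy functions shown here (``choose``, ``erf``, ``modf``, ``partition``, ``correlate``) raise ``NotImplementedError`` on the CUDA backend. A hedged sketch of how a caller could fall back to NumPy under this change (array contents are illustrative; the default device is assumed to be CUDA):

    import numpy
    import dpnp

    x = dpnp.array([1.5, -2.25, 3.0])

    try:
        # Raises NotImplementedError on the CUDA backend per this PR.
        frac, integral = dpnp.modf(x)
    except NotImplementedError:
        # Fall back to host NumPy until CUDA support is implemented.
        frac, integral = numpy.modf(dpnp.asnumpy(x))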