Extend UT test_nonzero_static_large to XPU device and skip some cases (#1161)

1. Extend UT test_nonzero_static_large to the XPU device.
2. Skip some cases due to floor/trunc division rounding accuracy issues.
3. Skip some cases for unsupported TunableOp.

---------

Signed-off-by: Cheng <[email protected]>
Signed-off-by: Cheng, Penghui <[email protected]>
PenghuiCheng authored Dec 20, 2024
1 parent cd0873c commit 9ed0a1a
Showing 4 changed files with 69 additions and 2 deletions.
16 changes: 16 additions & 0 deletions test/xpu/extended/skip_list_arc.py
@@ -7,5 +7,21 @@
"test_compare_cpu_bincount_xpu_int64",
"test_compare_cpu_bincount_xpu_int8",
"test_compare_cpu_bincount_xpu_uint8",
# RuntimeError: Kernel is incompatible with all devices in devs
# https://github.com/intel/torch-xpu-ops/issues/1150
"test_compare_cpu_logcumsumexp_xpu_float16",
"test_compare_cpu_logcumsumexp_xpu_float32",
"test_compare_cpu_nn_functional_pdist_xpu_float32",
"test_compare_cpu_tril_indices_xpu_int32",
"test_compare_cpu_tril_indices_xpu_int64",
"test_compare_cpu_triu_indices_xpu_int32",
"test_compare_cpu_triu_indices_xpu_int64",
"test_backward_logcumsumexp_xpu_float32",
"test_backward_nn_functional_pdist_xpu_float32",
"test_forward_ad_logcumsumexp_xpu_float32",
"test_operator_logcumsumexp_xpu_float32",
"test_operator_nn_functional_pdist_xpu_float32",
"test_view_replay_logcumsumexp_xpu_float32",
"test_view_replay_nn_functional_pdist_xpu_float32",
),
}
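For readers unfamiliar with the layout: each skip-list module exposes a dict that maps a test file name to a tuple of case names the runner should exclude. Below is a minimal sketch of how such a list could be wired into pytest; the module/variable names (skip_list_arc.skip_dict), the "test_ops_xpu.py" key, and the runner itself are illustrative assumptions, not code from this commit — torch-xpu-ops' own run scripts may consume the lists differently.

# Hypothetical consumer of a skip list -- a sketch only, not the project's runner.
import sys

import pytest

from skip_list_arc import skip_dict  # assumed: {"test_ops_xpu.py": ("case_a", "case_b", ...)}


def run_with_skips(test_file: str) -> int:
    """Run one test file under pytest, excluding every case named in the skip list."""
    skipped = skip_dict.get(test_file) or ()
    args = [test_file, "-v"]
    if skipped:
        # Build a single -k expression such as "not case_a and not case_b".
        args += ["-k", " and ".join(f"not {case}" for case in skipped)]
    return pytest.main(args)


if __name__ == "__main__":
    sys.exit(run_with_skips("test_ops_xpu.py"))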
4 changes: 4 additions & 0 deletions test/xpu/extended/skip_list_common.py
@@ -194,5 +194,9 @@
# Greatest absolute difference: 0.0625 at index (1,) (up to 0.001 allowed)
# Greatest relative difference: 0.00640869140625 at index (1,) (up to 0.001 allowed)
"test_compare_cpu_xlogy_xpu_bfloat16",
"test_compare_cpu_div_trunc_rounding_xpu_float64",
"test_compare_cpu_div_trunc_rounding_xpu_float16",
"test_compare_cpu_div_floor_rounding_xpu_float16",
"test_compare_cpu_div_floor_rounding_xpu_bfloat16",
),
}
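The div_trunc_rounding / div_floor_rounding entries above exercise torch.div's rounding_mode argument, where the XPU result can drift past the comparison tolerance against the CPU reference at reduced precision. A quick plain-PyTorch illustration of the two modes (independent of this commit's code, values chosen for clarity):

import torch

a = torch.tensor([7.0, -7.0], dtype=torch.float16)
b = torch.tensor([2.0, 2.0], dtype=torch.float16)

# rounding_mode="trunc" rounds the quotient toward zero.
print(torch.div(a, b, rounding_mode="trunc"))   # tensor([ 3., -3.], dtype=torch.float16)
# rounding_mode="floor" rounds the quotient toward negative infinity.
print(torch.div(a, b, rounding_mode="floor"))   # tensor([ 3., -4.], dtype=torch.float16)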
16 changes: 15 additions & 1 deletion test/xpu/skip_list_common.py
@@ -649,6 +649,14 @@
"test_python_ref__refs_square_xpu_complex64",
"test_python_ref_torch_fallback__refs_square_xpu_complex64",
"test_python_ref_torch_fallback__refs_exp_xpu_complex128",

# Failed on rolling driver, passed on preci
"test_python_ref__refs_div_trunc_rounding_xpu_float64",
"test_python_ref_executor__refs_div_trunc_rounding_executor_aten_xpu_float64",
"test_python_ref_torch_fallback__refs_div_trunc_rounding_xpu_float64",

# TODO: passed from source code building version, investigate
"test_python_ref__refs_log2_xpu_complex128",
),

"test_binary_ufuncs_xpu.py": (
@@ -1136,6 +1144,7 @@
# Greatest relative difference: 1.9145216356264427e-05 at index (463, 204) (up to 1.3e-06 allowed)
"test_reference_numerics_normal__refs_asinh_xpu_complex64",
"test_reference_numerics_normal_asinh_xpu_complex64",
"test_batch_vs_slicing__refs_sigmoid_xpu_complex128",
# Unexpected success: CUDA uses thrust::sqrt and has accuracy issue. XPU use std::sqrt and has no issue.
"test_reference_numerics_large_rsqrt_xpu_complex32",
# Numeric difference
@@ -1514,6 +1523,8 @@
# XPU does not support tunable.
"test_bmm_tunableop_rocm_xpu_float32",
"test_numeric_check_leak_tunableop_rocm_xpu_float32",
"test_dump_results_on_exit_tunableop_xpu_float32",
"test_rotating_buffer_tunableop_xpu_float32",
# CUDA bias cases added in latest PyTorch
# AttributeError: module 'torch._C' has no attribute '_cuda_tunableop_enable'
"test_matmul_check_entries_tunableop_xpu_float16",
@@ -3230,7 +3241,10 @@

"test_type_promotion_xpu.py": None,

"test_distributions_xpu.py": None,
"test_distributions_xpu.py": (
# TODO: Passed on lts driver version, but failed on rolling driver version
"test_gamma_gpu_sample_xpu",
),

"test_optim_xpu.py": (
# oneDNN issues
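Regarding the *_tunableop_* skips above: TunableOp is upstream PyTorch's runtime GEMM-tuning facility for CUDA/ROCm builds, and the XPU backend does not provide the underlying torch._C._cuda_tunableop_* bindings, which is the AttributeError noted in the skip-list comment. A guarded sketch of the upstream toggle, assuming the torch.cuda.tunable API of recent PyTorch releases:

import torch

# Only meaningful on CUDA/ROCm builds; on XPU builds the binding is missing,
# which is why the tunableop cases are skipped rather than run.
if hasattr(torch._C, "_cuda_tunableop_enable"):
    torch.cuda.tunable.enable(True)          # turn TunableOp on for subsequent GEMMs
    print(torch.cuda.tunable.is_enabled())
else:
    print("TunableOp bindings not available in this build")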
35 changes: 34 additions & 1 deletion test/xpu/test_unary_ufuncs_xpu.py
@@ -1,6 +1,7 @@
# Owner(s): ["module: intel"]

-from torch.testing._internal.common_device_type import instantiate_device_type_tests
+import torch
+from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyXPU
from torch.testing._internal.common_utils import run_tests

try:
@@ -11,6 +12,38 @@
with XPUPatchForImport(False):
    from test_unary_ufuncs import TestUnaryUfuncs

    @onlyXPU
    def _nonzero_static_large(self, device):
        # large enough to have multiple iters per SM even on H100
        # with 132 sms
        size_inp = 1024 * 16 * 132 + 1024 * 16
        x = torch.zeros(size_inp, device=device)
        # unique indices
        indices = torch.randperm(size_inp, device=device)[: size_inp // 2]
        sorted, _ = torch.sort(indices)
        x[sorted] = 1
        res = torch.nonzero_static(x, size=size_inp // 2).view(-1)
        self.assertEqual(res, sorted)
        # no oob writes
        out = torch.full((size_inp,), 10, device=device, dtype=torch.int64)
        res = torch.nonzero_static(x, size=size_inp // 4, out=out[: size_inp // 2])
        self.assertEqual(out[: size_inp // 4], sorted[: size_inp // 4])
        self.assertEqual(
            out[size_inp // 4 :],
            torch.tensor(10, device="xpu").expand_as(out[size_inp // 4 :]),
        )
        # correct fill for 2d
        x = x.view(2, size_inp // 2)
        ref = x.nonzero()
        res = x.nonzero_static(size=size_inp // 2 + 2)
        self.assertEqual(res.shape, [size_inp // 2 + 2, 2])
        self.assertEqual(ref, res[: size_inp // 2])
        self.assertEqual(
            res[size_inp // 2 :],
            torch.tensor(-1, device="xpu").expand_as(res[size_inp // 2 :]),
        )
    TestUnaryUfuncs.test_nonzero_static_large = _nonzero_static_large

instantiate_device_type_tests(TestUnaryUfuncs, globals(), only_for=("xpu"), allow_xpu=True)

if __name__ == "__main__":
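For context on the patched test: torch.nonzero_static is the fixed-output-size variant of torch.nonzero — it always returns size rows (one column per input dimension), padding with fill_value (default -1) when there are fewer nonzero elements and truncating when there are more, which is what the assertions above check at scale. A small illustration, assuming a backend that implements the operator (the test uses "xpu"; on older builds it may be CUDA-only):

import torch

# Fall back to CPU if this build has no XPU runtime.
device = "xpu" if hasattr(torch, "xpu") and torch.xpu.is_available() else "cpu"

x = torch.tensor([0.0, 3.0, 0.0, 5.0], device=device)

# Two nonzeros, but four rows requested: the tail is padded with -1.
print(torch.nonzero_static(x, size=4))
# tensor([[ 1],
#         [ 3],
#         [-1],
#         [-1]])

# Requesting fewer rows than there are nonzeros truncates the result.
print(torch.nonzero_static(x, size=1))   # tensor([[1]])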
