From 1d62db3a45f3aada2034a405daab2a5df14cb23c Mon Sep 17 00:00:00 2001
From: Manolis Papadakis
Date: Wed, 19 Jul 2023 17:11:07 -0700
Subject: [PATCH 01/33] Fix Wundefined-var-template clang warning (#992)

---
 src/cunumeric/arg.inl | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/src/cunumeric/arg.inl b/src/cunumeric/arg.inl
index be19db35e..2b3c57b8a 100644
--- a/src/cunumeric/arg.inl
+++ b/src/cunumeric/arg.inl
@@ -112,4 +112,35 @@ __CUDA_HD__ inline void Argval<T>::apply(const Argval<T>& rhs)
   }
 }
 
+// Declare these here, to work around undefined-var-template warnings
+
+#define DECLARE_ARGMAX_IDENTITY(TYPE) \
+  template <>                         \
+  const Argval<TYPE> ArgmaxReduction<TYPE>::identity;
+
+#define DECLARE_ARGMIN_IDENTITY(TYPE) \
+  template <>                         \
+  const Argval<TYPE> ArgminReduction<TYPE>::identity;
+
+#define DECLARE_IDENTITIES(TYPE) \
+  DECLARE_ARGMAX_IDENTITY(TYPE)  \
+  DECLARE_ARGMIN_IDENTITY(TYPE)
+
+DECLARE_IDENTITIES(__half)
+DECLARE_IDENTITIES(float)
+DECLARE_IDENTITIES(double)
+DECLARE_IDENTITIES(bool)
+DECLARE_IDENTITIES(int8_t)
+DECLARE_IDENTITIES(int16_t)
+DECLARE_IDENTITIES(int32_t)
+DECLARE_IDENTITIES(int64_t)
+DECLARE_IDENTITIES(uint8_t)
+DECLARE_IDENTITIES(uint16_t)
+DECLARE_IDENTITIES(uint32_t)
+DECLARE_IDENTITIES(uint64_t)
+
+#undef DECLARE_IDENTITIES
+#undef DECLARE_ARGMIN_IDENTITY
+#undef DECLARE_ARGMAX_IDENTITY
+
 } // namespace cunumeric

From 46862c47c695c797beb361cc66a5ae4150491704 Mon Sep 17 00:00:00 2001
From: yimoj <130720840+yimoj@users.noreply.github.com>
Date: Thu, 20 Jul 2023 12:26:22 +0800
Subject: [PATCH 02/33] improve random bitgenerator code coverage (#990)

* improve random bitgenerator code coverage

* add error description to zipf failure

* Replace misused assert in generator with appropriate exceptions
---
 cunumeric/random/generator.py                 |  12 +-
 tests/integration/test_random_advanced.py     | 102 +++++++++++++-
 tests/integration/test_random_beta.py         |  32 +++++
 tests/integration/test_random_bitgenerator.py | 125 ++++++++++++++++++
 tests/integration/test_random_creation.py     |  32 ++++-
 tests/integration/test_random_gamma.py        |  24 ++++
 .../test_random_straightforward.py            |  41 ++++++
 7 files changed, 357 insertions(+), 11 deletions(-)

diff --git a/cunumeric/random/generator.py b/cunumeric/random/generator.py
index 6a00269dd..7145de662 100644
--- a/cunumeric/random/generator.py
+++ b/cunumeric/random/generator.py
@@ -292,9 +292,15 @@ def random(
         out: Union[ndarray, None] = None,
     ) -> ndarray:
         if out is not None:
-            if size is not None:
-                assert out.shape == size
-            assert out.dtype == dtype
+            if size is not None and out.shape != size:
+                raise ValueError(
+                    "size must match out.shape when used together"
+                )
+            if out.dtype != dtype:
+                raise TypeError(
+                    "Supplied output array has the wrong type. "
+                    "Expected {}, got {}".format(dtype, out.dtype)
+                )
         return self.bit_generator.random(size, dtype, out)
 
     def rayleigh(
diff --git a/tests/integration/test_random_advanced.py b/tests/integration/test_random_advanced.py
index ab8031344..c5ae2bc42 100644
--- a/tests/integration/test_random_advanced.py
+++ b/tests/integration/test_random_advanced.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
# +import os import sys import numpy as np @@ -20,6 +21,7 @@ import cunumeric as num +LEGATE_TEST = os.environ.get("LEGATE_TEST", None) == "1" if sys.platform == "darwin": pytestmark = pytest.mark.skip() BITGENERATOR_ARGS = [] @@ -81,7 +83,12 @@ def test_vonmises_float64(t): @pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) -def test_hypergeometric(t): +@pytest.mark.parametrize( + "ngood, nbad, nsample", + ((60, 440, 200), (20.0, 77, 1), ((3, 5, 7), 6, 22)), + ids=str, +) +def test_hypergeometric(t, ngood, nbad, nsample): bitgen = t(seed=42) gen = num.random.Generator(bitgen) N = 500 @@ -110,12 +117,24 @@ def test_geometric(t): @pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) -def test_zipf(t): +@pytest.mark.parametrize( + "s", + ( + 7.5, + pytest.param( + (1.2, 3.1415), + marks=pytest.mark.xfail, + # NumPy returns 1-dim array + # cuNumeric raises TypeError: float() argument must be a string + # or a real number, not 'tuple' + ), + ), + ids=str, +) +def test_zipf(t, s): bitgen = t(seed=42) gen = num.random.Generator(bitgen) - s = 7.5 a = gen.zipf(a=s, size=(1024 * 1024,), dtype=np.uint32) - a = np.random.zipf(s, 1024 * 1024) ref_a = np.random.zipf(s, 1024 * 1024) theo_mean = np.average(ref_a) theo_std = np.std(ref_a) @@ -173,6 +192,81 @@ def test_negative_binomial(t): assert_distribution(a, theo_mean, theo_std) +FUNC_ARGS = ( + ("binomial", (15, 0.666)), + ("negative_binomial", (15, 0.666)), + ("geometric", (0.707,)), + ("hypergeometric", (60, 440, 200)), + ("standard_t", (3.1415,)), + ("vonmises", (1.414, 3.1415)), + ("wald", (1.414, 3.1415)), + ("zipf", (7.5,)), +) + + +@pytest.mark.parametrize("func, args", FUNC_ARGS, ids=str) +@pytest.mark.parametrize("size", ((2048 * 2048), (4096,), 25535), ids=str) +def test_random_sizes(func, args, size): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(*args, size=size) + a_num = getattr(gen_num, func)(*args, size=size) + assert a_np.shape == a_num.shape + + +@pytest.mark.xfail +@pytest.mark.parametrize("func, args", FUNC_ARGS, ids=str) +def test_random_size_none(func, args): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(*args, size=None) + a_num = getattr(gen_num, func)(*args, size=None) + # cuNumeric returns singleton array + # NumPy returns scalar + assert np.ndim(a_np) == np.ndim(a_num) + + +class TestRandomErrors: + # cuNumeric zipf hangs on the invalid args when LEGATE_TEST=1 + @pytest.mark.skipif(LEGATE_TEST, reason="Test hang when LEGATE_TEST=1") + @pytest.mark.parametrize( + "dist, expected_exc", + ( + (0.77, ValueError), + (-5, ValueError), + (None, TypeError), + ((1, 5, 3), ValueError), + ), + ids=str, + ) + def test_zipf_invalid_dist(self, dist, expected_exc): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + with pytest.raises(expected_exc): + gen_np.zipf(dist) + with pytest.raises(expected_exc): + gen_num.zipf(dist) + + @pytest.mark.skipif(LEGATE_TEST, reason="Test hang when LEGATE_TEST=1") + @pytest.mark.parametrize( + "ngood, nbad, nsample", + ((200, 60, 500), ((1, 5, 7), 6, 22)), + ids=str, + ) + def test_hypergeometric_invalid_args(self, ngood, nbad, nsample): + expected_exc = ValueError + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = 
num.random.Generator(num.random.XORWOW(seed=seed)) + with pytest.raises(expected_exc): + gen_np.hypergeometric(ngood, nbad, nsample) + with pytest.raises(expected_exc): + gen_num.hypergeometric(ngood, nbad, nsample) + + if __name__ == "__main__": import sys diff --git a/tests/integration/test_random_beta.py b/tests/integration/test_random_beta.py index dc451b42e..9d922f792 100644 --- a/tests/integration/test_random_beta.py +++ b/tests/integration/test_random_beta.py @@ -133,6 +133,38 @@ def test_noncentral_f_float64(t): assert_distribution(a, theo_mean, theo_std) +FUNC_ARGS = ( + ("beta", (2.0, 5.0)), + ("f", (1.0, 48.0)), + ("logseries", (0.66,)), + ("noncentral_f", (1.0, 48.0, 1.414)), +) + + +@pytest.mark.parametrize("func, args", FUNC_ARGS, ids=str) +@pytest.mark.parametrize("size", ((2048 * 2048), (4096,), 25535), ids=str) +def test_beta_sizes(func, args, size): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(*args, size=size) + a_num = getattr(gen_num, func)(*args, size=size) + assert a_np.shape == a_num.shape + + +@pytest.mark.xfail +@pytest.mark.parametrize("func, args", FUNC_ARGS, ids=str) +def test_beta_size_none(func, args): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(*args, size=None) + a_num = getattr(gen_num, func)(*args, size=None) + # cuNumeric returns singleton array + # NumPy returns scalar + assert np.ndim(a_np) == np.ndim(a_num) + + if __name__ == "__main__": import sys diff --git a/tests/integration/test_random_bitgenerator.py b/tests/integration/test_random_bitgenerator.py index 31155d63f..58d82631e 100644 --- a/tests/integration/test_random_bitgenerator.py +++ b/tests/integration/test_random_bitgenerator.py @@ -48,6 +48,28 @@ def test_bitgenerator_type(t): print(f"DONE for type = {t}") +@pytest.mark.parametrize("size", ((2048 * 2048), (4096,), 25535), ids=str) +def test_bitgenerator_size(size): + seed = 42 + gen_np = np.random.PCG64(seed=seed) + gen_num = num.random.XORWOW(seed=seed) + a_np = gen_np.random_raw(size=size) + a_num = gen_num.random_raw(shape=size) + assert a_np.shape == a_num.shape + + +@pytest.mark.xfail +def test_bitgenerator_size_none(): + seed = 42 + gen_np = np.random.PCG64(seed=seed) + gen_num = num.random.XORWOW(seed=seed) + a_np = gen_np.random_raw(size=None) + a_num = gen_num.random_raw(shape=None) + # cuNumeric returns singleton array + # NumPy returns scalar + assert np.ndim(a_np) == np.ndim(a_num) + + @pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) def test_force_build(t): t(42, True) @@ -83,6 +105,15 @@ def test_integers_int16(t): print(f"1024*1024 sum = {a.sum()}") +@pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) +def test_integers_endpoint(t): + high = 10 + bitgen = t(seed=42) + gen = num.random.Generator(bitgen) + a = gen.integers(high, size=100, dtype=np.int16, endpoint=True) + assert np.max(a) == high + + @pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) def test_random_float32(t): # top-level random function has a different signature @@ -105,6 +136,20 @@ def test_random_float64(t): assert_distribution(a, 0.5, num.sqrt(1.0 / 12.0)) +def test_random_out_stats(): + seed = 42 + dtype = np.float32 + out_shape = (2, 3, 1) + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + out_np = np.empty(out_shape, 
dtype=dtype) + out_num = num.empty(out_shape, dtype=dtype) + gen_np.random(out=out_np, dtype=dtype) + gen_num.random(out=out_num, dtype=dtype) + assert out_np.shape == out_num.shape + assert out_np.dtype == out_np.dtype + + @pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) def test_lognormal_float32(t): bitgen = t(seed=42) @@ -192,6 +237,86 @@ def test_poisson(t): assert_distribution(a, theo_mean, theo_std) +FUNCS = ("poisson", "normal", "lognormal", "uniform") + + +@pytest.mark.parametrize("func", FUNCS, ids=str) +@pytest.mark.parametrize("size", ((2048 * 2048), (4096,), 25535), ids=str) +def test_random_sizes(func, size): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(size=size) + a_num = getattr(gen_num, func)(size=size) + assert a_np.shape == a_num.shape + + +@pytest.mark.xfail +@pytest.mark.parametrize("func", FUNCS, ids=str) +def test_random_size_none(func): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(size=None) + a_num = getattr(gen_num, func)(size=None) + # cuNumeric returns singleton array + # NumPy returns scalar + assert np.ndim(a_np) == np.ndim(a_num) + + +class TestBitGeneratorErrors: + def test_init_bitgenerator(self): + expected_exc = NotImplementedError + with pytest.raises(expected_exc): + np.random.BitGenerator() + with pytest.raises(expected_exc): + num.random.BitGenerator() + + @pytest.mark.xfail + @pytest.mark.parametrize("dtype", (np.int32, np.float128, str)) + def test_random_invalid_dtype(self, dtype): + expected_exc = TypeError + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + with pytest.raises(expected_exc): + gen_np.random(size=(1024 * 1024,), dtype=dtype) + # TypeError: Unsupported dtype dtype('int32') for random + with pytest.raises(expected_exc): + gen_num.random(size=(1024 * 1024,), dtype=dtype) + # NotImplementedError: type for random.uniform has to be float64 + # or float32 + + def test_random_out_dtype_mismatch(self): + expected_exc = TypeError + seed = 42 + dtype = np.float32 + out_shape = (3, 2, 3) + out_dtype = np.float64 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + out_np = np.empty(out_shape, dtype=out_dtype) + out_num = num.empty(out_shape, dtype=out_dtype) + with pytest.raises(expected_exc): + gen_np.random(out=out_np, dtype=dtype) + with pytest.raises(expected_exc): + gen_num.random(out=out_num, dtype=dtype) + + def test_random_out_shape_mismatch(self): + expected_exc = ValueError + seed = 42 + size = (1024 * 1024,) + out_shape = (3, 2, 3) + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + out_np = np.empty(out_shape) + out_num = num.empty(out_shape) + with pytest.raises(expected_exc): + gen_np.random(size=size, out=out_np) + with pytest.raises(expected_exc): + gen_num.random(size=size, out=out_num) + + if __name__ == "__main__": import sys diff --git a/tests/integration/test_random_creation.py b/tests/integration/test_random_creation.py index 2acd1df6b..15bf70ec1 100644 --- a/tests/integration/test_random_creation.py +++ b/tests/integration/test_random_creation.py @@ -170,9 +170,10 @@ def test_rand(shape): ] SMALL_RNG_SIZES = [5, 1024, (1, 2)] LARGE_RNG_SIZES = [10000, 
(20, 50, 4)] -ALL_RNG_SIZES = SMALL_RNG_SIZES + LARGE_RNG_SIZES +ALL_RNG_SIZES = SMALL_RNG_SIZES + LARGE_RNG_SIZES + [None] INT_DTYPES = [np.int64, np.int32, np.int16] UINT_DTYPES = [np.uint64, np.uint16, np.uint0] +FLOAT_DTYPES = [np.float16, np.float128, np.float64] @pytest.mark.parametrize("size", ALL_RNG_SIZES, ids=str) @@ -182,10 +183,10 @@ def test_randint_basic_stats(low, high, size, dtype): arr_np, arr_num = gen_random_from_both( "randint", low=low, high=high, size=size, dtype=dtype ) - assert arr_np.dtype == arr_np.dtype + assert arr_np.dtype == arr_num.dtype assert arr_np.shape == arr_num.shape assert np.min(arr_num) >= low - assert np.max(arr_num) <= high + assert np.max(arr_num) < high @pytest.mark.parametrize("low", [1024, 1025, 12345], ids=str) @@ -209,6 +210,13 @@ def test_randint_high_limit(): assert np.max(arr_num) < limit +def test_random_integers_high_limit(): + limit = 10 + arr_np, arr_num = gen_random_from_both("random_integers", 10, size=100) + assert np.max(arr_np) <= limit + assert np.max(arr_num) <= limit + + @pytest.mark.xfail(reason="cuNumeric raises NotImplementedError") @pytest.mark.parametrize( "low, high", [(3000.45, 15000), (123, 456.7), (12.3, 45.6)], ids=str @@ -295,7 +303,7 @@ def test_random_integers(low, high, size): ) -@pytest.mark.parametrize("size", ALL_RNG_SIZES, ids=str) +@pytest.mark.parametrize("size", SMALL_RNG_SIZES + LARGE_RNG_SIZES, ids=str) def test_random_sample_basic_stats(size): arr_np, arr_num = gen_random_from_both("random_sample", size=size) assert arr_np.shape == arr_num.shape @@ -317,6 +325,22 @@ def test_random_sample(size): ) +@pytest.mark.parametrize("size", SMALL_RNG_SIZES, ids=str) +def test_random_std_exponential_basic_stats(size): + arr_np, arr_num = gen_random_from_both("standard_exponential", size=size) + assert arr_np.shape == arr_num.shape + assert arr_np.dtype == arr_num.dtype + + +@pytest.mark.parametrize("size", SMALL_RNG_SIZES, ids=str) +def test_random_std_gamma_basic_stats(size): + arr_np, arr_num = gen_random_from_both( + "standard_gamma", shape=3.1415, size=size + ) + assert arr_np.shape == arr_num.shape + assert arr_np.dtype == arr_num.dtype + + class TestRandomErrors: def assert_exc_from_both(self, func, exc, *args, **kwargs): with pytest.raises(exc): diff --git a/tests/integration/test_random_gamma.py b/tests/integration/test_random_gamma.py index dbca2ddfc..69fbc2f42 100644 --- a/tests/integration/test_random_gamma.py +++ b/tests/integration/test_random_gamma.py @@ -126,6 +126,30 @@ def test_noncentral_chisquare_float64(t): assert_distribution(a, theo_mean, theo_std) +@pytest.mark.parametrize("func", ("gamma", "noncentral_chisquare"), ids=str) +@pytest.mark.parametrize("size", ((2048 * 2048), (4096,), 25535), ids=str) +def test_gamma_sizes(func, size): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(3.1415, 1.414, size=size) + a_num = getattr(gen_num, func)(3.1415, 1.414, size=size) + assert a_np.shape == a_num.shape + + +@pytest.mark.xfail +@pytest.mark.parametrize("func", ("gamma", "noncentral_chisquare"), ids=str) +def test_gamma_size_none(func): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(num.random.XORWOW(seed=seed)) + a_np = getattr(gen_np, func)(3.1415, 1.414, size=None) + a_num = getattr(gen_num, func)(3.1415, 1.414, size=None) + # cuNumeric returns singleton array + # NumPy returns scalar + assert np.ndim(a_np) == np.ndim(a_num) 
+ + if __name__ == "__main__": import sys diff --git a/tests/integration/test_random_straightforward.py b/tests/integration/test_random_straightforward.py index 584719587..b412e5341 100644 --- a/tests/integration/test_random_straightforward.py +++ b/tests/integration/test_random_straightforward.py @@ -339,6 +339,47 @@ def test_bytes(t): assert_distribution(a, theo_mean, theo_std) +FUNC_ARGS = ( + ("exponential", ()), + ("gumbel", ()), + ("laplace", ()), + ("logistic", ()), + ("pareto", (30.0,)), + ("power", (3.0,)), + ("rayleigh", (np.pi,)), + ("standard_cauchy", ()), + ("standard_exponential", ()), + ("triangular", (1.414, 2.7, 3.1415)), + ("weibull", (3.1415,)), +) + + +@pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) +@pytest.mark.parametrize("func, args", FUNC_ARGS, ids=str) +@pytest.mark.parametrize("size", ((2048 * 2048), (4096,), 25535), ids=str) +def test_beta_sizes(t, func, args, size): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(t(seed=seed)) + a_np = getattr(gen_np, func)(*args, size=size) + a_num = getattr(gen_num, func)(*args, size=size) + assert a_np.shape == a_num.shape + + +@pytest.mark.xfail +@pytest.mark.parametrize("t", BITGENERATOR_ARGS, ids=str) +@pytest.mark.parametrize("func, args", FUNC_ARGS, ids=str) +def test_beta_size_none(t, func, args): + seed = 42 + gen_np = np.random.Generator(np.random.PCG64(seed=seed)) + gen_num = num.random.Generator(t(seed=seed)) + a_np = getattr(gen_np, func)(*args, size=None) + a_num = getattr(gen_num, func)(*args, size=None) + # cuNumeric returns singleton array + # NumPy returns scalar + assert np.ndim(a_np) == np.ndim(a_num) + + if __name__ == "__main__": import sys From 5abc82ef8f0ebb7d8e854cbd2ee0dc18fcf5ad41 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Fri, 21 Jul 2023 11:59:02 -0700 Subject: [PATCH 03/33] Missing alignment on histogram call (#999) --- cunumeric/deferred.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cunumeric/deferred.py b/cunumeric/deferred.py index 6e8b6fe2c..b927c69fb 100644 --- a/cunumeric/deferred.py +++ b/cunumeric/deferred.py @@ -3626,5 +3626,6 @@ def histogram(self, src: Any, bins: Any, weights: Any) -> None: task.add_broadcast(bins_array.base) task.add_broadcast(dst_array.base) + task.add_alignment(src_array.base, weight_array.base) task.execute() From 388cd3e021147bcb084b3485713a4911a73d6cd0 Mon Sep 17 00:00:00 2001 From: Irina Demeshko Date: Fri, 21 Jul 2023 12:35:44 -0700 Subject: [PATCH 04/33] Updating version for docs (#994) * adding new version for documentation + fixing warning * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- cunumeric/_sphinxext/_comparison_config.py | 1 + docs/cunumeric/source/versions.rst | 1 + 2 files changed, 2 insertions(+) diff --git a/cunumeric/_sphinxext/_comparison_config.py b/cunumeric/_sphinxext/_comparison_config.py index 1cc539f31..dc2f71d6b 100644 --- a/cunumeric/_sphinxext/_comparison_config.py +++ b/cunumeric/_sphinxext/_comparison_config.py @@ -55,6 +55,7 @@ "size", "sometrue", "test", + "Tester", } # these do not have valid intersphinx references diff --git a/docs/cunumeric/source/versions.rst b/docs/cunumeric/source/versions.rst index 0b0073c5e..3df009689 100644 --- a/docs/cunumeric/source/versions.rst +++ b/docs/cunumeric/source/versions.rst @@ -9,3 +9,4 @@ Versions 22.10 23.01 23.03 + 23.07 From 
fa33ee5f9577d3771f735d5f0fdcea4791b34127 Mon Sep 17 00:00:00 2001 From: Marcin Zalewski Date: Wed, 26 Jul 2023 02:55:14 -0700 Subject: [PATCH 05/33] pin cuda-version (#998) (#1002) Co-authored-by: Mark Vaz --- conda/conda-build/meta.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/conda/conda-build/meta.yaml b/conda/conda-build/meta.yaml index f2035260a..81569e3a2 100644 --- a/conda/conda-build/meta.yaml +++ b/conda/conda-build/meta.yaml @@ -135,6 +135,7 @@ requirements: {% else %} - legate-core ={{ core_version }} - cuda-cudart >={{ cuda_version }},<{{ cuda_major+1 }} + - cuda-version >={{ cuda_version }},<{{ cuda_major+1 }} - cutensor >=1.3 =*_* - libcublas - libcusolver =11.4.1.48-0 From 1883b71526717f7c91711bc803887d9eb99bb5a6 Mon Sep 17 00:00:00 2001 From: Marcin Zalewski Date: Thu, 27 Jul 2023 07:59:43 -0700 Subject: [PATCH 06/33] Bump up cunumeric version and legate.core version (#1004) Bump up cunumeric version to 23.09.00 --- CMakeLists.txt | 2 +- cmake/versions.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2ac5f5604..68dfe83f6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -55,7 +55,7 @@ include(rapids-cuda) include(rapids-export) include(rapids-find) -set(cunumeric_version 23.07.00) +set(cunumeric_version 23.09.00) # For now we want the optimization flags to match on both normal make and cmake # builds so we override the cmake defaults here for release, this changes diff --git a/cmake/versions.json b/cmake/versions.json index 6c3aa0918..b5842f7c7 100644 --- a/cmake/versions.json +++ b/cmake/versions.json @@ -1,11 +1,11 @@ { "packages" : { "legate_core" : { - "version": "23.07.00", + "version": "23.09.00", "git_url" : "https://github.com/nv-legate/legate.core.git", "git_shallow": false, "always_download": false, - "git_tag" : "ac75ac05a9056f49729797415ee489b45686a528" + "git_tag" : "35d0d4bd3c9d19946ccc641ec7074b44bbae8046" } } } From c08bacf09f7cecd57272b5f2cef891bb0c678da1 Mon Sep 17 00:00:00 2001 From: Marcin Zalewski Date: Mon, 31 Jul 2023 04:18:09 -0700 Subject: [PATCH 07/33] Fix file name (#1008) --- .github/workflows/ci-gh.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-gh.yml b/.github/workflows/ci-gh.yml index eeb38e29d..a81802f9d 100644 --- a/.github/workflows/ci-gh.yml +++ b/.github/workflows/ci-gh.yml @@ -76,7 +76,7 @@ jobs: - name: Create conda env shell: su coder {0} - run: cd ~/; exec entrypoint make-conda-env; + run: cd ~/; exec entrypoint get-yaml-and-make-conda-env; - name: Build legate.core C++ library shell: su coder {0} From 7d882df8b7714021dc5b37131049a59c8d66f0f7 Mon Sep 17 00:00:00 2001 From: Rohan Yadav Date: Tue, 1 Aug 2023 13:56:02 -0700 Subject: [PATCH 08/33] src/cunumeric: add missing openmp variants to BitGenerator and UniqueReduce (#1010) * src/cunumeric: add missing openmp variants to BitGenerator and UniqueReduce * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Full OpenMP implementation for unique_reduce * Leave a note to do a full OpenMP implementation --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Manolis Papadakis --- cunumeric_cpp.cmake | 3 +- src/cunumeric/mapper.cc | 3 +- src/cunumeric/random/bitgenerator.h | 5 +++ src/cunumeric/set/unique_reduce.cc | 26 +----------- src/cunumeric/set/unique_reduce.h | 3 ++ src/cunumeric/set/unique_reduce_omp.cc | 29 ++++++++++++++ 
src/cunumeric/set/unique_reduce_template.inl | 42 ++++++++++++++------ 7 files changed, 71 insertions(+), 40 deletions(-) create mode 100644 src/cunumeric/set/unique_reduce_omp.cc diff --git a/cunumeric_cpp.cmake b/cunumeric_cpp.cmake index f2353be06..4270962ba 100644 --- a/cunumeric_cpp.cmake +++ b/cunumeric_cpp.cmake @@ -212,10 +212,11 @@ if(Legion_USE_OpenMP) src/cunumeric/search/argwhere_omp.cc src/cunumeric/search/nonzero_omp.cc src/cunumeric/set/unique_omp.cc + src/cunumeric/set/unique_reduce_omp.cc src/cunumeric/stat/bincount_omp.cc src/cunumeric/convolution/convolve_omp.cc src/cunumeric/transform/flip_omp.cc - src/cunumeric/stat/histogram_omp.cc + src/cunumeric/stat/histogram_omp.cc ) endif() diff --git a/src/cunumeric/mapper.cc b/src/cunumeric/mapper.cc index 7a1067584..247ded4fd 100644 --- a/src/cunumeric/mapper.cc +++ b/src/cunumeric/mapper.cc @@ -108,7 +108,8 @@ std::vector CuNumericMapper::store_mappings( return {}; } case CUNUMERIC_MATMUL: - case CUNUMERIC_MATVECMUL: { + case CUNUMERIC_MATVECMUL: + case CUNUMERIC_UNIQUE_REDUCE: { // TODO: Our actual requirements are a little less strict than this; we require each array or // vector to have a stride of 1 on at least one dimension. std::vector mappings; diff --git a/src/cunumeric/random/bitgenerator.h b/src/cunumeric/random/bitgenerator.h index 4a4f9a9de..262767856 100644 --- a/src/cunumeric/random/bitgenerator.h +++ b/src/cunumeric/random/bitgenerator.h @@ -84,6 +84,11 @@ class BitGeneratorTask : public CuNumericTask { public: static void cpu_variant(legate::TaskContext& context); +#ifdef LEGATE_USE_OPENMP + // TODO: Fully parallelized OpenMP implementation for BitGenerator + // Doing it this way is safe, but only one thread is being used out of the OpenMP pool. + static void omp_variant(legate::TaskContext& context) { BitGeneratorTask::cpu_variant(context); } +#endif #ifdef LEGATE_USE_CUDA static void gpu_variant(legate::TaskContext& context); #endif diff --git a/src/cunumeric/set/unique_reduce.cc b/src/cunumeric/set/unique_reduce.cc index 29442e371..5be7f7160 100644 --- a/src/cunumeric/set/unique_reduce.cc +++ b/src/cunumeric/set/unique_reduce.cc @@ -19,33 +19,9 @@ namespace cunumeric { -using namespace legate; - -template -struct UniqueReduceImplBody { - using VAL = legate_type_of; - - void operator()(Array& output, const std::vector, Rect<1>>>& inputs) - { - std::set dedup_set; - - for (auto& pair : inputs) { - auto& input = pair.first; - auto& shape = pair.second; - for (coord_t idx = shape.lo[0]; idx <= shape.hi[0]; ++idx) dedup_set.insert(input[idx]); - } - - size_t size = dedup_set.size(); - size_t pos = 0; - auto result = output.create_output_buffer(Point<1>(size), true); - - for (auto e : dedup_set) result[pos++] = e; - } -}; - /*static*/ void UniqueReduceTask::cpu_variant(TaskContext& context) { - unique_reduce_template(context); + unique_reduce_template(context, thrust::host); } namespace // unnamed diff --git a/src/cunumeric/set/unique_reduce.h b/src/cunumeric/set/unique_reduce.h index a87807446..05f90bfd7 100644 --- a/src/cunumeric/set/unique_reduce.h +++ b/src/cunumeric/set/unique_reduce.h @@ -26,6 +26,9 @@ class UniqueReduceTask : public CuNumericTask { public: static void cpu_variant(legate::TaskContext& context); +#ifdef LEGATE_USE_OPENMP + static void omp_variant(legate::TaskContext& context); +#endif }; } // namespace cunumeric diff --git a/src/cunumeric/set/unique_reduce_omp.cc b/src/cunumeric/set/unique_reduce_omp.cc new file mode 100644 index 000000000..825b93379 --- /dev/null +++ 
b/src/cunumeric/set/unique_reduce_omp.cc @@ -0,0 +1,29 @@ +/* Copyright 2022 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#include "cunumeric/set/unique_reduce.h" +#include "cunumeric/set/unique_reduce_template.inl" + +#include + +namespace cunumeric { + +/*static*/ void UniqueReduceTask::omp_variant(TaskContext& context) +{ + unique_reduce_template(context, thrust::omp::par); +} + +} // namespace cunumeric diff --git a/src/cunumeric/set/unique_reduce_template.inl b/src/cunumeric/set/unique_reduce_template.inl index 5a6a3aab0..c38d289eb 100644 --- a/src/cunumeric/set/unique_reduce_template.inl +++ b/src/cunumeric/set/unique_reduce_template.inl @@ -20,38 +20,54 @@ #include "cunumeric/set/unique_reduce.h" #include "cunumeric/pitches.h" +#include +#include +#include +#include + namespace cunumeric { using namespace legate; -template -struct UniqueReduceImplBody; - -template +template struct UniqueReduceImpl { template - void operator()(Array& output, std::vector& input_arrs) + void operator()(Array& output, std::vector& input_arrs, const exe_pol_t& exe_pol) { using VAL = legate_type_of; - std::vector, Rect<1>>> inputs; - + size_t res_size = 0; for (auto& input_arr : input_arrs) { auto shape = input_arr.shape<1>(); - auto acc = input_arr.read_accessor(shape); - inputs.push_back(std::make_pair(acc, shape)); + res_size += shape.hi[0] - shape.lo[0] + 1; + } + auto result = output.create_output_buffer(Point<1>(res_size)); + VAL* res_ptr = result.ptr(0); + + size_t offset = 0; + for (auto& input_arr : input_arrs) { + size_t strides[1]; + Rect<1> shape = input_arr.shape<1>(); + size_t volume = shape.volume(); + const VAL* in_ptr = input_arr.read_accessor(shape).ptr(shape, strides); + assert(shape.volume() <= 1 || strides[0] == 1); + thrust::copy(exe_pol, in_ptr, in_ptr + volume, res_ptr + offset); + offset += volume; } + assert(offset == res_size); - UniqueReduceImplBody()(output, inputs); + thrust::sort(exe_pol, res_ptr, res_ptr + res_size); + VAL* actual_end = thrust::unique(exe_pol, res_ptr, res_ptr + res_size); + output.bind_data(result, Point<1>(actual_end - res_ptr)); } }; -template -static void unique_reduce_template(TaskContext& context) +template +static void unique_reduce_template(TaskContext& context, const exe_pol_t& exe_pol) { auto& inputs = context.inputs(); auto& output = context.outputs()[0]; - type_dispatch(output.code(), UniqueReduceImpl{}, output, inputs); + type_dispatch(output.code(), UniqueReduceImpl{}, output, inputs, exe_pol); } } // namespace cunumeric From 89dd7a58050d52f1f9b9fd423f8f8c93a8c262cd Mon Sep 17 00:00:00 2001 From: yimoj <130720840+yimoj@users.noreply.github.com> Date: Wed, 2 Aug 2023 08:45:41 +0800 Subject: [PATCH 09/33] Improve fft code coverage (#1013) --- tests/integration/test_fft_c2c.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tests/integration/test_fft_c2c.py b/tests/integration/test_fft_c2c.py index a202e253a..38ae7a1cb 100644 --- a/tests/integration/test_fft_c2c.py +++ 
b/tests/integration/test_fft_c2c.py @@ -18,6 +18,7 @@ import numpy as np import pytest from utils.comparisons import allclose as _allclose +from utils.generators import mk_0to1_array import cunumeric as num @@ -239,6 +240,31 @@ def test_4d(): check_4d_c2c(N=(9, 10, 11, 12), dtype=np.float32) +@pytest.mark.parametrize( + "dtype", + ( + np.complex64, + np.float32, + np.float64, + pytest.param(np.int32, marks=pytest.mark.xfail), + pytest.param(np.uint64, marks=pytest.mark.xfail), + pytest.param(np.float16, marks=pytest.mark.xfail), + # NumPy accepts the dtypes + # cuNumeric raises + # TypeError: FFT input not supported (missing a conversion?) + ), + ids=str, +) +@pytest.mark.parametrize("func", ("fftn", "ifftn"), ids=str) +def test_fftn_dtype(dtype, func): + shape = (3, 2, 4) + arr_np = mk_0to1_array(np, shape, dtype=dtype) + arr_num = mk_0to1_array(num, shape, dtype=dtype) + out_np = getattr(np.fft, func)(arr_np) + out_num = getattr(num.fft, func)(arr_num) + assert allclose(out_np, out_num) + + if __name__ == "__main__": import sys From 04ff6d1c8c2987c186054b793756e392974cc88d Mon Sep 17 00:00:00 2001 From: Andrei Schaffer <37386037+aschaffer@users.noreply.github.com> Date: Tue, 1 Aug 2023 22:00:49 -0500 Subject: [PATCH 10/33] Histogram refactor (#1003) * Unification GPU/CPU/OMP refactoring. * Unification GPU/CPU/OMP refactoring. Clean-up. * SFINAE on decltype(exe_pol_obj). * Added synchronization policy to remove some possible artificial code dependencies. --- src/cunumeric/stat/histogram.cc | 61 +--------------- src/cunumeric/stat/histogram.cu | 107 +--------------------------- src/cunumeric/stat/histogram.cuh | 17 +++-- src/cunumeric/stat/histogram_cpu.h | 20 ++++-- src/cunumeric/stat/histogram_gen.h | 8 +++ src/cunumeric/stat/histogram_impl.h | 65 +++++++++++++++++ src/cunumeric/stat/histogram_omp.cc | 46 ++---------- 7 files changed, 109 insertions(+), 215 deletions(-) diff --git a/src/cunumeric/stat/histogram.cc b/src/cunumeric/stat/histogram.cc index 00b648b70..7c0caa6b7 100644 --- a/src/cunumeric/stat/histogram.cc +++ b/src/cunumeric/stat/histogram.cc @@ -55,65 +55,10 @@ struct HistogramImplBody { const AccessorRD, true, 1>& result, const Rect<1>& result_rect) const { - namespace det_acc = detail::accessors; + auto exe_pol = thrust::host; - auto exe_pol = thrust::host; - auto&& [src_size, src_copy, src_ptr] = det_acc::make_accessor_copy(exe_pol, src, src_rect); - - auto&& [weights_size, weights_copy, weights_ptr] = - det_acc::make_accessor_copy(exe_pol, weights, weights_rect); - - assert(weights_size == src_size); - - auto&& [bins_size, bins_ptr] = det_acc::get_accessor_ptr(bins, bins_rect); - - auto num_intervals = bins_size - 1; - Buffer local_result = create_buffer(num_intervals); - - WeightType* local_result_ptr = local_result.ptr(0); - - auto&& [global_result_size, global_result_ptr] = det_acc::get_accessor_ptr(result, result_rect); - -#ifdef _DEBUG - std::cout << "echo src, bins, weights:\n"; - - std::copy_n(src_copy.ptr(0), src_size, std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; - - std::copy_n(bins_ptr, num_intervals + 1, std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; - - std::copy_n(weights_copy.ptr(0), src_size, std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; - -#endif - detail::histogram_weights(exe_pol, - src_copy.ptr(0), - src_size, - bins_ptr, - num_intervals, - local_result_ptr, - weights_copy.ptr(0)); - - // fold into RD result: - // - assert(num_intervals == global_result_size); - -#ifdef _DEBUG - std::cout << "result:\n"; 
- - std::copy_n( - local_result_ptr, num_intervals, std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; -#endif - - thrust::transform( - exe_pol, - local_result_ptr, - local_result_ptr + num_intervals, - global_result_ptr, - global_result_ptr, - [](auto local_value, auto global_value) { return local_value + global_value; }); + detail::histogram_wrapper( + exe_pol, src, src_rect, bins, bins_rect, weights, weights_rect, result, result_rect); } }; diff --git a/src/cunumeric/stat/histogram.cu b/src/cunumeric/stat/histogram.cu index cc2eade76..f43fe84d6 100644 --- a/src/cunumeric/stat/histogram.cu +++ b/src/cunumeric/stat/histogram.cu @@ -60,115 +60,12 @@ struct HistogramImplBody { const AccessorRD, true, 1>& result, const Rect<1>& result_rect) const { - namespace det_acc = detail::accessors; - auto stream = get_cached_stream(); cudaStream_t stream_ = static_cast(stream); auto exe_pol = DEFAULT_POLICY.on(stream); - auto&& [src_size, src_copy, src_ptr] = det_acc::make_accessor_copy(exe_pol, src, src_rect); - - auto&& [weights_size, weights_copy, weights_ptr] = - det_acc::make_accessor_copy(exe_pol, weights, weights_rect); - - assert(weights_size == src_size); - - auto&& [bins_size, bins_ptr] = det_acc::get_accessor_ptr(bins, bins_rect); - - auto num_intervals = bins_size - 1; - Buffer local_result = create_buffer(num_intervals); - - WeightType* local_result_ptr = local_result.ptr(0); - - auto&& [global_result_size, global_result_ptr] = det_acc::get_accessor_ptr(result, result_rect); - - CHECK_CUDA_STREAM(stream); - -#ifdef _DEBUG - { - // std::vector: proxy issues; use thrust::host_vector, instead - // - thrust::host_vector v_src(src_size, 0); - VAL* v_src_ptr = v_src.data(); - - CHECK_CUDA(cudaMemcpyAsync( - v_src_ptr, src_ptr, src_size * sizeof(VAL), cudaMemcpyDeviceToHost, stream)); - - thrust::host_vector v_weights(weights_size, 0); - CHECK_CUDA(cudaMemcpyAsync(&v_weights[0], - weights_ptr, - weights_size * sizeof(WeightType), - cudaMemcpyDeviceToHost, - stream)); - - thrust::host_vector v_bins(bins_size, 0); - CHECK_CUDA(cudaMemcpyAsync( - &v_bins[0], bins_ptr, bins_size * sizeof(BinType), cudaMemcpyDeviceToHost, stream)); - - CHECK_CUDA(cudaStreamSynchronize(stream)); - - std::cout << "echo src, bins, weights:\n"; - - // doesn't compile with __half: - // - // using alias_val_t = typename decltype(v_src)::value_type; - // std::copy(v_src.begin(), v_src.end(), std::ostream_iterator{std::cout, ", "}); - - for (auto&& src_val : v_src) { std::cout << static_cast(src_val) << ", "; } - std::cout << "\n"; - - std::copy(v_bins.begin(), v_bins.end(), std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; - - std::copy( - v_weights.begin(), v_weights.end(), std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; - } -#endif - - detail::histogram_weights(exe_pol, - src_copy.ptr(0), - src_size, - bins_ptr, - num_intervals, - local_result_ptr, - weights_copy.ptr(0), - stream_); - - CHECK_CUDA_STREAM(stream); - - // fold into RD result: - // - assert(num_intervals == global_result_size); - -#ifdef _DEBUG - { - std::cout << "local result:\n"; - - thrust::host_vector v_result(num_intervals, 0); - CHECK_CUDA(cudaMemcpyAsync(&v_result[0], - local_result_ptr, - num_intervals * sizeof(WeightType), - cudaMemcpyDeviceToHost, - stream)); - - CHECK_CUDA(cudaStreamSynchronize(stream)); - - std::copy( - v_result.begin(), v_result.end(), std::ostream_iterator{std::cout, ", "}); - std::cout << "\n"; - } -#endif - - thrust::transform( - exe_pol, - local_result_ptr, - local_result_ptr + 
num_intervals, - global_result_ptr, - global_result_ptr, - [] __device__(auto local_value, auto global_value) { return local_value + global_value; }); - - CHECK_CUDA_STREAM(stream); + detail::histogram_wrapper( + exe_pol, src, src_rect, bins, bins_rect, weights, weights_rect, result, result_rect, stream_); } }; diff --git a/src/cunumeric/stat/histogram.cuh b/src/cunumeric/stat/histogram.cuh index e1bc2ee9b..9c7bd74c4 100644 --- a/src/cunumeric/stat/histogram.cuh +++ b/src/cunumeric/stat/histogram.cuh @@ -48,12 +48,10 @@ namespace detail { // device specialization: // template -struct segmented_sum_t< - exe_policy_t, - weight_t, - offset_t, - std::enable_if_t && - !std::is_same_v>> { +struct segmented_sum_t>> { segmented_sum_t(exe_policy_t exe_pol, weight_t const* p_weights, size_t n_samples, @@ -106,5 +104,12 @@ struct segmented_sum_t< allocator_t alloc_scratch_{}; }; +template +struct sync_policy_t>> { + sync_policy_t() {} + + void operator()(cudaStream_t stream) { CHECK_CUDA_STREAM(stream); } +}; + } // namespace detail } // namespace cunumeric diff --git a/src/cunumeric/stat/histogram_cpu.h b/src/cunumeric/stat/histogram_cpu.h index c4292e709..cd0f4304b 100644 --- a/src/cunumeric/stat/histogram_cpu.h +++ b/src/cunumeric/stat/histogram_cpu.h @@ -41,12 +41,10 @@ namespace detail { // host specialization: // template -struct segmented_sum_t< - exe_policy_t, - weight_t, - offset_t, - std::enable_if_t || - std::is_same_v>> { +struct segmented_sum_t>> { segmented_sum_t(exe_policy_t exe_pol, weight_t const* p_weights, size_t n_samples, @@ -83,5 +81,15 @@ struct segmented_sum_t< offset_t* ptr_offsets_{nullptr}; }; +template +struct sync_policy_t>> { + sync_policy_t(void) {} + + void operator()(cudaStream_t stream) + { + // purposely empty: there's nothing to sync on host + } +}; + } // namespace detail } // namespace cunumeric diff --git a/src/cunumeric/stat/histogram_gen.h b/src/cunumeric/stat/histogram_gen.h index ec7c69d21..84f8a620b 100644 --- a/src/cunumeric/stat/histogram_gen.h +++ b/src/cunumeric/stat/histogram_gen.h @@ -23,6 +23,14 @@ namespace detail { template struct segmented_sum_t; +template +inline constexpr bool is_host_policy_v = + std::is_same_v> || + std::is_same_v>; + +template +struct sync_policy_t; + namespace accessors { template diff --git a/src/cunumeric/stat/histogram_impl.h b/src/cunumeric/stat/histogram_impl.h index fbb428d46..d397f6dd8 100644 --- a/src/cunumeric/stat/histogram_impl.h +++ b/src/cunumeric/stat/histogram_impl.h @@ -62,6 +62,14 @@ struct lower_bound_op_t { size_t n_intervs_; }; +template +struct reduction_op_t { + __host__ __device__ weight_t operator()(weight_t local_value, weight_t global_value) + { + return local_value + global_value; + } +}; + template +void histogram_wrapper(exe_policy_t exe_pol, + const AccessorRO& src, + const Rect<1>& src_rect, + const AccessorRO& bins, + const Rect<1>& bins_rect, + const AccessorRO& weights, + const Rect<1>& weights_rect, + const AccessorRD, true, 1>& result, + const Rect<1>& result_rect, + cudaStream_t stream = nullptr) +{ + auto&& [src_size, src_copy, src_ptr] = accessors::make_accessor_copy(exe_pol, src, src_rect); + + auto&& [weights_size, weights_copy, weights_ptr] = + accessors::make_accessor_copy(exe_pol, weights, weights_rect); + + assert(weights_size == src_size); + + auto&& [bins_size, bins_ptr] = accessors::get_accessor_ptr(bins, bins_rect); + + auto num_intervals = bins_size - 1; + Buffer local_result = create_buffer(num_intervals); + + weight_t* local_result_ptr = local_result.ptr(0); + + auto&& 
[global_result_size, global_result_ptr] = accessors::get_accessor_ptr(result, result_rect); + + sync_policy_t synchronizer; + + synchronizer(stream); + + histogram_weights(exe_pol, + src_copy.ptr(0), + src_size, + bins_ptr, + num_intervals, + local_result_ptr, + weights_copy.ptr(0), + stream); + + synchronizer(stream); + + // fold into RD result: + // + assert(num_intervals == global_result_size); + + thrust::transform(exe_pol, + local_result_ptr, + local_result_ptr + num_intervals, + global_result_ptr, + global_result_ptr, + reduction_op_t{}); + + synchronizer(stream); +} + } // namespace detail } // namespace cunumeric diff --git a/src/cunumeric/stat/histogram_omp.cc b/src/cunumeric/stat/histogram_omp.cc index 1b5657730..b68d7f81e 100644 --- a/src/cunumeric/stat/histogram_omp.cc +++ b/src/cunumeric/stat/histogram_omp.cc @@ -55,48 +55,14 @@ struct HistogramImplBody { const AccessorRD, true, 1>& result, const Rect<1>& result_rect) const { - namespace det_acc = detail::accessors; - - auto exe_pol = thrust::omp::par; - auto&& [global_result_size, global_result_ptr] = det_acc::get_accessor_ptr(result, result_rect); - + auto exe_pol = thrust::omp::par; #ifdef _USE_THRUST_ - auto&& [src_size, src_copy, src_ptr] = det_acc::make_accessor_copy(exe_pol, src, src_rect); - - auto&& [weights_size, weights_copy, weights_ptr] = - det_acc::make_accessor_copy(exe_pol, weights, weights_rect); - - assert(weights_size == src_size); - - auto&& [bins_size, bins_ptr] = det_acc::get_accessor_ptr(bins, bins_rect); - - auto num_intervals = bins_size - 1; - Buffer local_result = create_buffer(num_intervals); - - WeightType* local_result_ptr = local_result.ptr(0); - - detail::histogram_weights(exe_pol, - src_copy.ptr(0), - src_size, - bins_ptr, - num_intervals, - local_result_ptr, - weights_copy.ptr(0), - nullptr); - - // fold into RD result: - // - assert(num_intervals == global_result_size); - - thrust::transform( - exe_pol, - local_result_ptr, - local_result_ptr + num_intervals, - global_result_ptr, - global_result_ptr, - [](auto local_value, auto global_value) { return local_value + global_value; }); + detail::histogram_wrapper( + exe_pol, src, src_rect, bins, bins_rect, weights, weights_rect, result, result_rect); #else - auto&& [src_size, src_ptr] = det_acc::get_accessor_ptr(src, src_rect); + namespace det_acc = detail::accessors; + auto&& [global_result_size, global_result_ptr] = det_acc::get_accessor_ptr(result, result_rect); + auto&& [src_size, src_ptr] = det_acc::get_accessor_ptr(src, src_rect); auto&& [weights_size, weights_ptr] = det_acc::get_accessor_ptr(weights, weights_rect); From d54a3dd6fa22fc1d056071176855e5ee493e5ba4 Mon Sep 17 00:00:00 2001 From: Irina Demeshko Date: Tue, 1 Aug 2023 22:03:19 -0700 Subject: [PATCH 11/33] Fix for control replication violation in test (#1005) * removing the use of python's random * seed python's random to avoid violation of control replication * removing python's random seed from the BOILERPLATE --- tests/integration/test_advanced_indexing.py | 4 +--- tests/integration/test_index_routines.py | 5 ++--- tests/integration/test_indices.py | 10 ++++------ tests/integration/test_prod.py | 6 ++---- tests/integration/test_reduction.py | 6 ++---- tests/integration/test_trace.py | 3 +-- tests/integration/utils/generators.py | 5 ++--- 7 files changed, 14 insertions(+), 25 deletions(-) diff --git a/tests/integration/test_advanced_indexing.py b/tests/integration/test_advanced_indexing.py index 12bc12dcb..fa82b4271 100644 --- a/tests/integration/test_advanced_indexing.py +++ 
b/tests/integration/test_advanced_indexing.py @@ -13,8 +13,6 @@ # limitations under the License. # -import random - import numpy as np import pytest from legate.core import LEGATE_MAX_DIM @@ -920,7 +918,7 @@ def test(): # we do less than LEGATE_MAX_DIM becasue the dimension will be increased by # 1 when passig 2d index array for ndim in range(2, LEGATE_MAX_DIM): - a_shape = tuple(random.randint(2, 5) for i in range(ndim)) + a_shape = tuple(np.random.randint(2, 5) for i in range(ndim)) np_array = mk_seq_array(np, a_shape) num_array = mk_seq_array(num, a_shape) # check when N of index arrays == N of dims diff --git a/tests/integration/test_index_routines.py b/tests/integration/test_index_routines.py index 9925ed1fc..a89534ebe 100644 --- a/tests/integration/test_index_routines.py +++ b/tests/integration/test_index_routines.py @@ -13,7 +13,6 @@ # limitations under the License. # -import random from itertools import permutations import numpy as np @@ -279,7 +278,7 @@ def test_diagonal(): # test diagonal for ndim in range(2, LEGATE_MAX_DIM + 1): - a_shape = tuple(random.randint(1, 9) for i in range(ndim)) + a_shape = tuple(np.random.randint(1, 9) for i in range(ndim)) np_array = mk_seq_array(np, a_shape) num_array = mk_seq_array(num, a_shape) @@ -294,7 +293,7 @@ def test_diagonal(): # test for diagonal_helper for ndim in range(3, LEGATE_MAX_DIM + 1): - a_shape = tuple(random.randint(1, 9) for i in range(ndim)) + a_shape = tuple(np.random.randint(1, 9) for i in range(ndim)) np_array = mk_seq_array(np, a_shape) num_array = mk_seq_array(num, a_shape) for num_axes in range(3, ndim + 1): diff --git a/tests/integration/test_indices.py b/tests/integration/test_indices.py index d19fa2ebd..2de03c01b 100644 --- a/tests/integration/test_indices.py +++ b/tests/integration/test_indices.py @@ -13,8 +13,6 @@ # limitations under the License. 
# -import random - import numpy as np import pytest from legate.core import LEGATE_MAX_DIM @@ -79,7 +77,7 @@ def test_indices_zero(self, dimensions): @pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM)) def test_indices_basic(self, ndim): - dimensions = tuple(random.randint(1, 5) for _ in range(ndim)) + dimensions = tuple(np.random.randint(1, 5) for _ in range(ndim)) np_res = np.indices(dimensions) num_res = num.indices(dimensions) @@ -87,7 +85,7 @@ def test_indices_basic(self, ndim): @pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM)) def test_indices_dtype_none(self, ndim): - dimensions = tuple(random.randint(1, 5) for _ in range(ndim)) + dimensions = tuple(np.random.randint(1, 5) for _ in range(ndim)) np_res = np.indices(dimensions, dtype=None) num_res = num.indices(dimensions, dtype=None) @@ -95,14 +93,14 @@ def test_indices_dtype_none(self, ndim): @pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM)) def test_indices_dtype_float(self, ndim): - dimensions = tuple(random.randint(1, 5) for _ in range(ndim)) + dimensions = tuple(np.random.randint(1, 5) for _ in range(ndim)) np_res = np.indices(dimensions, dtype=float) num_res = num.indices(dimensions, dtype=float) assert np.array_equal(np_res, num_res) @pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM)) def test_indices_sparse(self, ndim): - dimensions = tuple(random.randint(1, 5) for _ in range(ndim)) + dimensions = tuple(np.random.randint(1, 5) for _ in range(ndim)) np_res = np.indices(dimensions, sparse=True) num_res = num.indices(dimensions, sparse=True) for i in range(len(np_res)): diff --git a/tests/integration/test_prod.py b/tests/integration/test_prod.py index ef3b217ce..ee6121328 100644 --- a/tests/integration/test_prod.py +++ b/tests/integration/test_prod.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import random - import numpy as np import pytest from utils.comparisons import allclose @@ -144,7 +142,7 @@ def test_initial_empty_array(self): size = (1, 0) arr_np = np.random.random(size) * 10 arr_num = num.array(arr_np) - initial_value = random.uniform(-20.0, 20.0) + initial_value = np.random.uniform(-20.0, 20.0) out_num = num.prod(arr_num, initial=initial_value) out_np = np.prod(arr_np, initial=initial_value) assert allclose(out_np, out_num) @@ -330,7 +328,7 @@ def test_axis_keepdims_true(self, size): def test_initial(self, size): arr_np = np.random.random(size) * 10 arr_num = num.array(arr_np) - initial_value = random.uniform(-20.0, 20.0) + initial_value = np.random.uniform(-20.0, 20.0) out_num = num.prod(arr_num, initial=initial_value) out_np = np.prod(arr_np, initial=initial_value) diff --git a/tests/integration/test_reduction.py b/tests/integration/test_reduction.py index e823040cb..233e79439 100644 --- a/tests/integration/test_reduction.py +++ b/tests/integration/test_reduction.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -import random - import numpy as np import pytest from utils.comparisons import allclose @@ -148,7 +146,7 @@ def test_initial_empty_array(self): size = (1, 0) arr_np = np.random.random(size) * 10 arr_num = num.array(arr_np) - initial_value = random.uniform(-20.0, 20.0) + initial_value = np.random.uniform(-20.0, 20.0) out_num = num.sum(arr_num, initial=initial_value) # return 0.0 out_np = np.sum(arr_np, initial=initial_value) # return initial_value assert allclose(out_np, out_num) @@ -289,7 +287,7 @@ def test_axis_keepdims(self, size, keepdims): def test_initial(self, size): arr_np = np.random.random(size) * 10 arr_num = num.array(arr_np) - initial_value = random.uniform(-20.0, 20.0) + initial_value = np.random.uniform(-20.0, 20.0) out_num = num.sum(arr_num, initial=initial_value) out_np = np.sum(arr_np, initial=initial_value) diff --git a/tests/integration/test_trace.py b/tests/integration/test_trace.py index 9f40739ff..6c6d07049 100644 --- a/tests/integration/test_trace.py +++ b/tests/integration/test_trace.py @@ -13,7 +13,6 @@ # limitations under the License. # -import random from itertools import permutations import numpy as np @@ -72,7 +71,7 @@ def test_4d(): @pytest.mark.parametrize("ndim", range(2, LEGATE_MAX_DIM + 1)) def test_ndim(ndim): - a_shape = tuple(random.randint(1, 9) for i in range(ndim)) + a_shape = tuple(np.random.randint(1, 9) for i in range(ndim)) np_array = mk_seq_array(np, a_shape) num_array = mk_seq_array(num, a_shape) diff --git a/tests/integration/utils/generators.py b/tests/integration/utils/generators.py index 90a4407ca..6227a66ce 100644 --- a/tests/integration/utils/generators.py +++ b/tests/integration/utils/generators.py @@ -13,7 +13,6 @@ # limitations under the License. # -import random from itertools import permutations, product import numpy as np @@ -102,6 +101,6 @@ def generate_item(ndim): Generates item location for ndarray.item and ndarray.itemset """ max_index = pow(4, ndim) - 1 - random_index = random.randint(-1, max_index) - random_tuple = tuple(random.randint(0, 3) for i in range(0, ndim)) + random_index = np.random.randint(-1, max_index) + random_tuple = tuple(np.random.randint(0, 3) for i in range(0, ndim)) return [random_index, max_index, random_tuple] From f89dd2ad1fe7eed43f6bdcdb8b2eb848479701d4 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Wed, 2 Aug 2023 09:53:30 -0700 Subject: [PATCH 12/33] fix build instrutions link (#1014) --- docs/cunumeric/source/developer/building.rst | 2 ++ docs/cunumeric/source/user/installation.rst | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/cunumeric/source/developer/building.rst b/docs/cunumeric/source/developer/building.rst index 5304f6862..674c9f34f 100644 --- a/docs/cunumeric/source/developer/building.rst +++ b/docs/cunumeric/source/developer/building.rst @@ -1,3 +1,5 @@ +.. _building cunumeric from source: + Building from source ==================== diff --git a/docs/cunumeric/source/user/installation.rst b/docs/cunumeric/source/user/installation.rst index cb9eb6287..8c496c84f 100644 --- a/docs/cunumeric/source/user/installation.rst +++ b/docs/cunumeric/source/user/installation.rst @@ -14,6 +14,6 @@ The default package contains GPU support, and is compatible with CUDA >= 11.4 also CPU-only packages available, and will be automatically selected by ``conda`` when installing on a machine without GPUs. -See :ref:`building-from-source` for instructions on building cuNumeric manually. +See :ref:`building cunumeric from source` for instructions on building cuNumeric manually. .. 
_from conda: https://anaconda.org/legate/cunumeric \ No newline at end of file From e6a33ce01b697be93ffd2e7fd7235d532b06450c Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Wed, 2 Aug 2023 12:52:47 -0700 Subject: [PATCH 13/33] Various test fixes (#1015) * Skip advanced RNG tests on any setup that's missing cuRand * Don't compare arrays created with np.empty They contain uninitialized data, that may be equivalent to NaN when interpreted as a floating-point number, and np.array_equal considers that NaN != NaN. --- tests/integration/test_random_advanced.py | 3 +-- tests/integration/test_random_beta.py | 3 +-- tests/integration/test_random_bitgenerator.py | 3 +-- tests/integration/test_random_gamma.py | 3 +-- tests/integration/test_random_straightforward.py | 3 +-- tests/integration/test_reshape.py | 2 +- 6 files changed, 6 insertions(+), 11 deletions(-) diff --git a/tests/integration/test_random_advanced.py b/tests/integration/test_random_advanced.py index c5ae2bc42..242b01821 100644 --- a/tests/integration/test_random_advanced.py +++ b/tests/integration/test_random_advanced.py @@ -13,7 +13,6 @@ # limitations under the License. # import os -import sys import numpy as np import pytest @@ -22,7 +21,7 @@ import cunumeric as num LEGATE_TEST = os.environ.get("LEGATE_TEST", None) == "1" -if sys.platform == "darwin": +if not num.runtime.has_curand: pytestmark = pytest.mark.skip() BITGENERATOR_ARGS = [] else: diff --git a/tests/integration/test_random_beta.py b/tests/integration/test_random_beta.py index 9d922f792..6dc8fd0dd 100644 --- a/tests/integration/test_random_beta.py +++ b/tests/integration/test_random_beta.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import sys import numpy as np import pytest @@ -20,7 +19,7 @@ import cunumeric as num -if sys.platform == "darwin": +if not num.runtime.has_curand: pytestmark = pytest.mark.skip() BITGENERATOR_ARGS = [] else: diff --git a/tests/integration/test_random_bitgenerator.py b/tests/integration/test_random_bitgenerator.py index 58d82631e..09e1dab4a 100644 --- a/tests/integration/test_random_bitgenerator.py +++ b/tests/integration/test_random_bitgenerator.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import sys import numpy as np import pytest @@ -20,7 +19,7 @@ import cunumeric as num -if sys.platform == "darwin": +if not num.runtime.has_curand: pytestmark = pytest.mark.skip() BITGENERATOR_ARGS = [] else: diff --git a/tests/integration/test_random_gamma.py b/tests/integration/test_random_gamma.py index 69fbc2f42..56582190e 100644 --- a/tests/integration/test_random_gamma.py +++ b/tests/integration/test_random_gamma.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import sys import numpy as np import pytest @@ -20,7 +19,7 @@ import cunumeric as num -if sys.platform == "darwin": +if not num.runtime.has_curand: pytestmark = pytest.mark.skip() BITGENERATOR_ARGS = [] else: diff --git a/tests/integration/test_random_straightforward.py b/tests/integration/test_random_straightforward.py index b412e5341..48213fc16 100644 --- a/tests/integration/test_random_straightforward.py +++ b/tests/integration/test_random_straightforward.py @@ -13,7 +13,6 @@ # limitations under the License. 
# import math -import sys import numpy as np import pytest @@ -21,7 +20,7 @@ import cunumeric as num -if sys.platform == "darwin": +if not num.runtime.has_curand: pytestmark = pytest.mark.skip() BITGENERATOR_ARGS = [] else: diff --git a/tests/integration/test_reshape.py b/tests/integration/test_reshape.py index ac5a4d6ab..b3c81b35f 100644 --- a/tests/integration/test_reshape.py +++ b/tests/integration/test_reshape.py @@ -185,7 +185,7 @@ def test_reshape_empty_array(shape): def test_reshape_same_shape(): shape = (1, 2, 3) - arr = np.empty(shape) + arr = np.random.rand(*shape) assert np.array_equal(np.reshape(arr, shape), num.reshape(arr, shape)) From b31e999c0a014a41f4fbe40eda23bde8ba188be6 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Wed, 2 Aug 2023 21:25:21 -0700 Subject: [PATCH 14/33] Add back None as an accepted value for axis on some type sigs (#1017) --- cunumeric/deferred.py | 4 ++-- cunumeric/eager.py | 4 ++-- cunumeric/module.py | 8 ++++---- cunumeric/thunk.py | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cunumeric/deferred.py b/cunumeric/deferred.py index b927c69fb..5cbea74bf 100644 --- a/cunumeric/deferred.py +++ b/cunumeric/deferred.py @@ -3486,7 +3486,7 @@ def sort( self, rhs: Any, argsort: bool = False, - axis: int = -1, + axis: Union[int, None] = -1, kind: SortType = "quicksort", order: Union[None, str, list[str]] = None, ) -> None: @@ -3511,7 +3511,7 @@ def partition( rhs: Any, kth: Union[int, Sequence[int]], argpartition: bool = False, - axis: int = -1, + axis: Union[int, None] = -1, kind: SelectKind = "introselect", order: Union[None, str, list[str]] = None, ) -> None: diff --git a/cunumeric/eager.py b/cunumeric/eager.py index 60eb2094a..bc83d49eb 100644 --- a/cunumeric/eager.py +++ b/cunumeric/eager.py @@ -723,7 +723,7 @@ def sort( self, rhs: Any, argsort: bool = False, - axis: int = -1, + axis: Union[int, None] = -1, kind: SortType = "quicksort", order: Union[None, str, list[str]] = None, ) -> None: @@ -1383,7 +1383,7 @@ def partition( rhs: Any, kth: Union[int, Sequence[int]], argpartition: bool = False, - axis: int = -1, + axis: Union[int, None] = -1, kind: SelectKind = "introselect", order: Union[None, str, list[str]] = None, ) -> None: diff --git a/cunumeric/module.py b/cunumeric/module.py index 668f87bcd..1af26e4ef 100644 --- a/cunumeric/module.py +++ b/cunumeric/module.py @@ -6541,7 +6541,7 @@ def unique( @add_boilerplate("a") def argsort( a: ndarray, - axis: int = -1, + axis: Union[int, None] = -1, kind: SortType = "quicksort", order: Optional[Union[str, list[str]]] = None, ) -> ndarray: @@ -6654,7 +6654,7 @@ def searchsorted( @add_boilerplate("a") def sort( a: ndarray, - axis: int = -1, + axis: Union[int, None] = -1, kind: SortType = "quicksort", order: Optional[Union[str, list[str]]] = None, ) -> ndarray: @@ -6740,7 +6740,7 @@ def sort_complex(a: ndarray) -> ndarray: def argpartition( a: ndarray, kth: Union[int, Sequence[int]], - axis: int = -1, + axis: Union[int, None] = -1, kind: SelectKind = "introselect", order: Optional[Union[str, list[str]]] = None, ) -> ndarray: @@ -6796,7 +6796,7 @@ def argpartition( def partition( a: ndarray, kth: Union[int, Sequence[int]], - axis: int = -1, + axis: Union[int, None] = -1, kind: SelectKind = "introselect", order: Optional[Union[str, list[str]]] = None, ) -> ndarray: diff --git a/cunumeric/thunk.py b/cunumeric/thunk.py index 91aac6eda..62417271a 100644 --- a/cunumeric/thunk.py +++ b/cunumeric/thunk.py @@ -601,7 +601,7 @@ def partition( rhs: Any, kth: Union[int, Sequence[int]], 
argpartition: bool = False, - axis: int = -1, + axis: Union[int, None] = -1, kind: SelectKind = "introselect", order: Union[None, str, list[str]] = None, ) -> None: @@ -628,7 +628,7 @@ def sort( self, rhs: Any, argsort: bool = False, - axis: int = -1, + axis: Union[int, None] = -1, kind: SortType = "quicksort", order: Union[None, str, list[str]] = None, ) -> None: From daf31c2236b67ca1c01a878570e604c185771dd2 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Thu, 3 Aug 2023 21:41:39 -0700 Subject: [PATCH 15/33] If a scalar ufunc arg is cn.ndarray use its type directly (#1011) * If a scalar ufunc arg is cn.ndarray use its type directly * Fix pedantic type compatibility tests --- cunumeric/_ufunc/ufunc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cunumeric/_ufunc/ufunc.py b/cunumeric/_ufunc/ufunc.py index 6257eb369..3079f3261 100644 --- a/cunumeric/_ufunc/ufunc.py +++ b/cunumeric/_ufunc/ufunc.py @@ -562,7 +562,8 @@ def _find_common_type( array_types = [] for arr, orig_arg in zip(arrs, orig_args): if arr.ndim == 0: - scalar_types.append(orig_arg) + # Make sure all scalar arguments are NumPy arrays + scalar_types.append(np.asarray(orig_arg)) else: array_types.append(arr.dtype) From c41a47e678ff3d40968b1bf6db9423cd1fb4f12b Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Fri, 4 Aug 2023 09:08:11 -0700 Subject: [PATCH 16/33] More RNG test fixes for MacOS (#1019) --- tests/integration/test_random_creation.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_random_creation.py b/tests/integration/test_random_creation.py index 15bf70ec1..60653e585 100644 --- a/tests/integration/test_random_creation.py +++ b/tests/integration/test_random_creation.py @@ -72,7 +72,16 @@ def gen_random_from_both( # cuNumeric: keeps generating different arrays. # seed is respected in Eager mode. 
), - None, + pytest.param( + None, + marks=pytest.mark.xfail( + not num.runtime.has_curand, + reason="legacy RNG fallback treats seed(None) as seed(0)", + ), + # https://github.com/nv-legate/cunumeric/issues/1018 + # NumPy: seed(None) is equivalent to seed(()) + # cuNumeric non-cuRAND fallback: seed(None) equivalent to seed(0) + ), pytest.param( (4, 6, 8), marks=pytest.mark.xfail( @@ -125,6 +134,10 @@ def test_default_rng_seed(seed): EAGER_TEST, reason="cuNumeric does not respect seed in Eager mode", ) +@pytest.mark.xfail( + not num.runtime.has_curand, + reason="XORWOW not available without cuRAND", +) def test_default_rng_bitgenerator(): seed = 12345 rng_np_1 = np.random.default_rng(np.random.PCG64(seed)) From 67cd9f9dcaf15ac6e868539a9c47c23fa7829cde Mon Sep 17 00:00:00 2001 From: Marcin Zalewski Date: Wed, 9 Aug 2023 02:22:38 -0700 Subject: [PATCH 17/33] Update to the latest Legion (#1007) Update legate.core SHA --------- Co-authored-by: Manolis Papadakis Co-authored-by: Manolis Papadakis --- cmake/versions.json | 2 +- examples/black_scholes.py | 1 - examples/cg.py | 1 - examples/gemm.py | 1 - examples/ingest.py | 4 ++-- examples/kmeans.py | 1 - examples/kmeans_slow.py | 1 - examples/kmeans_sort.py | 1 - examples/linreg.py | 1 - examples/logreg.py | 1 - 10 files changed, 3 insertions(+), 11 deletions(-) diff --git a/cmake/versions.json b/cmake/versions.json index b5842f7c7..9ac4a7478 100644 --- a/cmake/versions.json +++ b/cmake/versions.json @@ -5,7 +5,7 @@ "git_url" : "https://github.com/nv-legate/legate.core.git", "git_shallow": false, "always_download": false, - "git_tag" : "35d0d4bd3c9d19946ccc641ec7074b44bbae8046" + "git_tag" : "2fb6353407e0498f1cb2f530751b95b8cbc4ca99" } } } diff --git a/examples/black_scholes.py b/examples/black_scholes.py index 55374ea09..b92dd3fe5 100644 --- a/examples/black_scholes.py +++ b/examples/black_scholes.py @@ -90,7 +90,6 @@ def run_black_scholes(N, D): help="number of options to price in thousands", ) parser.add_argument( - "-p", "--precision", type=int, default=32, diff --git a/examples/cg.py b/examples/cg.py index a0399778e..0ac350fc7 100644 --- a/examples/cg.py +++ b/examples/cg.py @@ -236,7 +236,6 @@ def run_preconditioned_cg( help="iterations between convergence tests", ) parser.add_argument( - "-p", "--pre", dest="precondition", action="store_true", diff --git a/examples/gemm.py b/examples/gemm.py index c70a666c1..79af8e476 100644 --- a/examples/gemm.py +++ b/examples/gemm.py @@ -91,7 +91,6 @@ def run_gemm(N, I, warmup, ft): # noqa: E741 help="number of elements in one dimension", ) parser.add_argument( - "-p", "--precision", type=int, default=32, diff --git a/examples/ingest.py b/examples/ingest.py index 90dc9a4e0..12477983f 100644 --- a/examples/ingest.py +++ b/examples/ingest.py @@ -35,8 +35,8 @@ nargs="+", default=[3, 2, 2, 1], ) -parser.add_argument("-p", "--custom-partitioning", action="store_true") -parser.add_argument("-s", "--custom-sharding", action="store_true") +parser.add_argument("--custom-partitioning", action="store_true") +parser.add_argument("--custom-sharding", action="store_true") args = parser.parse_args() dtype = uint16 tile_shape = (1, 301, 704, 360) diff --git a/examples/kmeans.py b/examples/kmeans.py index a12723d94..b5352d197 100644 --- a/examples/kmeans.py +++ b/examples/kmeans.py @@ -164,7 +164,6 @@ def run_kmeans(C, D, T, I, N, S, benchmarking): # noqa: E741 help="number of elements in the data set in thousands", ) parser.add_argument( - "-p", "--precision", type=int, default=32, diff --git a/examples/kmeans_slow.py 
b/examples/kmeans_slow.py index a4d4c7009..753f6dca3 100644 --- a/examples/kmeans_slow.py +++ b/examples/kmeans_slow.py @@ -165,7 +165,6 @@ def run_kmeans(C, D, T, I, N, S, benchmarking): # noqa: E741 help="number of elements in the data set in thousands", ) parser.add_argument( - "-p", "--precision", type=int, default=32, diff --git a/examples/kmeans_sort.py b/examples/kmeans_sort.py index ae84ca6da..a2f377e3a 100644 --- a/examples/kmeans_sort.py +++ b/examples/kmeans_sort.py @@ -162,7 +162,6 @@ def run_kmeans(C, D, T, I, N, S, benchmarking): # noqa: E741 help="number of elements in the data set in thousands", ) parser.add_argument( - "-p", "--precision", type=int, default=32, diff --git a/examples/linreg.py b/examples/linreg.py index 7ec3d11ba..7d77d11ea 100644 --- a/examples/linreg.py +++ b/examples/linreg.py @@ -104,7 +104,6 @@ def run_linear_regression(N, F, T, I, warmup, S, B): # noqa: E741 help="number of elements in the data set in thousands", ) parser.add_argument( - "-p", "--precision", type=int, default=32, diff --git a/examples/logreg.py b/examples/logreg.py index 78d243ae9..399081cee 100644 --- a/examples/logreg.py +++ b/examples/logreg.py @@ -121,7 +121,6 @@ def run_logistic_regression(N, F, T, I, warmup, S, B): # noqa: E741 help="number of elements in the data set in thousands", ) parser.add_argument( - "-p", "--precision", type=int, default=32, From e8abe211ec21d9d49b856b0fe3f0f41b1eb04c55 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Wed, 9 Aug 2023 12:21:10 -0700 Subject: [PATCH 18/33] Don't seed np.random in the tests, since the core will do it (#1020) --- cmake/versions.json | 2 +- scripts/hooks/enforce_boilerplate.py | 1 - tests/integration/test_0d_store.py | 2 -- tests/integration/test_advanced_indexing.py | 1 - tests/integration/test_allclose.py | 1 - tests/integration/test_amax_amin.py | 1 - tests/integration/test_append.py | 1 - tests/integration/test_arg_reduce.py | 1 - tests/integration/test_argsort.py | 1 - tests/integration/test_array.py | 1 - tests/integration/test_array_creation.py | 1 - tests/integration/test_array_dunders.py | 1 - tests/integration/test_array_equal.py | 1 - tests/integration/test_array_fallback.py | 2 -- tests/integration/test_array_split.py | 1 - tests/integration/test_astype.py | 1 - tests/integration/test_atleast_nd.py | 1 - tests/integration/test_binary_op_broadcast.py | 1 - tests/integration/test_binary_op_complex.py | 1 - tests/integration/test_binary_op_typing.py | 1 - tests/integration/test_binary_ufunc.py | 1 - tests/integration/test_bincount.py | 1 - tests/integration/test_bits.py | 1 - tests/integration/test_block.py | 1 - tests/integration/test_broadcast.py | 1 - tests/integration/test_cholesky.py | 1 - tests/integration/test_clip.py | 1 - tests/integration/test_complex_ops.py | 1 - tests/integration/test_compress.py | 1 - tests/integration/test_concatenate_stack.py | 1 - tests/integration/test_contains.py | 2 -- tests/integration/test_convolve.py | 1 - tests/integration/test_copy.py | 1 - tests/integration/test_data_interface.py | 2 -- tests/integration/test_diag_indices.py | 1 - tests/integration/test_dot.py | 1 - tests/integration/test_einsum.py | 1 - tests/integration/test_einsum_path.py | 1 - tests/integration/test_exp.py | 1 - tests/integration/test_extract.py | 1 - tests/integration/test_eye.py | 1 - tests/integration/test_fallback.py | 1 - tests/integration/test_fft_c2c.py | 1 - tests/integration/test_fft_c2r.py | 1 - tests/integration/test_fft_hermitian.py | 1 - tests/integration/test_fft_r2c.py | 1 - 
tests/integration/test_fill.py | 1 - tests/integration/test_fill_diagonal.py | 1 - tests/integration/test_flags.py | 1 - tests/integration/test_flatten.py | 1 - tests/integration/test_flip.py | 1 - tests/integration/test_floating.py | 1 - tests/integration/test_get_item.py | 1 - tests/integration/test_histogram.py | 1 - tests/integration/test_identity.py | 1 - tests/integration/test_index_routines.py | 1 - tests/integration/test_indices.py | 1 - tests/integration/test_ingest.py | 1 - tests/integration/test_inlinemap-keeps-region-alive.py | 2 -- tests/integration/test_inner.py | 2 -- tests/integration/test_input_output.py | 1 - tests/integration/test_intra_array_copy.py | 1 - tests/integration/test_item.py | 1 - tests/integration/test_itemset.py | 1 - tests/integration/test_jacobi.py | 1 - tests/integration/test_length.py | 2 -- tests/integration/test_linspace.py | 1 - tests/integration/test_logic.py | 1 - tests/integration/test_logical.py | 1 - tests/integration/test_lstm_backward_test.py | 1 - tests/integration/test_lstm_simple_forward.py | 2 -- tests/integration/test_map_reduce.py | 1 - tests/integration/test_mask.py | 1 - tests/integration/test_mask_indices.py | 1 - tests/integration/test_matmul.py | 1 - tests/integration/test_matrix_power.py | 1 - tests/integration/test_mean.py | 1 - tests/integration/test_min_on_gpu.py | 2 -- tests/integration/test_moveaxis.py | 1 - tests/integration/test_msort.py | 1 - tests/integration/test_multi_dot.py | 1 - tests/integration/test_nan_reduction.py | 1 - tests/integration/test_nanarg_reduction.py | 1 - tests/integration/test_ndim.py | 1 - tests/integration/test_nonzero.py | 1 - tests/integration/test_norm.py | 1 - tests/integration/test_ones.py | 1 - tests/integration/test_outer.py | 1 - tests/integration/test_overwrite_slice.py | 1 - tests/integration/test_partition.py | 1 - tests/integration/test_prod.py | 1 - tests/integration/test_put.py | 1 - tests/integration/test_put_along_axis.py | 1 - tests/integration/test_putmask.py | 1 - tests/integration/test_randint.py | 2 -- tests/integration/test_random_advanced.py | 1 - tests/integration/test_random_beta.py | 1 - tests/integration/test_random_bitgenerator.py | 1 - tests/integration/test_random_creation.py | 3 --- tests/integration/test_random_gamma.py | 1 - tests/integration/test_random_straightforward.py | 1 - tests/integration/test_reduction.py | 1 - tests/integration/test_repeat.py | 1 - tests/integration/test_reshape.py | 1 - tests/integration/test_scan.py | 1 - tests/integration/test_searchsorted.py | 1 - tests/integration/test_set_item.py | 1 - tests/integration/test_setflags.py | 1 - tests/integration/test_shape.py | 1 - tests/integration/test_singleton_access.py | 1 - tests/integration/test_slicing.py | 1 - tests/integration/test_solve.py | 1 - tests/integration/test_sort.py | 1 - tests/integration/test_sort_complex.py | 1 - tests/integration/test_split.py | 1 - tests/integration/test_squeeze.py | 1 - tests/integration/test_swapaxes.py | 1 - tests/integration/test_take.py | 1 - tests/integration/test_take_along_axis.py | 1 - tests/integration/test_tensordot.py | 2 -- tests/integration/test_tile.py | 1 - tests/integration/test_trace.py | 1 - tests/integration/test_transpose.py | 1 - tests/integration/test_tri.py | 1 - tests/integration/test_trilu.py | 1 - tests/integration/test_trilu_indices.py | 1 - tests/integration/test_unary_functions_2d_complex.py | 1 - tests/integration/test_unary_ufunc.py | 3 --- tests/integration/test_unique.py | 1 - tests/integration/test_update.py | 2 -- 
tests/integration/test_vdot.py | 1 - tests/integration/test_view.py | 1 - tests/integration/test_where.py | 1 - tests/integration/test_window.py | 1 - tests/unit/cunumeric/_sphinxext/test__comparison_util.py | 1 - tests/unit/cunumeric/random/test_bitgenerator.py | 2 -- tests/unit/cunumeric/test_config.py | 2 -- tests/unit/cunumeric/test_coverage.py | 2 -- tests/unit/cunumeric/test_patch.py | 2 -- tests/unit/cunumeric/test_settings.py | 2 -- tests/unit/cunumeric/test_utils.py | 1 - 141 files changed, 1 insertion(+), 162 deletions(-) diff --git a/cmake/versions.json b/cmake/versions.json index 9ac4a7478..98dbd8c3a 100644 --- a/cmake/versions.json +++ b/cmake/versions.json @@ -5,7 +5,7 @@ "git_url" : "https://github.com/nv-legate/legate.core.git", "git_shallow": false, "always_download": false, - "git_tag" : "2fb6353407e0498f1cb2f530751b95b8cbc4ca99" + "git_tag" : "a405f595603238c8557cb5fefd3981d190a2fb1d" } } } diff --git a/scripts/hooks/enforce_boilerplate.py b/scripts/hooks/enforce_boilerplate.py index ad6163a0f..a1f70eab8 100644 --- a/scripts/hooks/enforce_boilerplate.py +++ b/scripts/hooks/enforce_boilerplate.py @@ -17,7 +17,6 @@ from typing import NoReturn BOILERPLATE = [ - "np.random.seed(12345)", "sys.exit(pytest.main(sys.argv))", ] diff --git a/tests/integration/test_0d_store.py b/tests/integration/test_0d_store.py index c661a7a57..1701983f4 100644 --- a/tests/integration/test_0d_store.py +++ b/tests/integration/test_0d_store.py @@ -15,7 +15,6 @@ from itertools import product -import numpy as np import pytest import cunumeric as num @@ -36,5 +35,4 @@ def test_0d_region_backed_stores(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_advanced_indexing.py b/tests/integration/test_advanced_indexing.py index fa82b4271..d7da99381 100644 --- a/tests/integration/test_advanced_indexing.py +++ b/tests/integration/test_advanced_indexing.py @@ -969,5 +969,4 @@ def test(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_allclose.py b/tests/integration/test_allclose.py index ce821a1dd..b6b1d8d6d 100755 --- a/tests/integration/test_allclose.py +++ b/tests/integration/test_allclose.py @@ -296,5 +296,4 @@ def test_scalar_rtol_atol_false(a, b): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_amax_amin.py b/tests/integration/test_amax_amin.py index 9ca074e69..f21217e43 100755 --- a/tests/integration/test_amax_amin.py +++ b/tests/integration/test_amax_amin.py @@ -290,5 +290,4 @@ def test_out_invalid_shape(self, func_name, axis_out_shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_append.py b/tests/integration/test_append.py index ebb737871..353027546 100644 --- a/tests/integration/test_append.py +++ b/tests/integration/test_append.py @@ -88,5 +88,4 @@ def test_bad_shape(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_arg_reduce.py b/tests/integration/test_arg_reduce.py index f061fddef..4fa422726 100644 --- a/tests/integration/test_arg_reduce.py +++ b/tests/integration/test_arg_reduce.py @@ -205,5 +205,4 @@ def test_argmax_and_argmin_out(self, func_name, ndim, keepdims): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git 
a/tests/integration/test_argsort.py b/tests/integration/test_argsort.py index 6f2f4d988..bdc816c04 100644 --- a/tests/integration/test_argsort.py +++ b/tests/integration/test_argsort.py @@ -226,5 +226,4 @@ def test_basic_complex_axis_sort(self, size, sort_type): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_array.py b/tests/integration/test_array.py index 689c1e79a..7adb8b093 100755 --- a/tests/integration/test_array.py +++ b/tests/integration/test_array.py @@ -192,5 +192,4 @@ def test_invalid_dtype(self, obj, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_array_creation.py b/tests/integration/test_array_creation.py index 802eb4861..7e9745cfa 100644 --- a/tests/integration/test_array_creation.py +++ b/tests/integration/test_array_creation.py @@ -276,5 +276,4 @@ def test_zero_with_0d_ndarray_shape(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_array_dunders.py b/tests/integration/test_array_dunders.py index 7d2156b85..1c52d5ebf 100644 --- a/tests/integration/test_array_dunders.py +++ b/tests/integration/test_array_dunders.py @@ -125,5 +125,4 @@ def test_array_ufunc_defer(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_array_equal.py b/tests/integration/test_array_equal.py index 741167b1e..ef013d0d7 100755 --- a/tests/integration/test_array_equal.py +++ b/tests/integration/test_array_equal.py @@ -112,5 +112,4 @@ def test_equal_nan_complex_values(equal_nan): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_array_fallback.py b/tests/integration/test_array_fallback.py index 6b5c0ddc5..d4fe47aa0 100644 --- a/tests/integration/test_array_fallback.py +++ b/tests/integration/test_array_fallback.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest import cunumeric as num @@ -36,5 +35,4 @@ def test_unimplemented_method_self_fallback(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_array_split.py b/tests/integration/test_array_split.py index e34bd26e8..0a9e660eb 100644 --- a/tests/integration/test_array_split.py +++ b/tests/integration/test_array_split.py @@ -144,5 +144,4 @@ def test_array_split(size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_astype.py b/tests/integration/test_astype.py index 5ceded56f..5a54a7789 100644 --- a/tests/integration/test_astype.py +++ b/tests/integration/test_astype.py @@ -135,5 +135,4 @@ def test_complex_negative(src_dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_atleast_nd.py b/tests/integration/test_atleast_nd.py index 800938a07..3946cb92f 100644 --- a/tests/integration/test_atleast_nd.py +++ b/tests/integration/test_atleast_nd.py @@ -95,5 +95,4 @@ def test_atleast_nd(dim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_binary_op_broadcast.py b/tests/integration/test_binary_op_broadcast.py index 5ad9c165e..25d1a0757 100644 --- a/tests/integration/test_binary_op_broadcast.py +++ b/tests/integration/test_binary_op_broadcast.py @@ -70,5 +70,4 @@ def test_random(shape, ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_binary_op_complex.py b/tests/integration/test_binary_op_complex.py index 8bac0394e..1eaebc951 100644 --- a/tests/integration/test_binary_op_complex.py +++ b/tests/integration/test_binary_op_complex.py @@ -84,5 +84,4 @@ def test_pow(x, y): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_binary_op_typing.py b/tests/integration/test_binary_op_typing.py index 1987aa525..5cb7ee8f5 100644 --- a/tests/integration/test_binary_op_typing.py +++ b/tests/integration/test_binary_op_typing.py @@ -147,5 +147,4 @@ def test_array_scalar(lhs_np, rhs_np, lhs_num, rhs_num): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_binary_ufunc.py b/tests/integration/test_binary_ufunc.py index 7bc327a25..4d2a9b7db 100644 --- a/tests/integration/test_binary_ufunc.py +++ b/tests/integration/test_binary_ufunc.py @@ -235,5 +235,4 @@ def parse_inputs(in_str, dtype_str): in_np = parse_inputs(args.inputs, args.dtypes) check_ops([args.op], in_np) else: - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_bincount.py b/tests/integration/test_bincount.py index c5ad0b3d4..c57f89584 100644 --- a/tests/integration/test_bincount.py +++ b/tests/integration/test_bincount.py @@ -181,5 +181,4 @@ def test_bincount_size_one(weights): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_bits.py b/tests/integration/test_bits.py index 83fcb69b1..7f5e67cea 100644 --- a/tests/integration/test_bits.py +++ b/tests/integration/test_bits.py @@ -214,5 +214,4 @@ def test_pack_unpack(ndim, bitorder, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git 
a/tests/integration/test_block.py b/tests/integration/test_block.py index 99f6c3b22..cae10ea7c 100644 --- a/tests/integration/test_block.py +++ b/tests/integration/test_block.py @@ -267,5 +267,4 @@ def test_different_ndims_depths(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_broadcast.py b/tests/integration/test_broadcast.py index 6ce36785e..433dc6bb0 100644 --- a/tests/integration/test_broadcast.py +++ b/tests/integration/test_broadcast.py @@ -190,5 +190,4 @@ def test_broadcast_to_mainpulation(dim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_cholesky.py b/tests/integration/test_cholesky.py index 9716439c8..6ed8e35f8 100644 --- a/tests/integration/test_cholesky.py +++ b/tests/integration/test_cholesky.py @@ -74,5 +74,4 @@ def test_complex(n): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_clip.py b/tests/integration/test_clip.py index b9720b7db..f8431fda2 100644 --- a/tests/integration/test_clip.py +++ b/tests/integration/test_clip.py @@ -201,5 +201,4 @@ def test_out_with_array_amin(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_complex_ops.py b/tests/integration/test_complex_ops.py index 1653704dc..626b9c781 100644 --- a/tests/integration/test_complex_ops.py +++ b/tests/integration/test_complex_ops.py @@ -95,5 +95,4 @@ def test_assignment(real_val, imag_val): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_compress.py b/tests/integration/test_compress.py index 5bdd89fd5..7247685e6 100644 --- a/tests/integration/test_compress.py +++ b/tests/integration/test_compress.py @@ -175,5 +175,4 @@ def test_ndim_out(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_concatenate_stack.py b/tests/integration/test_concatenate_stack.py index 52aba1672..754766fff 100644 --- a/tests/integration/test_concatenate_stack.py +++ b/tests/integration/test_concatenate_stack.py @@ -567,5 +567,4 @@ def test_arrays_mismatched_shape(self, arrays): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_contains.py b/tests/integration/test_contains.py index 7d0ccb22b..23811ba74 100644 --- a/tests/integration/test_contains.py +++ b/tests/integration/test_contains.py @@ -15,7 +15,6 @@ from functools import reduce -import numpy as np import pytest from utils.generators import mk_seq_array @@ -61,5 +60,4 @@ def test_complex(size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_convolve.py b/tests/integration/test_convolve.py index c168bb8dc..7a318668d 100644 --- a/tests/integration/test_convolve.py +++ b/tests/integration/test_convolve.py @@ -140,5 +140,4 @@ def test_ndim(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_copy.py b/tests/integration/test_copy.py index d4cb667dc..17b2b0aa6 100644 --- a/tests/integration/test_copy.py +++ b/tests/integration/test_copy.py @@ -35,5 +35,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) 
sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_data_interface.py b/tests/integration/test_data_interface.py index c29397b5f..a3329a1b6 100644 --- a/tests/integration/test_data_interface.py +++ b/tests/integration/test_data_interface.py @@ -13,7 +13,6 @@ # limitations under the License. # -import numpy as np import pytest import cunumeric as num @@ -43,5 +42,4 @@ def test_roundtrip(dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_diag_indices.py b/tests/integration/test_diag_indices.py index be2e4906e..567011417 100644 --- a/tests/integration/test_diag_indices.py +++ b/tests/integration/test_diag_indices.py @@ -117,5 +117,4 @@ def test_unequal_length(self, size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_dot.py b/tests/integration/test_dot.py index 5ae449ae3..40769c354 100644 --- a/tests/integration/test_dot.py +++ b/tests/integration/test_dot.py @@ -75,5 +75,4 @@ def test_out_invalid_dtype(self, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_einsum.py b/tests/integration/test_einsum.py index f116a86bb..96492b723 100644 --- a/tests/integration/test_einsum.py +++ b/tests/integration/test_einsum.py @@ -312,5 +312,4 @@ def test_order(order): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_einsum_path.py b/tests/integration/test_einsum_path.py index 9e3eff8b1..6efda00b1 100644 --- a/tests/integration/test_einsum_path.py +++ b/tests/integration/test_einsum_path.py @@ -79,5 +79,4 @@ def test_einsum_path_optimize_none(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_exp.py b/tests/integration/test_exp.py index afe54497d..7eff590ae 100644 --- a/tests/integration/test_exp.py +++ b/tests/integration/test_exp.py @@ -129,5 +129,4 @@ def test_casting(casting): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_extract.py b/tests/integration/test_extract.py index 689d4601a..007d93fe0 100644 --- a/tests/integration/test_extract.py +++ b/tests/integration/test_extract.py @@ -294,5 +294,4 @@ def test_vals_empty(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_eye.py b/tests/integration/test_eye.py index d8cec9219..3b2b8acfd 100644 --- a/tests/integration/test_eye.py +++ b/tests/integration/test_eye.py @@ -82,5 +82,4 @@ def testBadK(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_fallback.py b/tests/integration/test_fallback.py index cfda2ded9..4e312d0bb 100644 --- a/tests/integration/test_fallback.py +++ b/tests/integration/test_fallback.py @@ -36,5 +36,4 @@ def test_ufunc(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_fft_c2c.py b/tests/integration/test_fft_c2c.py index 38ae7a1cb..35d192104 100644 --- a/tests/integration/test_fft_c2c.py +++ b/tests/integration/test_fft_c2c.py @@ -268,5 +268,4 @@ def test_fftn_dtype(dtype, func): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) 
diff --git a/tests/integration/test_fft_c2r.py b/tests/integration/test_fft_c2r.py index 4b2b4df7d..30e5c9b34 100644 --- a/tests/integration/test_fft_c2r.py +++ b/tests/integration/test_fft_c2r.py @@ -214,5 +214,4 @@ def test_4d(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_fft_hermitian.py b/tests/integration/test_fft_hermitian.py index 7227a855b..997821895 100644 --- a/tests/integration/test_fft_hermitian.py +++ b/tests/integration/test_fft_hermitian.py @@ -64,5 +64,4 @@ def test_1d_inverse(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_fft_r2c.py b/tests/integration/test_fft_r2c.py index bbe6d128e..e7b829fef 100644 --- a/tests/integration/test_fft_r2c.py +++ b/tests/integration/test_fft_r2c.py @@ -216,5 +216,4 @@ def test_4d(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_fill.py b/tests/integration/test_fill.py index c325f6963..d77ae8e07 100644 --- a/tests/integration/test_fill.py +++ b/tests/integration/test_fill.py @@ -152,5 +152,4 @@ def test_fill_string_to_float() -> None: if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_fill_diagonal.py b/tests/integration/test_fill_diagonal.py index 87e71a4f6..fc18783d3 100644 --- a/tests/integration/test_fill_diagonal.py +++ b/tests/integration/test_fill_diagonal.py @@ -128,5 +128,4 @@ def test_val_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_flags.py b/tests/integration/test_flags.py index 0bb8ce12c..f7e8f16ac 100644 --- a/tests/integration/test_flags.py +++ b/tests/integration/test_flags.py @@ -140,5 +140,4 @@ def test_views_inherit_writeable(self, params): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_flatten.py b/tests/integration/test_flatten.py index 46719a422..f83ad2704 100644 --- a/tests/integration/test_flatten.py +++ b/tests/integration/test_flatten.py @@ -85,5 +85,4 @@ def test_bad_string_order(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_flip.py b/tests/integration/test_flip.py index d1cdbcee1..587b8c3b5 100644 --- a/tests/integration/test_flip.py +++ b/tests/integration/test_flip.py @@ -104,5 +104,4 @@ def test_axis_2d(self, axis): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_floating.py b/tests/integration/test_floating.py index 124db6f2c..36304949e 100644 --- a/tests/integration/test_floating.py +++ b/tests/integration/test_floating.py @@ -142,5 +142,4 @@ def test_typing_unary(fun, dtype, shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_get_item.py b/tests/integration/test_get_item.py index f543b62ed..724fe850f 100644 --- a/tests/integration/test_get_item.py +++ b/tests/integration/test_get_item.py @@ -69,5 +69,4 @@ def test_empty_slice(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_histogram.py b/tests/integration/test_histogram.py index fefd3757a..92824f0da 100644 --- 
a/tests/integration/test_histogram.py +++ b/tests/integration/test_histogram.py @@ -207,5 +207,4 @@ def test_histogram_singleton_empty(src, bins): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_identity.py b/tests/integration/test_identity.py index 25dd7172a..716eae725 100644 --- a/tests/integration/test_identity.py +++ b/tests/integration/test_identity.py @@ -72,5 +72,4 @@ def test_dtype(dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_index_routines.py b/tests/integration/test_index_routines.py index a89534ebe..c86f62e97 100644 --- a/tests/integration/test_index_routines.py +++ b/tests/integration/test_index_routines.py @@ -557,5 +557,4 @@ def test_k_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_indices.py b/tests/integration/test_indices.py index 2de03c01b..55a369975 100644 --- a/tests/integration/test_indices.py +++ b/tests/integration/test_indices.py @@ -110,5 +110,4 @@ def test_indices_sparse(self, ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_ingest.py b/tests/integration/test_ingest.py index 272f2080d..8ca6bfbcd 100644 --- a/tests/integration/test_ingest.py +++ b/tests/integration/test_ingest.py @@ -98,5 +98,4 @@ def test(custom_partitioning, custom_sharding): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_inlinemap-keeps-region-alive.py b/tests/integration/test_inlinemap-keeps-region-alive.py index 347391d05..15de386aa 100644 --- a/tests/integration/test_inlinemap-keeps-region-alive.py +++ b/tests/integration/test_inlinemap-keeps-region-alive.py @@ -15,7 +15,6 @@ import gc -import numpy as np import pytest import cunumeric as num @@ -30,5 +29,4 @@ def test_all(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_inner.py b/tests/integration/test_inner.py index 14f26408f..24904a07c 100644 --- a/tests/integration/test_inner.py +++ b/tests/integration/test_inner.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest from legate.core import LEGATE_MAX_DIM from utils.contractions import check_default @@ -63,5 +62,4 @@ def test_out_invalid_shape(self, shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_input_output.py b/tests/integration/test_input_output.py index 543b0c5ce..9f25de50c 100644 --- a/tests/integration/test_input_output.py +++ b/tests/integration/test_input_output.py @@ -81,5 +81,4 @@ def test_ndarray_tofile(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_intra_array_copy.py b/tests/integration/test_intra_array_copy.py index 9a2bb25f6..c783e2bb7 100644 --- a/tests/integration/test_intra_array_copy.py +++ b/tests/integration/test_intra_array_copy.py @@ -146,5 +146,4 @@ def test_overlap(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_item.py b/tests/integration/test_item.py index 433053517..09d506214 100644 --- a/tests/integration/test_item.py +++ b/tests/integration/test_item.py @@ -91,5 +91,4 @@ def test_ndim(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_itemset.py b/tests/integration/test_itemset.py index 32c45e109..0329be4d3 100644 --- a/tests/integration/test_itemset.py +++ b/tests/integration/test_itemset.py @@ -107,5 +107,4 @@ def test_ndim(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_jacobi.py b/tests/integration/test_jacobi.py index 72823a4b6..bdca2e3cc 100644 --- a/tests/integration/test_jacobi.py +++ b/tests/integration/test_jacobi.py @@ -60,5 +60,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_length.py b/tests/integration/test_length.py index a5a11ed18..cb79b642a 100644 --- a/tests/integration/test_length.py +++ b/tests/integration/test_length.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest import cunumeric as num @@ -48,5 +47,4 @@ def test_method(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_linspace.py b/tests/integration/test_linspace.py index 8930f782f..89b0ee727 100644 --- a/tests/integration/test_linspace.py +++ b/tests/integration/test_linspace.py @@ -327,5 +327,4 @@ def test_stop_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_logic.py b/tests/integration/test_logic.py index aabc99946..4d15524b1 100644 --- a/tests/integration/test_logic.py +++ b/tests/integration/test_logic.py @@ -275,5 +275,4 @@ def test_arrays_invalid_shape(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_logical.py b/tests/integration/test_logical.py index cb835ba75..ca9b99220 100644 --- a/tests/integration/test_logical.py +++ b/tests/integration/test_logical.py @@ -173,5 +173,4 @@ def test_out_invalid_shape(self, func, axis, out_shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_lstm_backward_test.py b/tests/integration/test_lstm_backward_test.py index cb95b1da7..ae9428f22 100644 --- a/tests/integration/test_lstm_backward_test.py +++ b/tests/integration/test_lstm_backward_test.py @@ -139,5 +139,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_lstm_simple_forward.py b/tests/integration/test_lstm_simple_forward.py index 4d741a41b..2e2936ad8 100644 --- a/tests/integration/test_lstm_simple_forward.py +++ b/tests/integration/test_lstm_simple_forward.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest import cunumeric as num @@ -68,5 +67,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_map_reduce.py b/tests/integration/test_map_reduce.py index d427970e6..70379460f 100644 --- a/tests/integration/test_map_reduce.py +++ b/tests/integration/test_map_reduce.py @@ -34,5 +34,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_mask.py b/tests/integration/test_mask.py index e31e12409..afe4987e3 100644 --- a/tests/integration/test_mask.py +++ b/tests/integration/test_mask.py @@ -50,5 +50,4 @@ def test_inverted_lhs(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_mask_indices.py b/tests/integration/test_mask_indices.py index 02c4b1fca..2bcc88212 100644 --- a/tests/integration/test_mask_indices.py +++ b/tests/integration/test_mask_indices.py @@ -114,5 +114,4 @@ def test_mask_func_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_matmul.py b/tests/integration/test_matmul.py index bedc62bcc..7a0759e0d 100644 --- a/tests/integration/test_matmul.py +++ b/tests/integration/test_matmul.py @@ -208,5 +208,4 @@ def test_invalid_casting(self, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_matrix_power.py b/tests/integration/test_matrix_power.py index a0b972551..de4838798 100644 --- a/tests/integration/test_matrix_power.py +++ b/tests/integration/test_matrix_power.py @@ -107,5 +107,4 @@ def test_n_negative_int(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_mean.py b/tests/integration/test_mean.py index 4b82929a4..0f9064280 100755 --- a/tests/integration/test_mean.py +++ b/tests/integration/test_mean.py @@ -158,5 +158,4 @@ def test_out_invalid_shape(self, axis_out_shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_min_on_gpu.py b/tests/integration/test_min_on_gpu.py index 494cdd212..2a5345c4f 100644 --- a/tests/integration/test_min_on_gpu.py +++ b/tests/integration/test_min_on_gpu.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest import cunumeric as num @@ -27,5 +26,4 @@ def test_min(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_moveaxis.py b/tests/integration/test_moveaxis.py index 76ce63483..d52a98f3d 100644 --- a/tests/integration/test_moveaxis.py +++ b/tests/integration/test_moveaxis.py @@ -124,5 +124,4 @@ def test_axis_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_msort.py b/tests/integration/test_msort.py index db02f0aeb..fbb1cdac0 100644 --- a/tests/integration/test_msort.py +++ b/tests/integration/test_msort.py @@ -83,5 +83,4 @@ def test_basic_complex(self, size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_multi_dot.py b/tests/integration/test_multi_dot.py index 29a8f4f44..1c4ca3d05 100644 --- a/tests/integration/test_multi_dot.py +++ b/tests/integration/test_multi_dot.py @@ -146,5 +146,4 @@ def test_out_invalid_dtype(self, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_nan_reduction.py b/tests/integration/test_nan_reduction.py index 69146ef9f..34bbd1447 100644 --- a/tests/integration/test_nan_reduction.py +++ b/tests/integration/test_nan_reduction.py @@ -371,5 +371,4 @@ def test_disallowed_dtype_nanprod(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_nanarg_reduction.py b/tests/integration/test_nanarg_reduction.py index a04fc5798..9956244a2 100644 --- a/tests/integration/test_nanarg_reduction.py +++ b/tests/integration/test_nanarg_reduction.py @@ -278,5 +278,4 @@ def test_out_dtype_mismatch(self, func_name): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_ndim.py b/tests/integration/test_ndim.py index dd0d94dd7..c9bba7f07 100644 --- a/tests/integration/test_ndim.py +++ b/tests/integration/test_ndim.py @@ -52,5 +52,4 @@ def test_python_values_diff_dim(input): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_nonzero.py b/tests/integration/test_nonzero.py index a4f6fd781..8d525446c 100644 --- a/tests/integration/test_nonzero.py +++ b/tests/integration/test_nonzero.py @@ -188,5 +188,4 @@ def test_deprecated_0d(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_norm.py b/tests/integration/test_norm.py index d971a3499..f38ed8804 100644 --- a/tests/integration/test_norm.py +++ b/tests/integration/test_norm.py @@ -173,5 +173,4 @@ def test_invalid_ord_for_matrices(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_ones.py b/tests/integration/test_ones.py index 4ee528b1f..918045dd5 100644 --- a/tests/integration/test_ones.py +++ b/tests/integration/test_ones.py @@ -86,5 +86,4 @@ def test_basic_dtype(self, size, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_outer.py b/tests/integration/test_outer.py index f70965218..7ce71a68f 100644 --- a/tests/integration/test_outer.py +++ b/tests/integration/test_outer.py @@ -139,5 +139,4 
@@ def test_out_invalid_dtype(self, src_dt, out_dt): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_overwrite_slice.py b/tests/integration/test_overwrite_slice.py index c8cd1c950..5ae4070fa 100644 --- a/tests/integration/test_overwrite_slice.py +++ b/tests/integration/test_overwrite_slice.py @@ -73,5 +73,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_partition.py b/tests/integration/test_partition.py index 3ccab94db..bd40c2dd1 100644 --- a/tests/integration/test_partition.py +++ b/tests/integration/test_partition.py @@ -226,5 +226,4 @@ def test_kth_out_of_bound(self, kth): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_prod.py b/tests/integration/test_prod.py index ee6121328..8b627ecd1 100644 --- a/tests/integration/test_prod.py +++ b/tests/integration/test_prod.py @@ -338,5 +338,4 @@ def test_initial(self, size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_put.py b/tests/integration/test_put.py index 0bc3e3023..aced0ce64 100644 --- a/tests/integration/test_put.py +++ b/tests/integration/test_put.py @@ -235,5 +235,4 @@ def test_invalid_mode(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_put_along_axis.py b/tests/integration/test_put_along_axis.py index e73ba79fa..9386f8d92 100644 --- a/tests/integration/test_put_along_axis.py +++ b/tests/integration/test_put_along_axis.py @@ -236,5 +236,4 @@ def test_values_axis_none(self, shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_putmask.py b/tests/integration/test_putmask.py index 91d6b7e90..26926b1be 100644 --- a/tests/integration/test_putmask.py +++ b/tests/integration/test_putmask.py @@ -245,5 +245,4 @@ def test_a_values_different_dtype(self, dtype_val): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_randint.py b/tests/integration/test_randint.py index 195dfc922..84bd4778c 100644 --- a/tests/integration/test_randint.py +++ b/tests/integration/test_randint.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest import cunumeric as num @@ -30,5 +29,4 @@ def test_2d(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_random_advanced.py b/tests/integration/test_random_advanced.py index 242b01821..72d6861c2 100644 --- a/tests/integration/test_random_advanced.py +++ b/tests/integration/test_random_advanced.py @@ -269,5 +269,4 @@ def test_hypergeometric_invalid_args(self, ngood, nbad, nsample): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_random_beta.py b/tests/integration/test_random_beta.py index 6dc8fd0dd..8abb4f1a7 100644 --- a/tests/integration/test_random_beta.py +++ b/tests/integration/test_random_beta.py @@ -167,5 +167,4 @@ def test_beta_size_none(func, args): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_random_bitgenerator.py b/tests/integration/test_random_bitgenerator.py index 09e1dab4a..7cc6c91f3 100644 --- a/tests/integration/test_random_bitgenerator.py +++ b/tests/integration/test_random_bitgenerator.py @@ -319,5 +319,4 @@ def test_random_out_shape_mismatch(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_random_creation.py b/tests/integration/test_random_creation.py index 60653e585..a69d96b11 100644 --- a/tests/integration/test_random_creation.py +++ b/tests/integration/test_random_creation.py @@ -43,8 +43,6 @@ def reseed_and_gen_random( func: str, seed: Any, *args: Any, **kwargs: Any ) -> Tuple[Any, Any]: """Reseeed singleton rng and generate random in NumPy and cuNumeric.""" - np.random.seed(seed) - num.random.seed(seed) return gen_random_from_both(func, *args, **kwargs) @@ -505,5 +503,4 @@ def test_random_sample_invalid_size(self, size, expected_exc): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_random_gamma.py b/tests/integration/test_random_gamma.py index 56582190e..4aa03d194 100644 --- a/tests/integration/test_random_gamma.py +++ b/tests/integration/test_random_gamma.py @@ -152,5 +152,4 @@ def test_gamma_size_none(func): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_random_straightforward.py b/tests/integration/test_random_straightforward.py index 48213fc16..9ab7d2499 100644 --- a/tests/integration/test_random_straightforward.py +++ b/tests/integration/test_random_straightforward.py @@ -382,5 +382,4 @@ def test_beta_size_none(t, func, args): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_reduction.py b/tests/integration/test_reduction.py index 233e79439..58f133a1f 100644 --- a/tests/integration/test_reduction.py +++ b/tests/integration/test_reduction.py @@ -311,5 +311,4 @@ def test_indexed(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_repeat.py b/tests/integration/test_repeat.py index c8cda0662..1ea9eadef 100644 --- a/tests/integration/test_repeat.py +++ b/tests/integration/test_repeat.py @@ -256,5 +256,4 @@ def test_nd_repeats(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_reshape.py 
b/tests/integration/test_reshape.py index b3c81b35f..ee39c6d5f 100644 --- a/tests/integration/test_reshape.py +++ b/tests/integration/test_reshape.py @@ -253,5 +253,4 @@ def test_invalid_order(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_scan.py b/tests/integration/test_scan.py index 7374358dc..1d99f8e7a 100644 --- a/tests/integration/test_scan.py +++ b/tests/integration/test_scan.py @@ -270,5 +270,4 @@ def test_out_invalid_shape(self, op, out_shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_searchsorted.py b/tests/integration/test_searchsorted.py index 5ba9e21eb..c3d1461af 100644 --- a/tests/integration/test_searchsorted.py +++ b/tests/integration/test_searchsorted.py @@ -221,5 +221,4 @@ def test_ndim(ndim, side): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_set_item.py b/tests/integration/test_set_item.py index bf60faacd..2314916f5 100644 --- a/tests/integration/test_set_item.py +++ b/tests/integration/test_set_item.py @@ -48,5 +48,4 @@ def test_scalar_ndarray_as_index(arr): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_setflags.py b/tests/integration/test_setflags.py index f30ba58ed..561c6a2cf 100644 --- a/tests/integration/test_setflags.py +++ b/tests/integration/test_setflags.py @@ -142,5 +142,4 @@ def test_set_align_false(ndim): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_shape.py b/tests/integration/test_shape.py index d4972c62d..03656b6c5 100644 --- a/tests/integration/test_shape.py +++ b/tests/integration/test_shape.py @@ -61,5 +61,4 @@ def test_reshape(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_singleton_access.py b/tests/integration/test_singleton_access.py index 054e06237..118d2828f 100644 --- a/tests/integration/test_singleton_access.py +++ b/tests/integration/test_singleton_access.py @@ -115,5 +115,4 @@ def test_all(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_slicing.py b/tests/integration/test_slicing.py index fc039ea55..73b4a4ee4 100644 --- a/tests/integration/test_slicing.py +++ b/tests/integration/test_slicing.py @@ -108,5 +108,4 @@ def test_slice_step(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_solve.py b/tests/integration/test_solve.py index 82a204889..e9b0e2015 100644 --- a/tests/integration/test_solve.py +++ b/tests/integration/test_solve.py @@ -213,5 +213,4 @@ def test_a_singular_matrix(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_sort.py b/tests/integration/test_sort.py index c348d1286..81e06d86a 100644 --- a/tests/integration/test_sort.py +++ b/tests/integration/test_sort.py @@ -214,5 +214,4 @@ def test_compare_complex_arr_axis_sort(self, size, sort_type): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_sort_complex.py b/tests/integration/test_sort_complex.py index 6f7e3ec46..d2ff93c77 100644 --- 
a/tests/integration/test_sort_complex.py +++ b/tests/integration/test_sort_complex.py @@ -85,5 +85,4 @@ def test_basic_complex(self, size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_split.py b/tests/integration/test_split.py index a48800b9c..d943f77b7 100644 --- a/tests/integration/test_split.py +++ b/tests/integration/test_split.py @@ -305,5 +305,4 @@ def test_dsplit(size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_squeeze.py b/tests/integration/test_squeeze.py index c04ce1dc1..84ac8be2e 100644 --- a/tests/integration/test_squeeze.py +++ b/tests/integration/test_squeeze.py @@ -155,5 +155,4 @@ def test_array_axis(size): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_swapaxes.py b/tests/integration/test_swapaxes.py index fade309cf..0217019c9 100644 --- a/tests/integration/test_swapaxes.py +++ b/tests/integration/test_swapaxes.py @@ -208,5 +208,4 @@ def test_axes_none(self, axes): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_take.py b/tests/integration/test_take.py index 5054392ba..afa997a14 100644 --- a/tests/integration/test_take.py +++ b/tests/integration/test_take.py @@ -278,5 +278,4 @@ def test_out_invalid_dtype(self, dtype): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_take_along_axis.py b/tests/integration/test_take_along_axis.py index 42a8b7721..1597b2ff6 100644 --- a/tests/integration/test_take_along_axis.py +++ b/tests/integration/test_take_along_axis.py @@ -149,5 +149,4 @@ def test_indice_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_tensordot.py b/tests/integration/test_tensordot.py index 6530d3aa2..dd2873be3 100644 --- a/tests/integration/test_tensordot.py +++ b/tests/integration/test_tensordot.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest from legate.core import LEGATE_MAX_DIM from utils.contractions import check_default @@ -88,5 +87,4 @@ def test_out_invalid_shape(self, shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_tile.py b/tests/integration/test_tile.py index 4a410d64f..1bfc1dcf8 100644 --- a/tests/integration/test_tile.py +++ b/tests/integration/test_tile.py @@ -72,5 +72,4 @@ def test_basic(size, value): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_trace.py b/tests/integration/test_trace.py index 6c6d07049..aad02ca28 100644 --- a/tests/integration/test_trace.py +++ b/tests/integration/test_trace.py @@ -210,5 +210,4 @@ def test_out_invalid_shape(self, out_shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_transpose.py b/tests/integration/test_transpose.py index 5b2c7e27d..4162df713 100644 --- a/tests/integration/test_transpose.py +++ b/tests/integration/test_transpose.py @@ -272,5 +272,4 @@ def test_axes_3d(self, size, axes): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_tri.py b/tests/integration/test_tri.py index 095c82187..127180064 100644 --- a/tests/integration/test_tri.py +++ b/tests/integration/test_tri.py @@ -151,5 +151,4 @@ def test_k_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_trilu.py b/tests/integration/test_trilu.py index 48889f58e..10e5f22ca 100644 --- a/tests/integration/test_trilu.py +++ b/tests/integration/test_trilu.py @@ -97,5 +97,4 @@ def test_m_scalar(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_trilu_indices.py b/tests/integration/test_trilu_indices.py index 1f1e592d3..a069fda6f 100644 --- a/tests/integration/test_trilu_indices.py +++ b/tests/integration/test_trilu_indices.py @@ -233,5 +233,4 @@ def test_k_none(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_unary_functions_2d_complex.py b/tests/integration/test_unary_functions_2d_complex.py index e1326941d..bd0b92174 100644 --- a/tests/integration/test_unary_functions_2d_complex.py +++ b/tests/integration/test_unary_functions_2d_complex.py @@ -51,5 +51,4 @@ def test_sqrt(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_unary_ufunc.py b/tests/integration/test_unary_ufunc.py index 99c936ffe..c1deefe85 100644 --- a/tests/integration/test_unary_ufunc.py +++ b/tests/integration/test_unary_ufunc.py @@ -14,7 +14,6 @@ # import argparse -from zlib import adler32 import numpy as np import pytest @@ -28,7 +27,6 @@ def deterministic_op_test(func): # This enforces that inputs are always the same whether # running all tests or a single test with -k. 
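# Illustrative aside, not part of the patch: the comment above refers to the
# per-op seeding removed by this hunk, where adler32 of the op name gave each
# op a stable, distinct seed. A minimal standalone sketch of that idea (the
# names `per_op_seed` and `seeded` are hypothetical, not the project's API):
from zlib import adler32
import numpy as np

def per_op_seed(op_name: str) -> int:
    # stable across runs, so a single test selected with -k draws the same
    # random inputs as a full test run
    return adler32(bytes(op_name, "utf-8"))

def seeded(func):
    def wrapper(op, *args, **kwargs):
        np.random.seed(per_op_seed(op))
        return func(op, *args, **kwargs)
    return wrapper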
def wrapper_set_seed(op, *args, **kwargs): - np.random.seed(adler32(bytes(op, "utf-8"))) func(op, *args, **kwargs) func(op, *args, **kwargs) @@ -382,5 +380,4 @@ def parse_inputs(in_str, dtype_str): in_np = parse_inputs(args.inputs, args.dtypes) check_ops([args.op], in_np) else: - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_unique.py b/tests/integration/test_unique.py index 624c006aa..28374586c 100644 --- a/tests/integration/test_unique.py +++ b/tests/integration/test_unique.py @@ -71,5 +71,4 @@ def test_parameters(return_index, return_inverse, return_counts, axis): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_update.py b/tests/integration/test_update.py index 48bbdc4fb..49ea59bff 100644 --- a/tests/integration/test_update.py +++ b/tests/integration/test_update.py @@ -13,7 +13,6 @@ # limitations under the License. # -import numpy as np import pytest import cunumeric as num @@ -34,5 +33,4 @@ def test_basic(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_vdot.py b/tests/integration/test_vdot.py index a36c82db1..2cc380587 100644 --- a/tests/integration/test_vdot.py +++ b/tests/integration/test_vdot.py @@ -158,5 +158,4 @@ def test_out_invalid_shape(self, out_shape): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_view.py b/tests/integration/test_view.py index 7d9f3337d..ab27ed19f 100644 --- a/tests/integration/test_view.py +++ b/tests/integration/test_view.py @@ -64,5 +64,4 @@ def test_scalar(value): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_where.py b/tests/integration/test_where.py index 0b7ead603..cd66c0ce7 100644 --- a/tests/integration/test_where.py +++ b/tests/integration/test_where.py @@ -182,5 +182,4 @@ def test_argwhere_scalar(): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_window.py b/tests/integration/test_window.py index ea900bb0e..16afeec9a 100644 --- a/tests/integration/test_window.py +++ b/tests/integration/test_window.py @@ -44,5 +44,4 @@ def test_kaiser_window(M, beta): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/_sphinxext/test__comparison_util.py b/tests/unit/cunumeric/_sphinxext/test__comparison_util.py index 42b08d383..d22c9451a 100644 --- a/tests/unit/cunumeric/_sphinxext/test__comparison_util.py +++ b/tests/unit/cunumeric/_sphinxext/test__comparison_util.py @@ -60,5 +60,4 @@ def test_skip(self): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/random/test_bitgenerator.py b/tests/unit/cunumeric/random/test_bitgenerator.py index fb42fb1da..895a49ccc 100644 --- a/tests/unit/cunumeric/random/test_bitgenerator.py +++ b/tests/unit/cunumeric/random/test_bitgenerator.py @@ -13,7 +13,6 @@ # limitations under the License. 
# -import numpy as np import pytest from mock import patch @@ -87,5 +86,4 @@ def test_seed(self, mock_perf_counter_ns) -> None: if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/test_config.py b/tests/unit/cunumeric/test_config.py index 70630315a..5e85ccfde 100644 --- a/tests/unit/cunumeric/test_config.py +++ b/tests/unit/cunumeric/test_config.py @@ -13,7 +13,6 @@ # limitations under the License. # -import numpy as np import pytest from legate.core import Library from legate.core.context import Context @@ -240,5 +239,4 @@ def test_ScanCode() -> None: if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/test_coverage.py b/tests/unit/cunumeric/test_coverage.py index 71137a03a..bb1ca6b4e 100644 --- a/tests/unit/cunumeric/test_coverage.py +++ b/tests/unit/cunumeric/test_coverage.py @@ -16,7 +16,6 @@ from types import ModuleType from typing import Any -import numpy as np import pytest from mock import MagicMock, patch @@ -497,5 +496,4 @@ def test_ufunc_methods_unary() -> None: if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/test_patch.py b/tests/unit/cunumeric/test_patch.py index e80dbec3a..51911c563 100644 --- a/tests/unit/cunumeric/test_patch.py +++ b/tests/unit/cunumeric/test_patch.py @@ -15,7 +15,6 @@ from subprocess import run -import numpy as np import pytest # TODO: (bev) This probably only works in bash. Skipping the tests until @@ -41,5 +40,4 @@ def test_patch() -> None: if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/test_settings.py b/tests/unit/cunumeric/test_settings.py index eb1b408d3..b51570699 100644 --- a/tests/unit/cunumeric/test_settings.py +++ b/tests/unit/cunumeric/test_settings.py @@ -16,7 +16,6 @@ from pathlib import Path -import numpy as np import pytest from legate.util.fs import read_c_define from legate.util.settings import EnvOnlySetting, PrioritizedSetting @@ -113,5 +112,4 @@ def test_test_default(self, name: str) -> None: if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) diff --git a/tests/unit/cunumeric/test_utils.py b/tests/unit/cunumeric/test_utils.py index 12e6d0265..d934bca6a 100644 --- a/tests/unit/cunumeric/test_utils.py +++ b/tests/unit/cunumeric/test_utils.py @@ -304,5 +304,4 @@ def test_explicit_axis(self, a: int, b: int, axes: AxesType): if __name__ == "__main__": import sys - np.random.seed(12345) sys.exit(pytest.main(sys.argv)) From f2a87a77b60aa903a11b8c09e075c897692f5582 Mon Sep 17 00:00:00 2001 From: Sandeep Datta <128171450+sandeepd-nv@users.noreply.github.com> Date: Fri, 11 Aug 2023 09:33:04 +0530 Subject: [PATCH 19/33] GH docker based reusable CI workflows. 
(#993) --- .dockerignore | 1 + .github/workflows/ci-gh.yml | 112 ++-------------- .github/workflows/gh-build-and-test.yml | 32 +++++ .github/workflows/gh-build.yml | 123 ++++++++++++++++++ .github/workflows/gh-cleanup.yml | 43 ++++++ cmake/versions.json | 2 +- continuous_integration/Dockerfile | 44 +++++++ .../home/coder/.local/bin/build-cunumeric-all | 17 +++ .../coder/.local/bin/build-cunumeric-conda | 6 +- 9 files changed, 280 insertions(+), 100 deletions(-) create mode 120000 .dockerignore create mode 100644 .github/workflows/gh-build-and-test.yml create mode 100644 .github/workflows/gh-build.yml create mode 100644 .github/workflows/gh-cleanup.yml create mode 100644 continuous_integration/Dockerfile create mode 100644 continuous_integration/home/coder/.local/bin/build-cunumeric-all diff --git a/.dockerignore b/.dockerignore new file mode 120000 index 000000000..3e4e48b0b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.github/workflows/ci-gh.yml b/.github/workflows/ci-gh.yml index a81802f9d..f1aafdd59 100644 --- a/.github/workflows/ci-gh.yml +++ b/.github/workflows/ci-gh.yml @@ -1,7 +1,7 @@ -name: Build cunumeric on GH +name: Build and test cunumeric on GH concurrency: - group: ci-gpu-on-${{ github.event_name }}-from-${{ github.ref_name }} + group: ci-build-and-test-on-${{ github.event_name }}-from-${{ github.ref_name }} cancel-in-progress: true on: @@ -11,99 +11,15 @@ on: - "branch-*" jobs: - build: - permissions: - id-token: write # This is required for configure-aws-credentials - contents: read # This is required for actions/checkout - - # Ref: https://docs.rapids.ai/resources/github-actions/#cpu-labels for `linux-amd64-cpu4` - runs-on: ${{ github.repository == 'nv-legate/cunumeric' && 'linux-amd64-cpu4' || 'ubuntu-latest' }} - container: - options: -u root - image: rapidsai/devcontainers:23.06-cpp-cuda11.8-mambaforge-ubuntu22.04 - volumes: - - ${{ github.workspace }}/out:/tmp/out - env: - DEFAULT_CONDA_ENV: legate - PYTHONDONTWRITEBYTECODE: 1 - SCCACHE_REGION: us-east-2 - SCCACHE_BUCKET: rapids-sccache-east - SCCACHE_S3_KEY_PREFIX: legate-cunumeric-dev - GH_TOKEN: "${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - GITHUB_TOKEN: "${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}" - VAULT_HOST: "${{ secrets.PERSONAL_ACCESS_TOKEN && 'https://vault.ops.k8s.rapids.ai' || '' }}" - VAULT_S3_TTL: "28800s" # 8 hours - - steps: - - name: Checkout legate.core - uses: actions/checkout@v3 - with: - repository: nv-legate/legate.core - fetch-depth: 0 - path: legate - - - name: Checkout cunumeric (= this repo) - uses: actions/checkout@v3 - with: - fetch-depth: 0 - path: cunumeric - - - name: Setup - shell: bash -eo pipefail {0} - run: | - export LEGATE_SHA=$(cat cunumeric/cmake/versions.json | jq -r '.packages.legate_core.git_tag') - echo "Checking out LEGATE_SHA: ${LEGATE_SHA}" - git -C legate checkout $LEGATE_SHA - - cp -ar legate/continuous_integration/home/coder/.gitconfig /home/coder/; - cp -ar legate/continuous_integration/home/coder/.local /home/coder/; - mv legate /home/coder/legate - - cp -ar cunumeric/continuous_integration/home/coder/.local/bin/* /home/coder/.local/bin/; - mv cunumeric /home/coder/cunumeric; - - chmod a+x /home/coder/.local/bin/*; - chown -R coder:coder /home/coder/; - chown -R coder:coder /tmp/out; - - - if: github.repository == 'nv-legate/cunumeric' - name: Get AWS credentials for sccache bucket - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-region: us-east-2 - 
role-duration-seconds: 28800 # 8 hours - role-to-assume: arn:aws:iam::279114543810:role/gha-oidc-nv-legate - - - name: Create conda env - shell: su coder {0} - run: cd ~/; exec entrypoint get-yaml-and-make-conda-env; - - - name: Build legate.core C++ library - shell: su coder {0} - run: cd ~/; exec entrypoint build-legate-cpp; - - - name: Build legate.core Python Wheel - shell: su coder {0} - run: cd ~/; exec entrypoint build-legate-wheel; - - - name: Build legate.core Conda Package - shell: su coder {0} - run: cd ~/; exec entrypoint build-legate-conda; - - - name: Build cunumeric C++ library - shell: su coder {0} - run: cd ~/; exec entrypoint build-cunumeric-cpp; - - - name: Build cunumeric Python Wheel - shell: su coder {0} - run: cd ~/; exec entrypoint build-cunumeric-wheel; - - - name: Build cunumeric Conda Package - shell: su coder {0} - run: cd ~/; exec entrypoint build-cunumeric-conda; - - - name: Upload build output - uses: actions/upload-artifact@v3 - with: - name: "cunumeric-${{ github.sha }}" - path: ./out/* + build-and-test: + strategy: + fail-fast: false + matrix: + include: + - {build-target: cpu} + - {build-target: gpu} + uses: + ./.github/workflows/gh-build-and-test.yml + with: + build-target: ${{ matrix.build-target }} + sha: ${{ github.sha }} diff --git a/.github/workflows/gh-build-and-test.yml b/.github/workflows/gh-build-and-test.yml new file mode 100644 index 000000000..f297b9761 --- /dev/null +++ b/.github/workflows/gh-build-and-test.yml @@ -0,0 +1,32 @@ +on: + workflow_call: + inputs: + build-target: + required: true + type: string + sha: + required: true + type: string + +jobs: + build: + name: "Build cunumeric (with ${{ inputs.build-target }} legate) on GH" + uses: + ./.github/workflows/gh-build.yml + with: + build-target: ${{ inputs.build-target }} + # Ref: https://docs.rapids.ai/resources/github-actions/#cpu-labels for `linux-amd64-cpu4` + runs-on: ${{ github.repository_owner == 'nv-legate' && 'linux-amd64-cpu4' || 'ubuntu-latest' }} + sha: ${{ inputs.sha }} + + cleanup: + needs: + - build + + # This ensures the cleanup job runs even if previous jobs fail or the workflow is cancelled. 
+ if: always() + uses: + ./.github/workflows/gh-cleanup.yml + with: + build-target: ${{ inputs.build-target }} + sha: ${{ inputs.sha }} diff --git a/.github/workflows/gh-build.yml b/.github/workflows/gh-build.yml new file mode 100644 index 000000000..c84ac0b9a --- /dev/null +++ b/.github/workflows/gh-build.yml @@ -0,0 +1,123 @@ +name: Build cunumeric on GH + +on: + workflow_call: + inputs: + build-target: + required: true + type: string + runs-on: + required: true + type: string + sha: + required: true + type: string + +env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BASE_IMAGE: rapidsai/devcontainers:23.06-cpp-cuda11.8-mambaforge-ubuntu22.04 + IMAGE_NAME_LEGATE: legate.core-${{ inputs.build-target }} + IMAGE_NAME_CUNUMERIC: cunumeric-${{ inputs.build-target }} + USE_CUDA: ${{ (inputs.build-target == 'cpu' && 'OFF') || 'ON' }} + +jobs: + build: + name: build-${{ inputs.build-target }}-sub-workflow + + permissions: + id-token: write # This is required for configure-aws-credentials + contents: read # This is required for actions/checkout + packages: write # This is required to push docker image to ghcr.io + + runs-on: ${{ inputs.runs-on }} + + steps: + - name: Checkout legate.core + uses: actions/checkout@v3 + with: + repository: nv-legate/legate.core + fetch-depth: 0 + path: legate + + - name: Checkout cunumeric (= this repo) + uses: actions/checkout@v3 + with: + fetch-depth: 0 + path: cunumeric + + - if: github.repository_owner == 'nv-legate' + name: Get AWS credentials for sccache bucket + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-region: us-east-2 + role-duration-seconds: 28800 # 8 hours + role-to-assume: arn:aws:iam::279114543810:role/gha-oidc-nv-legate + + - name: Docker system prune + run: | + docker version + docker system prune --all --force + + - name: Build legate.core using docker build + run: | + echo BUILD_TARGET: ${{ inputs.build-target }} + echo USE_CUDA: ${{ env.USE_CUDA }} + + export LEGATE_SHA=$(cat cunumeric/cmake/versions.json | jq -r '.packages.legate_core.git_tag') + echo "Checking out LEGATE_SHA: ${LEGATE_SHA}" + git -C legate checkout $LEGATE_SHA + + IMAGE_TAG_LEGATE=${{ env.IMAGE_NAME_LEGATE }}:${{ inputs.sha }} + + chmod +x legate/continuous_integration/build-docker-image + legate/continuous_integration/build-docker-image \ + --base-image "$BASE_IMAGE" \ + --image-tag "$IMAGE_TAG_LEGATE" \ + --source-dir legate + + - name: Build cunumeric using docker build + run: | + IMAGE_TAG_CUNUMERIC=${{ env.IMAGE_NAME_CUNUMERIC }}:${{ inputs.sha }} + IMAGE_TAG_LEGATE=${{ env.IMAGE_NAME_LEGATE }}:${{ inputs.sha }} + + legate/continuous_integration/build-docker-image \ + --base-image "$IMAGE_TAG_LEGATE" \ + --image-tag "$IMAGE_TAG_CUNUMERIC" \ + --source-dir cunumeric + + - name: Dump docker history of image before upload + run: | + IMAGE_TAG=${{ env.IMAGE_NAME_CUNUMERIC }}:${{ inputs.sha }} + docker history $IMAGE_TAG + + - name: Log in to container image registry + run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin + + - name: Push cunumeric image + run: | + IMAGE_TAG=${{ env.IMAGE_NAME_CUNUMERIC }}:${{ inputs.sha }} + + IMAGE_ID=ghcr.io/${{ github.repository_owner }} + + # Change all uppercase to lowercase + IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') + + IMAGE_ID=$IMAGE_ID/$IMAGE_TAG + + docker tag $IMAGE_TAG $IMAGE_ID + docker push $IMAGE_ID + + - name: Copy artifacts back to the host + run: | + IMAGE_TAG=${{ env.IMAGE_NAME_CUNUMERIC }}:${{ inputs.sha }} + mkdir -p artifacts + docker run -v 
"$(pwd)/artifacts:/home/coder/.artifacts" --rm -t $IMAGE_TAG copy-artifacts + + - name: Display structure of workdir + run: ls -R + + - name: Upload build artifacts + uses: actions/upload-artifact@v3 + with: + name: "cunumeric-${{ inputs.build-target }}-${{ inputs.sha }}" + path: artifacts diff --git a/.github/workflows/gh-cleanup.yml b/.github/workflows/gh-cleanup.yml new file mode 100644 index 000000000..6451c401c --- /dev/null +++ b/.github/workflows/gh-cleanup.yml @@ -0,0 +1,43 @@ +name: Clean up + +on: + workflow_call: + inputs: + build-target: + required: true + type: string + sha: + required: true + type: string + +env: + IMAGE_NAME: cunumeric-${{ inputs.build-target }} + +jobs: + cleanup: + permissions: + packages: write + + runs-on: ubuntu-latest + + steps: + - name: Delete docker image + run: | + set -xeuo pipefail + + PACKAGE_NAME=${{ env.IMAGE_NAME }} + PACKAGE_VERSION_ID=$( + curl -L \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ github.token }}"\ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/orgs/${{ github.repository_owner }}/packages/container/$PACKAGE_NAME/versions | + jq '.[] | select(.metadata.container.tags[] == "${{ inputs.sha }}") | .id' - + ) + + curl -L \ + -X DELETE \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer ${{ github.token }}"\ + -H "X-GitHub-Api-Version: 2022-11-28" \ + https://api.github.com/orgs/${{ github.repository_owner }}/packages/container/$PACKAGE_NAME/versions/$PACKAGE_VERSION_ID diff --git a/cmake/versions.json b/cmake/versions.json index 98dbd8c3a..d7e1d8133 100644 --- a/cmake/versions.json +++ b/cmake/versions.json @@ -5,7 +5,7 @@ "git_url" : "https://github.com/nv-legate/legate.core.git", "git_shallow": false, "always_download": false, - "git_tag" : "a405f595603238c8557cb5fefd3981d190a2fb1d" + "git_tag" : "4b79075eb5d7035d501c334c87a87939af79abc2" } } } diff --git a/continuous_integration/Dockerfile b/continuous_integration/Dockerfile new file mode 100644 index 000000000..4e6478d14 --- /dev/null +++ b/continuous_integration/Dockerfile @@ -0,0 +1,44 @@ +ARG BASE_IMAGE +FROM ${BASE_IMAGE} as stage0 + +COPY --chown=coder:coder continuous_integration/home/coder/.local/bin/* /home/coder/.local/bin/ +COPY --chown=coder:coder . /home/coder/cunumeric + +RUN chmod a+x /home/coder/.local/bin/* + +#--------------------------------------------------- +FROM stage0 as setup + +USER coder +WORKDIR /home/coder + +RUN set -x && . 
conda-utils && \ + get_yaml_and_make_conda_env && \ + install_legate_core_with_war + +#--------------------------------------------------- +FROM setup as build +USER coder +WORKDIR /home/coder + +ARG GITHUB_TOKEN +ENV GITHUB_TOKEN=${GITHUB_TOKEN} +ARG AWS_SESSION_TOKEN +ENV AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN} +ARG AWS_ACCESS_KEY_ID +ENV AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} +ARG AWS_SECRET_ACCESS_KEY +ENV AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + +COPY --chown=coder:coder .creds /run/secrets + +RUN entrypoint build-cunumeric-all + +#--------------------------------------------------- +FROM stage0 as final +USER coder +WORKDIR /home/coder + +COPY --from=build --chown=coder:coder /tmp/out /tmp/out +COPY --from=build --chown=coder:coder /tmp/conda-build /tmp/conda-build +COPY --from=build --chown=coder:coder /tmp/env_yaml /tmp/env_yaml diff --git a/continuous_integration/home/coder/.local/bin/build-cunumeric-all b/continuous_integration/home/coder/.local/bin/build-cunumeric-all new file mode 100644 index 000000000..62c6da0d7 --- /dev/null +++ b/continuous_integration/home/coder/.local/bin/build-cunumeric-all @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + + +build_cunumeric_all() { + set -x + cd ~/; + + conda info + + set -euo pipefail; + + build-cunumeric-cpp; + build-cunumeric-wheel; + build-cunumeric-conda; +} + +(build_cunumeric_all "$@"); diff --git a/continuous_integration/home/coder/.local/bin/build-cunumeric-conda b/continuous_integration/home/coder/.local/bin/build-cunumeric-conda index b957e13af..0be424252 100755 --- a/continuous_integration/home/coder/.local/bin/build-cunumeric-conda +++ b/continuous_integration/home/coder/.local/bin/build-cunumeric-conda @@ -23,7 +23,11 @@ build_cunumeric_conda_package() { conda_build_args+=("--build-id-pat=''"); conda_build_args+=(--no-include-recipe); conda_build_args+=(--no-anaconda-upload); - conda_build_args+=(--variants "{gpu_enabled:true,python:${python_version}}"); + + GPU_ENABLED=true + [ "${USE_CUDA:-}" = "OFF" ] && GPU_ENABLED=false + + conda_build_args+=(--variants "{gpu_enabled:${GPU_ENABLED},python:${python_version}}"); rm -rf /tmp/conda-build/cunumeric; mkdir -p /tmp/conda-build/cunumeric; From 901751415beb90c3694fc42e5734c978a44e7e10 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Mon, 14 Aug 2023 10:48:54 -0700 Subject: [PATCH 20/33] Skip the docstrings for functions pulled from cloned modules (#1024) * Skip the docstrings for functions pulled from cloned modules The docstring for numpy.binomial has an unused reference, which causes our doc build to panic, in cases where curand is not installed, and thus we end up falling back to numpy for binomial. 
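For illustration, a minimal standalone sketch of the approach the diff below takes (the helper name `numpy_fallback` is hypothetical; the real wrapper in cunumeric/coverage.py is `unimplemented`): copy every attribute functools.wraps normally copies except `__doc__`, then attach a short stub docstring pointing at the NumPy name instead of reusing the original docstring whose broken reference trips the doc build.

from functools import WRAPPER_ASSIGNMENTS, wraps

COPIED_ATTRS = tuple(a for a in WRAPPER_ASSIGNMENTS if a != "__doc__")

def numpy_fallback(func, name):
    @wraps(func, assigned=COPIED_ATTRS)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    # stub docstring rather than the wrapped function's own
    wrapper.__doc__ = f"Falls back to NumPy. See also: {name}"
    return wrapper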
* Add a stub docstring instead of dropping it completely * Fix unit test --- cunumeric/coverage.py | 18 +++++++++++++++--- tests/unit/cunumeric/test_coverage.py | 12 ++++++++---- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/cunumeric/coverage.py b/cunumeric/coverage.py index 9cd246ca9..55f74d238 100644 --- a/cunumeric/coverage.py +++ b/cunumeric/coverage.py @@ -16,7 +16,7 @@ import warnings from dataclasses import dataclass -from functools import wraps +from functools import WRAPPER_ASSIGNMENTS, wraps from types import ( BuiltinFunctionType, FunctionType, @@ -137,6 +137,11 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: return wrapper +_UNIMPLEMENTED_COPIED_ATTRS = tuple( + attr for attr in WRAPPER_ASSIGNMENTS if attr != "__doc__" +) + + def unimplemented( func: AnyCallable, prefix: str, @@ -157,7 +162,7 @@ def unimplemented( if reporting: - @wraps(func) + @wraps(func, assigned=_UNIMPLEMENTED_COPIED_ATTRS) def wrapper(*args: Any, **kwargs: Any) -> Any: location = find_last_user_frames( not settings.report_dump_callstack() @@ -174,7 +179,7 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: else: - @wraps(func) + @wraps(func, assigned=_UNIMPLEMENTED_COPIED_ATTRS) def wrapper(*args: Any, **kwargs: Any) -> Any: stacklevel = find_last_user_stacklevel() warnings.warn( @@ -187,6 +192,13 @@ def wrapper(*args: Any, **kwargs: Any) -> Any: kwargs = deep_apply(kwargs, fallback) return func(*args, **kwargs) + wrapper.__doc__ = f""" + cuNumeric has not implemented this function, and will fall back to NumPy. + + See Also + -------- + {name} + """ wrapper._cunumeric = CuWrapperMetadata(implemented=False) return wrapper diff --git a/tests/unit/cunumeric/test_coverage.py b/tests/unit/cunumeric/test_coverage.py index bb1ca6b4e..0ce089e09 100644 --- a/tests/unit/cunumeric/test_coverage.py +++ b/tests/unit/cunumeric/test_coverage.py @@ -210,7 +210,8 @@ def test_reporting_True_func( assert wrapped.__name__ == _test_func.__name__ assert wrapped.__qualname__ == _test_func.__qualname__ - assert wrapped.__doc__ == _test_func.__doc__ + assert wrapped.__doc__ != _test_func.__doc__ + assert "not implemented" in wrapped.__doc__ assert wrapped.__wrapped__ is _test_func assert wrapped(10, 20) == 30 @@ -234,7 +235,8 @@ def test_reporting_False_func( assert wrapped.__name__ == _test_func.__name__ assert wrapped.__qualname__ == _test_func.__qualname__ - assert wrapped.__doc__ == _test_func.__doc__ + assert wrapped.__doc__ != _test_func.__doc__ + assert "not implemented" in wrapped.__doc__ assert wrapped.__wrapped__ is _test_func with pytest.warns(RuntimeWarning) as record: @@ -253,7 +255,8 @@ def test_reporting_True_ufunc( ) -> None: wrapped = m.unimplemented(_test_ufunc, "foo", "_test_ufunc") - assert wrapped.__doc__ == _test_ufunc.__doc__ + assert wrapped.__doc__ != _test_ufunc.__doc__ + assert "not implemented" in wrapped.__doc__ assert wrapped.__wrapped__ is _test_ufunc assert wrapped(10, 20) == 30 @@ -275,7 +278,8 @@ def test_reporting_False_ufunc( _test_ufunc, "foo", "_test_ufunc", reporting=False ) - assert wrapped.__doc__ == _test_ufunc.__doc__ + assert wrapped.__doc__ != _test_ufunc.__doc__ + assert "not implemented" in wrapped.__doc__ assert wrapped.__wrapped__ is _test_ufunc with pytest.warns(RuntimeWarning) as record: From 06cd5344a806040c2e836dfa7a808f84d0de1215 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Mon, 14 Aug 2023 14:14:28 -0700 Subject: [PATCH 21/33] Fix random test failures in CPU-only runs (#1025) * Anticipate some test failures when cuRand is not available * Ensure 
input to unstable sort algorithms contains no duplicates * Add mergesort to stable sort methods --- cunumeric/random/legacy.py | 3 +-- tests/integration/test_argsort.py | 31 +++++++++++++---------- tests/integration/test_random_creation.py | 11 ++++++-- 3 files changed, 27 insertions(+), 18 deletions(-) diff --git a/cunumeric/random/legacy.py b/cunumeric/random/legacy.py index c0568136b..4070653e8 100644 --- a/cunumeric/random/legacy.py +++ b/cunumeric/random/legacy.py @@ -111,7 +111,6 @@ def randint( -------- Multiple GPUs, Multiple CPUs """ - if not isinstance(low, int): raise NotImplementedError("'low' must be an integer") if high is not None and not isinstance(high, int): @@ -126,7 +125,7 @@ def randint( dtype = np.dtype(np.int64) # TODO: randint must support unsigned integer dtypes as well if dtype.kind != "i": - raise TypeError( + raise NotImplementedError( "cunumeric.random.randint must be given an integer dtype" ) if isinstance(size, int): diff --git a/tests/integration/test_argsort.py b/tests/integration/test_argsort.py index bdc816c04..b36de0d16 100644 --- a/tests/integration/test_argsort.py +++ b/tests/integration/test_argsort.py @@ -58,8 +58,9 @@ (DIM, DIM, DIM), ] -SORT_TYPES = ["quicksort", "mergesort", "stable"] -UNSTABLE_SORT_TYPE = ["heapsort"] +STABLE_SORT_TYPES = ["stable", "mergesort"] +UNSTABLE_SORT_TYPES = ["heapsort", "quicksort"] +SORT_TYPES = STABLE_SORT_TYPES + UNSTABLE_SORT_TYPES class TestArgSort(object): @@ -137,7 +138,7 @@ def test_basic_axis(self, size): assert np.array_equal(res_num, res_np) @pytest.mark.parametrize("size", SIZES) - @pytest.mark.parametrize("sort_type", SORT_TYPES) + @pytest.mark.parametrize("sort_type", STABLE_SORT_TYPES) def test_basic_axis_sort_type(self, size, sort_type): arr_np = np.random.randint(-100, 100, size) arr_num = num.array(arr_np) @@ -146,13 +147,14 @@ def test_basic_axis_sort_type(self, size, sort_type): res_num = num.argsort(arr_num, axis=axis, kind=sort_type) assert np.array_equal(res_num, res_np) - @pytest.mark.xfail @pytest.mark.parametrize("size", SIZES) - @pytest.mark.parametrize("sort_type", UNSTABLE_SORT_TYPE) + @pytest.mark.parametrize("sort_type", UNSTABLE_SORT_TYPES) def test_basic_axis_sort_type_unstable(self, size, sort_type): - # intermittent failed due to - # https://github.com/nv-legate/cunumeric/issues/782 - arr_np = np.random.randint(-100, 100, size) + # have to guarantee unique values in input + # see https://github.com/nv-legate/cunumeric/issues/782 + arr_np = np.arange(np.prod(size)) + np.random.shuffle(arr_np) + arr_np = arr_np.reshape(size) arr_num = num.array(arr_np) for axis in range(-arr_np.ndim + 1, arr_np.ndim): res_np = np.argsort(arr_np, axis=axis, kind=sort_type) @@ -171,7 +173,7 @@ def test_arr_basic_axis(self, size): assert np.array_equal(arr_np_copy, arr_num_copy) @pytest.mark.parametrize("size", SIZES) - @pytest.mark.parametrize("sort_type", SORT_TYPES) + @pytest.mark.parametrize("sort_type", STABLE_SORT_TYPES) def test_arr_basic_axis_sort(self, size, sort_type): arr_np = np.random.randint(-100, 100, size) arr_num = num.array(arr_np) @@ -182,13 +184,14 @@ def test_arr_basic_axis_sort(self, size, sort_type): arr_num_copy.argsort(axis=axis, kind=sort_type) assert np.array_equal(arr_np_copy, arr_num_copy) - @pytest.mark.xfail @pytest.mark.parametrize("size", SIZES) - @pytest.mark.parametrize("sort_type", UNSTABLE_SORT_TYPE) + @pytest.mark.parametrize("sort_type", UNSTABLE_SORT_TYPES) def test_arr_basic_axis_sort_unstable(self, size, sort_type): - # intermittent failed due to - # 
https://github.com/nv-legate/cunumeric/issues/782 - arr_np = np.random.randint(-100, 100, size) + # have to guarantee unique values in input + # see https://github.com/nv-legate/cunumeric/issues/782 + arr_np = np.arange(np.prod(size)) + np.random.shuffle(arr_np) + arr_np = arr_np.reshape(size) arr_num = num.array(arr_np) for axis in range(-arr_num.ndim + 1, arr_num.ndim): arr_np_copy = arr_np diff --git a/tests/integration/test_random_creation.py b/tests/integration/test_random_creation.py index a69d96b11..f4bdecec7 100644 --- a/tests/integration/test_random_creation.py +++ b/tests/integration/test_random_creation.py @@ -246,12 +246,15 @@ def test_randint_float_range(low, high): @pytest.mark.xfail( - not EAGER_TEST, reason="cuNumeric raises NotImplementedError" + not num.runtime.has_curand or not EAGER_TEST, + reason="cuNumeric raises NotImplementedError", ) @pytest.mark.parametrize("size", ALL_RNG_SIZES, ids=str) @pytest.mark.parametrize("low, high", [(1000, 65535), (0, 1024)], ids=str) @pytest.mark.parametrize("dtype", UINT_DTYPES, ids=str) def test_randint_uint(low, high, dtype, size): + # NotImplementedError: cunumeric.random.randint must be given an integer + # dtype # NotImplementedError: type for random.integers has to be int64 or int32 # or int16 arr_np, arr_num = gen_random_from_both( @@ -287,7 +290,8 @@ def test_randint_distribution(low, high, size, dtype): @pytest.mark.xfail( - not EAGER_TEST, reason="cuNumeric raises NotImplementedError" + not num.runtime.has_curand or not EAGER_TEST, + reason="cuNumeric raises NotImplementedError", ) @pytest.mark.parametrize("size", (1024, 1025)) def test_randint_bool(size): @@ -297,6 +301,9 @@ def test_randint_bool(size): arr_num, np.mean(arr_np), np.std(arr_np), mean_tol=0.05 ) # NumPy pass + # cuNumeric not num.runtime.has_curand: + # NotImplementedError: cunumeric.random.randint must be given an integer + # dtype # cuNumeric LEGATE_TEST=1 or size > 1024: # NotImplementedError: type for random.integers has to be int64 or int32 # or int16 From 780fd40ffc70536698399fed45cc55f67ec21c39 Mon Sep 17 00:00:00 2001 From: Andrei Schaffer <37386037+aschaffer@users.noreply.github.com> Date: Mon, 14 Aug 2023 16:48:54 -0500 Subject: [PATCH 22/33] Quantile Implementation (#664) * Quantile Python implementation and test. * Added boilerplate integration code for quantile. * fixed ceil() call * Added 1st quantile pytest. * Fixed 1st quantile pytest: added missing main. * Fixed global dtype to numpy.dtype. * Replaced product() with numpy.product(). * Debugging 'nearest'. Failed attempts, but some useful ways to simplify the repro. * Simplified 'nearest' implementation. * Fix for 'nearest' implementation. * Added more quantile tests. * Clean-up. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * FIxed module.py formatting issues. * FIxed test_quantiles.py formatting issues. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adrressed comments on re-using 'out' if given. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed 'out' management for scalar input. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix for keepdims with multiple axes. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests to check for the supplied output case. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added the keepdims = True case for out is not None. Caveat: there's a numpy bug associated with it: https://github.com/numpy/numpy/issues/22544 * Addressed comment on scalar input asarray(). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comment on scalar input np.asarray(). * Addressed comment on add_boilerplate. * Fixed LEGATE_MAX_DIM issues in tests. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Attempts to fix ndenumerate() approach. * Attempts to fix ndenumerate() approach. More debugging. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed ndenumerate, non-flattening approach. * Clean-up (flake8). * Addressed comment on directly using qs_all for result. * Addressed comment on input_is_scalar. * Addressed comment on replacing take([pos],...) by just take(pos,...). * Addressed comment on always using asarray(q). * Addressed comment on removing is_sorted() check. * Addressed comment on more pythonic list comprehension in any(). * Addressed comment on using math.floor(). * Addressed comment on using sorted() instead of sort(). * Addressed comment on removing last 2 args, to be deprecated. * Removed docstring on deprecated 'interpolation' arg. * Addressed comment on qs_all[...]. * Addressed comment on in-place update of qs_all (avoids copies). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comment on overwrite_input test. Added. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comment on using allclose(). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed keepdims=True failures. * Addressed comment on adding multiple consecutive axes test, (0, 1). * Added test to debug axis=None case. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Partial fix for axis=None failures. * Fix for axis=None failures. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed review on not doing all(shapes). * Addressed review on not returning empty tuple from in_place_interp(). * Added tests for random input. * Addressed review on throwing on complex input. * Addressed review on float128. * Addressed reviews on better replacement for in_place_interp() and using TypeError. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed reviews on axes_set confusing apparent dependency on arr.shape and remaining_shape as tuple of 1s. * Completely removed the artificial dependency of axes_set to arr.shape. * Addressed comments on replacing np with num; fixed Weibull issue. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comments on mypy function signatures: floor_i(). * Addressed comments on mypy function signatures: method functions. * Addressed comments on mypy function signatures: is_diff(). * Addressed comments on mypy function signatures: reshuffle_reshape(). 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comments on mypy function signatures: quantile_impl(). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comments on mypy function signatures: quantile(). Tentative 1. * Addressed comments on mypy function signatures: quantile(). Tentative 2 (temporary fix, until better axis type can be provided). * Addressed comments on mypy function signatures: quantile(). Tentative 3: adjust quantile_impl() signature. * Addressed comments on mypy function signatures: quantile(). * Addressed reviews on corner cases: quantile limits {0.0, 1.0}. * Possible solution for the case of a <- scalar, list, or tuple. Attempt 1. * Better solution for the case of a <- scalar, list, or tuple. * Addressed review on testing output cast, but functionality awaits bug-fixes in numpy. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed review on annotating a: ndarray and removing the asarray() coercion. * Re-activated skipped test to, at least, run the output cast, w/o checking, for now, due to numpy issue 22766. * Re-activated skipped output downcast test from float64 to float32 (rather than int, which fails in numpy, due to numpy issue 22766). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cosmetic changes * Use np instead of num * Use num instead of cu * Make sure all test input arrays are vanilla NumPy arrays Because cuNumeric supports the __array_function__ protocol, if you pass a cuNumeric array to a vanilla NumPy method, it will dispatch through to the cuNumeric implementation, meaning that np.foo(arr) and cn.foo(arr) end up doing the same thing, and we're not testing anything by comparing them. * Fix typing of axis * Minor cleanups * Clean up some comments * No need for helper method for tuple comparison * Clean up docs * Typo in docs * Fix for upper-bound quantile on averaged_interpolated_cdf. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added seeding to quantile tests. * Added percentile() main function. * Percentile. Fix. Test failing. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed args fwd-ing. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added profiling test for quantiles. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Increased dimension of input array. * Fixed cupy failure with q.ndim > 1. * Refactored for performance. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced deprecated np.product(). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed typo in str_method. 
* Fix pre-commit complaints --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Manolis Papadakis Co-authored-by: Andrei Schaffer Co-authored-by: Andrei Schaffer Co-authored-by: Andrei Schaffer --- cunumeric/module.py | 696 +++++++++++++++++++++++ docs/cunumeric/source/api/statistics.rst | 10 + examples/quantiles.py | 191 +++++++ tests/integration/test_percentiles.py | 159 ++++++ tests/integration/test_quantiles.py | 463 +++++++++++++++ 5 files changed, 1519 insertions(+) create mode 100644 examples/quantiles.py create mode 100644 tests/integration/test_percentiles.py create mode 100644 tests/integration/test_quantiles.py diff --git a/cunumeric/module.py b/cunumeric/module.py index 1af26e4ef..d33e59900 100644 --- a/cunumeric/module.py +++ b/cunumeric/module.py @@ -7160,6 +7160,702 @@ def bincount( return out +# Quantiles + + +# account for 0-based indexing +# there's no negative numbers +# arithmetic at this level, +# (pos, k) are always positive! +# +def floor_i(k: int | float) -> int: + j = k - 1 if k > 0 else 0 + return int(j) + + +# Generic rule: if `q` input value falls onto a node, then return that node + + +# Discontinuous methods: +# +# 'inverted_cdf' +# q = quantile input \in [0, 1] +# n = sizeof(array) +# +def inverted_cdf(q: float, n: int) -> tuple[float, int]: + pos = q * n + k = math.floor(pos) + + g = pos - k + gamma = 1.0 if g > 0 else 0.0 + + j = int(k) - 1 + if j < 0: + return (0.0, 0) + else: + return (gamma, j) + + +# 'averaged_inverted_cdf' +# +def averaged_inverted_cdf(q: float, n: int) -> tuple[float, int]: + pos = q * n + k = math.floor(pos) + + g = pos - k + gamma = 1.0 if g > 0 else 0.5 + + j = int(k) - 1 + if j < 0: + return (0.0, 0) + elif j >= n - 1: + return (1.0, n - 2) + else: + return (gamma, j) + + +# 'closest_observation' +# +def closest_observation(q: float, n: int) -> tuple[float, int]: + # p = q*n - 0.5 + # pos = 0 if p < 0 else p + + # weird departure from paper + # (bug?), but this fixes it: + # also, j even in original paper + # applied to 1-based indexing; we have 0-based! + # numpy impl. doesn't account that the original paper used + # 1-based indexing, 0-based j is still checked for evennes! 
+ # (see proof in quantile_policies.py) + # + p0 = q * n - 0.5 + p = p0 - 1.0 + + pos = 0 if p < 0 else p0 + k = math.floor(pos) + + j = floor_i(k) + gamma = 1 if k < pos else (0 if j % 2 == 0 else 1) + + return (gamma, j) + + +# Continuous methods: +# +# Parzen method: +# 'interpolated_inverted_cdf' +# +def interpolated_inverted_cdf(q: float, n: int) -> tuple[float, int]: + pos = q * n + k = math.floor(pos) + # gamma = pos-k + # this fixes it: + # + gamma = 0.0 if k == 0 else pos - k + j = floor_i(k) + return (gamma, j) + + +# Hazen method: +# 'hazen' +# +def hazen(q: float, n: int) -> tuple[float, int]: + pos = q * n + 0.5 + k = math.floor(pos) + # gamma = pos-k + # + # this fixes it: + # (when pos > n: this actually selects the right point, + # which is the correct choice, because right = arr[n] + # gets invalidated) + # + gamma = 0.0 if (pos < 1 or pos > n) else pos - k + + j = floor_i(k) + return (gamma, j) + + +# Weibull method: +# 'weibull' +# +def weibull(q: float, n: int) -> tuple[float, int]: + pos = q * (n + 1) + + k = math.floor(pos) + # gamma = pos-k + # + # this fixes it: + # (when pos > n: this actually selects the right point, + # which is the correct choice, because right = arr[n] + # gets invalidated) + # + gamma = 0.0 if (pos < 1 or pos > n) else pos - k + + j = floor_i(k) + + if j >= n: + j = n - 1 + + return (gamma, j) + + +# Gumbel method: +# 'linear' +# +def linear(q: float, n: int) -> tuple[float, int]: + pos = q * (n - 1) + 1 + k = math.floor(pos) + # gamma = pos-k + # + # this fixes it: + # (when pos > n: this actually selects the right point, + # which is the correct choice, because right = arr[n] + # gets invalidated) + # + gamma = 0.0 if (pos < 1 or pos > n) else pos - k + + j = floor_i(k) + return (gamma, j) + + +# Johnson & Kotz method: +# 'median_unbiased' +# +def median_unbiased(q: float, n: int) -> tuple[float, int]: + fract = 1.0 / 3.0 + pos = q * (n + fract) + fract + k = math.floor(pos) + + # gamma = pos-k + # + # this fixes it: + # (when pos > n: this actually selects the right point, + # which is the correct choice, because right = arr[n] + # gets invalidated) + # + gamma = 0.0 if (pos < 1 or pos > n) else pos - k + + j = floor_i(k) + return (gamma, j) + + +# Blom method: +# 'normal_unbiased' +# +def normal_unbiased(q: float, n: int) -> tuple[float, int]: + fract1 = 0.25 + fract2 = 3.0 / 8.0 + pos = q * (n + fract1) + fract2 + k = math.floor(pos) + + # gamma = pos-k + # + # this fixes it: + # (when pos > n: this actually selects the right point, + # which is the correct choice, because right = arr[n] + # gets invalidated) + # + gamma = 0.0 if (pos < 1 or pos > n) else pos - k + + j = floor_i(k) + return (gamma, j) + + +# `lower` +# +def lower(q: float, n: int) -> tuple[float, int]: + gamma = 0.0 + pos = q * (n - 1) + k = math.floor(pos) + + j = int(k) + return (gamma, j) + + +# `higher` +# +def higher(q: float, n: int) -> tuple[float, int]: + pos = q * (n - 1) + k = math.floor(pos) + + # Generic rule: (k == pos) + gamma = 0.0 if (pos == 0 or k == pos) else 1.0 + + j = int(k) + return (gamma, j) + + +# `midpoint` +# +def midpoint(q: float, n: int) -> tuple[float, int]: + pos = q * (n - 1) + k = math.floor(pos) + + # Generic rule: (k == pos) + gamma = 0.0 if (pos == 0 or k == pos) else 0.5 + + j = int(k) + return (gamma, j) + + +# `nearest` +# +def nearest(q: float, n: int) -> tuple[float, int]: + pos = q * (n - 1) + + # k = floor(pos) + # gamma = 1.0 if pos - k >= 0.5 else 0.0 + + k = np.round(pos) + gamma = 0.0 + + j = int(k) + return (gamma, j) + + 
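# Worked example (illustrative, not part of the patch): every estimator above
# maps (q, n) to a pair (gamma, j), and quantile_impl below turns that into
# the interpolation (1 - gamma) * sorted_arr[j] + gamma * sorted_arr[j + 1],
# guarding the j + 1 access when it would run past the end. Using the same
# formulas as linear() and floor_i() on a sorted array of n = 5 values:
import math

def linear_rule(q, n):
    pos = q * (n - 1) + 1
    k = math.floor(pos)
    gamma = 0.0 if (pos < 1 or pos > n) else pos - k
    j = int(k) - 1 if k > 0 else 0
    return gamma, j

vals = [10.0, 20.0, 30.0, 40.0, 50.0]
gamma, j = linear_rule(0.375, len(vals))            # gamma = 0.5, j = 1
q375 = (1 - gamma) * vals[j] + gamma * vals[j + 1]  # 25.0, same as numpy.quantile(vals, 0.375)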
+# for the case when axis = tuple (non-singleton) +# reshuffling might have to be done (if tuple is non-consecutive) +# and the src array must be collapsed along that set of axes +# +# args: +# +# arr: [in] source nd-array on which quantiles are calculated; +# axes_set: [in] tuple or list of axes (indices less than arr dimension); +# +# return: pair: (minimal_index, reshuffled_and_collapsed source array) +def reshuffle_reshape( + arr: ndarray, axes_set: Iterable[int] +) -> tuple[int, ndarray]: + ndim = len(arr.shape) + + sorted_axes = tuple(sorted(axes_set)) + + min_dim_index = sorted_axes[0] + num_axes = len(sorted_axes) + reshuffled_axes = tuple(range(min_dim_index, min_dim_index + num_axes)) + + non_consecutive = sorted_axes != reshuffled_axes + if non_consecutive: + arr_shuffled = moveaxis(arr, sorted_axes, reshuffled_axes) + else: + arr_shuffled = arr + + # shape_reshuffled = arr_shuffled.shape # debug + collapsed_shape = np.prod([arr_shuffled.shape[i] for i in reshuffled_axes]) + + redimed = tuple(range(0, min_dim_index + 1)) + tuple( + range(min_dim_index + num_axes, ndim) + ) + reshaped = tuple( + [ + collapsed_shape if k == min_dim_index else arr_shuffled.shape[k] + for k in redimed + ] + ) + + arr_reshaped = arr_shuffled.reshape(reshaped) + return (min_dim_index, arr_reshaped) + + +# args: +# +# arr: [in] source nd-array on which quantiles are calculated; +# preccondition: assumed sorted! +# q_arr: [in] quantile input values nd-array; +# axis: [in] axis along which quantiles are calculated; +# method: [in] func(q, n) returning (gamma, j), +# where = array1D.size; +# keepdims: [in] boolean flag specifying whether collapsed axis +# should be kept as dim=1; +# to_dtype: [in] dtype to convert the result to; +# qs_all: [in/out] result pass through or created (returned) +# +def quantile_impl( + arr: ndarray, + q_arr: npt.NDArray[Any], + axis: Optional[int], + axes_set: Iterable[int], + original_shape: tuple[int, ...], + method: Callable[[float, int], tuple[float, int]], + keepdims: bool, + to_dtype: np.dtype[Any], + qs_all: Optional[ndarray], +) -> ndarray: + ndims = len(arr.shape) + + if axis is None: + n = arr.size + + if keepdims: + remaining_shape = (1,) * len(original_shape) + else: + remaining_shape = () # only `q_arr` dictates shape; + # quantile applied to `arr` seen as 1D; + else: + n = arr.shape[axis] + + # arr.shape -{axis}; if keepdims use 1 for arr.shape[axis]: + # (can be empty []) + # + if keepdims: + remaining_shape = tuple( + 1 if k in axes_set else original_shape[k] + for k in range(0, len(original_shape)) + ) + else: + remaining_shape = tuple( + arr.shape[k] for k in range(0, ndims) if k != axis + ) + + # compose qarr.shape with arr.shape: + # + # result.shape = (q_arr.shape, arr.shape -{axis}): + # + qresult_shape = (*q_arr.shape, *remaining_shape) + + # construct result NdArray, non-flattening approach: + # + if qs_all is None: + qs_all = zeros(qresult_shape, dtype=to_dtype) + else: + # implicit conversion from to_dtype to qs_all.dtype assumed + # + if qs_all.shape != qresult_shape: + raise ValueError("wrong shape on output array") + + for index, q in np.ndenumerate(q_arr): + (gamma, j) = method(q, n) + (left_pos, right_pos) = (j, j + 1) + + # (N-1) dimensional ndarray of left, right + # neighbor values: + # + # non-flattening approach: + # + # extract values at index=left_pos; + arr_1D_lvals = arr.take(left_pos, axis) + arr_vals_shape = arr_1D_lvals.shape + + if right_pos >= n: + # some quantile methods may result in j==(n-1), + # hence (j+1) could surpass array 
boundary; + # + arr_1D_rvals = zeros(arr_vals_shape, dtype=arr_1D_lvals.dtype) + else: + # extract values at index=right_pos; + arr_1D_rvals = arr.take(right_pos, axis) + + # vectorized for axis != None; + # (non-flattening approach) + # + if len(index) == 0: + left = (1.0 - gamma) * arr_1D_lvals.reshape(qs_all.shape) + right = gamma * arr_1D_rvals.reshape(qs_all.shape) + qs_all[...] = left + right + else: + left = (1.0 - gamma) * arr_1D_lvals.reshape(qs_all[index].shape) + right = gamma * arr_1D_rvals.reshape(qs_all[index].shape) + qs_all[index] = left + right + + return qs_all + + +@add_boilerplate("a") +def quantile( + a: ndarray, + q: Union[float, Iterable[float], ndarray], + axis: Union[None, int, tuple[int, ...]] = None, + out: Optional[ndarray] = None, + overwrite_input: bool = False, + method: str = "linear", + keepdims: bool = False, +) -> ndarray: + """ + Compute the q-th quantile of the data along the specified axis. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + q : array_like of float + Quantile or sequence of quantiles to compute, which must be between + 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The default is + to compute the quantile(s) along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single quantile and `axis=None`, then the result + is a scalar. If multiple quantiles are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + Raises + ------ + TypeError + If the type of the input is complex. + + See Also + -------- + numpy.quantile + + Availability + -------- + Multiple GPUs, Multiple CPUs + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 
361-365, 1996 + """ + + dict_methods = { + "inverted_cdf": inverted_cdf, + "averaged_inverted_cdf": averaged_inverted_cdf, + "closest_observation": closest_observation, + "interpolated_inverted_cdf": interpolated_inverted_cdf, + "hazen": hazen, + "weibull": weibull, + "linear": linear, + "median_unbiased": median_unbiased, + "normal_unbiased": normal_unbiased, + "lower": lower, + "higher": higher, + "midpoint": midpoint, + "nearest": nearest, + } + + real_axis: Optional[int] + axes_set: Iterable[int] = [] + original_shape = a.shape + + if axis is not None and isinstance(axis, Iterable): + if len(axis) == 1: + real_axis = axis[0] + a_rr = a + else: + (real_axis, a_rr) = reshuffle_reshape(a, axis) + # What happens with multiple axes and overwrite_input = True ? + # It seems overwrite_input is reset to False; + overwrite_input = False + axes_set = axis + else: + real_axis = axis + a_rr = a + if real_axis is not None: + axes_set = [real_axis] + + # covers both array-like and scalar cases: + # + q_arr = np.asarray(q) + + # in the future k-sort (partition) + # might be faster, for now it uses sort + # arr = partition(arr, k = floor(nq), axis = real_axis) + # but that would require a k-sort call for each `q`! + # too expensive for many `q` values... + # if no axis given then elements are sorted as a 1D array + # + if overwrite_input: + a_rr.sort(axis=real_axis) + arr = a_rr + else: + arr = sort(a_rr, axis=real_axis) + + if arr.dtype.kind == "c": + raise TypeError("input array cannot be of complex type") + + # return type dependency on arr.dtype: + # + # it depends on interpolation method; + # For discontinuous methods returning either end of the interval within + # which the quantile falls, or the other; arr.dtype is returned; + # else, logic below: + # + # if is_float(arr_dtype) && (arr.dtype >= dtype('float64')) then + # arr.dtype + # else + # dtype('float64') + # + # see https://github.com/numpy/numpy/issues/22323 + # + if method in [ + "inverted_cdf", + "closest_observation", + "lower", + "higher", + "nearest", + ]: + to_dtype = arr.dtype + else: + to_dtype = np.dtype("float64") + + # in case dtype("float128") becomes supported: + # + # to_dtype = ( + # arr.dtype + # if (arr.dtype == np.dtype("float128")) + # else np.dtype("float64") + # ) + + res = quantile_impl( + arr, + q_arr, + real_axis, + axes_set, + original_shape, + dict_methods[method], + keepdims, + to_dtype, + out, + ) + + if out is not None: + # out = res.astype(out.dtype) -- conversion done inside impl + return out + else: + return res + + +@add_boilerplate("a") +def percentile( + a: ndarray, + q: Union[float, Iterable[float], ndarray], + axis: Union[None, int, tuple[int, ...]] = None, + out: Optional[ndarray] = None, + overwrite_input: bool = False, + method: str = "linear", + keepdims: bool = False, +) -> ndarray: + """ + Compute the q-th percentile of the data along the specified axis. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + q : array_like of float + Percentile or sequence of percentiles to compute, which must be between + 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The default is + to compute the percentile(s) along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output. 
+ overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + Raises + ------ + TypeError + If the type of the input is complex. + + See Also + -------- + numpy.percentile + + Availability + -------- + Multiple GPUs, Multiple CPUs + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + """ + + q_arr = np.asarray(q) + q01 = q_arr / 100.0 + + return quantile( + a, + q01, + axis, + out=out, + overwrite_input=overwrite_input, + method=method, + keepdims=keepdims, + ) + + @add_boilerplate("x", "weights") def histogram( x: ndarray, diff --git a/docs/cunumeric/source/api/statistics.rst b/docs/cunumeric/source/api/statistics.rst index 99f55567d..7d844d788 100644 --- a/docs/cunumeric/source/api/statistics.rst +++ b/docs/cunumeric/source/api/statistics.rst @@ -20,3 +20,13 @@ Histograms bincount histogram + + +Order statistics +---------------- + +.. autosummary:: + :toctree: generated/ + + quantile + percentile diff --git a/examples/quantiles.py b/examples/quantiles.py new file mode 100644 index 000000000..fbcee5e5e --- /dev/null +++ b/examples/quantiles.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python + +# Copyright 2023 NVIDIA Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse + +import numpy as np +from benchmark import parse_args, run_benchmark + + +def check_quantiles(package, a, q, axis, str_m, q_out): + eps = 1.0e-8 + if package == "cupy": + arr = a.get() + qs_arr = q.get() + else: + arr = a.__array__() + qs_arr = q.__array__() + + np_q_out = np.quantile( + arr, + qs_arr, + axis=axis, + method=str_m, + ) + + print("Checking result...") + if num.allclose(np_q_out, q_out, atol=eps): + print("PASS!") + else: + print("FAIL!") + print("NUMPY : " + str(np_q_out)) + print(package + ": " + str(q_out)) + assert False + + +def run_quantiles( + shape, + axis, + datatype, + lower, + upper, + str_method, + perform_check, + timing, + package, +): + num.random.seed(1729) + newtype = np.dtype(datatype).type + + N = 1 + for e in shape: + N *= e + shape = tuple(shape) + if np.issubdtype(newtype, np.integer): + if lower is None: + lower = 0 + if upper is None: + upper = np.iinfo(newtype).max + a = num.random.randint(low=lower, high=upper, size=N).astype(newtype) + a = a.reshape(shape) + elif np.issubdtype(newtype, np.floating): + a = num.random.random(shape).astype(newtype) + else: + print("UNKNOWN type " + str(newtype)) + assert False + + q = np.array([0.0, 0.37, 0.42, 0.5, 0.67, 0.83, 0.99, 1.0]) + + timer.start() + q_out = num.quantile( + a, + q, + axis=axis, + method=str_method, + ) + total = timer.stop() + + if perform_check: + check_quantiles( + package, + a, + q, + axis, + str_method, + q_out, + ) + else: + # do we need to synchronize? + assert True + if timing: + print("Elapsed Time: " + str(total) + " ms") + return total + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--check", + dest="check", + action="store_true", + help="check the result of the solve", + ) + parser.add_argument( + "-t", + "--time", + dest="timing", + action="store_true", + help="perform timing", + ) + parser.add_argument( + "-s", + "--shape", + type=int, + nargs="+", + default=[1000], + dest="shape", + help="array reshape (default '[100000]')", + ) + parser.add_argument( + "-d", + "--datatype", + type=str, + default="uint32", + dest="datatype", + help="data type (default np.uint32)", + ) + parser.add_argument( + "-l", + "--lower", + type=int, + default=None, + dest="lower", + help="lower bound for integer based arrays (inclusive)", + ) + parser.add_argument( + "-u", + "--upper", + type=int, + default=None, + dest="upper", + help="upper bound for integer based arrays (exclusive)", + ) + parser.add_argument( + "-a", + "--axis", + type=int, + default=None, + dest="axis", + help="sort axis (default None)", + ) + parser.add_argument( + "-m", + "--method", + type=str, + default="linear", + dest="method", + help="quantile interpolation method", + ) + + args, num, timer = parse_args(parser) + + run_benchmark( + run_quantiles, + args.benchmark, + "Quantiles", + ( + args.shape, + args.axis, + args.datatype, + args.lower, + args.upper, + args.method, + args.check, + args.timing, + args.package, + ), + ) diff --git a/tests/integration/test_percentiles.py b/tests/integration/test_percentiles.py new file mode 100644 index 000000000..95f3e0c09 --- /dev/null +++ b/tests/integration/test_percentiles.py @@ -0,0 +1,159 @@ +# Copyright 2023 NVIDIA Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import numpy as np +import pytest +from utils.comparisons import allclose + +import cunumeric as num + +ALL_METHODS = ( + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize("axes", (0, 1, (0, 1), (0, 2))) +@pytest.mark.parametrize( + "qin_arr", (50.0, [0.1, 37.0, 42.0, 67.0, 83.0, 100.0, 49.0, 50.0, 0.0]) +) +@pytest.mark.parametrize("keepdims", (False, True)) +@pytest.mark.parametrize("overwrite_input", (False, True)) +def test_multi_axes(str_method, axes, qin_arr, keepdims, overwrite_input): + eps = 1.0e-8 + arr = np.ndarray( + shape=(2, 3, 4), + buffer=np.array( + [ + 1, + 2, + 2, + 40, + 1, + 1, + 2, + 1, + 0, + 10, + 3, + 3, + 40, + 15, + 3, + 7, + 5, + 4, + 7, + 3, + 5, + 1, + 0, + 9, + ] + ), + dtype=int, + ) + + if num.isscalar(qin_arr): + qs_arr = qin_arr + else: + qs_arr = np.array(qin_arr) + + # cunumeric: + # print("cunumeric axis = %d:"%(axis)) + q_out = num.percentile( + arr, + qs_arr, + axis=axes, + method=str_method, + keepdims=keepdims, + overwrite_input=overwrite_input, + ) + # print(q_out) + + # np: + # print("numpy axis = %d:"%(axis)) + np_q_out = np.percentile( + arr, + qs_arr, + axis=axes, + method=str_method, + keepdims=keepdims, + overwrite_input=overwrite_input, + ) + # print(np_q_out) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.skip(reason="simplified for debugging, only...") +@pytest.mark.parametrize("str_method", ALL_METHODS) +def test_random_simple(str_method): + eps = 1.0e-8 + arr = np.random.random((3, 4, 5)) + + qin_arr = [0.1, 37.0, 42.0, 67.0, 83.0, 100.0, 49.0, 50.0, 0.0] + axes = 0 + + q_out = num.percentile(arr, qin_arr, method=str_method, axis=axes) + np_q_out = np.percentile(arr, qin_arr, method=str_method, axis=axes) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize( + "qin_arr", (50.0, [0.1, 37.0, 42.0, 67.0, 83.0, 100.0, 49.0, 50.0, 0.0]) +) +@pytest.mark.parametrize("axes", (None, 0)) +def test_random_inlined(str_method, qin_arr, axes): + eps = 1.0e-8 + arr = np.random.random((3, 4, 5)) + + qin_arr = [0.1, 37.0, 42.0, 67.0, 83.0, 100.0, 49.0, 50.0, 0.0] + axes = 0 + + q_out = num.percentile(arr, qin_arr, method=str_method, axis=axes) + np_q_out = np.percentile(arr, qin_arr, method=str_method, axis=axes) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(sys.argv)) diff --git a/tests/integration/test_quantiles.py b/tests/integration/test_quantiles.py new file mode 100644 index 000000000..3458a2ea0 --- /dev/null +++ b/tests/integration/test_quantiles.py @@ -0,0 +1,463 @@ +# 
Copyright 2022 NVIDIA Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +import numpy as np +import pytest +from legate.core import LEGATE_MAX_DIM +from utils.comparisons import allclose + +import cunumeric as num + +ALL_METHODS = ( + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize("axes", (0, 1, (0, 1), (0, 2))) +@pytest.mark.parametrize( + "qin_arr", (0.5, [0.001, 0.37, 0.42, 0.67, 0.83, 0.99, 0.39, 0.49, 0.5]) +) +@pytest.mark.parametrize("keepdims", (False, True)) +@pytest.mark.parametrize("overwrite_input", (False, True)) +def test_multi_axes(str_method, axes, qin_arr, keepdims, overwrite_input): + eps = 1.0e-8 + arr = np.ndarray( + shape=(2, 3, 4), + buffer=np.array( + [ + 1, + 2, + 2, + 40, + 1, + 1, + 2, + 1, + 0, + 10, + 3, + 3, + 40, + 15, + 3, + 7, + 5, + 4, + 7, + 3, + 5, + 1, + 0, + 9, + ] + ), + dtype=int, + ) + + if num.isscalar(qin_arr): + qs_arr = qin_arr + else: + qs_arr = np.array(qin_arr) + + # cunumeric: + # print("cunumeric axis = %d:"%(axis)) + q_out = num.quantile( + arr, + qs_arr, + axis=axes, + method=str_method, + keepdims=keepdims, + overwrite_input=overwrite_input, + ) + # print(q_out) + + # np: + # print("numpy axis = %d:"%(axis)) + np_q_out = np.quantile( + arr, + qs_arr, + axis=axes, + method=str_method, + keepdims=keepdims, + overwrite_input=overwrite_input, + ) + # print(np_q_out) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize( + "ls_in", + ( + [[1.0, 0.13, 2.11], [1.9, 9.2, 0.17]], + [ + [1, 1, 0], + [2, 1, 10], + [2, 2, 3], + [40, 1, 3], + [40, 5, 5], + [15, 4, 1], + [3, 7, 0], + [7, 3, 9], + ], + ), +) +@pytest.mark.parametrize("axes", (0, 1)) +@pytest.mark.parametrize("keepdims", (False, True)) +def test_nd_quantile(str_method, ls_in, axes, keepdims): + eps = 1.0e-8 + + arr = np.array(ls_in) + + qs_arr = np.ndarray( + shape=(2, 4), + buffer=np.array([0.001, 0.37, 0.42, 0.5, 0.67, 0.83, 0.99, 0.39]).data, + ) + + # cunumeric: + # print("cunumeric axis = %d:"%(axis)) + q_out = num.quantile( + arr, qs_arr, axis=axes, method=str_method, keepdims=keepdims + ) + # print(q_out) + + # np: + # print("numpy axis = %d:"%(axis)) + np_q_out = np.quantile( + arr, qs_arr, axis=axes, method=str_method, keepdims=keepdims + ) + # print(np_q_out) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize("axes", (0, 1)) +@pytest.mark.parametrize( + "qs_arr", + ( + 0.5, + np.ndarray( + shape=(5, 6), buffer=np.array([x / 30.0 for x in range(0, 30)]) + ), + ), +) 
+@pytest.mark.parametrize("keepdims", (False, True)) +def test_quantiles_w_output(str_method, axes, qs_arr, keepdims): + eps = 1.0e-8 + original_shape = (2, 3, 4) + arr = np.ndarray( + shape=original_shape, + buffer=np.array( + [ + 1, + 2, + 2, + 40, + 1, + 1, + 2, + 1, + 0, + 10, + 3, + 3, + 40, + 15, + 3, + 7, + 5, + 4, + 7, + 3, + 5, + 1, + 0, + 9, + ] + ), + dtype=float, + ) + + # cannot currently run tests with LEGATE_MAX_DIM >= 5 + # (see https://github.com/nv-legate/legate.core/issues/318) + # + if ( + (keepdims is True) + and (num.isscalar(qs_arr) is False) + and (len(qs_arr.shape) > 1) + and (LEGATE_MAX_DIM < 5) + ): + keepdims = False # reset keepdims, else len(result.shape)>4 + + if keepdims: + remaining_shape = [ + 1 if k == axes else original_shape[k] + for k in range(0, len(original_shape)) + ] + else: + remaining_shape = [ + original_shape[k] + for k in range(0, len(original_shape)) + if k != axes + ] + + if num.isscalar(qs_arr): + q_out = num.zeros(remaining_shape, dtype=float) + # np_q_out = np.zeros(remaining_shape, dtype=float) + else: + q_out = num.zeros((*qs_arr.shape, *remaining_shape), dtype=float) + # np_q_out = np.zeros((*qs_arr.shape, *remaining_shape), dtype=float) + + # cunumeric: + # print("cunumeric axis = %d:"%(axis)) + num.quantile( + arr, qs_arr, axis=axes, out=q_out, method=str_method, keepdims=keepdims + ) + # print(q_out) + + # np: + # print("numpy axis = %d:"%(axis)) + # due to numpy bug https://github.com/numpy/numpy/issues/22544 + # out = fails with keepdims = True + # + np_q_out = np.quantile( + arr, + qs_arr, + axis=axes, + # out=np_q_out, + method=str_method, + keepdims=keepdims, + ) + # print(np_q_out) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize( + "qin_arr", (0.5, [0.001, 0.37, 0.42, 0.67, 0.83, 0.99, 0.39, 0.49, 0.5]) +) +@pytest.mark.parametrize("keepdims", (False, True)) +def test_quantiles_axis_none(str_method, qin_arr, keepdims): + eps = 1.0e-8 + arr = np.ndarray( + shape=(2, 3, 4), + buffer=np.array( + [ + 1, + 2, + 2, + 40, + 1, + 1, + 2, + 1, + 0, + 10, + 3, + 3, + 40, + 15, + 3, + 7, + 5, + 4, + 7, + 3, + 5, + 1, + 0, + 9, + ] + ), + dtype=int, + ) + + if num.isscalar(qin_arr): + qs_arr = qin_arr + else: + qs_arr = np.array(qin_arr) + + # cunumeric: + # print("cunumeric axis = %d:"%(axis)) + q_out = num.quantile( + arr, + qs_arr, + method=str_method, + keepdims=keepdims, + ) + # print(q_out) + + # np: + # print("numpy axis = %d:"%(axis)) + np_q_out = np.quantile( + arr, + qs_arr, + method=str_method, + keepdims=keepdims, + ) + # print(np_q_out) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize( + "qin_arr", (0.5, [0.001, 0.37, 0.42, 0.67, 0.83, 0.99, 0.39, 0.49, 0.5]) +) +@pytest.mark.parametrize("axes", (None, 0)) +def test_random_inlined(str_method, qin_arr, axes): + eps = 1.0e-8 + arr = np.random.random((3, 4, 5)) + + q_out = num.quantile(arr, qin_arr, method=str_method, axis=axes) + np_q_out = np.quantile(arr, qin_arr, method=str_method, axis=axes) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +def test_quantile_at_1(str_method): + eps = 1.0e-8 + arr = np.arange(4) + + q_out = 
num.quantile(arr, 1.0, method=str_method) + np_q_out = np.quantile(arr, 1.0, method=str_method) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +def test_quantile_at_0(str_method): + eps = 1.0e-8 + arr = np.arange(4) + + q_out = num.quantile(arr, 0.0, method=str_method) + np_q_out = np.quantile(arr, 0.0, method=str_method) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize( + "qs_arr", + ( + 0.5, + np.ndarray( + shape=(2, 3), buffer=np.array([x / 6.0 for x in range(0, 6)]) + ), + ), +) +@pytest.mark.parametrize("arr", (3, (3,), [3], (2, 1), [2, 1])) +def test_non_ndarray_input(str_method, qs_arr, arr): + eps = 1.0e-8 + + q_out = num.quantile(arr, qs_arr, method=str_method) + np_q_out = np.quantile(arr, qs_arr, method=str_method) + + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + + assert allclose(np_q_out, q_out, atol=eps) + + +@pytest.mark.parametrize("str_method", ALL_METHODS) +@pytest.mark.parametrize( + "qs_arr", + ( + 0.5, + np.ndarray( + shape=(2, 3), buffer=np.array([x / 6.0 for x in range(0, 6)]) + ), + ), +) +@pytest.mark.parametrize("keepdims", (False, True)) +def test_output_conversion(str_method, qs_arr, keepdims): + # + # downcast from float64 to float32, rather than int, until + # numpy issue: https://github.com/numpy/numpy/issues/22766 + # gets addressed + # + eps = 1.0e-8 + + arr = np.arange(4, dtype=np.dtype("float64")) + + # force downcast (`int` fails due to 22766): + # + q_out = num.zeros(np.shape(qs_arr), dtype=np.dtype("float32")) + np_q_out = np.zeros(np.shape(qs_arr), dtype=np.dtype("float32")) + + # temporarily reset keepdims=False due to + # numpy bug https://github.com/numpy/numpy/issues/22544 + # may interfere with checking proper functionality + # + keepdims = False + num.quantile(arr, qs_arr, method=str_method, keepdims=keepdims, out=q_out) + + np.quantile( + arr, qs_arr, method=str_method, keepdims=keepdims, out=np_q_out + ) + + if not num.isscalar(q_out): + assert q_out.shape == np_q_out.shape + assert q_out.dtype == np_q_out.dtype + assert allclose(np_q_out, q_out, atol=eps) + else: + assert abs(q_out - np_q_out) < eps + + +if __name__ == "__main__": + import sys + + np.random.seed(12345) + + sys.exit(pytest.main(sys.argv)) From 821d1c2d8efc2e1be6488796954241788b5d135b Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Tue, 15 Aug 2023 09:27:58 -0700 Subject: [PATCH 23/33] ignore last-failed file (#1026) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 12b2a0e5c..84244ce82 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ cunumeric/install_info.py /_skbuild /_cmake_test_compile .ipynb_checkpoints +.legate-test-last-failed From 0060dc7927226f2c7efd44e01e2786fbaacf17a3 Mon Sep 17 00:00:00 2001 From: Sandeep Datta <128171450+sandeepd-nv@users.noreply.github.com> Date: Tue, 29 Aug 2023 15:59:50 +0530 Subject: [PATCH 24/33] Updated versions.json SHA. 
(#1033)

---
 cmake/versions.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/versions.json b/cmake/versions.json
index d7e1d8133..d1ae134ab 100644
--- a/cmake/versions.json
+++ b/cmake/versions.json
@@ -5,7 +5,7 @@
       "git_url" : "https://github.com/nv-legate/legate.core.git",
       "git_shallow": false,
       "always_download": false,
-      "git_tag" : "4b79075eb5d7035d501c334c87a87939af79abc2"
+      "git_tag" : "14cca04834095553e4d88f503dc4cd35e4072212"
     }
   }
 }

From a7afb95a7214f3c14820d8443a7fc62dc7d218d8 Mon Sep 17 00:00:00 2001
From: AJ Schmidt
Date: Tue, 29 Aug 2023 16:20:02 -0400
Subject: [PATCH 25/33] Use `copy-pr-bot` (#1035)

This PR replaces the `copy_prs` functionality from the `ops-bot` with the
new dedicated `copy-pr-bot` GitHub application.

Thorough documentation for the new `copy-pr-bot` application can be viewed
below.

- https://docs.gha-runners.nvidia.com/apps/copy-pr-bot/

**Important**: `copy-pr-bot` enforces signed commits. If an organization
member opens a PR that contains unsigned commits, it will be deemed
untrusted and therefore require an `/ok to test` comment. See the GitHub
docs [here](https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification)
for information on how to set up commit signing.

Any time a PR is deemed untrusted, it will receive a comment that looks
like this: https://github.com/rapidsai/ci-imgs/pull/63#issuecomment-1688973208.

Every subsequent commit on an untrusted PR will require an additional
`/ok to test` comment.

Any existing PRs that have unsigned commits after this change is merged
will require an `/ok to test` comment for each subsequent commit _or_ the
PR can be rebased to include signed commits as mentioned in the docs
below: https://docs.gha-runners.nvidia.com/cpr/contributors.

This information is all included on the documentation page linked above.

_I've skipped CI on this PR since it's not a change that is tested._

[skip ci]

---
 .github/copy-pr-bot.yaml | 4 ++++
 .github/ops-bot.yaml     | 4 ----
 2 files changed, 4 insertions(+), 4 deletions(-)
 create mode 100644 .github/copy-pr-bot.yaml
 delete mode 100644 .github/ops-bot.yaml

diff --git a/.github/copy-pr-bot.yaml b/.github/copy-pr-bot.yaml
new file mode 100644
index 000000000..895ba83ee
--- /dev/null
+++ b/.github/copy-pr-bot.yaml
@@ -0,0 +1,4 @@
+# Configuration file for `copy-pr-bot` GitHub App
+# https://docs.gha-runners.nvidia.com/apps/copy-pr-bot/
+
+enabled: true
diff --git a/.github/ops-bot.yaml b/.github/ops-bot.yaml
deleted file mode 100644
index 84bbe71f4..000000000
--- a/.github/ops-bot.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-# This file controls which features from the `ops-bot` repository below are enabled.
-# - https://github.com/rapidsai/ops-bot
-
-copy_prs: true

From 2c13393ba277b062dd13f99c2003b77f68db52c1 Mon Sep 17 00:00:00 2001
From: Bryan Van de Ven
Date: Thu, 31 Aug 2023 13:42:00 -0700
Subject: [PATCH 26/33] Update pre-commit mypy (#1037)

* update pre-commit mypy
* do not warn unused ignores

---
 .pre-commit-config.yaml | 2 +-
 pyproject.toml          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1c5451198..1b637e8ae 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: 'v1.4.1'
+    rev: 'v1.5.1'
     hooks:
       - id: mypy
         language: system
diff --git a/pyproject.toml b/pyproject.toml
index 73ebc13c8..04c31fb7a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -70,7 +70,7 @@
 no_implicit_optional = true
 strict_optional = true
 warn_redundant_casts = true
-warn_unused_ignores = true
+warn_unused_ignores = false
 warn_no_return = true
 warn_return_any = false
 warn_unreachable = true

From 8dddd18022055374f51e1728e81a49958908e04a Mon Sep 17 00:00:00 2001
From: XiaLuNV <110973296+XiaLuNV@users.noreply.github.com>
Date: Fri, 1 Sep 2023 09:07:21 +0800
Subject: [PATCH 27/33] enhance test_matrix_power.py/cholesky.py/norm.py (#1038)

* enhance test_matrix_power.py
* enhance test_ch
* [pre-commit.ci] auto fixes from pre-commit.com hooks
  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

---
 cunumeric/linalg/exception.py          |  3 +--
 cunumeric/linalg/linalg.py             |  4 +---
 tests/integration/test_cholesky.py     |  9 +++++++++
 tests/integration/test_matrix_power.py | 21 +++++++++++++++------
 tests/integration/test_norm.py         | 11 +++++++++++
 5 files changed, 37 insertions(+), 11 deletions(-)

diff --git a/cunumeric/linalg/exception.py b/cunumeric/linalg/exception.py
index 2558fbf5c..9a086edb2 100644
--- a/cunumeric/linalg/exception.py
+++ b/cunumeric/linalg/exception.py
@@ -14,5 +14,4 @@
 #
-class LinAlgError(Exception):
-    pass
+from numpy.linalg.linalg import LinAlgError  # noqa: F401
diff --git a/cunumeric/linalg/linalg.py b/cunumeric/linalg/linalg.py
index aaf381889..f3f7eb9fb 100644
--- a/cunumeric/linalg/linalg.py
+++ b/cunumeric/linalg/linalg.py
@@ -207,8 +207,6 @@ def matrix_power(a: ndarray, n: int) -> ndarray:
         a = empty_like(a)
         a[...]
= eye(a.shape[-2], dtype=a.dtype) return a - elif n == 1: - return a.copy() # Invert if necessary if n < 0: @@ -219,7 +217,7 @@ def matrix_power(a: ndarray, n: int) -> ndarray: # Fast paths if n == 1: - return a + return a.copy() elif n == 2: return matmul(a, a) elif n == 3: diff --git a/tests/integration/test_cholesky.py b/tests/integration/test_cholesky.py index 6ed8e35f8..91edbaa7e 100644 --- a/tests/integration/test_cholesky.py +++ b/tests/integration/test_cholesky.py @@ -41,6 +41,15 @@ def test_array_negative_3dim(): num.linalg.cholesky(arr) +def test_array_negative(): + arr = num.random.randint(0, 9, size=(3, 2, 3)) + expected_exc = ValueError + with pytest.raises(expected_exc): + num.linalg.cholesky(arr) + with pytest.raises(expected_exc): + np.linalg.cholesky(arr) + + def test_diagonal(): a = num.eye(10) * 10.0 b = num.linalg.cholesky(a) diff --git a/tests/integration/test_matrix_power.py b/tests/integration/test_matrix_power.py index de4838798..192d01638 100644 --- a/tests/integration/test_matrix_power.py +++ b/tests/integration/test_matrix_power.py @@ -73,18 +73,24 @@ class TestMatrixPowerErrors: def test_matrix_ndim_smaller_than_two(self, ndim): shape = (3,) * ndim a_num = mk_0to1_array(num, shape) - msg = "Expected at least 2d array" - with pytest.raises(num.linalg.LinAlgError, match=msg): + a_np = mk_0to1_array(np, shape) + expected_exc = num.linalg.LinAlgError + with pytest.raises(expected_exc): num.linalg.matrix_power(a_num, 1) + with pytest.raises(expected_exc): + np.linalg.matrix_power(a_np, 1) @pytest.mark.parametrize( "shape", ((2, 1), (2, 2, 1)), ids=lambda shape: f"(shape={shape})" ) def test_matrix_not_square(self, shape): a_num = mk_0to1_array(num, shape) - msg = "Last 2 dimensions of the array must be square" - with pytest.raises(num.linalg.LinAlgError, match=msg): + a_np = mk_0to1_array(np, shape) + expected_exc = num.linalg.LinAlgError + with pytest.raises(expected_exc): num.linalg.matrix_power(a_num, 1) + with pytest.raises(expected_exc): + np.linalg.matrix_power(a_np, 1) @pytest.mark.parametrize( "n", (-1.0, 1.0, [1], None), ids=lambda n: f"(n={n})" @@ -92,9 +98,12 @@ def test_matrix_not_square(self, shape): def test_n_not_int(self, n): shape = (2, 2) a_num = mk_0to1_array(num, shape) - msg = "exponent must be an integer" - with pytest.raises(TypeError, match=msg): + a_np = mk_0to1_array(np, shape) + expected_exc = TypeError + with pytest.raises(expected_exc): num.linalg.matrix_power(a_num, n) + with pytest.raises(expected_exc): + np.linalg.matrix_power(a_np, n) def test_n_negative_int(self): shape = (2, 2) diff --git a/tests/integration/test_norm.py b/tests/integration/test_norm.py index f38ed8804..add546fd6 100644 --- a/tests/integration/test_norm.py +++ b/tests/integration/test_norm.py @@ -141,6 +141,17 @@ def test_axis_invalid_value(self, axis): with pytest.raises(expected_exc): num.linalg.norm(num_arrays[ndim], axis=axis) + def test_axis_out_of_bounds(self): + # raise ValueError("Improper number of dimensions to norm") + expected_exc = ValueError + ndim = 3 + + with pytest.raises(expected_exc): + np.linalg.norm(np_arrays[ndim], ord=1) + + with pytest.raises(expected_exc): + num.linalg.norm(num_arrays[ndim], ord=1) + @pytest.mark.parametrize( "ndim_axis", ((1, None), (2, 0)), From f0141055eb433e51896ef3794191af5410793b65 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Wed, 6 Sep 2023 10:08:02 -0700 Subject: [PATCH 28/33] Don't cast histogram to int64 when density=True (#1042) --- cunumeric/module.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-)

diff --git a/cunumeric/module.py b/cunumeric/module.py
index d33e59900..ad93b675e 100644
--- a/cunumeric/module.py
+++ b/cunumeric/module.py
@@ -7910,7 +7910,7 @@ def histogram(
     --------
     Multiple GPUs, Multiple CPUs
     """
-    result_type = np.dtype(np.int64)
+    result_type: np.dtype[Any] = np.dtype(np.int64)
 
     if np.ndim(bins) > 1:
         raise ValueError("`bins` must be 1d, when an array")
@@ -7996,6 +7996,7 @@ def histogram(
     # handle (density = True):
     #
     if density:
+        result_type = np.dtype(np.float64)
         hist /= sum(hist)
         hist /= bins_array[1:] - bins_array[:-1]

From 3a6ca9ecc084b6ac53c92abcab30928f632ba8c4 Mon Sep 17 00:00:00 2001
From: Manolis Papadakis
Date: Thu, 21 Sep 2023 11:52:07 -0700
Subject: [PATCH 29/33] Explicitly cast result of shift binary operators (#1046)

---
 src/cunumeric/binary/binary_op_util.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/cunumeric/binary/binary_op_util.h b/src/cunumeric/binary/binary_op_util.h
index a0c9540dc..604bc6dc4 100644
--- a/src/cunumeric/binary/binary_op_util.h
+++ b/src/cunumeric/binary/binary_op_util.h
@@ -558,7 +558,7 @@ struct BinaryOp {
   BinaryOp(const std::vector& args) {}
 
-  constexpr decltype(auto) operator()(const T& a, const T& b) const
+  constexpr T operator()(const T& a, const T& b) const
   {
 #if defined(__NVCC__) || defined(__CUDACC__)
     return a << b;
@@ -867,7 +867,7 @@ struct BinaryOp {
   BinaryOp(const std::vector& args) {}
 
-  constexpr decltype(auto) operator()(const T& a, const T& b) const { return a >> b; }
+  constexpr T operator()(const T& a, const T& b) const { return a >> b; }
 };
 
 template

From 892799f56ed91b9dd0ab1e809d670e84cd20a39d Mon Sep 17 00:00:00 2001
From: Manolis Papadakis
Date: Thu, 21 Sep 2023 14:28:27 -0700
Subject: [PATCH 30/33] Reject types that DeferredArray doesn't support (#1044)

* Reject types that DeferredArray doesn't support

IMHO there is little value in pretending that we support these types. We
currently accept them by falling back to EagerArray, and that works for a
little bit, but it is possible that later we will need to convert to a
truly Legion-backed implementation, in which case the failure will pop up
there. Instead we should just reject them from the start.
* Fix unit test --- cunumeric/array.py | 27 ++++++++++--------------- cunumeric/eager.py | 4 ++-- cunumeric/module.py | 4 +++- cunumeric/runtime.py | 7 +------ cunumeric/utils.py | 13 +++++++++--- tests/integration/test_array.py | 23 +++++++++++++++++++-- tests/integration/test_astype.py | 8 -------- tests/integration/test_atleast_nd.py | 15 -------------- tests/integration/test_fill.py | 1 + tests/integration/test_fill_diagonal.py | 2 +- tests/integration/test_logic.py | 2 +- tests/integration/test_matmul.py | 8 +++----- tests/integration/test_repeat.py | 16 --------------- tests/integration/test_sort.py | 18 ----------------- tests/integration/test_split.py | 22 -------------------- tests/unit/cunumeric/test_utils.py | 5 +++-- 16 files changed, 57 insertions(+), 118 deletions(-) diff --git a/cunumeric/array.py b/cunumeric/array.py index b9db174e2..cce24e492 100644 --- a/cunumeric/array.py +++ b/cunumeric/array.py @@ -310,14 +310,9 @@ def __init__( if isinstance(inp, ndarray) ] core_dtype = to_core_dtype(dtype) - if core_dtype is not None: - self._thunk = runtime.create_empty_thunk( - sanitized_shape, core_dtype, inputs - ) - else: - self._thunk = runtime.create_eager_thunk( - sanitized_shape, dtype - ) + self._thunk = runtime.create_empty_thunk( + sanitized_shape, core_dtype, inputs + ) else: self._thunk = thunk self._legate_data: Union[dict[str, Any], None] = None @@ -1665,7 +1660,7 @@ def __rxor__(self, lhs: Any) -> ndarray: # __setattr__ @add_boilerplate("value") - def __setitem__(self, key: Any, value: Any) -> None: + def __setitem__(self, key: Any, value: ndarray) -> None: """__setitem__(key, value, /) Set ``self[key]=value``. @@ -2680,7 +2675,7 @@ def trace( return res @add_boilerplate("rhs") - def dot(self, rhs: Any, out: Union[ndarray, None] = None) -> ndarray: + def dot(self, rhs: ndarray, out: Union[ndarray, None] = None) -> ndarray: """a.dot(rhs, out=None) Return the dot product of this array with ``rhs``. 
@@ -4241,8 +4236,8 @@ def _perform_unary_reduction( def _perform_binary_reduction( cls, op: BinaryOpCode, - one: Any, - two: Any, + one: ndarray, + two: ndarray, dtype: np.dtype[Any], extra_args: Union[tuple[Any, ...], None] = None, ) -> ndarray: @@ -4258,14 +4253,14 @@ def _perform_binary_reduction( broadcast = None common_type = cls.find_common_type(one, two) - one = one._maybe_convert(common_type, args)._thunk - two = two._maybe_convert(common_type, args)._thunk + one_thunk = one._maybe_convert(common_type, args)._thunk + two_thunk = two._maybe_convert(common_type, args)._thunk dst = ndarray(shape=(), dtype=dtype, inputs=args) dst._thunk.binary_reduction( op, - one, - two, + one_thunk, + two_thunk, broadcast, extra_args, ) diff --git a/cunumeric/eager.py b/cunumeric/eager.py index bc83d49eb..680f1b5a1 100644 --- a/cunumeric/eager.py +++ b/cunumeric/eager.py @@ -42,7 +42,7 @@ ) from .deferred import DeferredArray from .thunk import NumPyThunk -from .utils import is_advanced_indexing +from .utils import is_advanced_indexing, is_supported_type if TYPE_CHECKING: import numpy.typing as npt @@ -305,7 +305,7 @@ def to_deferred_array(self) -> DeferredArray: # or whether we need to go up the tree to have it made if self.deferred is None: if self.parent is None: - assert self.runtime.is_supported_type(self.array.dtype) + assert is_supported_type(self.array.dtype) # We are at the root of the tree so we need to # actually make a DeferredArray to use if self.array.size == 1: diff --git a/cunumeric/module.py b/cunumeric/module.py index ad93b675e..c676dc02c 100644 --- a/cunumeric/module.py +++ b/cunumeric/module.py @@ -7136,7 +7136,9 @@ def bincount( # Handle the special case of 0-D array if weights is None: out = zeros((minlength,), dtype=np.dtype(np.int64)) - out[x[0]] = 1 + # TODO: Remove this "type: ignore" once @add_boilerplate can + # propagate "ndarray -> ndarray | npt.ArrayLike" in wrapped sigs + out[x[0]] = 1 # type: ignore [assignment] else: out = zeros((minlength,), dtype=weights.dtype) index = x[0] diff --git a/cunumeric/runtime.py b/cunumeric/runtime.py index 689a38423..a07c0847b 100644 --- a/cunumeric/runtime.py +++ b/cunumeric/runtime.py @@ -177,7 +177,6 @@ def create_wrapped_scalar( future = self.create_scalar(array, shape) assert all(extent == 1 for extent in shape) core_dtype = to_core_dtype(dtype) - assert core_dtype is not None store = self.legate_context.create_store( core_dtype, shape=shape, @@ -260,9 +259,6 @@ def get_next_random_epoch(self) -> int: self.current_random_epoch += 1 return result - def is_supported_type(self, dtype: Union[str, np.dtype[Any]]) -> bool: - return to_core_dtype(dtype) is not None - def get_numpy_thunk( self, obj: Union[ndarray, npt.NDArray[Any]], @@ -416,7 +412,7 @@ def find_or_create_array_thunk( # Check to see if it is a type that we support for doing deferred # execution and big enough to be worth off-loading onto Legion dtype = to_core_dtype(array.dtype) - if dtype is not None and ( + if ( defer or not self.is_eager_shape(array.shape) or self.has_external_attachment(array) @@ -446,7 +442,6 @@ def find_or_create_array_thunk( numpy_array=array if share else None, ) - assert not defer # Make this into an eager evaluated thunk return EagerArray(self, array) diff --git a/cunumeric/utils.py b/cunumeric/utils.py index 0586bb8f3..55a9b8c1e 100644 --- a/cunumeric/utils.py +++ b/cunumeric/utils.py @@ -18,7 +18,7 @@ from functools import reduce from string import ascii_lowercase, ascii_uppercase from types import FrameType -from typing import Any, Callable, 
List, Optional, Sequence, Tuple, Union +from typing import Any, Callable, List, Sequence, Tuple, Union import legate.core.types as ty import numpy as np @@ -43,8 +43,15 @@ } -def to_core_dtype(dtype: Union[str, np.dtype[Any]]) -> Optional[ty.Dtype]: - return SUPPORTED_DTYPES.get(np.dtype(dtype)) +def is_supported_type(dtype: Union[str, np.dtype[Any]]) -> bool: + return np.dtype(dtype) in SUPPORTED_DTYPES + + +def to_core_dtype(dtype: Union[str, np.dtype[Any]]) -> ty.Dtype: + core_dtype = SUPPORTED_DTYPES.get(np.dtype(dtype)) + if core_dtype is None: + raise TypeError(f"cuNumeric does not support dtype={dtype}") + return core_dtype def is_advanced_indexing(key: Any) -> bool: diff --git a/tests/integration/test_array.py b/tests/integration/test_array.py index 7adb8b093..6e03e98df 100755 --- a/tests/integration/test_array.py +++ b/tests/integration/test_array.py @@ -36,6 +36,13 @@ ), ) +UNSUPPORTED_OBJECTS = ( + None, + "somestr", + ["one", "two"], + [("name", "S10"), ("height", float), ("age", int)], +) + def strict_type_equal(a, b): return np.array_equal(a, b) and a.dtype == b.dtype @@ -43,7 +50,7 @@ def strict_type_equal(a, b): @pytest.mark.parametrize( "obj", - (None,) + SCALARS + ARRAYS, + SCALARS + ARRAYS, ids=lambda obj: f"(object={obj})", ) def test_array_basic(obj): @@ -52,6 +59,12 @@ def test_array_basic(obj): assert strict_type_equal(res_np, res_num) +@pytest.mark.parametrize("obj", UNSUPPORTED_OBJECTS) +def test_array_unsupported(obj): + with pytest.raises(TypeError, match="cuNumeric does not support dtype"): + num.array(obj) + + def test_array_ndarray(): obj = [[1, 2], [3, 4]] res_np = np.array(np.array(obj)) @@ -129,7 +142,7 @@ def test_invalid_dtype(self, obj, dtype): @pytest.mark.parametrize( "obj", - (None,) + SCALARS + ARRAYS, + SCALARS + ARRAYS, ids=lambda obj: f"(object={obj})", ) def test_asarray_basic(obj): @@ -138,6 +151,12 @@ def test_asarray_basic(obj): assert strict_type_equal(res_np, res_num) +@pytest.mark.parametrize("obj", UNSUPPORTED_OBJECTS) +def test_asarray_unsupported(obj): + with pytest.raises(TypeError, match="cuNumeric does not support dtype"): + num.array(obj) + + def test_asarray_ndarray(): obj = [[1, 2], [3, 4]] res_np = np.asarray(np.array(obj)) diff --git a/tests/integration/test_astype.py b/tests/integration/test_astype.py index 5a54a7789..725bab21b 100644 --- a/tests/integration/test_astype.py +++ b/tests/integration/test_astype.py @@ -46,14 +46,6 @@ def to_dtype(s): return str(np.dtype(s)) -def test_none(): - arr = None - in_np = num.array(arr) - msg = r"NoneType" - with pytest.raises(TypeError, match=msg): - in_np.astype("b") - - @pytest.mark.parametrize("src_dtype", ALL_TYPES, ids=to_dtype) def test_empty(src_dtype): arr = [] diff --git a/tests/integration/test_atleast_nd.py b/tests/integration/test_atleast_nd.py index 3946cb92f..da67e2de9 100644 --- a/tests/integration/test_atleast_nd.py +++ b/tests/integration/test_atleast_nd.py @@ -43,11 +43,6 @@ def test_atleast_1d_scalar(): assert np.array_equal(np.atleast_1d(a), num.atleast_1d(a)) -def test_atleast_1d_none(): - a = None - assert np.array_equal(np.atleast_1d(a), num.atleast_1d(a)) - - @pytest.mark.parametrize("size", SIZE_CASES, ids=str) def test_atleast_2d(size): a = [np.arange(np.prod(size)).reshape(size)] @@ -60,11 +55,6 @@ def test_atleast_2d_scalar(): assert np.array_equal(np.atleast_2d(a), num.atleast_2d(a)) -def test_atleast_2d_none(): - a = None - assert np.array_equal(np.atleast_2d(a), num.atleast_2d(a)) - - @pytest.mark.parametrize("size", SIZE_CASES, ids=str) def 
test_atleast_3d(size): a = [np.arange(np.prod(size)).reshape(size)] @@ -77,11 +67,6 @@ def test_atleast_3d_scalar(): assert np.array_equal(np.atleast_2d(a), num.atleast_2d(a)) -def test_atleast_3d_none(): - a = None - assert np.array_equal(np.atleast_2d(a), num.atleast_2d(a)) - - # test to run atleast_nd w/ list of arrays @pytest.mark.parametrize("dim", range(1, 4)) def test_atleast_nd(dim): diff --git a/tests/integration/test_fill.py b/tests/integration/test_fill.py index d77ae8e07..134e209f2 100644 --- a/tests/integration/test_fill.py +++ b/tests/integration/test_fill.py @@ -124,6 +124,7 @@ def test_fill_int_to_float() -> None: assert np.array_equal(a_np, a_num) +@pytest.mark.xfail def test_fill_string() -> None: a_list = ["hello", "hi"] a_np = np.array(a_list) diff --git a/tests/integration/test_fill_diagonal.py b/tests/integration/test_fill_diagonal.py index fc18783d3..f50309607 100644 --- a/tests/integration/test_fill_diagonal.py +++ b/tests/integration/test_fill_diagonal.py @@ -97,7 +97,7 @@ def test_dimension_mismatch(self): with pytest.raises(expected_exc): num.fill_diagonal(arr, 5) - @pytest.mark.parametrize("arr", (None, -3, [0], (5))) + @pytest.mark.parametrize("arr", (-3, [0], (5))) def test_arr_invalid(self, arr): arr_np = np.array(arr) arr_num = num.array(arr) diff --git a/tests/integration/test_logic.py b/tests/integration/test_logic.py index 4d15524b1..f969eb168 100644 --- a/tests/integration/test_logic.py +++ b/tests/integration/test_logic.py @@ -67,7 +67,7 @@ def test_out_invalid_shape(self, func_name): func_num(x, out=res_num) -SCALARS = (pytest.param("a string", marks=pytest.mark.xfail), None, False) +SCALARS = (pytest.param("a string", marks=pytest.mark.xfail), False) ARRAYS = ( [1.0, 2.0, 3.0], [1.0 + 0j, 2.0 + 0j, 3.0 + 0j], diff --git a/tests/integration/test_matmul.py b/tests/integration/test_matmul.py index 7a0759e0d..66f6ad89a 100644 --- a/tests/integration/test_matmul.py +++ b/tests/integration/test_matmul.py @@ -149,8 +149,7 @@ def test_out_invalid_shape_DIVERGENCE(self): @pytest.mark.parametrize( ("dtype", "out_dtype", "casting"), - ((None, np.int64, "same_kind"), (float, str, "safe")), - ids=("direct", "intermediate"), + ((None, np.int64, "same_kind"),), ) def test_out_invalid_dtype(self, dtype, out_dtype, casting): expected_exc = TypeError @@ -187,9 +186,8 @@ def test_invalid_casting_dtype(self, casting_dtype): with pytest.raises(expected_exc): num.matmul(A_num, B_num, casting=casting, dtype=dtype) - @pytest.mark.parametrize( - "dtype", (str, pytest.param(float, marks=pytest.mark.xfail)), ids=str - ) + @pytest.mark.xfail + @pytest.mark.parametrize("dtype", (float,), ids=str) def test_invalid_casting(self, dtype): expected_exc = ValueError casting = "unknown" diff --git a/tests/integration/test_repeat.py b/tests/integration/test_repeat.py index 1ea9eadef..3023f97c8 100644 --- a/tests/integration/test_repeat.py +++ b/tests/integration/test_repeat.py @@ -31,22 +31,6 @@ def test_repeats_none(array): num.repeat(array, None) -@pytest.mark.parametrize("repeats", (-3, [], [-3], [2, 3])) -def test_array_none_invalid(repeats): - expected_exc = ValueError - with pytest.raises(expected_exc): - np.repeat(None, repeats) - with pytest.raises(expected_exc): - num.repeat(None, repeats) - - -@pytest.mark.parametrize("repeats", (3, [0], [3], 4.7, [4.7])) -def test_array_none_valid(repeats): - res_num = num.repeat(None, repeats) - res_np = np.repeat(None, repeats) - assert np.array_equal(res_np, res_num) - - @pytest.mark.parametrize("repeats", (-3, 0, 3, 4.7, [], [-3], [0], 
[3], [4.7])) def test_array_empty_repeats_valid(repeats): res_np = np.repeat([], repeats) diff --git a/tests/integration/test_sort.py b/tests/integration/test_sort.py index 81e06d86a..1fdfc2f13 100644 --- a/tests/integration/test_sort.py +++ b/tests/integration/test_sort.py @@ -64,24 +64,6 @@ def test_arr_empty(self, arr): res_num = num.sort(arr) assert np.array_equal(res_num, res_np) - def test_structured_array_order(self): - dtype = [("name", "S10"), ("height", float), ("age", int)] - values = [ - ("Arthur", 1.8, 41), - ("Lancelot", 1.9, 38), - ("Galahad", 1.7, 38), - ] - a_np = np.array(values, dtype=dtype) - a_num = num.array(values, dtype=dtype) - - res_np = np.sort(a_np, order="height") - res_num = num.sort(a_num, order="height") - assert np.array_equal(res_np, res_num) - - res_np = np.sort(a_np, order=["age", "height"]) - res_num = num.sort(a_num, order=["age", "height"]) - assert np.array_equal(res_np, res_num) - def test_axis_out_bound(self): arr = [-1, 0, 1, 2, 10] with pytest.raises(ValueError): diff --git a/tests/integration/test_split.py b/tests/integration/test_split.py index d943f77b7..7646d478b 100644 --- a/tests/integration/test_split.py +++ b/tests/integration/test_split.py @@ -87,18 +87,6 @@ class TestSplitErrors: this class is to test negative cases """ - @pytest.mark.xfail - def test_array_none(self): - expected_exc = AttributeError - with pytest.raises(expected_exc): - np.split(None, 1) - # Numpy raises - # AttributeError: 'NoneType' object has no attribute 'shape' - with pytest.raises(expected_exc): - num.split(None, 1) - # cuNumeric raises - # ValueError: array(()) has less dimensions than axis(0) - @pytest.mark.parametrize("indices", (-2, 0, "hi", 1.0, None)) def test_indices_negative(self, indices): ary = num.arange(10) @@ -135,16 +123,6 @@ def test_axis_bigger(self): with pytest.raises(expected_exc): np.split(ary, 5, axis=axis) - @pytest.mark.parametrize("func_name", ARG_FUNCS) - def test_array_none_different_split(self, func_name): - expected_exc = ValueError - func_num = getattr(num, func_name) - func_np = getattr(np, func_name) - with pytest.raises(expected_exc): - func_np(None, 1) - with pytest.raises(expected_exc): - func_num(None, 1) - @pytest.mark.parametrize("indices", (-2, 0, "hi", 1.0, None)) @pytest.mark.parametrize("func_name", ARG_FUNCS) def test_indices_negative_different_split(self, func_name, indices): diff --git a/tests/unit/cunumeric/test_utils.py b/tests/unit/cunumeric/test_utils.py index d934bca6a..3b3da8bc5 100644 --- a/tests/unit/cunumeric/test_utils.py +++ b/tests/unit/cunumeric/test_utils.py @@ -126,12 +126,13 @@ def test_type_bad(self, value) -> None: @pytest.mark.parametrize("value", EXPECTED_SUPPORTED_DTYPES) def test_supported(self, value) -> None: - assert m.to_core_dtype(value) is not None + m.to_core_dtype(value) # This is just a representative sample, not exhasutive @pytest.mark.parametrize("value", [np.float128, np.datetime64, [], {}]) def test_unsupported(self, value) -> None: - assert m.to_core_dtype(value) is None + with pytest.raises(TypeError): + m.to_core_dtype(value) @pytest.mark.parametrize( From b76a58a84a4aa5c1bd8a66ae3a705d78372a1ae2 Mon Sep 17 00:00:00 2001 From: Manolis Papadakis Date: Fri, 22 Sep 2023 09:32:36 -0700 Subject: [PATCH 31/33] Remove use of deprecated np.find_common_type (#1045) --- cunumeric/array.py | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/cunumeric/array.py b/cunumeric/array.py index cce24e492..9a172305c 100644 --- a/cunumeric/array.py +++ 
b/cunumeric/array.py @@ -3983,33 +3983,28 @@ def _get_where_thunk( return where._thunk @staticmethod - def find_common_type(*args: Any) -> np.dtype[Any]: - """Determine common type following standard coercion rules. + def find_common_type(*args: ndarray) -> np.dtype[Any]: + """Determine common type following NumPy's coercion rules. Parameters ---------- - \\*args : - A list of dtypes or dtype convertible objects representing arrays - or scalars. - + *args : ndarray + A list of ndarrays Returns ------- datatype : data-type - The common data type, which is the maximum of the array types, - ignoring any scalar types , unless the maximum scalar type is of a - different kind (`dtype.kind`). If the kind is not understood, then - None is returned. - + The type that results from applying the NumPy type promotion rules + to the arguments. """ array_types = list() - scalar_types = list() + scalars = list() for array in args: - if array.size == 1: - scalar_types.append(array.dtype) + if array.ndim == 0: + scalars.append(array.dtype.type(0)) else: array_types.append(array.dtype) - return np.find_common_type(array_types, scalar_types) # type: ignore + return np.result_type(*array_types, *scalars) def _maybe_convert(self, dtype: np.dtype[Any], hints: Any) -> ndarray: if self.dtype == dtype: From 0b1195873eeee7bfb22e73e2ee7f1842acc41b2d Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Fri, 22 Sep 2023 10:05:23 -0700 Subject: [PATCH 32/33] Add best practices info to sphinx docs (#1048) * Add best practices info to sphinx docs * Apply suggestions from code review Co-authored-by: Manolis Papadakis * Apply suggestions from code review --------- Co-authored-by: Manolis Papadakis --- docs/cunumeric/source/user/index.rst | 1 + docs/cunumeric/source/user/practices.rst | 411 +++++++++++++++++++++++ 2 files changed, 412 insertions(+) create mode 100644 docs/cunumeric/source/user/practices.rst diff --git a/docs/cunumeric/source/user/index.rst b/docs/cunumeric/source/user/index.rst index 3d7f18887..d64a74487 100644 --- a/docs/cunumeric/source/user/index.rst +++ b/docs/cunumeric/source/user/index.rst @@ -7,4 +7,5 @@ User guide installation usage configuration + practices notebooks diff --git a/docs/cunumeric/source/user/practices.rst b/docs/cunumeric/source/user/practices.rst new file mode 100644 index 000000000..7da9c3b47 --- /dev/null +++ b/docs/cunumeric/source/user/practices.rst @@ -0,0 +1,411 @@ +.. _practices: + +Best practices +============== + +General Recommendations +----------------------- + +Following the basics of numpy as documented +`here `_ is highly recommended. Here +we highlight some of the best practices for cuNumeric to avoid commonly +encountered problems related to performance. In general, array-based +computations are recommended. + +Availability of each API (e.g., single CPU or Multiple GPUs/Multiple CPUs, +etc.) is noted in the docstring of the API. This would be useful to know while +designing the application since it can impact the scalability. + +Guidelines on using cuNumeric APIs +---------------------------------- + +Array Creation +~~~~~~~~~~~~~~ + +Create a cuNumeric array from data structures native to Python like lists, +tuples, etc., and operate on the cuNumeric array, as shown in the example +below. Find more details on this here: + +.. https://numpy.org/doc/stable/user/basics.creation.html + +.. 
code-block:: python + + # Not recommended: Performing large-scale computation using lists + # and other native Python data structures + x = [1, 2, 3] + y = [] + for val in x: + y.append(val + 2) + + # Recommended: Create a cuNumeric array and use array-based operations + y = np.array(x) + y = x + 2 + + +In the example below, the function ``transform`` is defined to operate on +scalars. But it can also be used on an array to linearly transform its elements, +thus performing an array-based operation. + +.. code-block:: python + + import cunumeric as np + + def transform(input): + return (input + 3) * 4 + + x = np.linspace(start=0, stop=10, num=11) + + # Acceptable options + y = transform(x) + # or + y = (x + 3) * 4 + +Indexing +~~~~~~~~ + +Use array-based implementations as much as possible, and while doing so, ensure +that some of the best practices given below are followed. + +If a component of the array needs to be set/updated, use an array-based +implementation instead of an explicit loop-based implementation. + +.. code-block:: python + + # x and y are three-dimensional arrays + + # Not recommended: Naive element-wise implementation + for i in range(ny): + for j in range(nx): + x[0, j, i] = y[3, j, i] + + # Recommended: Array-based implementation + x[0] = y[3] + +The same recommendation applies when the value we are setting to is a scalar +or when values are set conditionally. We first form the condition array +corresponding to the conditional and then use that to update the array, +essentially breaking it down to three steps: + +* create the condition array +* update the array corresponding to the `if` statement +* update the array corresponding to the `else` statement while noting that + the condition is flipped for the `else` statement. + +.. code-block:: python + + # x and y are two-dimensional arrays, and we need to update x + # depending on whether y meets a condition or not. + + # Not recommended: Naive element-wise implementation + for i in range(ny): + for j in range(nx): + if (y[j, i] < tol): + x[j, i] = const + else + x[j, i] = 1.0 - const + + # Recommended: Array-based implementation + cond = y < tol + x[cond] = const + x[~cond] = 1.0 - const + +In the example below, using a boolean mask array will be faster than using +indices. For the curious reader, using indices with cuNumeric will require +additional communication that might be undesirable for performance. + +.. code-block:: python + + import cunumeric as np + + # Not recommended: don't use nonzero to get indices + indices = np.nonzero(h < 0) + x[indices] = y[indices] + + # Recommended: Use boolean mask to update the array + cond = h < 0 + x[cond] = y[cond] + + +When the array needs to be updated from another array based on a condition +that they both satisfy, use ``putmask`` for better performance. Unlike the +previous example, here ``x`` is set to twice the value of ``y`` when the +condition is met. + +.. code-block:: python + + import cunumeric as np + + # We need to update elements of x from y based on a condition + cond = y < tol + + # Acceptable + x[cond] = y[cond] * 2.0 + + # Recommended: use putmask to update elements based on a condition + np.putmask(x, cond, y * 2.0) + +Logic Functions +~~~~~~~~~~~~~~~ + +Setting elements of an array that satisfy multiple conditions to a scalar +should be done using logic functions instead of iterating through a loop. +Here is an example: + +.. 
code-block:: python + + # Not recommended: naive element-wise update to update x + for i in range(ny): + for j in range(nx): + if (first_cond and second_cond): + x[j, i] = const + + # Recommended: Use logical operations. + x[np.logical_and(first_cond, second_cond)] = const + + +Refer to the `documentation for other logical operations `_. + +Mathematical Functions +~~~~~~~~~~~~~~~~~~~~~~ + +When there are nested element-wise operations, it is recommended that they +are translated to array-based operations using equivalent cuNumeric APIs, if +possible. Here is an example: + +.. code-block:: python + + import cunumeric as np + + # Not recommended: Naive element-wise implementation + for i in range(ny): + for j in range(nx): + x[j, i] = max(max(y[j, i], z[j, i]), const) + + # Recommended: Use array-based implementation + x = np.maximum(np.maximum(y, z), const) + + +Array Manipulation Routines +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Reshape +....... + +It's important to note that in our implementation, ``reshape`` returns a copy +of the array rather than a view like numpy, so this deviation can cause +differences in results, as shown in the example below. This additional copy +can also make it run slower, so we recommend using it as sparingly as possible. + +.. code-block:: python + + import cunumeric as np + + x = np.ones((3,4)) + y = x.reshape((12,)) + + y[0] = 42 + + assert x[0,0] == 42 # succeeds in NumPy, fails in cuNumeric + +Stack +..... + +There is a performance penalty to stacking arrays using +`hstack `_ +or +`vstack `_ +because they incur additional copies of data in our implementation. + +I/O Routines +~~~~~~~~~~~~ + +As of 23.07, we recommend using `h5py `_ to perform I/O. + +Guidelines on designing cuNumeric applications +---------------------------------------------- + +Use Output argument +~~~~~~~~~~~~~~~~~~~ + +Whenever possible, use the ``out`` parameter in the APIs, to avoid allocating an +intermediate array in our implementation. + +.. code-block:: python + + import cunumeric as np + + # Acceptable + x = x + y + y = x - y + x = x * y + + # Recommended for better performance + np.add(x, y, out=x) + np.subtract(x, y, out=y) + np.multiply(x, y, out=x) + + +Vectorize +~~~~~~~~~ + +Functions with conditionals that operate on scalars might make array-based +operations less straightforward. The general recommendation in such cases is to +apply the three step process mentioned here where we evaluate the conditional +and then apply it for both the ``if`` and ``else`` statements. Here is an +example of what approaches might or might not work. The first and second +options have ``if`` and ``else`` clauses written out as separate array-based +operations while the third option (using the API ``where``) includes them both +in one API. + +.. code-block:: python + + # Works with scalars but not NumPy arrays + def bar(x): + if x < 0: + return x + 1 + else: + return x + 2 + + # Not Recommended for arrays + x = np.array(...) 
+
+Guidelines on designing cuNumeric applications
+----------------------------------------------
+
+Use Output argument
+~~~~~~~~~~~~~~~~~~~
+
+Whenever possible, use the ``out`` parameter in the APIs to avoid allocating
+an intermediate array in our implementation.
+
+.. code-block:: python
+
+    import cunumeric as np
+
+    # Acceptable
+    x = x + y
+    y = x - y
+    x = x * y
+
+    # Recommended for better performance
+    np.add(x, y, out=x)
+    np.subtract(x, y, out=y)
+    np.multiply(x, y, out=x)
+
+
+Vectorize
+~~~~~~~~~
+
+Functions with conditionals that operate on scalars might make array-based
+operations less straightforward. The general recommendation in such cases is
+to apply the three-step process described in the Indexing section, where we
+evaluate the conditional and then apply it for both the ``if`` and ``else``
+statements. Here is an example of which approaches might or might not work.
+The first and second options have the ``if`` and ``else`` clauses written out
+as separate array-based operations, while the third option (using the API
+``where``) includes them both in one API call.
+
+.. code-block:: python
+
+    # Works with scalars but not with arrays
+    def bar(x):
+        if x < 0:
+            return x + 1
+        else:
+            return x + 2
+
+    # Not recommended for arrays
+    x = np.array(...)
+    y = bar(x)  # doesn't work
+
+    # Recommended (1): Use array-based operations
+    cond = x < 0
+    x[cond] += 1
+    x[~cond] += 2
+
+    # Recommended (2): Use array-based operations
+    cond = x < 0
+    np.add(x, 1, where=cond, out=x)
+    np.add(x, 2, where=~cond, out=x)
+
+    # Recommended (3): Use array-based operations
+    cond = x < 0
+    x = np.where(cond, x + 1, x + 2)
+
+
+Merge Tasks
+~~~~~~~~~~~
+
+It is recommended that tasks (e.g., a Python operation like ``z = x + y``
+becomes a task) be large enough to execute for at least a millisecond, to
+mitigate the runtime overheads associated with launching a task. One way to
+make the tasks execute for longer is to merge them when possible. This is
+especially useful for tasks that are really small, on the order of a few
+hundred microseconds or less. Here is an example:
+
+.. code-block:: python
+
+    # x is a 3D array of shape (4, _, _) where only the first three
+    # components need to be updated. cond is a 2D boolean mask derived from h
+    cond = h < 0.0  # h is a two-dimensional array
+
+    # Updating arrays like this is acceptable
+    x[0, cond] = const
+    x[1, cond] = const
+    x[2, cond] = const
+
+    # Merging them into one update is recommended
+    x[0:3, cond] = const
+
+
+Avoid blocking operations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+While this might require more invasive application-level changes, it is often
+recommended that any blocking operation in an iterative loop be delayed as
+much as possible. Blocking can occur when there is a data dependency between
+the tasks being executed and the Python program driving them. In the example
+below, the runtime will be blocked until the result of ``norm < tolerance``
+is available, since ``norm`` needs to be fetched from the processor it is
+computed on to evaluate the conditional.
+
+The current recommended best practice is to design applications such that
+these blocking operations are done as sparingly as possible, as permitted by
+the computations performed inside the iterative loop. This might manifest in
+different ways in applications, so only one illustrative example is provided
+here.
+
+.. code-block:: python
+
+    import cunumeric as np
+
+    # compute() does some computations and returns a multi-dimensional
+    # cuNumeric array. The application stops after the iterative computation
+    # has converged.
+
+    # Acceptable: Performing convergence checks every iteration
+    for i in range(niterations):
+        x_current = compute()
+        if i > 0:
+            norm = np.linalg.norm(x_current - x_prev)
+            if norm < tolerance:
+                break
+        x_prev = x_current.copy()
+
+    # Recommended: Reduce the frequency of convergence checks
+    every_niter = 5
+    for i in range(niterations):
+        x_current = compute()
+        if i > 0 and i % every_niter == 0:
+            norm = np.linalg.norm(x_current - x_prev)
+            if norm < tolerance:
+                break
+
+        # This could potentially be updated one iteration before the
+        # convergence check, but that's not done here
+        x_prev = x_current.copy()
+
+Measurement
+~~~~~~~~~~~
+
+Use Legate's timing tool to measure elapsed time, rather than standard Python
+timers. cuNumeric executes work asynchronously when possible, and a standard
+Python timer will only measure the time taken to launch the work, not the
+time spent in the actual computation. Make sure warm-up iterations,
+initialization, I/O, and other one-time computations are excluded while
+timing iterative computations.
+
+Here is an example of how to measure elapsed time in milliseconds:
+
+.. code-block:: python
+
+    import cunumeric as np
+    from legate.timing import time
+
+    init()  # Initialization step
+
+    # Do a few warm-up iterations
+    for i in range(n_warmup_iters):
+        compute()
+
+    start = time()
+    for i in range(niters):
+        compute()
+    end = time()
+
+    elapsed_millisecs = (end - start) / 1000.0
+
+    dump_data()  # I/O
+
+
+Guidelines for performance benchmarks
+-------------------------------------
+
+Manual partitioning of data for use with message passing from Python (say,
+using the mpi4py package) is discouraged. Measure elapsed time using Legate's
+timing tool (as shown in the example above), making sure to exclude
+initialization steps, warm-up iterations, I/O operations, etc., from the
+timed region.
+
+Ensure that the problem size is large enough to offset the runtime overheads
+associated with tasks. A rule of thumb is that the problem size should be
+large enough for a task granularity of about 1 millisecond (as of release
+23.07).
+
+Operations on small arrays, or on a small subset of a larger array, should be
+merged with similar operations when possible. For example, in some
+applications using structured meshes, boundary conditions are set on a subset
+of the data (at the boundaries only), which typically amounts to a sequence
+of very small operations. When possible, boundary conditions for different
+variables and different boundaries should be combined. In general, merging
+small operations might yield better results.
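+
+As an illustration, the sketch below assumes a structured-mesh application
+whose per-variable fields ``u`` and ``v`` can also be kept together in a
+single array ``q`` of shape ``(nvars, ny, nx)``; the names and layout are
+placeholders, not a prescribed design.
+
+.. code-block:: python
+
+    # Not recommended: one tiny operation per variable and per boundary
+    u[0, :] = const
+    u[-1, :] = const
+    v[0, :] = const
+    v[-1, :] = const
+
+    # Possible alternative: store the variables together in q of shape
+    # (nvars, ny, nx), so each boundary is set for all variables in one
+    # larger operation
+    q[:, 0, :] = const
+    q[:, -1, :] = const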

From 443f751194559729a01dc1ce5dd14407b0f9bbbf Mon Sep 17 00:00:00 2001
From: Irina Demeshko
Date: Mon, 25 Sep 2023 11:30:06 -0700
Subject: [PATCH 33/33] adding 23.09 version to docs (#1049)

---
 docs/cunumeric/source/versions.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/cunumeric/source/versions.rst b/docs/cunumeric/source/versions.rst
index 3df009689..4a21cc9ef 100644
--- a/docs/cunumeric/source/versions.rst
+++ b/docs/cunumeric/source/versions.rst
@@ -10,3 +10,4 @@ Versions
   23.01
   23.03
   23.07
+  23.09