From b66a421c3366c9e6e0e9fec048e18e25dd9571bb Mon Sep 17 00:00:00 2001
From: Brian Ward
Date: Fri, 6 Oct 2023 12:49:12 -0400
Subject: [PATCH] Factor out common test code

---
 tests/test_1d/test_forward_1d.py  | 41 ++++-------------
 tests/test_2d/test_backward_2d.py | 74 ++++++++++-----------------
 tests/test_2d/test_forward_2d.py  | 40 ++++-------------
 tests/test_3d/test_forward_3d.py  | 40 ++++-------------
 4 files changed, 47 insertions(+), 148 deletions(-)

diff --git a/tests/test_1d/test_forward_1d.py b/tests/test_1d/test_forward_1d.py
index 8dc6ff9..eec8a9b 100644
--- a/tests/test_1d/test_forward_1d.py
+++ b/tests/test_1d/test_forward_1d.py
@@ -105,17 +105,16 @@ def test_1d_t2_forward_CPU(targets: torch.Tensor):
     )
 
 
-@pytest.mark.parametrize("N", Ns)
-def test_t1_forward_CPU(N: int) -> None:
+def check_t1_forward(N: int, device: str) -> None:
     """
     Tests against implementations of the FFT by setting up a uniform grid
     over which to call FINUFFT through the API.
     """
     g = np.mgrid[:N] * 2 * np.pi / N
     g.shape = 1, -1
-    points = torch.from_numpy(g.reshape(1, -1))
+    points = torch.from_numpy(g.reshape(1, -1)).to(device)
 
-    values = torch.randn(*points[0].shape, dtype=torch.complex128)
+    values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)
 
     print("N is " + str(N))
     print("shape of points is " + str(points.shape))
@@ -140,37 +139,13 @@ def test_t1_forward_CPU(N: int) -> None:
 
 
 @pytest.mark.parametrize("N", Ns)
-def test_t1_forward_cuda(N: int) -> None:
-    """
-    Tests against implementations of the FFT by setting up a uniform grid
-    over which to call FINUFFT through the API.
-    """
-    g = np.mgrid[:N] * 2 * np.pi / N
-    g.shape = 1, -1
-    points = torch.from_numpy(g.reshape(1, -1)).to("cuda")
-
-    values = torch.randn(*points[0].shape, dtype=torch.complex128).to("cuda")
-
-    print("N is " + str(N))
-    print("shape of points is " + str(points.shape))
-    print("shape of values is " + str(values.shape))
-
-    finufft_out = pytorch_finufft.functional.finufft_type1.apply(
-        points,
-        values,
-        (N,),
-    )
-
-    against_torch = torch.fft.fft(values.reshape(g[0].shape))
+def test_t1_forward_CPU(N: int) -> None:
+    check_t1_forward(N, "cpu")
 
-    abs_errors = torch.abs(finufft_out - against_torch)
-    l_inf_error = abs_errors.max()
-    l_2_error = torch.sqrt(torch.sum(abs_errors**2))
-    l_1_error = torch.sum(abs_errors)
 
-    assert l_inf_error < 4.5e-5 * N
-    assert l_2_error < 1e-5 * N**2
-    assert l_1_error < 1e-5 * N**3
+@pytest.mark.parametrize("N", Ns)
+def test_t1_forward_cuda(N: int) -> None:
+    check_t1_forward(N, "cuda")
 
 
 # @pytest.mark.parametrize("values", cases)
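
The refactor above replaces two near-identical test bodies with thin wrappers
around a single device-aware checker. As a sketch of where this pattern could
go next (hypothetical, not part of this patch), the remaining CPU/CUDA wrapper
pair could itself be collapsed by parametrizing over the device and skipping
CUDA on machines without a GPU; this assumes `Ns` and `check_t1_forward` from
the test module are in scope.

import pytest
import torch

# Hypothetical further consolidation: one test, parametrized over device.
devices = ["cpu"]
if torch.cuda.is_available():
    devices.append("cuda")

@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize("N", Ns)
def test_t1_forward(N: int, device: str) -> None:
    check_t1_forward(N, device)
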
diff --git a/tests/test_2d/test_backward_2d.py b/tests/test_2d/test_backward_2d.py
index 0cf7f71..751f364 100644
--- a/tests/test_2d/test_backward_2d.py
+++ b/tests/test_2d/test_backward_2d.py
@@ -100,18 +100,19 @@ def test_t1_backward_CPU_values(
     assert gradcheck(apply_finufft2d1(modifier, fftshift, isign), inputs)
 
 
-@pytest.mark.parametrize("N", Ns)
-@pytest.mark.parametrize("modifier", length_modifiers)
-@pytest.mark.parametrize("fftshift", [False, True])
-@pytest.mark.parametrize("isign", [-1, 1])
-def test_t1_consolidated_backward_CPU_values(
-    N: int, modifier: int, fftshift: bool, isign: int
+def check_t1_backward(
+    N: int,
+    modifier: int,
+    fftshift: bool,
+    isign: int,
+    device: str,
+    points_or_values: bool,
 ) -> None:
-    points = torch.rand((2, N), dtype=torch.float64) * 2 * np.pi
-    values = torch.randn(N, dtype=torch.complex128)
+    points = torch.rand((2, N), dtype=torch.float64).to(device) * 2 * np.pi
+    values = torch.randn(N, dtype=torch.complex128).to(device)
 
-    points.requires_grad = False
-    values.requires_grad = True
+    points.requires_grad = points_or_values
+    values.requires_grad = not points_or_values
 
     inputs = (points, values)
 
@@ -120,7 +121,7 @@ def func(points, values):
             points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
         )
 
-    assert gradcheck(func, inputs)
+    assert gradcheck(func, inputs, atol=1e-5 * N)
 
 
 @pytest.mark.parametrize("N", Ns)
@@ -130,20 +131,17 @@ def func(points, values):
 def test_t1_consolidated_backward_CPU_points(
     N: int, modifier: int, fftshift: bool, isign: int
 ) -> None:
-    points = torch.rand((2, N), dtype=torch.float64) * 2 * np.pi
-    values = torch.randn(N, dtype=torch.complex128)
-
-    points.requires_grad = True
-    values.requires_grad = False
+    check_t1_backward(N, modifier, fftshift, isign, "cpu", True)
 
-    inputs = (points, values)
-
-    def func(points, values):
-        return pytorch_finufft.functional.finufft_type1.apply(
-            points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
-        )
 
-    assert gradcheck(func, inputs, atol=1e-5 * N)
+@pytest.mark.parametrize("N", Ns)
+@pytest.mark.parametrize("modifier", length_modifiers)
+@pytest.mark.parametrize("fftshift", [False, True])
+@pytest.mark.parametrize("isign", [-1, 1])
+def test_t1_consolidated_backward_CPU_values(
+    N: int, modifier: int, fftshift: bool, isign: int
+) -> None:
+    check_t1_backward(N, modifier, fftshift, isign, "cpu", False)
 
 
 @pytest.mark.parametrize("N", Ns)
@@ -153,20 +151,7 @@ def func(points, values):
 def test_t1_consolidated_backward_cuda_values(
     N: int, modifier: int, fftshift: bool, isign: int
 ) -> None:
-    points = torch.rand((2, N), dtype=torch.float64).to("cuda") * 2 * np.pi
-    values = torch.randn(N, dtype=torch.complex128).to("cuda")
-
-    points.requires_grad = False
-    values.requires_grad = True
-
-    inputs = (points, values)
-
-    def func(points, values):
-        return pytorch_finufft.functional.finufft_type1.apply(
-            points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
-        )
-
-    assert gradcheck(func, inputs)
+    check_t1_backward(N, modifier, fftshift, isign, "cuda", False)
 
 
 @pytest.mark.parametrize("N", Ns)
@@ -176,20 +161,7 @@ def func(points, values):
 def test_t1_consolidated_backward_cuda_points(
     N: int, modifier: int, fftshift: bool, isign: int
 ) -> None:
-    points = torch.rand((2, N), dtype=torch.float64).to("cuda") * 2 * np.pi
-    values = torch.randn(N, dtype=torch.complex128).to("cuda")
-
-    points.requires_grad = True
-    values.requires_grad = False
-
-    inputs = (points, values)
-
-    def func(points, values):
-        return pytorch_finufft.functional.finufft_type1.apply(
-            points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
-        )
-
-    assert gradcheck(func, inputs, atol=1e-5 * N)
+    check_t1_backward(N, modifier, fftshift, isign, "cuda", True)
 
 
 @pytest.mark.parametrize("N", Ns)
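
The consolidated backward checker leans on a gradcheck behavior worth spelling
out: torch.autograd.gradcheck only differentiates inputs with
requires_grad=True, so the points_or_values flag selects which argument's
gradient is verified while the other is held as a constant. Note also that the
helper applies atol=1e-5 * N uniformly, whereas the old values-only tests used
gradcheck's default tolerance. A minimal self-contained sketch of the same
toggle, using a plain FFT in place of the finufft op (`scale_fft` is
illustrative only, not part of the patch):

import torch
from torch.autograd import gradcheck

def scale_fft(scale: torch.Tensor, values: torch.Tensor) -> torch.Tensor:
    return torch.fft.fft(scale * values)

scale = torch.randn(8, dtype=torch.float64)
values = torch.randn(8, dtype=torch.complex128)
scale.requires_grad = True  # only d(output)/d(scale) is checked
assert gradcheck(scale_fft, (scale, values))
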
""" g = np.mgrid[:N, :N] * 2 * np.pi / N - points = torch.from_numpy(g.reshape(2, -1)) + points = torch.from_numpy(g.reshape(2, -1)).to(device) - values = torch.randn(*points[0].shape, dtype=torch.complex128) + values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device) print("N is " + str(N)) print("shape of points is " + str(points.shape)) @@ -157,33 +156,10 @@ def test_t1_forward_CPU(N: int) -> None: @pytest.mark.parametrize("N", Ns) -def test_t1_forward_cuda(N: int) -> None: - """ - Tests against implementations of the FFT by setting up a uniform grid - over which to call FINUFFT through the API. - """ - g = np.mgrid[:N, :N] * 2 * np.pi / N - points = torch.from_numpy(g.reshape(2, -1)).to("cuda") - - values = torch.randn(*points[0].shape, dtype=torch.complex128).to("cuda") - - print("N is " + str(N)) - print("shape of points is " + str(points.shape)) - print("shape of values is " + str(values.shape)) - - finufft_out = pytorch_finufft.functional.finufft_type1.apply( - points, - values, - (N, N), - ) +def test_t1_forward_CPU(N: int) -> None: + check_t1_forward(N, "cpu") - against_torch = torch.fft.fft2(values.reshape(g[0].shape)) - abs_errors = torch.abs(finufft_out - against_torch) - l_inf_error = abs_errors.max() - l_2_error = torch.sqrt(torch.sum(abs_errors**2)) - l_1_error = torch.sum(abs_errors) - - assert l_inf_error < 4.5e-5 * N - assert l_2_error < 1e-5 * N**2 - assert l_1_error < 1e-5 * N**3 +@pytest.mark.parametrize("N", Ns) +def test_t1_forward_cuda(N: int) -> None: + check_t1_forward(N, "cuda") diff --git a/tests/test_3d/test_forward_3d.py b/tests/test_3d/test_forward_3d.py index dd7c2ed..bc148b1 100644 --- a/tests/test_3d/test_forward_3d.py +++ b/tests/test_3d/test_forward_3d.py @@ -84,16 +84,15 @@ def test_3d_t2_forward_CPU(N: int) -> None: assert l_1_error < 1e-5 * N**4.5 -@pytest.mark.parametrize("N", Ns) -def test_t1_forward_CPU(N: int) -> None: +def check_t1_forward(N: int, device: str) -> None: """ Tests against implementations of the FFT by setting up a uniform grid over which to call FINUFFT through the API. """ g = np.mgrid[:N, :N, :N] * 2 * np.pi / N - points = torch.from_numpy(g.reshape(3, -1)) + points = torch.from_numpy(g.reshape(3, -1)).to(device) - values = torch.randn(*points[0].shape, dtype=torch.complex128) + values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device) print("N is " + str(N)) print("shape of points is " + str(points.shape)) @@ -118,33 +117,10 @@ def test_t1_forward_CPU(N: int) -> None: @pytest.mark.parametrize("N", Ns) -def test_t1_forward_cuda(N: int) -> None: - """ - Tests against implementations of the FFT by setting up a uniform grid - over which to call FINUFFT through the API. 
- """ - g = np.mgrid[:N, :N, :N] * 2 * np.pi / N - points = torch.from_numpy(g.reshape(3, -1)).to("cuda") - - values = torch.randn(*points[0].shape, dtype=torch.complex128).to("cuda") - - print("N is " + str(N)) - print("shape of points is " + str(points.shape)) - print("shape of values is " + str(values.shape)) - - finufft_out = pytorch_finufft.functional.finufft_type1.apply( - points, - values, - (N, N, N), - ) +def test_t1_forward_CPU(N: int) -> None: + check_t1_forward(N, "cpu") - against_torch = torch.fft.fftn(values.reshape(g[0].shape)) - abs_errors = torch.abs(finufft_out - against_torch) - l_inf_error = abs_errors.max() - l_2_error = torch.sqrt(torch.sum(abs_errors**2)) - l_1_error = torch.sum(abs_errors) - - assert l_inf_error < 1.5e-5 * N**1.5 - assert l_2_error < 1e-5 * N**3 - assert l_1_error < 1e-5 * N**4.5 +@pytest.mark.parametrize("N", Ns) +def test_t1_forward_cuda(N: int) -> None: + check_t1_forward(N, "cuda")