Factor out common test code
WardBrian committed Oct 6, 2023
1 parent fd1f729 commit b66a421
Showing 4 changed files with 47 additions and 148 deletions.
tests/test_1d/test_forward_1d.py: 41 changes (8 additions, 33 deletions)
@@ -105,17 +105,16 @@ def test_1d_t2_forward_CPU(targets: torch.Tensor):
)


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_CPU(N: int) -> None:
def check_t1_forward(N: int, device: str) -> None:
"""
Tests against implementations of the FFT by setting up a uniform grid
over which to call FINUFFT through the API.
"""
g = np.mgrid[:N] * 2 * np.pi / N
g.shape = 1, -1
points = torch.from_numpy(g.reshape(1, -1))
points = torch.from_numpy(g.reshape(1, -1)).to(device)

values = torch.randn(*points[0].shape, dtype=torch.complex128)
values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)

print("N is " + str(N))
print("shape of points is " + str(points.shape))
@@ -140,37 +139,13 @@ def test_t1_forward_CPU(N: int) -> None:


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
"""
Tests against implementations of the FFT by setting up a uniform grid
over which to call FINUFFT through the API.
"""
g = np.mgrid[:N] * 2 * np.pi / N
g.shape = 1, -1
points = torch.from_numpy(g.reshape(1, -1)).to("cuda")

values = torch.randn(*points[0].shape, dtype=torch.complex128).to("cuda")

print("N is " + str(N))
print("shape of points is " + str(points.shape))
print("shape of values is " + str(values.shape))

finufft_out = pytorch_finufft.functional.finufft_type1.apply(
points,
values,
(N,),
)

against_torch = torch.fft.fft(values.reshape(g[0].shape))
def test_t1_forward_CPU(N: int) -> None:
check_t1_forward(N, "cpu")

abs_errors = torch.abs(finufft_out - against_torch)
l_inf_error = abs_errors.max()
l_2_error = torch.sqrt(torch.sum(abs_errors**2))
l_1_error = torch.sum(abs_errors)

assert l_inf_error < 4.5e-5 * N
assert l_2_error < 1e-5 * N**2
assert l_1_error < 1e-5 * N**3
@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
check_t1_forward(N, "cuda")


# @pytest.mark.parametrize("values", cases)
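
Read as a whole, this file's change collapses the separate CPU and CUDA type-1 forward tests into one helper plus two thin parametrized wrappers. Pieced together from the hunks above, the refactored file plausibly reads roughly as below; the imports are assumed from the unchanged top of the file, the Ns values shown are placeholders, and the original print statements are omitted:

import numpy as np
import pytest
import torch

import pytorch_finufft.functional

# Placeholder; the real values live in the unchanged header of the test file
Ns = [5, 10, 25]


def check_t1_forward(N: int, device: str) -> None:
    """
    Tests against implementations of the FFT by setting up a uniform grid
    over which to call FINUFFT through the API.
    """
    g = np.mgrid[:N] * 2 * np.pi / N
    g.shape = 1, -1
    points = torch.from_numpy(g.reshape(1, -1)).to(device)

    values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)

    finufft_out = pytorch_finufft.functional.finufft_type1.apply(
        points,
        values,
        (N,),
    )

    # Reference: the ordinary FFT evaluated on the same uniform grid
    against_torch = torch.fft.fft(values.reshape(g[0].shape))

    abs_errors = torch.abs(finufft_out - against_torch)
    assert abs_errors.max() < 4.5e-5 * N
    assert torch.sqrt(torch.sum(abs_errors**2)) < 1e-5 * N**2
    assert torch.sum(abs_errors) < 1e-5 * N**3


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_CPU(N: int) -> None:
    check_t1_forward(N, "cpu")


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
    check_t1_forward(N, "cuda")
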
tests/test_2d/test_backward_2d.py: 74 changes (23 additions, 51 deletions)
@@ -100,18 +100,19 @@ def test_t1_backward_CPU_values(
assert gradcheck(apply_finufft2d1(modifier, fftshift, isign), inputs)


@pytest.mark.parametrize("N", Ns)
@pytest.mark.parametrize("modifier", length_modifiers)
@pytest.mark.parametrize("fftshift", [False, True])
@pytest.mark.parametrize("isign", [-1, 1])
def test_t1_consolidated_backward_CPU_values(
N: int, modifier: int, fftshift: bool, isign: int
def check_t1_backward(
N: int,
modifier: int,
fftshift: bool,
isign: int,
device: str,
points_or_values: bool,
) -> None:
points = torch.rand((2, N), dtype=torch.float64) * 2 * np.pi
values = torch.randn(N, dtype=torch.complex128)
points = torch.rand((2, N), dtype=torch.float64).to(device) * 2 * np.pi
values = torch.randn(N, dtype=torch.complex128).to(device)

points.requires_grad = False
values.requires_grad = True
points.requires_grad = points_or_values
values.requires_grad = not points_or_values

inputs = (points, values)

@@ -120,7 +121,7 @@ def func(points, values):
points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
)

assert gradcheck(func, inputs)
assert gradcheck(func, inputs, atol=1e-5 * N)


@pytest.mark.parametrize("N", Ns)
@@ -130,20 +131,17 @@ def func(points, values):
def test_t1_consolidated_backward_CPU_points(
N: int, modifier: int, fftshift: bool, isign: int
) -> None:
points = torch.rand((2, N), dtype=torch.float64) * 2 * np.pi
values = torch.randn(N, dtype=torch.complex128)

points.requires_grad = True
values.requires_grad = False
check_t1_backward(N, modifier, fftshift, isign, "cpu", True)

inputs = (points, values)

def func(points, values):
return pytorch_finufft.functional.finufft_type1.apply(
points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
)

assert gradcheck(func, inputs, atol=1e-5 * N)
@pytest.mark.parametrize("N", Ns)
@pytest.mark.parametrize("modifier", length_modifiers)
@pytest.mark.parametrize("fftshift", [False, True])
@pytest.mark.parametrize("isign", [-1, 1])
def test_t1_consolidated_backward_CPU_values(
N: int, modifier: int, fftshift: bool, isign: int
) -> None:
check_t1_backward(N, modifier, fftshift, isign, "cpu", False)


@pytest.mark.parametrize("N", Ns)
@@ -153,20 +151,7 @@ def func(points, values):
def test_t1_consolidated_backward_cuda_values(
N: int, modifier: int, fftshift: bool, isign: int
) -> None:
points = torch.rand((2, N), dtype=torch.float64).to("cuda") * 2 * np.pi
values = torch.randn(N, dtype=torch.complex128).to("cuda")

points.requires_grad = False
values.requires_grad = True

inputs = (points, values)

def func(points, values):
return pytorch_finufft.functional.finufft_type1.apply(
points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
)

assert gradcheck(func, inputs)
check_t1_backward(N, modifier, fftshift, isign, "cuda", False)


@pytest.mark.parametrize("N", Ns)
@@ -176,20 +161,7 @@ def func(points, values):
def test_t1_consolidated_backward_cuda_points(
N: int, modifier: int, fftshift: bool, isign: int
) -> None:
points = torch.rand((2, N), dtype=torch.float64).to("cuda") * 2 * np.pi
values = torch.randn(N, dtype=torch.complex128).to("cuda")

points.requires_grad = True
values.requires_grad = False

inputs = (points, values)

def func(points, values):
return pytorch_finufft.functional.finufft_type1.apply(
points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
)

assert gradcheck(func, inputs, atol=1e-5 * N)
check_t1_backward(N, modifier, fftshift, isign, "cuda", True)


@pytest.mark.parametrize("N", Ns)
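
The backward tests get the same treatment: the four near-identical gradcheck tests (CPU/CUDA, points/values) now delegate to one helper, with the points_or_values flag selecting which input requires gradients. Note the gradcheck tolerance atol=1e-5 * N, previously used only by the points tests, now applies to every combination. A sketch of the helper and one of the thin wrappers, pieced from the hunks above; the imports are assumed and the Ns and length_modifiers values shown are placeholders for the unchanged file header:

import numpy as np
import pytest
import torch
from torch.autograd import gradcheck

import pytorch_finufft.functional

# Placeholders; the real values live in the unchanged header of the test file
Ns = [5, 10]
length_modifiers = [-1, 0, 1]


def check_t1_backward(
    N: int,
    modifier: int,
    fftshift: bool,
    isign: int,
    device: str,
    points_or_values: bool,
) -> None:
    points = torch.rand((2, N), dtype=torch.float64).to(device) * 2 * np.pi
    values = torch.randn(N, dtype=torch.complex128).to(device)

    # True: check gradients w.r.t. points; False: w.r.t. values
    points.requires_grad = points_or_values
    values.requires_grad = not points_or_values

    inputs = (points, values)

    def func(points, values):
        return pytorch_finufft.functional.finufft_type1.apply(
            points, values, (N, N + modifier), None, fftshift, dict(isign=isign)
        )

    # Same tolerance for every device/input combination
    assert gradcheck(func, inputs, atol=1e-5 * N)


@pytest.mark.parametrize("N", Ns)
@pytest.mark.parametrize("modifier", length_modifiers)
@pytest.mark.parametrize("fftshift", [False, True])
@pytest.mark.parametrize("isign", [-1, 1])
def test_t1_consolidated_backward_cuda_points(
    N: int, modifier: int, fftshift: bool, isign: int
) -> None:
    check_t1_backward(N, modifier, fftshift, isign, "cuda", True)
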
tests/test_2d/test_forward_2d.py: 40 changes (8 additions, 32 deletions)
@@ -123,16 +123,15 @@ def test_2d_t2_forward_CPU(N: int) -> None:
# pass


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_CPU(N: int) -> None:
def check_t1_forward(N: int, device: str) -> None:
"""
Tests against implementations of the FFT by setting up a uniform grid
over which to call FINUFFT through the API.
"""
g = np.mgrid[:N, :N] * 2 * np.pi / N
points = torch.from_numpy(g.reshape(2, -1))
points = torch.from_numpy(g.reshape(2, -1)).to(device)

values = torch.randn(*points[0].shape, dtype=torch.complex128)
values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)

print("N is " + str(N))
print("shape of points is " + str(points.shape))
@@ -157,33 +156,10 @@ def test_t1_forward_CPU(N: int) -> None:


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
"""
Tests against implementations of the FFT by setting up a uniform grid
over which to call FINUFFT through the API.
"""
g = np.mgrid[:N, :N] * 2 * np.pi / N
points = torch.from_numpy(g.reshape(2, -1)).to("cuda")

values = torch.randn(*points[0].shape, dtype=torch.complex128).to("cuda")

print("N is " + str(N))
print("shape of points is " + str(points.shape))
print("shape of values is " + str(values.shape))

finufft_out = pytorch_finufft.functional.finufft_type1.apply(
points,
values,
(N, N),
)
def test_t1_forward_CPU(N: int) -> None:
check_t1_forward(N, "cpu")

against_torch = torch.fft.fft2(values.reshape(g[0].shape))

abs_errors = torch.abs(finufft_out - against_torch)
l_inf_error = abs_errors.max()
l_2_error = torch.sqrt(torch.sum(abs_errors**2))
l_1_error = torch.sum(abs_errors)

assert l_inf_error < 4.5e-5 * N
assert l_2_error < 1e-5 * N**2
assert l_1_error < 1e-5 * N**3
@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
check_t1_forward(N, "cuda")
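
This 2d file is factored the same way as the 1d one; only the grid construction, the requested output shape, and the reference transform differ. A compact sketch of the shared helper's core, with imports, Ns, prints, and the CPU/CUDA wrappers as in the 1d sketch above:

def check_t1_forward(N: int, device: str) -> None:
    # Uniform 2d grid, flattened to a (2, N * N) array of nonuniform points
    g = np.mgrid[:N, :N] * 2 * np.pi / N
    points = torch.from_numpy(g.reshape(2, -1)).to(device)
    values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)

    finufft_out = pytorch_finufft.functional.finufft_type1.apply(
        points, values, (N, N)
    )
    # 2d reference transform
    against_torch = torch.fft.fft2(values.reshape(g[0].shape))

    abs_errors = torch.abs(finufft_out - against_torch)
    assert abs_errors.max() < 4.5e-5 * N
    assert torch.sqrt(torch.sum(abs_errors**2)) < 1e-5 * N**2
    assert torch.sum(abs_errors) < 1e-5 * N**3
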
tests/test_3d/test_forward_3d.py: 40 changes (8 additions, 32 deletions)
@@ -84,16 +84,15 @@ def test_3d_t2_forward_CPU(N: int) -> None:
assert l_1_error < 1e-5 * N**4.5


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_CPU(N: int) -> None:
def check_t1_forward(N: int, device: str) -> None:
"""
Tests against implementations of the FFT by setting up a uniform grid
over which to call FINUFFT through the API.
"""
g = np.mgrid[:N, :N, :N] * 2 * np.pi / N
points = torch.from_numpy(g.reshape(3, -1))
points = torch.from_numpy(g.reshape(3, -1)).to(device)

values = torch.randn(*points[0].shape, dtype=torch.complex128)
values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)

print("N is " + str(N))
print("shape of points is " + str(points.shape))
@@ -118,33 +117,10 @@ def test_t1_forward_CPU(N: int) -> None:


@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
"""
Tests against implementations of the FFT by setting up a uniform grid
over which to call FINUFFT through the API.
"""
g = np.mgrid[:N, :N, :N] * 2 * np.pi / N
points = torch.from_numpy(g.reshape(3, -1)).to("cuda")

values = torch.randn(*points[0].shape, dtype=torch.complex128).to("cuda")

print("N is " + str(N))
print("shape of points is " + str(points.shape))
print("shape of values is " + str(values.shape))

finufft_out = pytorch_finufft.functional.finufft_type1.apply(
points,
values,
(N, N, N),
)
def test_t1_forward_CPU(N: int) -> None:
check_t1_forward(N, "cpu")

against_torch = torch.fft.fftn(values.reshape(g[0].shape))

abs_errors = torch.abs(finufft_out - against_torch)
l_inf_error = abs_errors.max()
l_2_error = torch.sqrt(torch.sum(abs_errors**2))
l_1_error = torch.sum(abs_errors)

assert l_inf_error < 1.5e-5 * N**1.5
assert l_2_error < 1e-5 * N**3
assert l_1_error < 1e-5 * N**4.5
@pytest.mark.parametrize("N", Ns)
def test_t1_forward_cuda(N: int) -> None:
check_t1_forward(N, "cuda")
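
The 3d file gets the identical factoring, with torch.fft.fftn as the reference and the looser tolerances carried over from the removed per-device tests. A compact sketch of the shared helper's core, scaffolding as in the 1d sketch above:

def check_t1_forward(N: int, device: str) -> None:
    # Uniform 3d grid, flattened to a (3, N ** 3) array of nonuniform points
    g = np.mgrid[:N, :N, :N] * 2 * np.pi / N
    points = torch.from_numpy(g.reshape(3, -1)).to(device)
    values = torch.randn(*points[0].shape, dtype=torch.complex128).to(device)

    finufft_out = pytorch_finufft.functional.finufft_type1.apply(
        points, values, (N, N, N)
    )
    against_torch = torch.fft.fftn(values.reshape(g[0].shape))

    abs_errors = torch.abs(finufft_out - against_torch)
    # Bounds scale with the N ** 3 problem size, as in the removed tests above
    assert abs_errors.max() < 1.5e-5 * N**1.5
    assert torch.sqrt(torch.sum(abs_errors**2)) < 1e-5 * N**3
    assert torch.sum(abs_errors) < 1e-5 * N**4.5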
