GridSamplerUtils.h
#pragma once

// See NOTE: [Tensor vs. TensorBase]
// https://github.com/pytorch/pytorch/pull/66979
#include <ATen/core/TensorBase.h>
#include <ATen/native/TensorProperties.h>
#include <ATen/native/CanUse32BitIndexMath.h>

namespace at { namespace native {

namespace detail {

enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic};
enum class GridSamplerPadding {Zeros, Border, Reflection};

} // namespace detail

using detail::GridSamplerInterpolation;
using detail::GridSamplerPadding;
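// Note (added for context, not part of the original file): these enum values
// mirror the string arguments accepted by torch.nn.functional.grid_sample,
// i.e. mode = 'bilinear' | 'nearest' | 'bicubic' and
// padding_mode = 'zeros' | 'border' | 'reflection'.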
namespace {
// See NOTE [ grid_sampler Native Functions ].
void check_grid_sampler_common(
  const TensorBase& input,
  const TensorBase& grid
) {
  auto input_opt = input.options();
  auto grid_opt = grid.options();

  TORCH_CHECK(
    input.defined(),
    "grid_sampler(): expected input to not be undefined");
  TORCH_CHECK(
    grid.defined(),
    "grid_sampler(): expected grid to not be undefined");
  TORCH_CHECK(
    input_opt.device() == grid_opt.device(),
    "grid_sampler(): expected input and grid to be on same device, but input "
    "is on ", input_opt.device(), " and grid is on ", grid_opt.device());
  TORCH_CHECK(
    input_opt.layout() == kStrided && grid_opt.layout() == kStrided,
    "grid_sampler(): expected input and grid to have torch.strided layout, but "
    "input has ", input_opt.layout(), " and grid has ", grid_opt.layout());
  TORCH_CHECK(
    input.size(0) == grid.size(0),
    "grid_sampler(): expected grid and input to have same batch size, but got "
    "input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes());
  TORCH_CHECK(
    grid.size(-1) == input.dim() - 2,
    "grid_sampler(): expected grid to have size ", input.dim() - 2, " in last "
    "dimension, but got grid with sizes ", grid.sizes());

  for (const auto i : c10::irange(2, input.dim())) {
    TORCH_CHECK(input.size(i) > 0,
      "grid_sampler(): expected input to have non-empty spatial dimensions, "
      "but input has sizes ", input.sizes(), " with dimension ", i, " being "
      "empty");
  }
}
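// Shape sketch (added for illustration, not part of the original file): the
// checks above assume the grid_sampler shape convention, e.g. for the 2D case
//   input: (N, C, H_in, W_in)
//   grid:  (N, H_out, W_out, 2)
// so the batch sizes match, the last grid dimension equals input.dim() - 2,
// and every spatial dimension of input is non-empty.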
// See NOTE [ grid_sampler Native Functions ].
void check_grid_sampler_2d(
  const TensorBase& input,
  const TensorBase& grid
) {
  TORCH_CHECK(
    input.dim() == 4 && input.dim() == grid.dim(),
    "grid_sampler(): expected 4D input and grid with same number of "
    "dimensions, but got input with sizes ", input.sizes(),
    " and grid with sizes ", grid.sizes());
}
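// Call-site sketch (hypothetical, for illustration only): a 2D implementation
// would typically run the common checks followed by the rank check, e.g.
//   check_grid_sampler_common(input, grid);
//   check_grid_sampler_2d(input, grid);
//   // ... proceed with the actual sampling ...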
// See NOTE [ grid_sampler Native Functions ].
void check_grid_sampler_3d(
  const TensorBase& input,
  const TensorBase& grid,
  int64_t interpolation_mode
) {
  TORCH_CHECK(
    input.dim() == 5 && input.dim() == grid.dim(),
    "grid_sampler(): expected 5D input and grid with same number of "
    "dimensions, but got input with sizes ", input.sizes(),
    " and grid with sizes ", grid.sizes());
  TORCH_CHECK(
    !(input.dim() == 5 &&
      static_cast<GridSamplerInterpolation>(interpolation_mode) ==
        GridSamplerInterpolation::Bicubic),
    "grid_sampler(): bicubic interpolation only supports 4D input");
}
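// Shape sketch for the 3D case (added for illustration, not part of the
// original file):
//   input: (N, C, D_in, H_in, W_in)
//   grid:  (N, D_out, H_out, W_out, 3)
// The second check rejects bicubic interpolation, which is only defined for
// 4D (2D spatial) inputs.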
// See NOTE [ grid_sampler Native Functions ].
// cudnn does not support inputs with more than 1024 channels (size of dim 1).
bool cond_cudnn_grid_sampler(
  const TensorBase& input,
  const TensorBase& grid
) {
  return (
    at::native::cudnn_is_acceptable(input) &&
    at::native::cudnn_is_acceptable(grid) &&
    at::native::canUse32BitIndexMath(input) &&
    at::native::canUse32BitIndexMath(grid) &&
    input.dim() == 4 &&
    input.sym_size(1) <= 1024);
}
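// Dispatch sketch (hypothetical, for illustration only): calling code can use
// this predicate to choose between the cuDNN kernel and the native path, e.g.
//   if (cond_cudnn_grid_sampler(input, grid)) {
//     // take the cudnn_grid_sampler path
//   } else {
//     // fall back to the native grid sampler kernel
//   }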
} // anonymous namespace
}} // namespace at::native