fix missing-prototypes warnings in torch_cpu (Part 5) (pytorch#101788)
This PR fixes more missing-prototypes violations in the torch_cpu sources, following PRs pytorch#100053, pytorch#100147 and pytorch#100245.

Pull Request resolved: pytorch#101788
Approved by: https://github.com/Skylion007
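For context: clang's -Wmissing-prototypes warning fires when a function with external linkage is defined in a translation unit without a previous declaration (GCC's closest C++ analogue is -Wmissing-declarations). The sketch below is a hypothetical illustration, not code from the PyTorch tree; it shows the warning and the two fixes this PR applies: internal linkage for file-local functions, and a visible prototype for exported ones.

// warning_demo.cpp (hypothetical), compile with: clang++ -c warning_demo.cpp -Wmissing-prototypes

int twice(int x) { return 2 * x; }  // warning: no previous prototype for function 'twice'

// Fix 1: internal linkage, appropriate when only this file uses the function.
static int twice_internal(int x) { return 2 * x; }  // no warning

// Fix 2: a prior declaration, appropriate when other files call the function.
// In real code the declaration lives in the header this .cpp includes.
int thrice(int x);
int thrice(int x) { return 3 * x; }  // no warning: the prototype is visible

An anonymous namespace is the idiomatic C++ alternative to static and silences the warning the same way.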
cyyever authored and pytorchmergebot committed May 18, 2023
1 parent c9ba967 commit ac1cf00
Showing 64 changed files with 182 additions and 155 deletions.
10 changes: 0 additions & 10 deletions aten/src/ATen/native/AdaptiveAveragePooling.cpp
@@ -140,16 +140,6 @@ namespace {
}
}

-Tensor& adaptive_avg_pool2d_backward_out_cpu(
-    Tensor& grad_input,
-    const Tensor& grad_output,
-    const Tensor& input)
-{
-  adaptive_avg_pool2d_backward_out_cpu_template(
-    grad_input, grad_output, input);
-  return grad_input;
-}
-
Tensor adaptive_avg_pool2d_backward_cpu(
const Tensor& grad_output,
const Tensor& input)
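Unlike most hunks in this PR, this one deletes the offending definition outright instead of marking it static, presumably because the overload no longer had any callers. A hypothetical illustration of that third option:

// A definition with external linkage, no prototype, and no remaining callers
// triggers the same warning, but annotating dead code would only hide the
// real problem; deleting the function is the cleaner fix. (Hypothetical.)
int orphan_helper(int x) { return x + 1; }  // warns, and nothing calls it: delete it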
18 changes: 9 additions & 9 deletions aten/src/ATen/native/BinaryOps.cpp
@@ -1482,23 +1482,23 @@ Tensor& not_equal_(Tensor& self, const Scalar& other) { return self.ne_(other);
Tensor& logical_and_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_and_stub); }
Tensor logical_and(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); }
Tensor& logical_and_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); }
-Tensor& logical_and_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_and_out)); }
-Tensor logical_and(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); }
-Tensor& logical_and_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); }
+static Tensor& logical_and_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_and_out)); }
+static Tensor logical_and(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_and_out)); }
+static Tensor& logical_and_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_and_out)); }

Tensor& logical_or_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_or_stub); }
Tensor logical_or(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); }
Tensor& logical_or_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); }
-Tensor& logical_or_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_or_out)); }
-Tensor logical_or(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); }
-Tensor& logical_or_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); }
+static Tensor& logical_or_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_or_out)); }
+static Tensor logical_or(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_or_out)); }
+static Tensor& logical_or_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_or_out)); }

Tensor& logical_xor_out(const Tensor& self, const Tensor& other, Tensor& result) { return comparison_op_out(result, self, other, logical_xor_stub); }
Tensor logical_xor(const Tensor& self, const Tensor& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
Tensor& logical_xor_(Tensor& self, const Tensor& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
-Tensor& logical_xor_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_xor_out)); }
-Tensor logical_xor(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
-Tensor& logical_xor_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
+static Tensor& logical_xor_out(Tensor& result, const Tensor& self, const Scalar& other) { return comparison_op_out(result, self, other, static_cast<OutFunc>(at::logical_xor_out)); }
+static Tensor logical_xor(const Tensor& self, const Scalar& other) { return comparison_op(self, other, static_cast<OutFunc>(at::logical_xor_out)); }
+static Tensor& logical_xor_(Tensor& self, const Scalar& other) { return comparison_op_(self, other, static_cast<OutFunc>(at::logical_xor_out)); }

// binary max, alias for maximum
Tensor& max_out(const Tensor& self, const Tensor& other, Tensor& result) {
4 changes: 4 additions & 0 deletions aten/src/ATen/native/ComparisonUtils.cpp
@@ -4,6 +4,10 @@
#include <ATen/core/TensorBody.h>
#include <c10/util/OptionalArrayRef.h>

+#ifdef AT_PER_OPERATOR_HEADERS
+#include <ATen/ops/_assert_tensor_metadata_native.h>
+#endif
+
namespace at {

class Tensor;
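This file shows the other fix in the PR: instead of adding static, it includes the per-operator header that ATen codegen emits, which declares the native implementation. That gives the definition a visible prototype and lets the compiler check it against the generated declaration. A rough sketch of the pattern, where my_op and its header are hypothetical stand-ins:

// ATen codegen emits one header per operator that declares its native kernel,
// e.g. (hypothetically) ATen/ops/my_op_native.h containing:
//   namespace at { namespace native {
//   TORCH_API at::Tensor my_op(const at::Tensor& self);
//   }} // namespace at::native
//
// The implementing .cpp pulls in that declaration before the definition:
#ifdef AT_PER_OPERATOR_HEADERS
#include <ATen/ops/my_op_native.h>
#else
#include <ATen/NativeFunctions.h>  // monolithic fallback that declares every native op
#endif

namespace at { namespace native {

Tensor my_op(const Tensor& self) {
  return self.clone();  // the definition now matches a visible, codegen-checked prototype
}

}} // namespace at::native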
1 change: 1 addition & 0 deletions aten/src/ATen/native/Copy.cpp
@@ -22,6 +22,7 @@
#else
#include <ATen/ops/_copy_from.h>
#include <ATen/ops/_propagate_xla_data.h>
+#include <ATen/ops/_propagate_xla_data_native.h>
#include <ATen/ops/copy_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/expand_copy.h>
8 changes: 4 additions & 4 deletions aten/src/ATen/native/Histogram.cpp
@@ -269,7 +269,7 @@ std::pair<double, double> histc_select_outer_bin_edges(const Tensor& input,

} // namespace

-std::vector<Tensor> allocate_bin_edges_tensors(const Tensor& self) {
+static std::vector<Tensor> allocate_bin_edges_tensors(const Tensor& self) {
TORCH_CHECK(self.dim() >= 2, "torch.histogramdd: input tensor should have at least 2 dimensions");
const int64_t N = self.size(-1);
std::vector<Tensor> bin_edges_out(N);
@@ -281,7 +281,7 @@ std::vector<Tensor> allocate_bin_edges_tensors(const Tensor& self) {

/* Versions of histogramdd in which bins is a Tensor[] defining the sequences of bin edges.
*/
-Tensor& histogramdd_out(const Tensor& self, TensorList bins,
+static Tensor& histogramdd_out(const Tensor& self, TensorList bins,
const c10::optional<Tensor>& weight, bool density,
Tensor& hist, TensorList& bin_edges) {
histogramdd_check_inputs(self, bins, weight);
@@ -308,7 +308,7 @@ Tensor _histogramdd(const Tensor& self, TensorList bins,
/* Versions of histogramdd in which bins is an int[]
* defining the number of bins in each dimension.
*/
-std::vector<Tensor>& histogramdd_bin_edges_out(const Tensor& self, IntArrayRef bin_ct,
+static std::vector<Tensor>& histogramdd_bin_edges_out(const Tensor& self, IntArrayRef bin_ct,
c10::optional<c10::ArrayRef<double>> range,
const c10::optional<Tensor>& weight, bool density,
std::vector<Tensor>& bin_edges_out) {
@@ -340,7 +340,7 @@ std::vector<Tensor> histogramdd_bin_edges(const Tensor& self, IntArrayRef bin_ct
return histogramdd_bin_edges_out(self, bin_ct, range, weight, density, bin_edges_out);
}

-Tensor& histogramdd_out(const Tensor& self, IntArrayRef bin_ct,
+static Tensor& histogramdd_out(const Tensor& self, IntArrayRef bin_ct,
c10::optional<c10::ArrayRef<double>> range,
const c10::optional<Tensor>& weight, bool density,
Tensor& hist, TensorList& bin_edges) {
16 changes: 8 additions & 8 deletions aten/src/ATen/native/LinearAlgebra.cpp
@@ -326,7 +326,7 @@ DEFINE_DISPATCH(addr_stub);

// As P is a permutation matrix
// det(P) = 1 if it's an even permutation and det(P) = -1 if it's an odd permutation
-Tensor lu_det_P(const Tensor& pivots) {
+static Tensor lu_det_P(const Tensor& pivots) {
return (at::arange(1, pivots.size(-1) + 1, pivots.options()) != pivots)
.sum(-1, /*keepdim=*/false, /*dtype=*/at::kLong)
.fmod_(2)
@@ -1594,7 +1594,7 @@ inline void baddbmm_cpu_kernel(const Tensor& result, const Tensor& self, const T
});
}

-void baddbmm_with_gemm_(const Tensor &result, const Tensor &mat1, const Tensor &mat2, const Scalar &beta_, const Scalar &alpha_) {
+static void baddbmm_with_gemm_(const Tensor &result, const Tensor &mat1, const Tensor &mat2, const Scalar &beta_, const Scalar &alpha_) {
TORCH_INTERNAL_ASSERT(result.is_contiguous());

const auto result_sizes = result.sizes();
@@ -1766,7 +1766,7 @@ static inline void bmm_out_or_baddbmm_(const Tensor& self_or_result_, const Tens
return;
}

-void conjugate_mutable_input_if_needed(const Tensor& self, bool conjugate) {
+static void conjugate_mutable_input_if_needed(const Tensor& self, bool conjugate) {
if (conjugate) {
self.conj_physical_();
}
@@ -1823,7 +1823,7 @@ Tensor& vdot_out(const Tensor& self, const Tensor& other, Tensor& result) {
return result.fill_(self.vdot(other));
}

-bool should_fold(const Tensor& tensor1, const Tensor& tensor2) {
+static bool should_fold(const Tensor& tensor1, const Tensor& tensor2) {
// We check that we can fold the larger tensor into a matrix and dispatch to mm or mv rather than
// to bmm. We want to make sure we can do so without incurring in any extra copy
const auto tensor1_larger = tensor1.dim() >= tensor2.dim();
@@ -2678,7 +2678,7 @@ TORCH_IMPL_FUNC(linalg_vector_norm_out)(const Tensor& self, const Scalar& scalar
norm_stub(iter.device_type(), iter, ord);
}

-void _linalg_matrix_norm_checks(const Tensor& A, std::vector<int64_t>& dim, optional<ScalarType> opt_dtype, bool low_precision) {
+static void _linalg_matrix_norm_checks(const Tensor& A, std::vector<int64_t>& dim, optional<ScalarType> opt_dtype, bool low_precision) {
// A
at::native::checkIsMatrix(A, "linalg.matrix_norm");
at::native::checkFloatingOrComplex(A, "linalg.matrix_norm", /*low_precision*/low_precision);
@@ -2950,7 +2950,7 @@ Tensor& nuclear_norm_out(const Tensor& self, IntArrayRef dim, bool keepdim, Tens


// This function helps to dispatch norm computations depending on 'ord' of variant type
-Tensor _linalg_cond_helper(const Tensor& self, c10::variant<Scalar, c10::string_view> ord_variant) {
+static Tensor _linalg_cond_helper(const Tensor& self, c10::variant<Scalar, c10::string_view> ord_variant) {
Tensor inverse, info;
std::tie(inverse, info) = at::linalg_inv_ex(self);
info.unsqueeze_(-1).unsqueeze_(-1);
@@ -2967,13 +2967,13 @@ Tensor _linalg_cond_helper(const Tensor& self, c10::variant<Scalar, c10::string_
}

// Return zero for each matrix in the batch
-Tensor _linalg_cond_empty_matrix(const Tensor& self, c10::ScalarType dtype) {
+static Tensor _linalg_cond_empty_matrix(const Tensor& self, c10::ScalarType dtype) {
auto result_shape = IntArrayRef(self.sizes().cbegin(), self.sizes().cend()-2);
TensorOptions options = self.options().dtype(toRealValueType(self.scalar_type()));
return at::zeros(result_shape, options);
}

-void _linalg_cond_check_ord(c10::variant<Scalar, c10::string_view> ord_variant) {
+static void _linalg_cond_check_ord(c10::variant<Scalar, c10::string_view> ord_variant) {
if (ord_variant.index() == 0) {
Scalar* ord = c10::get_if<Scalar>(&ord_variant);
double abs_ord = std::abs(ord->toDouble());
2 changes: 1 addition & 1 deletion aten/src/ATen/native/LossNLL.cpp
@@ -668,7 +668,7 @@ Tensor nll_loss_symint(const Tensor & self, const Tensor & target, const c10::op
}

// Duplicate of above code for non-symbolic ints. Kept for BC purposes and to minimize breakages.
-Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) {
+static Tensor nll_loss(const Tensor & self, const Tensor & target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
2 changes: 1 addition & 1 deletion aten/src/ATen/native/LossNLL2d.cpp
@@ -495,7 +495,7 @@ Tensor nll_loss2d_symint(const Tensor & self, const Tensor & target, const c10::
}

// Duplicate of above code for non-symbolic ints. Kept for BC purposes and to minimize breakages.
-Tensor nll_loss2d(const Tensor & self, const Tensor & target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) {
+static Tensor nll_loss2d(const Tensor & self, const Tensor & target, const c10::optional<Tensor>& weight_opt, int64_t reduction, int64_t ignore_index) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
2 changes: 1 addition & 1 deletion aten/src/ATen/native/MetaTensor.cpp
@@ -30,7 +30,7 @@ Tensor empty_meta_symint(
}

// Kept only for BC with XLA
-Tensor empty_strided_meta(
+static Tensor empty_strided_meta(
IntArrayRef size,
IntArrayRef stride,
c10::optional<ScalarType> dtype_opt,
2 changes: 1 addition & 1 deletion aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp
@@ -799,7 +799,7 @@ TORCH_IMPL_FUNC(slow_conv_transpose2d_structured_cpu)
dilation);
}

-std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cpu(const Tensor& grad_output,
+static std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cpu(const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
2 changes: 1 addition & 1 deletion aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp
@@ -872,7 +872,7 @@ Tensor slow_conv_transpose3d_cpu(
return output;
}

-std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cpu(const Tensor& grad_output,
+static std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cpu(const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
10 changes: 5 additions & 5 deletions aten/src/ATen/native/NamedTensor.cpp
@@ -339,10 +339,10 @@ Tensor& gather_out(const Tensor& self, Dimname dim, const Tensor& index, bool sp
Tensor index_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) {
reportNYIDimnameOverload("index_add");
}
-Tensor& index_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) {
+static Tensor& index_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar &alpha) {
reportNYIDimnameOverload("index_add");
}
-Tensor& index_add_out(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar& alpha, Tensor& result) {
+static Tensor& index_add_out(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source, const Scalar& alpha, Tensor& result) {
reportNYIDimnameOverload("index_add");
}
Tensor index_fill(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
@@ -372,19 +372,19 @@ Tensor index_select(const Tensor& self, Dimname dim, const Tensor& index) {
Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
reportNYIDimnameOverload("scatter");
}
-Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
+static Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
reportNYIDimnameOverload("scatter");
}
Tensor scatter(const Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
reportNYIDimnameOverload("scatter");
}
-Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
+static Tensor& scatter_(Tensor& self, Dimname dim, const Tensor& index, const Scalar& source) {
reportNYIDimnameOverload("scatter");
}
Tensor scatter_add(const Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
reportNYIDimnameOverload("scatter_add");
}
-Tensor& scatter_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
+static Tensor& scatter_add_(Tensor& self, Dimname dim, const Tensor& index, const Tensor& source) {
reportNYIDimnameOverload("scatter_add");
}
std::tuple<Tensor&, Tensor&> sort_out(const Tensor& self, c10::optional<bool> stable, Dimname dim, bool keepdim, Tensor& values, Tensor& indices) {
3 changes: 3 additions & 0 deletions aten/src/ATen/native/Normalization.cpp
@@ -25,6 +25,9 @@
#include <ATen/ops/_batch_norm_impl_index.h>
#include <ATen/ops/_batch_norm_impl_index_backward_native.h>
#include <ATen/ops/_batch_norm_impl_index_native.h>
+#include <ATen/ops/_native_batch_norm_legit_native.h>
+#include <ATen/ops/_native_batch_norm_legit_no_training.h>
+#include <ATen/ops/_native_batch_norm_legit_no_training_native.h>
#include <ATen/ops/alias.h>
#include <ATen/ops/batch_norm.h>
#include <ATen/ops/batch_norm_native.h>
(Diffs for the remaining 51 changed files are not shown.)
