Increase header coverage of clang-tidy (pytorch#110443)
Pull Request resolved: pytorch#110443
Approved by: https://github.com/Skylion007
cyyever authored and pytorchmergebot committed Oct 4, 2023
1 parent 0e55cc4 commit 5220d0d
Showing 13 changed files with 68 additions and 48 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -49,7 +49,7 @@ modernize-*,
performance-*,
readability-container-size-empty,
'
- HeaderFilterRegex: '^(c10/|torch/csrc/).*$'
+ HeaderFilterRegex: '^(aten/|c10/|torch/).*$'
AnalyzeTemporaryDtors: false
WarningsAsErrors: '*'
...
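
The widened `HeaderFilterRegex` is the heart of the commit: clang-tidy only reports diagnostics from headers whose paths match this regex, so headers under `aten/` and all of `torch/` (previously only `torch/csrc/`) are now covered alongside `c10/`. A minimal sketch of the difference, using `std::regex` purely for illustration (clang-tidy does not use `std::regex`, but these prefix patterns behave the same way):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
  const std::regex old_filter("^(c10/|torch/csrc/).*$");
  const std::regex new_filter("^(aten/|c10/|torch/).*$");
  const std::string header = "aten/src/ATen/ExpandUtils.h";  // a file from this diff

  // Old filter: no match, so clang-tidy suppressed diagnostics from this header.
  std::cout << std::regex_match(header, old_filter) << '\n';  // prints 0
  // New filter: match, so diagnostics in this header are now reported.
  std::cout << std::regex_match(header, new_filter) << '\n';  // prints 1
}
```

That broadened coverage is what surfaces the header-only fixes in the rest of this diff.
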
2 changes: 1 addition & 1 deletion aten/src/ATen/ExpandUtils.h
@@ -63,7 +63,7 @@ inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) {
size_t ndim2 = shape2.size();
size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2;

- for (int64_t i = ndim - 1; i >= 0; --i) {
+ for (int64_t i = static_cast<int64_t>(ndim) - 1; i >= 0; --i) {
if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 ||
shape2[ndim2] == 1) {
continue;
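
The cast above addresses a signed/unsigned mismatch: `ndim` is a `size_t`, so `ndim - 1` is computed in unsigned arithmetic and then implicitly narrowed to `int64_t`, which narrowing/conversion checks flag (the exact check name is not shown in the diff, so treat that as an assumption). A small sketch of the behavior:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  std::size_t ndim = 0;  // same type as shape.size() in the hunk above

  // Old form: `ndim - 1` wraps around in unsigned arithmetic before it is
  // implicitly narrowed to int64_t, which is what the conversion checks flag.
  std::size_t wrapped = ndim - 1;
  std::cout << wrapped << '\n';  // 18446744073709551615 on 64-bit platforms

  // New form: convert first, then subtract in signed arithmetic.
  std::int64_t i = static_cast<std::int64_t>(ndim) - 1;
  std::cout << i << '\n';  // -1, so a loop guarded by `i >= 0` never runs
}
```
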
6 changes: 3 additions & 3 deletions aten/src/ATen/LegacyBatchedTensorImpl.h
@@ -113,15 +113,15 @@ inline bool isBatchedTensor(const Tensor& tensor) {

// It is unsafe to call this on a Tensor that is not backed by a
// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
- inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) {
+ inline BatchedTensorImpl* unsafeGetBatchedImpl(const Tensor& tensor) {
return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
}

- inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) {
+ inline BatchedTensorImpl* maybeGetBatchedImpl(const Tensor& tensor) {
if (!isBatchedTensor(tensor)) {
return nullptr;
}
- return unsafeGetBatchedImpl(std::move(tensor));
+ return unsafeGetBatchedImpl(tensor);
}

// Returns a bitset. If bit i is set, then that means dim i is a batchdim.
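
Passing `Tensor` by value copies a reference-counted handle, so clang-tidy's `performance-unnecessary-value-param` (enabled via the `performance-*` pattern above) prefers `const Tensor&` for read-only parameters; once the parameter is a const reference, the `std::move` at the call site has nothing to consume, hence the second change. A sketch of the idea with `std::shared_ptr` standing in for the refcounted handle (an analogy, not PyTorch code):

```cpp
#include <memory>

struct Payload {
  int value;
};
using Handle = std::shared_ptr<Payload>;  // stand-in for a refcounted tensor handle

// Old style: the by-value parameter copies the handle, i.e. an atomic
// refcount increment on entry and a decrement on exit, even though the
// function only reads it.
int read_by_value(Handle h) { return h->value; }

// New style: no copy, no refcount traffic; std::move at the call site would
// be pointless because nothing is consumed.
int read_by_ref(const Handle& h) { return h->value; }

int main() {
  Handle h = std::make_shared<Payload>();
  h->value = 7;
  return (read_by_value(h) + read_by_ref(h) == 14) ? 0 : 1;
}
```
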
7 changes: 5 additions & 2 deletions aten/src/ATen/NestedTensorImpl.h
@@ -95,11 +95,12 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
const auto buffer_size = get_buffer_size();
auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
- buffer_tensor_impl->set_sizes_contiguous(c10::makeArrayRef(buffer_size));
+ buffer_tensor_impl->set_sizes_contiguous(
+ c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
return Tensor(buffer_tensor_impl);
}

- int64_t get_buffer_size() const {
+ size_t get_buffer_size() const {
return storage_.nbytes() / data_type_.itemsize();
}

@@ -146,6 +147,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
// to TensorImpl.
void refresh_dim();

+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const at::Tensor nested_sizes_, nested_strides_;
// The starting positions of the underlying tensors in contiguous buffer
// i.e. the buffer memory offsets to get the underlying tensors
@@ -159,6 +161,7 @@ struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
// Some strong enough constraints are:
// 1. every underlying tensor is contiguous in memory
// && nesting in ascending order
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const at::Tensor storage_offsets_;
// NOTE: -1 here means the size is missing
// Optional to allow it to be computed lazily from nested.
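
The two added comments use clang-tidy's `NOLINTNEXTLINE(<check>)` marker, which suppresses the named check for the following line only; here the members stay `const` and only `cppcoreguidelines-avoid-const-or-ref-data-members` is silenced at those two sites. A self-contained sketch of the mechanism (made-up struct):

```cpp
#include <string>
#include <utility>

struct Config {
  // A const member would normally trigger
  // cppcoreguidelines-avoid-const-or-ref-data-members (it makes Config
  // non-copy-assignable and non-movable); the marker below silences only
  // this check and only on the next line.
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const std::string name_;

  explicit Config(std::string name) : name_(std::move(name)) {}
};

int main() {
  Config c("demo");
  return c.name_ == "demo" ? 0 : 1;
}
```
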
1 change: 1 addition & 0 deletions aten/src/ATen/ParallelOpenMP.h
@@ -1,5 +1,6 @@
#pragma once

+ #include <algorithm>
#include <atomic>
#include <cstddef>
#include <exception>
10 changes: 5 additions & 5 deletions aten/src/ATen/SparseCsrTensorImpl.h
@@ -115,8 +115,8 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
key_set(), device(), layout_impl(), dtype());
copy_tensor_metadata(
- /*src_impl=*/this,
- /*dest_impl=*/impl.get(),
+ /*src_sparse_impl=*/this,
+ /*dest_sparse_impl=*/impl.get(),
/*version_counter=*/version_counter,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
impl->refresh_numel();
@@ -135,9 +135,9 @@ struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
auto impl = c10::make_intrusive<SparseCsrTensorImpl>(
key_set(), device(), layout_impl(), dtype());
copy_tensor_metadata(
- /*src_impl=*/this,
- /*dest_impl=*/impl.get(),
- /*version_counter=*/std::move(version_counter),
+ /*src_sparse_impl=*/this,
+ /*dest_sparse_impl=*/impl.get(),
+ /*version_counter=*/version_counter,
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
impl->refresh_numel();
return impl;
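
The renamed `/*...=*/` annotations look like the `bugprone-argument-comment` pattern: clang-tidy checks that such comments match the callee's actual parameter names, which for this overload are presumably `src_sparse_impl`/`dest_sparse_impl` (an assumption; the callee is not shown here). The dropped `std::move(version_counter)` is the move-on-a-const-argument cleanup illustrated after the IndexingUtils.h hunk below. A tiny sketch of the argument-comment rule with made-up names:

```cpp
// Hypothetical function, purely to show the /*name=*/ convention that
// bugprone-argument-comment verifies against the parameter names.
void resize_image(int width_px, int height_px) {
  (void)width_px;
  (void)height_px;
}

int main() {
  resize_image(/*width_px=*/640, /*height_px=*/480);  // OK: comments match
  // resize_image(/*w=*/640, /*h=*/480);              // flagged: names differ
  return 0;
}
```
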
4 changes: 3 additions & 1 deletion aten/src/ATen/TensorNames.h
@@ -28,7 +28,9 @@ namespace at::namedinference {
struct TORCH_API TensorName {
explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
: origin_(origin),
- name_(origin[maybe_wrap_dim(origin_idx, origin.size())]),
+ name_(origin[maybe_wrap_dim(
+ origin_idx,
+ static_cast<int64_t>(origin.size()))]),
origin_idx_(origin_idx) {}

// op_name is only used for error reporting.
4 changes: 3 additions & 1 deletion aten/src/ATen/autocast_mode.h
@@ -45,7 +45,9 @@ TORCH_API bool is_autocast_cache_enabled();
TORCH_API void set_autocast_cache_enabled(bool enabled);

namespace {
- bool is_autocast_eligible(const Tensor& tensor, c10::DeviceType device_type) {
+ inline bool is_autocast_eligible(
+ const Tensor& tensor,
+ c10::DeviceType device_type) {
switch (device_type) {
case c10::DeviceType::CUDA:
return (tensor.is_cuda() || tensor.is_xla()) &&
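
Many of the remaining changes (here and throughout ForeachUtils.h below) add `inline` to functions that are defined in a header. The usual motivation, stated as an assumption since the diagnostic itself is not shown: a non-inline definition included from several translation units either breaks the one-definition rule or, inside an anonymous namespace, produces a per-TU copy that draws unused-function warnings; `inline` avoids both. A single-file sketch of the pattern (imagine the helper living in a header):

```cpp
#include <vector>

// Imagine this definition sitting in a header included from many .cpp files.
// Without `inline`, each inclusion emits a definition with external linkage
// and linking fails (ODR violation); with `inline`, the duplicates are merged.
inline bool all_nonempty(const std::vector<std::vector<int>>& lists) {
  for (const auto& l : lists) {
    if (l.empty()) {
      return false;
    }
  }
  return true;
}

int main() {
  return all_nonempty({{1}, {2, 3}}) ? 0 : 1;
}
```
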
2 changes: 1 addition & 1 deletion aten/src/ATen/core/List_inl.h
@@ -198,7 +198,7 @@ typename List<T>::internal_const_reference_type List<T>::operator[](size_type po
template<class T>
typename List<T>::internal_reference_type List<T>::operator[](size_type pos) {
static_cast<void>(impl_->list.at(pos)); // Throw the exception if it is out of range.
- return {impl_->list.begin() + pos};
+ return {impl_->list.begin() + static_cast<typename decltype(impl_->list)::difference_type>(pos)};
}

template<class T>
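
Iterator arithmetic is defined on the container's signed `difference_type`, while `pos` is an unsigned `size_type`, so `begin() + pos` involves an implicit sign conversion that conversion-related checks flag; the verbose cast states the conversion explicitly without changing behavior. Sketch:

```cpp
#include <iostream>
#include <vector>

int main() {
  std::vector<int> v{10, 20, 30};
  std::vector<int>::size_type pos = 2;  // unsigned index, like size_type pos above

  // `v.begin() + pos` mixes a signed difference_type with an unsigned index,
  // which sign-conversion checks flag; the explicit cast mirrors the change
  // above and keeps the same behavior.
  auto it = v.begin() + static_cast<std::vector<int>::difference_type>(pos);
  std::cout << *it << '\n';  // prints 30
}
```
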
40 changes: 22 additions & 18 deletions aten/src/ATen/native/ForeachUtils.h
@@ -20,14 +20,14 @@
namespace at::native {
namespace {
// Check if tensor list has either a boolean tensor or a integer tensor
- bool has_integral_tensor(TensorList tensors, const bool includeBool) {
+ inline bool has_integral_tensor(TensorList tensors, const bool includeBool) {
return std::any_of(
tensors.begin(), tensors.end(), [&includeBool](const auto& t) {
return at::isIntegralType(t.scalar_type(), includeBool);
});
}
// check if tensor list has bool tensors
- bool has_bool_tensor(TensorList tensors) {
+ inline bool has_bool_tensor(TensorList tensors) {
return std::any_of(tensors.begin(), tensors.end(), [](const auto& t) -> bool {
return t.scalar_type() == ScalarType::Bool;
});
@@ -37,11 +37,11 @@ bool has_bool_tensor(TensorList tensors) {
// - Tensor lists must be non-empty.
// - All TensorLists and ScalarLists must have the same number of elements.
// - Corresponding tensors must have the same size.
- void check_foreach_api_restrictions(TensorList tensors) {
+ inline void check_foreach_api_restrictions(TensorList tensors) {
TORCH_CHECK(!tensors.empty(), "Tensor list must have at least one tensor.");
}

- void check_foreach_api_restrictions(
+ inline void check_foreach_api_restrictions(
TensorList tensors,
ArrayRef<Scalar> scalars) {
check_foreach_api_restrictions(tensors);
@@ -50,7 +50,9 @@ void check_foreach_api_restrictions(
"Tensor list must have same number of elements as scalar list.");
}

- void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2) {
+ inline void check_foreach_api_restrictions(
+ TensorList tensors1,
+ TensorList tensors2) {
TORCH_CHECK(!tensors1.empty(), "Tensor list must have at least one tensor.");
TORCH_CHECK(!tensors2.empty(), "Tensor list must have at least one tensor.");
TORCH_CHECK(
@@ -61,7 +63,7 @@ void check_foreach_api_restrictions(TensorList tensors1, TensorList tensors2) {
tensors2.size());
}

- void check_foreach_api_restrictions(
+ inline void check_foreach_api_restrictions(
TensorList tensors1,
TensorList tensors2,
TensorList tensors3) {
@@ -82,7 +84,7 @@ void check_foreach_api_restrictions(
tensors3.size());
}

- void check_foreach_api_restrictions(
+ inline void check_foreach_api_restrictions(
TensorList tensors1,
TensorList tensors2,
TensorList tensors3,
@@ -99,7 +101,8 @@ void check_foreach_api_restrictions(
// Helper function called in check_fast_path_restrictions to check whether all
// corresponding tensors (aligning in index across the tensorLists) share the
// same device and dtype.
- bool _check_tensors_share_device_and_dtype(ArrayRef<TensorList> tensorLists) {
+ inline bool _check_tensors_share_device_and_dtype(
+ ArrayRef<TensorList> tensorLists) {
const auto expected_dtype = tensorLists[0][0].dtype();
const auto expected_device = tensorLists[0][0].device();

@@ -122,7 +125,8 @@

// Helper function called in check_fast_path_restrictions to check if
// corresponding tensors in tensor lists have the same sizes and strides.
- bool _check_tensors_share_sizes_and_strides(ArrayRef<TensorList> tensorLists) {
+ inline bool _check_tensors_share_sizes_and_strides(
+ ArrayRef<TensorList> tensorLists) {
for (const auto i : c10::irange(1, tensorLists.size())) {
for (const auto j : c10::irange(tensorLists[0].size())) {
if (tensorLists[0][j].sizes() != tensorLists[i][j].sizes() ||
@@ -140,7 +144,7 @@ bool _check_tensors_share_sizes_and_strides(ArrayRef<TensorList> tensorLists) {
// function assumes that _check_tensors_share_device_and_dtype has already been
// called so that all corresponding tensors in tensorLists have the same dtype.
// Then, it is sufficient to check the type promotion with just one tensorList.
- bool _check_tensors_do_type_promotion_with_scalars(
+ inline bool _check_tensors_do_type_promotion_with_scalars(
TensorList tensorList,
ArrayRef<Scalar> scalarList = {},
bool does_op_promote_integer_inputs_to_float = false) {
@@ -176,7 +180,7 @@

// Please, make sure to call check_foreach_api_restrictions before calling this
// method. There is a set of preconditions that have to be satisfied.
- bool check_fast_path_restrictions(
+ inline bool check_fast_path_restrictions(
ArrayRef<TensorList> tensorLists,
ArrayRef<Scalar> scalarList = {},
bool does_op_promote_integer_inputs_to_float = false) {
@@ -188,7 +192,7 @@ bool check_fast_path_restrictions(
does_op_promote_integer_inputs_to_float);
}

- std::vector<c10::Scalar> convert_tensor_to_scalar_list(
+ inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
const Tensor& scalarList_,
int64_t expect_length) {
std::vector<c10::Scalar> scalarList;
@@ -221,21 +225,21 @@ std::vector<c10::Scalar> convert_tensor_to_scalar_list(
scalarList_.size(0),
" instead.");
for (int64_t i = 0; i < scalarList_.size(0); i++) {
- scalarList.push_back(c10::Scalar(scalar_data[i]));
+ scalarList.emplace_back(scalar_data[i]);
}
});
return scalarList;
}

- bool can_use_fast_route(
+ inline bool can_use_fast_route(
ArrayRef<TensorList> tensorLists,
ArrayRef<Scalar> scalarList = {},
bool does_op_promote_integer_inputs_to_float = false) {
return check_fast_path_restrictions(
tensorLists, scalarList, does_op_promote_integer_inputs_to_float);
}

- bool can_use_fast_route(
+ inline bool can_use_fast_route(
TensorList tensors1,
TensorList tensors2,
bool does_op_promote_integer_inputs_to_float = false) {
@@ -253,13 +257,13 @@ using FlatMap = std::unordered_map<
TensorsAndIndicesT,
ParamsHash<DeviceDtypeKey>>;

- FlatMap _group_tensors_by_first_tensors_device_and_dtype(
+ inline FlatMap _group_tensors_by_first_tensors_device_and_dtype(
const nested_optional_tensorvec_t& nested_tensorlist,
const bool with_indices) {
FlatMap grouped_tensors_with_indices;

- TORCH_CHECK(nested_tensorlist.size() > 0);
- TORCH_CHECK(nested_tensorlist[0].size() > 0);
+ TORCH_CHECK(!nested_tensorlist.empty());
+ TORCH_CHECK(!nested_tensorlist[0].empty());
const auto num_lists = nested_tensorlist.size();
const auto num_tensors = nested_tensorlist[0].size();

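
Besides the `inline` additions, this file picks up two mechanical cleanups: `readability-container-size-empty` (listed explicitly in the `.clang-tidy` excerpt at the top) rewrites `size() > 0` as `!empty()`, and the `push_back(c10::Scalar(...))` to `emplace_back(...)` change matches the `modernize-use-emplace` pattern. A small sketch of both:

```cpp
#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> names;

  // modernize-use-emplace: construct the element in place instead of
  // building a temporary and pushing it.
  // Before: names.push_back(std::string(3, 'x'));
  names.emplace_back(3, 'x');  // -> "xxx"

  // readability-container-size-empty: ask the container directly.
  // Before: assert(names.size() > 0);
  assert(!names.empty());

  return names.front() == "xxx" ? 0 : 1;
}
```
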
8 changes: 4 additions & 4 deletions aten/src/ATen/native/IndexingUtils.h
@@ -30,7 +30,7 @@ static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTen
// The sizes of the ByteTensor mask or bool tensor must match the sizes of the
// corresponding dimensions in self
for (const auto j : c10::irange(index.dim())) {
- int64_t srcIdx = result.size() + j;
+ int64_t srcIdx = static_cast<int64_t>(result.size() + j);
if (index.size(j) != self.size(srcIdx)) {
invalid_mask(self, srcIdx, index, j);
}
@@ -41,7 +41,7 @@ static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTen
result.emplace_back(nonzero.select(1, j));
}
} else {
- result.emplace_back(std::move(index));
+ result.emplace_back(index);
}
}
}
@@ -101,7 +101,7 @@ static C10_UNUSED bool hasContiguousSubspace(TensorList tl) {
// returns
// tensor.permute([1, 3, 0, 2]), {a, b, nullptr, nullptr}
static C10_UNUSED std::tuple<Tensor, std::vector<Tensor>>
- transposeToFront(Tensor self, TensorList indices) {
+ transposeToFront(const Tensor& self, TensorList indices) {
std::vector<int64_t> dims;
std::vector<Tensor> transposedIndices;
dims.reserve(self.dim());
@@ -121,7 +121,7 @@ transposeToFront(Tensor self, TensorList indices) {
}

inline std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
- transposeToFrontAndInvPerm(Tensor self, TensorList indices) {
+ transposeToFrontAndInvPerm(const Tensor& self, TensorList indices) {
std::vector<int64_t> dims;
std::vector<int64_t> invPerm;
std::vector<Tensor> transposedIndices;
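
Dropping `std::move(index)` follows the `performance-move-const-arg` pattern: moving from a const (or const-reference) object silently falls back to a copy, so the `std::move` is misleading noise. The `Tensor` to `const Tensor&` signature changes in the same hunk are the value-param cleanup illustrated earlier. Sketch:

```cpp
#include <iostream>
#include <string>
#include <vector>

void append(std::vector<std::string>& out, const std::string& item) {
  // `std::move(item)` would have type `const std::string&&`, which can only
  // bind to the copy constructor -- the "move" still copies, and
  // performance-move-const-arg flags it. Writing it without std::move says
  // what actually happens.
  out.emplace_back(item);
}

int main() {
  std::vector<std::string> v;
  std::string s = "payload";
  append(v, s);
  std::cout << s << ' ' << v[0] << '\n';  // both still print "payload"
}
```
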
4 changes: 2 additions & 2 deletions aten/src/ATen/native/nested/NestedTensorUtils.h
@@ -169,13 +169,13 @@ inline std::vector<IntArrayRef> NestedTensor_get_strides(
inline void check_numel_equals_buffer_size(const at::Tensor& self) {
auto self_impl = get_nested_tensor_impl(self);
TORCH_CHECK(
- self.numel() == self_impl->get_buffer_size(),
+ self.numel() == static_cast<int64_t>(self_impl->get_buffer_size()),
"Number of elements in nested tensor must match number of elements in buffer.");
}

inline void check_numel_equals_buffer_size(const NestedTensorImpl* self_ptr) {
TORCH_CHECK(
- self_ptr->numel() == self_ptr->get_buffer_size(),
+ self_ptr->numel() == static_cast<int64_t>(self_ptr->get_buffer_size()),
"Number of elements in nested tensor must match number of elements in buffer.");
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(The diff for the remaining changed file was not loaded in this view.)

