Apply Clang-Tidy readability-container-size-empty (pytorch#93236)
Not only is this change usually shorter and more readable, it can also yield better performance. size() is not always a constant-time operation (such as on linked lists), but empty() always is.

Pull Request resolved: pytorch#93236
Approved by: https://github.com/malfet
Skylion007 authored and pytorchmergebot committed Jan 29, 2023
1 parent 239afa0 commit 0247ed2
Showing 216 changed files with 518 additions and 525 deletions.
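
The commit message's rationale can be illustrated with a small, self-contained sketch. This example is hypothetical and not taken from the PyTorch sources; it only shows the before/after patterns that the readability-container-size-empty check rewrites:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> items{1, 2, 3};

  // Before: compare the element count against zero.
  if (items.size() == 0) {
    std::puts("no items");
  }
  if (items.size() > 0) {
    std::puts("has items");
  }

  // After: query emptiness directly. empty() is O(1) for every standard
  // container, while size() is not guaranteed to be constant time for all
  // container-like types, and the intent reads more clearly.
  if (items.empty()) {
    std::puts("no items");
  }
  if (!items.empty()) {
    std::puts("has items");
  }
  return 0;
}
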
1 change: 1 addition & 0 deletions .clang-tidy
@@ -39,6 +39,7 @@ modernize-*,
performance-*,
-performance-noexcept-move-constructor,
-performance-unnecessary-value-param,
readability-container-size-empty,
'
HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
AnalyzeTemporaryDtors: false
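
With readability-container-size-empty added to the check list above, clang-tidy flags size()-based emptiness tests in the covered sources. As a hedged aside (not something this commit does), a specific call site could still opt out via clang-tidy's standard NOLINT mechanism; the function below is hypothetical:

#include <vector>

bool size_is_zero(const std::vector<int>& v) {
  // NOLINTNEXTLINE(readability-container-size-empty)
  return v.size() == 0;  // intentionally kept; the check is suppressed on this line
}
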
2 changes: 1 addition & 1 deletion aten/src/ATen/CPUApplyUtils.h
@@ -106,7 +106,7 @@ struct strided_tensor_iter {
};

inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
- if (tensors.size() == 0)
+ if (tensors.empty())
return true;
int64_t all_numel = tensors[0].numel();
for (const auto i : c10::irange(1, tensors.size())) {
5 changes: 3 additions & 2 deletions aten/src/ATen/FunctionalStorageImpl.cpp
@@ -43,9 +43,10 @@ ViewMeta ViewMeta::to_out_idx(int64_t out_idx) {
const Tensor apply_update(const FunctionalStorageImpl::Update& update, const Tensor& base) {
at::Tensor t = update.new_val;
TORCH_INTERNAL_ASSERT(!at::functionalization::impl::isFunctionalTensor(t));
- if (update.view_metas.size() == 0) return t;
+ if (update.view_metas.empty()) return t;

std::vector<at::Tensor> tmp_values({base});
+ tmp_values.reserve(update.view_metas.size());
for (size_t i = 0; i < update.view_metas.size() - 1; ++i) {
at::Tensor next_view = update.view_metas[i].forward_fn(tmp_values.back(), update.view_metas[i].out_index);
// NB: We only actually need tmp_values for ops like select/slice/diagonal/squeeze/as_strided
@@ -113,7 +114,7 @@ bool FunctionalStorageImpl::apply_updates() {
// It adds the Functionalize key into TLS before redispatching to the functionalization kernels,
// which means that we need to explicitly exclude it here before doing any other work underneath the pass.
at::AutoDispatchSkipFunctionalize guard;
- bool any_updates = updates_.size() > 0;
+ bool any_updates = !updates_.empty();
for (auto& update_data: updates_) {
base_ = apply_update(update_data, base_);
}
6 changes: 3 additions & 3 deletions aten/src/ATen/FunctionalTensorWrapper.cpp
@@ -132,7 +132,7 @@ FunctionalTensorWrapper::FunctionalTensorWrapper(const Tensor& view_value, const
{
set_constructor_metadata();
// Copy the original tensor's ViewMeta vector and push the current one.
- if (base->view_metas_.size() > 0) {
+ if (!base->view_metas_.empty()) {
view_metas_ = base->view_metas_; // copy
}
view_metas_.push_back(meta);
@@ -238,7 +238,7 @@ void FunctionalTensorWrapper::maybe_replace_storage(const Tensor& other) {
//
// Given all of the above, for now we're just banning the above usage.
TORCH_CHECK(storage().use_count() == 1, "Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass");
- TORCH_CHECK(view_metas_.size() == 0, "Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass");
+ TORCH_CHECK(view_metas_.empty(), "Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass");
// If this tensor is not a view (and has no outstanding views taken out on it),
// Then it's safe to throw out the old storage and replace it with the new, larger one.
storage_ = c10::Storage(c10::make_intrusive<functionalization::FunctionalStorageImpl>(other));
@@ -508,7 +508,7 @@ bool isFunctionalTensor(const c10::optional<Tensor>& t) {
}

bool isFunctionalTensor(const c10::List<c10::optional<Tensor>>& t_list) {
- if (t_list.size() == 0) return false;
+ if (t_list.empty()) return false;
auto functional_count = 0;
for (const auto i : c10::irange(t_list.size())) {
if (!t_list[i].has_value() || !t_list[i]->defined()) continue;
4 changes: 2 additions & 2 deletions aten/src/ATen/LegacyBatchedFallback.cpp
@@ -156,7 +156,7 @@ void batchedTensorInplaceForLoopFallback(const c10::OperatorHandle& op, torch::j
batched_tensor_inputs.push_back(tensor);
batched_tensor_inputs_position.push_back(idx);
}
- TORCH_INTERNAL_ASSERT(batched_tensor_inputs.size() > 0);
+ TORCH_INTERNAL_ASSERT(!batched_tensor_inputs.empty());

// MultiBatchVmapTransform the BatchedTensor arguments. This returns
// VmapPhysicalViews that contain all of the batch dimensions.
@@ -290,7 +290,7 @@ void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Sta
batched_tensor_inputs.push_back(tensor);
batched_tensor_inputs_position.push_back(idx);
}
- TORCH_INTERNAL_ASSERT(batched_tensor_inputs.size() > 0);
+ TORCH_INTERNAL_ASSERT(!batched_tensor_inputs.empty());

// MultiBatchVmapTransform the BatchedTensor arguments. This returns
// VmapPhysicalViews that contain all of the batch dimensions.
8 changes: 4 additions & 4 deletions aten/src/ATen/LegacyBatchingRegistrations.cpp
@@ -69,7 +69,7 @@ Tensor sum_batching_rule(const Tensor& self, OptionalIntArrayRef opt_dims, bool
// >>> x = torch.randn(B0) # the per-examples are all scalars
// >>> vmap(partial(torch.sum, dim=0), x)
// then we replicate the behavior of sum(scalar_tensor, dim=0).
- if (/*logical*/self.dim() == 0 && (dims.size() == 0 || (dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0])))) {
+ if (/*logical*/self.dim() == 0 && (dims.empty() || (dims.size() == 1 && is_allowed_dim_on_scalar_tensor(dims[0])))) {
return self.clone();
}
}
@@ -477,7 +477,7 @@ Tensor view_batching_rule(const Tensor& self, IntArrayRef size) {
Tensor view_as_complex_batching_rule(const Tensor& self) {
// guard against the user passing in a batch of scalar tensors with batch
// size equal to 2.
- TORCH_CHECK(self.sizes().size() != 0, "Input tensor must have one or more dimensions");
+ TORCH_CHECK(!self.sizes().empty(), "Input tensor must have one or more dimensions");
auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
auto result = at::view_as_complex(self_physical.tensor());
return self_physical.getPhysicalToLogicalMap().apply(result);
@@ -931,7 +931,7 @@ Tensor cat_batching_rule(const ITensorListRef& tensors, int64_t dim) {
auto physical_tensors = fmap(
physical_views, [](const VmapPhysicalView& view) -> Tensor { return view.tensor(); });
TORCH_INTERNAL_ASSERT(
- tensors.size() > 0, "The dispatcher should not have dispatched here otherwise.");
+ !tensors.empty(), "The dispatcher should not have dispatched here otherwise.");
auto result = at::cat(physical_tensors, physical_views[0].getPhysicalDim(dim));
return physical_views[0].getPhysicalToLogicalMap().apply(result);
}
@@ -941,7 +941,7 @@ Tensor stack_batching_rule(TensorList tensors, int64_t dim) {
auto physical_tensors = fmap(
physical_views, [](const VmapPhysicalView& view) -> Tensor { return view.tensor(); });
TORCH_INTERNAL_ASSERT(
- tensors.size() > 0, "The dispatcher should not have dispatched here otherwise.");
+ !tensors.empty(), "The dispatcher should not have dispatched here otherwise.");
// NB: stack wraps the dimensionality to (logical dim + 1), so we have to
// manually handle that here.
auto dim_physical =
2 changes: 1 addition & 1 deletion aten/src/ATen/LegacyVmapTransforms.cpp
@@ -239,7 +239,7 @@ MultiBatchVmapTransform::logicalToPhysical(ITensorListRef logical_tensors) {

static std::pair<std::bitset<kVmapNumLevels>,int64_t>
getLevelsAndLargestLogicalDim(TensorList logical_tensors) {
- TORCH_INTERNAL_ASSERT(logical_tensors.size() > 0);
+ TORCH_INTERNAL_ASSERT(!logical_tensors.empty());
std::bitset<kVmapNumLevels> levels;
int64_t largest_logical_dim = -1;
for (const auto& tensor : logical_tensors) {
6 changes: 3 additions & 3 deletions aten/src/ATen/NamedTensorUtils.cpp
@@ -207,7 +207,7 @@ void propagate_names_for_reduction(const Tensor& result, const Tensor& src, IntA
return;
}
// This actually means "full reduction"
- if (reduced_dims.size() == 0) {
+ if (reduced_dims.empty()) {
return;
}
propagate_names_except(result, src, reduced_dims);
@@ -303,7 +303,7 @@ static int64_t num_batch_dims(DimnameList names) {
static std::vector<Dimname> compute_matmul_outnames(
DimnameList self_names,
DimnameList other_names) {
- TORCH_CHECK(self_names.size() >= 1 && other_names.size() >= 1,
+ TORCH_CHECK(!self_names.empty() && !other_names.empty(),
"both arguments to matmul need to be at least 1D, but they are ",
self_names.size(), "D and ", other_names.size(), "D");

@@ -430,7 +430,7 @@ std::vector<Dimname> compute_cat_outnames(const MaterializedITensorListRef& tens
std::vector<Dimname> result;
for (const Tensor& tensor : tensors) {
const auto tensor_names = tensor.names();
- TORCH_CHECK(tensor_names.size() > 0, "zero-dimensional tensor cannot be concatenated");
+ TORCH_CHECK(!tensor_names.empty(), "zero-dimensional tensor cannot be concatenated");
TORCH_CHECK(result.empty() || tensor_names.size() == result.size(),
"Tensors must have same number of dimensions: got ", result.size(),
" and ", tensor_names.size());
2 changes: 1 addition & 1 deletion aten/src/ATen/PythonTorchFunctionTLS.cpp
@@ -11,7 +11,7 @@ void PythonTorchFunctionTLS::push_onto_stack(std::shared_ptr<SafePyObject> mode)
}

const std::shared_ptr<SafePyObject> PythonTorchFunctionTLS::pop_stack() {
- TORCH_CHECK(pythonTorchFunctionState.stack_.size() > 0, "trying to pop from empty mode stack");
+ TORCH_CHECK(!pythonTorchFunctionState.stack_.empty(), "trying to pop from empty mode stack");
auto out = pythonTorchFunctionState.stack_.back();
pythonTorchFunctionState.stack_.pop_back();
return out;
2 changes: 1 addition & 1 deletion aten/src/ATen/SavedTensorHooks.cpp
@@ -26,7 +26,7 @@ bool SavedTensorDefaultHooks::is_enabled() {

void SavedTensorDefaultHooks::disable(const std::string& message) {
tls.disabled_error_message = message;
- if (tls.stack.size() > 0) {
+ if (!tls.stack.empty()) {
assertSavedTensorHooksNotDisabled();
}
}
6 changes: 3 additions & 3 deletions aten/src/ATen/TensorIndexing.cpp
@@ -65,7 +65,7 @@ static inline void set_item(const Tensor& self, ArrayRef<TensorIndex> indices, c
} // namespace indexing

Tensor Tensor::index(ArrayRef<at::indexing::TensorIndex> indices) const {
- TORCH_CHECK(indices.size() > 0, "Passing an empty index list to Tensor::index() is not valid syntax");
+ TORCH_CHECK(!indices.empty(), "Passing an empty index list to Tensor::index() is not valid syntax");
OptionalDeviceGuard device_guard(device_of(*this));
return at::indexing::get_item(*this, indices);
}
@@ -74,13 +74,13 @@ Tensor Tensor::index(std::initializer_list<at::indexing::TensorIndex> indices) c
}

Tensor & Tensor::index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const & rhs) {
- TORCH_CHECK(indices.size() > 0, "Passing an empty index list to Tensor::index_put_() is not valid syntax");
+ TORCH_CHECK(!indices.empty(), "Passing an empty index list to Tensor::index_put_() is not valid syntax");
OptionalDeviceGuard device_guard(device_of(*this));
at::indexing::set_item(*this, indices, rhs);
return *this;
}
Tensor & Tensor::index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar& v) {
- TORCH_CHECK(indices.size() > 0, "Passing an empty index list to Tensor::index_put_() is not valid syntax");
+ TORCH_CHECK(!indices.empty(), "Passing an empty index list to Tensor::index_put_() is not valid syntax");
OptionalDeviceGuard device_guard(device_of(*this));
at::indexing::set_item(*this, indices, v);
return *this;
2 changes: 1 addition & 1 deletion aten/src/ATen/TensorIndexing.h
@@ -237,7 +237,7 @@ static inline Tensor applySelect(
// See NOTE [nested tensor size for indexing]
if (self_sizes.has_value()) {
TORCH_CHECK_INDEX(
- !(index == 0 && dim == 0 && self_sizes->size() == 0),
+ !(index == 0 && dim == 0 && self_sizes->empty()),
"invalid index of a 0-dim tensor. ",
"Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number");

8 changes: 4 additions & 4 deletions aten/src/ATen/TensorIterator.cpp
@@ -163,7 +163,7 @@ TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef sha

TensorIteratorConfig& TensorIteratorConfig::declare_static_shape(IntArrayRef shape, IntArrayRef squash_dims) {
declare_static_shape(shape);
- if (!static_shape_->size()) return *this;
+ if (static_shape_->empty()) return *this;
for (const auto& squash_dim : squash_dims) {
TORCH_CHECK(squash_dim >= 0 && squash_dim < static_cast<int64_t>(static_shape_->size()),
"squash_dim ", squash_dim, " must be in [0, ", static_shape_->size(), ").");
@@ -715,7 +715,7 @@ void TensorIteratorBase::permute_dimensions(IntArrayRef perm) {
// Update shape and strides
shape_ = reorder(shape_);
for (auto& op : operands_) {
- if (op.stride_bytes.size() > 0) {
+ if (!op.stride_bytes.empty()) {
op.stride_bytes = reorder(op.stride_bytes);
}
}
@@ -1225,7 +1225,7 @@ void TensorIteratorBase::compute_shape(const TensorIteratorConfig& config) {
"TensorIterator does not support symbolic shapes; please implement this operator in torch/_refs "
"using the elementwise or reduction helpers (look at backtrace to find out what operator this is)");
auto shape = op.tensor_base().sizes();
- if (shape.size() == 0) {
+ if (shape.empty()) {
has_scalars = true;
} else {
has_tensors = true;
@@ -1724,7 +1724,7 @@ void DimCounter::increment(const std::array<int64_t, 2>& step) {
std::array<int64_t, 2> DimCounter::max_2d_step() const {
int64_t step0 = std::min(shape[0] - values[0], range.end - offset);
int64_t step1 = 1;
- if (step0 == shape[0] && shape.size() >= 1) {
+ if (step0 == shape[0] && !shape.empty()) {
step1 = std::min(shape[1] - values[1], (range.end - offset) / shape[0]);
}
return {step0, step1};
4 changes: 2 additions & 2 deletions aten/src/ATen/WrapDimUtils.h
@@ -19,7 +19,7 @@ inline int64_t maybe_wrap_dim(int64_t dim, TensorImpl* tensor) {
}

inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) {
- if (tensors.size() == 0) {
+ if (tensors.empty()) {
// can't wrap empty TensorList; rely on underlying implementation to throw
// error if necessary.
return dim;
@@ -30,7 +30,7 @@ inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) {
inline int64_t maybe_wrap_dim(
int64_t dim,
const std::vector<std::vector<int64_t>>& tensor_sizes) {
- if (tensor_sizes.size() == 0) {
+ if (tensor_sizes.empty()) {
// can't wrap empty list; rely on underlying implementation to throw error
// if necessary
return dim;
4 changes: 2 additions & 2 deletions aten/src/ATen/code_template.h
@@ -192,14 +192,14 @@ struct CodeTemplate {
const string_list& strings,
bool comma_before,
bool comma_after) const {
- if (comma_before && strings.size() > 0)
+ if (comma_before && !strings.empty())
out << ", ";
for (const auto i : c10::irange(strings.size())) {
if (i > 0)
out << ", ";
out << strings[i];
}
- if (comma_after && strings.size() > 0)
+ if (comma_after && !strings.empty())
out << ", ";
}
// These indentation functions follow the convention that they never emit
2 changes: 1 addition & 1 deletion aten/src/ATen/core/boxing/impl/boxing.h
@@ -234,7 +234,7 @@ struct BoxedKernelWrapper<
[&] {
// op returns void, boxed kernel has pushed nothing onto stack.
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
- stack.size() == 0,
+ stack.empty(),
"Boxed kernel was expected to return no values on the stack, ",
"but instead returned ", stack.size(), " values."
);
6 changes: 3 additions & 3 deletions aten/src/ATen/core/class_type.cpp
@@ -152,7 +152,7 @@ void checkForwardHookInputArguments(
if (forward_args.size() == 1) {
// check for empty forward case
TORCH_CHECK(
- input_tuple_types.size() == 0,
+ input_tuple_types.empty(),
hook_id,
"was expecting Tuple[()] as the input type. Received type: '",
input_arg.type()->annotation_str(),
@@ -213,7 +213,7 @@ void ClassType::checkForwardPreHookSchema(
// or the contained single type if the input was a tuple containing a single
// type.
TORCH_CHECK(
- pre_hook_schema.returns().size() != 0,
+ !pre_hook_schema.returns().empty(),
hook_id,
"is missing a return annotation. Return annotations are required, please add one.\n",
pre_hook_err_msg
@@ -254,7 +254,7 @@ void ClassType::checkForwardPreHookSchema(
// check for edge case of Tuple[()] for when forward has no arguments
if (forward_args.size() == 1) {
TORCH_CHECK(
- return_tuple_types.size() == 0,
+ return_tuple_types.empty(),
wrong_type_returned_err_msg,
" Was expecting either 'None' or 'Tuple[()]' since forward had ",
"no arguments.\n",
8 changes: 4 additions & 4 deletions aten/src/ATen/core/dispatch/OperatorEntry.cpp
@@ -145,7 +145,7 @@ OperatorEntry::AnnotatedKernelContainerIterator OperatorEntry::registerKernel(
#ifdef C10_DISPATCHER_ONE_KERNEL_PER_DISPATCH_KEY
if (k[0].kernel.isValid()) {
#else
- if (k.size() > 0) {
+ if (!k.empty()) {
#endif
// Suppress the warning for Meta key as we are overriding C++ meta functions with python meta functions
// for some ops
@@ -221,12 +221,12 @@ bool OperatorEntry::hasKernelForDispatchKey(DispatchKey k) const {
TORCH_INTERNAL_ASSERT(kernels_.find(DispatchKey::Undefined) == kernels_.end());
auto it = kernels_.find(k);
if (it == kernels_.end()) return false;
- return it->second.size() > 0;
+ return !it->second.empty();
}

const KernelFunction& OperatorEntry::kernelForDispatchKey(DispatchKey k) const {
auto it = kernels_.find(k);
- TORCH_CHECK(it != kernels_.end() && it->second.size(), "no kernel for ", k, " on ", name_);
+ TORCH_CHECK(it != kernels_.end() && !it->second.empty(), "no kernel for ", k, " on ", name_);
auto jt = it->second.begin();
TORCH_INTERNAL_ASSERT(jt->kernel.isValid())
return jt->kernel;
@@ -462,7 +462,7 @@ void OperatorEntry::checkInvariants() const {
}
TORCH_INTERNAL_ASSERT(kernels_.find(DispatchKey::Undefined) == kernels_.end(), dumpState());
for (const auto& kv : kernels_) {
- TORCH_INTERNAL_ASSERT(kv.second.size() > 0, dumpState());
+ TORCH_INTERNAL_ASSERT(!kv.second.empty(), dumpState());
}
for (auto k : DispatchKeySet(DispatchKeySet::FULL)) {
auto expected_k = computeDispatchTableEntry(c10::Dispatcher::singleton(), k);
2 changes: 1 addition & 1 deletion aten/src/ATen/core/dynamic_type.cpp
@@ -38,7 +38,7 @@ std::string DynamicType::str() const {
std::string ret = "Dynamic<";
ret += std::to_string(static_cast<DynamicTypeBits>(tag_));
ret += ">";
- if (tag_ != Tag::Class && arguments_.elems.size() > 0) {
+ if (tag_ != Tag::Class && !arguments_.elems.empty()) {
ret += "[";
for (const auto& arg : arguments_.elems) {
if (arg.label) {
4 changes: 2 additions & 2 deletions aten/src/ATen/core/function_schema.cpp
@@ -109,7 +109,7 @@ c10::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr&
(*maybe_inner_types).end());
}
}
- if (mutable_types.size() == 0) {
+ if (mutable_types.empty()) {
return c10::nullopt;
}
return mutable_types;
@@ -130,7 +130,7 @@ c10::optional<AliasTypeSet> FunctionSchema::mapTypeToAliasTypeSet(const TypePtr&
(*maybe_inner_types).end());
}
}
- if (mutable_types.size() == 0) {
+ if (mutable_types.empty()) {
return c10::nullopt;
}
return {AliasTypeSet{TupleType::create(std::move(mutable_types))}};
(Remaining file diffs not shown in this view.)