Skip to content

Commit

Permalink
use performance-unnecessary-value-param in clang-tidy (pytorch#102615)
Browse files Browse the repository at this point in the history
performance-unnecessary-value-param has been disabled in clang-tidy for a long time. However, this check is actually useful and able to find some interesting performance problems.

Pull Request resolved: pytorch#102615
Approved by: https://github.com/malfet, https://github.com/Skylion007
  • Loading branch information
cyyever authored and pytorchmergebot committed Jul 28, 2023
1 parent 8f4d8b3 commit b3e24c5
Show file tree
Hide file tree
Showing 16 changed files with 51 additions and 49 deletions.
1 change: 0 additions & 1 deletion .clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ modernize-*,
-modernize-use-trailing-return-type,
-modernize-use-nodiscard,
performance-*,
-performance-unnecessary-value-param,
readability-container-size-empty,
'
HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
Expand Down
13 changes: 4 additions & 9 deletions aten/src/ATen/core/IListRef_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,15 +30,10 @@ static std::vector<at::OptionalTensorRef> get_unboxed_opt_tensor_vector() {
static std::vector<at::Tensor> tensors;
std::vector<at::OptionalTensorRef> optional_tensors;
constexpr size_t SIZE = 5;
for (size_t i = 0; i < SIZE * 2; i++) {
if (i % 2 == 0) {
if (tensors.size() + 1 < i / 2) {
tensors.push_back(at::empty({0}));
}
optional_tensors.emplace_back(tensors[i / 2]);
} else {
optional_tensors.emplace_back();
}
for (size_t i = 0; i < SIZE; i++) {
tensors.push_back(at::empty({0}));
optional_tensors.emplace_back(tensors[i]);
optional_tensors.emplace_back();
}
return optional_tensors;
}
Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/core/List.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ class ListElementReference final {
ListElementReference& operator=(const T& new_value) &&;

// assigning another ref to this assigns the underlying value
ListElementReference& operator=(ListElementReference&& rhs) &&;
ListElementReference& operator=(ListElementReference&& rhs) && noexcept;

const IValue& get() const& {
return *iterator_;
Expand Down Expand Up @@ -124,7 +124,7 @@ class ListIterator final {
ListIterator(const ListIterator&) = default;
ListIterator(ListIterator&&) noexcept = default;
ListIterator& operator=(const ListIterator&) = default;
ListIterator& operator=(ListIterator&&) = default;
ListIterator& operator=(ListIterator&&) noexcept = default;

ListIterator& operator++() {
++iterator_;
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/core/List_inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(
}

template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference<T, Iterator>&& rhs) && {
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference<T, Iterator>&& rhs) && noexcept {
*iterator_ = *rhs.iterator_;
return *this;
}
Expand Down
20 changes: 10 additions & 10 deletions aten/src/ATen/core/ivalue.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -488,9 +488,9 @@ template <class T>
std::ostream& printList(
std::ostream& out,
const T& list,
const std::string start,
const std::string finish,
IValueFormatter formatter) {
const std::string& start,
const std::string& finish,
const IValueFormatter& formatter) {
out << start;
for (const auto i : c10::irange(list.size())) {
if (i > 0) {
Expand All @@ -506,24 +506,24 @@ std::ostream& printList(
std::ostream& printMaybeAnnotatedList(
std::ostream& out,
const IValue& the_list,
IValueFormatter formatter) {
const IValueFormatter& formatter) {
auto list_elem_type = the_list.type()->containedType(0);
if (the_list.toListRef().empty() ||
!elementTypeCanBeInferredFromMembers(list_elem_type)) {
out << "annotate(" << the_list.type<c10::Type>()->annotation_str() << ", ";
printList(out, the_list.toListRef(), "[", "]", std::move(formatter));
printList(out, the_list.toListRef(), "[", "]", formatter);
out << ")";
return out;
} else {
return printList(out, the_list.toListRef(), "[", "]", std::move(formatter));
return printList(out, the_list.toListRef(), "[", "]", formatter);
}
}

template <typename Dict>
std::ostream& printDict(
std::ostream& out,
const Dict& v,
IValueFormatter formatter) {
const IValueFormatter& formatter) {
out << "{";

bool first = true;
Expand All @@ -547,14 +547,14 @@ std::ostream& printDict(
static std::ostream& printMaybeAnnotatedDict(
std::ostream& out,
const IValue& the_dict,
IValueFormatter formatter) {
const IValueFormatter& formatter) {
auto value_type = the_dict.type()->castRaw<DictType>()->getValueType();
if (the_dict.toGenericDict().empty() ||
!elementTypeCanBeInferredFromMembers(value_type)) {
out << "annotate(" << the_dict.type<c10::Type>()->annotation_str() << ",";
printDict(out, the_dict.toGenericDict(), std::move(formatter)) << ")";
printDict(out, the_dict.toGenericDict(), formatter) << ")";
} else {
return printDict(out, the_dict.toGenericDict(), std::move(formatter));
return printDict(out, the_dict.toGenericDict(), formatter);
}
return out;
}
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/core/ivalue_inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -1919,7 +1919,7 @@ template <
std::is_lvalue_reference<Args>...,
guts::negation<std::is_constructible<IValue, Args>>...>::value,
std::nullptr_t> = nullptr>
std::tuple<Args...> generic_to(IValue ivalue, _fake_type<std::tuple<Args...>>) {
std::tuple<Args...> generic_to(const IValue& ivalue, _fake_type<std::tuple<Args...>>) {
const auto& vals = ivalue.toTupleRef().elements();
TORCH_CHECK(vals.size() == sizeof...(Args));
return detail::generic_to_tuple_impl<std::tuple<Args...>>(vals, Indices{});
Expand Down
2 changes: 1 addition & 1 deletion c10/core/StorageImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ struct C10_API StorageImpl : public c10::intrusive_ptr_target {

StorageImpl(
use_byte_size_t /*use_byte_size*/,
SymInt size_bytes,
const SymInt& size_bytes,
at::Allocator* allocator,
bool resizable)
: StorageImpl(
Expand Down
5 changes: 3 additions & 2 deletions c10/core/TensorImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -260,10 +260,11 @@ struct C10_API ExtraMeta {
std::unique_ptr<c10::SymbolicShapeMeta> symbolic_shape_meta,
std::unique_ptr<c10::NamedTensorMetaInterface> named_tensor_meta,
intrusive_ptr<c10::BackendMeta> backend_meta,
c10::optional<std::string> custom_data_ptr_error_msg_ = c10::nullopt)
c10::optional<std::string> custom_data_ptr_error_msg = c10::nullopt)
: symbolic_shape_meta_(std::move(symbolic_shape_meta)),
named_tensor_meta_(std::move(named_tensor_meta)),
backend_meta_(std::move(backend_meta)) {}
backend_meta_(std::move(backend_meta)),
custom_data_ptr_error_msg_(std::move(custom_data_ptr_error_msg)) {}

std::unique_ptr<ExtraMeta> clone() const {
return std::make_unique<ExtraMeta>(*this);
Expand Down
4 changes: 2 additions & 2 deletions functorch/csrc/dim/dim.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -880,7 +880,7 @@ mpy::obj<Tensor> Tensor::create_delayed(mpy::object op, mpy::vector_args args, S
mpy::obj<Tensor> self = Tensor::create();
self->capture_levels(levels);
self->has_device_ = has_device;
self->delayed_ = std::make_unique<DelayedOperator>(op, args);
self->delayed_ = std::make_unique<DelayedOperator>(std::move(op), args);
return self;
}

Expand Down Expand Up @@ -1082,7 +1082,7 @@ PyObject* py_tree_flatten(PyObject *self,



mpy::object tree_map(Arena& A, std::function<mpy::handle(mpy::handle)> fn, mpy::handle agg) {
mpy::object tree_map(Arena& A, const std::function<mpy::handle(mpy::handle)>& fn, mpy::handle agg) {
Slice<mpy::handle> elements;
auto unflatten = tree_flatten(A, agg, elements);
for (auto i : elements.enumerate()) {
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/jit/api/module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -471,7 +471,7 @@ IValue Module::create_class(const c10::QualifiedName& name, Stack stack) const {

Module freeze(
const Module& module,
c10::optional<std::vector<std::string>> preserved_attrs,
const c10::optional<std::vector<std::string>>& preserved_attrs,
bool optimize_numerics) {
TORCH_CHECK(
!module.hasattr("training") || !module.is_training(),
Expand Down
15 changes: 9 additions & 6 deletions torch/csrc/jit/api/module.h
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,8 @@ struct TORCH_API Module : public Object {
explicit Module(c10::QualifiedName class_name);
Module(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
Module() = default;
Module(const Module&) = default;
Module& operator=(const Module&) = default;
Module(
c10::QualifiedName,
std::shared_ptr<CompilationUnit> cu,
Expand Down Expand Up @@ -268,7 +270,7 @@ struct TORCH_API Module : public Object {
}

void set_delete_memory(std::shared_ptr<char> delete_mem) {
mem_to_delete_ = delete_mem;
mem_to_delete_ = std::move(delete_mem);
}

// A set of functions to maintain input shapes through torch.jit.save and
Expand All @@ -279,11 +281,11 @@ struct TORCH_API Module : public Object {
return;
}
auto c10_inputs = c10::impl::GenericList(AnyType::get());
for (const IValue& value : inputs) {
for (IValue& value : inputs) {
// Not checking whether this is traceable type as that is already checked
// higher up in the stack and changing that would require a larger
// restructuring.
c10_inputs.push_back(value);
c10_inputs.emplace_back(std::move(value));
}
traced_inputs_.insert_or_assign(func_name, c10_inputs);
}
Expand Down Expand Up @@ -326,7 +328,8 @@ struct TORCH_API Module : public Object {
// details.
TORCH_API Module freeze(
const Module& module,
c10::optional<std::vector<std::string>> preserved_attrs = c10::nullopt,
const c10::optional<std::vector<std::string>>& preserved_attrs =
c10::nullopt,
bool optimize_numerics = true);

// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation
Expand Down Expand Up @@ -400,7 +403,7 @@ struct slot_iterator_impl {
// slots of root
bool return_module) // if true include root itself as the first thing
// visited (used in modules())
: cursors_({SlotCursor{root, return_module ? -1 : 0}}),
: cursors_({SlotCursor{std::move(root), return_module ? -1 : 0}}),
recurse_(recurse) {
// advance iterator to first valid element (or the end, if empty)
while_not_valid_next();
Expand Down Expand Up @@ -543,7 +546,7 @@ struct slot_list_impl {
}

slot_list_impl(Module module, bool recurse, bool return_module)
: module_(module),
: module_(std::move(module)),
recurse_(recurse),
return_module_(return_module),
size_(c10::nullopt) {
Expand Down
8 changes: 5 additions & 3 deletions torch/csrc/jit/api/object.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,10 @@ class ObjectAttributeError : public std::runtime_error {
ObjectAttributeError(const std::string& what) : std::runtime_error(what) {}
};

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct TORCH_API Object {
Object() = default;
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
Object(const Object&) = default;
Object& operator=(const Object&) = default;
Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
Object(
Expand Down Expand Up @@ -146,7 +146,9 @@ struct TORCH_API Object {
setter = Method(_ivalue(), prop.setter);
}
return Property{
prop.name, Method(_ivalue(), prop.getter), std::move(setter)};
std::move(prop.name),
Method(_ivalue(), prop.getter),
std::move(setter)};
});
}

Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/profiler/collection.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -465,7 +465,7 @@ void materialize_vulkan(
std::vector<std::shared_ptr<Result>>& out,
AppendOnlyList<ExtraFields<EventType::Vulkan>::raw_event_t, BlockSize>&
raw_events,
const std::function<time_t(approx_time_t)> time_converter,
const std::function<time_t(approx_time_t)>& time_converter,
const uint64_t tid,
const kineto::DeviceAndResource& kineto_info) {
for (const auto& i : raw_events) {
Expand Down
16 changes: 9 additions & 7 deletions torch/csrc/utils/init.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,13 +40,15 @@ void initThroughputBenchmarkBindings(PyObject* module) {
// the GIL or not further down in the stack
return self.runOnce(std::move(args), std::move(kwargs));
})
.def("benchmark", [](ThroughputBenchmark& self, BenchmarkConfig config) {
// The benchmark always runs without the GIL. GIL will be used where
// needed. This will happen only in the nn.Module mode when manipulating
// inputs and running actual inference
pybind11::gil_scoped_release no_gil_guard;
return self.benchmark(config);
});
.def(
"benchmark",
[](ThroughputBenchmark& self, const BenchmarkConfig& config) {
// The benchmark always runs without the GIL. GIL will be used where
// needed. This will happen only in the nn.Module mode when
// manipulating inputs and running actual inference
pybind11::gil_scoped_release no_gil_guard;
return self.benchmark(config);
});
}

} // namespace throughput_benchmark
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/utils/throughput_benchmark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ py::object ThroughputBenchmark::runOnce(py::args&& args, py::kwargs&& kwargs) {
}
}

ThroughputBenchmark::ThroughputBenchmark(jit::Module script_module)
ThroughputBenchmark::ThroughputBenchmark(const jit::Module& script_module)
: script_module_(script_module) {}

ThroughputBenchmark::ThroughputBenchmark(py::object module)
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/utils/throughput_benchmark.h
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,7 @@ void ModuleBenchmark::addInput(py::args&& args, py::kwargs&& kwargs);
*/
class C10_HIDDEN ThroughputBenchmark {
public:
explicit ThroughputBenchmark(jit::Module module);
explicit ThroughputBenchmark(const jit::Module& module);
explicit ThroughputBenchmark(py::object module);

// Add one more input example. This input example should be in the exact
Expand Down

0 comments on commit b3e24c5

Please sign in to comment.