More fixes and improved clang-tidy checkers (pytorch#93213)
Pull Request resolved: pytorch#93213
Approved by: https://github.com/Skylion007
cyyever authored and pytorchmergebot committed Feb 1, 2023
1 parent 679e869 · commit 37f7c00
Showing 37 changed files with 91 additions and 100 deletions.
5 changes: 4 additions & 1 deletion .clang-tidy
@@ -3,11 +3,14 @@
InheritParentConfig: true
Checks: '
bugprone-*,
-bugprone-easily-swappable-parameters,
-bugprone-forward-declaration-namespace,
-bugprone-macro-parentheses,
-bugprone-lambda-function-name,
-bugprone-reserved-identifier,
-bugprone-swapped-arguments,
cppcoreguidelines-*,
-cppcoreguidelines-avoid-do-while,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-avoid-non-const-global-variables,
-cppcoreguidelines-interfaces-global-init,
@@ -30,6 +33,7 @@ misc-unused-alias-decls,
misc-unused-using-decls,
modernize-*,
-modernize-concat-nested-namespaces,
-modernize-macro-to-enum,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-default-member-init,
@@ -44,5 +48,4 @@ readability-container-size-empty,
HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
AnalyzeTemporaryDtors: false
WarningsAsErrors: '*'
-CheckOptions:
...
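
Most of the edits in this commit exist to satisfy the checks kept enabled above, while the explicit exclusions silence checks that clash with common C++ practice. For example, cppcoreguidelines-avoid-do-while (excluded above) flags every do-while loop, including the do { ... } while (0) idiom that makes a multi-statement macro expand as a single statement. A minimal sketch of such a macro (hypothetical, not taken from the PyTorch sources):

    #include <cstdio>

    // The do-while(0) wrapper makes the macro compose safely with if/else,
    // which is why a macro-heavy codebase disables
    // cppcoreguidelines-avoid-do-while instead of rewriting every macro.
    #define LOG_AND_RETURN_IF(cond, msg) \
      do {                               \
        if (cond) {                      \
          std::puts(msg);                \
          return;                        \
        }                                \
      } while (0)

    void process(int n) {
      if (n < 0)
        LOG_AND_RETURN_IF(true, "negative input"); // expands to one statement
      else
        std::printf("processing %d\n", n);
    }

    int main() { process(-1); process(42); }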
2 changes: 1 addition & 1 deletion aten/src/ATen/core/TensorBase.h
@@ -858,7 +858,7 @@ auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t

template <typename T>
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t<T> {
-  return _register_hook(std::move(hook));
+  return _register_hook(std::forward<T>(hook));
}

namespace detail {
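The fix above matters because T&& here is a forwarding reference: hook may be bound to an lvalue, and an unconditional std::move would quietly gut the caller's object. clang-tidy's bugprone-move-forwarding-reference targets exactly this pattern. A standalone sketch with hypothetical names:

    #include <iostream>
    #include <string>
    #include <utility>

    // Hypothetical sink, standing in for _register_hook.
    void sink(std::string s) { std::cout << "got: " << s << '\n'; }

    // Buggy: T&& is a forwarding reference, so std::move also moves from
    // lvalue arguments (bugprone-move-forwarding-reference).
    template <typename T>
    void call_moving(T&& v) { sink(std::move(v)); }

    // Fixed: std::forward moves only when the caller passed an rvalue.
    template <typename T>
    void call_forwarding(T&& v) { sink(std::forward<T>(v)); }

    int main() {
      std::string s = "hello";
      call_forwarding(s);  // s is copied; still "hello" afterwards
      call_moving(s);      // s is moved from; left in an unspecified state
      std::cout << "after: " << s << '\n';
    }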
2 changes: 1 addition & 1 deletion aten/src/ATen/core/dispatch/OperatorEntry.cpp
@@ -64,7 +64,7 @@ const AnnotatedKernel& OperatorEntry::ambiguousAutogradOtherKernel() const {
return kernel;
}

-void OperatorEntry::assertSignatureIsCorrect(const CppSignature call_signature, bool has_symint) const {
+void OperatorEntry::assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const {
if (has_symint) {
if (C10_UNLIKELY(sym_cpp_signature_.has_value() && (call_signature != sym_cpp_signature_->signature))) {
reportSignatureError(call_signature, *sym_cpp_signature_);
2 changes: 1 addition & 1 deletion aten/src/ATen/core/dispatch/OperatorEntry.h
@@ -167,7 +167,7 @@ class TORCH_API OperatorEntry final {
assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
}

-  void assertSignatureIsCorrect(const CppSignature call_signature, bool has_symint) const;
+  void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;

[[noreturn]] void reportError(DispatchKey dispatchKey) const;

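Both OperatorEntry changes apply the same rule: a const value parameter still copies its argument at every call, so a non-trivial type should be taken by const reference instead (clang-tidy: performance-unnecessary-value-param). A minimal sketch with a hypothetical type standing in for CppSignature:

    #include <string>
    #include <vector>

    // Hypothetical stand-in for CppSignature.
    struct Signature {
      std::string name;
      std::vector<std::string> args;
    };

    // Before: `const Signature` is top-level const on a value parameter;
    // the whole object is still copied at each call site.
    bool matches_by_value(const Signature sig, const std::string& name) {
      return sig.name == name;
    }

    // After: `const Signature&` passes a reference and copies nothing.
    bool matches_by_ref(const Signature& sig, const std::string& name) {
      return sig.name == name;
    }

    int main() {
      Signature s{"add", {"x", "y"}};
      return matches_by_ref(s, "add") && matches_by_value(s, "add") ? 0 : 1;
    }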
10 changes: 5 additions & 5 deletions aten/src/ATen/core/ivalue.h
@@ -80,7 +80,7 @@ struct StreamData3Holder : c10::intrusive_ptr_target {
StreamData3Holder(struct c10::StreamData3 d) {
val = d;
}
-  StreamData3Holder() = default;
+  StreamData3Holder() = delete;
struct c10::StreamData3 val;
};

@@ -1261,12 +1261,12 @@ struct TORCH_API IValue final {
friend MaybeOwnedTraits<IValue>;

Payload payload;
-  Tag tag;
+  Tag tag{IValue::Tag::None};
friend struct WeakIValue;
};

struct TORCH_API WeakIValue final {
-  WeakIValue() : tag(IValue::Tag::None), is_intrusive_ptr(false) {}
+  WeakIValue() = default;

WeakIValue(const WeakIValue& rhs)
: payload(rhs.payload),
@@ -1378,8 +1378,8 @@ struct TORCH_API WeakIValue final {
private:
using Payload = IValue::Payload::TriviallyCopyablePayload;
Payload payload;
-  IValue::Tag tag;
-  bool is_intrusive_ptr;
+  IValue::Tag tag{IValue::Tag::None};
+  bool is_intrusive_ptr{false};
};

// An owning pointer to a type. When the type is class type, it requires a pair
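The IValue/WeakIValue edits all follow one pattern: move initialization from hand-written constructors into default member initializers, so every constructor (including a defaulted one) starts from a known state and cppcoreguidelines-pro-type-member-init stays quiet. A condensed sketch of the WeakIValue change, with illustrative names:

    #include <cassert>

    enum class Tag { None, Int, Double };

    // Before: the invariant lives only in a hand-written constructor.
    struct WeakBefore {
      Tag tag;
      bool is_intrusive_ptr;
      WeakBefore() : tag(Tag::None), is_intrusive_ptr(false) {}
    };

    // After: default member initializers make the compiler-generated
    // constructor correct, and future constructors can't forget a member.
    struct WeakAfter {
      Tag tag{Tag::None};
      bool is_intrusive_ptr{false};
      WeakAfter() = default;
    };

    int main() {
      WeakAfter w;
      assert(w.tag == Tag::None && !w.is_intrusive_ptr);
    }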
4 changes: 2 additions & 2 deletions aten/src/ATen/core/jit_type.h
@@ -1001,8 +1001,8 @@ struct TORCH_API DictType : public SharedType {

std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
std::stringstream ss;
ss << "Dict[" << getKeyType()->annotation_str(printer) << ", "
<< getValueType()->annotation_str(std::move(printer)) << "]";
ss << "Dict[" << getKeyType()->annotation_str(printer) << ", ";
ss << getValueType()->annotation_str(std::move(printer)) << "]";
return ss.str();
}

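Splitting the chained << statement separates the plain use of printer from the std::move(printer) that follows. Since C++17 the operands of << are sequenced left to right, so the original was not strictly wrong, but bugprone-use-after-move still flags a read and a move of the same variable inside one statement; two statements make the ordering obvious to both readers and the checker. A small sketch with a hypothetical helper:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <utility>

    // Hypothetical helper standing in for annotation_str; takes its
    // argument by value so the std::move below actually moves.
    std::string describe(std::string tag) { return "[" + tag + "]"; }

    int main() {
      std::string printer = "pretty";
      std::ostringstream ss;
      // Flagged form (one statement, read then move of `printer`):
      //   ss << describe(printer) << describe(std::move(printer));
      // Clean form: the sequencing is explicit.
      ss << describe(printer);
      ss << describe(std::move(printer));
      std::cout << ss.str() << '\n';  // [pretty][pretty]
    }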
4 changes: 1 addition & 3 deletions aten/src/ATen/functorch/BatchRulesScatterOps.cpp
@@ -350,14 +350,12 @@ namespace {
// /aten/src/ATen/native/TensorAdvancedIndexing.cpp#L294-L312
VmapDimVector compute_indexed_shape(const Tensor &src, TensorList indices_list)
{
-  int64_t dims_before = 0, dims_after = 0, dims_indexed = 0;
+  int64_t dims_before = 0, dims_indexed = 0;
IntArrayRef replacement_shape;
for (const auto dim : c10::irange(indices_list.size())) {
if (!indices_list[dim].defined()) {
if (dims_indexed == 0) {
dims_before++;
-      } else {
-        dims_after++;
}
} else {
dims_indexed++;
2 changes: 1 addition & 1 deletion aten/src/ATen/record_function.cpp
@@ -153,7 +153,7 @@ class CacheEntry {

// Includes sampling callbacks which are waiting to run.
c10::SmallVector<CallbackAndCounter, kSoftLimitCallbacks> callbacks_;
-  RecordScope scope_;
+  RecordScope scope_{RecordScope::FUNCTION};

StepCallbacks active_callbacks_;

12 changes: 10 additions & 2 deletions c10/core/CPUAllocator.cpp
@@ -207,7 +207,11 @@ void ProfiledCPUMemoryReporter::New(void* ptr, size_t nbytes) {
}
if (profile_memory) {
reportMemoryUsageToProfiler(
-        ptr, nbytes, allocated, 0, c10::Device(c10::DeviceType::CPU));
+        ptr,
+        static_cast<int64_t>(nbytes),
+        static_cast<int64_t>(allocated),
+        0,
+        c10::Device(c10::DeviceType::CPU));
}
}

@@ -242,7 +246,11 @@ void ProfiledCPUMemoryReporter::Delete(void* ptr) {
}
if (profile_memory) {
reportMemoryUsageToProfiler(
-        ptr, -nbytes, allocated, 0, c10::Device(c10::DeviceType::CPU));
+        ptr,
+        -static_cast<int64_t>(nbytes),
+        static_cast<int64_t>(allocated),
+        0,
+        c10::Device(c10::DeviceType::CPU));
}
}

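The casts in both hunks are more than style. nbytes is a size_t, and unary minus on an unsigned type wraps around modulo 2^64 instead of producing a negative number; converting that huge value back to int64_t happens to land on the intended negative delta on two's-complement targets, but casting before negating states the intent and satisfies the sign-conversion checks. A two-line demonstration:

    #include <cstdint>
    #include <iostream>

    int main() {
      std::size_t nbytes = 4096;
      // Unary minus on an unsigned type wraps modulo 2^64:
      std::cout << -nbytes << '\n';  // 18446744073709547520
      // Casting first makes the signed intent explicit:
      std::cout << -static_cast<std::int64_t>(nbytes) << '\n';  // -4096
    }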
2 changes: 1 addition & 1 deletion c10/core/Device.cpp
@@ -130,7 +130,7 @@ Device::Device(const std::string& device_string) : Device(Type::CPU) {

try {
if (!device_index_str.empty()) {
-      index_ = c10::stoi(device_index_str);
+      index_ = static_cast<c10::DeviceIndex>(c10::stoi(device_index_str));
}
} catch (const std::exception&) {
TORCH_CHECK(
6 changes: 5 additions & 1 deletion c10/core/TensorImpl.cpp
@@ -104,6 +104,7 @@ TensorImpl::TensorImpl(
// the Python and PythonTLSSnapshot dispatch keys will be set and all is well.
// The point is to delay the dispatch key setting until that point.

+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
TensorImpl::TensorImpl(
ImplType type,
Storage&& storage,
@@ -122,12 +123,14 @@ TensorImpl::TensorImpl(
}
}

+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
TensorImpl::TensorImpl(
DispatchKeySet key_set,
const caffe2::TypeMeta data_type,
c10::optional<c10::Device> device_opt)
: TensorImpl({}, key_set, data_type, device_opt) {}

+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
TensorImpl::TensorImpl(
Storage&& storage,
DispatchKeySet key_set,
@@ -864,7 +867,8 @@ void TensorImpl::Extend(int64_t num, float growthPct) {
newCapacity[0] = std::max(
newDims[0],
static_cast<int64_t>(std::ceil(
-          sizes_and_strides_.size_at_unchecked(0) * (1 + growthPct / 100))));
+          static_cast<float>(sizes_and_strides_.size_at_unchecked(0)) *
+          (1 + growthPct / 100))));
auto oldData = std::move(storage_.data_ptr());
auto oldSize = numel_;
Resize(std::move(newCapacity));
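In Extend, the size is an int64_t multiplied by a float growth factor; the arithmetic always happened in float, but the conversion was implicit. Writing static_cast<float> out makes the lossy int-to-float step visible, which is what the narrowing-conversion checks ask for. A hedged sketch of the same computation with made-up values:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main() {
      std::int64_t old_size = 1000;
      float growth_pct = 25.0f;
      // The explicit cast marks where int64_t precision can be lost
      // (float holds only about 24 bits of mantissa).
      auto new_size = std::max(
          old_size,
          static_cast<std::int64_t>(std::ceil(
              static_cast<float>(old_size) * (1 + growth_pct / 100))));
      std::cout << new_size << '\n';  // 1250
    }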
3 changes: 3 additions & 0 deletions c10/core/impl/PyObjectSlot.cpp
@@ -26,6 +26,7 @@ PyInterpreter* PyObjectSlot::pyobj_interpreter() {
}

PyObject* PyObjectSlot::_unchecked_untagged_pyobj() const {
+  // NOLINTNEXTLINE(performance-no-int-to-ptr)
return reinterpret_cast<PyObject*>(
reinterpret_cast<uintptr_t>(pyobj_) & ~0x1ULL);
}
@@ -47,10 +48,12 @@ PyInterpreter& PyObjectSlot::load_pyobj_interpreter() const {
}

bool PyObjectSlot::owns_pyobj() {
+  // NOLINTNEXTLINE(performance-no-int-to-ptr)
return reinterpret_cast<uintptr_t>(pyobj_) & 1;
}

void PyObjectSlot::set_owns_pyobj(bool b) {
+  // NOLINTNEXTLINE(performance-no-int-to-ptr)
pyobj_ = reinterpret_cast<PyObject*>(
reinterpret_cast<uintptr_t>(_unchecked_untagged_pyobj()) | b);
}
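performance-no-int-to-ptr flags integer-to-pointer casts because they can pessimize alias analysis, but here the round-trip through an integer is the whole point: PyObjectSlot stores a flag in bit 0 of an aligned PyObject*, so each cast gets a targeted NOLINT rather than disabling the check globally. A self-contained sketch of the same low-bit tagging scheme, with hypothetical names:

    #include <cassert>
    #include <cstdint>

    struct Obj { int payload; };  // allocations are at least 2-byte aligned,
                                  // so bit 0 of an Obj* is free for a flag

    Obj* untag(Obj* p) {
      // NOLINTNEXTLINE(performance-no-int-to-ptr)
      return reinterpret_cast<Obj*>(
          reinterpret_cast<std::uintptr_t>(p) & ~0x1ULL);
    }

    bool tag_bit(Obj* p) {
      return reinterpret_cast<std::uintptr_t>(p) & 1;
    }

    Obj* set_tag(Obj* p, bool b) {
      // NOLINTNEXTLINE(performance-no-int-to-ptr)
      return reinterpret_cast<Obj*>(
          reinterpret_cast<std::uintptr_t>(untag(p)) |
          static_cast<std::uintptr_t>(b));
    }

    int main() {
      Obj o{42};
      Obj* tagged = set_tag(&o, true);
      assert(tag_bit(tagged) && untag(tagged)->payload == 42);
    }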
2 changes: 1 addition & 1 deletion c10/core/impl/TorchDispatchModeTLS.cpp
@@ -41,7 +41,7 @@ const std::shared_ptr<SafePyObject>& TorchDispatchModeTLS::get_stack_at(
}

int64_t TorchDispatchModeTLS::stack_len() {
-  return torchDispatchModeState.stack_.size();
+  return static_cast<int64_t>(torchDispatchModeState.stack_.size());
}

const TorchDispatchModeTLS& TorchDispatchModeTLS::get_state() {
4 changes: 2 additions & 2 deletions c10/core/impl/alloc_cpu.cpp
@@ -30,8 +30,8 @@ void memset_junk(void* data, size_t num) {
static constexpr int32_t kJunkPattern = 0x7fedbeef;
static constexpr int64_t kJunkPattern64 =
static_cast<int64_t>(kJunkPattern) << 32 | kJunkPattern;
-  int32_t int64_count = num / sizeof(kJunkPattern64);
-  int32_t remaining_bytes = num % sizeof(kJunkPattern64);
+  auto int64_count = num / sizeof(kJunkPattern64);
+  auto remaining_bytes = num % sizeof(kJunkPattern64);
int64_t* data_i64 = reinterpret_cast<int64_t*>(data);
for (const auto i : c10::irange(int64_count)) {
data_i64[i] = kJunkPattern64;
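Here auto keeps the size_t type of the division result; the old int32_t declarations silently truncated for buffers larger than about 16 GiB (2^31 eight-byte words). A short demonstration of the failure mode, with a made-up buffer size:

    #include <cstdint>
    #include <iostream>

    int main() {
      std::size_t num = 20ULL * 1024 * 1024 * 1024;  // 20 GiB buffer
      // Old pattern: the quotient no longer fits in 32 bits
      // (the cast is written out here; the implicit version is what
      // clang-tidy flagged).
      auto truncated = static_cast<std::int32_t>(num / 8);
      // New pattern: auto deduces std::size_t and keeps the full value.
      auto correct = num / 8;
      std::cout << truncated << " vs " << correct << '\n';
      // prints "-1610612736 vs 2684354560"
    }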
3 changes: 1 addition & 2 deletions c10/macros/Macros.h
@@ -434,8 +434,7 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
// Warning: __has_trivial_copy for GCC may not always detect the non-POD
// correctly. For example, T = std::unique_ptr may evaluate to true and be
// treated as POD. This can cause unexpected behavior.
-#if defined(__GNUG__) && __GNUC__ < 5 && \
-    !(defined(__clang__) && defined(_LIBCPP_VERSION))
+#if defined(__GNUG__) && __GNUC__ < 5 && !defined(__clang__)
#define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
#else
#define C10_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value
9 changes: 2 additions & 7 deletions c10/util/Optional.h
@@ -501,13 +501,8 @@ class arrayref_optional_base {
: storage_(v) {}

constexpr bool initialized() const noexcept {
-    typename storage::raw repr;
-    // Cast to void* to suppress GCC's -Wclass-memaccess.
-    memcpy(
-        static_cast<void*>(&repr),
-        static_cast<const void*>(&storage_),
-        sizeof(storage_));
-    return repr.p != nullptr || repr.sz == 0;
+    return storage_.uninitialized_.p != nullptr ||
+        storage_.uninitialized_.sz == 0;
}

void setInitialized(bool init) noexcept {
2 changes: 1 addition & 1 deletion functorch/csrc/dim/dim.cpp
@@ -166,7 +166,7 @@ struct Dim : public py::base<Dim> {
return batchtensor_;
}
private:
-  int64_t size_;
+  int64_t size_{-1};
at::Tensor range_;
at::Tensor batchtensor_;
};
6 changes: 3 additions & 3 deletions torch/csrc/StorageSharing.cpp
@@ -91,10 +91,10 @@ static PyObject* THPStorage_shareFilename(PyObject* _self, PyObject* noargs) {
"_share_filename_: only available on CPU");
auto self = (THPStorage*)_self;
c10::StorageImpl* storage = self->cdata;
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  THManagedMapAllocator* ctx;
+  THManagedMapAllocator* ctx =
+      THManagedMapAllocator::fromDataPtr(storage->data_ptr());
  // Storage is already in shared memory, just return a handle
-  if ((ctx = THManagedMapAllocator::fromDataPtr(storage->data_ptr()))) {
+  if (ctx) {
// done
} else {
// TODO: retry on collision
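Initializing ctx at its declaration removes both the uninitialized window (which needed a NOLINT for cppcoreguidelines-init-variables) and the assignment buried inside the if condition. The shape of the change, with a hypothetical lookup function:

    #include <cstdio>

    struct Ctx { int id; };

    // Hypothetical stand-in for THManagedMapAllocator::fromDataPtr.
    Ctx* lookup(bool found) {
      static Ctx ctx{7};
      return found ? &ctx : nullptr;
    }

    int main() {
      // Before: declared uninitialized, assigned inside the condition.
      Ctx* a;  // cppcoreguidelines-init-variables fires here
      if ((a = lookup(true))) { std::printf("found %d\n", a->id); }

      // After: initialized at the declaration; the condition is a plain test.
      Ctx* b = lookup(false);
      if (b) { std::printf("found %d\n", b->id); } else { std::puts("absent"); }
    }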
2 changes: 1 addition & 1 deletion torch/csrc/autograd/custom_function.h
@@ -146,7 +146,7 @@ struct TORCH_API AutogradContext {
// weak_ptr to avoid a refcycle. Since grad_fn_ owns this AutogradContext, it
// will always be alive when we want to use it.
std::weak_ptr<Node> grad_fn_;
-  bool has_freed_buffers_;
+  bool has_freed_buffers_{false};

void save_variables();

7 changes: 0 additions & 7 deletions torch/csrc/autograd/python_torch_functions_manual.cpp
@@ -28,18 +28,11 @@
#include <utility>
#include <vector>

-using at::ArrayRef;
-using at::Backend;
using at::Device;
-using at::DeviceGuard;
-using at::Dimname;
-using at::DimnameList;
using at::Generator;
-using at::IntArrayRef;
using at::Layout;
-using at::OptionalDeviceGuard;
using at::Scalar;
using at::ScalarType;
using at::Tensor;
using at::TensorList;
using at::TensorOptions;
8 changes: 4 additions & 4 deletions torch/csrc/distributed/rpc/tensorpipe_agent.h
@@ -97,17 +97,17 @@ struct TORCH_API TensorPipeRpcBackendOptions : public RpcBackendOptions {
"num_worker_threads must be positive, got ",
numWorkerThreads);

-    if (transports.has_value()) {
-      for (const std::string& transportName : transports.value()) {
+    if (this->transports.has_value()) {
+      for (const std::string& transportName : this->transports.value()) {
TORCH_CHECK(
TensorPipeTransportRegistry()->Has(transportName),
"Unknown transport: ",
transportName);
}
}

-    if (channels.has_value()) {
-      for (const std::string& channelName : channels.value()) {
+    if (this->channels.has_value()) {
+      for (const std::string& channelName : this->channels.value()) {
TORCH_CHECK(
TensorPipeChannelRegistry()->Has(channelName),
"Unknown channel: ",
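This one is closer to a real bug fix than to style: the constructor parameters transports and channels shadow the members of the same name and are moved into them in the member-initializer list, so by the time the body runs, the unqualified names refer to moved-from parameters and the validation loops would typically iterate over nothing. this-> makes the checks inspect the members. A condensed, hypothetical miniature:

    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    struct Options {
      std::optional<std::vector<std::string>> transports;

      explicit Options(std::optional<std::vector<std::string>> transports)
          : transports(std::move(transports)) {
        // Unqualified `transports` would name the parameter, which was just
        // moved from (its vector is typically empty), so validation would
        // silently check nothing. `this->transports` names the member.
        if (this->transports.has_value()) {
          for (const std::string& t : this->transports.value()) {
            std::cout << "validating transport: " << t << '\n';
          }
        }
      }
    };

    int main() { Options o{std::vector<std::string>{"shm", "uv"}}; }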
1 change: 0 additions & 1 deletion torch/csrc/jit/frontend/schema_type_parser.cpp
@@ -26,7 +26,6 @@ using c10::ListType;
using c10::MemoryFormatType;
using c10::NoneType;
using c10::NumberType;
-using c10::OptionalType;
using c10::QSchemeType;
using c10::QuantizerType;
using c10::RRefType;
10 changes: 5 additions & 5 deletions torch/csrc/jit/frontend/sugared_value.h
@@ -658,15 +658,15 @@ struct TORCH_API RangeValue : SugaredValue {
}

private:
-  Value* start_;
-  Value* end_;
-  Value* step_;
+  Value* start_{};
+  Value* end_{};
+  Value* step_{};
// a flag to determine if it's a simple range() call with only end_ from
// arguments If true, we will not insert length calculation and index
// derivation nodes to simplify the graph and enable more possible
// optimizations
-  bool has_only_end_;
-  c10::optional<int64_t> static_len_ = c10::nullopt;
+  bool has_only_end_{};
+  c10::optional<int64_t> static_len_;
};

// Specialized Tree structure to matched against for special handling
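Two idioms in one hunk: an empty brace initializer value-initializes the member (nullptr for pointers, false for bool), and the `= c10::nullopt` on static_len_ was redundant because a default-constructed optional is already empty (readability-redundant-member-init). A miniature with std::optional standing in for c10::optional, which behaves the same way here:

    #include <cassert>
    #include <optional>

    struct Range {
      int* start_{};                    // {} value-initializes to nullptr
      bool has_only_end_{};             // {} value-initializes to false
      std::optional<long> static_len_;  // default-constructed: already empty,
                                        // so "= std::nullopt" adds nothing
    };

    int main() {
      Range r;
      assert(r.start_ == nullptr && !r.has_only_end_ && !r.static_len_);
    }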
4 changes: 1 addition & 3 deletions torch/csrc/jit/frontend/tracer.h
@@ -179,9 +179,7 @@ inline void warn(const char* _reason, const char* _kind = nullptr) {
TORCH_API void setWarn(warn_fn_type fn);

struct TORCH_API NoWarn {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
NoWarn() : state(getTracingState()) {
-    // NOLINTNEXTLINE(*.cplusplus.UninitializedObject)
if (state) {
prev = state->warn;
state->warn = false;
@@ -193,7 +191,7 @@ struct TORCH_API NoWarn {
}
}
std::shared_ptr<TracingState> state;
-  bool prev;
+  bool prev{false};
};

struct WithNestedTracingFrame {
3 changes: 0 additions & 3 deletions torch/csrc/jit/mobile/compatibility/backport.cpp
@@ -10,11 +10,8 @@
namespace torch {
namespace jit {

-using caffe2::serialize::FileAdapter;
-using caffe2::serialize::IStreamAdapter;
using caffe2::serialize::PyTorchStreamReader;
using caffe2::serialize::PyTorchStreamWriter;
-using caffe2::serialize::ReadAdapterInterface;

const static BackportManager backportManager;

(Diff truncated: the remaining changed files are not shown.)