diff --git a/.clang-tidy b/.clang-tidy index eea5c737d0c49..2e82704333485 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -44,7 +44,7 @@ modernize-*, performance-*, readability-container-size-empty, ' -HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$' +HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/).*$' AnalyzeTemporaryDtors: false WarningsAsErrors: '*' ... diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 71e0f76e9a9c1..43147996d0720 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,6 +17,7 @@ jobs: lintrunner: uses: pytorch/test-infra/.github/workflows/linux_job.yml@main with: + timeout: 120 runner: linux.2xlarge docker-image: pytorch-linux-focal-linter fetch-depth: 0 diff --git a/.lintrunner.toml b/.lintrunner.toml index e7d458895e697..6b41bc716f09b 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -277,33 +277,33 @@ command = [ code = 'CLANGTIDY' include_patterns = [ 'c10/**/*.cpp', - 'torch/csrc/fx/**/*.cpp', - 'torch/csrc/generic/**/*.cpp', - 'torch/csrc/onnx/**/*.cpp', - 'torch/csrc/tensor/**/*.cpp', - 'torch/csrc/utils/**/*.cpp', + 'torch/csrc/**/*.cpp', ] exclude_patterns = [ # The negative filters below are to exclude files that include onnx_pb.h or # caffe2_pb.h, otherwise we'd have to build protos as part of this CI job. - # FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed - # in a follow up PR. - # that are not easily converted to accepted c++ - 'c10/cuda/**/*.cpp', - 'c10/test/**/*.cpp', + # CUDA files are also excluded. 
'**/fb/**', - 'torch/csrc/jit/passes/onnx/helper.cpp', - 'torch/csrc/jit/passes/onnx/shape_type_inference.cpp', - 'torch/csrc/jit/serialization/onnx.cpp', - 'torch/csrc/jit/serialization/export.cpp', - 'torch/csrc/jit/serialization/import.cpp', + '**/*pb.h', + '**/*CUDA*', + '**/cuda/*pp', + 'c10/test/**', + 'third_party/**/*', + 'torch/csrc/api/**', + 'torch/csrc/autograd/**', + 'torch/csrc/CudaIPCTypes.cpp', + 'torch/csrc/cuda/**', + 'torch/csrc/dynamo/*', + 'torch/csrc/distributed/**/*', + 'torch/csrc/inductor/**/*', + 'torch/csrc/jit/**/*', 'torch/csrc/jit/serialization/import_legacy.cpp', + 'torch/csrc/jit/serialization/export.cpp', + 'torch/csrc/lazy/**/*', 'torch/csrc/onnx/init.cpp', - 'torch/csrc/cuda/nccl.*', - 'torch/csrc/cuda/python_nccl.cpp', - 'torch/csrc/autograd/FunctionsManual.cpp', - 'torch/csrc/jit/codegen/cuda/runtime/*', - 'torch/csrc/utils/disable_torch_function.cpp', + 'torch/csrc/profiler/**/*', + 'torch/csrc/quantized/**/*', + 'torch/csrc/mps/**/*', ] init_command = [ 'python3', diff --git a/c10/core/SafePyObject.h b/c10/core/SafePyObject.h index 700d90dd4b6f3..38f5c1ef3ff8e 100644 --- a/c10/core/SafePyObject.h +++ b/c10/core/SafePyObject.h @@ -22,7 +22,7 @@ struct C10_API SafePyObject { // Steals a reference to data SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) : data_(data), pyinterpreter_(pyinterpreter) {} - SafePyObject(SafePyObject&& other) + SafePyObject(SafePyObject&& other) noexcept : data_(std::exchange(other.data_, nullptr)), pyinterpreter_(other.pyinterpreter_) {} diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h index 65e7bdbe1f56d..aafbbfc5409b1 100644 --- a/c10/core/TensorImpl.h +++ b/c10/core/TensorImpl.h @@ -75,7 +75,7 @@ inline int64_t size_from_dim_(int k, IntArrayRef dims) { // Product of all dims up to k (not including dims[k]) inline int64_t size_to_dim_(int k, IntArrayRef dims) { - TORCH_CHECK((unsigned)k <= dims.size()); + TORCH_CHECK(k >= 0 && static_cast<size_t>(k) <= dims.size()); int64_t r
= 1; for (const auto i : c10::irange(k)) { r *= dims[i]; diff --git a/c10/util/OptionalArrayRef.h b/c10/util/OptionalArrayRef.h index fc9f3af0f84e6..6fb332f242382 100644 --- a/c10/util/OptionalArrayRef.h +++ b/c10/util/OptionalArrayRef.h @@ -27,7 +27,7 @@ class OptionalArrayRef final { OptionalArrayRef(const OptionalArrayRef& other) = default; - OptionalArrayRef(OptionalArrayRef&& other) = default; + OptionalArrayRef(OptionalArrayRef&& other) noexcept = default; constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept : wrapped_opt_array_ref(other) {} @@ -90,7 +90,7 @@ class OptionalArrayRef final { OptionalArrayRef& operator=(const OptionalArrayRef& other) = default; - OptionalArrayRef& operator=(OptionalArrayRef&& other) = default; + OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default; constexpr OptionalArrayRef& operator=( const optional<ArrayRef<T>>& other) noexcept { diff --git a/c10/util/either.h b/c10/util/either.h index 757663f5896fb..d0492ad6db591 100644 --- a/c10/util/either.h +++ b/c10/util/either.h @@ -78,7 +78,7 @@ class either final { return *this; } - either& operator=(either&& rhs) { + either& operator=(either&& rhs) noexcept { _destruct(); _side = rhs._side; if (_side == Side::left) { diff --git a/torch/csrc/CudaIPCTypes.cpp b/torch/csrc/CudaIPCTypes.cpp index d18a23ebe4e68..2bd076377f2e6 100644 --- a/torch/csrc/CudaIPCTypes.cpp +++ b/torch/csrc/CudaIPCTypes.cpp @@ -139,8 +139,8 @@ void ReturnRefCounter(const std::string& handle, uint64_t offset /* unused */) { CudaIPCSentData::CudaIPCSentData( std::string handle, - int64_t offset, - int64_t* counter_ptr, + uint64_t offset, + uint64_t* counter_ptr, at::Device device) : handle_(std::move(handle)), offset_(offset), @@ -206,7 +206,7 @@ CudaIPCSentData::~CudaIPCSentData() { #endif } -int64_t CudaIPCSentData::counter_value() { +uint64_t CudaIPCSentData::counter_value() { return *counter_ptr_; } diff --git a/torch/csrc/CudaIPCTypes.h b/torch/csrc/CudaIPCTypes.h index
bedbe34ad633b..3af22c596eb9c 100644 --- a/torch/csrc/CudaIPCTypes.h +++ b/torch/csrc/CudaIPCTypes.h @@ -22,8 +22,8 @@ struct CudaIPCReceivedData final { struct CudaIPCSentData final { std::string handle_; - int64_t offset_; - int64_t* counter_ptr_; // Reference counter shared memory block + uint64_t offset_; + uint64_t* counter_ptr_; // Reference counter shared memory block at::DataPtr original_ptr_; // Original mem allocation cudaEvent_t event_; // Sync cuEventDestroy bool event_sync_required_; @@ -31,16 +31,16 @@ struct CudaIPCSentData final { CudaIPCSentData( std::string handle, - int64_t offset, - int64_t* counter_ptr, + uint64_t offset, + uint64_t* counter_ptr, at::Device device); ~CudaIPCSentData(); - int64_t counter_value(); + uint64_t counter_value(); std::string handle() { return handle_; } - int64_t offset() { + uint64_t offset() { return offset_; } void set_original_ptr(at::DataPtr data_ptr) { @@ -87,8 +87,8 @@ struct CudaIPCRefCountersFile final { handle_(std::move(handle)), refcounted_shared_mem_(std::move(data_ptr)) {} - int64_t* counter_ptr() { - return static_cast<int64_t*>(refcounted_shared_mem_.get()) + next_offset_; + uint64_t* counter_ptr() { + return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_; } void set_counter(uint64_t value) { @@ -103,7 +103,7 @@ struct CudaIPCRefCountersFile final { return used_slots_; } - int64_t get_offset() { + uint64_t get_offset() { return next_offset_; } diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp index c5f4732985812..c2e963ef70dc4 100644 --- a/torch/csrc/Storage.cpp +++ b/torch/csrc/Storage.cpp @@ -134,8 +134,8 @@ static PyObject* THPStorage_pynew( torch::ParsedArgs<3> parsed_args; auto r = parser.parse(args, kwargs, parsed_args); - int64_t allocator_arg_idx = 0; - int64_t device_arg_idx = 1; + int allocator_arg_idx = 0; + int device_arg_idx = 1; if (r.idx > 0) { allocator_arg_idx = 1; diff --git a/torch/csrc/jit/passes/onnx/helper.cpp b/torch/csrc/jit/passes/onnx/helper.cpp index
a51047838cf82..2f4757546bc14 100644 --- a/torch/csrc/jit/passes/onnx/helper.cpp +++ b/torch/csrc/jit/passes/onnx/helper.cpp @@ -186,7 +186,7 @@ Node* createONNXConstant( at::Tensor value) { Node* constant_node = graph->create(onnx::Constant, 1); constant_node->insertBefore(n_to_insert_before); - constant_node->t_(attr::value, value); + constant_node->t_(attr::value, std::move(value)); return constant_node; }