Revert "increase clang-tidy coverage in torch/csrc (pytorch#103058)"
This reverts commit cdf7f3e.

Reverted pytorch#103058 on behalf of https://github.com/atalman due to "Sorry for reverting your change, breaks lint" ([comment](pytorch#103058 (comment)))
pytorchmergebot committed Sep 8, 2023
1 parent cdf7f3e commit fa8bfe5
Showing 11 changed files with 41 additions and 42 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -44,7 +44,7 @@ modernize-*,
performance-*,
readability-container-size-empty,
'
-HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/).*$'
+HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
AnalyzeTemporaryDtors: false
WarningsAsErrors: '*'
...
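The restored `HeaderFilterRegex` narrows header diagnostics with negative lookaheads: `c10/` headers are covered except `c10/test`, and `torch/csrc/` headers are covered except the vendored `torch/csrc/deploy/interpreter/cpython` tree. A minimal sketch of what the pattern is meant to accept, using `std::regex` (whose ECMAScript grammar supports `(?!...)`) purely as an illustration; whether clang-tidy's own regex engine honors the lookahead is a separate question, and the header paths below are hypothetical examples:

```cpp
#include <iostream>
#include <regex>
#include <string>
#include <vector>

int main() {
  // The restored filter from .clang-tidy above.
  std::regex header_filter(
      R"(^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$)");

  // Hypothetical header paths, chosen only to exercise both branches.
  std::vector<std::string> paths = {
      "c10/core/TensorImpl.h",                          // diagnosed
      "c10/test/util/Macros.h",                         // skipped by (?!test)
      "torch/csrc/CudaIPCTypes.h",                      // diagnosed
      "torch/csrc/deploy/interpreter/cpython/Python.h"  // skipped
  };
  for (const auto& p : paths) {
    std::cout << p << " -> "
              << (std::regex_search(p, header_filter) ? "diagnosed" : "skipped")
              << '\n';
  }
}
```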
1 change: 0 additions & 1 deletion .github/workflows/lint.yml
@@ -17,7 +17,6 @@ jobs:
lintrunner:
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
with:
-timeout: 120
runner: linux.2xlarge
docker-image: pytorch-linux-focal-linter
fetch-depth: 0
40 changes: 20 additions & 20 deletions .lintrunner.toml
@@ -278,33 +278,33 @@ command = [
code = 'CLANGTIDY'
include_patterns = [
'c10/**/*.cpp',
'torch/csrc/**/*.cpp',
'torch/csrc/fx/**/*.cpp',
'torch/csrc/generic/**/*.cpp',
'torch/csrc/onnx/**/*.cpp',
'torch/csrc/tensor/**/*.cpp',
'torch/csrc/utils/**/*.cpp',
]
exclude_patterns = [
# The negative filters below are to exclude files that include onnx_pb.h or
# caffe2_pb.h, otherwise we'd have to build protos as part of this CI job.
# CUDA files are also excluded.
# FunctionsManual.cpp is excluded to keep this diff clean. It will be fixed
# in a follow up PR.
# that are not easily converted to accepted c++
'c10/cuda/**/*.cpp',
'c10/test/**/*.cpp',
'**/fb/**',
'**/*pb.h',
'**/*CUDA*',
'**/cuda/*pp',
'c10/test/**',
'third_party/**/*',
'torch/csrc/api/**',
'torch/csrc/autograd/**',
'torch/csrc/CudaIPCTypes.cpp',
'torch/csrc/cuda/**',
'torch/csrc/dynamo/*',
'torch/csrc/distributed/**/*',
'torch/csrc/inductor/**/*',
'torch/csrc/jit/**/*',
'torch/csrc/jit/serialization/import_legacy.cpp',
'torch/csrc/jit/passes/onnx/helper.cpp',
'torch/csrc/jit/passes/onnx/shape_type_inference.cpp',
'torch/csrc/jit/serialization/onnx.cpp',
'torch/csrc/jit/serialization/export.cpp',
'torch/csrc/lazy/**/*',
'torch/csrc/jit/serialization/import.cpp',
'torch/csrc/jit/serialization/import_legacy.cpp',
'torch/csrc/onnx/init.cpp',
'torch/csrc/profiler/**/*',
'torch/csrc/quantized/**/*',
'torch/csrc/mps/**/*',
'torch/csrc/cuda/nccl.*',
'torch/csrc/cuda/python_nccl.cpp',
'torch/csrc/autograd/FunctionsManual.cpp',
'torch/csrc/jit/codegen/cuda/runtime/*',
'torch/csrc/utils/disable_torch_function.cpp',
]
init_command = [
'python3',
2 changes: 1 addition & 1 deletion c10/core/SafePyObject.h
@@ -22,7 +22,7 @@ struct C10_API SafePyObject {
// Steals a reference to data
SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
: data_(data), pyinterpreter_(pyinterpreter) {}
-SafePyObject(SafePyObject&& other) noexcept
+SafePyObject(SafePyObject&& other)
: data_(std::exchange(other.data_, nullptr)),
pyinterpreter_(other.pyinterpreter_) {}

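The `noexcept` dropped here (and on `OptionalArrayRef` and `either` below) is what pytorch#103058 had added; `SafePyObject`'s move constructor only exchanges two raw pointers, so it genuinely cannot throw. The keyword matters because standard containers consult it during reallocation and fall back to copying when a throwing move would break the strong exception guarantee. A compile-time sketch of the distinction, with toy types rather than PyTorch classes:

```cpp
#include <type_traits>

// Two toy types that differ only in the noexcept on their move constructors.
struct MoveNoexcept {
  MoveNoexcept() = default;
  MoveNoexcept(const MoveNoexcept&) = default;
  MoveNoexcept(MoveNoexcept&&) noexcept {}
};
struct MoveMayThrow {
  MoveMayThrow() = default;
  MoveMayThrow(const MoveMayThrow&) = default;
  MoveMayThrow(MoveMayThrow&&) {}
};

// std::vector consults exactly this trait (via std::move_if_noexcept) when
// reallocating: a potentially throwing move makes it copy instead whenever
// a copy constructor is available.
static_assert(std::is_nothrow_move_constructible_v<MoveNoexcept>);
static_assert(!std::is_nothrow_move_constructible_v<MoveMayThrow>);

int main() {}
```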
2 changes: 1 addition & 1 deletion c10/core/TensorImpl.h
@@ -75,7 +75,7 @@ inline int64_t size_from_dim_(int k, IntArrayRef dims) {

// Product of all dims up to k (not including dims[k])
inline int64_t size_to_dim_(int k, IntArrayRef dims) {
-TORCH_CHECK(k >= 0 && static_cast<size_t>(k) <= dims.size());
+TORCH_CHECK((unsigned)k <= dims.size());
int64_t r = 1;
for (const auto i : c10::irange(k)) {
r *= dims[i];
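Both spellings of this `TORCH_CHECK` reject out-of-range and negative `k`: the restored `(unsigned)k <= dims.size()` folds the sign test into one comparison by letting a negative `int` wrap around to a huge unsigned value, which the reverted form had spelled out explicitly. A small sketch showing the two predicates agree (assuming the usual 32-bit `int` and sizes far below `UINT_MAX`):

```cpp
#include <cassert>
#include <cstddef>
#include <initializer_list>

// Restored form: one comparison; a negative k wraps to a huge unsigned value.
bool in_range_wrap(int k, std::size_t size) {
  return (unsigned)k <= size;  // k == -1 becomes 4294967295
}

// Reverted form: the same condition with the sign check made explicit.
bool in_range_explicit(int k, std::size_t size) {
  return k >= 0 && static_cast<std::size_t>(k) <= size;
}

int main() {
  // Equivalent for any realistic size (i.e. size < UINT_MAX).
  for (int k : {-2, -1, 0, 3, 4, 5}) {
    assert(in_range_wrap(k, 4) == in_range_explicit(k, 4));
  }
}
```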
4 changes: 2 additions & 2 deletions c10/util/OptionalArrayRef.h
@@ -27,7 +27,7 @@ class OptionalArrayRef final {

OptionalArrayRef(const OptionalArrayRef& other) = default;

-OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;
+OptionalArrayRef(OptionalArrayRef&& other) = default;

constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
: wrapped_opt_array_ref(other) {}
@@ -90,7 +90,7 @@ class OptionalArrayRef final {

OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;

-OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
+OptionalArrayRef& operator=(OptionalArrayRef&& other) = default;

constexpr OptionalArrayRef& operator=(
const optional<ArrayRef<T>>& other) noexcept {
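For these defaulted members the reverted `noexcept` was largely an assertion rather than a behavior change: a `= default` move constructor or move assignment derives its exception specification from the members, and a mismatch between an explicit `noexcept` and the deduced one surfaces at compile time instead of silently producing a throwing move. A sketch of the deduction on a hypothetical wrapper (not the real `OptionalArrayRef`):

```cpp
#include <optional>
#include <type_traits>

// Stand-in for OptionalArrayRef's shape: one std::optional member and
// defaulted move operations with no explicit noexcept.
template <typename T>
struct OptWrapper {
  std::optional<T> wrapped;
  OptWrapper(OptWrapper&&) = default;             // noexcept is deduced
  OptWrapper& operator=(OptWrapper&&) = default;  // likewise
};

// The deduced specification already matches what the reverted explicit
// noexcept asserted, because moving the member cannot throw.
static_assert(std::is_nothrow_move_constructible_v<OptWrapper<int>>);
static_assert(std::is_nothrow_move_assignable_v<OptWrapper<int>>);

int main() {}
```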
2 changes: 1 addition & 1 deletion c10/util/either.h
@@ -78,7 +78,7 @@ class either final {
return *this;
}

-either<Left, Right>& operator=(either<Left, Right>&& rhs) noexcept {
+either<Left, Right>& operator=(either<Left, Right>&& rhs) {
_destruct();
_side = rhs._side;
if (_side == Side::left) {
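`either::operator=` runs `_destruct()` first and only then moves from `rhs`, so a move that throws halfway would leave the object holding neither a `Left` nor a `Right`; the reverted `noexcept` turned that scenario into an immediate `std::terminate` rather than an observable half-dead object. `std::variant` has the same destroy-then-construct hazard and makes it visible through `valueless_by_exception()`, which gives a runnable illustration (toy type, not PyTorch code):

```cpp
#include <iostream>
#include <stdexcept>
#include <variant>

// A type whose move constructor always throws.
struct ThrowOnMove {
  ThrowOnMove() = default;
  ThrowOnMove(ThrowOnMove&&) { throw std::runtime_error("move failed"); }
};

int main() {
  std::variant<int, ThrowOnMove> v = 42;
  try {
    v = ThrowOnMove{};  // destroys the held int, then the move throws
  } catch (const std::runtime_error&) {
  }
  // On the major implementations this prints true: the variant was left
  // holding no alternative at all, exactly the state `either` rules out by
  // requiring (or asserting) a non-throwing move.
  std::cout << std::boolalpha << v.valueless_by_exception() << '\n';
}
```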
6 changes: 3 additions & 3 deletions torch/csrc/CudaIPCTypes.cpp
@@ -139,8 +139,8 @@ void ReturnRefCounter(const std::string& handle, uint64_t offset /* unused */) {

CudaIPCSentData::CudaIPCSentData(
std::string handle,
-uint64_t offset,
-uint64_t* counter_ptr,
+int64_t offset,
+int64_t* counter_ptr,
at::Device device)
: handle_(std::move(handle)),
offset_(offset),
@@ -206,7 +206,7 @@ CudaIPCSentData::~CudaIPCSentData() {
#endif
}

-uint64_t CudaIPCSentData::counter_value() {
+int64_t CudaIPCSentData::counter_value() {
return *counter_ptr_;
}

18 changes: 9 additions & 9 deletions torch/csrc/CudaIPCTypes.h
@@ -22,25 +22,25 @@ struct CudaIPCReceivedData final {

struct CudaIPCSentData final {
std::string handle_;
-uint64_t offset_;
-uint64_t* counter_ptr_; // Reference counter shared memory block
+int64_t offset_;
+int64_t* counter_ptr_; // Reference counter shared memory block
at::DataPtr original_ptr_; // Original mem allocation
cudaEvent_t event_; // Sync cuEventDestroy
bool event_sync_required_;
at::Device device_;

CudaIPCSentData(
std::string handle,
-uint64_t offset,
-uint64_t* counter_ptr,
+int64_t offset,
+int64_t* counter_ptr,
at::Device device);
~CudaIPCSentData();

-uint64_t counter_value();
+int64_t counter_value();
std::string handle() {
return handle_;
}
-uint64_t offset() {
+int64_t offset() {
return offset_;
}
void set_original_ptr(at::DataPtr data_ptr) {
@@ -87,8 +87,8 @@ struct CudaIPCRefCountersFile final {
handle_(std::move(handle)),
refcounted_shared_mem_(std::move(data_ptr)) {}

-uint64_t* counter_ptr() {
-return static_cast<uint64_t*>(refcounted_shared_mem_.get()) + next_offset_;
+int64_t* counter_ptr() {
+return static_cast<int64_t*>(refcounted_shared_mem_.get()) + next_offset_;
}

void set_counter(uint64_t value) {
@@ -103,7 +103,7 @@
return used_slots_;
}

-uint64_t get_offset() {
+int64_t get_offset() {
return next_offset_;
}

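The `uint64_t`/`int64_t` flips in this header and in `CudaIPCTypes.cpp` travel as one unit because the counter lives in an untyped shared-memory block and only acquires a type at the `static_cast` in `counter_ptr()`: the field, the accessor, the constructor parameters, and that cast all have to agree or the build breaks, while either signedness can represent the small non-negative counts actually stored. A simplified sketch of the access pattern (names shortened, plain heap memory standing in for the ref-counted shared-memory file):

```cpp
#include <cstdint>
#include <cstdlib>
#include <iostream>

// Same size either way; the revert only flips signedness consistently.
static_assert(sizeof(int64_t) == sizeof(uint64_t), "same layout either way");

struct RefCountersSketch {
  void* mem;            // stands in for refcounted_shared_mem_.get()
  int64_t next_offset;  // slot handed to the next sender

  // The single point where the block's element type is chosen; it must
  // match the field, accessor, and constructor types (int64_t post-revert).
  int64_t* counter_ptr() {
    return static_cast<int64_t*>(mem) + next_offset;
  }
};

int main() {
  RefCountersSketch file{std::calloc(8, sizeof(int64_t)), 2};
  ++*file.counter_ptr();  // bump the refcount in slot 2
  std::cout << "slot 2 count: " << *file.counter_ptr() << '\n';
  std::free(file.mem);
}
```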
4 changes: 2 additions & 2 deletions torch/csrc/Storage.cpp
@@ -372,8 +372,8 @@ static PyObject* THPStorage_pynew(
torch::ParsedArgs<3> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);

-int allocator_arg_idx = 0;
-int device_arg_idx = 1;
+int64_t allocator_arg_idx = 0;
+int64_t device_arg_idx = 1;

if (r.idx > 0) {
allocator_arg_idx = 1;
2 changes: 1 addition & 1 deletion torch/csrc/jit/passes/onnx/helper.cpp
@@ -181,7 +181,7 @@ Node* createONNXConstant(
at::Tensor value) {
Node* constant_node = graph->create(onnx::Constant, 1);
constant_node->insertBefore(n_to_insert_before);
-constant_node->t_(attr::value, std::move(value));
+constant_node->t_(attr::value, value);
return constant_node;
}

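`createONNXConstant` receives the `at::Tensor` by value, so the `std::move` removed here was the second half of the usual sink-argument idiom: accept by value, then move the parameter into its final destination. For a refcounted handle like `at::Tensor` the lost move only costs an extra atomic refcount bump, which is why the revert is harmless; for a deep-owning type the same omission would buy a full copy. A generic sketch of the idiom, with `std::string` standing in for the tensor:

```cpp
#include <iostream>
#include <string>
#include <utility>

// Sink-argument idiom: take by value so callers can pass lvalues (copied)
// or rvalues (moved), then move the parameter into its final home.
struct NodeSketch {
  std::string attr_value;
  void set_value(std::string value) {
    attr_value = std::move(value);  // without the move, this line would
                                    // deep-copy the string a second time
  }
};

int main() {
  NodeSketch node;
  std::string big(1 << 20, 'x');
  node.set_value(std::move(big));  // no deep copy anywhere on this path
  std::cout << node.attr_value.size() << '\n';
}
```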
