Skip to content

Commit

Permalink
[clang-tidy] Exclude cppcoreguidelines-avoid-magic-numbers (pytorch#57841)
Browse files Browse the repository at this point in the history

Summary:
Add cppcoreguidelines-avoid-magic-numbers exclusion to clang-tidy
Remove existing nolint warnings using the following script:
```
for file in `git ls-files | grep -v \.py`; do gsed '/^ *\/\/ NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)/d' -i  $file; done
```

Pull Request resolved: pytorch#57841

Reviewed By: samestep

Differential Revision: D28295045

Pulled By: malfet

fbshipit-source-id: 7c6e8d1213c9593f169ed3df6a916498f1a97163
  • Loading branch information
malfet authored and facebook-github-bot committed May 8, 2021
1 parent bc2540f commit 3a66a1c
Show file tree
Hide file tree
Showing 458 changed files with 1 addition and 9,602 deletions.
1 change: 1 addition & 0 deletions .clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ bugprone-*,
-bugprone-lambda-function-name,
-bugprone-reserved-identifier,
cppcoreguidelines-*,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-interfaces-global-init,
-cppcoreguidelines-macro-usage,
-cppcoreguidelines-owning-memory,
Expand Down
2 changes: 0 additions & 2 deletions aten/src/ATen/CPUGeneratorImpl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ Generator createCPUGenerator(uint64_t seed_val) {
* and return them as a 64 bit unsigned int
*/
// Pack two 32-bit words into one 64-bit unsigned value, with `hi`
// occupying the upper 32 bits and `lo` the lower 32 bits.
inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
  uint64_t combined = static_cast<uint64_t>(hi);
  combined <<= 32;
  combined |= static_cast<uint64_t>(lo);
  return combined;
}

Expand Down Expand Up @@ -157,7 +156,6 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
// intermediate values.
if (legacy_pod->normal_is_valid) {
auto r = legacy_pod->normal_rho;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
auto theta = 2.0 * c10::pi<double> * legacy_pod->normal_x;
// we return the sin version of the normal sample when in caching mode
double_normal_sample = c10::optional<double>(r * ::sin(theta));
Expand Down
3 changes: 0 additions & 3 deletions aten/src/ATen/Context.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,6 @@ bool Context::checkCuBLASConfigDeterministic() {
bool cublas_config_deterministic = true;
// If using CUDA 10.2 or greater, need to make sure CuBLAS workspace config
// is set to deterministic setting
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if (hasCUDART() && (versionCUDART() >= 10020)) {
char* workspace_config = std::getenv(cublas_config_var_name);
cublas_config_deterministic = (workspace_config != nullptr) && (
Expand Down Expand Up @@ -277,7 +276,6 @@ void Context::setDefaultMobileCPUAllocator() {
"Cannot set another allocator.");
// Setting the priority high to make sure no other allocator gets used instead of this.
prev_allocator_ptr_ = c10::GetCPUAllocator();
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100);
}

Expand All @@ -286,7 +284,6 @@ void Context::unsetDefaultMobileCPUAllocator() {
"setDefaultMobileCPUAllocator must have been called "
"before unsetDefaultMobileCPUAllocator.");
// Setting the priority high to make sure no other allocator gets used instead of this.
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
c10::SetCPUAllocator(prev_allocator_ptr_ , /*priority*/ 100);
prev_allocator_ptr_ = nullptr;
}
Expand Down
9 changes: 0 additions & 9 deletions aten/src/ATen/DLConvertor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ namespace at {
DLDataType getDLDataType(const Tensor& t) {
DLDataType dtype;
dtype.lanes = 1;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
dtype.bits = t.element_size() * 8;
switch (t.scalar_type()) {
case ScalarType::Byte:
Expand Down Expand Up @@ -126,7 +125,6 @@ ScalarType toScalarType(const DLDataType& dtype) {
switch (dtype.code) {
case DLDataTypeCode::kDLUInt:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 8:
stype = ScalarType::Byte;
break;
Expand All @@ -137,19 +135,15 @@ ScalarType toScalarType(const DLDataType& dtype) {
break;
case DLDataTypeCode::kDLInt:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 8:
stype = ScalarType::Char;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 16:
stype = ScalarType::Short;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 32:
stype = ScalarType::Int;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 64:
stype = ScalarType::Long;
break;
Expand All @@ -160,15 +154,12 @@ ScalarType toScalarType(const DLDataType& dtype) {
break;
case DLDataTypeCode::kDLFloat:
switch (dtype.bits) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 16:
stype = ScalarType::Half;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 32:
stype = ScalarType::Float;
break;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
case 64:
stype = ScalarType::Double;
break;
Expand Down
1 change: 0 additions & 1 deletion aten/src/ATen/SparseTensorUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,6 @@ Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz) {
if (nnz > 0) {
auto csr_accessor = csr.accessor<int64_t, 1>();
// Convert the sparse matrix to CSR format
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
at::parallel_for(0, nnz, 10000, [&](int64_t start, int64_t end) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t h, hp0, hp1;
Expand Down
8 changes: 0 additions & 8 deletions aten/src/ATen/benchmarks/quantize_per_channel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ static void quantize_per_channel_4d_contiguous(benchmark::State& state) {
at::Tensor a = at::rand({batches, channels, height, width});
at::Tensor scales = at::rand({channels});
at::Tensor zero_points = at::randint(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));

at::Tensor qa;
Expand All @@ -33,7 +32,6 @@ static void quantize_per_channel_4d_channels_last(benchmark::State& state) {
at::TensorOptions().memory_format(at::MemoryFormat::ChannelsLast));
at::Tensor scales = at::rand({channels});
at::Tensor zero_points = at::randint(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));

at::Tensor qa;
Expand All @@ -50,7 +48,6 @@ static void quantize_per_channel_2d(benchmark::State& state) {
at::Tensor a = at::rand({channels, nelem});
at::Tensor scales = at::rand({channels});
at::Tensor zero_points = at::randint(
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));

at::Tensor qa;
Expand All @@ -63,11 +60,8 @@ static void quantize_per_channel_2d(benchmark::State& state) {
static void GenerateSizes4d(benchmark::internal::Benchmark* b) {
b->ArgNames({"N", "C", "H", "W"});

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t n = 16; n < 256; n *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t c = 4; c < 256; c *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t hw = 4; hw < 256; hw *= 2) {
b->Args({n, c, hw, hw});
}
Expand All @@ -78,9 +72,7 @@ static void GenerateSizes4d(benchmark::internal::Benchmark* b) {
static void GenerateSizes2d(benchmark::internal::Benchmark* b) {
b->ArgNames({"C", "N"});

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t c = 4; c < 512; c *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t n = 4; n < 512; n *= 2) {
b->Args({c, n});
}
Expand Down
6 changes: 0 additions & 6 deletions aten/src/ATen/benchmarks/stateful_conv1d.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ static void stateful_conv1d(benchmark::State& state) {
)");

std::vector<std::vector<torch::jit::IValue>> inputs;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (int i = 0; i < 10; ++i) {
std::vector<torch::jit::IValue> input;
// NOLINTNEXTLINE(modernize-use-emplace)
Expand Down Expand Up @@ -69,15 +68,10 @@ static void GenerateSizes(benchmark::internal::Benchmark* b) {
"Width",
"Optimized"});

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t input_channels = 32; input_channels < 256; input_channels *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t output_channels = 32; output_channels < 256; output_channels *= 2) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t kernel = 3; kernel < 8; ++kernel) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t batch_size = 1; batch_size < 5; ++batch_size) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t width = 32; width < 256; width *= 2) {
b->Args({input_channels, output_channels, kernel, batch_size, width, true});
b->Args({input_channels, output_channels, kernel, batch_size, width, false});
Expand Down
2 changes: 0 additions & 2 deletions aten/src/ATen/benchmarks/tensor_add.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,7 @@ static void tensor_add(benchmark::State& state) {
static void GenerateSizes(benchmark::internal::Benchmark* b) {
b->ArgNames({"N", "C"});

// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t n = 8; n < 1024;) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
for (size_t c = 8; c < 1024;) {
b->Args({n, c});
c *= 2;
Expand Down
9 changes: 0 additions & 9 deletions aten/src/ATen/core/Formatting.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -96,9 +96,7 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
int64_t sz;
if(intMode) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if(expMax > 9) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 11;
stream << std::scientific << std::setprecision(4);
} else {
Expand All @@ -107,27 +105,20 @@ static std::tuple<double, int64_t> __printFormat(std::ostream& stream, const Ten
}
} else {
if(expMax-expMin > 4) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 11;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if(std::fabs(expMax) > 99 || std::fabs(expMin) > 99) {
sz = sz + 1;
}
stream << std::scientific << std::setprecision(4);
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
if(expMax > 5 || expMax < 0) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 7;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
scale = std::pow(10, expMax-1);
stream << std::fixed << std::setprecision(4);
} else {
if(expMax == 0) {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = 7;
} else {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
sz = expMax+6;
}
stream << std::fixed << std::setprecision(4);
Expand Down
Loading

0 comments on commit 3a66a1c

Please sign in to comment.