diff --git a/.clang-tidy b/.clang-tidy
index aeb98b0298ec6..3e6cb5db4d38a 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -8,6 +8,7 @@ bugprone-*,
 -bugprone-lambda-function-name,
 -bugprone-reserved-identifier,
 cppcoreguidelines-*,
+-cppcoreguidelines-avoid-magic-numbers,
 -cppcoreguidelines-interfaces-global-init,
 -cppcoreguidelines-macro-usage,
 -cppcoreguidelines-owning-memory,
diff --git a/aten/src/ATen/CPUGeneratorImpl.cpp b/aten/src/ATen/CPUGeneratorImpl.cpp
index 2b86f601b10ae..d7dce2561d4f9 100644
--- a/aten/src/ATen/CPUGeneratorImpl.cpp
+++ b/aten/src/ATen/CPUGeneratorImpl.cpp
@@ -71,7 +71,6 @@ Generator createCPUGenerator(uint64_t seed_val) {
  * and return them as a 64 bit unsigned int
  */
 inline uint64_t make64BitsFrom32Bits(uint32_t hi, uint32_t lo) {
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   return (static_cast<uint64_t>(hi) << 32) | lo;
 }
 
@@ -157,7 +156,6 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
   // intermediate values.
   if (legacy_pod->normal_is_valid) {
     auto r = legacy_pod->normal_rho;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     auto theta = 2.0 * c10::pi * legacy_pod->normal_x;
     // we return the sin version of the normal sample when in caching mode
     double_normal_sample = c10::optional<double>(r * ::sin(theta));
diff --git a/aten/src/ATen/Context.cpp b/aten/src/ATen/Context.cpp
index c3f0572750dfa..66945e40fe8c2 100644
--- a/aten/src/ATen/Context.cpp
+++ b/aten/src/ATen/Context.cpp
@@ -101,7 +101,6 @@ bool Context::checkCuBLASConfigDeterministic() {
   bool cublas_config_deterministic = true;
   // If using CUDA 10.2 or greater, need to make sure CuBLAS workspace config
   // is set to deterministic setting
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   if (hasCUDART() && (versionCUDART() >= 10020)) {
     char* workspace_config = std::getenv(cublas_config_var_name);
     cublas_config_deterministic = (workspace_config != nullptr) && (
@@ -277,7 +276,6 @@ void Context::setDefaultMobileCPUAllocator() {
     "Cannot set another allocator.");
   // Setting the priority high to make sure no other allocator gets used instead of this.
   prev_allocator_ptr_ = c10::GetCPUAllocator();
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100);
 }
 
@@ -286,7 +284,6 @@ void Context::unsetDefaultMobileCPUAllocator() {
     "setDefaultMobileCPUAllocator must have been called "
     "before unsetDefaultMobileCPUAllocator.");
   // Setting the priority high to make sure no other allocator gets used instead of this.
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   c10::SetCPUAllocator(prev_allocator_ptr_ , /*priority*/ 100);
   prev_allocator_ptr_ = nullptr;
 }
diff --git a/aten/src/ATen/DLConvertor.cpp b/aten/src/ATen/DLConvertor.cpp
index 740e66dd28407..73d0ec024a682 100644
--- a/aten/src/ATen/DLConvertor.cpp
+++ b/aten/src/ATen/DLConvertor.cpp
@@ -10,7 +10,6 @@ namespace at {
 DLDataType getDLDataType(const Tensor& t) {
   DLDataType dtype;
   dtype.lanes = 1;
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   dtype.bits = t.element_size() * 8;
   switch (t.scalar_type()) {
     case ScalarType::Byte:
@@ -126,7 +125,6 @@ ScalarType toScalarType(const DLDataType& dtype) {
   switch (dtype.code) {
     case DLDataTypeCode::kDLUInt:
       switch (dtype.bits) {
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
         case 8:
           stype = ScalarType::Byte;
           break;
@@ -137,19 +135,15 @@ ScalarType toScalarType(const DLDataType& dtype) {
           break;
     case DLDataTypeCode::kDLInt:
       switch (dtype.bits) {
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
         case 8:
           stype = ScalarType::Char;
           break;
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        case 16:
          stype = ScalarType::Short;
          break;
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        case 32:
          stype = ScalarType::Int;
          break;
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        case 64:
          stype = ScalarType::Long;
          break;
@@ -160,15 +154,12 @@ ScalarType toScalarType(const DLDataType& dtype) {
          break;
    case DLDataTypeCode::kDLFloat:
      switch (dtype.bits) {
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        case 16:
          stype = ScalarType::Half;
          break;
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        case 32:
          stype = ScalarType::Float;
          break;
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        case 64:
          stype = ScalarType::Double;
          break;
diff --git a/aten/src/ATen/SparseTensorUtils.cpp b/aten/src/ATen/SparseTensorUtils.cpp
index 8abc7ce81145a..564eeda03c3da 100644
--- a/aten/src/ATen/SparseTensorUtils.cpp
+++ b/aten/src/ATen/SparseTensorUtils.cpp
@@ -95,7 +95,6 @@ Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz) {
   if (nnz > 0) {
     auto csr_accessor = csr.accessor();
     // Convert the sparse matrix to CSR format
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     at::parallel_for(0, nnz, 10000, [&](int64_t start, int64_t end) {
       // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
       int64_t h, hp0, hp1;
diff --git a/aten/src/ATen/benchmarks/quantize_per_channel.cpp b/aten/src/ATen/benchmarks/quantize_per_channel.cpp
index a0c13eba01ee2..375fad4e6b135 100644
--- a/aten/src/ATen/benchmarks/quantize_per_channel.cpp
+++ b/aten/src/ATen/benchmarks/quantize_per_channel.cpp
@@ -12,7 +12,6 @@ static void quantize_per_channel_4d_contiguous(benchmark::State& state) {
   at::Tensor a = at::rand({batches, channels, height, width});
   at::Tensor scales = at::rand({channels});
   at::Tensor zero_points = at::randint(
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));
 
   at::Tensor qa;
@@ -33,7 +32,6 @@ static void quantize_per_channel_4d_channels_last(benchmark::State& state) {
       at::TensorOptions().memory_format(at::MemoryFormat::ChannelsLast));
   at::Tensor scales = at::rand({channels});
   at::Tensor zero_points = at::randint(
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int));
 
   at::Tensor qa;
@@ -50,7 +48,6 @@ static void quantize_per_channel_2d(benchmark::State& state) {
   at::Tensor a = at::rand({channels,
nelem}); at::Tensor scales = at::rand({channels}); at::Tensor zero_points = at::randint( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0, 10, {channels}, at::TensorOptions().dtype(at::ScalarType::Int)); at::Tensor qa; @@ -63,11 +60,8 @@ static void quantize_per_channel_2d(benchmark::State& state) { static void GenerateSizes4d(benchmark::internal::Benchmark* b) { b->ArgNames({"N", "C", "H", "W"}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t n = 16; n < 256; n *= 2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t c = 4; c < 256; c *= 2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t hw = 4; hw < 256; hw *= 2) { b->Args({n, c, hw, hw}); } @@ -78,9 +72,7 @@ static void GenerateSizes4d(benchmark::internal::Benchmark* b) { static void GenerateSizes2d(benchmark::internal::Benchmark* b) { b->ArgNames({"C", "N"}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t c = 4; c < 512; c *= 2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t n = 4; n < 512; n *= 2) { b->Args({c, n}); } diff --git a/aten/src/ATen/benchmarks/stateful_conv1d.cpp b/aten/src/ATen/benchmarks/stateful_conv1d.cpp index 91112db5de937..5ca0973b19f9f 100644 --- a/aten/src/ATen/benchmarks/stateful_conv1d.cpp +++ b/aten/src/ATen/benchmarks/stateful_conv1d.cpp @@ -33,7 +33,6 @@ static void stateful_conv1d(benchmark::State& state) { )"); std::vector> inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { std::vector input; // NOLINTNEXTLINE(modernize-use-emplace) @@ -69,15 +68,10 @@ static void GenerateSizes(benchmark::internal::Benchmark* b) { "Width", "Optimized"}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t input_channels = 32; input_channels < 256; input_channels *= 2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t output_channels = 32; output_channels < 256; output_channels *= 2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t kernel = 3; kernel < 8; ++kernel) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t batch_size = 1; batch_size < 5; ++batch_size) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t width = 32; width < 256; width *= 2) { b->Args({input_channels, output_channels, kernel, batch_size, width, true}); b->Args({input_channels, output_channels, kernel, batch_size, width, false}); diff --git a/aten/src/ATen/benchmarks/tensor_add.cpp b/aten/src/ATen/benchmarks/tensor_add.cpp index fbe5eef8dcbb7..c7eab8ce8d719 100644 --- a/aten/src/ATen/benchmarks/tensor_add.cpp +++ b/aten/src/ATen/benchmarks/tensor_add.cpp @@ -17,9 +17,7 @@ static void tensor_add(benchmark::State& state) { static void GenerateSizes(benchmark::internal::Benchmark* b) { b->ArgNames({"N", "C"}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t n = 8; n < 1024;) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t c = 8; c < 1024;) { b->Args({n, c}); c *= 2; diff --git a/aten/src/ATen/core/Formatting.cpp b/aten/src/ATen/core/Formatting.cpp index 09d76d6f2c279..baf1691bd1d53 100644 --- a/aten/src/ATen/core/Formatting.cpp +++ b/aten/src/ATen/core/Formatting.cpp @@ -96,9 +96,7 @@ static std::tuple __printFormat(std::ostream& stream, const Ten // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int64_t sz; if(intMode) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if(expMax > 9) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz = 11; stream << std::scientific << std::setprecision(4); } else { @@ -107,27 +105,20 @@ static std::tuple __printFormat(std::ostream& stream, const Ten } } else { if(expMax-expMin > 4) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz = 11; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if(std::fabs(expMax) > 99 || std::fabs(expMin) > 99) { sz = sz + 1; } stream << std::scientific << std::setprecision(4); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if(expMax > 5 || expMax < 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scale = std::pow(10, expMax-1); stream << std::fixed << std::setprecision(4); } else { if(expMax == 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz = 7; } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz = expMax+6; } stream << std::fixed << std::setprecision(4); diff --git a/aten/src/ATen/core/List_test.cpp b/aten/src/ATen/core/List_test.cpp index 42ffda0bb6a8e..92f99330cd116 100644 --- a/aten/src/ATen/core/List_test.cpp +++ b/aten/src/ATen/core/List_test.cpp @@ -324,7 +324,6 @@ TEST(ListTest_IValueBasedList, givenList_whenErasingFullRange_thenIsEmpty) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_IValueBasedList, whenCallingReserve_thenDoesntCrash) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list.reserve(100); } @@ -680,7 +679,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingExtractWithNonExistingPosition_then // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithExistingPosition_thenChangesElement) { List list({3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t value = 5; list.set(1, value); EXPECT_EQ(3, list.get(0)); @@ -690,7 +688,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithExistingPosition_then // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingMovingSetWithExistingPosition_thenChangesElement) { List list({3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t value = 5; // NOLINTNEXTLINE(performance-move-const-arg) list.set(1, std::move(value)); @@ -701,7 +698,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingMovingSetWithExistingPosition_thenC // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithNonExistingPosition_thenThrowsException) { List list({3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t value = 5; // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) EXPECT_THROW(list.set(2, value), std::out_of_range); @@ -710,7 +706,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingCopyingSetWithNonExistingPosition_t // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingMovingSetWithNonExistingPosition_thenThrowsException) { List list({3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t value = 5; // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,performance-move-const-arg,hicpp-avoid-goto) EXPECT_THROW(list.set(2, std::move(value)), std::out_of_range); @@ -725,9 +720,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingAccessOperatorWithExistingPosition_ // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorWithExistingPosition_thenSetsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list[1] = 6; EXPECT_EQ(3, list.get(0)); EXPECT_EQ(6, list.get(1)); @@ -736,7 +729,6 @@ TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorWithExistingPosit // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorFromAccessOperator_thenSetsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); list[1] = list[2]; EXPECT_EQ(3, list.get(0)); @@ -746,7 +738,6 @@ TEST(ListTest_NonIValueBasedList, whenAssigningToAccessOperatorFromAccessOperato // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenSwappingFromAccessOperator_thenSwapsElements) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); swap(list[1], list[2]); EXPECT_EQ(3, list.get(0)); @@ -763,9 +754,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingAccessOperatorWithNonExistingPositi // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithLValue_thenInsertsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; list.insert(list.begin() + 2, v); EXPECT_EQ(4, list.size()); @@ -774,9 +763,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithLValue_thenInse // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithRValue_thenInsertsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; // NOLINTNEXTLINE(performance-move-const-arg) list.insert(list.begin() + 2, std::move(v)); @@ -786,9 +773,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertOnIteratorWithRValue_thenInse // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingInsertWithLValue_thenReturnsIteratorToNewElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; List::iterator result = list.insert(list.begin() + 2, v); EXPECT_EQ(list.begin() + 2, result); @@ -796,9 +781,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertWithLValue_thenReturnsIterato // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingInsertWithRValue_thenReturnsIteratorToNewElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; // NOLINTNEXTLINE(performance-move-const-arg) List::iterator result = list.insert(list.begin() + 2, std::move(v)); @@ -807,9 +790,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingInsertWithRValue_thenReturnsIterato // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithLValue_thenInsertsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; list.emplace(list.begin() + 2, v); EXPECT_EQ(4, list.size()); @@ -818,9 +799,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithLValue_thenInsertsElemen // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithRValue_thenInsertsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; // NOLINTNEXTLINE(performance-move-const-arg) list.emplace(list.begin() + 2, std::move(v)); @@ -830,9 +809,7 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithRValue_thenInsertsElemen // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithConstructorArg_thenInsertsElement) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list.emplace(list.begin() + 2, 5); // const char* is a constructor arg to std::int64_t EXPECT_EQ(4, list.size()); EXPECT_EQ(5, list.get(2)); @@ -841,7 +818,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceWithConstructorArg_thenInser // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithLValue_ThenInsertsElement) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; list.push_back(v); EXPECT_EQ(1, list.size()); @@ -851,7 +827,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithLValue_ThenInsertsEleme // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithRValue_ThenInsertsElement) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; // NOLINTNEXTLINE(performance-move-const-arg) list.push_back(std::move(v)); @@ -862,7 +837,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingPushBackWithRValue_ThenInsertsEleme // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithLValue_ThenInsertsElement) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; list.emplace_back(v); EXPECT_EQ(1, list.size()); @@ -872,7 +846,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithLValue_ThenInsertsEl // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithRValue_ThenInsertsElement) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t v = 5; // NOLINTNEXTLINE(performance-move-const-arg) list.emplace_back(std::move(v)); @@ -883,7 +856,6 @@ TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithRValue_ThenInsertsEl // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingEmplaceBackWithConstructorArg_ThenInsertsElement) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list.emplace_back(5); // const char* is a constructor arg to std::int64_t EXPECT_EQ(1, list.size()); EXPECT_EQ(5, list.get(0)); @@ -901,7 +873,6 @@ TEST(ListTest_NonIValueBasedList, givenEmptyList_whenIterating_thenBeginIsEnd) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenIterating_thenFindsElements) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 5}); bool found_first = false; bool found_second = false; @@ -910,7 +881,6 @@ TEST(ListTest_NonIValueBasedList, whenIterating_thenFindsElements) { if (static_cast(*iter) == 3) { EXPECT_FALSE(found_first); found_first = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (static_cast(*iter) == 5) { EXPECT_FALSE(found_second); found_second = true; @@ -924,7 +894,6 @@ TEST(ListTest_NonIValueBasedList, whenIterating_thenFindsElements) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenIteratingWithForeach_thenFindsElements) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 5}); bool found_first = false; bool found_second = false; @@ -933,7 +902,6 @@ TEST(ListTest_NonIValueBasedList, whenIteratingWithForeach_thenFindsElements) { if (elem == 3) { EXPECT_FALSE(found_first); found_first = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (elem == 5) { EXPECT_FALSE(found_second); found_second = true; @@ -969,7 +937,6 @@ TEST(ListTest_NonIValueBasedList, givenList_whenErasingFullRange_thenIsEmpty) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, whenCallingReserve_thenDoesntCrash) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list.reserve(100); } @@ -1092,7 +1059,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenPrefixDecrementing_thenMoves // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, givenIterator_whenIncreasing_thenMovesToNextAndReturnsNewPosition) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); List::iterator iter1 = list.begin(); @@ -1103,7 +1069,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenIncreasing_thenMovesToNextAn // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, givenIterator_whenDecreasing_thenMovesToNextAndReturnsNewPosition) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); List::iterator iter1 = list.end(); @@ -1114,7 +1079,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenDecreasing_thenMovesToNextAn // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, givenIterator_whenAdding_thenReturnsNewIterator) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); List::iterator iter1 = list.begin(); @@ -1125,7 +1089,6 @@ TEST(ListTest_NonIValueBasedList, givenIterator_whenAdding_thenReturnsNewIterato // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, givenIterator_whenSubtracting_thenReturnsNewIterator) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) List list({3, 4, 5}); List::iterator iter1 = list.end() - 1; @@ -1218,7 +1181,6 @@ TEST(ListTest_NonIValueBasedList, givenEmptyList_whenCallingResize_thenResizesAn // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ListTest_NonIValueBasedList, givenEmptyList_whenCallingResizeWithValue_thenResizesAndSetsValue) { List list; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list.resize(2, 5); EXPECT_EQ(2, list.size()); EXPECT_EQ(5, list.get(0)); diff --git a/aten/src/ATen/core/boxing/KernelFunction_test.cpp b/aten/src/ATen/core/boxing/KernelFunction_test.cpp index 172b2035a445d..bb56535b5ab08 
100644 --- a/aten/src/ATen/core/boxing/KernelFunction_test.cpp +++ b/aten/src/ATen/core/boxing/KernelFunction_test.cpp @@ -42,7 +42,6 @@ void boxed_func_with_return(const OperatorHandle& /*opHandle*/, Stack* stack) { called_with_args = tuple(stack->at(0).toInt(), stack->at(1).toInt()); stack->clear(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) stack->push_back(5); } @@ -71,7 +70,6 @@ void boxed_func_with_multi_return(const OperatorHandle& /*opHandle*/, Stack* sta struct unboxed_functor_with_return final : OperatorKernel { int64_t operator()(int64_t a, int64_t b) { called_with_args = tuple(a, b); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 5; } }; @@ -96,7 +94,6 @@ struct unboxed_functor_without_return_factory final { int64_t unboxed_function_with_return(int64_t a, int64_t b) { called_with_args = tuple(a, b); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 5; } @@ -107,7 +104,6 @@ void unboxed_function_without_return(int64_t a, int64_t b) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) auto unboxed_lambda_with_return = [] (int64_t a, int64_t b) -> int64_t { called_with_args = tuple(a, b); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 5; }; @@ -267,7 +263,6 @@ void expectOutOfPlaceMultiBoxedCallingWorks(const KernelFunction& func) { OperatorHandle dummy = makeDummyOperatorHandle(); auto s1 = 1.0f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto s2 = 2.0f; auto t1 = at::zeros({1}); auto t2 = at::zeros({1}); @@ -368,7 +363,6 @@ void expectOutOfPlaceMultiUnboxedCallingWorks(const KernelFunction& func) { OperatorHandle dummy = makeDummyOperatorHandle(); auto s1 = 1.0f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto s2 = 2.0f; auto t1 = at::zeros({1}); auto t2 = at::zeros({1}); diff --git a/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp index 04e5a6dc4799e..cc7c0536c4b9c 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_function_legacy_test.cpp @@ -50,7 +50,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(6, result[0].toInt()); @@ -62,7 +61,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(4, result[0].toInt()); @@ -155,7 +153,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntOutpu auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(9, result[0].toInt()); @@ -214,7 +211,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListO auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""}); 
ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(3, result[0].toIntVector().size()); @@ -229,7 +225,6 @@ std::tuple, c10::optional, Dict, c10::optional, Dict>( dummyTensor(DispatchKey::CUDA), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, {dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}, c10::optional(c10::in_place, 0), @@ -399,7 +394,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListI ASSERT_TRUE(op.has_value()); captured_input_list_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(0, outputs.size()); EXPECT_EQ(3, captured_input_list_size); @@ -417,7 +411,6 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithIntListI auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(1, outputs.size()); EXPECT_EQ(3, outputs[0].toInt()); @@ -696,9 +689,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfLis ASSERT_TRUE(op.has_value()); c10::Dict> dict; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert("key1", c10::List({10, 20})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert("key2", c10::List({30, 40})); auto outputs = callOp(*op, dict); EXPECT_EQ(1, outputs.size()); @@ -727,15 +718,11 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithMapOfLis c10::Dict>> dict; c10::Dict dict1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict1.insert(10, "10"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict1.insert(20, "20"); dict.insert("key1", c10::List>({dict1})); c10::Dict dict2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert(30, "30"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert(40, "40"); dict.insert("key2", c10::List>({dict2})); auto outputs = callOp(*op, dict); @@ -800,9 +787,7 @@ TEST(OperatorRegistrationTest_LegacyFunctionBasedKernel, givenKernelWithListOfMa dict1.insert("1", c10::List({1, 2})); dict1.insert("3", c10::List({3, 4})); c10::Dict> dict2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert("5", c10::List({5, 6})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert("7", c10::List({7, 8})); c10::List>> list({ dict1, dict2 }); auto outputs = callOp(*op, list); diff --git a/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp index a84eb719d1cc3..833fbb1d8bb24 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp @@ -39,7 +39,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(6, result[0].toInt()); @@ -51,7 +50,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) { // assert that schema and 
cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(4, result[0].toInt()); @@ -171,7 +169,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntOutput_when auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(9, result[0].toInt()); @@ -231,7 +228,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListOutput_ auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(3, result[0].toIntVector().size()); @@ -246,7 +242,6 @@ std::tuple, c10::optional, Dict, c10::optional, Dict>( dummyTensor(DispatchKey::CUDA), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, c10::List({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), c10::optional(c10::in_place, 0), @@ -421,7 +416,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_w ASSERT_TRUE(op.has_value()); captured_input_list_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(0, outputs.size()); EXPECT_EQ(3, captured_input_list_size); @@ -439,7 +433,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernelWithIntListInput_w auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(1, outputs.size()); EXPECT_EQ(3, outputs[0].toInt()); diff --git a/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp index 59b31d58cf35d..c36ad6eb285da 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_lambda_legacy_test.cpp @@ -36,7 +36,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(6, result[0].toInt()); @@ -139,7 +138,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntOutput_ auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(9, result[0].toInt()); @@ -192,7 +190,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListOut auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto 
result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(3, result[0].toIntVector().size()); @@ -210,7 +207,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMultipleOu dict.insert("second", dummyTensor(DispatchKey::CUDA)); return std::tuple, c10::optional, Dict>( dummyTensor(DispatchKey::CUDA), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, {dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}, c10::optional(c10::in_place, 0), @@ -362,7 +358,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInp ASSERT_TRUE(op.has_value()); captured_input_list_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(0, outputs.size()); EXPECT_EQ(3, captured_input_list_size); @@ -378,7 +373,6 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithIntListInp auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(1, outputs.size()); EXPECT_EQ(3, outputs[0].toInt()); @@ -630,9 +624,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfList_ ASSERT_TRUE(op.has_value()); c10::Dict> dict; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert("key1", c10::List({10, 20})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert("key2", c10::List({30, 40})); auto outputs = callOp(*op, dict); EXPECT_EQ(1, outputs.size()); @@ -660,15 +652,11 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithMapOfListO c10::Dict>> dict; c10::Dict dict1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict1.insert(10, "10"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict1.insert(20, "20"); dict.insert("key1", c10::List>({dict1})); c10::Dict dict2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert(30, "30"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert(40, "40"); dict.insert("key2", c10::List>({dict2})); auto outputs = callOp(*op, dict); @@ -729,9 +717,7 @@ TEST(OperatorRegistrationTest_LegacyLambdaBasedKernel, givenKernelWithListOfMapO dict1.insert("1", c10::List({1, 2})); dict1.insert("3", c10::List({3, 4})); c10::Dict> dict2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert("5", c10::List({5, 6})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict2.insert("7", c10::List({7, 8})); c10::List>> list({ dict1, dict2 }); auto outputs = callOp(*op, list); diff --git a/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp index edd52ec915968..f82b6199639f3 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_lambda_test.cpp @@ -26,7 +26,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(6, result[0].toInt()); @@ -38,7 +37,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) { // 
assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(4, result[0].toInt()); @@ -140,7 +138,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntOutput_whenRe auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(9, result[0].toInt()); @@ -191,7 +188,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListOutput_wh auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(3, result[0].toIntVector().size()); @@ -210,7 +206,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithMultipleOutputs_ dict.insert("second", dummyTensor(DispatchKey::CUDA)); return std::tuple, c10::optional, Dict>( dummyTensor(DispatchKey::CUDA), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, c10::List({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), c10::optional(c10::in_place, 0), @@ -359,7 +354,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_wit ASSERT_TRUE(op.has_value()); captured_input_list_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(0, outputs.size()); EXPECT_EQ(3, captured_input_list_size); @@ -374,7 +368,6 @@ TEST(OperatorRegistrationTest_LambdaBasedKernel, givenKernelWithIntListInput_wit auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(1, outputs.size()); EXPECT_EQ(3, outputs[0].toInt()); diff --git a/aten/src/ATen/core/boxing/impl/kernel_stackbased_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_stackbased_test.cpp index 73fe3bfbc51ec..5a312b0ce7ef8 100644 --- a/aten/src/ATen/core/boxing/impl/kernel_stackbased_test.cpp +++ b/aten/src/ATen/core/boxing/impl/kernel_stackbased_test.cpp @@ -49,7 +49,6 @@ void expectCallsIncrement(c10::DispatchKeySet ks) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(ks), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(6, result[0].toInt()); @@ -65,7 +64,6 @@ void expectCallsIncrementUnboxed(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t result = callOpUnboxed(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(6, result); } @@ -76,7 +74,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = 
c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(4, result[0].toInt()); diff --git a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp index 551bc3658971d..2e12d5c1c1bfc 100644 --- a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp +++ b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor_test.cpp @@ -46,7 +46,6 @@ void expectCallsIncrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(6, result[0].toInt()); @@ -58,7 +57,6 @@ void expectCallsDecrement(DispatchKey dispatch_key) { // assert that schema and cpu kernel are present auto op = c10::Dispatcher::singleton().findSchema({"_test::my_op", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(dispatch_key), 5); EXPECT_EQ(1, result.size()); EXPECT_EQ(4, result[0].toInt()); @@ -143,7 +141,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntOutput_whenR auto op = c10::Dispatcher::singleton().findSchema({"_test::int_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 3, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(9, result[0].toInt()); @@ -209,7 +206,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListOutput_w auto op = c10::Dispatcher::singleton().findSchema({"_test::list_output", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = callOp(*op, dummyTensor(DispatchKey::CPU), 2, 4, 6); EXPECT_EQ(1, result.size()); EXPECT_EQ(3, result[0].toIntVector().size()); @@ -225,7 +221,6 @@ struct KernelWithMultipleOutputs final : OperatorKernel { dict.insert("second", dummyTensor(DispatchKey::CUDA)); return std::tuple, c10::optional, Dict>( dummyTensor(DispatchKey::CUDA), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, c10::List({dummyTensor(DispatchKey::CPU), dummyTensor(DispatchKey::CUDA)}), c10::optional(c10::in_place, 0), @@ -415,7 +410,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_wi ASSERT_TRUE(op.has_value()); captured_input_list_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(0, outputs.size()); EXPECT_EQ(3, captured_input_list_size); @@ -435,7 +429,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithIntListInput_wi auto op = c10::Dispatcher::singleton().findSchema({"_test::int_list_input", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outputs = callOp(*op, dummyTensor(DispatchKey::CPU), c10::List({2, 4, 6})); EXPECT_EQ(1, outputs.size()); EXPECT_EQ(3, outputs[0].toInt()); @@ -579,7 +572,6 @@ TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithTupleInput_with auto op = 
c10::Dispatcher::singleton().findSchema({"_test::tuple_input", ""}); ASSERT_TRUE(op.has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::tuple tup{"foobar", 123, 420.1337}; auto outputs = callOp(*op, tup); EXPECT_EQ(1, outputs.size()); @@ -661,7 +653,6 @@ class KernelWithMultipleConstructorArgs final : public OperatorKernel { TEST(OperatorRegistrationTest_FunctorBasedKernel, givenKernelWithMultipleConstructorArgs_whenRegistered_thenCanBeCalled) { auto registrar = RegisterOperators() .op("_test::offset_op(Tensor tensor, int input) -> int", RegisterOperators::options().kernel(DispatchKey::CPU, 2, 3) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .kernel(DispatchKey::CUDA, 4, 5)); auto op = c10::Dispatcher::singleton().findSchema({"_test::offset_op", ""}); diff --git a/aten/src/ATen/core/dispatch/backend_fallback_test.cpp b/aten/src/ATen/core/dispatch/backend_fallback_test.cpp index c6f575a5c1d91..801df900536f3 100644 --- a/aten/src/ATen/core/dispatch/backend_fallback_test.cpp +++ b/aten/src/ATen/core/dispatch/backend_fallback_test.cpp @@ -90,9 +90,7 @@ TEST(BackendFallbackTest, TestBackendFallbackWithMode) { c10::impl::IncludeDispatchKeyGuard guard(DispatchKey::TESTING_ONLY_GenericMode); override_call_count = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = ones({5, 5}, kDouble); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false); ASSERT_EQ(override_call_count, 2); } @@ -103,9 +101,7 @@ TEST(BackendFallbackTest, TestBackendFallbackWithWrapper) { m.fallback(torch::CppFunction::makeFromBoxedFunction<&generic_wrapper_fallback>()); override_call_count = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = at::detail::make_tensor(ones({5, 5}, kDouble)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor b = batch_norm(a, {}, {}, {}, {}, true, 0.1, 1e-05, false); ASSERT_EQ(override_call_count, 1); } @@ -122,7 +118,6 @@ TEST(BackendFallbackTest, TestFallthroughBackendFallback) { override_call_count = 0; // Doesn't trigger, as we fallthrough - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = zeros({5, 5}, kDouble); ASSERT_EQ(override_call_count, 0); // Does trigger, because we explicitly set it diff --git a/aten/src/ATen/core/ivalue.cpp b/aten/src/ATen/core/ivalue.cpp index 4d31c3b9fadbe..5d963098117b6 100644 --- a/aten/src/ATen/core/ivalue.cpp +++ b/aten/src/ATen/core/ivalue.cpp @@ -508,7 +508,6 @@ std::ostream& IValue::repr( case IValue::Tag::Double: { double d = v.toDouble(); int c = std::fpclassify(d); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((c == FP_NORMAL || c == FP_ZERO ) && std::abs(d) < 1e10) { int64_t i = int64_t(d); if (double(i) == d) { diff --git a/aten/src/ATen/core/op_registration/op_registration_test.cpp b/aten/src/ATen/core/op_registration/op_registration_test.cpp index 7af2eeecf3681..718cec18a55eb 100644 --- a/aten/src/ATen/core/op_registration/op_registration_test.cpp +++ b/aten/src/ATen/core/op_registration/op_registration_test.cpp @@ -859,9 +859,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // primitive types testArgTypes::test( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.5, [] (const double& v) {EXPECT_EQ(1.5, v);}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.5, [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());}, "(float a) -> float"); testArgTypes::test( @@ -888,9 +886,7 @@ TEST(OperatorRegistrationTest, 
testAvailableArgTypes) { // optional types (with has_value() == true) testArgTypes>::test( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::optional(1.5), [] (const c10::optional& v) {EXPECT_EQ(1.5, v.value());}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::optional(2.5), [] (const IValue& v) {EXPECT_EQ(2.5, v.toDouble());}, "(float? a) -> float?"); testArgTypes>::test( @@ -963,9 +959,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // list types (with non-empty list) testArgTypes>::test( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::List({1.5, 2.5}), [] (const c10::List& v) {expectListEquals({1.5, 2.5}, v);}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::List({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to>());}, "(float[] a) -> float[]"); testArgTypes>::test( @@ -1014,9 +1008,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // list types (with non-empty list) testArgTypes, c10::List>::test( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::ArrayRef({1.5, 2.5}), [] (c10::ArrayRef v) {expectListEquals({1.5, 2.5}, v);}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::List({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to>());}, "(float[] a) -> float[]"); testArgTypes, c10::List>::test( @@ -1066,9 +1058,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // std::array list types (with non-empty list) testArgTypes>::test( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::array({1.5, 2.5}), [] (std::array v) {expectListEquals({1.5, 2.5}, v);}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::array({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to>());}, "(float[2] a) -> float[2]"); testArgTypes>::test( @@ -1119,9 +1109,7 @@ TEST(OperatorRegistrationTest, testAvailableArgTypes) { // deprecated list types (with non-empty list) testArgTypes>::test( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector({1.5, 2.5}), [] (const std::vector& v) {expectListEquals({1.5, 2.5}, v);}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector({3.5, 4.5}), [] (const IValue& v) {expectListEquals({3.5, 4.5}, v.to>());}, "(float[] a) -> float[]"); testArgTypes>::test( diff --git a/aten/src/ATen/native/Activation.cpp b/aten/src/ATen/native/Activation.cpp index bbb1e588b7558..5092546084852 100644 --- a/aten/src/ATen/native/Activation.cpp +++ b/aten/src/ATen/native/Activation.cpp @@ -207,7 +207,6 @@ Tensor selu(const Tensor & self) { } Tensor relu6(const Tensor & self) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::hardtanh(self, /*min_val=*/0, /*max_val=*/6); } @@ -216,7 +215,6 @@ Tensor & selu_(Tensor & self) { } Tensor & relu6_(Tensor & self) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::hardtanh_(self, /*min_val=*/0, /*max_val=*/6); } @@ -355,7 +353,6 @@ Tensor rrelu_with_noise_backward( bool is_result) { auto lower_tensor = scalar_to_tensor(lower); auto upper_tensor = scalar_to_tensor(upper); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (training && (upper_tensor - lower_tensor).item().to() > 1E-6) { return grad_output.mul(noise); } else { @@ -464,7 +461,6 @@ void inline prelu_cpu_kernel_share_weights( auto input_data = input.data_ptr(); auto weight_val = weight.data_ptr()[0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, input_numel, 1000, [&](int64_t start, 
int64_t end) { for (auto i = start; i < end; i++) { scalar_t input_data_val = input_data[i]; @@ -505,7 +501,6 @@ void inline prelu_cpu_kernel_multi_weights( } } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.numel() > 1000) { at::parallel_for(0, input_dim0_size, 0, loop); } else { @@ -579,7 +574,6 @@ void inline prelu_cpu_backward_kernel_share_weights( auto input_grad_data = input_grad.data_ptr(); auto weight_grad_data = weight_grad.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scalar_t sum = at::parallel_reduce(0, input_numel, 1000, scalar_t(0), [&](int64_t start, int64_t end, scalar_t ident) -> scalar_t { scalar_t partial_sum = ident; @@ -634,7 +628,6 @@ void inline prelu_cpu_backward_kernel_multi_weights( } } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.numel() > 1000) { at::parallel_for(0, input_dim0_size, 0, loop); } else { @@ -785,9 +778,7 @@ Tensor infinitely_differentiable_gelu_backward( const Tensor& grad, const Tensor& self) { constexpr double kAlpha = M_2_SQRTPI * M_SQRT1_2 * 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor cdf = (1.0 + (self * M_SQRT1_2).erf_()).mul_(0.5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor pdf = (-0.5 * self * self).exp_(); return cdf.addcmul_(self, pdf, kAlpha).mul_(grad); } diff --git a/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp b/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp index 5b067eb8c7421..fdd739d1c15fb 100644 --- a/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp +++ b/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp @@ -139,7 +139,6 @@ void adaptive_avg_pool3d_out_cpu_template( istrideW); }); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output.resize_({input.size(-5), sizeD, osizeT, osizeH, osizeW}); int64_t n = input.size(0); diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp index 34c9e8bcd863b..890228beff3e0 100644 --- a/aten/src/ATen/native/BatchLinearAlgebra.cpp +++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp @@ -2504,7 +2504,6 @@ std::tuple linalg_eig_out_info(const Tensor& input, Tensor& va // See: https://github.com/pytorch/pytorch/pull/52491#issuecomment-795685687 // Here we call CPU path for matrices smaller than 2048x2048 // that should be in general significantly faster than calling MAGMA - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.size(-1) <= 2048) { linalg_eig_stub(at::kCPU, real_imag_values, maybe_complex_vectors, infos, input.to(kCPU), compute_eigenvectors); } else { @@ -2780,7 +2779,6 @@ static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT, auto lda = std::max(1, m); auto ldvt = std::max(1, n); auto mn = std::min(m, n); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor iwork = at::empty({8 * mn}, at::kInt); auto iwork_data = iwork.data_ptr(); Tensor rwork; diff --git a/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp b/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp index a858099898221..1a832b0645c21 100644 --- a/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp +++ b/aten/src/ATen/native/BatchLinearAlgebraKernel.cpp @@ -81,7 +81,6 @@ void apply_reflect_conj_tri_single(scalar_t* self, int64_t n, int64_t stride, bo }; } // For small matrices OpenMP overhead is too large - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (n < 256) { loop(0, n); } else { diff --git a/aten/src/ATen/native/BinaryOps.cpp b/aten/src/ATen/native/BinaryOps.cpp index 
fd79092fdca52..b101663bf09df 100644 --- a/aten/src/ATen/native/BinaryOps.cpp +++ b/aten/src/ATen/native/BinaryOps.cpp @@ -1103,12 +1103,10 @@ Tensor& heaviside_(Tensor& self, const Tensor& values) { } Tensor& ldexp_out(const Tensor& self, const Tensor& other, Tensor& result) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::mul_out(result, self, at::pow(2.0, other)); } Tensor ldexp(const Tensor& self, const Tensor& other) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::mul(self, at::pow(2.0, other)); } diff --git a/aten/src/ATen/native/Convolution.cpp b/aten/src/ATen/native/Convolution.cpp index b10c136d7d318..d774af9031d7f 100644 --- a/aten/src/ATen/native/Convolution.cpp +++ b/aten/src/ATen/native/Convolution.cpp @@ -254,13 +254,11 @@ auto ConvParams::use_mkldnn(const at::Tensor& input, const at::Tensor& weight) c !transposed && // or transposed tensors // For 1x1 filters, MKLDNN is faster than THNN when multi-threaded, // but THNN is faster when single-threaded. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (is_strided() || is_dilated() || input.size(0) >= 16 || weight.size(-1) != 1 || weight.size(-2) != 1 || at::get_num_threads() > 1) && (groups > 1 || (weight.size(-1) > 3 && weight.size(-2) > 3) || input.size(0) > 1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) || input.size(0)*input.size(1)*input.size(2)*input.size(3) > 20480) // for some case, native is faster ); @@ -277,10 +275,8 @@ auto ConvParams::use_nnpack(const at::Tensor& input, const at::Tensor& weight) c !transposed && // or transposed tensors input.ndimension() == 4 && // must be in NCHW format weight.ndimension() == 4 && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (weight.size(2) < 17) && (weight.size(3) < 17) // NNPACK only supports kernels up to 16x16 #if !defined(C10_MOBILE) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) && input.size(0) >= 16 // ensure large enough batch size to ensure perf, tuneable #endif ; @@ -316,7 +312,6 @@ auto ConvParams::is_depthwise( const at::Tensor& input, const at::Tensor& weight) const -> bool { return input.is_cuda() && !transposed && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (input.ndimension() == 4 || input.ndimension() == 5) && input.size(1) == groups && groups > 1 && // no point if there is only a single group @@ -329,145 +324,113 @@ bool check_cudnn_depthwise_workload(const at::Tensor& input, int stride) { int ch = input.size(1); int bs = input.size(0); if (stride==1) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (w >= 7) { // All batch sizes and nb_channels - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (w >= 112) { return true; } // large nb_channels - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ch >= 1024) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if (w >= 56) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 32) { return true; } } // batch_size specific - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bs >= 128) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if (ch >= 512) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (ch >= 64) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (w >= 14) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 32) && (w >=28)) { return true; } - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 64) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 256) && (w >= 14)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 32) && (w >= 28)) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 32) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 256) && (w >= 14)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 128) && (w >= 28)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 32) && (w >= 56)) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 16) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((ch >= 1024) && (w >= 14)) { return true; } // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 256) && (w >= 28)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 32) && (w >= 56)) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 8) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 512) && (w >= 28)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 64) && (w >= 56)) { return true; } } } } else if (stride==2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ch < 256) { return false; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (w >= 7) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bs >= 128) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if (ch >= 1024) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if ((ch >= 512) && (w >= 14)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (w >= 28) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 64) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 512) && (w >= 14)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (w >= 28) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 32) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 1024) && (w >= 14)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (w >= 28) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 16) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 512) && (w >= 28)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (w >= 56) { return true; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (bs >= 8) { // NOLINTNEXTLINE(bugprone-branch-clone,cppcoreguidelines-avoid-magic-numbers) if ((ch >= 1024) && (w >= 28)) { return true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (w >= 56) { return true; } } else if (bs >= 1) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((ch >= 512) && (w >=112)) { return true; } @@ -484,7 +447,6 @@ auto ConvParams::use_cudnn_depthwise( } if (detail::getCUDAHooks().supportsDepthwiseConvolutionWithCuDNN()) { long cudnn_version 
= detail::getCUDAHooks().versionCuDNN(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bool kernel_cond = (cudnn_version >= 7600 && use_cudnn(input, weight) && input.scalar_type() == kHalf && // only for FP16 @@ -492,12 +454,10 @@ auto ConvParams::use_cudnn_depthwise( is_depthwise(input, weight) && input.ndimension() == 4 && // TODO: 5-D contiguous depthwise is not supported yet, need benchmarks weight.size(2) == weight.size(3) && // only square kernels - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.size(2) >= 7 && // min width/height 7 !is_dilated() && // no dilation supported stride[0] == stride[1] && // equal strides ((weight.size(3) == 3) || (weight.size(3) == 1)) && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.size(1) >= 32); // min 32 channels supported) if (kernel_cond) { return check_cudnn_depthwise_workload(input, stride[0]); @@ -895,7 +855,6 @@ at::Tensor _convolution( at::MemoryFormat cudnn_memory_format = at::MemoryFormat::Contiguous; if (cudnn_conv_use_channels_last(input, weight)) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cudnn_memory_format = (k == 5) ? at::MemoryFormat::ChannelsLast3d : at::MemoryFormat::ChannelsLast; } @@ -1008,7 +967,6 @@ at::Tensor _convolution( params.padding, params.groups); } else if ( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) !params.transposed && (input.ndimension() == 5) && (input.device().is_cpu()) && !params.is_dilated()) { @@ -1092,7 +1050,6 @@ at::Tensor _convolution_nogroup( return at::slow_conv_transpose2d( input, weight, kernel_size, bias, stride, padding, output_padding, dilation); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (dim == 5) { return at::slow_conv_transpose3d( input, weight, kernel_size, bias, @@ -1118,12 +1075,10 @@ at::Tensor _convolution_nogroup( stride, padding); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (dim == 5 && (input.is_cuda() || dilated)) { return at::slow_conv_dilated3d( input, weight, kernel_size, bias, stride, padding, dilation); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (dim == 5) { /* dim == 5, CPU, non-dilated */ /* CPU implementation has specialized MM kernels for non-dilated case here */ diff --git a/aten/src/ATen/native/ConvolutionMM3d.cpp b/aten/src/ATen/native/ConvolutionMM3d.cpp index 9ad3adc564890..d8b1e7870e1dc 100644 --- a/aten/src/ATen/native/ConvolutionMM3d.cpp +++ b/aten/src/ATen/native/ConvolutionMM3d.cpp @@ -67,7 +67,6 @@ static inline void slow_conv3d_shape_check( const int64_t dim_width = 4; // Allow for empty batch size but not other dimensions - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bool valid_empty = ndim == 5 && input.size(dim_batch) == 0 && input.size(dim_planes) != 0 && input.size(dim_depth) != 0 && input.size(dim_height) != 0 && input.size(dim_width) != 0; @@ -156,7 +155,6 @@ static inline void slow_conv3d_shape_check( static Tensor view_weight_2d(const Tensor& weight_) { Tensor weight = weight_.contiguous(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (weight.dim() == 5) { const int64_t s1 = weight.size(0); const int64_t s2 = diff --git a/aten/src/ATen/native/Distance.cpp b/aten/src/ATen/native/Distance.cpp index f8700c8ee23dd..432bc38c56da4 100644 --- a/aten/src/ATen/native/Distance.cpp +++ b/aten/src/ATen/native/Distance.cpp @@ -68,7 +68,6 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10 // See Note [cdist relies on cdist_impl redispatching] // Keep 
this condition in sync with the condition at the Note - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!(p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25))))) { TORCH_CHECK(device1 == kCPU || device1 == kCUDA, "cdist only supports CPU and CUDA devices, X1 got: ", device1); TORCH_CHECK(device2 == kCPU || device2 == kCUDA, "cdist only supports CPU and CUDA devices, X2 got: ", device2); @@ -102,7 +101,6 @@ static Tensor cdist_impl(const Tensor& x1, const Tensor& x2, const double p, c10 result = at::empty(output_shape, x1.options()); } else if (c1 == 0) { result = at::zeros(output_shape, x1.options()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p == 2 && (mode == 1 || (mode == 0 && (r1 > 25 || r2 > 25)))) { // See Note [cdist relies on cdist_impl redispatching] // Keep the condition above in sync with the condition at the Note @@ -134,7 +132,6 @@ Tensor cdist(const Tensor& x1, const Tensor& x2, const double p, c10::optional 25 || r2 > 25)))) { return cdist_impl(x1, x2, p, compute_mode); } else { diff --git a/aten/src/ATen/native/Distributions.cpp b/aten/src/ATen/native/Distributions.cpp index 3896dff9783a7..5f98c1c6060a9 100644 --- a/aten/src/ATen/native/Distributions.cpp +++ b/aten/src/ATen/native/Distributions.cpp @@ -62,7 +62,6 @@ namespace { int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) { TORCH_CHECK(lambda >= 0, "invalid Poisson rate, expected rate to be non-negative"); at::uniform_real_distribution standard_uniform(0.0, 1.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (lambda >= 10) { // transformed rejection method, (Hoermann, 1993) // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -72,29 +71,20 @@ int64_t sample_poisson(double lambda, at::CPUGeneratorImpl* generator) { double slam = std::sqrt(lambda); double loglam = std::log(lambda); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b = 0.931 + 2.53 * slam; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a = -0.059 + 0.02483 * b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) invalpha = 1.1239 + 1.1328 / (b - 3.4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vr = 0.9277 - 3.6224 / (b - 2); // NOLINTNEXTLINE(modernize-use-bool-literals) while (1) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) U = standard_uniform(generator) - 0.5; V = standard_uniform(generator); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) us = 0.5 - std::fabs(U); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k = (int64_t)std::floor((2 * a / us + b) * U + lambda + 0.43); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((us >= 0.07) && (V <= vr)) { return k; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((k < 0) || ((us < 0.013) && (V > us))) { continue; } diff --git a/aten/src/ATen/native/Embedding.cpp b/aten/src/ATen/native/Embedding.cpp index bbf7a56ace41a..e04433669dc39 100644 --- a/aten/src/ATen/native/Embedding.cpp +++ b/aten/src/ATen/native/Embedding.cpp @@ -158,7 +158,6 @@ Tensor & embedding_renorm_cpu_( auto row = self[sorted_indices[i]]; auto norm = row.norm(norm_type).item(); if (norm > max_norm) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto scale = max_norm / (norm + 1e-7); row *= scale; } diff --git a/aten/src/ATen/native/EmbeddingBag.cpp b/aten/src/ATen/native/EmbeddingBag.cpp index 8a4dd05c4b24d..0b7783c5ffeb3 100644 --- a/aten/src/ATen/native/EmbeddingBag.cpp +++ b/aten/src/ATen/native/EmbeddingBag.cpp @@ -150,7 +150,6 
@@ index_select_add(const Tensor &select_indices, /* block_size */ddim, /* has_weight */false, /* normalize_by_lengths */false, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /* prefetch */16, /* is_weight_positional */false, /* use_offsets */true @@ -312,7 +311,6 @@ index_select_scale_add(const Tensor &select_indices, /* block_size */ddim, /* has_weight */true, /* normalize_by_lengths */false, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /* prefetch */16, /* is_weight_positional */false, /* use_offsets */true diff --git a/aten/src/ATen/native/FractionalMaxPool3d.cpp b/aten/src/ATen/native/FractionalMaxPool3d.cpp index a7c39e42a9f5b..4fded33426085 100644 --- a/aten/src/ATen/native/FractionalMaxPool3d.cpp +++ b/aten/src/ATen/native/FractionalMaxPool3d.cpp @@ -172,7 +172,6 @@ void fractional_max_pool3d_out_cpu_template( "fractional_max_pool3d_out(): non-empty 4D or 5D (batch mode) tensor ", " expected for input, but got: ", ndims); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ndims == 5) { numBatch = input_.size(0); planeDim++; @@ -312,7 +311,6 @@ void fractional_max_pool3d_backward_out_cpu_template( int64_t widthDim = 3; int64_t ndims = input.ndimension(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ndims == 5) { numBatch = input.size(0); planeDim = 1; diff --git a/aten/src/ATen/native/GridSampler.cpp b/aten/src/ATen/native/GridSampler.cpp index 176981e878e1c..60f5fe45ab472 100644 --- a/aten/src/ATen/native/GridSampler.cpp +++ b/aten/src/ATen/native/GridSampler.cpp @@ -873,7 +873,6 @@ Tensor grid_sampler(const Tensor& input, const Tensor& grid, static_cast(padding_mode) == GridSamplerPadding::Zeros && align_corners && input.dim() == 4 && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.size(1) <= 1024) { return cudnn_grid_sampler(input, grid); } diff --git a/aten/src/ATen/native/Integration.cpp b/aten/src/ATen/native/Integration.cpp index b196e57a8f92c..909a946c69ef3 100644 --- a/aten/src/ATen/native/Integration.cpp +++ b/aten/src/ATen/native/Integration.cpp @@ -20,14 +20,12 @@ Tensor do_trapz(const Tensor& y, const Tensor& dx, int64_t dim) { Tensor left = y.slice(dim, 0, -1); Tensor right = y.slice(dim, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return ((left + right) * dx).sum(dim) / 2.; } // When dx is constant, the above formula simplifies // to dx * [(\sum_{i=1}^n y_i) - (y_1 + y_n)/2] Tensor do_trapz(const Tensor& y, double dx, int64_t dim) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (y.sum(dim) - (y.select(dim, 0) + y.select(dim, -1)) * (0.5)) * dx; } diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp index 4f04d662bdb47..b7725a20a9e95 100644 --- a/aten/src/ATen/native/LinearAlgebra.cpp +++ b/aten/src/ATen/native/LinearAlgebra.cpp @@ -1209,7 +1209,6 @@ static inline Tensor& bmm_out_or_baddbmm_(Tensor& self_or_result, const Tensor& || (strides[1] == 1 && strides[2] >= sizes[1]); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (contraction_size * res_rows * res_cols < 400) { if (is_bmm_out) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, batch1.scalar_type(), "bmm", [&] { @@ -1574,7 +1573,6 @@ Tensor compute_T2(const Tensor& A) { auto As = _allocate_buffer(A, 3); // 3 for {I, A, A^2} _fill_matrix_powers(As, A, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) As.select(0, 2).div_(2.0); return As.sum(0); } @@ -1594,7 +1592,6 @@ Tensor compute_T4(const Tensor& A) { // computes (I / 2 + 
A / 6 + A^2 / 24) at::native::_compute_linear_combination( As.narrow(0, 0, 3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _blob_to_Tensor({1 / 2.0, 1 / 6.0, 1 / 24.0}, A) ) ); @@ -1617,7 +1614,6 @@ Tensor compute_T8(const Tensor& A) { constexpr scalar_t x7 = (89. - sqrt_177) / (5040. * x3); constexpr scalar_t y2 = (857. - 58. * sqrt_177) / 630.; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto As = _allocate_buffer(A, 5); // 3 for {I, A, A^2} _fill_matrix_powers(As, A, 3); @@ -1662,43 +1658,27 @@ Tensor compute_T12(const Tensor& A) { constexpr int num_prods = 4; array2d b = {{ { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.0198e-16, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.46932117595418237389, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.20099424927047284052, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.04623946134063071740 }, { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.31597895759871264183, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.19926790417132231573, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.01179296240992997031, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.01108844528519167989 }, { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.18188869982170434744, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.05502798439925399070, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.09351590770535414968, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.00610700528898058230 }, { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.0861320e-13, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.13181061013830184015, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.02027855540589259079, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.00675951846863086359 } }}; @@ -1740,57 +1720,37 @@ Tensor compute_T18(const Tensor& A) { array2d b = {{ { 0., - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.00365581030144618291e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.02924648241156932449e-03, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.92138498045729985177e-04, 0. 
}, { 0., - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.97849749499645077844e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.36783778460411720168e+00, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.98289622525382669416e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -6.37898194594723280150e-04 }, { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.09676396052962061844e+01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.68015813878906206114e+00, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.71779846478865511061e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -6.98210122488052056106e-03, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.34975017086070470649e-05 }, { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9.04316832390810593223e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -6.76404519071381882256e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.75961301770459654925e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.95552570429315521194e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.39180257516060693404e-05 }, { 0., 0., - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9.23364619367118555360e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.69364939002081722752e-02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.40086798182036094347e-05 } }}; @@ -2161,7 +2121,6 @@ static Tensor _norm_min_max(Tensor& self, double ord, int64_t dim, bool keepdim) static Tensor& _linalg_norm_matrix_out(Tensor& result, const Tensor &self, const optional& opt_ord, IntArrayRef dim, bool keepdim, optional opt_dtype) { Tensor result_; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ord = opt_ord.value_or(2.0).toDouble(); TORCH_CHECK(self.layout() == Layout::Strided, "matrix norm only supports strided layout, got: ", self.layout()); @@ -2434,7 +2393,6 @@ void _linalg_cond_check_ord(c10::variant ord_variant) { if (ord_variant.index() == 0) { Scalar* ord = c10::get_if(&ord_variant); double abs_ord = std::abs(ord->toDouble()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_CHECK(abs_ord == 2.0 || abs_ord == 1.0 || abs_ord == INFINITY, "linalg_cond got an invalid norm type: ", ord->toDouble()); } else if (ord_variant.index() == 1) { @@ -2465,14 +2423,12 @@ Tensor linalg_cond(const Tensor& self, const optional& opt_ord) { } // If ord == None or ord == ±2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (std::abs(ord.toDouble()) == 2.0) { auto singular_values = std::get<1>(at::svd(self)); // singular values are sorted in descending order auto s_max = at::narrow(singular_values, /*dim=*/-1, /*start=*/0, /*length=*/1); auto s_min = at::narrow(singular_values, /*dim=*/-1, /*start=*/-1, /*length=*/1); Tensor result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ord.toDouble() == -2.0) { result = s_min / s_max; } else { @@ -2642,11 +2598,8 @@ struct KronImpl final { maxdim = std::max(self.dim(), other.dim()); int64_t pad_self = maxdim - self.dim(); int64_t pad_other = maxdim - other.dim(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_reshape = c10::SmallVector(2 * maxdim); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_reshape = c10::SmallVector(2 * maxdim); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) result_reshape = c10::SmallVector(maxdim); for (int64_t i = 0; i 
< maxdim; i++) { a_reshape[2 * i] = (i >= pad_self ? self.sizes()[i - pad_self] : 1); @@ -2662,7 +2615,6 @@ struct KronImpl final { Tensor& kron_out(Tensor& result) const { TORCH_INTERNAL_ASSERT(result.defined(), "Cannot call kron_out with an undefined result tensor as the out argument. Please allocate a Tensor before calling kron_out with it."); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SmallVector mul_shape(2 * maxdim); for (int64_t i = 0; i < maxdim; i++) { mul_shape[2 * i] = a_reshape[2 * i]; @@ -2682,11 +2634,8 @@ struct KronImpl final { int64_t maxdim; Tensor self_view; Tensor other_view; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SmallVector result_reshape; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SmallVector a_reshape; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SmallVector b_reshape; }; } diff --git a/aten/src/ATen/native/Loss.cpp b/aten/src/ATen/native/Loss.cpp index 3d674d7df9c45..9e24cf7f8c149 100644 --- a/aten/src/ATen/native/Loss.cpp +++ b/aten/src/ATen/native/Loss.cpp @@ -280,7 +280,6 @@ Tensor poisson_nll_loss(const Tensor& input, const Tensor& target, const bool lo } if (full) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto stirling_term = target * at::log(target) - target + 0.5 * at::log(2 * c10::pi * target); loss += stirling_term.masked_fill(target <= 1, 0); } @@ -449,7 +448,6 @@ Tensor mse_loss_backward(const Tensor& grad_output, const Tensor& input, const T Tensor& mse_loss_backward_out(const Tensor& grad_output, const Tensor& input, const Tensor& target, int64_t reduction, Tensor& grad_input) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto norm = reduction == Reduction::Mean ? 2. / input.numel() : 2.; auto iter = at::TensorIteratorConfig() .add_output(grad_input) diff --git a/aten/src/ATen/native/LossMultiLabelMargin.cpp b/aten/src/ATen/native/LossMultiLabelMargin.cpp index ef11445057556..ce5ef6b487dcc 100644 --- a/aten/src/ATen/native/LossMultiLabelMargin.cpp +++ b/aten/src/ATen/native/LossMultiLabelMargin.cpp @@ -158,7 +158,6 @@ static void multilabel_margin_loss_backward_out_frame( int64_t dim) { // NOLINTNEXTLINE(clang-diagnostic-unused-variable) CheckedFrom c = "multilabel_margin_loss_backward_out_frame"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto is_target_arg = TensorArg(is_target_contiguous, "is_target", 5); TORCH_CHECK( @@ -228,7 +227,6 @@ static void multilabel_margin_loss_backward_out_cpu_template( int64_t nframe, dim; CheckedFrom c = "multilabel_margin_loss_backward_cpu_template"; auto target_arg = TensorArg(target, "target", 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto is_target_arg = TensorArg(is_target, "is_target", 5); const int64_t ndims = input.dim(); diff --git a/aten/src/ATen/native/MaxUnpooling.cpp b/aten/src/ATen/native/MaxUnpooling.cpp index 268469ab26621..032ed0139e0a6 100644 --- a/aten/src/ATen/native/MaxUnpooling.cpp +++ b/aten/src/ATen/native/MaxUnpooling.cpp @@ -142,7 +142,6 @@ Tensor max_unpooling3d_forward_out_cpu_frame( int64_t dimh = 2; int64_t dimt = 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.ndimension() == 5) { nBatch = input.size(0); dimw++; @@ -252,7 +251,6 @@ static void max_unpooling3d_shape_check( int dimt = 1; int dimn = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.ndimension() == 5) { dimw++; dimh++; @@ -303,7 +301,6 @@ Tensor& max_unpooling3d_forward_out_cpu(const Tensor& self_, max_unpooling3d_shape_check( 
self_, Tensor(), indices_, output_size, stride, padding); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (self_.ndimension() == 5) { output.resize_({self.size(0), self.size(1), oT, oH, oW}); } else { @@ -567,7 +564,6 @@ Tensor& max_unpooling3d_backward_out_cpu(const Tensor& grad_output_, /* resize */ grad_input.resize_as_(self); grad_input.zero_(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (self.ndimension() == 5) { nbatch = self.size(0); dimt++; diff --git a/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp b/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp index 450257d82006a..dcde12ea42baa 100644 --- a/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp +++ b/aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp @@ -104,7 +104,6 @@ static inline void slow_conv_transpose3d_shape_check( int dimh = 2; int dimw = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ndim == 5) { dimf++; dimd++; diff --git a/aten/src/ATen/native/NaiveDilatedConvolution.cpp b/aten/src/ATen/native/NaiveDilatedConvolution.cpp index 6647613ca06d3..3274f13efbe33 100644 --- a/aten/src/ATen/native/NaiveDilatedConvolution.cpp +++ b/aten/src/ATen/native/NaiveDilatedConvolution.cpp @@ -560,7 +560,6 @@ Tensor slow_conv_dilated3d_cpu( stride_size, pad_size, dilation_size); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto is_batch = input.dim() == 5; auto options = input.options(); // calculate output tensor size @@ -610,7 +609,6 @@ std::tuple slow_conv_dilated3d_backward_cpu( stride_size, pad_size, dilation_size); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto is_batch = input.dim() == 5; auto options = grad_output.options(); // template function assumes batched tensors. unsqueeze(0) will diff --git a/aten/src/ATen/native/Normalization.cpp b/aten/src/ATen/native/Normalization.cpp index b52eb57ea98a4..d231e0f992ada 100644 --- a/aten/src/ATen/native/Normalization.cpp +++ b/aten/src/ATen/native/Normalization.cpp @@ -431,16 +431,11 @@ std::tuple _batch_norm_impl_index( && weight.defined() && bias.defined() && ((running_mean.defined() && running_var.defined()) || (!running_mean.defined() && !running_var.defined() && training)) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) && ((input.dim() == 2 && input.size(0) <= 131070 && training) // per-activation, training - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) || (input.dim() == 2 && input.size(0) <= 262136 && !training) // per-activation, eval - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) || (input.dim() >= 3 && input.size(0) <= 880801 && training) // spatial, training - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) || (input.dim() >= 3 && input.size(0) <= 65535 && !training)) //spatial, eval && detail::getCUDAHooks().compiledWithCuDNN() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) && cudnn_enabled && detail::getCUDAHooks().versionCuDNN() >= 5110L); if (use_cudnn && eps >= detail::getCUDAHooks().batchnormMinEpsilonCuDNN()) { diff --git a/aten/src/ATen/native/PixelShuffle.cpp b/aten/src/ATen/native/PixelShuffle.cpp index 7d5a22edf2ac0..fc8e3c80cefc4 100644 --- a/aten/src/ATen/native/PixelShuffle.cpp +++ b/aten/src/ATen/native/PixelShuffle.cpp @@ -46,7 +46,6 @@ Tensor pixel_shuffle(const Tensor& self, int64_t upscale_factor) { std::vector permutation(self.sizes().begin(), self_sizes_batch_end); // std::iota is used to maintain the batch dims within the permutation. 
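// Added note (illustration only, not part of the original change): with a single batch
// dim, pixel_shuffle first views an input of shape (N, C*r*r, H, W) as (N, C, r, r, H, W).
// std::iota fills the batch part of the permutation with {0}, and the appended negative
// indices {-5, -2, -4, -1, -3} below resolve to {1, 4, 2, 5, 3}, selecting (oc, h, 1st r, w, 2nd r);
// the permuted tensor therefore has shape (N, C, H, r, W, r), which is finally reshaped
// to the expected output (N, C, H*r, W*r).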
std::iota(permutation.begin(), permutation.end(), 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) permutation.insert(permutation.end(), {-5 /* oc */, -2 /* h */, -4 /* 1st upscale_factor */, -1 /* w */, -3 /* 2nd upscale_factor */}); const auto input_permuted = input_reshaped.permute(permutation); @@ -98,7 +97,6 @@ Tensor pixel_unshuffle(const Tensor& self, int64_t downscale_factor) { std::vector permutation(self.sizes().begin(), self_sizes_batch_end); // std::iota is used to maintain the batch dims within the permutation. std::iota(permutation.begin(), permutation.end(), 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) permutation.insert(permutation.end(), {-5 /* c */, -3 /* 1st downscale_factor */, -1 /*2nd downscale_factor */, -4 /* oh */, -2 /* ow */}); const auto input_permuted = input_reshaped.permute(permutation); diff --git a/aten/src/ATen/native/RNN.cpp b/aten/src/ATen/native/RNN.cpp index ef3f0582bf2bd..5d8052e2808f9 100644 --- a/aten/src/ATen/native/RNN.cpp +++ b/aten/src/ATen/native/RNN.cpp @@ -238,7 +238,6 @@ struct QuantizedCellParams : public CellParamsBase { at::Tensor qw_ih = std::move(tensors[0]), qw_hh = std::move(tensors[1]), b_ih = std::move(tensors[2]), b_hh = std::move(tensors[3]), col_offsets_ih = std::move(tensors[4]), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) col_offsets_hh = std::move(tensors[5]); double scale_ih = doubles[0], scale_hh = doubles[1]; int64_t zero_point_ih = longs[0], zero_point_hh = longs[1]; @@ -555,7 +554,6 @@ static std::vector gather_params(TensorList params, bool has_biases, if (has_biases) { if (has_projections) { TORCH_CHECK(params.size() % 5 == 0, "got an incorrect number of RNN parameters"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < params.size(); i += 5) { result.emplace_back(params[i], params[i + 1], params[i + 2], params[i + 3], params[i + 4]); } @@ -589,7 +587,6 @@ static c10::List> gather_quantized_params( static at::Tensor undefined; std::vector> result; TORCH_CHECK(params.size() % 12 == 0, "got an incorrect number of quantized RNN parameters"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < params.size(); i += 12) { result.emplace_back(c10::make_intrusive( static_cast(params[i]), @@ -597,19 +594,12 @@ static c10::List> gather_quantized_params( static_cast(params[i + 2]), static_cast(params[i + 3]), static_cast(params[i + 4]), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 5]), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 6]), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 7]), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 8]).item(), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 9]).item(), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 10]).item(), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(params[i + 11]).item())); } return c10::List>(result); diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp index 6a49b2bbfe824..eacc8579cab25 100644 --- a/aten/src/ATen/native/ReduceOps.cpp +++ b/aten/src/ATen/native/ReduceOps.cpp @@ -876,7 +876,6 @@ Tensor& logsumexp_out(const Tensor& self, DimnameList dims, bool keepdim, Tensor static Tensor& norm_out(Tensor &result, const Tensor &self, const optional& opt_p, IntArrayRef dim, bool keepdim, optional 
opt_dtype) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p = opt_p.value_or(2.0).to(); TORCH_CHECK(self.device().is_cpu() || self.is_cuda(), "norm only supports CPU and CUDA device types, but got: ", self.device().type()); diff --git a/aten/src/ATen/native/ReplicationPadding.cpp b/aten/src/ATen/native/ReplicationPadding.cpp index 6a8d5c1119643..4eab518d8f7e6 100644 --- a/aten/src/ATen/native/ReplicationPadding.cpp +++ b/aten/src/ATen/native/ReplicationPadding.cpp @@ -149,7 +149,6 @@ static inline void shapeCheck3d( "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.dim() == 5) { dimw++; @@ -185,7 +184,6 @@ TORCH_META_FUNC(replication_pad3d) ( int64_t ptop = paddingSize[2]; int64_t pbottom = paddingSize[3]; int64_t pfront = paddingSize[4]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t pback = paddingSize[5]; int64_t dimw = 3; int64_t dimh = 2; @@ -195,7 +193,6 @@ TORCH_META_FUNC(replication_pad3d) ( shapeCheck3d(input, pleft, pright, ptop, pbottom, pfront, pback); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.dim() == 5) { nbatch = input.size(0); @@ -740,7 +737,6 @@ Tensor& replication_pad3d_backward_out_cpu_template( int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int pback = paddingSize[5]; int dimw = 3; int dimh = 2; @@ -748,7 +744,6 @@ Tensor& replication_pad3d_backward_out_cpu_template( int dimslices = 0; int64_t nbatch = 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.dim() == 5) { nbatch = input.size(0); @@ -1034,7 +1029,6 @@ TORCH_IMPL_FUNC(replication_pad3d_out_cpu) ( int64_t ptop = paddingSize[2]; int64_t pbottom = paddingSize[3]; int64_t pfront = paddingSize[4]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t pback = paddingSize[5]; int64_t dimw = 3; int64_t dimh = 2; @@ -1045,7 +1039,6 @@ TORCH_IMPL_FUNC(replication_pad3d_out_cpu) ( /* get contiguous input */ auto input = input_.contiguous(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (input.dim() == 5) { nbatch = input.size(0); dimw++; diff --git a/aten/src/ATen/native/Sorting.cpp b/aten/src/ATen/native/Sorting.cpp index e3d5249a1596a..2ffa0389ba128 100644 --- a/aten/src/ATen/native/Sorting.cpp +++ b/aten/src/ATen/native/Sorting.cpp @@ -230,7 +230,6 @@ void quantile_impl( interpolation == QUANTILE_INTERPOLATION_MODE::MIDPOINT) { // calculate weights for linear and midpoint Tensor weights = interpolation == QUANTILE_INTERPOLATION_MODE::MIDPOINT - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ? 
at::full_like(ranks, 0.5) : ranks - ranks_below; diff --git a/aten/src/ATen/native/SpectralOps.cpp b/aten/src/ATen/native/SpectralOps.cpp index 673c9a442c5d2..aca797d3360cd 100644 --- a/aten/src/ATen/native/SpectralOps.cpp +++ b/aten/src/ATen/native/SpectralOps.cpp @@ -931,7 +931,6 @@ Tensor istft(const Tensor& self, const int64_t n_fft, const optional ho y = y.slice(2, start, end, 1); window_envelop = window_envelop.slice(2, start, end, 1); const auto window_envelop_lowest = window_envelop.abs().min().item().toDouble(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (window_envelop_lowest < 1e-11) { std::ostringstream ss; REPR(ss) << "window overlap add min: " << window_envelop_lowest; diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp index a905c3156392b..eb218f5cab66d 100644 --- a/aten/src/ATen/native/TensorFactories.cpp +++ b/aten/src/ATen/native/TensorFactories.cpp @@ -1125,7 +1125,6 @@ Tensor bartlett_window( window_length += 1; } auto window = native::arange(window_length, dtype, layout, device, pin_memory) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .mul_(2. / static_cast(window_length - 1)); const int64_t first_half_size = ((window_length - 1) >> 1) + 1; window.narrow(0, first_half_size, window_length - first_half_size).mul_(-1).add_(2); @@ -1167,7 +1166,6 @@ Tensor blackman_window( auto window = native::arange(window_length, dtype, layout, device, pin_memory) .mul_(c10::pi / static_cast(window_length - 1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) window = window.mul(4).cos_().mul_(0.08) - window.mul(2).cos_().mul_(0.5) + 0.42; return periodic ? window.narrow(0, 0, window_length - 1) : window; } @@ -1193,7 +1191,6 @@ Tensor hamming_window( return native::hamming_window( window_length, periodic, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /*alpha=*/0.54, dtype, layout, @@ -1210,7 +1207,6 @@ Tensor hamming_window( c10::optional device, c10::optional pin_memory) { return native::hamming_window( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) window_length, periodic, alpha, /*beta=*/0.46, dtype, layout, device, pin_memory); } @@ -1237,7 +1233,6 @@ Tensor hamming_window( window_length += 1; } auto window = native::arange(window_length, dtype, layout, device, pin_memory); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) window.mul_(c10::pi * 2. / static_cast(window_length - 1)).cos_().mul_(-beta).add_(alpha); return periodic ? 
window.narrow(0, 0, window_length - 1) : window; } @@ -1264,7 +1259,6 @@ Tensor hann_window( window_function_checks("hann_window", options, window_length); return native::hamming_window( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) window_length, periodic, /*alpha=*/0.5, /*beta=*/0.5, dtype, layout, device, pin_memory); } @@ -1278,7 +1272,6 @@ Tensor kaiser_window(int64_t window_length, return native::kaiser_window( window_length, /*periodic=*/true, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /*beta=*/12.0, dtype, layout, @@ -1291,7 +1284,6 @@ Tensor kaiser_window(int64_t window_length, bool periodic, c10::optional layout, c10::optional device, c10::optional pin_memory) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return native::kaiser_window(window_length, periodic, /*beta=*/12.0, dtype, layout, device, pin_memory); } diff --git a/aten/src/ATen/native/TensorIteratorReduce.cpp b/aten/src/ATen/native/TensorIteratorReduce.cpp index 268f40738708e..e3c32d9812ae9 100644 --- a/aten/src/ATen/native/TensorIteratorReduce.cpp +++ b/aten/src/ATen/native/TensorIteratorReduce.cpp @@ -110,7 +110,6 @@ static void parallel_dim_reduction(TensorIteratorBase& iter, loop2d_t loop) { if (should_round_columns) { // round columns to multiples of 128 bytes if adjacent columns are // contiguous in memory. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t cols_per_128_bytes = 128 / element_size; std::tie(begin, end) = round_columns(iter, dim, cols_per_128_bytes, begin, end); } diff --git a/aten/src/ATen/native/TensorTransformations.cpp b/aten/src/ATen/native/TensorTransformations.cpp index 6e0886f30767a..cd7fa3cf50045 100644 --- a/aten/src/ATen/native/TensorTransformations.cpp +++ b/aten/src/ATen/native/TensorTransformations.cpp @@ -27,7 +27,6 @@ void inline flip_cpu_kernel( auto sizes_v = in_tensor.sizes().vec(); auto strides_v = in_tensor.strides().vec(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, numel, 1000, [&](int64_t start, int64_t end) { for (auto i = start; i < end; i++) { int64_t cur_indices = i; diff --git a/aten/src/ATen/native/UpSampleNearest3d.cpp b/aten/src/ATen/native/UpSampleNearest3d.cpp index e02fe8a0ea287..9801c1b0f72f4 100644 --- a/aten/src/ATen/native/UpSampleNearest3d.cpp +++ b/aten/src/ATen/native/UpSampleNearest3d.cpp @@ -37,7 +37,6 @@ TORCH_META_FUNC(upsample_nearest3d_backward) ( grad_output.dim() == 5, "Expected grad_output to be a tensor of dimension 5 but got: dimension ", grad_output.dim()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { TORCH_CHECK( grad_output.size(i) == full_output_size[i], diff --git a/aten/src/ATen/native/UpSampleTrilinear3d.cpp b/aten/src/ATen/native/UpSampleTrilinear3d.cpp index 41229c6577274..ffa9f0be62a35 100644 --- a/aten/src/ATen/native/UpSampleTrilinear3d.cpp +++ b/aten/src/ATen/native/UpSampleTrilinear3d.cpp @@ -42,7 +42,6 @@ TORCH_META_FUNC(upsample_trilinear3d_backward) ( grad_output.dim() == 5, "Expected grad_output to be a tensor of dimension 5 but got: dimension ", grad_output.dim()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { TORCH_CHECK( grad_output.size(i) == full_output_size[i], diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp index bdb763ecdc92e..e1047d9e6313b 100644 --- a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp +++ 
b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_dynamic.cpp @@ -62,7 +62,6 @@ at::Tensor PackedLinearWeightQnnp::apply_dynamic_impl( /*min=*/x_min, /*max=*/x_max, /*qmin=*/0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /*qmax=*/255); // Quantize input diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp index 4575aab3a940e..6f91164acbd2d 100644 --- a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp +++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp @@ -186,7 +186,6 @@ PackedLinearWeightQnnp::PackedLinearWeightQnnp( int8_t* w_data = reinterpret_cast(weight_contig.data_ptr()); for (int i = 0; i < wt_numel; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qnnp_w_data[i] = static_cast(w_data[i] + 128); } bcsr_matrix_ = qnnpack::generateBlockCSRMatrix( diff --git a/aten/src/ATen/native/cpu/Activation.cpp b/aten/src/ATen/native/cpu/Activation.cpp index b94d3113b06cc..f667e544ca3b5 100644 --- a/aten/src/ATen/native/cpu/Activation.cpp +++ b/aten/src/ATen/native/cpu/Activation.cpp @@ -279,7 +279,6 @@ void GeluKernelImpl(TensorIterator& it) { it, [](scalar_t x) { constexpr scalar_t kAlpha = M_SQRT1_2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return x * scalar_t(0.5) * (scalar_t(1) + std::erf(x * kAlpha)); }, [&](Vec x_vec) { @@ -355,7 +354,6 @@ void hardsigmoid_backward_kernel(TensorIterator& iter) { const scalar_t one_sixth(1.0f / 6.0f); using Vec = Vec256; Vec kZeroVec(0.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vec kOneSixthVec(1.0f / 6.0f); cpu_kernel_vec( iter, diff --git a/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp b/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp index e2c733f5f78fe..c09272f2bf51c 100644 --- a/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp +++ b/aten/src/ATen/native/cpu/BinaryOpsKernel.cpp @@ -183,7 +183,6 @@ void div_floor_kernel(TensorIteratorBase& iter) { scalar_t floordiv; if (div != 0) { floordiv = std::floor(div); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (div - floordiv > scalar_t(0.5)) { floordiv += scalar_t(1.0); } @@ -639,9 +638,7 @@ void smooth_l1_kernel(TensorIterator& iter, double beta) { [&beta_val](scalar_t a, scalar_t b) -> scalar_t { auto z = std::abs(a - b); return z < beta_val - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ? static_cast(0.5) * z * z / beta_val - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) : z - static_cast(0.5) * beta_val; }, [&beta_val_vec, &point_five_vec](Vec a, Vec b) { @@ -662,9 +659,7 @@ void huber_kernel(TensorIterator& iter, double delta) { iter, [&delta_val](scalar_t a, scalar_t b) -> scalar_t { auto z = std::abs(a - b); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return z < delta_val ? 
static_cast(0.5) * z * z : - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) delta_val * (z - static_cast(0.5) * delta_val); }, [&delta_val_vec, &point_five_vec](Vec a, Vec b) { @@ -847,7 +842,6 @@ void logaddexp2_kernel(TensorIteratorBase& iter) { [=](Vec256 a, Vec256 b) { Vec256 inf(std::numeric_limits::infinity()); Vec256 one(1.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vec256 two(2.0); Vec256 m = maximum(a, b); return Vec256::blendv( diff --git a/aten/src/ATen/native/cpu/DistanceOpsKernel.cpp b/aten/src/ATen/native/cpu/DistanceOpsKernel.cpp index 53d0380d717c6..3839fcf2fdfed 100644 --- a/aten/src/ATen/native/cpu/DistanceOpsKernel.cpp +++ b/aten/src/ATen/native/cpu/DistanceOpsKernel.cpp @@ -155,10 +155,8 @@ struct Dist { // vector from the input, j is the second, and k is the result index. This // parallelizes over the range of k and infers what i and j are from the // value of k. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) parallel_for(0, combs, internal::GRAIN_SIZE / (16 * m), [p, self_start, self_end, n, m, res_start](int64_t k, int64_t end) { const Vec pvec(p); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double n2 = n - .5; // The -1 accounts for floating point truncation issues // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) @@ -191,7 +189,6 @@ struct Dist { run_parallel_pdist>(result, self, p); } else if (p == 1.0) { run_parallel_pdist>(result, self, p); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p == 2.0) { run_parallel_pdist>(result, self, p); } else if (std::isinf(p)) { @@ -215,7 +212,6 @@ struct Dist { int64_t size1 = r1 * m; int64_t size2 = r2 * m; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) parallel_for(0, combs * d, internal::GRAIN_SIZE / (16 * m), [=](int64_t start, int64_t end) { scalar_t * res = res_start + start; const scalar_t * const res_end = res_start + end; @@ -257,7 +253,6 @@ struct Dist { run_parallel_cdist>(result, x1, x2, p); } else if (p == 1.0) { run_parallel_cdist>(result, x1, x2, p); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p == 2.0) { run_parallel_cdist>(result, x1, x2, p); } else if (std::isinf(p)) { @@ -306,7 +301,6 @@ struct Dist { // The only way to parallelize and avoid locking requires parallelizing // over the columns of the input, i.e. we compute the gradient for the // first section of each vector independentaly of the second section, etc. 
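// Added note (interpretation, not part of the original change): splitting the backward work
// by pair would have several threads accumulating into the same gradient rows and would need
// locking; a column of the gradient, however, only reads that same column of the input plus
// the per-pair scalars (grad, dist), so chunking the m columns into Vec::size()-wide blocks
// is naturally lock-free. The grain size passed to parallel_for below divides
// internal::GRAIN_SIZE by roughly the O(n^2) pairs each column block has to visit.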
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, m / Vec::size(), internal::GRAIN_SIZE / (8 * n * n), [p, n, m, gs, grad_start, dist_start, self_start, res_start](int64_t l, int64_t end) { const Vec pvec(p); @@ -329,10 +323,8 @@ struct Dist { if (p == 0.0) { } else if (p == 1.0) { run_backward_parallel_pdist>(result, grad, self, p, dist); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p < 2.0) { run_backward_parallel_pdist(result, grad, self, p, dist); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p == 2.0) { run_backward_parallel_pdist>(result, grad, self, p, dist); } else if (std::isinf(p)) { @@ -347,10 +339,8 @@ struct Dist { if (p == 0.0) { } else if (p == 1.0) { run_backward_parallel_cdist>(result, grad, x1, x2, p, dist); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p < 2.0) { run_backward_parallel_cdist(result, grad, x1, x2, p, dist); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p == 2.0) { run_backward_parallel_cdist>(result, grad, x1, x2, p, dist); } else if (std::isinf(p)) { @@ -380,7 +370,6 @@ struct Dist { const scalar_t * const t2_start = t2.data_ptr(); scalar_t * const res_start = result.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, m / Vec::size(), internal::GRAIN_SIZE / (16 * r1), [=](int64_t l, int64_t end) { const Vec pvec(p); diff --git a/aten/src/ATen/native/cpu/GridSamplerKernel.cpp b/aten/src/ATen/native/cpu/GridSamplerKernel.cpp index 1c1f6ed187391..d6680cb2bad8f 100644 --- a/aten/src/ATen/native/cpu/GridSamplerKernel.cpp +++ b/aten/src/ATen/native/cpu/GridSamplerKernel.cpp @@ -276,13 +276,11 @@ struct ComputeLocationBase { ComputeLocationBase(int64_t size) : max_val(static_cast(size - 1)) , scaling_factor(static_cast(size) / 2) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) , low(static_cast(-0.5)) , twice_span(static_cast(size) * 2) , empty(size <= 0) {} inline Vec unnormalize(const Vec &in) const { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (in + Vec(1)) * Vec(scaling_factor) - Vec(0.5); } @@ -544,25 +542,16 @@ struct ApplyGridSample(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ne = std::get<5>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto sw = std::get<6>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto se = std::get<7>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto nw_mask = std::get<8>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ne_mask = std::get<9>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto sw_mask = std::get<10>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto se_mask = std::get<11>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto i_y_n = std::get<12>(interp_params); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto i_x_w = std::get<13>(interp_params); auto i_nw_offset = i_y_n * iVec(inp_sH) + i_x_w * iVec(inp_sW); @@ -838,14 +827,12 @@ struct ApplyGridSample(weight_val) < 0.5) ? 
self_val + weight_val * (end_val - self_val) : end_val - (end_val - self_val) * (scalar_t(1) - weight_val); @@ -48,7 +47,6 @@ static void lerp_kernel_tensor( at::native::cpu_kernel( iter, [](scalar_t self_val, scalar_t end_val, scalar_t weight_val) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (zabs(weight_val) < 0.5) ? self_val + weight_val * (end_val - self_val) : end_val - (end_val - self_val) * (scalar_t(1) - weight_val); diff --git a/aten/src/ATen/native/cpu/MultinomialKernel.cpp b/aten/src/ATen/native/cpu/MultinomialKernel.cpp index 374ac8a25dde1..44b86f9ee4313 100644 --- a/aten/src/ATen/native/cpu/MultinomialKernel.cpp +++ b/aten/src/ATen/native/cpu/MultinomialKernel.cpp @@ -69,7 +69,6 @@ void multinomial_with_replacement_apply( /* normalize cumulative probability distribution so that last val is 1 i.e. doesn't assume original self row sums to one */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((sum > 0) || ((sum < 1.00001) && (sum > 0.99999))) { for (int64_t j = 0; j < n_categories; j++) { cum_dist_ptr[j * cum_dist_stride_0] /= sum; diff --git a/aten/src/ATen/native/cpu/PowKernel.cpp b/aten/src/ATen/native/cpu/PowKernel.cpp index 69c832a869482..b01fec2f8273b 100644 --- a/aten/src/ATen/native/cpu/PowKernel.cpp +++ b/aten/src/ATen/native/cpu/PowKernel.cpp @@ -48,7 +48,6 @@ void pow_tensor_tensor_kernel(TensorIteratorBase& iter) { template void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scalar_t exp) { using Vec = Vec256; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (exp == 0.5) { cpu_kernel_vec(iter, [](scalar_t base) -> scalar_t { @@ -56,7 +55,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal }, [](Vec base) -> Vec { return base.sqrt(); } ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (exp == 2.0) { cpu_kernel_vec(iter, [](scalar_t base) -> scalar_t { @@ -64,7 +62,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal }, [](Vec base) -> Vec { return base * base; } ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (exp == 3.0) { cpu_kernel_vec(iter, [](scalar_t base) -> scalar_t { @@ -72,7 +69,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal }, [](Vec base) -> Vec { return base * base * base; } ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (exp == -0.5) { cpu_kernel_vec(iter, [](scalar_t base) __ubsan_ignore_float_divide_by_zero__ -> scalar_t { @@ -87,7 +83,6 @@ void pow_tensor_scalar_optimized_kernel(TensorIteratorBase& iter, const exp_scal }, [](Vec base) -> Vec { return base.reciprocal(); } ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (exp == -2.0) { cpu_kernel_vec(iter, [](scalar_t base) -> scalar_t { diff --git a/aten/src/ATen/native/cpu/SoftMaxKernel.cpp b/aten/src/ATen/native/cpu/SoftMaxKernel.cpp index 00214ee593f0c..2d897cf24172e 100644 --- a/aten/src/ATen/native/cpu/SoftMaxKernel.cpp +++ b/aten/src/ATen/native/cpu/SoftMaxKernel.cpp @@ -30,7 +30,6 @@ inline void _vec_log_softmax_lastdim( int64_t dim_size) { using Vec = vec256::Vec256; static constexpr int64_t CHUNK_SIZE = (128 / sizeof(scalar_t)) * Vec::size(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t grain_size = internal::GRAIN_SIZE / (16 * dim_size * CHUNK_SIZE); if (grain_size < CHUNK_SIZE) grain_size = CHUNK_SIZE; @@ -102,7 +101,6 @@ inline void _vec_softmax_lastdim( int64_t outer_size, int64_t dim_size) { 
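// Added note (interpretation, not part of the original change): internal::GRAIN_SIZE is
// ATen's heuristic minimum amount of elementwise work per task; dividing it by dim_size
// (elements per row) times a rough per-element cost factor of 16 converts it into a number
// of rows per task, which is then clamped to at least 1 just below.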
using Vec = vec256::Vec256; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t grain_size = internal::GRAIN_SIZE / (16 * dim_size); if (grain_size < 1) grain_size = 1; @@ -144,7 +142,6 @@ inline void _vec_host_softmax_backward_lastdim( int64_t outer_size, int64_t dim_size) { using Vec = vec256::Vec256; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t grain_size = internal::GRAIN_SIZE / (16 * dim_size); if (grain_size < 1) grain_size = 1; diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp index db283c5839734..ba3fd3eb1e5d3 100644 --- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp +++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp @@ -430,7 +430,6 @@ static void kaiser_window_kernel(TensorIteratorBase& iter, int64_t window_length AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.dtype(), "kaiser_window_cpu", [&](){ const scalar_t alpha = static_cast((window_length - 1) / 2.0); cpu_kernel(iter, [=](scalar_t a){ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return calc_i0(static_cast(beta) * std::sqrt(1 - std::pow((a - alpha) / alpha, static_cast(2.0)))) / calc_i0(static_cast(beta)); }); }); diff --git a/aten/src/ATen/native/cpu/UpSampleKernel.cpp b/aten/src/ATen/native/cpu/UpSampleKernel.cpp index f9303a772b0ef..f515df1297890 100644 --- a/aten/src/ATen/native/cpu/UpSampleKernel.cpp +++ b/aten/src/ATen/native/cpu/UpSampleKernel.cpp @@ -269,9 +269,7 @@ void cpu_upsample_nearest_channels_last( int64_t num_batches = input_sizes[0]; int64_t channels = input_sizes[1]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1; int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1; int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1; @@ -367,9 +365,7 @@ void cpu_upsample_linear_channels_last( int64_t num_batches = input_sizes[0]; int64_t channels = input_sizes[1]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1; int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1; int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1; @@ -508,7 +504,6 @@ void cpu_upsample_linear_channels_last( } else { // upsample nearest 3d TORCH_INTERNAL_ASSERT(ndim == 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, num_batches, at::internal::GRAIN_SIZE / output_slice_size / 8, loop3d); } @@ -903,9 +898,7 @@ void cpu_upsample_nearest_backward( // treat nbatch and channels as one dimension int64_t channels = input_sizes[0] * input_sizes[1]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1; int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1; int64_t output_height = (ndim >= 4) ? 
output_sizes[ndim - 2] : 1; diff --git a/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp b/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp index 42aa3e9a8817f..63a0906148b13 100644 --- a/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp +++ b/aten/src/ATen/native/cpu/UpSampleMoreKernel.cpp @@ -35,9 +35,7 @@ void cpu_upsample_linear_backward( // treat nbatch and channels as one dimension int64_t channels = input_sizes[0] * input_sizes[1]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t input_depth = (ndim == 5) ? input_sizes[2] : 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t output_depth = (ndim == 5) ? output_sizes[2] : 1; int64_t input_height = (ndim >= 4) ? input_sizes[ndim - 2] : 1; int64_t output_height = (ndim >= 4) ? output_sizes[ndim - 2] : 1; @@ -149,7 +147,6 @@ void cpu_upsample_linear_backward( } else { // upsample trilinear 3d TORCH_INTERNAL_ASSERT(ndim == 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, channels, at::internal::GRAIN_SIZE / output_slice_size / 8, loop3d); } diff --git a/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp b/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp index 22765da991cdc..cfbbf5c6fa198 100644 --- a/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp +++ b/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp @@ -88,7 +88,6 @@ Tensor mkldnn_reorder_conv2d_weight( // [o, i, h, w]. Ideally we should reorder the weight back in serialization. // For backward compatibility, we squash the first two dims (g * o/g) back to // its original form. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (w.ndims() == 5) { auto wdims = w.get_dims(); w.reshape({wdims[0] * wdims[1], wdims[2], wdims[3], wdims[4]}); diff --git a/aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp b/aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp index 6e19a38cf4830..cf005e28f7b66 100644 --- a/aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp +++ b/aten/src/ATen/native/mkldnn/MkldnnTensorMath.cpp @@ -31,7 +31,6 @@ Tensor& mkldnn_zero_(Tensor& self) { auto n = x.get_nelems(); auto* x_ = static_cast(x.get_data_handle()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) parallel_for(0, n, 2048, [x_](int64_t begin, int64_t end) { vec256::map( [](Vec /* unused */) { return 0.0; }, diff --git a/aten/src/ATen/native/quantized/QTensor.cpp b/aten/src/ATen/native/quantized/QTensor.cpp index 6864f3837a491..07a0f87d1c894 100644 --- a/aten/src/ATen/native/quantized/QTensor.cpp +++ b/aten/src/ATen/native/quantized/QTensor.cpp @@ -225,7 +225,6 @@ std::tuple _choose_qparams_per_tensor( /*min=*/x_min, /*max=*/x_max, /*qmin=*/0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /*qmax=*/255, /*preserve_sparsity=*/false, /*force_scale_power_of_two=*/false, diff --git a/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp b/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp index ad73f2399a45e..bef348c14f517 100644 --- a/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp +++ b/aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp @@ -25,7 +25,6 @@ namespace fbgemm_utils { namespace { bool IsChannelsLast3d(const Tensor& tensor) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (tensor.dim() != 5) { return false; } diff --git a/aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp b/aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp index e649467e34ab0..011e2e4777e37 100644 --- a/aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp +++ b/aten/src/ATen/native/quantized/cpu/int_repr_quant.cpp @@ 
-15,7 +15,6 @@ Tensor int_repr_quantized_cpu(const Tensor& self) { // NOLINTNEXTLINE(clang-diagnostic-unused-variable) AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(self.scalar_type(), "int_repr", [&]() { if (bit_width == 4) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t out_size = std::ceil(self.numel() * 0.5); dst = at::empty( {out_size}, diff --git a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp index 0853e3e3b854c..2cabbeaa6386d 100644 --- a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp +++ b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp @@ -437,7 +437,6 @@ void qrelu6_kernel(const Tensor& qx, Tensor& qy) { using Vec = Vec256; auto iter = TensorIterator::unary_op(qy, qx); scalar_t six = at::native::quantize_val( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qx.q_scale(), qx.q_zero_point(), 6.0); auto zero_point_vec = Vec(scalar_t(zero_point)); auto six_vec = Vec(six); @@ -564,11 +563,9 @@ void qhardsigmoid_kernel(const Tensor& qx, Tensor& qy) { AT_DISPATCH_QINT_TYPES(qx.scalar_type(), "qhardsigmoid", [&]() { // - Output scale is set to 1.0 / 2^(BIT_NUM) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float output_scale = 0.00390625; // 1.0 / 2^8 // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) if (SCALAR_TYPE == at::kQInt32) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_scale = 2.3283064365386963e-10; // 1.0 / 2^32 } float inv_output_scale = 1.0 / output_scale; @@ -592,9 +589,7 @@ void qhardsigmoid_kernel(const Tensor& qx, Tensor& qy) { using qVec = Vec256; using fVec = Vec256; fVec kZeroVec(0.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fVec kThreeVec(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fVec kSixVec(6.0f); // Naive implemenentation: uses dequantize/execute/quantize routine @@ -800,9 +795,7 @@ void qhardswish_kernel(const Tensor& qx, Tensor& qy) { fVec i_zero_point_vec(i_zero_point); fVec i_scale_neg_zp_premul_vec = i_scale_vec * i_zero_point_vec.neg(); fVec zero_vec(0.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fVec three_vec(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fVec six_vec(6.0f); AT_DISPATCH_QINT_TYPES(qx.scalar_type(), "qhardswish", [&]() { @@ -845,12 +838,10 @@ void qtanh_kernel(const Tensor& qx, Tensor& qy) { // - Output scale is set to 2.0 / 2^(BIT_NUM) // - For signed types output zero point is set to 0 // - For unsigned types output zero point is set to (qmax + qmin) / 2.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float output_scale = 0.0078125; // 2.0 / 512 int64_t output_zero_point = 0; // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) if (SCALAR_TYPE == at::kQInt32) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_scale = 4.656612873077393e-10; // 2.0 / 2^32 } else if (SCALAR_TYPE == at::kQUInt8) { output_zero_point = 128; @@ -2249,9 +2240,7 @@ void fake_quantize_learnable_channel_grad_kernel_cpu( float* dzero_point_output = (float*)(data[2] + i * strides[2]); float* x_input = (float*)(data[3] + i * strides[3]); float* dy_input = (float*)(data[4] + i * strides[4]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float* scale_input = (float*)(data[5] + i * strides[5]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float* zero_point_input = (float*)(data[6] + i * strides[6]); float inv_scale = 1.0f / (*scale_input); @@ -2918,7 
+2907,6 @@ void dequantize_per_channel_affine_kernel( Tensor scales, Tensor zero_points, int64_t axis, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int bit_width=8) { // For contiguous tensors, e.g. NCHW, arbitrary axis can be used. @@ -2949,7 +2937,6 @@ void dequantize_per_channel_affine_kernel( // We need to convert the qint8 value to float to ensure the // subtraction subexpression returns a float auto qvalue = qd[i / elem_per_byte].val_; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_width < 8) { qvalue >>= (i % elem_per_byte) * bit_width; qvalue &= (1 << bit_width) - 1; @@ -2968,7 +2955,6 @@ void dequantize_per_channel_affine_kernel( // subtraction subexpression returns a float // NOLINTNEXTLINE(clang-analyzer-core.DivideZero) auto qvalue = qd[i / elem_per_byte].val_; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_width < 8) { qvalue >>= (i % elem_per_byte) * bit_width; qvalue &= (1 << bit_width) - 1; diff --git a/aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp b/aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp index 7d0ee165bb27c..ff2f5d0214bce 100644 --- a/aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp +++ b/aten/src/ATen/native/quantized/cpu/qbatch_norm.cpp @@ -357,7 +357,6 @@ Tensor q_batch_norm_impl( } else if (dim == 4) { qy = q_batch_norm2d_impl( qx, mb_weight, mb_bias, mean, var, eps, output_scale, output_zero_point); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (dim == 5) { qy = q_batch_norm3d_impl( qx, mb_weight, mb_bias, mean, var, eps, output_scale, output_zero_point); diff --git a/aten/src/ATen/native/quantized/cpu/qconv.cpp b/aten/src/ATen/native/quantized/cpu/qconv.cpp index 7cdee6ae6cd2e..bc4b0434e366a 100644 --- a/aten/src/ATen/native/quantized/cpu/qconv.cpp +++ b/aten/src/ATen/native/quantized/cpu/qconv.cpp @@ -138,7 +138,6 @@ at::SmallVector MakeConvOutputShape<2>( } template <> -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::SmallVector MakeConvOutputShape<3>( int N, int M, @@ -183,7 +182,6 @@ at::SmallVector MakeConvOutputShape<2>( } template <> -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::SmallVector MakeConvOutputShape<3>( int N, // mini-batch int M, // output channels @@ -642,7 +640,6 @@ at::Tensor PackedConvWeightsQnnp::apply_impl( auto* qnnp_w_data = qnnp_weight.template data_ptr(); auto wt_numel = weight_contig.numel(); for (int i = 0; i < wt_numel; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qnnp_w_data[i] = static_cast(w_data[i] + 128); } at::Tensor qbias; diff --git a/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp b/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp index 48e2dbe4a61bb..d1a7d2785fbb6 100644 --- a/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp +++ b/aten/src/ATen/native/quantized/cpu/qembeddingbag.cpp @@ -90,28 +90,21 @@ at::Tensor& embedding_lookup_fallback_impl( } // NOLINTNEXTLINE(cppcoreguidelines-init-variables) float scale, bias; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (BIT_RATE == 8) { const uint8_t* scale_bias = weight_data + (idx + 1) * weight_size - 2 * sizeof(float); uint32_t scale_val_int32 = 0; scale_val_int32 = scale_val_int32 | (scale_bias[0]) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[1] << 8) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[2] << 16) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[3] << 24); float scale_val = (reinterpret_cast(&scale_val_int32))[0]; 
uint32_t bias_val_int32 = 0; bias_val_int32 = bias_val_int32 | (scale_bias[4]) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[5] << 8) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[6] << 16) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[7] << 24); float bias_val = (reinterpret_cast(&bias_val_int32))[0]; scale = weight_val * scale_val; @@ -122,13 +115,11 @@ at::Tensor& embedding_lookup_fallback_impl( uint16_t scale_val_int16 = 0; scale_val_int16 = scale_val_int16 | (scale_bias[0]) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[1] << 8); at::Half scale_val = (reinterpret_cast(&scale_val_int16))[0]; uint16_t bias_val_int16 = 0; bias_val_int16 = bias_val_int16 | (scale_bias[2]) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (scale_bias[3] << 8); at::Half bias_val = (reinterpret_cast(&bias_val_int16))[0]; scale = weight_val * scale_val; @@ -805,7 +796,6 @@ class QEmbeddingBag final { const c10::optional& per_sample_weights_, const c10::optional& compressed_indices_mapping, bool include_last_offset) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_rate == 8) { return packed_weight->embeddingbag_byte( indices, @@ -841,7 +831,6 @@ class QEmbedding final { const auto offsets_size = indices.numel(); at::Tensor offsets = at::arange(0, offsets_size, indices.scalar_type()); at::Tensor output; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_rate == 8) { return packed_weight->embeddingbag_byte( indices, diff --git a/aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp b/aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp index c32281d61116b..106908c94b186 100644 --- a/aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp +++ b/aten/src/ATen/native/quantized/cpu/qembeddingbag_prepack.cpp @@ -34,9 +34,7 @@ c10::intrusive_ptr PackedEmbeddingBagWeight::prepack( int bit_width, scale_bias_bytes; uint8_t* weight_data = static_cast(weight_contig.data_ptr()); if (qweight.scalar_type() == c10::kQUInt8) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bit_width = 8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scale_bias_bytes = 8; // extra 8 bytes to store FP scale and bias per row. } else { bit_width = 4; @@ -77,7 +75,6 @@ c10::intrusive_ptr PackedEmbeddingBagWeight::prepack( weight_contig.suggest_memory_format()); auto* output_data = output.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_width == 8) { at::parallel_for( 0, embedding_rows, 1, [&](int32_t start_idx, int32_t end_idx) { @@ -276,7 +273,6 @@ Tensor _qembeddingbag_nbit_prepack_helper( "bit_width must be either 2 or 4 to use 'qembeddingbag_nbit_prepack'." 
"For 8bit, consider using 'embedding_bag_byte_prepack'."); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int NUM_ELEM_PER_BYTE = 8 / bit_width; TORCH_CHECK( weight_contig.size(weight.dim() - 1) % NUM_ELEM_PER_BYTE == 0, diff --git a/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp b/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp index 938c3b7a8e84b..f74d3b7ac8431 100644 --- a/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp +++ b/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp @@ -10,18 +10,15 @@ at::Tensor PackedEmbeddingBagWeight::unpack() { auto packed_weight = packed_w; at::Tensor weight_origin; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_rate_ == 8 || bit_rate_ == 4) { const auto input_rows = packed_weight.size(0); const auto input_columns = packed_weight.size(1); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int scale_bias_bytes; const auto num_elem_per_byte = 8 / bit_rate_; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_rate_ == 8) { // The last 2 values are used to store the FP32 scale and zero_point // values per row. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scale_bias_bytes = 8; } else { scale_bias_bytes = 4; @@ -45,7 +42,6 @@ at::Tensor PackedEmbeddingBagWeight::unpack() { uint8_t* output_data; // Allocate output weight tensor based on the bit_width - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_rate_ == 8) { weight_origin = at::_empty_per_channel_affine_quantized( output_shape, @@ -161,7 +157,6 @@ Tensor _qembeddingbag_nbit_unpack_helper( const auto input_rows = packed_weight.size(0); const auto input_columns = packed_weight.size(1); const auto* input_data = packed_weight.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int NUM_ELEM_PER_BYTE = 8 / BIT_RATE; // The last 4 bytes per row are two fp16 scale and zero_point. diff --git a/aten/src/ATen/native/quantized/cpu/qlinear.cpp b/aten/src/ATen/native/quantized/cpu/qlinear.cpp index cc5c3272903f4..eb66c1c46914f 100644 --- a/aten/src/ATen/native/quantized/cpu/qlinear.cpp +++ b/aten/src/ATen/native/quantized/cpu/qlinear.cpp @@ -267,7 +267,6 @@ at::Tensor PackedLinearWeightsQnnp::apply_impl( auto* qnnp_w_data = qnnp_weight.data_ptr(); auto wt_numel = weight_contig.numel(); for (int i = 0; i < wt_numel; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qnnp_w_data[i] = static_cast(w_data[i] + 128); } // Original bias was float, so we requantize it here. 
diff --git a/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp b/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp index 07a4158fd686f..d784a4461185d 100644 --- a/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp +++ b/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp @@ -264,7 +264,6 @@ at::Tensor PackedLinearWeightsQnnp::apply_dynamic_impl(at::Tensor input) { /*min=*/x_min, /*max=*/x_max, /*qmin=*/0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /*qmax=*/255); float* weight_scales_data = w_scales.data_ptr(); if (!input_scale.has_value() || input_scale.value() != q_params.scale) { @@ -289,7 +288,6 @@ at::Tensor PackedLinearWeightsQnnp::apply_dynamic_impl(at::Tensor input) { int8_t* w_data = (int8_t*)weight_contig.data_ptr(); auto wt_numel = weight_contig.numel(); for (int i = 0; i < wt_numel; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qnnp_w_data[i] = static_cast(w_data[i] + 128); } diff --git a/aten/src/ATen/native/quantized/cpu/qrelu.cpp b/aten/src/ATen/native/quantized/cpu/qrelu.cpp index a9bba3759036f..86e6c03ba8dee 100644 --- a/aten/src/ATen/native/quantized/cpu/qrelu.cpp +++ b/aten/src/ATen/native/quantized/cpu/qrelu.cpp @@ -148,7 +148,6 @@ Tensor quantized_relu6_(Tensor& qx) { scalar_t six = at::native::quantize_val( qx.q_scale(), qx.q_zero_point(), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /*value=*/6.0); auto six_vec = Vec(six); cpu_kernel_vec( diff --git a/aten/src/ATen/native/quantized/cpu/qsigmoid.cpp b/aten/src/ATen/native/quantized/cpu/qsigmoid.cpp index 971d0765f5242..fdbe4fb24091e 100644 --- a/aten/src/ATen/native/quantized/cpu/qsigmoid.cpp +++ b/aten/src/ATen/native/quantized/cpu/qsigmoid.cpp @@ -101,12 +101,10 @@ Tensor sigmoid_quantized_cpu(const Tensor& qx) { // - For unsigned types output zero point is set to (qmax + qmin) / 2.0 // See https://stackoverflow.com/a/34448562/3606192 for potential // optimizations - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double output_scale = 0.00390625; // 1.0 / 2^8 int64_t output_zero_point = 0; // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) if (SCALAR_TYPE == at::kQInt32) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_scale = 2.3283064365386963e-10; // 1.0 / 2^32 } else if (SCALAR_TYPE == at::kQInt8) { output_zero_point = -128; diff --git a/aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp b/aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp index acf7f48f6c29c..c42336be9bcfc 100644 --- a/aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp +++ b/aten/src/ATen/native/quantized/fake_quant_per_tensor_affine.cpp @@ -109,7 +109,6 @@ int64_t _get_zero_point_from_tensor( int64_t quant_max, bool is_forward) { float zero_point_fp = zero_point[0].item(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) zero_point_fp = is_forward ? 
std::nearbyint(zero_point_fp) : zero_point_fp + 0.5f; float zero_point_clamped = std::min(std::max(zero_point_fp, static_cast(quant_min)), static_cast(quant_max)); diff --git a/aten/src/ATen/native/sparse/SparseTensor.cpp b/aten/src/ATen/native/sparse/SparseTensor.cpp index f452ad8abfe8e..7518ec8dbe4e1 100644 --- a/aten/src/ATen/native/sparse/SparseTensor.cpp +++ b/aten/src/ATen/native/sparse/SparseTensor.cpp @@ -637,7 +637,6 @@ void inline sparse_mask_out_cpu_kernel( auto mask_indices_accessor = mask_indices.accessor(); scalar_t* t_ptr = t.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, r_nnz, 1000, [&](int64_t start, int64_t end) { for (auto i = start; i < end; i++) { int64_t idx = 0; diff --git a/aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp b/aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp index e31149bfdf75f..03ac612aa12d0 100644 --- a/aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp +++ b/aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp @@ -69,7 +69,6 @@ TORCH_LIBRARY(xnnpack, m) { std::move(std::get<2>(state)), std::move(std::get<3>(state)), std::move(std::get<4>(state)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::move(std::get<5>(state)), // NOLINTNEXTLINE(performance-move-const-arg,cppcoreguidelines-avoid-magic-numbers) std::move(std::get<6>(state)), diff --git a/aten/src/ATen/nnapi/nnapi_model_loader.cpp b/aten/src/ATen/nnapi/nnapi_model_loader.cpp index 56dc04e9026b9..3b05bdd6f67b5 100644 --- a/aten/src/ATen/nnapi/nnapi_model_loader.cpp +++ b/aten/src/ATen/nnapi/nnapi_model_loader.cpp @@ -191,7 +191,6 @@ int load_nnapi_model( CAFFE_ENFORCE(len == 12); uint32_t buffer_number = *(uint32_t*)stored_pointer; uint32_t buffer_offset = *(uint32_t*)(stored_pointer + 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint32_t operand_length = *(uint32_t*)(stored_pointer + 8); CAFFE_ENFORCE(buffer_number < num_buffers); CAFFE_ENFORCE(buffer_offset + operand_length >= buffer_offset); // No integer overflow diff --git a/aten/src/ATen/quantized/Quantizer.cpp b/aten/src/ATen/quantized/Quantizer.cpp index d51a164969729..a8b1b302a39cc 100644 --- a/aten/src/ATen/quantized/Quantizer.cpp +++ b/aten/src/ATen/quantized/Quantizer.cpp @@ -84,7 +84,6 @@ int64_t get_sub_byte_tensor_size(int64_t size_bytes, at::ScalarType t) { int64_t new_size_bytes; switch(t) { case at::ScalarType::QUInt4x2: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new_size_bytes = std::ceil(size_bytes * 0.5); break; default: diff --git a/aten/src/ATen/test/Dict_test.cpp b/aten/src/ATen/test/Dict_test.cpp index d99e01a829a19..08c78fbd5236c 100644 --- a/aten/src/ATen/test/Dict_test.cpp +++ b/aten/src/ATen/test/Dict_test.cpp @@ -113,7 +113,6 @@ TEST(DictTest, givenEmptyDict_whenIterating_thenBeginIsEnd) { TEST(DictTest, givenMutableDict_whenIterating_thenFindsElements) { Dict dict; dict.insert(3, "3"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert(5, "5"); bool found_first = false; bool found_second = false; @@ -122,7 +121,6 @@ TEST(DictTest, givenMutableDict_whenIterating_thenFindsElements) { EXPECT_EQ("3", iter->value()); EXPECT_FALSE(found_first); found_first = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (iter->key() == 5) { EXPECT_EQ("5", iter->value()); EXPECT_FALSE(found_second); @@ -139,7 +137,6 @@ TEST(DictTest, givenMutableDict_whenIterating_thenFindsElements) { TEST(DictTest, givenMutableDict_whenIteratingWithForeach_thenFindsElements) { Dict dict; 
dict.insert(3, "3"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert(5, "5"); bool found_first = false; bool found_second = false; @@ -148,7 +145,6 @@ TEST(DictTest, givenMutableDict_whenIteratingWithForeach_thenFindsElements) { EXPECT_EQ("3", elem.value()); EXPECT_FALSE(found_first); found_first = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (elem.key() == 5) { EXPECT_EQ("5", elem.value()); EXPECT_FALSE(found_second); @@ -165,7 +161,6 @@ TEST(DictTest, givenMutableDict_whenIteratingWithForeach_thenFindsElements) { TEST(DictTest, givenConstDict_whenIterating_thenFindsElements) { Dict dict_; dict_.insert(3, "3"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict_.insert(5, "5"); const Dict& dict = dict_; bool found_first = false; @@ -175,7 +170,6 @@ TEST(DictTest, givenConstDict_whenIterating_thenFindsElements) { EXPECT_EQ("3", iter->value()); EXPECT_FALSE(found_first); found_first = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (iter->key() == 5) { EXPECT_EQ("5", iter->value()); EXPECT_FALSE(found_second); @@ -192,7 +186,6 @@ TEST(DictTest, givenConstDict_whenIterating_thenFindsElements) { TEST(DictTest, givenConstDict_whenIteratingWithForeach_thenFindsElements) { Dict dict_; dict_.insert(3, "3"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict_.insert(5, "5"); const Dict& dict = dict_; bool found_first = false; @@ -202,7 +195,6 @@ TEST(DictTest, givenConstDict_whenIteratingWithForeach_thenFindsElements) { EXPECT_EQ("3", elem.value()); EXPECT_FALSE(found_first); found_first = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (elem.key() == 5) { EXPECT_EQ("5", elem.value()); EXPECT_FALSE(found_second); @@ -281,7 +273,6 @@ TEST(DictTest, givenMutableDict_whenCallingFindOnNonExistingKey_thenReturnsEnd) Dict dict; dict.insert(3, "3"); dict.insert(4, "4"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dict::iterator found = dict.find(5); EXPECT_EQ(dict.end(), found); } @@ -303,7 +294,6 @@ TEST(DictTest, givenConstDict_whenCallingFindOnNonExistingKey_thenReturnsEnd) { dict_.insert(3, "3"); dict_.insert(4, "4"); const Dict& dict = dict_; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dict::iterator found = dict.find(5); EXPECT_EQ(dict.end(), found); } @@ -327,7 +317,6 @@ TEST(DictTest, whenCallingContainsWithNonExistingKey_thenReturnsFalse) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DictTest, whenCallingReserve_thenDoesntCrash) { Dict dict; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.reserve(100); } @@ -503,13 +492,11 @@ TEST(ListTest_IValueBasedList, givenIterator_whenWritingToValueFromIterator_then Dict dict; dict.insert(3, "3"); dict.insert(4, "4"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.insert(5, "5"); (*dict.find(3)).setValue(dict.find(4)->value()); EXPECT_EQ("4", dict.find(3)->value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict.find(3)->setValue(dict.find(5)->value()); EXPECT_EQ("5", dict.find(3)->value()); } @@ -556,7 +543,6 @@ TEST(DictTest, dictTensorAsKey) { EXPECT_EQ("three", found_key1->value()); Dict::iterator found_nokey1 = dict.find(at::tensor(3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dict::iterator found_nokey2 = dict.find(at::tensor(5)); EXPECT_EQ(dict.end(), found_nokey1); EXPECT_EQ(dict.end(), found_nokey2); diff --git a/aten/src/ATen/test/NamedTensor_test.cpp 
b/aten/src/ATen/test/NamedTensor_test.cpp index 21e411bd0c7bd..32479120fd622 100644 --- a/aten/src/ATen/test/NamedTensor_test.cpp +++ b/aten/src/ATen/test/NamedTensor_test.cpp @@ -20,15 +20,12 @@ static Dimname dimnameFromString(const std::string& str) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NamedTensorTest, isNamed) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::zeros({3, 2, 5, 7}); ASSERT_FALSE(tensor.has_names()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::zeros({3, 2, 5, 7}); ASSERT_FALSE(tensor.has_names()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::zeros({3, 2, 5, 7}); auto N = dimnameFromString("N"); auto C = dimnameFromString("C"); @@ -55,7 +52,6 @@ static bool dimnames_equal(at::DimnameList names, at::DimnameList other) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NamedTensorTest, attachMetadata) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::zeros({3, 2, 5, 7}); auto N = dimnameFromString("N"); auto C = dimnameFromString("C"); @@ -75,7 +71,6 @@ TEST(NamedTensorTest, attachMetadata) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NamedTensorTest, internalSetNamesInplace) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::zeros({3, 2, 5, 7}); auto N = dimnameFromString("N"); auto C = dimnameFromString("C"); diff --git a/aten/src/ATen/test/apply_utils_test.cpp b/aten/src/ATen/test/apply_utils_test.cpp index d895cc3bf1b88..0b748f5d98953 100644 --- a/aten/src/ATen/test/apply_utils_test.cpp +++ b/aten/src/ATen/test/apply_utils_test.cpp @@ -109,7 +109,6 @@ void test(DeprecatedTypeProperties& type, IntArrayRef shape, int64_t a = 0, int6 // apply utils test 2-dim small contiguous // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ApplyUtilsTest, Contiguous2D) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); test(CPU(kDouble), {2, 1}, -1, -1); } @@ -117,7 +116,6 @@ TEST(ApplyUtilsTest, Contiguous2D) { // apply utils test 2-dim small // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ApplyUtilsTest, Small2D) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); test(CPU(kDouble), {2, 1}); } @@ -125,16 +123,13 @@ TEST(ApplyUtilsTest, Small2D) { // apply utils test 2-dim // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ApplyUtilsTest, _2D) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test(CPU(kDouble), {20, 10}); } // apply utils test 3-dim // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ApplyUtilsTest, _3D) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); test(CPU(kDouble), {3, 4, 2}); } @@ -142,17 +137,13 @@ TEST(ApplyUtilsTest, _3D) { // apply utils test 3-dim medium // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ApplyUtilsTest, Medium3D) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test(CPU(kDouble), {3, 40, 2}); } // apply utils test 10-dim // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(ApplyUtilsTest, _10D) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test(CPU(kDouble), {3, 4, 2, 5, 2, 1, 3, 4, 2, 3}); } diff --git a/aten/src/ATen/test/atest.cpp b/aten/src/ATen/test/atest.cpp index c4194680cc6ad..34aff6b6f86ad 100644 --- a/aten/src/ATen/test/atest.cpp +++ b/aten/src/ATen/test/atest.cpp @@ -9,15 +9,11 @@ using namespace at; class atest : public ::testing::Test { protected: void SetUp() override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x_tensor = tensor({10, -1, 0, 1, -10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y_tensor = tensor({-10, 1, 0, -1, 10}); x_logical = tensor({1, 1, 0, 1, 0}); y_logical = tensor({0, 1, 0, 1, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x_float = tensor({2.0, 2.4, 5.6, 7.0, 36.0}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y_float = tensor({1.0, 1.1, 8.7, 10.0, 24.0}); } @@ -53,7 +49,6 @@ void unit_binary_ops_test( const Tensor& exp, ScalarType dtype, Args... args) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto out_tensor = empty({5}, dtype); func(out_tensor, x_tensor.to(dtype), y_tensor.to(dtype), args...); ASSERT_EQ(out_tensor.dtype(), dtype); @@ -101,7 +96,6 @@ void run_binary_ops_test( } void trace() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor foo = rand({12, 12}); // ASSERT foo is 2-dimensional and holds floats. @@ -117,9 +111,7 @@ void trace() { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(atest, operators) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int a = 0b10101011; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int b = 0b01111011; auto a_tensor = tensor({a}); @@ -202,14 +194,12 @@ TEST_F(atest, ne_operators) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(atest, add_operators) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto exp_tensor = tensor({-10, 1, 0, -1, 10}); run_binary_ops_test(add_out, x_tensor, y_tensor, exp_tensor, INTBOOL, 2); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(atest, max_operators) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto exp_tensor = tensor({10, 1, 0, 1, 10}); run_binary_ops_test< at::Tensor& (*)(at::Tensor&, const at::Tensor&, const at::Tensor&)>( @@ -218,7 +208,6 @@ TEST_F(atest, max_operators) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(atest, min_operators) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto exp_tensor = tensor({-10, -1, 0, -1, -10}); run_binary_ops_test< at::Tensor& (*)(at::Tensor&, const at::Tensor&, const at::Tensor&)>( @@ -227,7 +216,6 @@ TEST_F(atest, min_operators) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(atest, sigmoid_backward_operator) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto exp_tensor = tensor({-1100, 0, 0, -2, 900}); // only test with type Float run_binary_ops_test< @@ -237,7 +225,6 @@ TEST_F(atest, sigmoid_backward_operator) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(atest, fmod_tensor_operators) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto exp_tensor = tensor({0.0, 0.2, 5.6, 7.0, 12.0}); run_binary_ops_test< at::Tensor& (*)(at::Tensor&, const at::Tensor&, const at::Tensor&)>( @@ -247,10 +234,8 @@ TEST_F(atest, fmod_tensor_operators) { // TEST_CASE( "atest", "[]" ) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) 
TEST_F(atest, atest) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto foo = rand({12, 6}); ASSERT_EQ(foo.size(0), 12); diff --git a/aten/src/ATen/test/basic.cpp b/aten/src/ATen/test/basic.cpp index e0d3c3a9cdbd2..172252edca3fe 100644 --- a/aten/src/ATen/test/basic.cpp +++ b/aten/src/ATen/test/basic.cpp @@ -26,7 +26,6 @@ void TestResize(DeprecatedTypeProperties& type) { auto a = at::empty({0}, type.options()); a.resize_({3, 4}); ASSERT_EQ_RESOLVED(a.numel(), 12); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.resize_({5, 7}); ASSERT_EQ_RESOLVED(a.numel(), 35); } @@ -56,7 +55,6 @@ void TestSort(DeprecatedTypeProperties& type) { void TestRandperm(DeprecatedTypeProperties& type) { if (type.backend() != Backend::CUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor b = randperm(15, type); Tensor rv, ri; std::tie(rv, ri) = sort(b, 0); @@ -75,7 +73,6 @@ void TestAdd(DeprecatedTypeProperties& type) { Tensor b = rand({3, 4}, type); Tensor c = add(a, add(a, b)); // TODO:0-dim Tensor d(3.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Scalar d = 3.f; if (type.backend() == Backend::CPU && type.scalarType() == kHalf) { ASSERT_TRUE(add(c, d).allclose(a + a + b + d, 1e-2)); @@ -86,11 +83,8 @@ void TestAdd(DeprecatedTypeProperties& type) { void TestZeros(DeprecatedTypeProperties& type) { auto begin = std::chrono::high_resolution_clock::now(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = zeros({1024, 1024}, type); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 1; i < 1000; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a = zeros({128, 128}, type); } auto end = std::chrono::high_resolution_clock::now(); @@ -108,7 +102,6 @@ void TestLoadsOfAdds(DeprecatedTypeProperties& type) { auto begin = std::chrono::high_resolution_clock::now(); Tensor d = ones({3, 4}, type); Tensor r = zeros({3, 4}, type); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 100000; i++) { add_out(r, r, d); } @@ -126,7 +119,6 @@ void TestLoadOfAddsWithCopy(DeprecatedTypeProperties& type) { auto begin = std::chrono::high_resolution_clock::now(); Tensor d = ones({3, 4}, type); Tensor r = zeros({3, 4}, type); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 100000; i++) { r = add(r, d); } @@ -148,7 +140,6 @@ void TestIsContiguous(DeprecatedTypeProperties& type) { } void TestPermute(DeprecatedTypeProperties& type) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = rand({3, 4, 5}, type); Tensor b = a.permute({1, 2, 0}); ASSERT_TRUE(b.sizes().equals({4, 5, 3})); @@ -212,7 +203,6 @@ void TestAddingAValueWithScalar(DeprecatedTypeProperties& type) { } void TestSelect(DeprecatedTypeProperties& type) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = rand({3, 7}, type); auto a_13 = select(a, 1, 3); auto a_13_02 = select(select(a, 1, 3), 0, 2); @@ -239,7 +229,6 @@ void TestZeroDim(DeprecatedTypeProperties& type) { void TestToCFloat() { Tensor a = zeros({3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor b = ones({3, 7}); Tensor c = cat({a, b}, 1); ASSERT_EQ_RESOLVED(c.size(1), 11); @@ -248,7 +237,6 @@ void TestToCFloat() { ASSERT_EQ_RESOLVED(*e.data_ptr(), e.sum().item()); } void TestToString() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor b = ones({3, 7}) * .0000001f; std::stringstream s; s << b 
<< "\n"; @@ -257,7 +245,6 @@ void TestToString() { } void TestIndexingByScalar() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = arange(0, 10, kInt); Tensor one = ones({}, kInt); for (int64_t i = 0; i < tensor.numel(); ++i) { @@ -283,7 +270,6 @@ void TestIndexingByScalar() { } void TestIndexingByZerodimTensor() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = arange(0, 10, kInt); Tensor one = ones({}, kInt); for (int i = 0; i < tensor.numel(); ++i) { @@ -301,17 +287,13 @@ void TestIndexingByZerodimTensor() { ASSERT_ANY_THROW(tensor[ones({2, 3, 4}, kInt)].equal(one)); } void TestIndexingMixedDevice(DeprecatedTypeProperties& type) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = randn({20, 20}, type); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor index = arange(10, kLong).cpu(); Tensor result = tensor.index({index}); ASSERT_TRUE(result[0].equal(tensor[0])); } void TestDispatch() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = randn({20, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor other = randn({20, 20}); auto result = tensor.m(relu).m(mse_loss, other, at::Reduction::Mean); ASSERT_TRUE(result.allclose(mse_loss(relu(tensor), other))); @@ -322,7 +304,6 @@ void TestNegativeDim(DeprecatedTypeProperties& type) { ASSERT_ANY_THROW(empty({5, -5, 5}, type.options())); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(empty({5, -5, -5}, type.options())); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = empty({5, 5}, type.options()); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(tensor.reshape({-5, -5})); @@ -334,7 +315,6 @@ void TestView(DeprecatedTypeProperties& type) { // for details Tensor tensor = randn({3, 4}, type);; Tensor viewed = tensor.view({3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.resize_({6, 2}); ASSERT_TRUE(tensor.sizes().equals({6, 2})); ASSERT_TRUE(viewed.sizes().equals({3, 4})); @@ -382,7 +362,6 @@ void test(DeprecatedTypeProperties& type) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BasicTest, BasicTestCPU) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); test(CPU(kFloat)); @@ -390,7 +369,6 @@ TEST(BasicTest, BasicTestCPU) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BasicTest, BasicTestHalfCPU) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(234); test(CPU(kHalf)); @@ -398,7 +376,6 @@ TEST(BasicTest, BasicTestHalfCPU) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BasicTest, BasicTestCUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); if (at::hasCUDA()) { diff --git a/aten/src/ATen/test/broadcast_test.cpp b/aten/src/ATen/test/broadcast_test.cpp index 50942b27f7285..6f565d20dc7d4 100644 --- a/aten/src/ATen/test/broadcast_test.cpp +++ b/aten/src/ATen/test/broadcast_test.cpp @@ -15,9 +15,7 @@ void TestEmptyTensor(DeprecatedTypeProperties& T) { // out-place function with 2 args void TestOut2Basic(DeprecatedTypeProperties& T) { auto a = randn({3, 1}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expanded_sizes = {3, 5}; ASSERT_TRUE( (a + b).equal(a.expand(expanded_sizes) + b.expand(expanded_sizes))); @@ -26,7 +24,6 @@ 
void TestOut2Basic(DeprecatedTypeProperties& T) { // with scalar void TestOut2WithScalar(DeprecatedTypeProperties& T) { auto aScalar = ones({}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({3, 5}, T); ASSERT_TRUE( (aScalar + b).equal(aScalar.expand(b.sizes()) + b.expand(b.sizes()))); @@ -34,9 +31,7 @@ void TestOut2WithScalar(DeprecatedTypeProperties& T) { // old fallback behavior yields error void TestOut2OldFallback(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({5, 3}, T); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(a + b); @@ -44,9 +39,7 @@ void TestOut2OldFallback(DeprecatedTypeProperties& T) { // with mismatched sizes void TestOut2MismatchedSizes(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({7, 5}, T); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(a + b); @@ -56,9 +49,7 @@ void TestOut2MismatchedSizes(DeprecatedTypeProperties& T) { void TestOut3Basic(DeprecatedTypeProperties& T) { auto a = randn({3, 1, 1}, T); auto b = randn({1, 2, 1}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({1, 1, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expanded_sizes = {3, 2, 5}; ASSERT_TRUE((a + b + c).equal( a.expand(expanded_sizes) + b.expand(expanded_sizes) + @@ -69,9 +60,7 @@ void TestOut3Basic(DeprecatedTypeProperties& T) { void TestOut3WithScalar(DeprecatedTypeProperties& T) { auto aTensorScalar = ones({}, T); auto b = randn({3, 2, 1}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({1, 2, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expanded_sizes = {3, 2, 5}; ASSERT_TRUE(aTensorScalar.addcmul(b, c).equal( aTensorScalar.expand(expanded_sizes) @@ -80,11 +69,8 @@ void TestOut3WithScalar(DeprecatedTypeProperties& T) { // old fallback behavior yields error void TestOut3OldFallback(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 2, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({2, 3, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({5, 3, 2}, T); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(a.addcmul(b, c)); @@ -92,11 +78,8 @@ void TestOut3OldFallback(DeprecatedTypeProperties& T) { // with mismatched sizes void TestOut3MismatchedSizes(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 2, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({2, 3, 5}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({5, 5, 5}, T); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(a.addcmul(b, c)); @@ -104,7 +87,6 @@ void TestOut3MismatchedSizes(DeprecatedTypeProperties& T) { // in-place function with 2 args void TestIn2Basic(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 5}, T); auto b = randn({3, 1}, T); ASSERT_TRUE((a + b).equal(a + b.expand({3, 5}))); @@ -112,7 +94,6 @@ void TestIn2Basic(DeprecatedTypeProperties& T) { // with 
scalar void TestIn2WithScalar(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 5}, T); auto bScalar = ones({}, T); ASSERT_TRUE((a + bScalar).equal(a + bScalar.expand(a.sizes()))); @@ -120,7 +101,6 @@ void TestIn2WithScalar(DeprecatedTypeProperties& T) { // error: would have to expand inplace arg void TestIn2ExpandError(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({1, 5}, T); auto b = randn({3, 1}, T); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) @@ -129,10 +109,8 @@ void TestIn2ExpandError(DeprecatedTypeProperties& T) { // in-place function with 3 args void TestIn3Basic(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 5, 2}, T); auto b = randn({3, 1, 2}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({1, 5, 1}, T); auto aClone = a.clone(); ASSERT_TRUE(a.addcmul_(b, c).equal( @@ -141,10 +119,8 @@ void TestIn3Basic(DeprecatedTypeProperties& T) { // with scalar void TestIn3WithScalar(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({3, 5, 2}, T); auto b = randn({3, 1, 2}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({1, 5, 1}, T); auto aClone = a.clone(); auto bScalar = ones({}, T); @@ -155,7 +131,6 @@ void TestIn3WithScalar(DeprecatedTypeProperties& T) { // error: would have to expand inplace arg void TestIn3ExpandError(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({1, 3, 5}, T); auto b = randn({4, 1, 1}, T); auto c = randn({1, 3, 1}, T); @@ -166,9 +141,7 @@ void TestIn3ExpandError(DeprecatedTypeProperties& T) { // explicit dim specification void TestExplicitDimBasic(DeprecatedTypeProperties& T) { auto a = randn({1}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({5, 3}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({3, 7}, T); ASSERT_TRUE(a.addmm(b, c).equal(a.expand({5, 7}).addmm(b, c))); } @@ -176,9 +149,7 @@ void TestExplicitDimBasic(DeprecatedTypeProperties& T) { // with scalar void TestExplicitDimWithScalar(DeprecatedTypeProperties& T) { auto a = randn({1}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({5, 3}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({3, 7}, T); Tensor aScalar = ones({}, T); ASSERT_TRUE(aScalar.addmm(b, c).equal(aScalar.expand({5, 7}).addmm(b, c))); @@ -186,9 +157,7 @@ void TestExplicitDimWithScalar(DeprecatedTypeProperties& T) { // with mismatched sizes void TestExplicitDimWithMismatchedSizes(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = randn({5, 3}, T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = randn({3, 7}, T); auto a = randn({3, 3}, T); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) @@ -197,7 +166,6 @@ void TestExplicitDimWithMismatchedSizes(DeprecatedTypeProperties& T) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BroadcastTest, Broadcast) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); DeprecatedTypeProperties& T = CPU(kFloat); diff --git a/aten/src/ATen/test/cpu_caching_allocator_test.cpp b/aten/src/ATen/test/cpu_caching_allocator_test.cpp index ab3c6036bfe63..d6a02cafffb7b 100644 --- 
a/aten/src/ATen/test/cpu_caching_allocator_test.cpp +++ b/aten/src/ATen/test/cpu_caching_allocator_test.cpp @@ -10,11 +10,9 @@ TEST(CPUCachingAllocatorTest, check_alloc_free) { c10::CPUCachingAllocator caching_allocator; c10::WithCPUCachingAllocatorGuard cachine_allocator_guard( &caching_allocator); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::rand({23, 23}); float* data_ptr = a.data_ptr(); a.reset(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a = at::rand({23, 23}); ASSERT_TRUE(data_ptr == a.data_ptr()); } @@ -23,7 +21,6 @@ TEST(CPUCachingAllocatorTest, check_alloc_free) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CPUCachingAllocatorTest, check_alloc_outside_free_inside) { c10::CPUCachingAllocator caching_allocator; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::rand({23, 23}); { c10::WithCPUCachingAllocatorGuard cachine_allocator_guard( @@ -31,7 +28,6 @@ TEST(CPUCachingAllocatorTest, check_alloc_outside_free_inside) { // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) float* data_ptr = a.data_ptr(); a.reset(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a = at::rand({23, 23}); } } @@ -43,7 +39,6 @@ TEST(CPUCachingAllocatorTest, check_alloc_inside_free_outside) { { c10::WithCPUCachingAllocatorGuard cachine_allocator_guard( &caching_allocator); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a = at::rand({23, 23}); } a.reset(); diff --git a/aten/src/ATen/test/cpu_generator_test.cpp b/aten/src/ATen/test/cpu_generator_test.cpp index c8ed53b9be569..eaf4c6142ad92 100644 --- a/aten/src/ATen/test/cpu_generator_test.cpp +++ b/aten/src/ATen/test/cpu_generator_test.cpp @@ -93,7 +93,6 @@ TEST(CPUGeneratorImpl, TestGetSetCurrentSeed) { // See Note [Acquire lock when using random generators] auto foo = at::detail::getDefaultCPUGenerator(); std::lock_guard lock(foo.mutex()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) foo.set_current_seed(123); auto current_seed = foo.current_seed(); ASSERT_EQ(current_seed, 123); @@ -134,12 +133,9 @@ TEST(CPUGeneratorImpl, TestRNGForking) { std::lock_guard lock(default_gen.mutex()); current_gen = default_gen.clone(); // capture the current state of default generator } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target_value = at::randn({1000}); // Dramatically alter the internal state of the main generator - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = at::randn({100000}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forked_value = at::randn({1000}, current_gen); ASSERT_EQ(target_value.sum().item(), forked_value.sum().item()); } @@ -168,14 +164,11 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineOffset1) { // make another engine increment to until the // first 8 values. Assert that the first call // of engine2 and the 9th call of engine1 are equal. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine1(123, 1, 0); // Note: offset is a multiple of 4. // So if you want to skip 8 values, offset would // be 2, since 2*4=8. 
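Editor's note (illustrative, not part of the patch): the Philox offset tests nearby rely on the comment that the offset is counted in 128-bit counter steps, each of which yields four 32-bit outputs, so skipping 8 values means an offset of 2. A minimal sketch of that arithmetic follows; `philox_offset_for_skipped_values` is a hypothetical helper, not an ATen API.

```cpp
#include <cstdint>
#include <cassert>

// Each Philox4x32 counter state produces 4 32-bit outputs, so skipping
// ahead by `values` outputs corresponds to advancing the counter by
// values / 4 states (mirrors the "2 * 4 = 8" comment in the test above).
constexpr uint64_t philox_offset_for_skipped_values(uint64_t values) {
  return values / 4;  // offsets are expressed in whole counter steps
}

int main() {
  assert(philox_offset_for_skipped_values(8) == 2);   // as in engine2(123, 1, 2)
  assert(philox_offset_for_skipped_values(40) == 10);
  return 0;
}
```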
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine2(123, 1, 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for(int i = 0; i < 8; i++){ // Note: instead of using the engine() call 8 times // we could have achieved the same functionality by @@ -194,9 +187,7 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineOffset2) { // make engine2 skip to the 2^64th 128 bit while being at 2^64th thread // Assert that engine2 should be increment_val+1 steps behind engine1. unsigned long long increment_val = std::numeric_limits::max(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine1(123, 0, increment_val); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine2(123, increment_val, increment_val); engine2.incr_n(increment_val); @@ -213,9 +204,7 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineOffset3) { // start engine2 at thread 1, with offset 0 // Assert that engine1 is 1 step behind engine2. unsigned long long increment_val = std::numeric_limits::max(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine1(123, 0, increment_val); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine2(123, 1, 0); engine1.incr(); ASSERT_EQ(engine1(), engine2()); @@ -227,9 +216,7 @@ TEST(CPUGeneratorImpl, TestPhiloxEngineIndex) { // Tests if thread indexing is working properly. // create two engines with different thread index but same offset. // Assert that the engines have different sequences. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine1(123456, 0, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Philox4_32_10 engine2(123456, 1, 4); ASSERT_NE(engine1(), engine2()); } @@ -247,17 +234,13 @@ TEST(CPUGeneratorImpl, TestMT19937EngineReproducibility) { // test with zero seed at::mt19937 engine1(0); std::mt19937 engine2(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for(int i = 0; i < 10000; i++) { ASSERT_EQ(engine1(), engine2()); } // test with large seed - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) engine1 = at::mt19937(2147483647); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) engine2 = std::mt19937(2147483647); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for(int i = 0; i < 10000; i++) { ASSERT_EQ(engine1(), engine2()); } @@ -267,7 +250,6 @@ TEST(CPUGeneratorImpl, TestMT19937EngineReproducibility) { auto seed = rd(); engine1 = at::mt19937(seed); engine2 = std::mt19937(seed); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for(int i = 0; i < 10000; i++) { ASSERT_EQ(engine1(), engine2()); } diff --git a/aten/src/ATen/test/cpu_profiling_allocator_test.cpp b/aten/src/ATen/test/cpu_profiling_allocator_test.cpp index 60ba94f642d90..d053bd9751a8d 100644 --- a/aten/src/ATen/test/cpu_profiling_allocator_test.cpp +++ b/aten/src/ATen/test/cpu_profiling_allocator_test.cpp @@ -34,14 +34,11 @@ at::Tensor run_with_control_flow( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CPUAllocationPlanTest, with_control_flow) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::rand({23, 16, 16, 16}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor conv_weight = at::rand({16, 16, 3, 3}); // output shape // 23, 16, 14, 14 // Flattened shape = 23, 3136 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor linear_weight = at::rand({32, 3136}); at::Tensor output, ref_output; 
std::vector pointers; @@ -66,7 +63,6 @@ TEST(CPUAllocationPlanTest, with_control_flow) { run_with_control_flow(a, conv_weight, linear_weight, record_mode, pointers); } bool success{true}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (uint64_t i = 0; i < 10; ++i) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) bool validation_success; @@ -88,14 +84,11 @@ TEST(CPUAllocationPlanTest, with_control_flow) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CPUAllocationPlanTest, with_profiling_alloc) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::rand({23, 16, 16, 16}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor conv_weight = at::rand({16, 16, 3, 3}); // output shape // 23, 16, 14, 14 // Flattened shape = 23, 3136 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor linear_weight = at::rand({32, 3136}); at::Tensor output, ref_output; std::vector pointers; @@ -141,7 +134,6 @@ TEST(CPUAllocationPlanTest, with_profiling_alloc) { validate_pointers, false); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (uint64_t i = 0; i < 10; ++i) { { c10::WithProfilingAllocatorGuard @@ -184,14 +176,12 @@ TEST(CPUAllocationPlanTest, with_profiling_alloc) { int main(int argc, char* argv[]) { // Setting the priority high to make sure no other allocator gets used instead of this. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SetCPUAllocator(c10::GetDefaultMobileCPUAllocator(), /*priority*/ 100); // Need to disable mkldnn for this test since it allocatred memory // via raw_allocate inteface which requires context pointer and raw // pointer to be the same. Tis is not true for mobile allocator. at::globalContext().setUserEnabledMkldnn(false); ::testing::InitGoogleTest(&argc, argv); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::manual_seed(42); return RUN_ALL_TESTS(); } diff --git a/aten/src/ATen/test/cpu_rng_test.cpp b/aten/src/ATen/test/cpu_rng_test.cpp index f42a3cd89e26e..156fabb8f3829 100644 --- a/aten/src/ATen/test/cpu_rng_test.cpp +++ b/aten/src/ATen/test/cpu_rng_test.cpp @@ -208,7 +208,6 @@ TEST_F(RNGTest, Normal) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = torch::empty({10}); actual.normal_(mean, std, gen); @@ -224,9 +223,7 @@ TEST_F(RNGTest, Normal_float_Tensor_out) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = torch::empty({10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::normal_out(actual, mean, torch::full({10}, std), gen); auto expected = torch::empty_like(actual); @@ -241,9 +238,7 @@ TEST_F(RNGTest, Normal_Tensor_float_out) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = torch::empty({10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::normal_out(actual, torch::full({10}, mean), std, gen); auto expected = torch::empty_like(actual); @@ -258,9 +253,7 @@ TEST_F(RNGTest, Normal_Tensor_Tensor_out) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = torch::empty({10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::normal_out(actual, torch::full({10}, mean), torch::full({10}, std), gen); auto expected = 
torch::empty_like(actual); @@ -275,7 +268,6 @@ TEST_F(RNGTest, Normal_float_Tensor) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = at::normal(mean, torch::full({10}, std), gen); auto expected = torch::empty_like(actual); @@ -290,7 +282,6 @@ TEST_F(RNGTest, Normal_Tensor_float) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = at::normal(torch::full({10}, mean), std, gen); auto expected = torch::empty_like(actual); @@ -305,7 +296,6 @@ TEST_F(RNGTest, Normal_Tensor_Tensor) { const auto std = 67.89; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = at::normal(torch::full({10}, mean), torch::full({10}, std), gen); auto expected = torch::empty_like(actual); @@ -358,7 +348,6 @@ TEST_F(RNGTest, LogNormal) { const auto std = 6.789; auto gen = at::make_generator(MAGIC_NUMBER); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = torch::empty({10}); actual.log_normal_(mean, std, gen); diff --git a/aten/src/ATen/test/dlconvertor_test.cpp b/aten/src/ATen/test/dlconvertor_test.cpp index bbac7446bdd3d..91d3e9d4b5061 100644 --- a/aten/src/ATen/test/dlconvertor_test.cpp +++ b/aten/src/ATen/test/dlconvertor_test.cpp @@ -11,7 +11,6 @@ using namespace at; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestDlconvertor, TestDlconvertor) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); Tensor a = rand({3, 4}); @@ -24,7 +23,6 @@ TEST(TestDlconvertor, TestDlconvertor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestDlconvertor, TestDlconvertorNoStrides) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); Tensor a = rand({3, 4}); diff --git a/aten/src/ATen/test/extension_backend_test.cpp b/aten/src/ATen/test/extension_backend_test.cpp index 2472840903044..f904b80d3b4e5 100644 --- a/aten/src/ATen/test/extension_backend_test.cpp +++ b/aten/src/ATen/test/extension_backend_test.cpp @@ -50,7 +50,6 @@ TORCH_LIBRARY_IMPL(aten, MSNPU, m) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BackendExtensionTest, TestRegisterOp) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = empty({5, 5}, at::kMSNPU); ASSERT_EQ(a.device().type(), at::kMSNPU); ASSERT_EQ(a.device().index(), 1); @@ -66,7 +65,6 @@ TEST(BackendExtensionTest, TestRegisterOp) { ASSERT_EQ(test_int, 2); // Ensure that non-MSNPU operator still works - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor d = empty({5, 5}, at::kCPU); ASSERT_EQ(d.device().type(), at::kCPU); } diff --git a/aten/src/ATen/test/half_test.cpp b/aten/src/ATen/test/half_test.cpp index 4c02d170f52c1..e875ff7b6016e 100644 --- a/aten/src/ATen/test/half_test.cpp +++ b/aten/src/ATen/test/half_test.cpp @@ -42,7 +42,6 @@ TEST(TestHalf, Comparisions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestHalf, Cast) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Half value = 1.5f; ASSERT_EQ((int)value, 1); ASSERT_EQ((short)value, 1); @@ -126,7 +125,6 @@ ASSERT_SAME_TYPE(tinyness_before); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestHalf, CommonMath) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float threshold = 0.00001; assert(std::abs(std::lgamma(Half(10.0)) - 
std::lgamma(10.0f)) <= threshold); assert(std::abs(std::exp(Half(1.0)) - std::exp(1.0f)) <= threshold); diff --git a/aten/src/ATen/test/ivalue_test.cpp b/aten/src/ATen/test/ivalue_test.cpp index 3ea5ae0484604..07ac45bf07644 100644 --- a/aten/src/ATen/test/ivalue_test.cpp +++ b/aten/src/ATen/test/ivalue_test.cpp @@ -16,7 +16,6 @@ namespace c10 { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IValueTest, Basic) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::List foo({3, 4, 5}); ASSERT_EQ(foo.use_count(), 1); IValue bar{foo}; @@ -28,7 +27,6 @@ TEST(IValueTest, Basic) { ASSERT_TRUE(foo2.isIntList()); // NOLINTNEXTLINE(bugprone-use-after-move,clang-analyzer-cplusplus.Move) ASSERT_TRUE(bar.isNone()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) foo2 = IValue(4.0); ASSERT_TRUE(foo2.isDouble()); ASSERT_EQ(foo2.toDouble(), 4.0); @@ -42,18 +40,15 @@ TEST(IValueTest, Basic) { IValue i(4); ASSERT_TRUE(i.isInt()); ASSERT_EQ(i.toInt(), 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IValue dlist(c10::List({3.5})); ASSERT_TRUE(dlist.isDoubleList()); ASSERT_TRUE(dlist.toDoubleVector() == std::vector({3.5})); std::move(dlist).toDoubleList(); // NOLINTNEXTLINE(bugprone-use-after-move) ASSERT_TRUE(dlist.isNone()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dlist = IValue(c10::List({3.4})); ASSERT_TRUE(dlist.toDoubleVector() == std::vector({3.4})); IValue the_list( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::ivalue::Tuple::create({IValue(3.4), IValue(4), IValue(foo)})); ASSERT_EQ(foo.use_count(), 3); ASSERT_TRUE(the_list.isTuple()); @@ -70,7 +65,6 @@ TEST(IValueTest, Basic) { auto elem1 = c10::complex(3, 4); auto elem2 = c10::complex(3, -4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto elem3 = c10::complex(5, 0); c10::List> foo1({elem1, elem2, elem3}); ASSERT_EQ(foo1.use_count(), 1); @@ -91,7 +85,6 @@ TEST(IValueTest, Basic) { ASSERT_TRUE(baz1.toComplexDoubleVector() == std::vector>({elem1, elem2, elem3})); IValue complex_tuple( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::ivalue::Tuple::create({IValue(c10::complex(3.4, 4.7)), IValue(foo1)})); ASSERT_TRUE(complex_tuple.isTuple()); ASSERT_EQ(complex_tuple.toTuple()->elements()[0].toComplexDouble(), c10::complex(3.4, 4.7)); @@ -102,9 +95,7 @@ TEST(IValueTest, Basic) { TEST(IValueTest, ComplexDict) { typedef c10::complex c_type; c10::Dict m; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto num1 = c_type(2.3, -3.5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto num2 = c_type(0, 5); m.insert(num1, 2 * num1); m.insert(num2, 2 * num2); @@ -113,15 +104,11 @@ TEST(IValueTest, ComplexDict) { ASSERT_EQ(m_.at(num1), 2 * num1); ASSERT_EQ(m_.at(num2), 2 * num2); } -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static std::array makeSampleIValues() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return { at::rand({3, 4}), "hello", 42, true, 1.5 }; } -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static std::array makeMoreSampleIValues() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return { at::rand({3, 4}), "goodbye", 23, false, 0.5 }; } @@ -211,7 +198,6 @@ TEST(IValueTest, MoveAssign) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IValueTest, Tuple) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::tuple t = std::make_tuple(123, at::randn({1})); auto iv = IValue(t); auto t_ = iv.to>(); @@ -299,7 
+285,6 @@ TEST(IValueTest, BasicFuture) { auto f1 = c10::make_intrusive(IntType::get()); ASSERT_FALSE(f1->completed()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) f1->markCompleted(IValue(42)); ASSERT_TRUE(f1->completed()); ASSERT_EQ(42, f1->value().toInt()); @@ -317,7 +302,6 @@ TEST(IValueTest, FutureCallbacks) { ASSERT_EQ(f2.value().toInt(), 43); ++calledTimesA; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) f2->markCompleted(IValue(43)); ASSERT_EQ(calledTimesA, 1); ASSERT_EQ(calledTimesB, 0); @@ -555,7 +539,6 @@ TEST(IValueTest, EnumEquality) { TEST(IValueTest, isPtrType) { IValue tensor(at::rand({3, 4})); IValue undefinedTensor((at::Tensor())); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IValue integer(42); IValue str("hello"); @@ -638,7 +621,6 @@ TEST(IValueTest, IdentityComparisonAndHashing) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IValueTest, getSubValues) { // Scalars have no subvalues. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IValue integer(42), float_(1.5), complex(c10::complex(2, 3)); IValue::HashAliasedIValues subvalues; diff --git a/aten/src/ATen/test/math_kernel_test.cpp b/aten/src/ATen/test/math_kernel_test.cpp index da3f1833d0616..005c11cb0eaa8 100644 --- a/aten/src/ATen/test/math_kernel_test.cpp +++ b/aten/src/ATen/test/math_kernel_test.cpp @@ -4,7 +4,6 @@ using namespace at; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bool allClose(const at::Tensor& t1, const at::Tensor& t2, double rtol=1e-5, double atol=1e-8) { if (!t1.is_same_size(t2)) { std::cerr << "Difference in tensor shapes: " @@ -27,7 +26,6 @@ bool allClose(const at::Tensor& t1, const at::Tensor& t2, double rtol=1e-5, doub // and rely on backward tests of each at:: function used in math kernels. 
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MathKernelTest, NativeGroupNorm) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_channels = 6; int N = 2; int H = 2, W = 2; @@ -36,7 +34,6 @@ TEST(MathKernelTest, NativeGroupNorm) { const auto input = randn({N, num_channels, H, W}); const auto weight = randn({num_channels}); const auto bias = randn({num_channels}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; for (bool undef_weight: {true, false}) { for (int num_groups: {3, 6, 1}) { @@ -61,12 +58,10 @@ TEST(MathKernelTest, NativeLayerNorm) { // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) const auto input_ndim = input.dim(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; for (bool undef_weight: {true, false}) { for (int normalized_size: {2, 3}) { Tensor undef; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector normalized_shape(normalized_size, 10); const auto weight = rand(normalized_shape); const auto bias = rand(normalized_shape); @@ -117,7 +112,6 @@ TEST(MathKernelTest, SiluBackward) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MathKernelTest, NarrowCopy) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = rand({5, 8, 7}); for (int64_t dim = 0; dim < 3; ++dim) { const int64_t start = 1, length = 4; @@ -136,8 +130,6 @@ TEST(MathKernelTest, Bmm) { EXPECT_THROW(auto z = at::bmm(x, y), std::exception); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_bmm(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_bmm(1000); } diff --git a/aten/src/ATen/test/memory_format_test.cpp b/aten/src/ATen/test/memory_format_test.cpp index 7795cd3b0c39a..2b0c8f13c95e4 100644 --- a/aten/src/ATen/test/memory_format_test.cpp +++ b/aten/src/ATen/test/memory_format_test.cpp @@ -29,18 +29,15 @@ TEST(MemoryFormatTest, SetMemoryFormat) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemoryFormatTest, TransposeMemoryFormat) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor t = at::rand({2, 3, 4, 5}); EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); t.transpose_(1, 3); EXPECT_TRUE(t.suggest_memory_format() != at::MemoryFormat::ChannelsLast); t.transpose_(2, 3); EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::ChannelsLast); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = at::rand({2, 3, 4, 5}); t.transpose_(1, 2); EXPECT_TRUE(t.suggest_memory_format() != at::MemoryFormat::ChannelsLast); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = at::rand({2, 3, 4, 5}); t.transpose_(2, 3); EXPECT_TRUE(t.suggest_memory_format() != at::MemoryFormat::ChannelsLast); diff --git a/aten/src/ATen/test/mobile_memory_cleanup.cpp b/aten/src/ATen/test/mobile_memory_cleanup.cpp index dbc181a4fbc72..4f102d6945cb6 100644 --- a/aten/src/ATen/test/mobile_memory_cleanup.cpp +++ b/aten/src/ATen/test/mobile_memory_cleanup.cpp @@ -10,9 +10,7 @@ using namespace torch::jit; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemoryCleanUp, NoErrorWithoutRelease) { Module m("m"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("weight", torch::ones({20, 1, 5, 5}), false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("bias", torch::ones({20}), false); m.define(R"( def forward(self, input): @@ -29,9 +27,7 @@ TEST(MemoryCleanUp, 
NoErrorWithoutRelease) { TEST(MemoryCleanUp, UnpackError) { at::globalContext().setReleaseWeightsWhenPrepacking(true); Module m("m"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("weight", torch::ones({20, 1, 5, 5}), false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("bias", torch::ones({20}), false); m.define(R"( def forward(self, input): diff --git a/aten/src/ATen/test/native_test.cpp b/aten/src/ATen/test/native_test.cpp index f9ce9f36d7dd6..4b9d965edfd42 100644 --- a/aten/src/ATen/test/native_test.cpp +++ b/aten/src/ATen/test/native_test.cpp @@ -145,12 +145,10 @@ void TestMatmul(TensorOptions T, Tensor& t, TensorOptions AccT) { ASSERT_ALLCLOSE(d1o.matmul(d2), d1o.unsqueeze(0).mm(d2).squeeze(0)); // 2-d - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto d2o = randn({3, 5}, T); ASSERT_ALLCLOSE(d2.matmul(d2o), d2.mm(d2o)); // > 2-d, 1-d - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto d3 = randn({5, 2, 3}, T); ASSERT_ALLCLOSE( d3.matmul(d1), d3.bmm(d1.view({1, 3, 1}).expand({5, 3, 1})).view({5, 2})); @@ -171,9 +169,7 @@ void TestMatmul(TensorOptions T, Tensor& t, TensorOptions AccT) { // comparison to bmm doesn't work; instead, compare to the higher precision // computation (technically, we should always do this). Tolerances are // selected empirically. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double atol = 1e-04; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double rtol = 1e-06; d2 = randn({3, 4}, T); d2o = randn({4, 2}, T); @@ -181,9 +177,7 @@ void TestMatmul(TensorOptions T, Tensor& t, TensorOptions AccT) { auto d5Acc = d5.to(AccT); auto d2Acc = d2.to(AccT); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto acc_result = d5Acc.view({24, 2, 3}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .bmm(d2Acc.expand({24, 3, 4})) .view({3, 2, 4, 2, 4}); ASSERT_ALLCLOSE_TOLERANCES(result, acc_result, atol, rtol); @@ -194,10 +188,8 @@ void TestMatmul(TensorOptions T, Tensor& t, TensorOptions AccT) { // > 2-d, > 2-d auto d5o = randn({2, 1, 2, 4, 3, 2}, T); auto d5_bmm_view = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d5.expand({2, 3, 2, 4, 2, 3}).contiguous().view({48, 2, 3}); auto d5o_bmm_view = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d5o.expand({2, 3, 2, 4, 3, 2}).contiguous().view({48, 3, 2}); ASSERT_ALLCLOSE( d5.matmul(d5o), d5_bmm_view.bmm(d5o_bmm_view).view({2, 3, 2, 4, 2, 2})); @@ -215,9 +207,7 @@ void TestStandardGammaGrad(TensorOptions T, Tensor& t) { ASSERT_EQUAL(empty, at::_standard_gamma_grad(empty, empty)); // check scalar equals one element - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto one_scalar = ones({}, T).mul(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto one_with_dim = ones({1}, T).mul(5); ASSERT_ALLCLOSE( at::_standard_gamma_grad(one_scalar, one_scalar), @@ -239,9 +229,7 @@ void TestWhere(TensorOptions T, Tensor& t) { ASSERT_EQUAL(empty, at::where(empty_byte, empty, empty)); // check scalar equals one element - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x_scalar = ones({}, T).mul(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_scalar = ones({}, T).mul(7); auto cond_scalar = zeros({}, bT); auto x_1d = x_scalar.unsqueeze(0); @@ -265,7 +253,6 @@ void test(TensorOptions T, TensorOptions AccT) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestNative, NativeTestCPU) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); test(at::device(kCPU).dtype(kFloat), @@ -274,7 +261,6 @@ TEST(TestNative, NativeTestCPU) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestNative, NativeTestGPU) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); if (at::hasCUDA()) { diff --git a/aten/src/ATen/test/quantized_test.cpp b/aten/src/ATen/test/quantized_test.cpp index 64b55a484f5ec..a92720a282827 100644 --- a/aten/src/ATen/test/quantized_test.cpp +++ b/aten/src/ATen/test/quantized_test.cpp @@ -15,7 +15,6 @@ using namespace at; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestQTensor, QuantDequantAPIs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto num_elements = 10; Tensor r = at::ones({num_elements}); const double scale = 1.0; @@ -55,7 +54,6 @@ TEST(TestQTensor, QuantDequantAPIs) { } // Check for correct requantization - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double new_scale = 2.0; int64_t new_zero_point = 1; Tensor reqr = at::quantize_per_tensor(r, new_scale, new_zero_point, kQInt8); @@ -76,13 +74,10 @@ TEST(TestQTensor, RoundingMode) { // We assume that quantization is defined as: // qx = clamp(zero_point + round(x / scale)) // If the zero_point is added before rounding, the result will be wrong. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int32_t zero_point = 5; std::vector x_values{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.5, -4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5}; std::vector qx_expect{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0, 1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11}; // scale = 1.0 Tensor x = from_blob(x_values.data(), x_values.size()); @@ -106,13 +101,9 @@ TEST(TestQTensor, Item) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestQTensor, EmptyQuantized) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int zero_point = 10; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int val = 100; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int numel = 10; Tensor q = at::_empty_affine_quantized( {numel}, at::device(at::kCPU).dtype(kQUInt8), scale, zero_point); @@ -132,12 +123,9 @@ TEST(TestQTensor, EmptyQuantized) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestQTensor, EmptyPerchannelQuantized) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int numel = 10; auto scales = rand({numel}).toType(kDouble); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto zero_points = randint(10, {10}).toType(kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int val = 100; int ch_axis = 0; Tensor q = at::_empty_per_channel_affine_quantized( @@ -164,10 +152,8 @@ TEST(TestQTensor, EmptyPerchannelQuantized) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestQTensor, QuantizePerChannel4d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int C = 64, H = 10, W = 10; auto scales = rand({C}).toType(kDouble); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto zero_points = randint(10, {C}).toType(kLong); int ch_axis = 1; // create 4d tensor where each H x W image is a range(0, H*W) @@ -196,10 +182,8 @@ TEST(TestQTensor, QuantizePerChannel4d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) 
TEST(TestQTensor, QuantizePerChannel4dChannelsLast) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int C = 64, H = 10, W = 10; auto scales = rand({C}).toType(kDouble); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto zero_points = randint(10, {C}).toType(kLong); int ch_axis = 1; // create 4d tensor where each H x W image is a range(0, H*W) diff --git a/aten/src/ATen/test/scalar_tensor_test.cpp b/aten/src/ATen/test/scalar_tensor_test.cpp index b799307ddec6b..afdced9bd1705 100644 --- a/aten/src/ATen/test/scalar_tensor_test.cpp +++ b/aten/src/ATen/test/scalar_tensor_test.cpp @@ -285,14 +285,12 @@ void test(DeprecatedTypeProperties &T) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestScalarTensor, TestScalarTensorCPU) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); test(CPU(kFloat)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestScalarTensor, TestScalarTensorCUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); if (at::hasCUDA()) { diff --git a/aten/src/ATen/test/scalar_test.cpp b/aten/src/ATen/test/scalar_test.cpp index 400dbd23052de..7e07b0d801ae4 100644 --- a/aten/src/ATen/test/scalar_test.cpp +++ b/aten/src/ATen/test/scalar_test.cpp @@ -37,7 +37,6 @@ void test_overflow() { ASSERT_EQ(s1.toFloat(), static_cast(M_PI)); s1.toHalf(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 = Scalar(100000); ASSERT_EQ(s1.toFloat(), 100000.0); ASSERT_EQ(s1.toInt(), 100000); @@ -58,12 +57,9 @@ void test_overflow() { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestScalar, TestScalar) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Scalar what = 257; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Scalar bar = 3.0; Half h = bar.toHalf(); Scalar h2 = h; @@ -92,13 +88,9 @@ TEST(TestScalar, TestScalar) { ASSERT_EQ(t.strides()[1], 1); TensorOptions options = dtype(kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = randn({1, 10}, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor prev_h = randn({1, 20}, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor W_h = randn({20, 20}, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor W_x = randn({20, 10}, options); Tensor i2h = at::mm(W_x, x.t()); Tensor h2h = at::mm(W_h, prev_h.t()); @@ -148,11 +140,8 @@ TEST(TestScalar, TestScalar) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestScalar, TestConj) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Scalar int_scalar = 257; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Scalar float_scalar = 3.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Scalar complex_scalar = c10::complex(2.3, 3.5); ASSERT_EQ(int_scalar.conj().toInt(), 257); diff --git a/aten/src/ATen/test/tensor_interop_test.cpp b/aten/src/ATen/test/tensor_interop_test.cpp index 943a88a11e140..7fc5ca3c74a96 100644 --- a/aten/src/ATen/test/tensor_interop_test.cpp +++ b/aten/src/ATen/test/tensor_interop_test.cpp @@ -9,14 +9,12 @@ TEST(Caffe2ToPytorch, SimpleLegacy) { caffe2::Tensor c2_tensor(caffe2::CPU); c2_tensor.Resize(4, 4); auto data = c2_tensor.mutable_data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 16; i++) { data[i] = i; } at::Tensor 
at_tensor(c2_tensor); auto it = at_tensor.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 16; i++) { ASSERT_EQ(it[i], i); } @@ -26,14 +24,12 @@ TEST(Caffe2ToPytorch, SimpleLegacy) { TEST(Caffe2ToPytorch, Simple) { caffe2::Tensor c2_tensor = caffe2::empty({4, 4}, at::kLong); auto data = c2_tensor.mutable_data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 16; i++) { data[i] = i; } at::Tensor at_tensor(c2_tensor); auto it = at_tensor.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 16; i++) { ASSERT_EQ(it[i], i); } @@ -44,11 +40,9 @@ TEST(Caffe2ToPytorch, ExternalData) { caffe2::Tensor c2_tensor = caffe2::empty({4, 4}, at::kLong); // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers) int64_t buf[16]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 16; i++) { buf[i] = i; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.ShareExternalPointer(buf, 16 * sizeof(int64_t)); // If the buffer is allocated externally, we can still pass tensor around, @@ -57,7 +51,6 @@ TEST(Caffe2ToPytorch, ExternalData) { at_tensor.permute({1, 0}); at_tensor.permute({1, 0}); auto it = at_tensor.data_ptr(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 16; i++) { ASSERT_EQ(it[i], i); } @@ -71,7 +64,6 @@ TEST(Caffe2ToPytorch, Op) { caffe2::Tensor c2_tensor(caffe2::CPU); c2_tensor.Resize(3, 3); auto data = c2_tensor.mutable_data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 9; i++) { data[i] = i; } @@ -119,10 +111,8 @@ TEST(Caffe2ToPytorch, PartiallyInitialized) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Caffe2ToPytorch, MutualResizes) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) caffe2::Tensor c2_tensor = caffe2::empty({5, 5}, at::kFloat); auto data = c2_tensor.mutable_data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 25; i++) { data[i] = 0; } @@ -130,20 +120,16 @@ TEST(Caffe2ToPytorch, MutualResizes) { at::Tensor at_tensor(c2_tensor); // change is visible - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at_tensor[0][0] = 123; ASSERT_EQ(c2_tensor.mutable_data()[0], 123); // resize PT tensor in smaller direction - storage is preserved at_tensor.resize_({4, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[1] = 234; ASSERT_EQ(at_tensor[0][1].item().to(), 234); // resize PT tensor in larger direction - storage is preserved - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at_tensor.resize_({6, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[2] = 345; ASSERT_EQ(at_tensor[0][2].item().to(), 345); ASSERT_EQ(c2_tensor.sizes()[0], 6); @@ -151,9 +137,7 @@ TEST(Caffe2ToPytorch, MutualResizes) { // resize Caffe2 tensor - semantics are to NOT preserve the data, but the // TensorImpl is still shared - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.Resize(7, 7); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[3] = 456; ASSERT_EQ(at_tensor[0][3].item().to(), 456); ASSERT_EQ(at_tensor.sizes()[0], 7); @@ -165,11 +149,8 @@ TEST(PytorchToCaffe2, Op) { caffe2::Workspace workspace; caffe2::NetDef net; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_a = 
at::ones({5, 5}, at::dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_b = at::ones({5, 5}, at::dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_c = at::ones({5, 5}, at::dtype(at::kFloat)); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -194,11 +175,9 @@ TEST(PytorchToCaffe2, Op) { workspace.RunNetOnce(net); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = XBlobGetMutableTensor(workspace.CreateBlob("d"), {5, 5}, at::kCPU); auto it = result.data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 25; i++) { ASSERT_EQ(it[i], 3.0); } @@ -211,9 +190,7 @@ TEST(PytorchToCaffe2, SharedStorageRead) { caffe2::Workspace workspace; caffe2::NetDef net; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_a = at::ones({5, 5}, at::dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_b = at_tensor_a.view({5, 5}); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -231,10 +208,8 @@ TEST(PytorchToCaffe2, SharedStorageRead) { workspace.RunNetOnce(net); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = XBlobGetMutableTensor(workspace.CreateBlob("c"), {5, 5}, at::kCPU); auto it = result.data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 25; i++) { ASSERT_EQ(it[i], 2.0); } @@ -244,16 +219,13 @@ TEST(PytorchToCaffe2, SharedStorageRead) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(PytorchToCaffe2, SharedStorageWrite) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_a = at::ones({5, 5}, at::dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor_b = at_tensor_a.view({25}); caffe2::Tensor c2_tensor_a(at_tensor_a); caffe2::Tensor c2_tensor_b(at_tensor_b); // change is visible everywhere - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor_a.mutable_data()[1] = 123; ASSERT_EQ(c2_tensor_b.mutable_data()[1], 123); ASSERT_EQ(at_tensor_a[0][1].item().to(), 123); @@ -262,26 +234,21 @@ TEST(PytorchToCaffe2, SharedStorageWrite) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(PytorchToCaffe2, MutualResizes) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor = at::ones({5, 5}, at::dtype(at::kFloat)); caffe2::Tensor c2_tensor(at_tensor); // change is visible - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[0] = 123; ASSERT_EQ(at_tensor[0][0].item().to(), 123); // resize PT tensor in smaller direction - storage is preserved at_tensor.resize_({4, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[1] = 234; ASSERT_EQ(at_tensor[0][1].item().to(), 234); // resize PT tensor in larger direction - storage is preserved - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at_tensor.resize_({6, 6}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[2] = 345; ASSERT_EQ(at_tensor[0][2].item().to(), 345); ASSERT_EQ(c2_tensor.sizes()[0], 6); @@ -289,9 +256,7 @@ TEST(PytorchToCaffe2, MutualResizes) { // resize Caffe2 tensor - semantics are to NOT preserve the data, but the // TensorImpl is still shared - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.Resize(7, 7); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[3] = 456; 
ASSERT_EQ(at_tensor[0][3].item().to(), 456); ASSERT_EQ(at_tensor.sizes()[0], 7); @@ -300,13 +265,11 @@ TEST(PytorchToCaffe2, MutualResizes) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(PytorchToCaffe2, Strided) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor = at::ones({5, 5}, at::dtype(at::kFloat)).t(); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(caffe2::Tensor c2_tensor(at_tensor)); // but calling contiguous is fine caffe2::Tensor c2_tensor(at_tensor.contiguous()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 25; i++) { ASSERT_EQ(c2_tensor.data()[i], 1.0); } @@ -314,13 +277,11 @@ TEST(PytorchToCaffe2, Strided) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(PytorchToCaffe2, InplaceStrided) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto at_tensor = at::zeros({2, 5}, at::dtype(at::kFloat)); caffe2::Tensor c2_tensor(at_tensor); ASSERT_EQ(c2_tensor.sizes()[0], 2); ASSERT_EQ(c2_tensor.sizes()[1], 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c2_tensor.mutable_data()[1] = 234; ASSERT_EQ(at_tensor[0][1].item().to(), 234); diff --git a/aten/src/ATen/test/tensor_iterator_test.cpp b/aten/src/ATen/test/tensor_iterator_test.cpp index 8d2ed288dad91..16d7013acc46c 100644 --- a/aten/src/ATen/test/tensor_iterator_test.cpp +++ b/aten/src/ATen/test/tensor_iterator_test.cpp @@ -13,7 +13,6 @@ using namespace at; TEST(TensorIteratorTest, CPUScalar) { if (!at::hasCUDA()) return; Tensor out; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = at::randn({5, 5}, kCUDA); auto y = at::ones(1, kCPU).squeeze(); auto iter = TensorIterator::binary_op(out, x, y); @@ -26,7 +25,6 @@ TEST(TensorIteratorTest, CPUScalar) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIteratorTest, CPUScalarInputs) { if (!at::hasCUDA()) return; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor out = at::empty({5, 5}, kCUDA); auto x = at::ones(1, kCPU).squeeze(); auto y = at::ones(1, kCPU).squeeze(); @@ -39,9 +37,7 @@ TEST(TensorIteratorTest, CPUScalarInputs) { TEST(TensorIteratorTest, MixedDevices) { if (!at::hasCUDA()) return; Tensor out; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = at::randn({5, 5}, kCUDA); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = at::ones({5}, kCPU); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_ANY_THROW(TensorIterator::binary_op(out, x, y)); @@ -49,13 +45,10 @@ TEST(TensorIteratorTest, MixedDevices) { Tensor random_tensor_for_type(at::ScalarType scalar_type) { if (at::isFloatingType(scalar_type)) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::randn({5, 5}, at::device(kCPU).dtype(scalar_type)); } else if (scalar_type == kBool) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::randint(0, 2, {5, 5}, at::device(kCPU).dtype(scalar_type)); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::randint(1, 10, {5, 5}, at::device(kCPU).dtype(scalar_type)); } } @@ -176,7 +169,6 @@ AT_FORALL_SCALAR_TYPES_AND(Bool, COMPARISON_TEST_ITER_FOR_TYPE) TEST(TensorIteratorTest, SerialLoopSingleThread) { std::thread::id thread_id = std::this_thread::get_id(); Tensor out; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = at::zeros({50000}, at::TensorOptions(kCPU).dtype(kInt)); auto iter = 
TensorIterator::unary_op(out, x); at::native::cpu_serial_kernel(iter, [=](int a) -> int { diff --git a/aten/src/ATen/test/test_parallel.cpp b/aten/src/ATen/test/test_parallel.cpp index a766e6abe865e..a85540f240d1a 100644 --- a/aten/src/ATen/test/test_parallel.cpp +++ b/aten/src/ATen/test/test_parallel.cpp @@ -13,7 +13,6 @@ using namespace at; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestParallel, TestParallel) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); set_num_threads(1); @@ -30,11 +29,9 @@ TEST(TestParallel, TestParallel) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestParallel, NestedParallel) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor a = ones({1024, 1024}); auto expected = a.sum(); // check that calling sum() from within a parallel block computes the same result - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::parallel_for(0, 10, 1, [&](int64_t begin, int64_t end) { if (begin == 0) { ASSERT_TRUE(a.sum().equal(expected)); diff --git a/aten/src/ATen/test/thread_init_test.cpp b/aten/src/ATen/test/thread_init_test.cpp index efde85295ab6a..55df55f3b58cf 100644 --- a/aten/src/ATen/test/thread_init_test.cpp +++ b/aten/src/ATen/test/thread_init_test.cpp @@ -9,12 +9,10 @@ // will throw an exception when multiple threads call // their first parallel construct. void test(int given_num_threads) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = at::ones({1000 * 1000}, at::CPU(at::kFloat)); ASSERT_TRUE(given_num_threads >= 0); ASSERT_EQ(at::get_num_threads(), given_num_threads); auto t_sum = t.sum(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 1000; ++i) { t_sum = t_sum + t.sum(); } @@ -37,7 +35,6 @@ int main() { #endif // test inter-op settings - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::set_num_interop_threads(5); ASSERT_EQ(at::get_num_interop_threads(), 5); ASSERT_ANY_THROW(at::set_num_interop_threads(6)); diff --git a/aten/src/ATen/test/undefined_tensor_test.cpp b/aten/src/ATen/test/undefined_tensor_test.cpp index 234d23c2c0b77..8d846b006e5e7 100644 --- a/aten/src/ATen/test/undefined_tensor_test.cpp +++ b/aten/src/ATen/test/undefined_tensor_test.cpp @@ -8,7 +8,6 @@ using namespace at; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestUndefined, UndefinedTest) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); // mainly test ops on undefined tensors don't segfault and give a reasonable errror message. 
diff --git a/aten/src/ATen/test/vec256_test_all_types.cpp b/aten/src/ATen/test/vec256_test_all_types.cpp index d5cb87c82179a..1e535b54cd31b 100644 --- a/aten/src/ATen/test/vec256_test_all_types.cpp +++ b/aten/src/ATen/test/vec256_test_all_types.cpp @@ -164,20 +164,14 @@ namespace { TYPED_TEST(Rounding, Round) { using vec = TypeParam; using UVT = UvalueType; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) UVT case1 = -658.5f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) UVT exp1 = -658.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) UVT case2 = -657.5f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) UVT exp2 = -658.f; auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-1000, 1000}} }) .addCustom({ {case1},exp1 }) .addCustom({ {case2},exp2 }) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(64000) .setTestSeed(TestSeed()); test_unary( @@ -266,11 +260,8 @@ namespace { using vec = TypeParam; using UVT = UvalueType; auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-4096, 4096}}, true, 1.2e-7f}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-8192, 8192}}, true, 3.0e-7f}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(8000) .setTestSeed(TestSeed()); test_unary( @@ -284,11 +275,8 @@ namespace { using vec = TypeParam; using UVT = UvalueType; auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-4096, 4096}}, true, 1.2e-7f}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-8192, 8192}}, true, 3.0e-7f}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(8000) .setTestSeed(TestSeed()); test_unary( @@ -321,9 +309,7 @@ namespace { using UVT = UvalueType; auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-88, 88}}, true, getDefaultTolerance()}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(65536) .setTestSeed(TestSeed()); test_unary( @@ -338,9 +324,7 @@ namespace { using UVT = UvalueType; auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-88, 88}}, true, getDefaultTolerance()}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(65536) .setTestSeed(TestSeed()); test_unary( @@ -356,9 +340,7 @@ namespace { bool checkRelativeErr = is_complex>(); auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-10, 10}}, checkRelativeErr, getDefaultTolerance() }) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(125536) .setTestSeed(TestSeed()); test_unary( @@ -374,9 +356,7 @@ namespace { bool checkRelativeErr = is_complex>(); auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-10, 10}}, checkRelativeErr, getDefaultTolerance() }) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(125536) .setTestSeed(TestSeed()); test_unary( @@ -392,9 +372,7 @@ namespace { using UVT = UvalueType; auto test_case = TestingCase::getBuilder() - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-100, 100}}, checkRelativeErr, getDefaultTolerance()}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(65536) .setTestSeed(TestSeed()); test_unary( @@ -437,11 +415,8 @@ namespace { using UVT = UvalueType; auto test_case = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {-1, 1000}}, true, getDefaultTolerance()}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {1000, 1.e+30}}, true, getDefaultTolerance()}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .setTrialCount(65536) .setTestSeed(TestSeed()); test_unary( @@ -510,10 +485,8 @@ namespace { test_vals[i] = std::numeric_limits::quiet_NaN(); // All bits are set to 1 if true, otherwise 0. // same rule as at::Vec256::binary_pred. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::memset(static_cast(&expected_vals[i]), 0xFF, sizeof(VT)); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_vals[i] = (VT)0.123; std::memset(static_cast(&expected_vals[i]), 0, sizeof(VT)); } @@ -529,14 +502,10 @@ namespace { using UVT = UvalueType; UVT tolerance = getDefaultTolerance(); // double: 2e+305 float: 4e+36 (https://sleef.org/purec.xhtml#eg) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) UVT maxCorrect = std::is_same::value ? (UVT)4e+36 : (UVT)2e+305; TestingCase testCase = TestingCase::getBuilder() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {(UVT)-100, (UVT)0}}, true, tolerance}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {(UVT)0, (UVT)1000 }}, true, tolerance}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .addDomain(CheckWithinDomains{ { {(UVT)1000, maxCorrect }}, true, tolerance}) .setTestSeed(TestSeed()); test_unary( @@ -836,7 +805,6 @@ namespace { test_vals[i] = (VT)0; } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_vals[i] = (VT)0.897; } } @@ -860,7 +828,6 @@ namespace { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) CACHE_ALIGN IntVT actual_vals1[vec::size()]; for (int64_t i = 0; i < vec::size(); i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input1[i] = (VT)i * (VT)2.1 + (VT)0.5; expected_vals1[i] = static_cast(input1[i]); } @@ -894,7 +861,6 @@ namespace { auto test_case = TestingCase::getBuilder() .addDomain(CheckWithinDomains{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{(VT)-1000, (VT)1000}, {(VT)-1000, (VT)1000}, {(VT)-1000, (VT)1000}}, true, getDefaultTolerance()}) .setTestSeed(TestSeed()); @@ -908,14 +874,12 @@ namespace { RESOLVE_OVERLOAD(filter_fmadd)); } template - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) typename std::enable_if_t<(mask < 0 || mask> 255), void> // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) test_blend(VT expected_val[vec::size()], VT a[vec::size()], VT b[vec::size()]) { } template - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) typename std::enable_if_t<(mask >= 0 && mask <= 255), void> // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) test_blend(VT expected_val[vec::size()], VT a[vec::size()], VT b[vec::size()]) { @@ -970,7 +934,6 @@ namespace { mask[idx] = (VT)0; } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t hex_mask = 
0xFFFFFFFFFFFFFFFF; std::memcpy(&mask[idx], &hex_mask, sizeof(VT)); } @@ -993,7 +956,6 @@ namespace { void blend_init, 4>(Complex(&a)[4], Complex(&b)[4]) { auto add = Complex(1., 100.); a[0] = Complex(1., 100.); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b[0] = Complex(5., 1000.); for (int i = 1; i < 4; i++) { a[i] = a[i - 1] + add; @@ -1005,7 +967,6 @@ namespace { void blend_init, 2>(Complex(&a)[2], Complex(&b)[2]) { auto add = Complex(1.0, 100.0); a[0] = Complex(1.0, 100.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b[0] = Complex(3.0, 1000.0); a[1] = a[0] + add; b[1] = b[0] + add; @@ -1075,17 +1036,13 @@ namespace { template std::enable_if_t::value, void> arange_init(T& base, T& step) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) base = (T)5.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) step = (T)2.0; } template std::enable_if_t::value, void> arange_init(T& base, T& step) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) base = T(5.0, 5.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) step = T(2.0, 3.0); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) @@ -1143,12 +1100,9 @@ namespace { //zero point ValueGen generator_zp(min_val, max_val, seed); //scale - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ValueGen generator_sc(1.f, 15.f, seed.add(1)); //value - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float minv = static_cast(static_cast(min_val) * 2.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float maxv = static_cast(static_cast(max_val) * 2.0); ValueGen gen(minv, maxv, seed.add(2)); for (int i = 0; i < trials; i++) { @@ -1188,7 +1142,6 @@ namespace { auto seed = TestSeed(); ValueGen generator(min_val, max_val, seed.add(1)); //scale - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ValueGen generator_sc(1.f, 15.f, seed.add(2)); for (int i = 0; i < trials; i++) { float scale = generator_sc.get(); @@ -1237,7 +1190,6 @@ namespace { //zero point and value ValueGen generator(min_val, max_val, seed); //scale - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ValueGen generator_sc(1.f, 15.f, seed.add(1)); for (int i = 0; i < trials; i++) { float multiplier = 1.f / (generator_sc.get()); diff --git a/aten/src/ATen/test/vmap_test.cpp b/aten/src/ATen/test/vmap_test.cpp index a3102ebd493fe..41a6baf313ff0 100644 --- a/aten/src/ATen/test/vmap_test.cpp +++ b/aten/src/ATen/test/vmap_test.cpp @@ -103,7 +103,6 @@ TEST(VmapTest, TestBatchedTensorMaxLevel) { TEST(VmapTest, TestBatchedTensorActualDim) { { // No batch dims - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = makeBatched(ones({2, 3, 5, 7}), {}); auto* batched = maybeGetBatchedImpl(tensor); ASSERT_EQ(batched->actualDim(0), 0); @@ -126,7 +125,6 @@ TEST(VmapTest, TestBatchedTensorActualDim) { } { // Single batch dim at front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = makeBatched(ones({2, 3, 5, 7}), {{/*lvl*/1, /*dim*/0}}); auto* batched = maybeGetBatchedImpl(tensor); ASSERT_EQ(batched->actualDim(0), 1); @@ -137,7 +135,6 @@ TEST(VmapTest, TestBatchedTensorActualDim) { } { // Single batch dim in middle - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = makeBatched(ones({2, 3, 5, 7}), {{/*lvl*/1, /*dim*/1}}); auto* batched = maybeGetBatchedImpl(tensor); ASSERT_EQ(batched->actualDim(0), 0); @@ -146,7 +143,6 @@ TEST(VmapTest, TestBatchedTensorActualDim) { } { // Single batch dim at end - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor tensor = makeBatched(ones({2, 3, 5, 7}), {{/*lvl*/1, /*dim*/1}}); auto* batched = maybeGetBatchedImpl(tensor); ASSERT_EQ(batched->actualDim(0), 0); @@ -156,7 +152,6 @@ TEST(VmapTest, TestBatchedTensorActualDim) { { // Multiple (2) batch dims at front Tensor tensor = makeBatched( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ones({2, 3, 5, 7}), {{/*lvl*/1, /*dim*/0}, {/*lvl*/2, /*dim*/1}}); auto* batched = maybeGetBatchedImpl(tensor); @@ -166,7 +161,6 @@ TEST(VmapTest, TestBatchedTensorActualDim) { { // Multiple (2) batch dims, misc places Tensor tensor = makeBatched( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ones({2, 3, 5, 7}), {{/*lvl*/1, /*dim*/1}, {/*lvl*/2, /*dim*/3}}); auto* batched = maybeGetBatchedImpl(tensor); @@ -197,14 +191,12 @@ TEST(VmapTest, TestBatchedTensorActualDim) { TEST(VmapTest, TestMultiBatchVmapTransform) { { // Input is regular Tensor - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = ones({2, 3, 5}); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) ASSERT_THROW(MultiBatchVmapTransform::logicalToPhysical(tensor), c10::Error); } { // Input is BatchedTensor, Batch dims are already at the front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = ones({2, 3, 5}); BatchDims bdims = {{/*lvl*/1, /*dim*/0}, {/*lvl*/3, /*dim*/1}}; auto batched = makeBatched(tensor, bdims); @@ -214,7 +206,6 @@ TEST(VmapTest, TestMultiBatchVmapTransform) { } { // Single batch dim, not at front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = ones({2, 3, 5}); BatchDims bdims = {{/*lvl*/1, /*dim*/1}}; auto batched = makeBatched(tensor, bdims); @@ -225,7 +216,6 @@ TEST(VmapTest, TestMultiBatchVmapTransform) { } { // Multiple batch dims, not at front. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = ones({2, 3, 5}); BatchDims bdims = {{/*lvl*/1, /*dim*/1}, {/*lvl*/2,/*dim*/2}, {/*lvl*/3,/*dim*/0}}; auto batched = makeBatched(tensor, bdims); @@ -241,7 +231,6 @@ TEST(VmapTest, TestMultiBatchVmapTransform) { auto sizes = std::vector(kVmapNumLevels, 1); sizes[0] = 2; sizes[2] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sizes[5] = 7; // bdims = {{lvl=0,dim=0,lvl=1,dim=1,...,{lvl=63,dim=63}} @@ -259,9 +248,7 @@ TEST(VmapTest, TestMultiBatchVmapTransform) { auto sizes = std::vector(kVmapNumLevels, 1); sizes[1] = 3; sizes[2] = 2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sizes[5] = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sizes[kVmapNumLevels - 1] = 5; // The goal is to permute sizes such that the final sizes are: @@ -269,18 +256,14 @@ TEST(VmapTest, TestMultiBatchVmapTransform) { auto expected_result_sizes = std::vector(kVmapNumLevels, 1); expected_result_sizes[0] = 2; expected_result_sizes[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected_result_sizes[2] = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected_result_sizes[3] = 7; // bdims = {{0, 2}, {1, 1}, {2, 63}, {3, 5}, {4, 0}, {5, 3}, {6, 4}, // {7, 6}, {8, 7}, {9, 8}, ..., {63, 62}} BatchDims batch_dims = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0, 2}, {1, 1}, {2, kVmapNumLevels - 1}, {3, 5}, {4, 0}, {5, 3}, {6, 4} }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t level = 7; level < kVmapNumLevels; level++ ) { batch_dims.emplace_back(level, /*dim=*/level - 1); } @@ -294,7 +277,6 @@ TEST(VmapTest, TestMultiBatchVmapTransform) { } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(VmapTest, TestVmapPhysicalViewGetPhysicalDim) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VmapPhysicalView physical_view(ones({2, 3, 4, 5, 6}), 1 | 4); // Positive dims @@ -313,7 +295,6 @@ TEST(VmapTest, TestVmapPhysicalViewGetPhysicalDim) { } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(VmapTest, TestVmapPhysicalViewGetPhysicalDims) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VmapPhysicalView physical_view(ones({2, 3, 4, 5, 6}), 2 | 8 | 16); ASSERT_EQ( @@ -339,7 +320,6 @@ TEST(VmapTest, TestVmapPhysicalViewNewLogicalFromPhysical) { { // Simple case: single level VmapPhysicalView physical_view(ones({2, 3, 4}), /*levels = {2}*/4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor physical = ones({2, 6, 7}); auto result = physical_view.getPhysicalToLogicalMap().apply(physical); @@ -350,9 +330,7 @@ TEST(VmapTest, TestVmapPhysicalViewNewLogicalFromPhysical) { } { // Multiple levels - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VmapPhysicalView physical_view(ones({2, 3, 4, 5, 6}), /*levels = {1, 3, 4}*/2 | 8 | 16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor physical = ones({2, 3, 4, 7}); auto result = physical_view.getPhysicalToLogicalMap().apply(physical); @@ -381,7 +359,6 @@ TEST(VmapTest, TestVmapPhysicalViewNewLogicalFromPhysical) { TEST(VmapTest, TestBatchedTensorSum) { { // Simple: single batch dim, single reduce dim - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = at::randn({2, 3, 5, 7}); Tensor batched_x = makeBatched(x, {{/*lvl*/1, /*dim*/0}}); @@ -402,7 +379,6 @@ TEST(VmapTest, TestBatchedTensorSum) { } { // single batch dim, multiple reduce dim - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = at::randn({2, 3, 5, 7}); Tensor batched_x = makeBatched(x, {{/*lvl*/1, /*dim*/1}}); @@ -413,7 +389,6 @@ TEST(VmapTest, TestBatchedTensorSum) { } { // multiple batch dim, multiple reduce dim - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = at::randn({2, 3, 5, 7}); Tensor batched_x = makeBatched(x, {{/*lvl*/1, /*dim*/0}, {/*lvl*/2, /*dim*/1}}); @@ -438,7 +413,6 @@ static void checkBroadcastingVmapTransform(TensorList inputs, TensorList expecte TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { { // Check that batch dims get moved to the front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({2, B0, 3, B1}); Tensor y = at::randn({B1, 2, 3, B0}); @@ -451,7 +425,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { } { // Check that batch dims become aligned (i.e. extra 1 dims get added) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7, B2 = 9; Tensor x = at::randn({B0, B2, 2, 3}); Tensor y = at::randn({B0, B1, 2, 3}); @@ -464,7 +437,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { } { // Check that the "example" gets padded with extra dims of size 1. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5; Tensor x = at::randn({B0, 3}); Tensor y = at::randn({B0, 2, 3}); @@ -478,7 +450,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { { // Check batch dims get moved to front, batch dims get aligned, // and the example gets padded correctly. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7, B2 = 11, B3 = 13; Tensor x = at::randn({2, B0, 3, B2}); Tensor y = at::randn({B3, 3, B1}); @@ -494,7 +465,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { } { // Edge case: BatchedTensor "scalar" handling - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B2 = 11; Tensor x = at::randn({B0}); Tensor y = at::randn({B0, B2}); @@ -506,7 +476,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { } { // Edge case: Only one tensor is a "batchedtensor scalar" - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B2 = 11; Tensor x = at::randn({B0}); Tensor y = at::randn({B0, B2, 2}); @@ -522,7 +491,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedBatched) { TEST(VmapTest, TestBroadcastingVmapTransformBatchedUnbatched) { { // Check same example size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({2, B0, 3, B1}); Tensor y = at::randn({2, 3}); @@ -537,7 +505,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedUnbatched) { } { // BatchedTensor has higher example dim than non-batched-tensor - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({B0, B1, 2, 3}); Tensor y = at::randn({3}); @@ -550,7 +517,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedUnbatched) { } { // BatchedTensor has lower example dim than non-batched-tensor - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({B0, B1, 3}); Tensor y = at::randn({2, 3}); @@ -563,7 +529,6 @@ TEST(VmapTest, TestBroadcastingVmapTransformBatchedUnbatched) { } { // Scalar handling - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({B0, B1}); Tensor y = at::randn({}); @@ -587,7 +552,6 @@ 
TEST(VmapTest, TestBroadcastingVmapTransformMaxLevels) { } { // inputs don't have all 64 levels, but results do. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t split = 19; auto x = randn(std::vector(split, 1)); auto y = randn(std::vector(kVmapNumLevels - split, 1)); @@ -646,7 +610,6 @@ TEST(VmapTest, TestBatchedTensorMul) { { // batched (level 1) * batched (level 2) Tensor x = at::randn({2, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor y = at::randn({5, 3}); Tensor Bx = addBatchDim(x, /*lvl*/1, /*dim*/0); @@ -655,16 +618,13 @@ TEST(VmapTest, TestBatchedTensorMul) { // We get a doubly wrapped BatchTensor... const auto& out = maybeGetBatchedImpl(Bout)->value(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expected_size = {2, 5, 3}; ASSERT_EQ(out.sizes(), expected_size); ASSERT_TRUE(at::allclose(out, x.unsqueeze(1) * y)); } { // batched (level 2, 3, 4) * batched (level 3, 1, 2) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = at::randn({3, 5, 7}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor y = at::randn({5, 2, 3}); // Each BatchDim is constructed in {dim, level} format. @@ -676,7 +636,6 @@ TEST(VmapTest, TestBatchedTensorMul) { // The batching rule aligns dimensions in the order of their `level`. // It just happened that we chose sizes to be in the same order as the level. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expected_size = {2, 3, 5, 7}; ASSERT_EQ(out.sizes(), expected_size); ASSERT_TRUE(at::allclose(out, x * y.permute({1, 2, 0}).unsqueeze(3))); @@ -688,7 +647,6 @@ TEST(VmapTest, TestBatchedTensorMul) { TEST(VmapTest, TestBatchedTensorSize) { { // Single batch dim at front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = at::randn({3, 5, 7}); Tensor Bx = makeBatched(x, {{0, 0}}); @@ -703,7 +661,6 @@ TEST(VmapTest, TestBatchedTensorSize) { } { // multiple batch dims not at front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor x = at::randn({2, 3, 5, 7, 11}); Tensor Bx = makeBatched(x, {{0, 3}, {1, 1}}); @@ -723,7 +680,6 @@ TEST(VmapTest, TestBatchedTensorSize) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(VmapTest, TestVmapPhysicalViewGetPhysicalShape) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VmapPhysicalView physical_view(ones({2, 3, 4, 5, 6}), 1 | 4); ASSERT_EQ(physical_view.getPhysicalShape({}), VmapDimVector({2, 3})); ASSERT_EQ(physical_view.getPhysicalShape({7}), VmapDimVector({2, 3, 7})); @@ -731,7 +687,6 @@ TEST(VmapTest, TestVmapPhysicalViewGetPhysicalShape) { ASSERT_EQ(physical_view.getPhysicalShape({7, 11, 13, 17}), VmapDimVector({2, 3, 7, 11, 13, 17})); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VmapPhysicalView physical_view(ones({2, 3, 4, 5, 6}), 2); ASSERT_EQ(physical_view.getPhysicalShape({}), VmapDimVector({2})); ASSERT_EQ(physical_view.getPhysicalShape({7}), VmapDimVector({2, 7})); @@ -743,7 +698,6 @@ TEST(VmapTest, TestVmapPhysicalViewGetPhysicalShape) { TEST(VmapTest, TestBatchedTensorExpand) { { // Expand size is too small - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::randn({2, 3, 5}); auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}}); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) @@ -751,10 +705,8 @@ TEST(VmapTest, TestBatchedTensorExpand) { } { // Expand size has same dimensionality as the logical dim - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::randn({2, 1, 5}); auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto batched_out = batched.expand({3, 5}); const auto& out = maybeGetBatchedImpl(batched_out)->value(); @@ -763,7 +715,6 @@ TEST(VmapTest, TestBatchedTensorExpand) { } { // Expand size has same dimensionality as the logical dim, incorrect expand size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::randn({2, 1, 5}); auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}}); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) @@ -771,10 +722,8 @@ TEST(VmapTest, TestBatchedTensorExpand) { } { // Expand size has greater dimensionality as the logical dim - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::randn({2, 3, 5}); auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto batched_out = batched.expand({7, 3, 5}); const auto& out = maybeGetBatchedImpl(batched_out)->value(); @@ -783,7 +732,6 @@ TEST(VmapTest, TestBatchedTensorExpand) { } { // Expand size has greater dimensionality as the logical dim, incorrect expand size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::randn({2, 3, 5}); auto batched = makeBatched(tensor, {{/*lvl*/0, /*dim*/0}}); // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) @@ -802,7 +750,6 @@ TEST(VmapTest, TestBatchedTensorExpand) { // logical dim is 0, expand size has greater dimensionality than logical dim auto tensor = at::randn({2, 3}); auto batched = makeBatched(tensor, {{0, 0}, {1, 1}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto batched_out = batched.expand({5, 7}); const auto& out = maybeGetBatchedImpl(batched_out)->value(); ASSERT_EQ(out.data_ptr(), tensor.data_ptr()); @@ -962,7 +909,6 @@ static void checkMultiBatchVmapTransform(TensorList inputs, TensorList expected_ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { { // Check that batch dims get moved to the front - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({2, B0, 3, B1}); Tensor y = at::randn({B1, 2, 3, B0}); @@ -975,7 +921,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { } { // Check that batch dims become broadcasted and are present in all returns - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7, B2 = 9; Tensor x = at::randn({B0, B2, 2, 3}); Tensor y = at::randn({B0, B1, 2, 3}); @@ -988,7 +933,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { } { // Check operation on tensors of different logical dims - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5; Tensor x = at::randn({B0, 3}); Tensor y = at::randn({B0, 2, 3}); @@ -999,7 +943,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { } { // More complicated example with two tensors. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7, B2 = 11, B3 = 13; Tensor x = at::randn({2, B0, 3, B2}); Tensor y = at::randn({B3, 3, B1}); @@ -1015,7 +958,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { } { // Edge case: BatchedTensor "scalar" handling - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B2 = 11; Tensor x = at::randn({B0}); Tensor y = at::randn({B0, B2}); @@ -1027,7 +969,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { } { // Edge case: Only one tensor is a "batchedtensor scalar" - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B2 = 11; Tensor x = at::randn({B0}); Tensor y = at::randn({B0, B2, 2}); @@ -1043,7 +984,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedBatched) { TEST(VmapTest, TestMultiBatchVmapTransformBatchedUnbatched) { { // Check same example size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({2, B0, 3, B1}); Tensor y = at::randn({2, 3}); @@ -1058,7 +998,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedUnbatched) { } { // BatchedTensor has higher example dim than non-batched-tensor - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({B0, B1, 2, 3}); Tensor y = at::randn({3}); @@ -1071,7 +1010,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedUnbatched) { } { // BatchedTensor has lower example dim than non-batched-tensor - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({B0, B1, 3}); Tensor y = at::randn({2, 3}); @@ -1084,7 +1022,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformBatchedUnbatched) { } { // Scalar handling - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7; Tensor x = at::randn({B0, B1}); Tensor y = at::randn({}); @@ -1108,7 +1045,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformMaxLevels) { } { // inputs don't have all 64 levels, but results do. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t split = 19; auto x = randn(std::vector(split, 1)); auto y = randn(std::vector(kVmapNumLevels - split, 1)); @@ -1139,7 +1075,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformMaxLevels) { TEST(VmapTest, TestMultiBatchVmapTransformMultipleTensors) { // Test with three (all batched) tensors { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7, B2 = 9; Tensor x = at::randn({2, B0, 3, B1}); Tensor y = at::randn({B1, 4}); @@ -1158,7 +1093,6 @@ TEST(VmapTest, TestMultiBatchVmapTransformMultipleTensors) { } // Test with three tensors, some batched, some unbatched { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t B0 = 5, B1 = 7, B2 = 9; Tensor x = at::randn({2, 3}); Tensor y = at::randn({4, B0}); diff --git a/aten/src/ATen/test/vulkan_test.cpp b/aten/src/ATen/test/vulkan_test.cpp index 20d268f0f92ae..87cc319bf5e74 100644 --- a/aten/src/ATen/test/vulkan_test.cpp +++ b/aten/src/ATen/test/vulkan_test.cpp @@ -11,7 +11,6 @@ bool checkRtol(const at::Tensor& diff, const std::vector inputs) { for (auto& tensor : inputs) { maxValue = fmax(tensor.abs().max().item(), maxValue); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return diff.abs().max().item() < (0.01 + 2e-2 * maxValue); } bool almostEqual(const at::Tensor& a, const at::Tensor& b) { @@ -42,12 +41,10 @@ TEST(VulkanTest, upsampleNearest2D) { auto t_in = at::rand({1, 2, 2, 3}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_out_expected = at::upsample_nearest2d(t_in, {4, 6}); auto tv_in = t_in.to(at::TensorOptions{at::Device{at::kVulkan}}.dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tv_out = at::upsample_nearest2d(tv_in, {4, 6}); auto t_out = tv_out.to(at::TensorOptions{at::Device{at::kCPU}}.dtype(at::kFloat)); @@ -79,9 +76,7 @@ TEST(VulkanTest, add) { TEST(VulkanTest, add_not4dim) { if (!at::is_vulkan_available()) return; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_in0 = at::rand({1, 1000}, at::device(at::kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_in1 = at::rand({1000}, at::device(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::add(t_in0, t_in1, 2); auto tv_in0 = t_in0.vulkan(); @@ -96,10 +91,8 @@ TEST(VulkanTest, add_not4dim) { TEST(VulkanTest, add_cpu_vulkan) { if (!at::is_vulkan_available()) return; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_in0 = at::rand({2, 96, 1000}, at::device(at::kCPU).dtype(at::kFloat)); auto t_in1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 2, 96, 1000}, at::device(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::add(t_in0, t_in1, 2); auto tv_in0 = t_in0.vulkan(); @@ -249,7 +242,6 @@ TEST(VulkanTest, addmm) { auto t_m2 = at::rand({2, 3}, at::device(at::kCPU).dtype(at::kFloat)); auto t_b = at::rand({2, 3}, at::device(at::kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float beta = 100; float alpha = 2; auto t_out_expected = at::addmm(t_b, t_m1, t_m2, beta, alpha); @@ -271,9 +263,7 @@ TEST(VulkanTest, addmm) { TEST(VulkanTest, mm) { if (!at::is_vulkan_available()) return; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_m1 = at::rand({10, 20}, at::device(at::kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_m2 = at::rand({20, 30}, 
at::device(at::kCPU).dtype(at::kFloat)); auto t_out_expected = t_m1.mm(t_m2); @@ -294,11 +284,8 @@ TEST(VulkanTest, mm) { TEST(VulkanTest, clamp) { if (!at::is_vulkan_available()) return; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float min = -0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float max = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_in = at::rand({1, 3, 16, 16}, at::device(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::clamp(t_in, min, max); @@ -313,11 +300,8 @@ TEST(VulkanTest, clamp) { TEST(VulkanTest, hardtanh_) { if (!at::is_vulkan_available()) return; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float min = -0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float max = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_in = at::rand({1, 3, 16, 16}, at::device(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::hardtanh_(t_in, min, max); @@ -379,7 +363,6 @@ class Hardtanh_ : public BaseOp { public: Hardtanh_() : BaseOp(OpType::hardtanh_) {} at::Tensor run(at::Tensor& t) override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::hardtanh_(t, 0, 6); } std::string toString() override { @@ -492,148 +475,95 @@ class OpsList { class MobileNetV2 : public OpsList { public: MobileNetV2() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 3, 3, 3}, 1, 2, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 1, 3, 3}, 32, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({16, 32, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 16, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 1, 3, 3}, 96, 2, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({24, 96, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({144, 24, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({144, 1, 3, 3}, 144, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({24, 144, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({144, 24, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({144, 1, 3, 3}, 144, 2, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 144, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({192, 32, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({192, 1, 3, 3}, 192, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 192, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({192, 32, 1, 1}, 1, 1, 0)); 
ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({192, 1, 3, 3}, 192, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 192, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({192, 32, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({192, 1, 3, 3}, 192, 2, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({64, 192, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 64, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 1, 3, 3}, 384, 1, 1)); ops.emplace_back(new Hardtanh_()); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({64, 384, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 64, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 1, 3, 3}, 384, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({64, 384, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 64, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 1, 3, 3}, 384, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({64, 384, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 64, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({384, 1, 3, 3}, 384, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 384, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({576, 96, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({576, 1, 3, 3}, 576, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 576, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({576, 96, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({576, 1, 3, 3}, 576, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 576, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({576, 96, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({576, 1, 3, 3}, 576, 2, 1)); ops.emplace_back(new Hardtanh_()); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({160, 576, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({960, 160, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({960, 1, 3, 3}, 960, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({160, 960, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({960, 160, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({960, 1, 3, 3}, 960, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({160, 960, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({960, 160, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({960, 1, 3, 3}, 960, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({320, 960, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({1280, 320, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); ops.emplace_back(new Mean()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Addmm(1, 1280, 1000, 0, 1)); } }; @@ -645,7 +575,6 @@ TEST(VulkanTest, DISABLED_mobilenetv2) { MobileNetV2 mn2{}; auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 3, 224, 224}, at::device(at::kCPU).dtype(at::kFloat)); auto tv_in = t_in.vulkan(); mn2.runDual(t_in, tv_in); @@ -657,31 +586,22 @@ TEST(VulkanTest, OpsList) { return; std::vector> ops; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 3, 3, 3}, 1, 2, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({32, 1, 3, 3}, 32, 1, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({16, 32, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 16, 1, 1}, 1, 1, 0)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({96, 1, 3, 3}, 96, 2, 1)); ops.emplace_back(new Hardtanh_()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({24, 96, 1, 1}, 1, 1, 0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Conv2d({144, 24, 1, 1}, 1, 1, 0)); // 1, 144, 56, 56 ops.emplace_back(new Hardtanh_()); ops.emplace_back(new Mean()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ops.emplace_back(new Addmm(1, 144, 1000, 0, 1)); OpsList opsList(ops); auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 3, 224, 224}, at::device(at::kCPU).dtype(at::kFloat)); auto t_out_expected = opsList.run(t_in); @@ -732,7 +652,6 @@ TEST(VulkanTest, conv2dPrepack) { std::vector stride{1, 1}; std::vector padding{0, 0}; std::vector dilation{1, 1}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float output_min = 
0.25; float output_max = 1.0; @@ -782,7 +701,6 @@ TEST(VulkanTest, adaptive_avg_pool2d) { return; auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 2, 7, 7}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::adaptive_avg_pool2d(t_in, {3, 3}); auto tv_in = t_in.vulkan(); @@ -805,7 +723,6 @@ TEST(VulkanTest, DISABLED_adaptive_avg_pool2d_2) { return; auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 1280, 7, 7}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::adaptive_avg_pool2d(t_in, {1, 1}); auto tv_in = t_in.vulkan(); @@ -827,12 +744,9 @@ TEST(VulkanTest, reshape) { return; auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 8, 1, 1}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t_out_expected = at::reshape(t_in, {1, 8}); auto tv_in = t_in.vulkan(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tv_out = at::reshape(tv_in, {1, 8}); auto t_out = tv_out.cpu(); @@ -1009,7 +923,6 @@ TEST(VulkanTest, cat) { auto t_in1 = at::rand({1, 2, 3, 3}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); auto t_in2 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 5, 3, 3}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::cat({t_in0, t_in1, t_in2}, 1); @@ -1030,7 +943,6 @@ TEST(VulkanTest, DISABLED_max_pool2d) { return; auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 3, 7, 7}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::max_pool2d(t_in, {2, 2}, {1}, {0}, {1}); auto tv_in = t_in.vulkan(); @@ -1052,7 +964,6 @@ TEST(VulkanTest, avg_pool2d) { return; auto t_in = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({1, 3, 7, 7}, at::TensorOptions(at::kCPU).dtype(at::kFloat)); auto t_out_expected = at::avg_pool2d(t_in, {2, 2}, {1}, {0}, true); auto tv_in = t_in.vulkan(); diff --git a/aten/src/ATen/test/wrapdim_test.cpp b/aten/src/ATen/test/wrapdim_test.cpp index e1463fd657edf..5b0a3b33e6a75 100644 --- a/aten/src/ATen/test/wrapdim_test.cpp +++ b/aten/src/ATen/test/wrapdim_test.cpp @@ -4,14 +4,12 @@ using namespace at; void TestSimpleCase(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({2, 3, 4, 5}, T); ASSERT_TRUE(a.prod(-4).equal(a.prod(0))); ASSERT_TRUE(a.prod(3).equal(a.prod(-1))); } void TestExpressionSpecification(DeprecatedTypeProperties& T) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = randn({2, 3, 4, 5}, T); ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0))); ASSERT_TRUE(a.unsqueeze(4).equal(a.unsqueeze(-1))); @@ -36,7 +34,6 @@ void TestScalarVs1Dim1Size(DeprecatedTypeProperties& T) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TestWrapdim, TestWrapdim) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) manual_seed(123); DeprecatedTypeProperties& T = CPU(kFloat); diff --git a/aten/src/TH/THAllocator.cpp b/aten/src/TH/THAllocator.cpp index d7838a727c049..e07677aea76a9 100644 --- a/aten/src/TH/THAllocator.cpp +++ b/aten/src/TH/THAllocator.cpp @@ -235,13 +235,11 @@ THMapAllocator::THMapAllocator(WithFd, const char *filename, int fd, int flags, if (!(flags_ & TH_ALLOCATOR_MAPPED_FROMFD)) { if (flags_ & TH_ALLOCATOR_MAPPED_SHARED) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((fd = open(filename_.c_str(), flags, 
(mode_t)0600)) == -1) { AT_ERROR("unable to open file <", filename_, "> in read-write mode"); } } else if (flags_ & TH_ALLOCATOR_MAPPED_SHAREDMEM) { #ifdef HAVE_SHM_OPEN - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if((fd = shm_open(filename_.c_str(), flags, (mode_t)0600)) == -1) { AT_ERROR("unable to open shared memory object <", filename_, "> in read-write mode"); } diff --git a/aten/src/TH/THGeneral.cpp b/aten/src/TH/THGeneral.cpp index c59bbc19787df..d69b95dc924e8 100644 --- a/aten/src/TH/THGeneral.cpp +++ b/aten/src/TH/THGeneral.cpp @@ -44,13 +44,10 @@ void _THError(const char *file, const int line, const char *fmt, ...) /* vasprintf not standard */ /* vsnprintf: how to handle if does not exists? */ va_start(args, fmt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int n = vsnprintf(msg, 2048, fmt, args); va_end(args); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if(n < 2048) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) snprintf(msg + n, 2048 - n, " at %s:%d", file, line); } @@ -66,7 +63,6 @@ void _THAssertionFailed(const char *file, const int line, const char *exp, const char msg[1024]; va_list args; va_start(args, fmt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vsnprintf(msg, 1024, fmt, args); va_end(args); _THError(file, line, "Assertion `%s' failed. %s", exp, msg); @@ -114,13 +110,10 @@ void _THArgCheck(const char *file, int line, int condition, int argNumber, const /* vasprintf not standard */ /* vsnprintf: how to handle if does not exists? */ va_start(args, fmt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int n = vsnprintf(msg, 2048, fmt, args); va_end(args); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if(n < 2048) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) snprintf(msg + n, 2048 - n, " at %s:%d", file, line); } @@ -231,7 +224,6 @@ THDescBuff _THSizeDesc(const int64_t *size, const int64_t ndim) { if (n < L - 2) { snprintf(str+n, L-n, "]"); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) snprintf(str+L-5, 5, "...]"); } diff --git a/benchmarks/cpp/convolution.cpp b/benchmarks/cpp/convolution.cpp index e165d9db8d7f5..c58ca1a58431d 100644 --- a/benchmarks/cpp/convolution.cpp +++ b/benchmarks/cpp/convolution.cpp @@ -41,231 +41,122 @@ std::ostream& operator<<(std::ostream& os, const ConvParams& params) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) std::vector MobileNetV3Params = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 3, 224, 224}, {16, 3, 3, 3}, {16}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 16, 112, 112}, {16, 16, 1, 1}, {16}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 16, 112, 112}, {16, 1, 3, 3}, {16}, {2, 2}, {1, 1}, {1, 1}, 16}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 16, 56, 56}, {16, 16, 1, 1}, {16}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 16, 56, 56}, {72, 16, 1, 1}, {72}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 72, 56, 56}, {72, 1, 3, 3}, {72}, {2, 2}, {1, 1}, {1, 1}, 72}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 72, 28, 28}, {24, 72, 1, 1}, {24}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 24, 28, 28}, {88, 24, 1, 1}, {88}, {1, 1}, {0, 0}, {1, 1}, 1}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 88, 28, 28}, {88, 1, 3, 3}, {88}, {1, 1}, {1, 1}, {1, 1}, 88}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 88, 28, 28}, {24, 88, 1, 1}, {24}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 24, 28, 28}, {96, 24, 1, 1}, {96}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 96, 28, 28}, {96, 1, 5, 5}, {96}, {2, 2}, {2, 2}, {1, 1}, 96}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 96, 14, 14}, {40, 96, 1, 1}, {40}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 40, 14, 14}, {240, 40, 1, 1}, {240}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 240, 14, 14}, {240, 1, 5, 5}, {240}, {1, 1}, {2, 2}, {1, 1}, 240}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 240, 14, 14}, {40, 240, 1, 1}, {40}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 40, 14, 14}, {240, 40, 1, 1}, {240}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 240, 14, 14}, {240, 1, 5, 5}, {240}, {1, 1}, {2, 2}, {1, 1}, 240}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 240, 14, 14}, {40, 240, 1, 1}, {40}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 40, 14, 14}, {120, 40, 1, 1}, {120}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 120, 14, 14}, {120, 1, 5, 5}, {120}, {1, 1}, {2, 2}, {1, 1}, 120}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 120, 14, 14}, {48, 120, 1, 1}, {48}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 48, 14, 14}, {144, 48, 1, 1}, {144}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 144, 14, 14}, {144, 1, 5, 5}, {144}, {1, 1}, {2, 2}, {1, 1}, 144}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 144, 14, 14}, {48, 144, 1, 1}, {48}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 48, 14, 14}, {288, 48, 1, 1}, {288}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 288, 14, 14}, {288, 1, 5, 5}, {288}, {2, 2}, {2, 2}, {1, 1}, 288}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 288, 7, 7}, {96, 288, 1, 1}, {96}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 96, 7, 7}, {576, 96, 1, 1}, {576}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 576, 7, 7}, {576, 1, 5, 5}, {576}, {1, 1}, {2, 2}, {1, 1}, 576}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 576, 7, 7}, {96, 576, 1, 1}, {96}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 96, 7, 7}, {576, 96, 1, 1}, {576}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 576, 7, 7}, {576, 1, 5, 5}, {576}, {1, 1}, {2, 2}, {1, 1}, 576}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 576, 7, 7}, {96, 576, 1, 1}, {96}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 96, 7, 7}, {576, 96, 1, 1}, {576}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 576, 1, 1}, {1280, 576, 
1, 1}, {1280}, {1, 1}, {0, 0}, {1, 1}, 1}, }; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) std::vector ResNet18Params = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 3, 224, 224}, {64, 3, 7, 7}, {}, {2, 2}, {3, 3}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {128, 64, 3, 3}, {}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {128, 128, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {128, 64, 1, 1}, {}, {2, 2}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {128, 128, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {128, 128, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {256, 128, 3, 3}, {}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {256, 128, 1, 1}, {}, {2, 2}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {512, 256, 3, 3}, {}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {512, 512, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {512, 256, 1, 1}, {}, {2, 2}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {512, 512, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {512, 512, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, }; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) std::vector ResNet50Params = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 3, 224, 224}, {64, 3, 7, 7}, {}, {2, 2}, {3, 3}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {256, 64, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {256, 64, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 56, 56}, {64, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {256, 64, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 56, 56}, {64, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {64, 64, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 64, 56, 56}, {256, 64, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 56, 56}, {128, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 56, 56}, {128, 128, 3, 3}, {}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {512, 128, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 56, 56}, {512, 256, 1, 1}, {}, {2, 2}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 28, 28}, {128, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {128, 128, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {512, 128, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 28, 28}, {128, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {128, 128, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {512, 128, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 28, 28}, {128, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {128, 128, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 128, 28, 28}, {512, 128, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 28, 28}, {256, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 28, 28}, {256, 256, 3, 3}, {}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {1024, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 28, 28}, {1024, 512, 1, 1}, {}, {2, 2}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {256, 1024, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {1024, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {256, 1024, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {1024, 256, 1, 1}, {}, {1, 
1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {256, 1024, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {1024, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {256, 1024, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {1024, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {256, 1024, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {256, 256, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 256, 14, 14}, {1024, 256, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {512, 1024, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 14, 14}, {512, 512, 3, 3}, {}, {2, 2}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {2048, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 1024, 14, 14}, {2048, 1024, 1, 1}, {}, {2, 2}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 2048, 7, 7}, {512, 2048, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {512, 512, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {2048, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 2048, 7, 7}, {512, 2048, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {512, 512, 3, 3}, {}, {1, 1}, {1, 1}, {1, 1}, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 512, 7, 7}, {2048, 512, 1, 1}, {}, {1, 1}, {0, 0}, {1, 1}, 1}, }; @@ -309,7 +200,6 @@ static void BM_conv2d_native( params.groups); } state.counters["GFLOPS/s"] = benchmark::Counter( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.0f * output.numel() * weight.numel() / weight.size(0) * state.iterations(), benchmark::Counter::kIsRate); @@ -372,7 +262,6 @@ static void BM_conv2d_mkldnn( params.groups); } state.counters["GFLOPS/s"] = benchmark::Counter( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.0f * output.numel() * weight.numel() / weight.size(0) * state.iterations(), benchmark::Counter::kIsRate); diff --git a/c10/core/GeneratorImpl.cpp b/c10/core/GeneratorImpl.cpp index 78d30da67e3cb..7fb5571b516f7 100644 --- a/c10/core/GeneratorImpl.cpp +++ b/c10/core/GeneratorImpl.cpp @@ -82,7 +82,6 @@ uint64_t getNonDeterministicRandom(bool is_cuda) { } else { std::random_device rd; // limit to 53 bits to ensure unique representation in double - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s = ((((uint64_t)rd()) << 32) + rd()) & 0x1FFFFFFFFFFFFF; } return s; diff --git a/c10/core/TensorImpl.cpp 
b/c10/core/TensorImpl.cpp index 9e2d803ea9a89..c3cd4e6b17bef 100644 --- a/c10/core/TensorImpl.cpp +++ b/c10/core/TensorImpl.cpp @@ -235,7 +235,6 @@ bool TensorImpl::compute_channels_last_contiguous_3d() const { // Please don't combine these code, constant array is used here to let // compiler fully unroll the loop to get better performance switch (sizes_and_strides_.size()) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 5: { int64_t expected = 1; for (auto& d : {1, 4, 3, 2, 0}) { @@ -273,7 +272,6 @@ bool TensorImpl::compute_non_overlapping_and_dense() const { return sizes_and_strides_.size_at_unchecked(0) < 2 || sizes_and_strides_.stride_at_unchecked(0) == 1; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SmallVector perm; perm.resize(dim()); for (int64_t i = 0; i < dim(); i++) { diff --git a/c10/test/core/DispatchKeySet_test.cpp b/c10/test/core/DispatchKeySet_test.cpp index 57f4f596b1e63..29377b76a9d1a 100644 --- a/c10/test/core/DispatchKeySet_test.cpp +++ b/c10/test/core/DispatchKeySet_test.cpp @@ -127,9 +127,7 @@ TEST(DispatchKeySet, SpecificKeys) { DispatchKeySet keyset({ static_cast(0), // Undefined should be ignored static_cast(4), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(10), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(15), }); std::unordered_set visited_keys; diff --git a/c10/test/core/impl/SizesAndStrides_test.cpp b/c10/test/core/impl/SizesAndStrides_test.cpp index 4df51271883ef..d0cd822aa0754 100644 --- a/c10/test/core/impl/SizesAndStrides_test.cpp +++ b/c10/test/core/impl/SizesAndStrides_test.cpp @@ -46,9 +46,7 @@ TEST(SizesAndStridesTest, DefaultConstructor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(SizesAndStridesTest, SetSizes) { SizesAndStrides sz; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.set_sizes({5, 6, 7, 8}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {5, 6, 7, 8}, {1, 0, 0, 0}); } @@ -62,7 +60,6 @@ TEST(SizesAndStridesTest, Resize) { checkData(sz, {0, 0}, {1, 0}); // Small to small growing, again. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(5); checkData(sz, {0, 0, 0, 0, 0}, {1, 0, 0, 0, 0}); @@ -71,73 +68,52 @@ TEST(SizesAndStridesTest, Resize) { sz.stride_at_unchecked(ii) = 2 * (ii + 1); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 5}, {2, 4, 6, 8, 10}); // Small to small, shrinking. sz.resize(4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4}, {2, 4, 6, 8}); // Small to small with no size change. sz.resize(4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4}, {2, 4, 6, 8}); // Small to small, growing back so that we can confirm that our "new" // data really does get zeroed. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0}, {2, 4, 6, 8, 0}); // Small to big. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 0}, {2, 4, 6, 8, 0, 0}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.size_at_unchecked(5) = 6; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.stride_at_unchecked(5) = 12; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 6}, {2, 4, 6, 8, 0, 12}); // Big to big, growing. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(7); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 6, 0}, {2, 4, 6, 8, 0, 12, 0}); // Big to big with no size change. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(7); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 6, 0}, {2, 4, 6, 8, 0, 12, 0}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.size_at_unchecked(6) = 11; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.stride_at_unchecked(6) = 22; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 6, 11}, {2, 4, 6, 8, 0, 12, 22}); // Big to big, shrinking. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 6}, {2, 4, 6, 8, 0, 12}); // Grow back to make sure "new" elements get zeroed in big mode too. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(7); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {1, 2, 3, 4, 0, 6, 0}, {2, 4, 6, 8, 0, 12, 0}); // Finally, big to small. @@ -150,12 +126,9 @@ TEST(SizesAndStridesTest, Resize) { sz.stride_at_unchecked(ii) = 2 * (ii - 1); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {-1, 0, 1, 2, 3, 4, 5}, {-2, 0, 2, 4, 6, 8, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {-1, 0, 1, 2, 3}, {-2, 0, 2, 4, 6}); } @@ -163,24 +136,16 @@ TEST(SizesAndStridesTest, Resize) { TEST(SizesAndStridesTest, SetAtIndex) { SizesAndStrides sz; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.size_at(4) = 42; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.stride_at(4) = 23; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {0, 0, 0, 0, 42}, {1, 0, 0, 0, 23}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.size_at(5) = 43; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.stride_at(5) = 24; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {0, 0, 0, 0, 42, 43}, {1, 0, 0, 0, 23, 24}); } @@ -188,24 +153,16 @@ TEST(SizesAndStridesTest, SetAtIndex) { TEST(SizesAndStridesTest, SetAtIterator) { SizesAndStrides sz; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.sizes_begin() + 4) = 42; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.strides_begin() + 4) = 23; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {0, 0, 0, 0, 42}, {1, 0, 0, 0, 23}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(6); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.sizes_begin() + 5) = 43; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.strides_begin() + 5) = 24; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {0, 0, 0, 0, 42, 43}, {1, 0, 0, 0, 23, 24}); } @@ -213,24 +170,16 @@ TEST(SizesAndStridesTest, SetAtIterator) { TEST(SizesAndStridesTest, SetViaData) { SizesAndStrides sz; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.sizes_data() + 4) = 42; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.strides_data() + 4) = 23; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {0, 0, 0, 0, 42}, {1, 0, 0, 0, 23}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sz.resize(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.sizes_data() + 5) = 43; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *(sz.strides_data() + 5) = 24; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkData(sz, {0, 0, 0, 0, 42, 43}, {1, 0, 0, 0, 23, 24}); } @@ -247,7 +196,6 @@ static SizesAndStrides makeSmall(int offset = 0) { static SizesAndStrides makeBig(int offset = 0) { SizesAndStrides big; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) big.resize(8); for (const auto ii : c10::irange(big.size())) { big.size_at_unchecked(ii) = ii - 1 + offset; @@ -267,9 +215,7 @@ static void checkSmall(const SizesAndStrides& sm, int offset = 0) { } static void checkBig(const SizesAndStrides& big, int offset = 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector sizes(8), strides(8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int ii = 0; ii < 8; ++ii) { sizes[ii] = ii - 1 + offset; strides[ii] = 2 * (ii - 1 + offset); diff --git a/c10/test/util/Array_test.cpp b/c10/test/util/Array_test.cpp index be265132d4376..d9c345cf6103b 100644 --- a/c10/test/util/Array_test.cpp +++ b/c10/test/util/Array_test.cpp @@ -88,9 +88,7 @@ static_assert(array{{3}} == prepend(3, array{{}}), ""); namespace test_to_std_array { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) constexpr int obj2[3] = {3, 5, 6}; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(array{{3, 5, 6}} == to_array(obj2), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(array{{3, 5, 6}} == to_array({3, 5, 6}), ""); } // namespace test_to_std_array diff --git a/c10/test/util/Bitset_test.cpp b/c10/test/util/Bitset_test.cpp index 4b5f46bcc7d8e..629d4ee097cdf 100644 --- a/c10/test/util/Bitset_test.cpp +++ b/c10/test/util/Bitset_test.cpp @@ -34,7 +34,6 @@ TEST(BitsetTest, givenEmptyBitset_whenSettingAndUnsettingBit_thenIsZero) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BitsetTest, givenEmptyBitset_whenSettingBit_thenIsSet) { bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(6); EXPECT_TRUE(b.get(6)); } @@ -42,13 +41,10 @@ TEST(BitsetTest, givenEmptyBitset_whenSettingBit_thenIsSet) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BitsetTest, givenEmptyBitset_whenSettingBit_thenOthersStayUnset) { bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 6; ++i) { EXPECT_FALSE(b.get(i)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 7; i < 
bitset::NUM_BITS(); ++i) { EXPECT_FALSE(b.get(i)); } @@ -57,9 +53,7 @@ TEST(BitsetTest, givenEmptyBitset_whenSettingBit_thenOthersStayUnset) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BitsetTest, givenNonemptyBitset_whenSettingBit_thenIsSet) { bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(30); EXPECT_TRUE(b.get(30)); } @@ -67,19 +61,14 @@ TEST(BitsetTest, givenNonemptyBitset_whenSettingBit_thenIsSet) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BitsetTest, givenNonemptyBitset_whenSettingBit_thenOthersStayAtOldValue) { bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(30); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 6; ++i) { EXPECT_FALSE(b.get(i)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 7; i < 30; ++i) { EXPECT_FALSE(b.get(i)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 31; i < bitset::NUM_BITS(); ++i) { EXPECT_FALSE(b.get(i)); } @@ -88,11 +77,8 @@ TEST(BitsetTest, givenNonemptyBitset_whenSettingBit_thenOthersStayAtOldValue) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BitsetTest, givenNonemptyBitset_whenUnsettingBit_thenIsUnset) { bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(30); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.unset(6); EXPECT_FALSE(b.get(6)); } @@ -102,18 +88,13 @@ TEST( BitsetTest, givenNonemptyBitset_whenUnsettingBit_thenOthersStayAtOldValue) { bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(30); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.unset(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 30; ++i) { EXPECT_FALSE(b.get(i)); } EXPECT_TRUE(b.get(30)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 31; i < bitset::NUM_BITS(); ++i) { EXPECT_FALSE(b.get(i)); } @@ -148,10 +129,8 @@ TEST( givenBitsetWithOneBitSet_whenCallingForEachBit_thenCallsForEachBit) { IndexCallbackMock callback; bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(5); b.for_each_set_bit(callback); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) callback.expect_was_called_for_indices({5}); } @@ -161,21 +140,14 @@ TEST( givenBitsetWithMultipleBitsSet_whenCallingForEachBit_thenCallsForEachBit) { IndexCallbackMock callback; bitset b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(5); b.set(2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(25); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(50); b.set(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.unset(25); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.set(10); b.for_each_set_bit(callback); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) callback.expect_was_called_for_indices({0, 2, 5, 10, 32, 50}); } diff --git a/c10/test/util/C++17_test.cpp b/c10/test/util/C++17_test.cpp index 5a10d12f56c6b..afb151e81a24b 100644 --- a/c10/test/util/C++17_test.cpp +++ 
b/c10/test/util/C++17_test.cpp @@ -5,23 +5,17 @@ namespace { namespace test_min { using c10::guts::min; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(min(3, 5) == 3, ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(min(5, 3) == 3, ""); static_assert(min(3, 3) == 3, ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(min(3.0, 3.1) == 3.0, ""); } // namespace test_min namespace test_max { using c10::guts::max; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(max(3, 5) == 5, ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(max(5, 3) == 5, ""); static_assert(max(3, 3) == 3, ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(max(3.0, 3.1) == 3.1, ""); } // namespace test_max @@ -107,7 +101,6 @@ TEST(if_constexpr, otherCaseCanHaveInvalidCode) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(if_constexpr, worksWithoutElseCase_withIdentityArg) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int var = 5; if_constexpr([&](auto) { var = 3; }); EXPECT_EQ(5, var); @@ -117,7 +110,6 @@ TEST(if_constexpr, worksWithoutElseCase_withIdentityArg) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(if_constexpr, worksWithoutElseCase_withoutIdentityArg) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int var = 5; if_constexpr([&] { var = 3; }); EXPECT_EQ(5, var); diff --git a/c10/test/util/ConstexprCrc_test.cpp b/c10/test/util/ConstexprCrc_test.cpp index cdff85d6e70ac..837cc6e92a112 100644 --- a/c10/test/util/ConstexprCrc_test.cpp +++ b/c10/test/util/ConstexprCrc_test.cpp @@ -14,5 +14,4 @@ static_assert( // check concrete expected values (for CRC64 with Jones coefficients and an init // value of 0) static_assert(crc64_t{0} == crc64(""), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(crc64_t{0xe9c6d914c4b8d9ca} == crc64("123456789"), ""); diff --git a/c10/test/util/Half_test.cpp b/c10/test/util/Half_test.cpp index b008aee9c931b..8a44e23a0e769 100644 --- a/c10/test/util/Half_test.cpp +++ b/c10/test/util/Half_test.cpp @@ -6,40 +6,29 @@ namespace { namespace half_legacy_impl { float halfbits2float(unsigned short h) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) unsigned sign = ((h >> 15) & 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) unsigned exponent = ((h >> 10) & 0x1f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) unsigned mantissa = ((h & 0x3ff) << 13); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (exponent == 0x1f) { /* NaN or Inf */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mantissa = (mantissa ? 
(sign = 0, 0x7fffff) : 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) exponent = 0xff; } else if (!exponent) { /* Denorm or Zero */ if (mantissa) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) unsigned int msb; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) exponent = 0x71; do { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) msb = (mantissa & 0x400000); mantissa <<= 1; /* normalize */ --exponent; } while (!msb); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mantissa &= 0x7fffff; /* 1.mantissa is implicit */ } } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) exponent += 0x70; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) unsigned result_bit = (sign << 31) | (exponent << 23) | mantissa; // Reinterpret the result bit pattern as a float @@ -61,42 +50,29 @@ unsigned short float2halfbits(float src) { unsigned sign, exponent, mantissa; // Get rid of +NaN/-NaN case first. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (u > 0x7f800000) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 0x7fffU; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sign = ((x >> 16) & 0x8000); // Get rid of +Inf/-Inf, +0/-0. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (u > 0x477fefff) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return sign | 0x7c00U; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (u < 0x33000001) { return (sign | 0x0000); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) exponent = ((u >> 23) & 0xff); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mantissa = (u & 0x7fffff); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (exponent > 0x70) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shift = 13; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) exponent -= 0x70; } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shift = 0x7e - exponent; exponent = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mantissa |= 0x800000; } lsb = (1 << shift); @@ -108,14 +84,12 @@ unsigned short float2halfbits(float src) { mantissa >>= shift; if (remainder > lsb_s1 || (remainder == lsb_s1 && (mantissa & 0x1))) { ++mantissa; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!(mantissa & 0x3ff)) { ++exponent; mantissa = 0; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (sign | (exponent << 10) | mantissa); }; } // namespace half_legacy_impl @@ -123,11 +97,8 @@ unsigned short float2halfbits(float src) { TEST(HalfDoubleConversionTest, Half2Double) { std::vector inputs = { 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0xfbff, // 1111 1011 1111 1111 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (1 << 15 | 1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x7bff // 0111 1011 1111 1111 }; for (auto x : inputs) { diff --git a/c10/test/util/LeftRight_test.cpp b/c10/test/util/LeftRight_test.cpp index e113e9cb65fe7..1b5c47deab1e7 100644 --- a/c10/test/util/LeftRight_test.cpp +++ b/c10/test/util/LeftRight_test.cpp @@ -9,7 +9,6 @@ using std::vector; TEST(LeftRightTest, givenInt_whenWritingAndReading_thenChangesArePresent) { LeftRight obj; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) obj.write([](int& obj) { obj = 5; }); int read = obj.read([](const int& obj) { return obj; }); EXPECT_EQ(5, read); @@ -24,12 +23,10 @@ TEST(LeftRightTest, 
givenInt_whenWritingAndReading_thenChangesArePresent) { TEST(LeftRightTest, givenVector_whenWritingAndReading_thenChangesArePresent) { LeftRight> obj; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) obj.write([](vector& obj) { obj.push_back(5); }); vector read = obj.read([](const vector& obj) { return obj; }); EXPECT_EQ((vector{5}), read); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) obj.write([](vector& obj) { obj.push_back(6); }); read = obj.read([](const vector& obj) { return obj; }); EXPECT_EQ((vector{5, 6}), read); @@ -39,7 +36,6 @@ TEST(LeftRightTest, givenVector_whenWritingAndReading_thenChangesArePresent) { TEST(LeftRightTest, givenVector_whenWritingReturnsValue_thenValueIsReturned) { LeftRight> obj; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = obj.write([](vector&) -> int { return 5; }); static_assert(std::is_same::value, ""); EXPECT_EQ(5, a); @@ -137,7 +133,6 @@ TEST(LeftRightTest, writesCannotBeConcurrentWithWrites) { std::thread writer1([&]() { obj.write([&](int&) { first_writer_started = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(50)); first_writer_finished = true; }); @@ -184,7 +179,6 @@ TEST( givenInt_whenWriteThrowsExceptionOnFirstCall_thenResetsToOldState) { LeftRight obj; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) obj.write([](int& obj) { obj = 5; }); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) @@ -213,7 +207,6 @@ TEST( givenInt_whenWriteThrowsExceptionOnSecondCall_thenKeepsNewState) { LeftRight obj; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) obj.write([](int& obj) { obj = 5; }); bool write_called = false; @@ -244,7 +237,6 @@ TEST( TEST(LeftRightTest, givenVector_whenWriteThrowsException_thenResetsToOldState) { LeftRight> obj; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) obj.write([](vector& obj) { obj.push_back(5); }); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) diff --git a/c10/test/util/Metaprogramming_test.cpp b/c10/test/util/Metaprogramming_test.cpp index bcce22d634b82..e45931d2fce56 100644 --- a/c10/test/util/Metaprogramming_test.cpp +++ b/c10/test/util/Metaprogramming_test.cpp @@ -116,13 +116,10 @@ class MyClass {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, ExtractArgByFilteredIndex) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a1 = extract_arg_by_filtered_index( 3, "bla", MyClass(), 4, nullptr, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a2 = extract_arg_by_filtered_index( 3, "bla", MyClass(), 4, nullptr, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a3 = extract_arg_by_filtered_index( 3, "bla", MyClass(), 4, nullptr, 5); EXPECT_EQ(3, a1); @@ -207,7 +204,6 @@ struct map_to_double { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, FilterMap) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = filter_map( map_to_double(), 3, "bla", MyClass(), 4, nullptr, 5); static_assert(std::is_same, decltype(result)>::value, ""); @@ -240,7 +236,6 @@ TEST(MetaprogrammingTest, FilterMap_movableOnly_byRValue) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = filter_map( map_movable_by_rvalue(), MovableOnly(5), @@ -262,7 +257,6 @@ TEST(MetaprogrammingTest, FilterMap_movableOnly_byValue) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto 
result = filter_map( map_movable_by_lvalue(), MovableOnly(5), @@ -366,20 +360,16 @@ namespace test_tuple_elements { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleElements_subsetSelection) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0); auto y = tuple_elements(x, std::index_sequence<0, 2>()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto z = std::make_tuple(0, 2.0); EXPECT_EQ(y, z); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleElements_reorderSelection) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0); auto y = tuple_elements(x, std::index_sequence<0, 2, 1>()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto z = std::make_tuple(0, 2.0, "HEY"); EXPECT_EQ(y, z); } @@ -390,7 +380,6 @@ namespace test_tuple_take { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleTake_nonemptyPrefix) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0); auto y = tuple_take(x); auto z = std::make_tuple(0, "HEY"); @@ -399,7 +388,6 @@ TEST(MetaprogrammingTest, TupleTake_nonemptyPrefix) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleTake_fullPrefix) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0); auto y = tuple_take(x); EXPECT_EQ(x, y); @@ -407,10 +395,8 @@ TEST(MetaprogrammingTest, TupleTake_fullPrefix) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleTake_negative) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0); auto y = tuple_take(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto z = std::make_tuple("HEY", 2.0); EXPECT_EQ(y, z); } @@ -419,17 +405,14 @@ TEST(MetaprogrammingTest, TupleTake_negative) { namespace test_tuple_slice { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleSlice_middle) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0, false); auto y = tuple_slice(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto z = std::make_tuple("HEY", 2.0); EXPECT_EQ(y, z); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleSlice_full) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = std::make_tuple(0, "HEY", 2.0); auto y = tuple_slice(x); EXPECT_EQ(x, y); @@ -439,7 +422,6 @@ TEST(MetaprogrammingTest, TupleSlice_full) { namespace test_tuple_map { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleMap_simple) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_map( std::tuple(3, 4, 5), [](int32_t a) -> int16_t { return a + 1; }); @@ -454,7 +436,6 @@ TEST(MetaprogrammingTest, TupleMap_simple) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleMap_mapperTakesDifferentButConvertibleType) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_map( std::tuple(3, 4, 5), [](int64_t a) -> int16_t { return a + 1; }); @@ -469,7 +450,6 @@ TEST(MetaprogrammingTest, TupleMap_mapperTakesDifferentButConvertibleType) 
{ // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleMap_mapperTakesConstRef) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_map( std::tuple(3, 4, 5), [](const int32_t& a) -> int16_t { return a + 1; }); @@ -524,7 +504,6 @@ TEST(MetaprogrammingTest, TupleMap_differentiatesLRValueReferences) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleMap_canWorkWithMovableOnlyType) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_map( std::tuple(MovableOnly(7)), [](MovableOnly a) { return a; }); static_assert( @@ -560,7 +539,6 @@ TEST(MetaprogrammingTest, TupleMap_doesntUnecessarilyMoveValues) { TEST(MetaprogrammingTest, TupleMap_canBeUsedWithAutoLambdas) { struct A final { int32_t func() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 5; } }; @@ -601,7 +579,6 @@ TEST(MetaprogrammingTest, TupleConcat_onenonemptytuple) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleConcat_twotuples) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_concat( std::tuple(3, "4"), std::tuple(2.3, 15)); @@ -618,7 +595,6 @@ TEST(MetaprogrammingTest, TupleConcat_twotuples) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleConcat_threetuples) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_concat( std::tuple(3, "4"), std::tuple(2.3, 15), @@ -638,7 +614,6 @@ TEST(MetaprogrammingTest, TupleConcat_threetuples) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleConcat_emptytupleatbeginning) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_concat( std::tuple<>(), std::tuple(2.3, 15), @@ -656,7 +631,6 @@ TEST(MetaprogrammingTest, TupleConcat_emptytupleatbeginning) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleConcat_emptytupleinmiddle) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_concat( std::tuple(2.3, 15), std::tuple<>(), @@ -674,7 +648,6 @@ TEST(MetaprogrammingTest, TupleConcat_emptytupleinmiddle) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleConcat_emptytupleatend) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto result = tuple_concat( std::tuple(2.3, 15), std::tuple("5", 3.2), @@ -692,12 +665,9 @@ TEST(MetaprogrammingTest, TupleConcat_emptytupleatend) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MetaprogrammingTest, TupleConcat_workswithreferencesandpointers) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double val1 = 2.3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int16_t val2 = 15; std::string val3 = "hello"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float val4 = 3.2; auto result = tuple_concat( std::tuple(val1, val2), @@ -797,7 +767,6 @@ static_assert( index_sequence<4, 2>, index_sequence<>>>::value, ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert( std::is_same< index_sequence<4, 2, 9>, @@ -807,7 +776,6 @@ static_assert( index_sequence<9>>>::value, ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert( std::is_same< integer_sequence, diff --git a/c10/test/util/TypeIndex_test.cpp 
b/c10/test/util/TypeIndex_test.cpp index 2f0c757a8a3a2..fdc88c3c1e252 100644 --- a/c10/test/util/TypeIndex_test.cpp +++ b/c10/test/util/TypeIndex_test.cpp @@ -120,7 +120,6 @@ struct Class final {}; #if C10_TYPENAME_SUPPORTS_CONSTEXPR static_assert( string_view::npos != - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) get_fully_qualified_type_name>().find("38474355"), ""); #endif diff --git a/c10/test/util/TypeList_test.cpp b/c10/test/util/TypeList_test.cpp index 0b38aeb386289..80bf5d5066db0 100644 --- a/c10/test/util/TypeList_test.cpp +++ b/c10/test/util/TypeList_test.cpp @@ -215,7 +215,6 @@ struct map_to_size { TEST(TypeListTest, MapTypesToValues_sametype) { auto sizes = map_types_to_values>(map_to_size()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::tuple expected(8, 1, 4); static_assert(std::is_same::value, ""); EXPECT_EQ(expected, sizes); @@ -244,7 +243,6 @@ struct Class1 { return 3; } }; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) struct Class2 { static double func() { return 2.0; @@ -262,7 +260,6 @@ struct mapper_call_func { TEST(TypeListTest, MapTypesToValues_members) { auto result = map_types_to_values>(mapper_call_func()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::tuple expected(3, 2.0); static_assert(std::is_same::value, ""); EXPECT_EQ(expected, result); diff --git a/c10/test/util/accumulate_test.cpp b/c10/test/util/accumulate_test.cpp index 02de9ea950bca..2668f65a22397 100644 --- a/c10/test/util/accumulate_test.cpp +++ b/c10/test/util/accumulate_test.cpp @@ -11,7 +11,6 @@ using namespace ::testing; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(accumulate_test, vector_test) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector ints = {1, 2, 3, 4, 5}; EXPECT_EQ(c10::sum_integers(ints), 1 + 2 + 3 + 4 + 5); @@ -33,7 +32,6 @@ TEST(accumulate_test, vector_test) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(accumulate_test, list_test) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::list ints = {1, 2, 3, 4, 5}; EXPECT_EQ(c10::sum_integers(ints), 1 + 2 + 3 + 4 + 5); @@ -59,7 +57,6 @@ TEST(accumulate_test, base_cases) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(accumulate_test, errors) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector ints = {1, 2, 3, 4, 5}; #ifndef NDEBUG diff --git a/c10/test/util/bfloat16_test.cpp b/c10/test/util/bfloat16_test.cpp index 0cde27587fa89..98816c7df24ac 100644 --- a/c10/test/util/bfloat16_test.cpp +++ b/c10/test/util/bfloat16_test.cpp @@ -7,10 +7,8 @@ float float_from_bytes(uint32_t sign, uint32_t exponent, uint32_t fraction) { uint32_t bytes; bytes = 0; bytes |= sign; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bytes <<= 8; bytes |= exponent; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bytes <<= 23; bytes |= fraction; @@ -24,7 +22,6 @@ float float_from_bytes(uint32_t sign, uint32_t exponent, uint32_t fraction) { TEST(BFloat16Conversion, FloatToBFloat16AndBack) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays) float in[100]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; ++i) { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers) in[i] = i + 1.25; @@ -35,7 +32,6 @@ TEST(BFloat16Conversion, FloatToBFloat16AndBack) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays) float out[100]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; ++i) { bfloats[i].x = c10::detail::bits_from_f32(in[i]); out[i] = c10::detail::f32_from_bits(bfloats[i].x); @@ -50,7 +46,6 @@ TEST(BFloat16Conversion, FloatToBFloat16AndBack) { TEST(BFloat16Conversion, FloatToBFloat16RNEAndBack) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays) float in[100]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; ++i) { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers) in[i] = i + 1.25; @@ -61,7 +56,6 @@ TEST(BFloat16Conversion, FloatToBFloat16RNEAndBack) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays) float out[100]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; ++i) { bfloats[i].x = c10::detail::round_to_nearest_even(in[i]); out[i] = c10::detail::f32_from_bits(bfloats[i].x); @@ -74,7 +68,6 @@ TEST(BFloat16Conversion, FloatToBFloat16RNEAndBack) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BFloat16Conversion, NaN) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float inNaN = float_from_bytes(0, 0xFF, 0x7FFFFF); EXPECT_TRUE(std::isnan(inNaN)); @@ -86,7 +79,6 @@ TEST(BFloat16Conversion, NaN) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BFloat16Conversion, Inf) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float inInf = float_from_bytes(0, 0xFF, 0); EXPECT_TRUE(std::isinf(inInf)); @@ -114,13 +106,11 @@ TEST(BFloat16Math, Addition) { // input bits // S | Exponent | Mantissa // 0 | 10000000 | 10010000000000000000000 = 3.125 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float input = float_from_bytes(0, 0, 0x40480000); // expected bits // S | Exponent | Mantissa // 0 | 10000001 | 10010000000000000000000 = 6.25 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float expected = float_from_bytes(0, 0, 0x40c80000); // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) @@ -140,19 +130,16 @@ TEST(BFloat16Math, Subtraction) { // input bits // S | Exponent | Mantissa // 0 | 10000001 | 11101000000000000000000 = 7.625 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float input = float_from_bytes(0, 0, 0x40f40000); // expected bits // S | Exponent | Mantissa // 0 | 10000000 | 01010000000000000000000 = 2.625 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float expected = float_from_bytes(0, 0, 0x40280000); // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) c10::BFloat16 b; b.x = c10::detail::bits_from_f32(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b = b - 5; float res = c10::detail::f32_from_bits(b.x); diff --git a/c10/test/util/either_test.cpp b/c10/test/util/either_test.cpp index a2bb42fc55ce2..4e1a99bf17889 100644 --- a/c10/test/util/either_test.cpp +++ b/c10/test/util/either_test.cpp @@ -251,18 +251,15 @@ TEST(EitherTest, givenMultiParamMakeLeft) { test_with_matrix( { [](std::function, string>&)> test) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) either, string> a = make_left, string>(5, 6); test(a); }, [](std::function, string>&)> test) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto 
a = make_left, string>(5, 6); test(a); }, }, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EXPECT_IS_LEFT, string>(pair(5, 6))); } @@ -270,16 +267,13 @@ TEST(EitherTest, givenMultiParamMakeLeft) { TEST(EitherTest, givenMultiParamMakeRight) { test_with_matrix( {[](std::function>&)> test) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) either> a = make_right>(5, 6); test(a); }, [](std::function>&)> test) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = make_right>(5, 6); test(a); }}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EXPECT_IS_RIGHT>(pair(5, 6))); } @@ -1029,17 +1023,14 @@ TEST(EitherTest, givenLeft_whenModified_thenValueIsChanged) { test_with_matrix( {[](std::function&)> test) { either a(4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.left() = 5; test(a); }, [](std::function&)> test) { either a(4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.left() = 5; test(a); }}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EXPECT_IS_LEFT(5)); } diff --git a/c10/test/util/intrusive_ptr_test.cpp b/c10/test/util/intrusive_ptr_test.cpp index a17582f8d7cb6..f2244e27769fe 100644 --- a/c10/test/util/intrusive_ptr_test.cpp +++ b/c10/test/util/intrusive_ptr_test.cpp @@ -104,7 +104,6 @@ TEST(MakeIntrusiveTest, ClassWith0Parameters) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MakeIntrusiveTest, ClassWith1Parameter) { intrusive_ptr var = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_intrusive(5); EXPECT_EQ(5, var->param); } @@ -112,7 +111,6 @@ TEST(MakeIntrusiveTest, ClassWith1Parameter) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MakeIntrusiveTest, ClassWith2Parameters) { intrusive_ptr var = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_intrusive(7, 2); EXPECT_EQ(7, var->param1); EXPECT_EQ(2, var->param2); @@ -139,7 +137,6 @@ TEST(IntrusivePtrTargetTest, whenAllocatedOnStack_thenDoesntCrash) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IntrusivePtrTest, givenValidPtr_whenCallingGet_thenReturnsObject) { intrusive_ptr obj = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_intrusive(5); EXPECT_EQ(5, obj.get()->param); } @@ -160,7 +157,6 @@ TEST(IntrusivePtrTest, givenInvalidPtr_whenCallingGet_thenReturnsNullptr) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IntrusivePtrTest, givenValidPtr_whenDereferencing_thenReturnsObject) { intrusive_ptr obj = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_intrusive(5); EXPECT_EQ(5, (*obj).param); } @@ -296,7 +292,6 @@ TEST( TEST( IntrusivePtrTest, givenInvalidPtr_whenMoveAssigningToBaseClass_thenNewInstanceIsValid) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intrusive_ptr obj1 = make_intrusive(5); intrusive_ptr obj2; // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -309,7 +304,6 @@ TEST( TEST( IntrusivePtrTest, givenInvalidPtr_whenMoveAssigningToBaseClass_thenPointsToSameObject) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intrusive_ptr obj1 = make_intrusive(5); intrusive_ptr obj2; SomeBaseClass* obj1ptr = obj1.get(); @@ -407,7 +401,6 @@ TEST( IntrusivePtrTest, givenValidPtr_whenCopyAssigningToBaseClass_thenPointsToSameObject) { intrusive_ptr child = make_intrusive(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intrusive_ptr base = make_intrusive(10); base = child; EXPECT_EQ(3, base->v); @@ -418,7 +411,6 @@ TEST( 
IntrusivePtrTest, givenValidPtr_whenCopyAssigningToBaseClass_thenOldInstanceInvalid) { intrusive_ptr obj1 = make_intrusive(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intrusive_ptr obj2 = make_intrusive(10); obj2 = obj1; EXPECT_TRUE(obj1.defined()); @@ -428,7 +420,6 @@ TEST( TEST( IntrusivePtrTest, givenInvalidPtr_whenCopyAssigningToBaseClass_thenNewInstanceIsValid) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intrusive_ptr obj1 = make_intrusive(5); intrusive_ptr obj2; // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -441,7 +432,6 @@ TEST( TEST( IntrusivePtrTest, givenInvalidPtr_whenCopyAssigningToBaseClass_thenPointsToSameObject) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intrusive_ptr obj1 = make_intrusive(5); intrusive_ptr obj2; SomeBaseClass* obj1ptr = obj1.get(); @@ -734,7 +724,6 @@ TEST(IntrusivePtrTest, SwapMethodInvalidWithInvalid) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IntrusivePtrTest, CanBePutInContainer) { std::vector> vec; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vec.push_back(make_intrusive(5)); EXPECT_EQ(5, vec[0]->param); } @@ -742,7 +731,6 @@ TEST(IntrusivePtrTest, CanBePutInContainer) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IntrusivePtrTest, CanBePutInSet) { std::set> set; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) set.insert(make_intrusive(5)); EXPECT_EQ(5, (*set.begin())->param); } @@ -750,7 +738,6 @@ TEST(IntrusivePtrTest, CanBePutInSet) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IntrusivePtrTest, CanBePutInUnorderedSet) { std::unordered_set> set; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) set.insert(make_intrusive(5)); EXPECT_EQ(5, (*set.begin())->param); } @@ -762,7 +749,6 @@ TEST(IntrusivePtrTest, CanBePutInMap) { intrusive_ptr> map; map.insert(std::make_pair( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_intrusive(5), make_intrusive(3))); EXPECT_EQ(5, map.begin()->first->param); @@ -777,7 +763,6 @@ TEST(IntrusivePtrTest, CanBePutInUnorderedMap) { map; map.insert(std::make_pair( make_intrusive(3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_intrusive(5))); EXPECT_EQ(3, map.begin()->first->param); EXPECT_EQ(5, map.begin()->second->param); @@ -2086,7 +2071,6 @@ TEST( WeakIntrusivePtrTest, givenInvalidPtr_whenMoveAssigningToBaseClass_thenNewInstanceIsValid) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_invalid_weak(); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -2100,7 +2084,6 @@ TEST( WeakIntrusivePtrTest, givenInvalidPtr_whenMoveAssigningToBaseClass_thenPointsToSameObject) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_invalid_weak(); SomeBaseClass* obj1ptr = obj1.weak.lock().get(); @@ -2125,7 +2108,6 @@ TEST( WeakIntrusivePtrTest, givenWeakOnlyPtr_whenMoveAssigningToBaseClass_thenNewInstanceIsValid) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_weak_only(2); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -2139,7 +2121,6 @@ TEST( WeakIntrusivePtrTest, givenWeakOnlyPtr_whenMoveAssigningToBaseClass_thenPointsToSameObject) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_weak_only(2); SomeBaseClass* obj1ptr = obj1.weak.lock().get(); @@ -2152,7 +2133,6 @@ TEST( TEST( WeakIntrusivePtrTest, givenWeakOnlyPtr_whenMoveAssigningInvalidPtrToBaseClass_thenNewInstanceIsValid) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weak_intrusive_ptr obj1 = make_weak_only(5); IntrusiveAndWeak obj2 = make_weak_intrusive(2); EXPECT_FALSE(obj2.weak.expired()); @@ -2289,7 +2269,6 @@ TEST( givenValidPtr_whenCopyAssigningToBaseClass_thenPointsToSameObject) { IntrusiveAndWeak child = make_weak_intrusive(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IntrusiveAndWeak base = make_weak_intrusive(10); base.weak = child.weak; EXPECT_EQ(3, base.weak.lock()->v); @@ -2301,7 +2280,6 @@ TEST( givenValidPtr_whenCopyAssigningToBaseClass_thenOldInstanceInvalid) { IntrusiveAndWeak obj1 = make_weak_intrusive(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IntrusiveAndWeak obj2 = make_weak_intrusive(10); obj2.weak = obj1.weak; EXPECT_FALSE(obj1.weak.expired()); @@ -2312,7 +2290,6 @@ TEST( WeakIntrusivePtrTest, givenInvalidPtr_whenCopyAssigningToBaseClass_thenNewInstanceIsValid) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_invalid_weak(); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -2326,7 +2303,6 @@ TEST( WeakIntrusivePtrTest, givenInvalidPtr_whenCopyAssigningToBaseClass_thenPointsToSameObject) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_invalid_weak(); SomeBaseClass* obj1ptr = obj1.weak.lock().get(); @@ -2351,7 +2327,6 @@ TEST( WeakIntrusivePtrTest, givenWeakOnlyPtr_whenCopyAssigningToBaseClass_thenNewInstanceIsValid) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_weak_only(2); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -2365,7 +2340,6 @@ TEST( WeakIntrusivePtrTest, givenWeakOnlyPtr_whenCopyAssigningToBaseClass_thenPointsToSameObject) { IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); weak_intrusive_ptr obj2 = make_weak_only(2); SomeBaseClass* obj1ptr = obj1.weak.lock().get(); @@ -2763,7 +2737,6 @@ TEST(WeakIntrusivePtrTest, SwapMethodWeakOnlyPtrWithWeakOnlyPtr) { TEST(WeakIntrusivePtrTest, CanBePutInContainer) { std::vector> vec; IntrusiveAndWeak obj = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); vec.push_back(obj.weak); EXPECT_EQ(5, vec[0].lock()->param); @@ -2773,7 +2746,6 @@ TEST(WeakIntrusivePtrTest, CanBePutInContainer) { TEST(WeakIntrusivePtrTest, CanBePutInSet) { std::set> set; IntrusiveAndWeak obj = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); set.insert(obj.weak); EXPECT_EQ(5, set.begin()->lock()->param); @@ -2783,7 +2755,6 @@ TEST(WeakIntrusivePtrTest, CanBePutInSet) { TEST(WeakIntrusivePtrTest, CanBePutInUnorderedSet) { std::unordered_set> set; IntrusiveAndWeak obj = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); set.insert(obj.weak); EXPECT_EQ(5, set.begin()->lock()->param); @@ -2796,7 +2767,6 @@ TEST(WeakIntrusivePtrTest, CanBePutInMap) { weak_intrusive_ptr> map; IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); IntrusiveAndWeak obj2 = 
make_weak_intrusive(3); @@ -2812,7 +2782,6 @@ TEST(WeakIntrusivePtrTest, CanBePutInUnorderedMap) { weak_intrusive_ptr> map; IntrusiveAndWeak obj1 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) make_weak_intrusive(5); IntrusiveAndWeak obj2 = make_weak_intrusive(3); diff --git a/c10/test/util/irange_test.cpp b/c10/test/util/irange_test.cpp index 274aedd260797..cadbd2d53d3d8 100644 --- a/c10/test/util/irange_test.cpp +++ b/c10/test/util/irange_test.cpp @@ -41,7 +41,6 @@ TEST(irange, empty_reverse_range_two_inputs) { std::vector test_vec; for (const auto i : c10::irange(3, -3)) { test_vec.push_back(i); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (i > 20) { // Cap the number of elements we add if something goes wrong break; } @@ -55,7 +54,6 @@ TEST(irange, empty_reverse_range_one_input) { std::vector test_vec; for (const auto i : c10::irange(-3)) { test_vec.push_back(i); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (i > 20) { // Cap the number of elements we add if something goes wrong break; } diff --git a/c10/test/util/logging_test.cpp b/c10/test/util/logging_test.cpp index a66788012e04b..6b690fe5a0611 100644 --- a/c10/test/util/logging_test.cpp +++ b/c10/test/util/logging_test.cpp @@ -32,7 +32,6 @@ TEST(LoggingTest, TestEnforceFalse) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(LoggingTest, TestEnforceEquals) { int x = 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int y = 5; int z = 0; try { diff --git a/c10/test/util/optional_test.cpp b/c10/test/util/optional_test.cpp index 6595bf46ae53a..e32ab46a0d1f7 100644 --- a/c10/test/util/optional_test.cpp +++ b/c10/test/util/optional_test.cpp @@ -24,7 +24,6 @@ bool getSampleValue() { template <> uint64_t getSampleValue() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 42; } @@ -67,7 +66,6 @@ TYPED_TEST(OptionalTest, Initialized) { optional moveAssign; moveAssign = std::move(moveFrom2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::array opts = { &opt, ©, ©Assign, &move, &moveAssign}; for (auto* popt : opts) { diff --git a/c10/test/util/ordered_preserving_dict_test.cpp b/c10/test/util/ordered_preserving_dict_test.cpp index 11f0646399b9b..a04aeccbf4377 100644 --- a/c10/test/util/ordered_preserving_dict_test.cpp +++ b/c10/test/util/ordered_preserving_dict_test.cpp @@ -15,7 +15,6 @@ using dict_int_int = ska_ordered::order_preserving_flat_hash_map; dict_int_int test_dict(dict_int_int& dict) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int64_t i = 0; i < 100; ++i) { dict[i] = i + 1; } @@ -27,7 +26,6 @@ dict_int_int test_dict(dict_int_int& dict) { } // erase a few entries by themselves - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::unordered_set erase_set = {0, 2, 9, 71}; for (auto erase : erase_set) { dict.erase(erase); @@ -35,12 +33,10 @@ dict_int_int test_dict(dict_int_int& dict) { // erase via iterators auto begin = dict.begin(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 20; ++i) begin++; auto end = begin; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 20; ++i) { erase_set.insert(end->first); end++; @@ -48,7 +44,6 @@ dict_int_int test_dict(dict_int_int& dict) { dict.erase(begin, end); std::vector order; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 100; ++i) { if (!erase_set.count(i)) { order.push_back(i); @@ -133,7 +128,6 @@ TEST(OrderedPreservingDictTest, DictCollisions) 
{ } // erase a few entries; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::unordered_set erase_set = {0, 2, 9}; for (auto erase : erase_set) { dict.erase(erase); @@ -141,12 +135,10 @@ TEST(OrderedPreservingDictTest, DictCollisions) { // erase a few entries via iterator auto begin = dict.begin(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 10; ++i) { begin++; } auto end = begin; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 7; ++i) { erase_set.insert(end->first); end++; @@ -186,7 +178,6 @@ TEST(OrderedPreservingDictTest, test_range_insert) { } dict_int_int map = {{-1, 0}, {-2, 0}}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) map.insert(values.begin() + 10, values.end() - 5); TORCH_INTERNAL_ASSERT(map.size(), 987); @@ -195,7 +186,6 @@ TEST(OrderedPreservingDictTest, test_range_insert) { ASSERT_EQUAL_PRIM(map.at(-2), 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 10, j = 2; i < nb_values - 5; i++, j++) { ASSERT_EQUAL_PRIM(map.at(i), i + 1); } @@ -231,9 +221,7 @@ TEST(OrderedPreservingDictTest, test_range_erase) { } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto it_first = std::next(map.begin(), 10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto it_last = std::next(map.begin(), 220); auto it = map.erase(it_first, it_last); @@ -248,7 +236,6 @@ TEST(OrderedPreservingDictTest, test_range_erase) { // Check order it = map.begin(); for (std::size_t i = 0; i < nb_values; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (i >= 10 && i < 220) { continue; } @@ -363,7 +350,6 @@ TEST(OrderedPreservingDictTest, test_copy_constructor_empty) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OrderedPreservingDictTest, test_copy_operator_empty) { ska_ordered::order_preserving_flat_hash_map map(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ska_ordered::order_preserving_flat_hash_map map_copy(16); map_copy = map; @@ -399,7 +385,6 @@ TEST(OrderedPreservingDictTest, test_at) { */ // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OrderedPreservingDictTest, test_equal_range) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ska_ordered::order_preserving_flat_hash_map map = {{0, 10}, {-2, 20}}; @@ -418,7 +403,6 @@ TEST(OrderedPreservingDictTest, test_equal_range) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OrderedPreservingDictTest, test_access_operator) { // insert x values, use at for known and unknown values. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ska_ordered::order_preserving_flat_hash_map map = {{0, 10}, {-2, 20}}; @@ -434,10 +418,8 @@ TEST(OrderedPreservingDictTest, test_access_operator) { */ // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OrderedPreservingDictTest, test_swap) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ska_ordered::order_preserving_flat_hash_map map = {{1, 10}, {8, 80}, {3, 30}}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ska_ordered::order_preserving_flat_hash_map map2 = {{4, 40}, {5, 50}}; @@ -453,9 +435,7 @@ TEST(OrderedPreservingDictTest, test_swap) { (ska_ordered::order_preserving_flat_hash_map{ {1, 10}, {8, 80}, {3, 30}})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) map.insert({6, 60}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) map2.insert({4, 40}); TORCH_INTERNAL_ASSERT( @@ -470,7 +450,6 @@ TEST(OrderedPreservingDictTest, test_swap) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OrderedPreservingDictTest, test_swap_empty) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ska_ordered::order_preserving_flat_hash_map map = {{1, 10}, {8, 80}, {3, 30}}; ska_ordered::order_preserving_flat_hash_map map2; @@ -487,9 +466,7 @@ TEST(OrderedPreservingDictTest, test_swap_empty) { (ska_ordered::order_preserving_flat_hash_map{ {1, 10}, {8, 80}, {3, 30}})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) map.insert({6, 60}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) map2.insert({4, 40}); TORCH_INTERNAL_ASSERT( diff --git a/c10/test/util/string_view_test.cpp b/c10/test/util/string_view_test.cpp index c5d05d808a8fb..5a8c50f177292 100644 --- a/c10/test/util/string_view_test.cpp +++ b/c10/test/util/string_view_test.cpp @@ -58,7 +58,6 @@ static_assert(string_view() == string_view(""), ""); namespace test_constchar_constructor { static_assert(string_view("").size() == 0, ""); constexpr string_view hello = "hello"; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == hello.size(), ""); static_assert(string_equal("hello", hello.data(), hello.size()), ""); } // namespace test_constchar_constructor @@ -100,7 +99,6 @@ TEST(StringViewTest, testConversionToString) { namespace test_copy_constructor { constexpr string_view hello = "hello"; constexpr string_view copy = hello; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == copy.size(), ""); static_assert(string_equal("hello", copy.data(), copy.size()), ""); } // namespace test_copy_constructor @@ -116,14 +114,11 @@ TEST(StringViewTest, testCopyAssignment) { #if defined(__cpp_constexpr) && __cpp_constexpr >= 201304 { constexpr string_view hello = assign("hello"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == hello.size(), ""); static_assert(string_equal("hello", hello.data(), hello.size()), ""); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == (string_view() = "hello").size(), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_equal("hello", (string_view() = "hello").data(), 5), ""); } @@ -157,7 +152,6 @@ static_assert('e' == *(hello.begin() + 1), ""); static_assert('l' == *(hello.begin() + 2), ""); static_assert('l' == *(hello.begin() + 3), ""); static_assert('o' == *(hello.begin() + 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(hello.end() == hello.begin() + 5, ""); } // namespace 
test_forward_iteration @@ -168,7 +162,6 @@ static_assert('l' == *(hello.rbegin() + 1), ""); static_assert('l' == *(hello.rbegin() + 2), ""); static_assert('e' == *(hello.rbegin() + 3), ""); static_assert('h' == *(hello.rbegin() + 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(hello.rend() == hello.rbegin() + 5, ""); } // namespace test_reverse_iteration @@ -197,22 +190,18 @@ TEST(StringViewTest, whenCallingAccessOperatorOutOfRange_thenThrows) { "string_view::operator[] or string_view::at() out of range. Index: 1, size: 0"); expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { string_view("hello")[5]; }, "string_view::operator[] or string_view::at() out of range. Index: 5, size: 5"); expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { string_view("hello").at(5); }, "string_view::operator[] or string_view::at() out of range. Index: 5, size: 5"); expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { string_view("hello")[100]; }, "string_view::operator[] or string_view::at() out of range. Index: 100, size: 5"); expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { string_view("hello").at(100); }, "string_view::operator[] or string_view::at() out of range. Index: 100, size: 5"); @@ -232,17 +221,14 @@ static_assert('o' == string_view("hello").back(), ""); } // namespace test_front_back namespace test_data { -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_equal("hello", string_view("hello").data(), 5), ""); } // namespace test_data namespace test_size_length { static_assert(0 == string_view("").size(), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("hello").size(), ""); static_assert(0 == string_view("").length(), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("hello").length(), ""); } // namespace test_size_length @@ -265,7 +251,6 @@ TEST(StringViewTest, whenRemovingValidPrefix_thenWorks) { remove_prefix(string_view("hello"), 0) == string_view("hello"), ""); static_assert( remove_prefix(string_view("hello"), 1) == string_view("ello"), ""); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(remove_prefix(string_view("hello"), 5) == string_view(""), ""); #endif @@ -277,7 +262,6 @@ TEST(StringViewTest, whenRemovingValidPrefix_thenWorks) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(StringViewTest, whenRemovingTooLargePrefix_thenThrows) { expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { remove_prefix(string_view("hello"), 6); }, "basic_string_view::remove_prefix: out of range. PrefixLength: 6, size: 5"); } @@ -296,7 +280,6 @@ TEST(StringViewTest, whenRemovingValidSuffix_thenWorks) { remove_suffix(string_view("hello"), 0) == string_view("hello"), ""); static_assert( remove_suffix(string_view("hello"), 1) == string_view("hell"), ""); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(remove_suffix(string_view("hello"), 5) == string_view(""), ""); #endif @@ -308,7 +291,6 @@ TEST(StringViewTest, whenRemovingValidSuffix_thenWorks) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(StringViewTest, whenRemovingTooLargeSuffix_thenThrows) { expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { remove_suffix(string_view("hello"), 6); }, "basic_string_view::remove_suffix: out of range. 
SuffixLength: 6, size: 5"); } @@ -358,7 +340,6 @@ TEST(StringViewTest, whenCopyingFullStringView_thenDestinationHasCorrectData) { string_view data = "hello"; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays) char result[5]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t num_copied = data.copy(result, 5); EXPECT_EQ(5, num_copied); EXPECT_TRUE(string_equal("hello", result, 5)); @@ -379,7 +360,6 @@ TEST(StringViewTest, whenCopyingTooMuch_thenJustCopiesLess) { string_view data = "hello"; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays) char result[100]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t num_copied = data.copy(result, 100, 2); EXPECT_EQ(3, num_copied); EXPECT_TRUE(string_equal("llo", result, 3)); @@ -390,7 +370,6 @@ TEST(StringViewTest, whenCopyingJustAtRange_thenDoesntCrash) { string_view data = "hello"; // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) char result[1]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t num_copied = data.copy(result, 2, 5); EXPECT_EQ(0, num_copied); } @@ -415,7 +394,6 @@ static_assert(string_view("").substr(0, 0) == string_view(""), ""); static_assert(string_view("hello").substr() == string_view("hello"), ""); static_assert(string_view("hello").substr(0) == string_view("hello"), ""); static_assert(string_view("hello").substr(1) == string_view("ello"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_view("hello").substr(5) == string_view(""), ""); static_assert(string_view("hello").substr(0, 0) == string_view(""), ""); @@ -423,22 +401,17 @@ static_assert(string_view("hello").substr(0, 2) == string_view("he"), ""); static_assert(string_view("hello").substr(1, 2) == string_view("el"), ""); static_assert(string_view("hello").substr(4, 1) == string_view("o"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_view("hello").substr(0, 100) == string_view("hello"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_view("hello").substr(1, 100) == string_view("ello"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_view("hello").substr(5, 100) == string_view(""), ""); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(StringViewTest, whenCallingSubstrWithPosOutOfRange_thenThrows) { expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { string_view("hello").substr(6); }, "basic_string_view::substr parameter out of bounds. Index: 6, size: 5"); expectThrows( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [] { string_view("hello").substr(6, 0); }, "basic_string_view::substr parameter out of bounds. 
Index: 6, size: 5"); } @@ -495,11 +468,9 @@ static_assert( 0 > string_view("hello").compare(2, 2, string_view("hello"), 2, 3), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0 < string_view("hello").compare(2, 2, string_view("hellola"), 5, 2), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0 > string_view("hello").compare(2, 2, string_view("hellolz"), 5, 2), ""); } // namespace test_compare_overload3 @@ -540,9 +511,7 @@ static_assert(0 == string_view("").compare(0, 0, "", 0, 0), ""); static_assert(0 == string_view("hello").compare(2, 2, "hello", 2, 2), ""); static_assert(0 < string_view("hello").compare(2, 2, "hello", 2, 1), ""); static_assert(0 > string_view("hello").compare(2, 2, "hello", 2, 3), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(0 < string_view("hello").compare(2, 2, "hellola", 5, 2), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(0 > string_view("hello").compare(2, 2, "hellolz", 5, 2), ""); } // namespace test_compare_overload6 @@ -740,7 +709,6 @@ static_assert(2 == string_view("abc").find('c'), ""); static_assert(2 == string_view("abc").find('c', 1), ""); static_assert(2 == string_view("abc").find('c', 2), ""); static_assert(string_view::npos == string_view("abc").find('c', 3), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_view::npos == string_view("abc").find('a', 100), ""); static_assert(string_view::npos == string_view("abc").find('z'), ""); static_assert(0 == string_view("ababa").find('a'), ""); @@ -749,7 +717,6 @@ static_assert(2 == string_view("ababa").find('a', 1), ""); static_assert(2 == string_view("ababa").find('a', 2), ""); static_assert(4 == string_view("ababa").find('a', 3), ""); static_assert(4 == string_view("ababa").find('a', 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(string_view::npos == string_view("ababa").find('a', 5), ""); } // namespace test_find_overload2 @@ -836,7 +803,6 @@ static_assert(string_view::npos == string_view("abc").rfind('c', 0), ""); static_assert(string_view::npos == string_view("abc").rfind('c', 1), ""); static_assert(2 == string_view("abc").rfind('c', 2), ""); static_assert(2 == string_view("abc").rfind('c', 3), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(0 == string_view("abc").rfind('a', 100), ""); static_assert(string_view::npos == string_view("abc").rfind('z'), ""); static_assert(4 == string_view("ababa").rfind('a'), ""); @@ -845,7 +811,6 @@ static_assert(0 == string_view("ababa").rfind('a', 1), ""); static_assert(2 == string_view("ababa").rfind('a', 2), ""); static_assert(2 == string_view("ababa").rfind('a', 3), ""); static_assert(4 == string_view("ababa").rfind('a', 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(4 == string_view("ababa").rfind('a', 5), ""); } // namespace test_rfind_overload2 @@ -930,7 +895,6 @@ static_assert( string_view::npos == string_view("").find_first_of(string_view("a"), 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_first_of(string_view("abc"), 100), ""); static_assert( @@ -951,7 +915,6 @@ static_assert( 4 == string_view("abcabc").find_first_of(string_view("b"), 3), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_first_of(string_view("c"), 5), ""); static_assert( @@ -979,7 +942,6 @@ static_assert( 
string_view::npos == string_view("abc").find_first_of('c', 3), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("abc").find_first_of('a', 100), ""); static_assert(string_view::npos == string_view("abc").find_first_of('z'), ""); @@ -990,7 +952,6 @@ static_assert(2 == string_view("ababa").find_first_of('a', 2), ""); static_assert(4 == string_view("ababa").find_first_of('a', 3), ""); static_assert(4 == string_view("ababa").find_first_of('a', 4), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("ababa").find_first_of('a', 5), ""); } // namespace test_find_first_of_overload2 @@ -1028,7 +989,6 @@ static_assert( string_view::npos == string_view("").find_first_of("abc", 1, 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_first_of("abcdef", 100, 3), ""); static_assert( @@ -1043,7 +1003,6 @@ static_assert( static_assert(3 == string_view("abcabc").find_first_of("abc", 1, 1), ""); static_assert(4 == string_view("abcabc").find_first_of("bac", 3, 1), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_first_of("cab", 5, 1), ""); static_assert(4 == string_view("abcabc").find_first_of("bccda", 3, 2), ""); static_assert(4 == string_view("abcabc").find_first_of("cbdab", 4, 3), ""); @@ -1066,7 +1025,6 @@ static_assert(1 == string_view("abcabc").find_first_of("cbd"), ""); static_assert(string_view::npos == string_view("").find_first_of("", 1), ""); static_assert(string_view::npos == string_view("").find_first_of("a", 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_first_of("abc", 100), ""); static_assert(string_view::npos == string_view("abc").find_first_of("", 1), ""); @@ -1079,7 +1037,6 @@ static_assert( static_assert(3 == string_view("abcabc").find_first_of("a", 1), ""); static_assert(4 == string_view("abcabc").find_first_of("b", 3), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_first_of("c", 5), ""); static_assert(4 == string_view("abcabc").find_first_of("bc", 3), ""); static_assert(4 == string_view("abcabc").find_first_of("cbd", 4), ""); @@ -1107,11 +1064,8 @@ static_assert( static_assert(3 == string_view("abcabc").find_last_of(string_view("a")), ""); static_assert(4 == string_view("abcabc").find_last_of(string_view("b")), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_of(string_view("c")), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_of(string_view("bc")), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_of(string_view("cbd")), ""); static_assert( @@ -1121,7 +1075,6 @@ static_assert( string_view::npos == string_view("").find_last_of(string_view("a"), 0), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_last_of(string_view("abc"), 100), ""); static_assert( @@ -1158,7 +1111,6 @@ static_assert(string_view::npos == string_view("abc").find_last_of('c', 0), ""); static_assert(string_view::npos == string_view("abc").find_last_of('c', 1), ""); static_assert(2 == string_view("abc").find_last_of('c', 2), ""); static_assert(2 == 
string_view("abc").find_last_of('c', 3), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(0 == string_view("abc").find_last_of('a', 100), ""); static_assert(string_view::npos == string_view("abc").find_last_of('z'), ""); static_assert(4 == string_view("ababa").find_last_of('a'), ""); @@ -1167,7 +1119,6 @@ static_assert(0 == string_view("ababa").find_last_of('a', 1), ""); static_assert(2 == string_view("ababa").find_last_of('a', 2), ""); static_assert(2 == string_view("ababa").find_last_of('a', 3), ""); static_assert(4 == string_view("ababa").find_last_of('a', 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(4 == string_view("ababa").find_last_of('a', 5), ""); } // namespace test_find_last_of_overload2 @@ -1204,15 +1155,12 @@ static_assert( 4 == string_view("abcabc").find_last_of("bca", string_view::npos, 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_last_of("cab", string_view::npos, 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_last_of("bcab", string_view::npos, 2), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_last_of("cbdac", string_view::npos, 3), ""); @@ -1223,7 +1171,6 @@ static_assert( string_view::npos == string_view("").find_last_of("abc", 0, 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_last_of("abcdef", 100, 3), ""); static_assert( @@ -1253,17 +1200,13 @@ static_assert(string_view::npos == string_view("abc").find_last_of("def"), ""); static_assert(3 == string_view("abcabc").find_last_of("a"), ""); static_assert(4 == string_view("abcabc").find_last_of("b"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_of("c"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_of("bc"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_of("cbd"), ""); static_assert(string_view::npos == string_view("").find_last_of("", 1), ""); static_assert(string_view::npos == string_view("").find_last_of("a", 0), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_last_of("abc", 100), ""); static_assert(string_view::npos == string_view("abc").find_last_of("", 1), ""); @@ -1329,7 +1272,6 @@ static_assert( ""); static_assert( string_view::npos == - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view("").find_first_not_of(string_view("abc"), 100), ""); static_assert( @@ -1355,7 +1297,6 @@ static_assert( 4 == string_view("abcabc").find_first_not_of(string_view("ac"), 4), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_first_not_of(string_view("ab"), 5), ""); static_assert( @@ -1388,7 +1329,6 @@ static_assert( string_view::npos == string_view("abc").find_first_not_of('c', 3), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("abc").find_first_not_of('a', 100), ""); static_assert(1 == string_view("ababa").find_first_not_of('a'), ""); @@ -1400,7 +1340,6 @@ static_assert( string_view::npos == string_view("ababa").find_first_not_of('a', 4), ""); static_assert( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("ababa").find_first_not_of('a', 5), ""); } // namespace test_find_first_not_of_overload2 @@ -1422,7 +1361,6 @@ static_assert( string_view::npos == string_view("abc").find_first_not_of("acdbef", 0, 4), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("abc").find_first_not_of("defabcas", 0, 6), ""); @@ -1440,7 +1378,6 @@ static_assert( string_view::npos == string_view("").find_first_not_of("abc", 1, 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_first_not_of("abcdef", 100, 3), ""); static_assert( @@ -1450,14 +1387,12 @@ static_assert( string_view::npos == string_view("abc").find_first_not_of("acdbef", 3, 4), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("abc").find_first_not_of("defabcas", 2, 6), ""); static_assert(1 == string_view("abcabc").find_first_not_of("bca", 1, 0), ""); static_assert(3 == string_view("abcabc").find_first_not_of("bca", 1, 2), ""); static_assert(4 == string_view("abcabc").find_first_not_of("acb", 4, 2), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_first_not_of("abc", 5, 2), ""); static_assert(4 == string_view("abcabc").find_first_not_of("abac", 3, 1), ""); static_assert(4 == string_view("abcabc").find_first_not_of("dadab", 4, 2), ""); @@ -1493,7 +1428,6 @@ static_assert( string_view::npos == string_view("").find_first_not_of("a", 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_first_not_of("abc", 100), ""); static_assert( @@ -1509,7 +1443,6 @@ static_assert( static_assert(1 == string_view("abcabc").find_first_not_of("", 1), ""); static_assert(3 == string_view("abcabc").find_first_not_of("bc", 1), ""); static_assert(4 == string_view("abcabc").find_first_not_of("ac", 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_first_not_of("ab", 5), ""); static_assert(4 == string_view("abcabc").find_first_not_of("a", 3), ""); static_assert(4 == string_view("abcabc").find_first_not_of("da", 4), ""); @@ -1538,7 +1471,6 @@ static_assert( string_view("abc").find_last_not_of(string_view("defabc")), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_not_of(string_view("")), ""); static_assert( 3 == string_view("abcabc").find_last_not_of(string_view("bc")), @@ -1547,7 +1479,6 @@ static_assert( 4 == string_view("abcabc").find_last_not_of(string_view("ac")), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_last_not_of(string_view("ab")), ""); static_assert( @@ -1565,7 +1496,6 @@ static_assert( ""); static_assert( string_view::npos == - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view("").find_last_not_of(string_view("abc"), 100), ""); static_assert( @@ -1618,7 +1548,6 @@ static_assert(0 == string_view("abc").find_last_not_of('c', 0), ""); static_assert(1 == string_view("abc").find_last_not_of('c', 1), ""); static_assert(1 == string_view("abc").find_last_not_of('c', 2), ""); static_assert(1 == string_view("abc").find_last_not_of('c', 3), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(2 == string_view("abc").find_last_not_of('a', 
100), ""); static_assert(3 == string_view("ababa").find_last_not_of('a'), ""); static_assert( @@ -1628,7 +1557,6 @@ static_assert(1 == string_view("ababa").find_last_not_of('a', 1), ""); static_assert(1 == string_view("ababa").find_last_not_of('a', 2), ""); static_assert(3 == string_view("ababa").find_last_not_of('a', 3), ""); static_assert(3 == string_view("ababa").find_last_not_of('a', 4), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(3 == string_view("ababa").find_last_not_of('a', 5), ""); } // namespace test_find_last_not_of_overload2 @@ -1655,12 +1583,10 @@ static_assert( ""); static_assert( string_view::npos == - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view("abc").find_last_not_of("defabcas", string_view::npos, 6), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_last_not_of("cab", string_view::npos, 0), ""); static_assert( @@ -1670,7 +1596,6 @@ static_assert( 4 == string_view("abcabc").find_last_not_of("acb", string_view::npos, 2), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5 == string_view("abcabc").find_last_not_of("abc", string_view::npos, 2), ""); static_assert( @@ -1687,7 +1612,6 @@ static_assert( string_view::npos == string_view("").find_last_not_of("abc", 0, 1), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_last_not_of("abcdef", 100, 3), ""); static_assert( @@ -1697,7 +1621,6 @@ static_assert( string_view::npos == string_view("abc").find_last_not_of("acdbef", 3, 4), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("abc").find_last_not_of("defabcas", 2, 6), ""); @@ -1723,11 +1646,9 @@ static_assert( string_view::npos == string_view("abc").find_last_not_of("defabc"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_not_of(""), ""); static_assert(3 == string_view("abcabc").find_last_not_of("bc"), ""); static_assert(4 == string_view("abcabc").find_last_not_of("ac"), ""); -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_assert(5 == string_view("abcabc").find_last_not_of("ab"), ""); static_assert(4 == string_view("abcabc").find_last_not_of("c"), ""); static_assert(4 == string_view("abcabc").find_last_not_of("ca"), ""); @@ -1737,7 +1658,6 @@ static_assert( string_view::npos == string_view("").find_last_not_of("a", 0), ""); static_assert( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) string_view::npos == string_view("").find_last_not_of("abc", 100), ""); static_assert( diff --git a/c10/test/util/typeid_test.cpp b/c10/test/util/typeid_test.cpp index dab9fc6794385..3c61ffc88b08d 100644 --- a/c10/test/util/typeid_test.cpp +++ b/c10/test/util/typeid_test.cpp @@ -75,7 +75,6 @@ TEST(TypeMetaTest, TypeMeta) { class ClassAllowAssignment { public: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ClassAllowAssignment() : x(42) {} // NOLINTNEXTLINE(modernize-use-equals-default) ClassAllowAssignment(const ClassAllowAssignment& src) : x(src.x) {} @@ -85,7 +84,6 @@ class ClassAllowAssignment { class ClassNoAssignment { public: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ClassNoAssignment() : x(42) {} ClassNoAssignment(const ClassNoAssignment& src) = delete; ClassNoAssignment& operator=(const ClassNoAssignment& src) = delete; @@ -110,7 +108,6 @@ TEST(TypeMetaTest, CtorDtorAndCopy) { 
EXPECT_TRUE(meta_a.placementDelete() != nullptr); EXPECT_TRUE(meta_a.copy() != nullptr); ClassAllowAssignment src; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) src.x = 10; ClassAllowAssignment dst; EXPECT_EQ(dst.x, 42); diff --git a/caffe2/core/blob_test.cc b/caffe2/core/blob_test.cc index 538f638f3b6e7..49767f62f14ad 100644 --- a/caffe2/core/blob_test.cc +++ b/caffe2/core/blob_test.cc @@ -170,7 +170,6 @@ TEST(BlobTest, BlobNonConstructible) { ASSERT_TRUE( blob.GetMutableOrNull() != nullptr); EXPECT_EQ(blob.Get().val, 42); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) blob.GetMutableOrNull()->val = 37; EXPECT_EQ(blob.Get().val, 37); } @@ -216,7 +215,6 @@ TEST(TensorNonTypedTest, TensorChangeType) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); @@ -254,7 +252,6 @@ TEST(TensorNonTypedTest, NonDefaultConstructible) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); @@ -283,7 +280,6 @@ TYPED_TEST(TensorCPUTest, TensorInitializedEmpty) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; tensor.Resize(dims); EXPECT_EQ(tensor.dim(), 3); @@ -300,7 +296,6 @@ TYPED_TEST(TensorCPUTest, TensorInitializedNonEmpty) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); EXPECT_EQ(tensor.dim(), 3); @@ -309,13 +304,9 @@ TYPED_TEST(TensorCPUTest, TensorInitializedNonEmpty) { EXPECT_EQ(tensor.dim32(2), 5); EXPECT_TRUE(tensor.mutable_data() != nullptr); EXPECT_TRUE(tensor.data() != nullptr); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[0] = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[1] = 11; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 13; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims.push_back(17); tensor.Resize(dims); EXPECT_EQ(tensor.dim(), 4); @@ -332,7 +323,6 @@ TYPED_TEST(TensorCPUTest, TensorInitializedZeroDim) { vector dims(3); dims[0] = 2; dims[1] = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); EXPECT_EQ(tensor.dim(), 3); @@ -348,7 +338,6 @@ TYPED_TEST(TensorCPUTest, TensorResizeZeroDim) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); EXPECT_EQ(tensor.dim(), 3); @@ -358,10 +347,8 @@ TYPED_TEST(TensorCPUTest, TensorResizeZeroDim) { EXPECT_TRUE(tensor.mutable_data() != nullptr); EXPECT_TRUE(tensor.data() != nullptr); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[0] = 7; dims[1] = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 13; tensor.Resize(dims); EXPECT_EQ(tensor.numel(), 0); @@ -389,7 +376,6 @@ TYPED_TEST(TensorCPUTest, TensorAlias) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); EXPECT_TRUE(tensor.mutable_data() != nullptr); @@ -409,7 +395,6 @@ TYPED_TEST(TensorCPUTest, TensorShareDataRawPointer) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays) std::unique_ptr raw_buffer(new TypeParam[2 * 3 * 5]); 
@@ -429,7 +414,6 @@ TYPED_TEST(TensorCPUTest, TensorShareDataRawPointerWithMeta) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-c-arrays) std::unique_ptr raw_buffer(new TypeParam[2 * 3 * 5]); @@ -450,10 +434,8 @@ TYPED_TEST(TensorCPUTest, TensorAliasCanUseDifferentShapes) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; vector alternate_dims(1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) alternate_dims[0] = 2 * 3 * 5; Tensor tensor(dims, CPU); EXPECT_TRUE(tensor.mutable_data() != nullptr); @@ -476,7 +458,6 @@ TYPED_TEST(TensorCPUTest, NoLongerAliassAfterNumelChanges) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); EXPECT_TRUE(tensor.mutable_data() != nullptr); @@ -484,7 +465,6 @@ TYPED_TEST(TensorCPUTest, NoLongerAliassAfterNumelChanges) { EXPECT_EQ(tensor.data(), other_tensor.data()); auto* old_pointer = other_tensor.data(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[0] = 7; tensor.Resize(dims); EXPECT_EQ(old_pointer, other_tensor.data()); @@ -496,7 +476,6 @@ TYPED_TEST(TensorCPUTest, NoLongerAliasAfterFreeMemory) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; Tensor tensor(dims, CPU); EXPECT_TRUE(tensor.mutable_data() != nullptr); @@ -515,13 +494,11 @@ TYPED_TEST(TensorCPUTest, KeepOnShrink) { FLAGS_caffe2_keep_on_shrink = true; FLAGS_caffe2_max_keep_on_shrink_memory = LLONG_MAX; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector dims{2, 3, 5}; Tensor tensor(dims, CPU); TypeParam* ptr = tensor.mutable_data(); EXPECT_TRUE(ptr != nullptr); // Expanding - will reallocate - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.Resize(3, 4, 6); TypeParam* larger_ptr = tensor.mutable_data(); EXPECT_TRUE(larger_ptr != nullptr); @@ -535,10 +512,8 @@ TYPED_TEST(TensorCPUTest, KeepOnShrink) { EXPECT_TRUE(smaller_ptr != nullptr); EXPECT_EQ(larger_ptr, smaller_ptr); // resize to 0 in the meantime; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.Resize(3, 0, 6); // Expanding but still under capacity - will not reallocate - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.Resize(2, 3, 5); TypeParam* new_ptr = tensor.mutable_data(); EXPECT_TRUE(new_ptr != nullptr); @@ -549,22 +524,18 @@ TYPED_TEST(TensorCPUTest, KeepOnShrink) { TYPED_TEST(TensorCPUTest, MaxKeepOnShrink) { // Set flags FLAGS_caffe2_keep_on_shrink = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) FLAGS_caffe2_max_keep_on_shrink_memory = 8 * 4 * sizeof(TypeParam); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector dims{1, 8, 8}; Tensor tensor(dims, CPU); TypeParam* ptr = tensor.mutable_data(); EXPECT_TRUE(ptr != nullptr); // Shrinking - will not reallocate - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.Resize(1, 7, 8); TypeParam* smaller_ptr = tensor.mutable_data(); EXPECT_TRUE(smaller_ptr != nullptr); EXPECT_EQ(ptr, smaller_ptr); // Resize to more than maximum shrink, should reallocate - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.Resize(1, 1, 8); TypeParam* new_ptr = tensor.mutable_data(); EXPECT_TRUE(new_ptr != nullptr); @@ -656,7 +627,6 @@ TEST(TensorTest, 
Tensor64BitDimension) { EXPECT_EQ(tensor.itemsize(), sizeof(char)); // Try to go even larger, but this time we will not do mutable_data because we // do not have a large enough memory. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor.Resize(large_number, 100); EXPECT_EQ(tensor.dim(), 2); EXPECT_EQ(tensor.size(0), large_number); @@ -673,9 +643,7 @@ TEST(TensorTest, UndefinedTensor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, CopyAndAssignment) { Tensor x(CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.Resize(16, 17); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testing::randomFill(x.template mutable_data(), 16 * 17); EXPECT_TRUE(x.defined()); @@ -782,7 +750,6 @@ TEST(TensorTest, TensorSerialization_CustomType) { Blob blob; TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU); tensor->Resize(2, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { tensor->mutable_data()[i].val = i; } @@ -799,7 +766,6 @@ TEST(TensorTest, TensorSerialization_CustomType) { EXPECT_EQ(new_tensor.dim(), 2); EXPECT_EQ(new_tensor.size(0), 2); EXPECT_EQ(new_tensor.size(1), 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { EXPECT_EQ( new_tensor.data()[i].val, @@ -814,7 +780,6 @@ TEST(TensorTest, Half) { TensorCPU* tensor = BlobGetMutableTensor(&blob, CPU); tensor->Resize(kSize); for (int i = 0; i < tensor->numel(); ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor->mutable_data()[i].x = i % 10000; } string serialized = SerializeBlob(blob, "test"); @@ -830,9 +795,7 @@ TEST(TensorTest, Half) { EXPECT_EQ(tensor_proto.byte_data().size(), 2 * kSize); for (int i = 0; i < kSize; ++i) { auto value = tensor->mutable_data()[i].x; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto low_bits = static_cast(value & 0xff); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto high_bits = static_cast(value >> 8); EXPECT_EQ(tensor_proto.byte_data()[2 * i], low_bits); EXPECT_EQ(tensor_proto.byte_data()[2 * i + 1], high_bits); @@ -856,7 +819,6 @@ TEST(TensorTest, Half) { TEST(TensorTest, TensorFactory) { Tensor a = empty({1, 2, 3}, at::device(CPU).dtype()); EXPECT_NE(a.data(), nullptr); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.mutable_data()[0] = 3.0; Tensor b = empty({1, 2, 3}, at::device(CPU).dtype()); EXPECT_NE(b.data(), nullptr); @@ -867,19 +829,14 @@ TEST(TensorTest, TensorFactory) { TEST(QTensorTest, QTensorSerialization) { Blob blob; QTensor* qtensor = blob.GetMutable>(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qtensor->SetPrecision(5); qtensor->SetSigned(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qtensor->SetScale(1.337); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qtensor->SetBias(-1.337); qtensor->Resize(std::vector{2, 3}); // "Randomly" set bits. 
srand(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 5; ++j) { // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand) qtensor->SetBitAtIndex(j, i, rand() % 2); @@ -906,9 +863,7 @@ TEST(QTensorTest, QTensorSerialization) { EXPECT_EQ(new_qtensor.ndim(), 2); EXPECT_EQ(new_qtensor.dim32(0), 2); EXPECT_EQ(new_qtensor.dim32(1), 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 5; ++j) { EXPECT_EQ(qtensor->GetBitAtIndex(j, i), new_qtensor.GetBitAtIndex(j, i)); } @@ -1133,7 +1088,6 @@ TEST(ContentChunks, Serialization) { Blob blob; DummyType* container = blob.GetMutable(); VLOG(1) << "Allocating blob"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) container->n_chunks = 10; VLOG(1) << "Filling out the blob"; StringMap data; @@ -1214,7 +1168,6 @@ TEST(QTensor, QTensorSizingTest) { vector dims(3); dims[0] = 2; dims[1] = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dims[2] = 5; QTensor qtensor(dims, 3); EXPECT_TRUE(qtensor.mutable_data() != nullptr); @@ -1252,7 +1205,6 @@ TEST(TensorConstruction, UninitializedCopyTest) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorConstruction, CopyConstructorTest) { Tensor x(CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.Resize(5); x.mutable_data()[0] = 1; Tensor y = x.Clone(); @@ -1261,7 +1213,6 @@ TEST(TensorConstruction, CopyConstructorTest) { EXPECT_EQ(*x.data(), 1); EXPECT_EQ(*y.data(), 1); EXPECT_EQ(*z.data(), 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.mutable_data()[0] = 5; EXPECT_EQ(*x.data(), 5); EXPECT_EQ(*y.data(), 1); @@ -1271,7 +1222,6 @@ TEST(TensorConstruction, CopyConstructorTest) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorConstruction, MoveAssignmentOpTest) { Tensor x(CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.Resize(5); x.mutable_data()[0] = 1; Tensor y(CPU); @@ -1373,7 +1323,6 @@ void TestDataType( std::string dataTypeName) { LOG(INFO) << dataTypeName; FLAGS_caffe2_serialize_using_bytes_as_holder = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t numEl = 1000; // Proto with int32 auto protoInt32 = CreateProtoWithInt32Data(dataType, numEl, false); diff --git a/caffe2/core/context.cc b/caffe2/core/context.cc index a5c30b7b8f433..0aa8c80a691e6 100644 --- a/caffe2/core/context.cc +++ b/caffe2/core/context.cc @@ -14,9 +14,7 @@ uint32_t RandomNumberSeed() { auto tv = std::chrono::system_clock::now().time_since_epoch(); uint64_t usec = static_cast( std::chrono::duration_cast(tv).count()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint32_t tv_sec = usec / 1000000; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint32_t tv_usec = usec % 1000000; const uint32_t kPrime0 = 51551; const uint32_t kPrime1 = 61631; diff --git a/caffe2/core/context_test.cc b/caffe2/core/context_test.cc index e8399f18ddd10..9e94f0954abf9 100644 --- a/caffe2/core/context_test.cc +++ b/caffe2/core/context_test.cc @@ -8,7 +8,6 @@ namespace caffe2 { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CPUContextTest, TestAllocAlignment) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 1; i < 10; ++i) { auto data = CPUContext::New(i); EXPECT_EQ((reinterpret_cast(data.get()) 
% gAlignment), 0); @@ -18,24 +17,19 @@ TEST(CPUContextTest, TestAllocAlignment) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CPUContextTest, TestAllocDealloc) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto data_ptr = CPUContext::New(10 * sizeof(float)); float* data = static_cast(data_ptr.get()); EXPECT_NE(data, nullptr); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto dst_data_ptr = CPUContext::New(10 * sizeof(float)); float* dst_data = static_cast(dst_data_ptr.get()); EXPECT_NE(dst_data, nullptr); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) data[i] = i; } DeviceOption option; CPUContext context(option); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) context.CopyToCPU(10, data, dst_data); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { EXPECT_FLOAT_EQ(dst_data[i], i); } diff --git a/caffe2/core/init.cc b/caffe2/core/init.cc index 921ba31a07630..6e81eb2d84b6b 100644 --- a/caffe2/core/init.cc +++ b/caffe2/core/init.cc @@ -76,7 +76,6 @@ bool GlobalInit(int* pargc, char*** pargv) { if (FLAGS_caffe2_version) { std::cerr << "Caffe2 build configuration: " << std::endl; for (const auto& it : GetBuildOptions()) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::cerr << " " << std::setw(25) << std::left << it.first << " : " << it.second << std::endl; } diff --git a/caffe2/core/int8_serialization.cc b/caffe2/core/int8_serialization.cc index cb40bd03e8239..0a17afb486a17 100644 --- a/caffe2/core/int8_serialization.cc +++ b/caffe2/core/int8_serialization.cc @@ -25,7 +25,6 @@ class Int8TensorCPUSerializer : public BlobSerializerBase { for (int i = 0; i < tensor.t.dim(); ++i) { proto.add_dims(tensor.t.dim32(i)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) proto.set_precision(8); proto.set_scale(tensor.scale); proto.set_bias(tensor.zero_point); diff --git a/caffe2/core/memonger.cc b/caffe2/core/memonger.cc index 7036e814a2805..c7670738034f8 100644 --- a/caffe2/core/memonger.cc +++ b/caffe2/core/memonger.cc @@ -264,7 +264,6 @@ class ComputeBlobRecyclingForDag { << mapped_blobs_set.size() << " shared blobs."; if (floats_saved_ > 0) { LOG(INFO) << "Memonger saved approximately : " - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << (floats_saved_ * 4.0 / 1024.0 / 1024.0) << " MB."; } diff --git a/caffe2/core/net.cc b/caffe2/core/net.cc index c33ac6dc3da1c..4599c629a69e1 100644 --- a/caffe2/core/net.cc +++ b/caffe2/core/net.cc @@ -235,7 +235,6 @@ std::vector NetBase::TEST_Benchmark( LOG(INFO) << "Main runs finished. Milliseconds per iter: " // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) << millis / main_runs - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << ". 
Iters per second: " << 1000.0 * main_runs / millis; if (run_individual) { diff --git a/caffe2/core/net_async_tracing_test.cc b/caffe2/core/net_async_tracing_test.cc index 72cd663cf62c5..afacd0d83916a 100644 --- a/caffe2/core/net_async_tracing_test.cc +++ b/caffe2/core/net_async_tracing_test.cc @@ -27,18 +27,14 @@ void testExtractShardId(const string& name, int expectedId) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NetAsyncTracingTest, ExtractShardId) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testExtractShardId("ABCDEFshard:1705!!A", 1705); // Should use the last one - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testExtractShardId("ABCDEFshard:4324!!Ashard:01220b", 1220); // Nothing to extract testExtractShardId("ABCDEFsha:222", -1); // Regular cases testExtractShardId("FC:shard:0", 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testExtractShardId("FC:shard:10", 10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testExtractShardId("FC:shard:15", 15); } diff --git a/caffe2/core/net_dag_utils_test.cc b/caffe2/core/net_dag_utils_test.cc index 201fa847f935b..6a2ad3dc7c507 100644 --- a/caffe2/core/net_dag_utils_test.cc +++ b/caffe2/core/net_dag_utils_test.cc @@ -301,7 +301,6 @@ TEST(DagUtilTest, Mixed2) { DagUtilTestContext t(spec, &ws); auto chains = t.computeChains(); dag_utils::ExecutionChains expected{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0, {0}}, {1, {1}}, {3, {3, 6}}, {4, {4, 2, 5}}, {7, {7}}, {8, {8}}}; EXPECT_EQ(chains, expected); } diff --git a/caffe2/core/net_simple.cc b/caffe2/core/net_simple.cc index 59c3672d3d8dd..45a5a2b4fd143 100644 --- a/caffe2/core/net_simple.cc +++ b/caffe2/core/net_simple.cc @@ -127,7 +127,6 @@ vector SimpleNet::TEST_Benchmark( std::cout << "Main run finished. Milliseconds per iter: " // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) << millis / main_runs - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << ". 
Iters per second: " << 1000.0 * main_runs / millis << std::endl; } @@ -223,10 +222,8 @@ void IndividualMetrics::PrintOperatorProfilingResults() { : "NO_OUTPUT")); std::stringstream flops_str; if (idx < flops_per_op.size() && flops_per_op[idx]) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) flops_str << " (" << to_string(1.0e-9 * flops_per_op[idx]) << " GFLOP, " << to_string( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.0e-6 * flops_per_op[idx] / time_per_op[idx] * main_runs_) << " GFLOPS)"; @@ -235,7 +232,6 @@ void IndividualMetrics::PrintOperatorProfilingResults() { if (idx < memory_bytes_read_per_op.size() && memory_bytes_read_per_op[idx]) { memory_bytes_read_str << " (" - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << to_string(1.0e-6 * memory_bytes_read_per_op[idx]) << " MB)"; } @@ -243,13 +239,11 @@ void IndividualMetrics::PrintOperatorProfilingResults() { if (idx < memory_bytes_written_per_op.size() && memory_bytes_written_per_op[idx]) { memory_bytes_written_str - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << " (" << to_string(1.0e-6 * memory_bytes_written_per_op[idx]) << " MB)"; } std::stringstream param_bytes_str; if (idx < param_bytes_per_op.size() && param_bytes_per_op[idx]) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) param_bytes_str << " (" << to_string(1.0e-6 * param_bytes_per_op[idx]) << " MB)"; } @@ -300,15 +294,12 @@ void IndividualMetrics::PrintOperatorProfilingResults() { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) percent = (100.0 * value * normalizer[i] / total_metric); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::cout << std::setw(15) << std::setfill(' ') << value * normalizer[i] - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << " " << unit[i] << ". " << std::setw(10) << std::setfill(' ') << percent << "%. " << op << " (" << num_ops_per_op_type_[op] << " ops)" << std::endl; } if (total_metric > 0.) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::cout << std::setw(15) << std::setfill(' ') << total_metric << " " << unit[i] << " in Total" << std::endl; } diff --git a/caffe2/core/net_test.cc b/caffe2/core/net_test.cc index eeff440b473a4..c31bdc6029bb1 100644 --- a/caffe2/core/net_test.cc +++ b/caffe2/core/net_test.cc @@ -146,7 +146,6 @@ TEST(NetDeathTest, DeclaredOutputNotMet) { void testExecution(std::unique_ptr& net, int num_ops) { // Run 100 times - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; i++) { counter.exchange(0); net.get()->Run(); @@ -583,7 +582,6 @@ TEST(NetTest, DISABLED_FailingOperator) { { net_def.set_num_workers(4); std::unique_ptr net(CreateNet(net_def, &ws)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; i++) { counter.exchange(0); bool run_result = false; @@ -1138,7 +1136,6 @@ void testProfDAGNetErrorCase(bool test_error) { auto net = CreateNet(net_def, &ws); // with failing op - net runs return false, without - true - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto num_runs = 0; num_runs < 10; ++num_runs) { auto ret = net->Run(); ASSERT_TRUE(test_error ? 
!ret : ret); diff --git a/caffe2/core/nomnigraph/tests/SubgraphMatcherTest.cc b/caffe2/core/nomnigraph/tests/SubgraphMatcherTest.cc index 45d46c46c4c06..3a1ca4ac8ee8a 100644 --- a/caffe2/core/nomnigraph/tests/SubgraphMatcherTest.cc +++ b/caffe2/core/nomnigraph/tests/SubgraphMatcherTest.cc @@ -537,11 +537,9 @@ TEST(SubgraphMatcher, DagMatchingRandomLargeGraph) { nodes.emplace_back(node); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TestRandom random(517); for (int i = 0; i < numPatterns; i++) { std::vector nodeIdx; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int k = 0; k < 5; k++) { // NOLINTNEXTLINE(performance-inefficient-vector-operation) nodeIdx.emplace_back(random.nextInt() % numNodes); diff --git a/caffe2/core/nomnigraph/tests/TarjansImplTest.cc b/caffe2/core/nomnigraph/tests/TarjansImplTest.cc index 447e9c85c6e65..01139303077e5 100644 --- a/caffe2/core/nomnigraph/tests/TarjansImplTest.cc +++ b/caffe2/core/nomnigraph/tests/TarjansImplTest.cc @@ -48,12 +48,10 @@ TEST(Tarjans, Cycle) { TEST(Tarjans, Random) { nom::Graph g; std::vector::NodeRef> nodes; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { TestClass t; nodes.emplace_back(g.createNode(std::move(t))); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 30; ++i) { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions,clang-analyzer-security.insecureAPI.rand) int ri1 = rand() % nodes.size(); diff --git a/caffe2/core/observer_test.cc b/caffe2/core/observer_test.cc index 21184391deae6..1597087008ca4 100644 --- a/caffe2/core/observer_test.cc +++ b/caffe2/core/observer_test.cc @@ -31,19 +31,16 @@ void DummyObserver::Start() { for (auto& op : operators) { op->AttachObserver(std::make_unique>(op)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) counter.fetch_add(1000); } template <> void DummyObserver::Start() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) counter.fetch_add(100); } template <> void DummyObserver::Stop() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) counter.fetch_add(10); } diff --git a/caffe2/core/operator.cc b/caffe2/core/operator.cc index 946867778d347..d64588b978c50 100644 --- a/caffe2/core/operator.cc +++ b/caffe2/core/operator.cc @@ -259,7 +259,6 @@ unique_ptr _CreateOperator( } if (operator_def.engine().size() && !VLOG_IS_ON(1)) { static int log_occurrences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurrences <= 64) { ++log_occurrences; LOG(INFO) << "Engine " << operator_def.engine() diff --git a/caffe2/core/operator_schema_test.cc b/caffe2/core/operator_schema_test.cc index e0faee63e73dc..d68bb61c3b1cc 100644 --- a/caffe2/core/operator_schema_test.cc +++ b/caffe2/core/operator_schema_test.cc @@ -121,7 +121,6 @@ TEST(OperatorSchemaTest, SameInputOutput) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(OpSchemaCalculateOutputOp) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(1, 5).NumOutputs(2, 6) .OutputCalculator([](int n) { return n + 1; }); @@ -211,7 +210,6 @@ OPERATOR_SCHEMA(OpSchemaArbitraryTensorInference) [](const OperatorDef&, const vector&) { vector shapes(1); shapes[0].set_data_type(TensorProto::FLOAT); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shapes[0].add_dims(1701); return shapes; }); @@ -289,14 +287,10 @@ TEST(OperatorSchemaTest, TestCostInference) { "OpSchemaCostInference", "", vector{"in"}, vector{"out"}); 
vector shapes(2); shapes[0].set_data_type(TensorProto::FLOAT); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shapes[0].add_dims(10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shapes[0].add_dims(10); shapes[1].set_data_type(TensorProto::FLOAT); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shapes[1].add_dims(10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) shapes[1].add_dims(10); EXPECT_EQ(2000, schema->InferCost(def, shapes).flops); } diff --git a/caffe2/core/operator_test.cc b/caffe2/core/operator_test.cc index 591a7fc27d171..1ff30564a694d 100644 --- a/caffe2/core/operator_test.cc +++ b/caffe2/core/operator_test.cc @@ -49,7 +49,6 @@ class JustTestWithSomeOutput : public JustTest { public: using JustTest::JustTest; bool Run(int /* unused */ /*stream_id*/) override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *OperatorBase::Output(0) = 5; return true; } @@ -182,7 +181,6 @@ TEST(OperatorTest, TestParameterAccess) { op_def.set_type("JustTest"); op_def.add_input("input"); op_def.add_output("output"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddArgument("arg0", 0.1, &op_def); AddArgument>("arg1", vector{1, 2}, &op_def); AddArgument("arg2", "argstring", &op_def); @@ -210,7 +208,6 @@ TEST(OperatorTest, CannotAccessParameterWithWrongType) { op_def.set_type("JustTest"); op_def.add_input("input"); op_def.add_output("output"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddArgument("arg0", 0.1f, &op_def); EXPECT_NE(ws.CreateBlob("input"), nullptr); OperatorBase op(op_def, &ws); @@ -228,7 +225,6 @@ TEST(OperatorDeathTest, DISABLED_CannotAccessRepeatedParameterWithWrongType) { op_def.set_type("JustTest"); op_def.add_input("input"); op_def.add_output("output"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddArgument>("arg0", vector{0.1f}, &op_def); EXPECT_NE(ws.CreateBlob("input"), nullptr); OperatorBase op(op_def, &ws); diff --git a/caffe2/core/parallel_net_test.cc b/caffe2/core/parallel_net_test.cc index 6ebab67847e33..f2522852ce80f 100644 --- a/caffe2/core/parallel_net_test.cc +++ b/caffe2/core/parallel_net_test.cc @@ -23,7 +23,6 @@ class SleepOp final : public Operator { public: SleepOp(const OperatorDef& operator_def, Workspace* ws) : Operator(operator_def, ws), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ms_(OperatorBase::GetSingleArgument("ms", 1000)) { DCHECK_GT(ms_, 0); DCHECK_LT(ms_, 3600 * 1000) << "Really? 
This long?"; diff --git a/caffe2/core/plan_executor.cc b/caffe2/core/plan_executor.cc index 18cc79903630e..efe7ac8acc99b 100644 --- a/caffe2/core/plan_executor.cc +++ b/caffe2/core/plan_executor.cc @@ -166,7 +166,6 @@ struct WorkspaceIdInjector { CAFFE_ENFORCE( seq_ < (1 << 16), "Integer overflow while calculating GLOBAL_WORKSPACE_ID blob"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int32_t global_ws_id = (seq_++) + (static_cast(node_id) << 16); Blob* global_ws_id_blob = workspace->CreateLocalBlob(GLOBAL_WORKSPACE_ID); TensorCPU* global_ws_id_tensor = @@ -522,7 +521,6 @@ bool ExecuteStepRecursive(ExecutionStepWrapper& stepWrapper) { auto* reportNet = compiledStep->reportNet; if (reportNet) { VLOG(1) << "Starting reporter net"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) reporter->start(step.report_interval() * 1000, [reportNet]() { if (!reportNet->Run()) { LOG(WARNING) << "Error running report_net."; diff --git a/caffe2/core/plan_executor_test.cc b/caffe2/core/plan_executor_test.cc index 291384cb48344..5dab70dcb4a4d 100644 --- a/caffe2/core/plan_executor_test.cc +++ b/caffe2/core/plan_executor_test.cc @@ -31,7 +31,6 @@ class StuckBlockingOp final : public Operator { stuckRun = true; while (!cancelled_) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(10)); } @@ -112,7 +111,6 @@ class ErrorOp final : public Operator { bool RunOnDevice() override { // Wait for StuckAsyncOp or StuckBlockingOp to run first. while (!stuckRun) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(10)); } throw TestError(); @@ -137,7 +135,6 @@ class BlockingErrorOp final : public Operator { if (blockingErrorRuns.fetch_sub(1) >= 1) { LOG(INFO) << "blocking"; while (true) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::hours(10)); } } else { @@ -254,7 +251,6 @@ PlanDef reporterErrorPlanWithCancellableStuckNet() { } struct HandleExecutorThreadExceptionsGuard { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) HandleExecutorThreadExceptionsGuard(int timeout = 60) { globalInit({ "caffe2", @@ -400,7 +396,6 @@ PlanDef shouldStopWithCancelPlan() { auto* substep2 = substep->add_substep(); substep2->set_name("should_stop_net"); substep2->add_network(should_stop_net->name()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) substep2->set_num_iter(10); } { diff --git a/caffe2/core/prof_dag_counters.cc b/caffe2/core/prof_dag_counters.cc index 02e0c90459b50..a2ba15b868efb 100644 --- a/caffe2/core/prof_dag_counters.cc +++ b/caffe2/core/prof_dag_counters.cc @@ -189,9 +189,7 @@ void ProfDAGReport::PrintStats() { const auto& moments = item.second.computeMoments(); const auto& times_moments = times_per_run_per_type_total_[item.first].computeMoments(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) debug_out << std::setw(10) << std::setfill(' ') << moments.first - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << " ms/run (" << std::setw(10) << std::setfill(' ') << moments.second << " ms/run) " << " Op count per run: " << times_moments.first << " " diff --git a/caffe2/core/stats_test.cc b/caffe2/core/stats_test.cc index 1cdb7fa11cbf7..5dc4cfeb1f9fd 100644 --- a/caffe2/core/stats_test.cc +++ b/caffe2/core/stats_test.cc @@ -53,11 +53,8 @@ ExportedStatMap filterMap( TEST(StatsTest, StatsTestClass) { MyCaffeClass a("first"); MyCaffeClass b("second"); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.run(10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b.run(5); } EXPECT_SUBSET( diff --git a/caffe2/core/timer_test.cc b/caffe2/core/timer_test.cc index bee455f3087b1..9e02d3df7dc36 100644 --- a/caffe2/core/timer_test.cc +++ b/caffe2/core/timer_test.cc @@ -18,7 +18,6 @@ TEST(TimerTest, Test) { // Sleep for a while, and get the time. timer.Start(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(100)); float ns = timer.NanoSeconds(); float us = timer.MicroSeconds(); diff --git a/caffe2/core/transform_test.cc b/caffe2/core/transform_test.cc index a0befaf37cdf5..cbbfe156bf369 100644 --- a/caffe2/core/transform_test.cc +++ b/caffe2/core/transform_test.cc @@ -346,7 +346,6 @@ class TransformSleepFastOp final : public OperatorBase { public: using OperatorBase::OperatorBase; bool Run(int /* unused */) override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(30)); return true; } @@ -365,7 +364,6 @@ class TransformSleepSlowOp final : public OperatorBase { public: using OperatorBase::OperatorBase; bool Run(int /* unused */) override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(100)); return true; } @@ -456,7 +454,6 @@ TEST(TransformTest, TestApplyTransformIfFasterIsFaster) { // Should be still transform normally. auto mystery_net = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ApplyTransformIfFaster("SlowToFast", netdef, init_netdef, 5, 10, 1.01); EXPECT_EQ(mystery_net.op(1).type(), "TransformSleepFastOp"); } @@ -477,7 +474,6 @@ TEST(TransformTest, TestApplyTransformIfFasterButSlower) { // Should not actually change! 
auto mystery_net = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ApplyTransformIfFaster("FastToSlow", netdef, init_netdef, 5, 10, 1.01); EXPECT_EQ(mystery_net.op(1).type(), "TransformSleepFastOp"); } diff --git a/caffe2/distributed/file_store_handler.cc b/caffe2/distributed/file_store_handler.cc index f2caf7979e5aa..aa882153ea22f 100644 --- a/caffe2/distributed/file_store_handler.cc +++ b/caffe2/distributed/file_store_handler.cc @@ -29,13 +29,11 @@ namespace caffe2 { static std::string encodeName(const std::string& name) { // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) std::array out; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MurmurHash3_x64_128(name.data(), name.size(), 0xcafef00d, out.data()); // Size is 33 to have space for final NUL // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-avoid-magic-numbers) std::array buf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; i++) { snprintf(&buf[i * 2], buf.size() - (i * 2), "%02x", ((char*)out.data())[i]); } @@ -54,7 +52,6 @@ FileStoreHandler::FileStoreHandler( #if defined(_MSC_VER) auto ret = _mkdir(basePath_.c_str()); #else - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ret = mkdir(basePath_.c_str(), 0777); #endif // defined(_MSC_VER) if (ret == -1) { @@ -181,7 +178,6 @@ void FileStoreHandler::wait( "Wait timeout for name(s): ", c10::Join(" ", names)); } /* sleep override */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(10)); } } diff --git a/caffe2/ideep/operators/adam_op.cc b/caffe2/ideep/operators/adam_op.cc index 287db5d1d579c..e44f10bea1dae 100644 --- a/caffe2/ideep/operators/adam_op.cc +++ b/caffe2/ideep/operators/adam_op.cc @@ -90,11 +90,8 @@ class IDEEPAdamOp final : public IDEEPOperator { IDEEPAdamOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) beta1_(OperatorBase::GetSingleArgument("beta1", 0.9f)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) beta2_(OperatorBase::GetSingleArgument("beta2", 0.999f)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) epsilon_(OperatorBase::GetSingleArgument("epsilon", 1e-5f)) {} bool RunOnDevice() override { // Iter live on the CPU diff --git a/caffe2/ideep/operators/dropout_op.cc b/caffe2/ideep/operators/dropout_op.cc index 78e2d19c75177..6003d64c5f8b8 100644 --- a/caffe2/ideep/operators/dropout_op.cc +++ b/caffe2/ideep/operators/dropout_op.cc @@ -11,7 +11,6 @@ class IDEEPDropoutOp final : public IDEEPOperator { IDEEPDropoutOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ratio_(OperatorBase::GetSingleArgument("ratio", 0.5)), is_test_( OperatorBase::GetSingleArgument(OpSchema::Arg_IsTest, 0)) { @@ -53,7 +52,6 @@ class IDEEPDropoutGradientOp final : public IDEEPOperator { IDEEPDropoutGradientOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ratio_(OperatorBase::GetSingleArgument("ratio", 0.5)), is_test_( OperatorBase::GetSingleArgument(OpSchema::Arg_IsTest, 0)) { diff --git a/caffe2/ideep/operators/pool_op.cc b/caffe2/ideep/operators/pool_op.cc index bc3f920f0cf8b..365fca54747c7 100644 --- a/caffe2/ideep/operators/pool_op.cc +++ b/caffe2/ideep/operators/pool_op.cc @@ -25,10 +25,8 @@ class IDEEPPoolOp final : public 
IDEEPConvPoolOpBase { pk_ = training_mode ? iprop::forward_training : iprop::forward_inference; // Figure out the pooling descriptor. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (operator_def.type().substr(0, 7) == "MaxPool") { algo_ = ialgo::pooling_max; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (operator_def.type().substr(0, 11) == "AveragePool") { algo_ = ialgo::pooling_avg; } else { @@ -81,10 +79,8 @@ class IDEEPPoolGradientOp final : public IDEEPConvPoolOpBase { "Pad should be smaller than kernel."); } // Figure out the pooling descriptor. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (operator_def.type().substr(0, 15) == "MaxPoolGradient") { algo_ = ialgo::pooling_max; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (operator_def.type().substr(0, 19) == "AveragePoolGradient") { algo_ = ialgo::pooling_avg; } else { diff --git a/caffe2/ideep/operators/quantization/int8_given_tensor_fill_op.cc b/caffe2/ideep/operators/quantization/int8_given_tensor_fill_op.cc index d3afc9be5163d..660da3e2e0ed5 100644 --- a/caffe2/ideep/operators/quantization/int8_given_tensor_fill_op.cc +++ b/caffe2/ideep/operators/quantization/int8_given_tensor_fill_op.cc @@ -64,7 +64,6 @@ class IDEEPInt8GivenTensorFillOp final : public IDEEPOperator { temp_ten.get_nelems(), values_data, data_u8); // Shift quantized data to s8 per zero point - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (zero_point_ == 128) { auto* data_s8 = static_cast(temp_ten.get_data_handle()); auto nelems = temp_ten.get_nelems(); diff --git a/caffe2/ideep/operators/quantization/int8_pool_op.cc b/caffe2/ideep/operators/quantization/int8_pool_op.cc index 7368b008d83c9..3c01c6d50e25a 100644 --- a/caffe2/ideep/operators/quantization/int8_pool_op.cc +++ b/caffe2/ideep/operators/quantization/int8_pool_op.cc @@ -22,10 +22,8 @@ class IDEEPInt8PoolOp final : public IDEEPConvPoolOpBase { } // Figure out the pooling descriptor. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (operator_def.type().substr(0, 11) == "Int8MaxPool") { algo_ = ialgo::pooling_max; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (operator_def.type().substr(0, 15) == "Int8AveragePool") { algo_ = ialgo::pooling_avg; } else { diff --git a/caffe2/ideep/operators/quantization/int8_relu_op.cc b/caffe2/ideep/operators/quantization/int8_relu_op.cc index bb22c3ff3d314..f4334f0f50539 100644 --- a/caffe2/ideep/operators/quantization/int8_relu_op.cc +++ b/caffe2/ideep/operators/quantization/int8_relu_op.cc @@ -12,7 +12,6 @@ class IDEEPInt8ReluOp final : public IDEEPOperator { IDEEPInt8ReluOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), alpha_(0.0) { // Figure out the Relu descriptor. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (operator_def.type().substr(0, 8) == "Int8Relu") { alpha_ = 0.0; } else { diff --git a/caffe2/ideep/operators/relu_op.cc b/caffe2/ideep/operators/relu_op.cc index 2de155d7ff2ac..117c05d9f82f4 100644 --- a/caffe2/ideep/operators/relu_op.cc +++ b/caffe2/ideep/operators/relu_op.cc @@ -14,11 +14,9 @@ class IDEEPReluOp final : public IDEEPOperator { // Figure out the Relu descriptor. 
if (operator_def.type().substr(0, 4) == "Relu") { alpha_ = 0.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (operator_def.type().substr(0, 9) == "LeakyRelu") { if (HasArgument("alpha")) { alpha_ = static_cast( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) OperatorBase::GetSingleArgument("alpha", 0.01)); } } else { @@ -53,14 +51,11 @@ class IDEEPReluGradientOp final : public IDEEPOperator { IDEEPReluGradientOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), alpha_(0.0) { // Figure out the Relu descriptor. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (operator_def.type().substr(0, 12) == "ReluGradient") { alpha_ = 0.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (operator_def.type().substr(0, 17) == "LeakyReluGradient") { if (HasArgument("alpha")) { alpha_ = static_cast( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) OperatorBase::GetSingleArgument("alpha", 0.01)); } } else { diff --git a/caffe2/ideep/operators/spatial_batch_norm_op.cc b/caffe2/ideep/operators/spatial_batch_norm_op.cc index 60b52f9d5fd5c..f119c6d3d8c69 100644 --- a/caffe2/ideep/operators/spatial_batch_norm_op.cc +++ b/caffe2/ideep/operators/spatial_batch_norm_op.cc @@ -12,9 +12,7 @@ class IDEEPSpatialBNOp final : public IDEEPOperator { IDEEPSpatialBNOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), is_test_(OperatorBase::GetSingleArgument(OpSchema::Arg_IsTest, 0)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) epsilon_(OperatorBase::GetSingleArgument("epsilon", 1e-5)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) momentum_(OperatorBase::GetSingleArgument("momentum", 0.9)) { CAFFE_ENFORCE( (is_test_ && OutputSize() > OUTPUT) @@ -74,7 +72,6 @@ class IDEEPSpatialBNGradientOp final : public IDEEPOperator { IDEEPSpatialBNGradientOp(const OperatorDef& operator_def, Workspace* ws) : IDEEPOperator(operator_def, ws), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) epsilon_(OperatorBase::GetSingleArgument("epsilon", 1e-5)) { CAFFE_ENFORCE(InputSize() > SAVED_VAR); CAFFE_ENFORCE(OutputSize() > BIAS_GRAD); diff --git a/caffe2/observers/time_observer_test.cc b/caffe2/observers/time_observer_test.cc index d422c99b4fed5..430e23750cf28 100644 --- a/caffe2/observers/time_observer_test.cc +++ b/caffe2/observers/time_observer_test.cc @@ -17,7 +17,6 @@ class SleepOp final : public OperatorBase { using OperatorBase::OperatorBase; bool Run(int /* unused */) override { StartAllObservers(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::milliseconds(3000)); StopAllObservers(); return true; diff --git a/caffe2/onnx/backend.cc b/caffe2/onnx/backend.cc index 1b4d2cd24da74..e3adbfcd1b3b2 100644 --- a/caffe2/onnx/backend.cc +++ b/caffe2/onnx/backend.cc @@ -772,7 +772,6 @@ Caffe2Ops Caffe2Backend::CreateGemm( // Support broadcast by default when opset_version > 6. auto broadcast = onnx_node->attributes.get("broadcast", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (ctx.opset_version() > 6) ? 
1L : 0L); // If the c's shape information is available and c is a 1d tensor(except @@ -832,7 +831,6 @@ Caffe2Ops Caffe2Backend::CreateGemm( BuildOperator( c2_op, "MatMul", {input_a, input_b}, {ab}, {arg_trans_a, arg_trans_b}); c2_op = ret.ops.Add(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ctx.opset_version() >= 7) { BuildOperator(c2_op, "Add", {ab, input_c}, {output}); } else { @@ -873,9 +871,7 @@ Caffe2Ops Caffe2Backend::CreatePad( // first two dim is for batch and channel. Note that now all the values are // non-negative - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!(pads.size() == 8 && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (pads.Get(0) + pads.Get(1) + pads.Get(4) + pads.Get(5) == 0))) { CAFFE_THROW( "Caffe2 only supports padding 2D Tensor, whereas padding is ", str); @@ -885,9 +881,7 @@ Caffe2Ops Caffe2Backend::CreatePad( auto* attr = attributes.AddRewrittenAttribute(pad_name); attr->add_ints(pads.Get(2)); attr->add_ints(pads.Get(3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) attr->add_ints(pads.Get(6)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) attr->add_ints(pads.Get(7)); return CommonOnnxNodeToCaffe2Ops(onnx_node, ctx); @@ -1236,12 +1230,10 @@ Caffe2Ops Caffe2Backend::CreateBatchNormalization( const ConversionContext& ctx) { auto& attributes = onnx_node->attributes; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ctx.opset_version() < 6) { attributes.remove("consumed_inputs"); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ctx.opset_version() >= 7) { auto* attr = attributes.AddRewrittenAttribute("is_test"); attr->set_i(1); @@ -1290,7 +1282,6 @@ Caffe2Ops Caffe2Backend::CreateUpsample( auto& attributes = onnx_node->attributes; attributes.remove("mode"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ctx.opset_version() >= 7 && ctx.opset_version() < 9) { const auto& scales = attributes.get<::google::protobuf::RepeatedField>("scales"); if (scales.size() != 4) { @@ -1308,7 +1299,6 @@ Caffe2Ops Caffe2Backend::CreateUpsample( c2_width->set_name("width_scale"); c2_width->set_f(scales.Get(3)); return c2_op; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (ctx.opset_version() >= 9) { const auto& node = onnx_node->node; if (node.input_size() != 2) { @@ -1347,7 +1337,6 @@ Caffe2Ops Caffe2Backend::CreateUpsample( Caffe2Ops Caffe2Backend::CreateDropout( OnnxNode* onnx_node, const ConversionContext& ctx) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (ctx.opset_version() >= 7) { auto& attributes = onnx_node->attributes; auto* attr = attributes.AddRewrittenAttribute("is_test"); @@ -1365,13 +1354,11 @@ Caffe2Ops Caffe2Backend::CreateLRN( if (!attributes.HasAttribute("alpha")) { auto* arg = c2_op.ops.Mutable(0)->add_arg(); arg->set_name("alpha"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) arg->set_f(1e-4); } if (!attributes.HasAttribute("beta")) { auto* arg = c2_op.ops.Mutable(0)->add_arg(); arg->set_name("beta"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) arg->set_f(0.75); } return c2_op; diff --git a/caffe2/operators/batch_matmul_op_test.cc b/caffe2/operators/batch_matmul_op_test.cc index 53be3c2805776..fc103670f30e5 100644 --- a/caffe2/operators/batch_matmul_op_test.cc +++ b/caffe2/operators/batch_matmul_op_test.cc @@ -59,14 +59,11 @@ class BatchMatMulOpTest : public testing::Test { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(BatchMatMulOpTest, 
BatchMatMulOpNormalTest) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConstInput(std::vector{3, 5, 10}, 1.0f, "A"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConstInput(std::vector{3, 10, 6}, 1.0f, "B"); std::unique_ptr op(CreateOperator(def_, &ws_)); ASSERT_NE(nullptr, op); ASSERT_TRUE(op->Run()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(std::vector{3, 5, 6}, 10.0f); } @@ -75,14 +72,11 @@ TEST_F(BatchMatMulOpTest, BatchMatMulOpBroadcastTest) { auto* arg = def_.add_arg(); arg->set_name("broadcast"); arg->set_i(1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConstInput(std::vector{3, 5, 10}, 1.0f, "A"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConstInput(std::vector{2, 3, 10, 6}, 1.0f, "B"); std::unique_ptr op(CreateOperator(def_, &ws_)); ASSERT_NE(nullptr, op); ASSERT_TRUE(op->Run()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(std::vector{2, 3, 5, 6}, 10.0f); } diff --git a/caffe2/operators/batch_moments_op.cc b/caffe2/operators/batch_moments_op.cc index 1eaa3ae9c8b11..b645580e74081 100644 --- a/caffe2/operators/batch_moments_op.cc +++ b/caffe2/operators/batch_moments_op.cc @@ -65,7 +65,6 @@ bool BatchMomentsGradientOp::ComputeBatchMomentsGradientNCHW( for (int i = 0; i < N; ++i) { EigenArrayMap dX_arr(dX_ptr, HxW, C); dX_arr = ConstEigenArrayMap(X_ptr, HxW, C).rowwise() * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dvar_arr.transpose() * 2.0f; dX_arr.rowwise() += dmu_arr.transpose(); X_ptr += stride; @@ -88,7 +87,6 @@ bool BatchMomentsGradientOp::ComputeBatchMomentsGradientNHWC( const float scale = 1.0f / static_cast(N * HxW); EigenArrayMap dX_arr(dX, C, N * HxW); dX_arr = ConstEigenArrayMap(X, C, N * HxW).colwise() * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstEigenVectorArrayMap(dvar, C) * 2.0f; dX_arr.colwise() += ConstEigenVectorArrayMap(dmu, C); math::Scale(N * C * HxW, scale, dX, dX, &context_); diff --git a/caffe2/operators/box_with_nms_limit_op.cc b/caffe2/operators/box_with_nms_limit_op.cc index 2b43d2e128e5b..8765db1837979 100644 --- a/caffe2/operators/box_with_nms_limit_op.cc +++ b/caffe2/operators/box_with_nms_limit_op.cc @@ -59,7 +59,6 @@ const auto& tscores = Input(0); Tensor* out_keeps_size = nullptr; if (OutputSize() > 4) { out_keeps = Output(4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_keeps_size = Output(5); out_keeps->Resize(0); out_keeps_size->Resize(batch_size, num_classes); @@ -179,11 +178,8 @@ const auto& tscores = Input(0); // Write results int cur_start_idx = out_scores->size(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_scores->Extend(total_keep_count, 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_boxes->Extend(total_keep_count, 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_classes->Extend(total_keep_count, 50); int cur_out_idx = 0; @@ -220,7 +216,6 @@ const auto& tscores = Input(0); } if (out_keeps) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_keeps->Extend(total_keep_count, 50); Eigen::Map out_keeps_arr( @@ -262,7 +257,6 @@ REGISTER_CPU_OPERATOR(BoxWithNMSLimit, BoxWithNMSLimitOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(BoxWithNMSLimit) .NumInputs(2, 3) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumOutputs(3, 6) .SetDoc(R"DOC( Apply NMS to each class (except background) and limit the number of @@ -309,7 +303,6 @@ returned 
boxes. "Output batch splits for scores/boxes after applying NMS") .Output(4, "keeps", "Optional filtered indices, size (n)") .Output( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "keeps_size", "Optional number of filtered indices per class, size (num_classes)"); diff --git a/caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.cc b/caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.cc index 3097ac4c68291..6fc1eaf3b06ef 100644 --- a/caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.cc +++ b/caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.cc @@ -37,7 +37,6 @@ ERArrXXf MapRoIsToFpnLevels( // equivalent to python code // target_lvls = np.floor(lvl0 + np.log2(s / s0 + 1e-6)) // target_lvls = np.clip(target_lvls, k_min, k_max) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target_lvls = (lvl0 + (s / s0 + 1e-6).log() / log(2)).floor(); auto target_lvls_clipped = target_lvls.min(k_max).max(k_min); return target_lvls_clipped; @@ -146,7 +145,6 @@ bool CollectAndDistributeFpnRpnProposalsOp::RunOnDevice() { const auto& roi_in = Input(i); proposal_num += roi_in.size(0); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERArrXXf rois(proposal_num, 5); EArrXf scores(proposal_num); int len = 0; @@ -154,9 +152,7 @@ bool CollectAndDistributeFpnRpnProposalsOp::RunOnDevice() { const auto& roi_in = Input(i); const int n = roi_in.size(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::Map roi(roi_in.data(), n, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rois.block(len, 0, n, 5) = roi; const auto& score_in = Input(num_rpn_lvls + i); @@ -268,7 +264,6 @@ bool CollectRpnProposalsOp::RunOnDevice() { const auto& roi_in = Input(i); proposal_num += roi_in.size(0); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERArrXXf rois(proposal_num, 5); EArrXf scores(proposal_num); int len = 0; @@ -276,9 +271,7 @@ bool CollectRpnProposalsOp::RunOnDevice() { const auto& roi_in = Input(i); const int n = roi_in.size(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::Map roi(roi_in.data(), n, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rois.block(len, 0, n, 5) = roi; const auto& score_in = Input(num_rpn_lvls + i); @@ -322,7 +315,6 @@ bool DistributeFpnProposalsOp::RunOnDevice() { CAFFE_ENFORCE(dim_rois == 4 || dim_rois == 5); Eigen::Map rois_4or5( rois_in.data(), num_rois, dim_rois); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERArrXXf rois = ERArrXXf::Zero(num_rois, 5); rois.rightCols(dim_rois) = rois_4or5; @@ -458,31 +450,26 @@ will change. "format (image_index, x1, y1, x2, y2). See rpn_rois " "documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "rpn_roi_probs_fpn2", "RPN objectness probabilities for FPN level 2. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "rpn_roi_probs_fpn3", "RPN objectness probabilities for FPN level 3. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, "rpn_roi_probs_fpn4", "RPN objectness probabilities for FPN level 4. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, "rpn_roi_probs_fpn5", "RPN objectness probabilities for FPN level 5. 
" "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9, "rpn_roi_probs_fpn6", "RPN objectness probabilities for FPN level 6. " @@ -513,7 +500,6 @@ will change. "RPN proposals for ROI level 5, " "format (image_index, x1, y1, x2, y2)") .Output( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "rois_idx_restore", "Permutation on the concatenation of all " @@ -564,31 +550,26 @@ OPERATOR_SCHEMA(CollectRpnProposals) "format (image_index, x1, y1, x2, y2). See rpn_rois " "documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "rpn_roi_probs_fpn2", "RPN objectness probabilities for FPN level 2. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "rpn_roi_probs_fpn3", "RPN objectness probabilities for FPN level 3. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, "rpn_roi_probs_fpn4", "RPN objectness probabilities for FPN level 4. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, "rpn_roi_probs_fpn5", "RPN objectness probabilities for FPN level 5. " "See rpn_roi_probs documentation from GenerateProposals.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9, "rpn_roi_probs_fpn6", "RPN objectness probabilities for FPN level 6. " diff --git a/caffe2/operators/conv_transpose_op_mobile_test.cc b/caffe2/operators/conv_transpose_op_mobile_test.cc index 9d2e440ff9e85..19dff099b499c 100644 --- a/caffe2/operators/conv_transpose_op_mobile_test.cc +++ b/caffe2/operators/conv_transpose_op_mobile_test.cc @@ -35,14 +35,12 @@ void AddNoiseInput(const vector& shape, math::RandGaussian( tensor->numel(), 0.0f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10.0f, tensor->template mutable_data(), &context); } inline float relativeError(float a, float b) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::abs(a - b) / (0.5f * (std::abs(a) + std::abs(b))); } diff --git a/caffe2/operators/dropout_op.cc b/caffe2/operators/dropout_op.cc index d4908c546528f..7df39265d8b86 100644 --- a/caffe2/operators/dropout_op.cc +++ b/caffe2/operators/dropout_op.cc @@ -26,7 +26,6 @@ bool DropoutOp::RunOnDevice() { bool* mask_data = mask->template mutable_data(); auto* gen = context_.RandGenerator(); for (int i = 0; i < X.numel(); ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mask_data[i] = dist(gen) > 0.5; // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) Ydata[i] = Xdata[i] * scale * mask_data[i]; diff --git a/caffe2/operators/feature_maps_ops.cc b/caffe2/operators/feature_maps_ops.cc index 6af5871fab353..9b034419e02d8 100644 --- a/caffe2/operators/feature_maps_ops.cc +++ b/caffe2/operators/feature_maps_ops.cc @@ -197,7 +197,6 @@ OPERATOR_SCHEMA(MergeSingleMapFeatureTensors) "multi-feature tensor." + doc) .NumInputs([](int n) { return n >= 4 && n % 4 == 0; }) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumOutputs(5) .Input(0, "in1_lengths", ".lengths") .Input(1, "in1_keys", ".keys") @@ -392,9 +391,7 @@ REGISTER_CPU_OPERATOR( OPERATOR_SCHEMA(MergeMultiMapFeatureTensors) .SetDoc( "Merge given multi-feature tensors with map features into one." 
+ doc) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs([](int n) { return n >= 5 && n % 5 == 0; }) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumOutputs(5) .Input(0, "in1_lengths", ".lengths") .Input(1, "in1_keys", ".keys") diff --git a/caffe2/operators/fused_rowwise_nbitfake_conversion_ops.cc b/caffe2/operators/fused_rowwise_nbitfake_conversion_ops.cc index 86829be978f1f..d70067ff8b098 100644 --- a/caffe2/operators/fused_rowwise_nbitfake_conversion_ops.cc +++ b/caffe2/operators/fused_rowwise_nbitfake_conversion_ops.cc @@ -145,7 +145,6 @@ OPERATOR_SCHEMA(FloatToFused4BitFakeRowwiseQuantized) const vector& in) { vector out; TensorShape X = in[0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X.set_dims(1, X.dims(1) + 8); out.push_back(std::move(X)); out[0].set_data_type(TensorProto_DataType_UINT8); @@ -176,7 +175,6 @@ OPERATOR_SCHEMA(HalfToFused4BitFakeRowwiseQuantized) const vector& in) { vector out; TensorShape X = in[0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X.set_dims(1, X.dims(1) + 8); out.push_back(std::move(X)); out[0].set_data_type(TensorProto_DataType_UINT8); @@ -227,7 +225,6 @@ OPERATOR_SCHEMA(FloatToFused2BitFakeRowwiseQuantized) const vector& in) { vector out; TensorShape X = in[0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X.set_dims(1, X.dims(1) + 8); out.push_back(std::move(X)); out[0].set_data_type(TensorProto_DataType_UINT8); @@ -258,7 +255,6 @@ OPERATOR_SCHEMA(HalfToFused2BitFakeRowwiseQuantized) const vector& in) { vector out; TensorShape X = in[0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X.set_dims(1, X.dims(1) + 8); out.push_back(std::move(X)); out[0].set_data_type(TensorProto_DataType_UINT8); diff --git a/caffe2/operators/fused_rowwise_random_quantization_ops.cc b/caffe2/operators/fused_rowwise_random_quantization_ops.cc index a578ed7e05278..c0761d44215c3 100644 --- a/caffe2/operators/fused_rowwise_random_quantization_ops.cc +++ b/caffe2/operators/fused_rowwise_random_quantization_ops.cc @@ -34,7 +34,6 @@ bool FloatToFusedRandRowwiseQuantizedOp::RunOnDevice() { // | 1B | 1B | 4B | 4B | ...output_data....| // In output_data: the b-th bucket of the i-th byte stores // the i-th data of the b-th segment of input row - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t data_per_byte = 8 / bitwidth_; // How many bytes in the output size_t segment_size = (input_columns + data_per_byte - 1) / data_per_byte; @@ -134,13 +133,10 @@ OPERATOR_SCHEMA(FloatToFusedRandRowwiseQuantized) .TensorInferenceFunction([](const OperatorDef& def, const vector& in) { ArgumentHelper helper(def); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto bitwidth = helper.GetSingleArgument("bitwidth", 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t data_per_byte = 8 / bitwidth; vector out; TensorShape X = in[0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X.set_dims(1, 10 + (X.dims(1) + data_per_byte - 1) / data_per_byte); out.push_back(std::move(X)); out[0].set_data_type(TensorProto_DataType_UINT8); diff --git a/caffe2/operators/gelu_op.cc b/caffe2/operators/gelu_op.cc index 3652229a19400..6e65c22805492 100644 --- a/caffe2/operators/gelu_op.cc +++ b/caffe2/operators/gelu_op.cc @@ -31,7 +31,6 @@ operator()(const int N, const T* X, T* Y, CPUContext* context) const { Y_arr = X_arr * (((X_arr + X_arr.cube() * gelu_utils::kFastCoeff) * kAlpha).tanh() + T(1)) * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) static_cast(0.5); } 
else { // y = x * P(X <= x) where X ~ N(0, 1) @@ -63,13 +62,11 @@ bool GeluGradientFunctor::Forward( dX_arr = (T(1) + dX_arr + X_arr * (T(1) - dX_arr.square()) * (kBeta * X_arr.square() + kAlpha)) * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dY_arr * static_cast(0.5); } else { constexpr T kAlpha = M_2_SQRTPI * M_SQRT1_2 * T(0.5); math::CdfNorm(N, X, dX, context); dX_arr = (dX_arr + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X_arr * (-X_arr.square() * static_cast(0.5)).exp() * kAlpha) * dY_arr; } diff --git a/caffe2/operators/generate_proposals_op_test.cc b/caffe2/operators/generate_proposals_op_test.cc index 5dc6787ef7ca9..91dfc66896e37 100644 --- a/caffe2/operators/generate_proposals_op_test.cc +++ b/caffe2/operators/generate_proposals_op_test.cc @@ -63,34 +63,21 @@ static void AddInput( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(GenerateProposalsTest, TestComputeAllAnchors) { ERMatXf anchors(3, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) anchors << -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, 103; int height = 4; int width = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float feat_stride = 16; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf all_anchors_gt(36, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) all_anchors_gt << -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, 103, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -22, -16, 69, 31, -68, -40, 115, 55, -160, -88, 207, 103, -6, -16, 85, 31, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -52, -40, 131, 55, -144, -88, 223, 103, -38, 0, 53, 47, -84, -24, 99, 71, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -176, -72, 191, 119, -22, 0, 69, 47, -68, -24, 115, 71, -160, -72, 207, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 119, -6, 0, 85, 47, -52, -24, 131, 71, -144, -72, 223, 119, -38, 16, 53, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 63, -84, -8, 99, 87, -176, -56, 191, 135, -22, 16, 69, 63, -68, -8, 115, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 87, -160, -56, 207, 135, -6, 16, 85, 63, -52, -8, 131, 87, -144, -56, 223, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 135, -38, 32, 53, 79, -84, 8, 99, 103, -176, -40, 191, 151, -22, 32, 69, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 79, -68, 8, 115, 103, -160, -40, 207, 151, -6, 32, 85, 79, -52, 8, 131, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 103, -144, -40, 223, 151; Tensor anchors_tensor(vector{anchors.rows(), anchors.cols()}, CPU); @@ -109,13 +96,11 @@ TEST(GenerateProposalsTest, TestComputeAllAnchors) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(GenerateProposalsTest, TestComputeSortedAnchors) { ERMatXf anchors(3, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) anchors << -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, 103; int height = 4; int width = 3; int A = anchors.rows(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float feat_stride = 16; int total = height * width * A; @@ -163,15 +148,12 @@ TEST(GenerateProposalsTest, TestComputeSortedAnchors) { TEST(GenerateProposalsTest, TestComputeAllAnchorsRotated) { // Similar to TestComputeAllAnchors but for rotated boxes with angle info. 
ERMatXf anchors_xyxy(3, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) anchors_xyxy << -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, 103; // Convert to RRPN format and add angles - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf anchors(3, 5); anchors.block(0, 0, 3, 4) = utils::bbox_xyxy_to_ctrwh( anchors_xyxy.array(), true /* legacy_plus_one */); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector angles{0.0, 45.0, -120.0}; for (int i = 0; i < anchors.rows(); ++i) { anchors(i, 4) = angles[i % angles.size()]; @@ -179,35 +161,21 @@ TEST(GenerateProposalsTest, TestComputeAllAnchorsRotated) { int height = 4; int width = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float feat_stride = 16; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf all_anchors_gt_xyxy(36, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) all_anchors_gt_xyxy << -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 103, -22, -16, 69, 31, -68, -40, 115, 55, -160, -88, 207, 103, -6, -16, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 85, 31, -52, -40, 131, 55, -144, -88, 223, 103, -38, 0, 53, 47, -84, -24, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 99, 71, -176, -72, 191, 119, -22, 0, 69, 47, -68, -24, 115, 71, -160, -72, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 207, 119, -6, 0, 85, 47, -52, -24, 131, 71, -144, -72, 223, 119, -38, 16, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 53, 63, -84, -8, 99, 87, -176, -56, 191, 135, -22, 16, 69, 63, -68, -8, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 115, 87, -160, -56, 207, 135, -6, 16, 85, 63, -52, -8, 131, 87, -144, -56, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 223, 135, -38, 32, 53, 79, -84, 8, 99, 103, -176, -40, 191, 151, -22, 32, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 69, 79, -68, 8, 115, 103, -160, -40, 207, 151, -6, 32, 85, 79, -52, 8, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 131, 103, -144, -40, 223, 151; // Convert gt to RRPN format and add angles - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf all_anchors_gt(36, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) all_anchors_gt.block(0, 0, 36, 4) = utils::bbox_xyxy_to_ctrwh( all_anchors_gt_xyxy.array(), true /* legacy_plus_one */); for (int i = 0; i < all_anchors_gt.rows(); ++i) { @@ -222,7 +190,6 @@ TEST(GenerateProposalsTest, TestComputeAllAnchorsRotated) { auto result = utils::ComputeAllAnchors(anchors_tensor, height, width, feat_stride); Eigen::Map all_anchors_result( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) result.data(), height * width * anchors.rows(), 5); EXPECT_EQ((all_anchors_result - all_anchors_gt).norm(), 0); @@ -232,15 +199,12 @@ TEST(GenerateProposalsTest, TestComputeAllAnchorsRotated) { TEST(GenerateProposalsTest, TestComputeSortedAnchorsRotated) { // Similar to TestComputeSortedAnchors but for rotated boxes with angle info. 
ERMatXf anchors_xyxy(3, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) anchors_xyxy << -38, -16, 53, 31, -84, -40, 99, 55, -176, -88, 191, 103; // Convert to RRPN format and add angles - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf anchors(3, 5); anchors.block(0, 0, 3, 4) = utils::bbox_xyxy_to_ctrwh( anchors_xyxy.array(), true /* legacy_plus_one */); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector angles{0.0, 45.0, -120.0}; for (int i = 0; i < anchors.rows(); ++i) { anchors(i, 4) = angles[i % angles.size()]; @@ -249,7 +213,6 @@ TEST(GenerateProposalsTest, TestComputeSortedAnchorsRotated) { int height = 4; int width = 3; int A = anchors.rows(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float feat_stride = 16; int total = height * width * A; @@ -261,7 +224,6 @@ TEST(GenerateProposalsTest, TestComputeSortedAnchorsRotated) { auto all_anchors = utils::ComputeAllAnchors(anchors_tensor, height, width, feat_stride); Eigen::Map all_anchors_result( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) all_anchors.data(), height * width * A, 5); Eigen::Map anchors_map( @@ -312,13 +274,10 @@ TEST(GenerateProposalsTest, TestEmpty) { const int W = 8; AddConstInput(vector{img_count, A, H, W}, 1., "scores", &ws); AddLinSpacedInput( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector{img_count, 4 * A, H, W}, 0, 10, "bbox_deltas", &ws); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConstInput(vector{img_count, 3}, 0.1, "im_info", &ws); AddConstInput(vector{A, 4}, 1.0, "anchors", &ws); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("spatial_scale", 2.0f)); unique_ptr op(CreateOperator(def, &ws)); @@ -353,141 +312,74 @@ TEST(GenerateProposalsTest, TestRealDownSampled) { const int W = 5; vector scores{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.44218998e-03f, 1.19207997e-03f, 1.12379994e-03f, 1.17181998e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.20544003e-03f, 6.17993006e-04f, 1.05261997e-05f, 8.91025957e-06f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.29536981e-09f, 6.09605013e-05f, 4.72735002e-04f, 1.13482002e-10f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.50015003e-05f, 4.45032993e-06f, 3.21612994e-08f, 8.02662980e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.40488002e-04f, 3.12508007e-07f, 3.02616991e-06f, 1.97759000e-08f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.66913995e-02f, 5.26766013e-03f, 5.05053019e-03f, 5.62100019e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.37420018e-03f, 5.26280981e-03f, 2.48894998e-04f, 1.06842002e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.92931997e-06f, 1.79388002e-03f, 4.79440019e-03f, 3.41609990e-07f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.20430971e-04f, 3.34090000e-05f, 2.19159006e-07f, 2.28786003e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.16703985e-05f, 4.04523007e-06f, 1.79227004e-06f, 5.32449000e-08f}; vector bbx{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.65040009e-02f, -1.84051003e-02f, -1.85930002e-02f, -2.08263006e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.83814000e-02f, -2.89172009e-02f, -3.89706008e-02f, -7.52277970e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.54091999e-01f, -2.55433004e-02f, -1.77490003e-02f, -1.10340998e-01f, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -4.20190990e-02f, -2.71421000e-02f, 6.89801015e-03f, 5.71171008e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.75665006e-01f, 2.30021998e-02f, 3.08554992e-02f, -1.39333997e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.40579003e-01f, 3.91070992e-01f, 3.91624004e-01f, 3.92527014e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.91445011e-01f, 3.79328012e-01f, 4.26631987e-01f, 3.64892989e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.76894987e-01f, 5.13985991e-01f, 3.79999995e-01f, 1.80457994e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.37402993e-01f, 4.18545991e-01f, 2.51549989e-01f, 4.48318988e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.68564007e-01f, 4.65440989e-01f, 4.21891987e-01f, 4.45928007e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.27155995e-03f, 3.71480011e-03f, 3.60032008e-03f, 4.27092984e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.74579988e-03f, 5.95752988e-03f, -3.14473989e-03f, 3.52022005e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.88564006e-02f, 1.65188999e-03f, 1.73791999e-03f, -3.56074013e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.66615995e-04f, 3.14146001e-03f, -1.11830998e-02f, -5.35363983e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.49790000e-03f, -9.27671045e-03f, -2.83346009e-02f, -1.61233004e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.15505004e-01f, -2.19910994e-01f, -2.20872998e-01f, -2.12831005e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.19145000e-01f, -2.27687001e-01f, -3.43973994e-01f, -2.75869995e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -3.19516987e-01f, -2.50418007e-01f, -2.48537004e-01f, -5.08224010e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.28724003e-01f, -2.82402009e-01f, -3.75815988e-01f, -2.86352992e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.28333001e-02f, -4.43836004e-01f, -4.55134988e-01f, -4.34897989e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.65053988e-03f, -9.25739005e-04f, -1.06790999e-03f, -2.37016007e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9.71166010e-04f, -8.90910998e-03f, -1.17592998e-02f, -2.08992008e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -4.94231991e-02f, 6.63906988e-03f, 3.20469006e-03f, -6.44695014e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -3.11607006e-03f, 2.02738005e-03f, 1.48096997e-02f, 4.39785011e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.28424022e-02f, 3.62076014e-02f, 2.71668993e-02f, 1.38250999e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.76669031e-02f, 1.03252999e-01f, 1.03255004e-01f, 9.89722982e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.03646003e-01f, 4.79663983e-02f, 1.11014001e-01f, 9.31736007e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.15768999e-01f, 1.04014002e-01f, -8.90677981e-03f, 1.13103002e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.33085996e-01f, 1.25405997e-01f, 1.50051996e-01f, -1.13038003e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7.01059997e-02f, 1.79651007e-01f, 1.41055003e-01f, 1.62841007e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.00247003e-02f, 
-8.17587040e-03f, -8.32176022e-03f, -8.90108012e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.13035015e-03f, -1.77263003e-02f, -3.69572006e-02f, -3.51580009e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.92143014e-02f, -1.80795006e-02f, -5.46086021e-03f, -4.10550982e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.83081999e-02f, -2.15411000e-02f, -1.17953997e-02f, 3.33894007e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.29635996e-02f, -6.97528012e-03f, -3.15250992e-03f, -3.27355005e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.29676998e-01f, 1.16080999e-01f, 1.15947001e-01f, 1.21797003e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.16089001e-01f, 1.44875005e-01f, 1.15617000e-01f, 1.31586999e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.74735002e-02f, 1.21973999e-01f, 1.31596997e-01f, 2.48907991e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.18605018e-02f, 1.12855002e-01f, -6.99798986e-02f, 9.58312973e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.53593004e-01f, -8.75087008e-02f, -4.92327996e-02f, -3.32239009e-02f}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector im_info{60, 80, 0.166667f}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector anchors{-38, -16, 53, 31, -120, -120, 135, 135}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf rois_gt(9, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rois_gt << 0, 0, 0, 79, 59, 0, 0, 5.0005703f, 51.6324f, 42.6950f, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 24.13628387f, 7.51243401f, 79, 45.0663f, 0, 0, 7.50924301f, 67.4779f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 45.0336, 0, 0, 23.09477997f, 50.61448669f, 59, 0, 0, 39.52141571f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 51.44710541f, 59, 0, 23.57396317f, 29.98791885f, 79, 59, 0, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 41.90219116f, 79, 59, 0, 0, 23.30098343f, 78.2413f, 58.7287f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector rois_probs_gt{2.66913995e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.44218998e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.20544003e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.19207997e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.17993006e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.72735002e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.09605013e-05f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.50015003e-05f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8.91025957e-06f}; AddInput(vector{img_count, A, H, W}, scores, "scores", &ws); @@ -495,15 +387,10 @@ TEST(GenerateProposalsTest, TestRealDownSampled) { AddInput(vector{img_count, 3}, im_info, "im_info", &ws); AddInput(vector{A, 4}, anchors, "anchors", &ws); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("spatial_scale", 1.0f / 16.0f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("pre_nms_topN", 6000)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("post_nms_topN", 300)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
def.add_arg()->CopyFrom(MakeArgument("nms_thresh", 0.7f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("min_size", 16.0f)); def.add_arg()->CopyFrom(MakeArgument("correct_transform_coords", true)); @@ -560,106 +447,56 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotatedAngle0) { const int W = 5; vector scores{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.44218998e-03f, 1.19207997e-03f, 1.12379994e-03f, 1.17181998e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.20544003e-03f, 6.17993006e-04f, 1.05261997e-05f, 8.91025957e-06f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.29536981e-09f, 6.09605013e-05f, 4.72735002e-04f, 1.13482002e-10f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.50015003e-05f, 4.45032993e-06f, 3.21612994e-08f, 8.02662980e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.40488002e-04f, 3.12508007e-07f, 3.02616991e-06f, 1.97759000e-08f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.66913995e-02f, 5.26766013e-03f, 5.05053019e-03f, 5.62100019e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.37420018e-03f, 5.26280981e-03f, 2.48894998e-04f, 1.06842002e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.92931997e-06f, 1.79388002e-03f, 4.79440019e-03f, 3.41609990e-07f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.20430971e-04f, 3.34090000e-05f, 2.19159006e-07f, 2.28786003e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.16703985e-05f, 4.04523007e-06f, 1.79227004e-06f, 5.32449000e-08f}; vector bbx{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.65040009e-02f, -1.84051003e-02f, -1.85930002e-02f, -2.08263006e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.83814000e-02f, -2.89172009e-02f, -3.89706008e-02f, -7.52277970e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.54091999e-01f, -2.55433004e-02f, -1.77490003e-02f, -1.10340998e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -4.20190990e-02f, -2.71421000e-02f, 6.89801015e-03f, 5.71171008e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.75665006e-01f, 2.30021998e-02f, 3.08554992e-02f, -1.39333997e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.40579003e-01f, 3.91070992e-01f, 3.91624004e-01f, 3.92527014e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.91445011e-01f, 3.79328012e-01f, 4.26631987e-01f, 3.64892989e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.76894987e-01f, 5.13985991e-01f, 3.79999995e-01f, 1.80457994e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.37402993e-01f, 4.18545991e-01f, 2.51549989e-01f, 4.48318988e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.68564007e-01f, 4.65440989e-01f, 4.21891987e-01f, 4.45928007e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.27155995e-03f, 3.71480011e-03f, 3.60032008e-03f, 4.27092984e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.74579988e-03f, 5.95752988e-03f, -3.14473989e-03f, 3.52022005e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.88564006e-02f, 1.65188999e-03f, 1.73791999e-03f, -3.56074013e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.66615995e-04f, 3.14146001e-03f, -1.11830998e-02f, -5.35363983e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.49790000e-03f, -9.27671045e-03f, 
-2.83346009e-02f, -1.61233004e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.15505004e-01f, -2.19910994e-01f, -2.20872998e-01f, -2.12831005e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.19145000e-01f, -2.27687001e-01f, -3.43973994e-01f, -2.75869995e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -3.19516987e-01f, -2.50418007e-01f, -2.48537004e-01f, -5.08224010e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.28724003e-01f, -2.82402009e-01f, -3.75815988e-01f, -2.86352992e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.28333001e-02f, -4.43836004e-01f, -4.55134988e-01f, -4.34897989e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.65053988e-03f, -9.25739005e-04f, -1.06790999e-03f, -2.37016007e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9.71166010e-04f, -8.90910998e-03f, -1.17592998e-02f, -2.08992008e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -4.94231991e-02f, 6.63906988e-03f, 3.20469006e-03f, -6.44695014e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -3.11607006e-03f, 2.02738005e-03f, 1.48096997e-02f, 4.39785011e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.28424022e-02f, 3.62076014e-02f, 2.71668993e-02f, 1.38250999e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.76669031e-02f, 1.03252999e-01f, 1.03255004e-01f, 9.89722982e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.03646003e-01f, 4.79663983e-02f, 1.11014001e-01f, 9.31736007e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.15768999e-01f, 1.04014002e-01f, -8.90677981e-03f, 1.13103002e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.33085996e-01f, 1.25405997e-01f, 1.50051996e-01f, -1.13038003e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7.01059997e-02f, 1.79651007e-01f, 1.41055003e-01f, 1.62841007e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.00247003e-02f, -8.17587040e-03f, -8.32176022e-03f, -8.90108012e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.13035015e-03f, -1.77263003e-02f, -3.69572006e-02f, -3.51580009e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.92143014e-02f, -1.80795006e-02f, -5.46086021e-03f, -4.10550982e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.83081999e-02f, -2.15411000e-02f, -1.17953997e-02f, 3.33894007e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.29635996e-02f, -6.97528012e-03f, -3.15250992e-03f, -3.27355005e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.29676998e-01f, 1.16080999e-01f, 1.15947001e-01f, 1.21797003e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.16089001e-01f, 1.44875005e-01f, 1.15617000e-01f, 1.31586999e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.74735002e-02f, 1.21973999e-01f, 1.31596997e-01f, 2.48907991e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.18605018e-02f, 1.12855002e-01f, -6.99798986e-02f, 9.58312973e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.53593004e-01f, -8.75087008e-02f, -4.92327996e-02f, -3.32239009e-02f}; // Add angle in bbox deltas @@ -678,29 +515,20 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotatedAngle0) { } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector im_info{60, 80, 0.166667f}; // vector anchors{-38, -16, 53, 31, -120, -120, 135, 135}; // Anchors in 
[x_ctr, y_ctr, w, h, angle] format - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector anchors{7.5, 7.5, 92, 48, angle, 7.5, 7.5, 256, 256, angle}; // Results should exactly be the same as TestRealDownSampled since // angle = 0 for all boxes and clip_angle_thresh > 0 (which means // all horizontal boxes will be clipped to maintain backward compatibility). - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf rois_gt_xyxy(9, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rois_gt_xyxy << 0, 0, 0, 79, 59, 0, 0, 5.0005703f, 51.6324f, 42.6950f, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 24.13628387f, 7.51243401f, 79, 45.0663f, 0, 0, 7.50924301f, 67.4779f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 45.0336, 0, 0, 23.09477997f, 50.61448669f, 59, 0, 0, 39.52141571f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 51.44710541f, 59, 0, 23.57396317f, 29.98791885f, 79, 59, 0, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 41.90219116f, 79, 59, 0, 0, 23.30098343f, 78.2413f, 58.7287f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ERMatXf rois_gt(rois_gt_xyxy.rows(), 6); // Batch ID rois_gt.block(0, 0, rois_gt.rows(), 1) = @@ -710,26 +538,16 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotatedAngle0) { rois_gt_xyxy.block(0, 1, rois_gt.rows(), 4).array(), true /* legacy_plus_one */); // Angle - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rois_gt.block(0, 5, rois_gt.rows(), 1) = ERMatXf::Constant(rois_gt.rows(), 1, angle); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector rois_probs_gt{2.66913995e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.44218998e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.20544003e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.19207997e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.17993006e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.72735002e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.09605013e-05f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.50015003e-05f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8.91025957e-06f}; AddInput(vector{img_count, A, H, W}, scores, "scores", &ws); @@ -741,15 +559,10 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotatedAngle0) { AddInput(vector{img_count, 3}, im_info, "im_info", &ws); AddInput(vector{A, box_dim}, anchors, "anchors", &ws); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("spatial_scale", 1.0f / 16.0f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("pre_nms_topN", 6000)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("post_nms_topN", 300)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("nms_thresh", 0.7f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("min_size", 16.0f)); def.add_arg()->CopyFrom(MakeArgument("correct_transform_coords", true)); def.add_arg()->CopyFrom(MakeArgument("clip_angle_thresh", clip_angle_thresh)); @@ -808,106 +621,56 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotated) { const int W = 5; vector scores{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.44218998e-03f, 1.19207997e-03f, 1.12379994e-03f, 1.17181998e-03f, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.20544003e-03f, 6.17993006e-04f, 1.05261997e-05f, 8.91025957e-06f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.29536981e-09f, 6.09605013e-05f, 4.72735002e-04f, 1.13482002e-10f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.50015003e-05f, 4.45032993e-06f, 3.21612994e-08f, 8.02662980e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.40488002e-04f, 3.12508007e-07f, 3.02616991e-06f, 1.97759000e-08f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.66913995e-02f, 5.26766013e-03f, 5.05053019e-03f, 5.62100019e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.37420018e-03f, 5.26280981e-03f, 2.48894998e-04f, 1.06842002e-04f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.92931997e-06f, 1.79388002e-03f, 4.79440019e-03f, 3.41609990e-07f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.20430971e-04f, 3.34090000e-05f, 2.19159006e-07f, 2.28786003e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5.16703985e-05f, 4.04523007e-06f, 1.79227004e-06f, 5.32449000e-08f}; vector bbx{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.65040009e-02f, -1.84051003e-02f, -1.85930002e-02f, -2.08263006e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.83814000e-02f, -2.89172009e-02f, -3.89706008e-02f, -7.52277970e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.54091999e-01f, -2.55433004e-02f, -1.77490003e-02f, -1.10340998e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -4.20190990e-02f, -2.71421000e-02f, 6.89801015e-03f, 5.71171008e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.75665006e-01f, 2.30021998e-02f, 3.08554992e-02f, -1.39333997e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.40579003e-01f, 3.91070992e-01f, 3.91624004e-01f, 3.92527014e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.91445011e-01f, 3.79328012e-01f, 4.26631987e-01f, 3.64892989e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.76894987e-01f, 5.13985991e-01f, 3.79999995e-01f, 1.80457994e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.37402993e-01f, 4.18545991e-01f, 2.51549989e-01f, 4.48318988e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.68564007e-01f, 4.65440989e-01f, 4.21891987e-01f, 4.45928007e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.27155995e-03f, 3.71480011e-03f, 3.60032008e-03f, 4.27092984e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.74579988e-03f, 5.95752988e-03f, -3.14473989e-03f, 3.52022005e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.88564006e-02f, 1.65188999e-03f, 1.73791999e-03f, -3.56074013e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.66615995e-04f, 3.14146001e-03f, -1.11830998e-02f, -5.35363983e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.49790000e-03f, -9.27671045e-03f, -2.83346009e-02f, -1.61233004e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.15505004e-01f, -2.19910994e-01f, -2.20872998e-01f, -2.12831005e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.19145000e-01f, -2.27687001e-01f, -3.43973994e-01f, -2.75869995e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -3.19516987e-01f, -2.50418007e-01f, -2.48537004e-01f, -5.08224010e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -2.28724003e-01f, 
-2.82402009e-01f, -3.75815988e-01f, -2.86352992e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.28333001e-02f, -4.43836004e-01f, -4.55134988e-01f, -4.34897989e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.65053988e-03f, -9.25739005e-04f, -1.06790999e-03f, -2.37016007e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9.71166010e-04f, -8.90910998e-03f, -1.17592998e-02f, -2.08992008e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -4.94231991e-02f, 6.63906988e-03f, 3.20469006e-03f, -6.44695014e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -3.11607006e-03f, 2.02738005e-03f, 1.48096997e-02f, 4.39785011e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.28424022e-02f, 3.62076014e-02f, 2.71668993e-02f, 1.38250999e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.76669031e-02f, 1.03252999e-01f, 1.03255004e-01f, 9.89722982e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.03646003e-01f, 4.79663983e-02f, 1.11014001e-01f, 9.31736007e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.15768999e-01f, 1.04014002e-01f, -8.90677981e-03f, 1.13103002e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.33085996e-01f, 1.25405997e-01f, 1.50051996e-01f, -1.13038003e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7.01059997e-02f, 1.79651007e-01f, 1.41055003e-01f, 1.62841007e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.00247003e-02f, -8.17587040e-03f, -8.32176022e-03f, -8.90108012e-03f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -8.13035015e-03f, -1.77263003e-02f, -3.69572006e-02f, -3.51580009e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.92143014e-02f, -1.80795006e-02f, -5.46086021e-03f, -4.10550982e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -1.83081999e-02f, -2.15411000e-02f, -1.17953997e-02f, 3.33894007e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -5.29635996e-02f, -6.97528012e-03f, -3.15250992e-03f, -3.27355005e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.29676998e-01f, 1.16080999e-01f, 1.15947001e-01f, 1.21797003e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.16089001e-01f, 1.44875005e-01f, 1.15617000e-01f, 1.31586999e-01f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.74735002e-02f, 1.21973999e-01f, 1.31596997e-01f, 2.48907991e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6.18605018e-02f, 1.12855002e-01f, -6.99798986e-02f, 9.58312973e-02f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.53593004e-01f, -8.75087008e-02f, -4.92327996e-02f, -3.32239009e-02f}; // Add angle in bbox deltas @@ -928,10 +691,8 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotated) { } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector im_info{60, 80, 0.166667f}; // vector anchors{-38, -16, 53, 31, -120, -120, 135, 135}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector anchors{8, 8, 92, 48, angle, 8, 8, 256, 256, angle}; AddInput(vector{img_count, A, H, W}, scores, "scores", &ws); @@ -943,15 +704,10 @@ TEST(GenerateProposalsTest, TestRealDownSampledRotated) { AddInput(vector{img_count, 3}, im_info, "im_info", &ws); AddInput(vector{A, box_dim}, anchors, "anchors", &ws); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("spatial_scale", 1.0f / 16.0f)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("pre_nms_topN", 6000)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("post_nms_topN", 300)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("nms_thresh", 0.7f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) def.add_arg()->CopyFrom(MakeArgument("min_size", 16.0f)); def.add_arg()->CopyFrom(MakeArgument("correct_transform_coords", true)); def.add_arg()->CopyFrom(MakeArgument("clip_angle_thresh", clip_angle_thresh)); diff --git a/caffe2/operators/generate_proposals_op_util_boxes_test.cc b/caffe2/operators/generate_proposals_op_util_boxes_test.cc index f876a8cc339e8..56c39248ced5f 100644 --- a/caffe2/operators/generate_proposals_op_util_boxes_test.cc +++ b/caffe2/operators/generate_proposals_op_util_boxes_test.cc @@ -9,37 +9,22 @@ namespace caffe2 { TEST(UtilsBoxesTest, TestBboxTransformRandom) { using EMatXf = Eigen::MatrixXf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf bbox(5, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bbox << 175.62031555, 20.91103172, 253.352005, 155.0145874, 169.24636841, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.85241556, 228.8605957, 105.02092743, 181.77426147, 199.82876587, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 192.88427734, 214.0255127, 174.36262512, 186.75761414, 296.19091797, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 231.27906799, 22.73153877, 92.02596283, 135.5695343, 208.80291748; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf deltas(5, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) deltas << 0.47861834, 0.13992102, 0.14961673, 0.71495209, 0.29915856, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.35664671, 0.89018666, 0.70815367, -0.03852064, 0.44466892, 0.49492538, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.71409376, 0.28052918, 0.02184832, 0.65289006, 1.05060139, -0.38172557, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.08533806, -0.60335309, 0.79052375; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf result_gt(5, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) result_gt << 206.949539, -30.715202, 297.387665, 244.448486, 143.871216, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -83.342888, 290.502289, 121.053398, 177.430283, 198.666245, 196.295273, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 228.703079, 152.251892, 145.431564, 387.215454, 274.594238, 5.062420, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 11.040955, 66.328903, 269.686218; // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) @@ -58,38 +43,23 @@ TEST(UtilsBoxesTest, TestBboxTransformRotated) { // Test rotated bbox transform w/o angle normalization using EMatXf = Eigen::MatrixXf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf bbox(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bbox << 214.986, 88.4628, 78.7317, 135.104, 0.0, 199.553, 55.4367, 60.6142, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 101.169, 45.0, 187.829, 207.427, 0012.11, 15.1967, 90.0, 235.777, 209.518, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 122.828, 45.5215, -60.0, 79.6505, 150.914, 113.838, 117.777, 170.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
EMatXf deltas(5, 5); // 0.174533 radians -> 10 degrees - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) deltas << 0.47861834, 0.13992102, 0.14961673, 0.71495209, 0.0, 0.29915856, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.35664671, 0.89018666, 0.70815367, 0.174533, -0.03852064, 0.44466892, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.49492538, 0.71409376, 0.174533, 0.28052918, 0.02184832, 0.65289006, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.05060139, 0.174533, -0.38172557, -0.08533806, -0.60335309, 0.79052375, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.174533; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf result_gt(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) result_gt << 252.668, 107.367, 91.4381, 276.165, 0.0, 217.686, 19.3551, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 147.631, 205.397, 55.0, 187.363, 214.185, 19.865, 31.0368, 100.0, 270.234, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 210.513, 235.963, 130.163, -50.0, 36.1956, 140.863, 62.2665, 259.645, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 180.5; // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) @@ -109,36 +79,22 @@ TEST(UtilsBoxesTest, TestBboxTransformRotatedNormalized) { // Test rotated bbox transform with angle normalization using EMatXf = Eigen::MatrixXf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf bbox(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bbox << 214.986, 88.4628, 78.7317, 135.104, 0.0, 199.553, 55.4367, 60.6142, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 101.169, 45.0, 187.829, 207.427, 0012.11, 15.1967, 90.0, 235.777, 209.518, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 122.828, 45.5215, -60.0, 79.6505, 150.914, 113.838, 117.777, 170.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf deltas(5, 5); // 0.174533 radians -> 10 degrees - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) deltas << 0.47861834, 0.13992102, 0.14961673, 0.71495209, 0.0, 0.29915856, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.35664671, 0.89018666, 0.70815367, 0.174533, -0.03852064, 0.44466892, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.49492538, 0.71409376, 0.174533, 0.28052918, 0.02184832, 0.65289006, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.05060139, 0.174533, -0.38172557, -0.08533806, -0.60335309, 0.79052375, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.174533; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf result_gt(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) result_gt << 252.668, 107.367, 91.4381, 276.165, 0.0, 217.686, 19.3551, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 147.631, 205.397, 55.0, 187.363, 214.185, 19.865, 31.0368, -80.0, 270.234, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 210.513, 235.963, 130.163, -50.0, 36.1956, 140.863, 62.2665, 259.645, 0.5; // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) @@ -150,9 +106,7 @@ TEST(UtilsBoxesTest, TestBboxTransformRotatedNormalized) { BBOX_XFORM_CLIP, true, /* legacy_plus_one */ true, /* angle_bound_on */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -90, /* angle_bound_lo */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 90 /* angle_bound_hi */); 
EXPECT_NEAR((result.matrix() - result_gt).norm(), 0.0, 1e-2); } @@ -162,21 +116,13 @@ TEST(UtilsBoxesTest, ClipRotatedBoxes) { // Test utils::clip_boxes_rotated() using EMatXf = Eigen::MatrixXf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int height = 800; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int width = 600; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf bbox(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bbox << 20, 20, 200, 150, 0, // Horizontal - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, 20, 200, 150, 0.5, // Almost horizontal - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, 20, 200, 150, 30, // Rotated - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 300, 300, 200, 150, 30, // Rotated - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 579, 779, 200, 150, -0.5; // Almost horizontal // Test with no clipping @@ -185,13 +131,9 @@ TEST(UtilsBoxesTest, ClipRotatedBoxes) { bbox.array(), height, width, angle_thresh, true /* legacy_plus_one */); EXPECT_NEAR((result.matrix() - bbox).norm(), 0.0, 1e-4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) EMatXf result_gt(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) result_gt << 59.75, 47.25, 120.5, 95.5, 0, 59.75, 47.25, 120.5, 95.5, 0.5, 20, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, 200, 150, 30, 300, 300, 200, 150, 30, 539.25, 751.75, 120.5, 95.5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -0.5; // Test clipping with tolerance diff --git a/caffe2/operators/generate_proposals_op_util_nms_test.cc b/caffe2/operators/generate_proposals_op_util_nms_test.cc index 4ac8c3ddeb5de..e45d57ea336a9 100644 --- a/caffe2/operators/generate_proposals_op_util_nms_test.cc +++ b/caffe2/operators/generate_proposals_op_util_nms_test.cc @@ -9,14 +9,10 @@ namespace caffe2 { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(UtilsNMSTest, TestNMS) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf input(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input << 10, 10, 50, 60, 0.5, 11, 12, 48, 60, 0.7, 8, 9, 40, 50, 0.6, 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, 150, 140, 0.9, 99, 110, 155, 139, 0.8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_thresh{0.1f, 0.3f, 0.5f, 0.8f, 0.9f}; // ground truth generated based on detection.caffe2/lib/nms/py_cpu_nms.py std::vector> output_gt{ @@ -67,95 +63,52 @@ TEST(UtilsNMSTest, TestNMS) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(UtilsNMSTest, TestNMS1) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf proposals(53, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) proposals << 350.9821, 161.8200, 369.9685, 205.2372, 250.5236, 154.2844, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 274.1773, 204.9810, 471.4920, 160.4118, 496.0094, 213.4244, 352.0421, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 164.5933, 366.4458, 205.9624, 166.0765, 169.7707, 183.0102, 232.6606, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 252.3000, 183.1449, 269.6541, 210.6747, 469.7862, 162.0192, 482.1673, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 187.0053, 168.4862, 174.2567, 181.7437, 232.9379, 470.3290, 162.3442, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 496.4272, 214.6296, 251.0450, 155.5911, 
272.2693, 203.3675, 252.0326, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 154.7950, 273.7404, 195.3671, 351.7479, 161.9567, 370.6432, 204.3047, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 496.3306, 161.7157, 515.0573, 210.7200, 471.0749, 162.6143, 485.3374, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 207.3448, 250.9745, 160.7633, 264.1924, 206.8350, 470.4792, 169.0351, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 487.1934, 220.2984, 474.4227, 161.9546, 513.1018, 215.5193, 251.9428, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 184.1950, 262.6937, 207.6416, 252.6623, 175.0252, 269.8806, 213.7584, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 260.9884, 157.0351, 288.3554, 206.6027, 251.3629, 164.5101, 263.2179, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 202.4203, 471.8361, 190.8142, 485.6812, 220.8586, 248.6243, 156.9628, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 264.3355, 199.2767, 495.1643, 158.0483, 512.6261, 184.4192, 376.8718, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 168.0144, 387.3584, 201.3210, 122.9191, 160.7433, 172.5612, 231.3837, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 350.3857, 175.8806, 366.2500, 205.4329, 115.2958, 162.7822, 161.9776, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 229.6147, 168.4375, 177.4041, 180.8028, 232.4551, 169.7939, 184.4330, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 181.4767, 232.1220, 347.7536, 175.9356, 355.8637, 197.5586, 495.5434, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 164.6059, 516.4031, 207.7053, 172.1216, 194.6033, 183.1217, 235.2653, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 264.2654, 181.5540, 288.4626, 214.0170, 111.7971, 183.7748, 137.3745, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 225.9724, 253.4919, 186.3945, 280.8694, 210.0731, 165.5334, 169.7344, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 185.9159, 232.8514, 348.3662, 184.5187, 354.9081, 201.4038, 164.6562, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 162.5724, 186.3108, 233.5010, 113.2999, 186.8410, 135.8841, 219.7642, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 117.0282, 179.8009, 142.5375, 221.0736, 462.1312, 161.1004, 495.3576, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 217.2208, 462.5800, 159.9310, 501.2937, 224.1655, 503.5242, 170.0733, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 518.3792, 209.0113, 250.3658, 195.5925, 260.6523, 212.4679, 108.8287, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 163.6994, 146.3642, 229.7261, 256.7617, 187.3123, 288.8407, 211.2013, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 161.2781, 167.4801, 186.3751, 232.7133, 115.3760, 177.5859, 163.3512, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 236.9660, 248.9077, 188.0919, 264.8579, 207.9718, 108.1349, 160.7851, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 143.6370, 229.6243, 465.0900, 156.7555, 490.3561, 213.5704, 107.5338, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 173.4323, 141.0704, 235.2910; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf scores(53, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores << 0.1919, 0.3293, 0.0860, 0.1600, 0.1885, 0.4297, 0.0974, 0.2711, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.1483, 0.1173, 0.1034, 0.2915, 0.1993, 0.0677, 0.3217, 
0.0966, 0.0526, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5675, 0.3130, 0.1592, 0.1353, 0.0634, 0.1557, 0.1512, 0.0699, 0.0545, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.2692, 0.1143, 0.0572, 0.1990, 0.0558, 0.1500, 0.2214, 0.1878, 0.2501, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.1343, 0.0809, 0.1266, 0.0743, 0.0896, 0.0781, 0.0983, 0.0557, 0.0623, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5808, 0.3090, 0.1050, 0.0524, 0.0513, 0.4501, 0.4167, 0.0623, 0.1749; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output_gt{1, 6, 7, 8, 11, 12, 13, 14, 17, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 18, 19, 21, 23, 24, 25, 26, 30, 32, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 33, 34, 35, 37, 43, 44, 47, 50}; auto cur_out = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) utils::nms_cpu(proposals, scores, 0.5, true /* legacy_plus_one */); std::sort(cur_out.begin(), cur_out.end()); EXPECT_EQ(output_gt, cur_out); @@ -163,34 +116,22 @@ TEST(UtilsNMSTest, TestNMS1) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(UtilsNMSTest, TestSoftNMS) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf input(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(0) << 5.18349426e+02, 1.77783920e+02, 9.06085266e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.59163239e+02, 8.17906916e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(1) << 2.11392624e+02, 1.76144958e+02, 6.14215149e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.48934662e+02, 9.52467501e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(2) << 4.65724518e+02, 1.83594269e+02, 9.39000000e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.55136627e+02, 6.73921347e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(3) << 6.07164246e+02, 2.60230377e+02, 8.32768127e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.39919891e+02, 9.99834776e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(4) << 3.23936859e+02, 3.43427063e+02, 6.20561157e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.98286072e+02, 9.99737203e-01; const auto& proposals = input.block(0, 0, input.rows(), 4); const auto& scores = input.col(4); vector method{1, 1, 2, 2}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector overlap_thresh{0.1f, 0.3f, 0.1f, 0.3f}; // Ground truth generated based on @@ -198,27 +139,18 @@ TEST(UtilsNMSTest, TestSoftNMS) { std::vector keep_gt{3, 4, 1, 0, 2}; // Explicitly use colmajor order to match scores - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf scores_gt(5, 4); // Linear, overlap_thresh=0.1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(0) << 7.13657320e-01, 9.52467501e-01, 1.44501388e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; // Linear, overlap_thresh=0.3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(1) << 8.17906916e-01, 9.52467501e-01, 1.76800430e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; // Gaussian, overlap_thresh=0.1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(2) << 7.91758895e-01, 9.52467501e-01, 2.12320581e-01, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; // Gaussian, overlap_thresh=0.3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(3) << 7.91758895e-01, 9.52467501e-01, 2.12320581e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; Eigen::ArrayXf out_scores; @@ -231,10 +163,8 @@ TEST(UtilsNMSTest, TestSoftNMS) { &out_scores, proposals, scores, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.0001, method[i], -1, /* topN */ @@ -251,10 +181,8 @@ TEST(UtilsNMSTest, TestSoftNMS) { &out_scores, proposals, scores, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.0001, method[i], topN, @@ -264,17 +192,14 @@ TEST(UtilsNMSTest, TestSoftNMS) { } // Test with filtered indices - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto indices = utils::GetArrayIndices(scores >= 0.9); keep = utils::soft_nms_cpu( &out_scores, proposals, scores, indices, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.0001, method[i], -1, /* topN */ @@ -288,13 +213,11 @@ TEST(UtilsNMSTest, TestSoftNMS) { } // Test with high score_thresh - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float score_thresh = 0.9; keep = utils::soft_nms_cpu( &out_scores, proposals, scores, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], score_thresh, @@ -318,14 +241,10 @@ TEST(UtilsNMSTest, TestSoftNMS) { TEST(UtilsNMSTest, TestNMSRotatedAngle0) { // Same inputs as TestNMS, but in RRPN format with angle 0 for testing // nms_cpu_rotated - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf input(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input << 10, 10, 50, 60, 0.5, 11, 12, 48, 60, 0.7, 8, 9, 40, 50, 0.6, 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, 150, 140, 0.9, 99, 110, 155, 139, 0.8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_thresh{0.1f, 0.3f, 0.5f, 0.8f, 0.9f}; // ground truth generated based on detection.caffe2/lib/nms/py_cpu_nms.py std::vector> output_gt{ @@ -334,11 +253,8 @@ TEST(UtilsNMSTest, TestNMSRotatedAngle0) { // test utils::nms_cpu without indices input. // Add additional dim for angle and convert from // [x1, y1, x2, y1] to [ctr_x, ctr_y, w, h] format. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf proposals = Eigen::ArrayXXf::Zero(input.rows(), 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) proposals.col(0) = (input.col(0) + input.col(2)) / 2.0; // ctr_x = (x1 + x2)/2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) proposals.col(1) = (input.col(1) + input.col(3)) / 2.0; // ctr_y = (y1 + y2)/2 proposals.col(2) = input.col(2) - input.col(0) + 1.0; // w = x2 - x1 + 1 proposals.col(3) = input.col(3) - input.col(1) + 1.0; // h = y2 - y1 + 1 @@ -388,36 +304,22 @@ TEST(UtilsNMSTest, TestNMSRotatedAngle0) { TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { // Same inputs as TestSoftNMS, but in RRPN format with angle 0 for testing // nms_cpu_rotated - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf input(5, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(0) << 5.18349426e+02, 1.77783920e+02, 9.06085266e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.59163239e+02, 8.17906916e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(1) << 2.11392624e+02, 1.76144958e+02, 6.14215149e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.48934662e+02, 9.52467501e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(2) << 4.65724518e+02, 1.83594269e+02, 9.39000000e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.55136627e+02, 6.73921347e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(3) << 6.07164246e+02, 2.60230377e+02, 8.32768127e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.39919891e+02, 9.99834776e-01; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input.row(4) << 3.23936859e+02, 3.43427063e+02, 6.20561157e+02, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3.98286072e+02, 9.99737203e-01; // Add additional dim for angle and convert from // [x1, y1, x2, y1] to [ctr_x, ctr_y, w, h] format. 
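Both rotated-angle-0 tests first convert boxes from [x1, y1, x2, y2] corners to [ctr_x, ctr_y, w, h] (the comment above) and then expect the same keep set as plain axis-aligned NMS. For orientation, here is a minimal standalone sketch of axis-aligned IoU and greedy hard NMS under the same "+ 1" (legacy_plus_one) width/height convention used by these tests; the helper names are illustrative and are not the utils:: API from the patch.

    #include <algorithm>
    #include <numeric>
    #include <vector>

    struct Box { float x1, y1, x2, y2; };

    // IoU of two axis-aligned boxes, using the "+ 1" width/height convention.
    float iou(const Box& a, const Box& b) {
      const float w = std::max(0.f, std::min(a.x2, b.x2) - std::max(a.x1, b.x1) + 1.f);
      const float h = std::max(0.f, std::min(a.y2, b.y2) - std::max(a.y1, b.y1) + 1.f);
      const float inter = w * h;
      const float area_a = (a.x2 - a.x1 + 1.f) * (a.y2 - a.y1 + 1.f);
      const float area_b = (b.x2 - b.x1 + 1.f) * (b.y2 - b.y1 + 1.f);
      return inter / (area_a + area_b - inter);
    }

    // Greedy hard NMS: keep the highest-scoring box, suppress everything that
    // overlaps it by more than `thresh`, then repeat with the next survivor.
    std::vector<int> hard_nms(const std::vector<Box>& boxes,
                              const std::vector<float>& scores,
                              float thresh) {
      std::vector<int> order(boxes.size());
      std::iota(order.begin(), order.end(), 0);
      std::sort(order.begin(), order.end(),
                [&](int i, int j) { return scores[i] > scores[j]; });
      std::vector<int> keep;
      std::vector<bool> suppressed(boxes.size(), false);
      for (int i : order) {
        if (suppressed[i]) continue;
        keep.push_back(i);
        for (int j : order) {
          if (!suppressed[j] && j != i && iou(boxes[i], boxes[j]) > thresh) {
            suppressed[j] = true;
          }
        }
      }
      return keep;
    }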
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf proposals = Eigen::ArrayXXf::Zero(input.rows(), 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) proposals.col(0) = (input.col(0) + input.col(2)) / 2.0; // ctr_x = (x1 + x2)/2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) proposals.col(1) = (input.col(1) + input.col(3)) / 2.0; // ctr_y = (y1 + y2)/2 proposals.col(2) = input.col(2) - input.col(0) + 1.0; // w = x2 - x1 + 1 proposals.col(3) = input.col(3) - input.col(1) + 1.0; // h = y2 - y1 + 1 @@ -425,7 +327,6 @@ TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { const auto& scores = input.col(4); vector method{1, 1, 2, 2}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector overlap_thresh{0.1f, 0.3f, 0.1f, 0.3f}; // Ground truth generated based on @@ -433,27 +334,18 @@ TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { std::vector keep_gt{3, 4, 1, 0, 2}; // Explicitly use colmajor order to match scores - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf scores_gt(5, 4); // Linear, overlap_thresh=0.1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(0) << 7.13657320e-01, 9.52467501e-01, 1.44501388e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; // Linear, overlap_thresh=0.3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(1) << 8.17906916e-01, 9.52467501e-01, 1.76800430e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; // Gaussian, overlap_thresh=0.1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(2) << 7.91758895e-01, 9.52467501e-01, 2.12320581e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; // Gaussian, overlap_thresh=0.3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scores_gt.col(3) << 7.91758895e-01, 9.52467501e-01, 2.12320581e-01, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9.99834776e-01, 9.99737203e-01; Eigen::ArrayXf out_scores; @@ -466,10 +358,8 @@ TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { &out_scores, proposals, scores, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.0001, method[i], -1, /* topN */ @@ -486,10 +376,8 @@ TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { &out_scores, proposals, scores, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.0001, method[i], topN, @@ -499,17 +387,14 @@ TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { } // Test with filtered indices - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto indices = utils::GetArrayIndices(scores >= 0.9); keep = utils::soft_nms_cpu( &out_scores, proposals, scores, indices, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.0001, method[i], -1, /* topN */ @@ -523,13 +408,11 @@ TEST(UtilsNMSTest, TestSoftNMSRotatedAngle0) { } // Test with high score_thresh - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float score_thresh = 0.9; keep = utils::soft_nms_cpu( &out_scores, proposals, scores, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, overlap_thresh[i], score_thresh, @@ -554,23 +437,18 @@ TEST(UtilsNMSTest, RotatedBBoxOverlaps) { { // One box is fully within another box, the angle is 
irrelavant int M = 2, N = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf boxes(M, 5); for (int i = 0; i < M; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) boxes.row(i) << 0, 0, 5, 6, (360.0 / M - 180.0); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf query_boxes(N, 5); for (int i = 0; i < N; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) query_boxes.row(i) << 0, 0, 3, 3, (360.0 / M - 180.0); } Eigen::ArrayXXf expected(M, N); // 0.3 == (3 * 3) / (5 * 6) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected.fill(0.3); auto actual = utils::bbox_overlaps_rotated(boxes, query_boxes); @@ -579,18 +457,13 @@ TEST(UtilsNMSTest, RotatedBBoxOverlaps) { { // Angle 0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf boxes(1, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) boxes << 39.500000, 50.451096, 80.000000, 18.097809, -0.000000; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf query_boxes(1, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) query_boxes << 39.120628, 41.014862, 79.241257, 36.427757, -0.000000; Eigen::ArrayXXf expected(1, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected << 0.48346716237; auto actual = utils::bbox_overlaps_rotated(boxes, query_boxes); @@ -599,18 +472,13 @@ TEST(UtilsNMSTest, RotatedBBoxOverlaps) { { // Angle 0, very similar boxes that can produce 17 candidate 'intersections' - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf boxes(1, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) boxes << 299.500000, 417.370422, 600.000000, 364.259186, 0.000000; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf query_boxes(1, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) query_boxes << 299.500000, 417.370422, 600.000000, 364.259155, 0.000000; Eigen::ArrayXXf expected(1, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected << 0.99999991489; auto actual = utils::bbox_overlaps_rotated(boxes, query_boxes); @@ -619,20 +487,14 @@ TEST(UtilsNMSTest, RotatedBBoxOverlaps) { { // Simple case with angle 0 (upright boxes) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf boxes(2, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) boxes << 10.5, 15.5, 21, 31, 0, 14.0, 17, 4, 10, 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf query_boxes(3, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) query_boxes << 30.5, 10.5, 41, 1, 0, 13.5, 21.5, 5, 21, 0, 10.5, 15.5, 21, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 31, 0; Eigen::ArrayXXf expected(2, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected << 0.0161527172, 0.152439028, 1., 0., 0.38095239, 0.0614439324; auto actual = utils::bbox_overlaps_rotated(boxes, query_boxes); @@ -641,17 +503,13 @@ TEST(UtilsNMSTest, RotatedBBoxOverlaps) { { // Angle 45 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf boxes(1, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) boxes << 0, 0, 2.0 * std::sqrt(2), 2.0 * std::sqrt(2), 45; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf query_boxes(1, 5); query_boxes << 1, 1, 2, 2, 0; Eigen::ArrayXXf expected(1, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected << 0.2; auto actual = utils::bbox_overlaps_rotated(boxes, 
query_boxes); @@ -659,25 +517,16 @@ TEST(UtilsNMSTest, RotatedBBoxOverlaps) { } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf boxes(2, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) boxes << 60.0, 60.0, 100.0, 100.0, 0.0, 50.0, 50.0, 100.0, 100.0, 135.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf query_boxes(6, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) query_boxes << 60.0, 60.0, 100.0, 100.0, 180.0, 50.0, 50.0, 100.0, 100.0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 45.0, 80.0, 50.0, 100.0, 100.0, 0.0, 50.0, 50.0, 200.0, 50.0, 45.0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 200.0, 200.0, 100.0, 100.0, 0, 60.0, 60.0, 100.0, 100.0, 1.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Eigen::ArrayXXf expected(2, 6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected << 1., 0.6507467031, 0.5625, 0.3718426526, 0., 0.9829941392, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.6507467628, 1., 0.4893216789, 0.3333334029, 0., 0.6508141756; auto actual = utils::bbox_overlaps_rotated(boxes, query_boxes); diff --git a/caffe2/operators/gru_unit_op.cc b/caffe2/operators/gru_unit_op.cc index 5685dde9eeb74..d73f7e1642d85 100644 --- a/caffe2/operators/gru_unit_op.cc +++ b/caffe2/operators/gru_unit_op.cc @@ -30,7 +30,6 @@ value at X[t][n] >= seqLengths[n]. REGISTER_CPU_OPERATOR(GRUUnitGradient, GRUUnitGradientOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(GRUUnitGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5, 6) .NumOutputs(2) .Arg( diff --git a/caffe2/operators/h_softmax_op.cc b/caffe2/operators/h_softmax_op.cc index b6e8b1362d4c1..8b6fbc5feed3a 100644 --- a/caffe2/operators/h_softmax_op.cc +++ b/caffe2/operators/h_softmax_op.cc @@ -206,7 +206,6 @@ bool HSoftmaxGradientOp::RunOnDevice() { const auto& b = Input(2); auto& label = Input(3); auto& intermediate_output = Input(4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto& dY = Input(5); auto* dX = Output(0, X.sizes(), at::dtype()); diff --git a/caffe2/operators/half_float_ops_test.cc b/caffe2/operators/half_float_ops_test.cc index f8b8abebecf88..6c5ef330340ab 100644 --- a/caffe2/operators/half_float_ops_test.cc +++ b/caffe2/operators/half_float_ops_test.cc @@ -16,7 +16,6 @@ namespace caffe2 { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Float16, SimpleTest) { Workspace ws; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector data = {0.1f, 0.23f, 1.6f, 8.2f, -13.9f}; // loading input data @@ -75,18 +74,14 @@ TEST(Float16, UniformDistributionTest) { OperatorDef def; def.set_name("test"); def.set_type("Float16UniformFill"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t size = 5000000L; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector shape = {size, 32}; long tot_size = shape[0]; for (const auto i : c10::irange(1, shape.size())) { tot_size *= shape[i]; } caffe2::AddArgument>("shape", shape, &def); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) caffe2::AddArgument("min", -20.0, &def); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) caffe2::AddArgument("max", 20.0, &def); def.add_output("result"); diff --git a/caffe2/operators/heatmap_max_keypoint_op.cc b/caffe2/operators/heatmap_max_keypoint_op.cc index 49fde5121553a..d419440e2dfab 100644 --- a/caffe2/operators/heatmap_max_keypoint_op.cc 
+++ b/caffe2/operators/heatmap_max_keypoint_op.cc @@ -128,7 +128,6 @@ bool HeatmapMaxKeypointOp::RunOnDevice() { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) float deltaScore; const float MAX_DELTA = 1.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (std::abs(div) < 1e-4f) { delta << 0.0f, 0.0f; deltaScore = maxScore; @@ -141,17 +140,14 @@ bool HeatmapMaxKeypointOp::RunOnDevice() { delta(1) = delta(1) / larger_delta * MAX_DELTA; } deltaScore = fmax(1, 1) - b.transpose() * delta + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.0 / 2.0 * delta.transpose() * A * delta; } assert(std::abs(delta(0)) <= MAX_DELTA); assert(std::abs(delta(1)) <= MAX_DELTA); // find maximum of delta scores keypoints(k, 0 * keypoint_count + j) = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x0 + (0.5 + maxX + delta(0)) * xLen / heatmap_size; keypoints(k, 1 * keypoint_count + j) = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y0 + (0.5 + maxY + delta(1)) * yLen / heatmap_size; keypoints(k, 2 * keypoint_count + j) = deltaScore; if (should_output_softmax_) { diff --git a/caffe2/operators/jsd_op.cc b/caffe2/operators/jsd_op.cc index 5351fcf033b2a..278ec5b9c9597 100644 --- a/caffe2/operators/jsd_op.cc +++ b/caffe2/operators/jsd_op.cc @@ -5,7 +5,6 @@ namespace caffe2 { namespace { static constexpr float kLOG_THRESHOLD() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 1e-20; } @@ -40,9 +39,7 @@ bool BernoulliJSDOp::RunOnDevice() { for (int i = 0; i < N; i++) { auto p_mdl = x_data[i]; auto p_emp = t_data[i]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p_avg = (p_mdl + p_emp) / 2.; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto jsd = entropy(p_avg) - (entropy(p_mdl) + entropy(p_emp)) / 2.; l_data[i] = jsd; } @@ -64,9 +61,7 @@ bool BernoulliJSDGradientOp::RunOnDevice() { for (int i = 0; i < N; i++) { auto p_mdl = x_data[i]; auto p_emp = t_data[i]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p_avg = (p_mdl + p_emp) / 2.; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto g_jsd = (logit(p_mdl) - logit(p_avg)) / 2.; gi_data[i] = go_data[i] * g_jsd; } diff --git a/caffe2/operators/last_n_window_collector.cc b/caffe2/operators/last_n_window_collector.cc index 68e7e6e3e5e16..47b6406a69e63 100644 --- a/caffe2/operators/last_n_window_collector.cc +++ b/caffe2/operators/last_n_window_collector.cc @@ -87,7 +87,6 @@ class LastNWindowCollectorOp : public Operator { // output_num is >= output_batch_size // NOLINTNEXTLINE(clang-diagnostic-sign-compare) if (output_num > output_batch_size) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output->ExtendTo(output_num, 50); } @@ -145,7 +144,6 @@ REGISTER_CPU_OPERATOR(LastNWindowCollector, LastNWindowCollectorOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(LastNWindowCollector) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs({3, 4, 5}) .NumOutputs(2, 3) .EnforceInplace({{0, 0}, {1, 1}, {4, 2}}) diff --git a/caffe2/operators/lengths_reducer_fused_nbit_rowwise_ops.cc b/caffe2/operators/lengths_reducer_fused_nbit_rowwise_ops.cc index 62f2aa2f26361..7516127fccf65 100644 --- a/caffe2/operators/lengths_reducer_fused_nbit_rowwise_ops.cc +++ b/caffe2/operators/lengths_reducer_fused_nbit_rowwise_ops.cc @@ -320,7 +320,6 @@ REGISTER_CPU_OPERATOR( /*with_weights=*/true>); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) 
OPERATOR_SCHEMA(SparseLengthsWeightedSum4BitRowwiseSparse) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .WeightedValueKeyLengthInputFillers( @@ -411,11 +410,8 @@ OPERATOR_SCHEMA(SparseLengthsSum8BitRowwiseSparse) .NumInputs(4) .NumOutputs(1) .ValueKeyLengthInputFillers( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8>::COMPRESSED_INDICES_MAPPING, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8>::INDICES, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8>::LENGTHS) .SetDoc(R"DOC( Performs SparseLengthsSum, but operating on 8-bit rowwise quantized matrices @@ -453,17 +449,12 @@ REGISTER_CPU_OPERATOR( /*with_weights=*/true>); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseLengthsWeightedSum8BitRowwiseSparse) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .WeightedValueKeyLengthInputFillers( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, true>::COMPRESSED_INDICES_MAPPING, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, true>::INDICES, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, true>::LENGTHS, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, true>::WEIGHTS) .SetDoc(R"DOC( Performs SparseLengthsWeightedSum, but operating on 8-bit rowwise quantized @@ -508,12 +499,9 @@ OPERATOR_SCHEMA(SparseLengthsMean8BitRowwiseSparse) .NumInputs(4) .NumOutputs(1) .ValueKeyLengthInputFillers( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, false, true>:: COMPRESSED_INDICES_MAPPING, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, false, true>::INDICES, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SparseLengthsNBitRowwiseSparseOp<8, false, true>::LENGTHS) .SetDoc(R"DOC( Performs SparseLengthsMean, but operating on 8-bit rowwise quantized matrices @@ -590,7 +578,6 @@ REGISTER_CPU_OPERATOR( /*with_weights=*/true>); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseLengthsWeightedSum2BitRowwiseSparse) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .WeightedValueKeyLengthInputFillers( diff --git a/caffe2/operators/lengths_reducer_ops.cc b/caffe2/operators/lengths_reducer_ops.cc index 00a677cd52a7d..dce2c1f3acc6c 100644 --- a/caffe2/operators/lengths_reducer_ops.cc +++ b/caffe2/operators/lengths_reducer_ops.cc @@ -114,7 +114,6 @@ REGISTER_CPU_OPERATOR( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(TTSparseLengthsSum) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(4) .SetDoc(R"DOC( diff --git a/caffe2/operators/lengths_reducer_rowwise_8bit_ops.cc b/caffe2/operators/lengths_reducer_rowwise_8bit_ops.cc index 300d1244ac682..e77cc7f7d2cbd 100644 --- a/caffe2/operators/lengths_reducer_rowwise_8bit_ops.cc +++ b/caffe2/operators/lengths_reducer_rowwise_8bit_ops.cc @@ -70,7 +70,6 @@ and biases. 
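The 8-bit rowwise operators in these hunks work on embedding tables where each row is quantized independently with its own scale and bias (the "degrees of freedom" and "biases" mentioned in the schema docs). As a rough illustration of that scheme, here is a sketch under the usual min/max affine assumption; the exact storage layout and rounding used by these operators may differ, and the helper names are made up for this example.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    struct QuantizedRow {
      std::vector<uint8_t> data;
      float scale; // (max - min) / 255
      float bias;  // min
    };

    // Quantize one (non-empty) embedding row to 8 bits with a per-row scale/bias.
    QuantizedRow quantize_row(const std::vector<float>& row) {
      const auto [mn, mx] = std::minmax_element(row.begin(), row.end());
      QuantizedRow q;
      q.bias = *mn;
      q.scale = (*mx - *mn) / 255.f;
      if (q.scale == 0.f) q.scale = 1.f; // constant row, avoid divide-by-zero
      q.data.reserve(row.size());
      for (float v : row) {
        q.data.push_back(static_cast<uint8_t>(std::lround((v - q.bias) / q.scale)));
      }
      return q;
    }

    // Dequantize: x is reconstructed as q * scale + bias.
    std::vector<float> dequantize_row(const QuantizedRow& q) {
      std::vector<float> out;
      out.reserve(q.data.size());
      for (uint8_t v : q.data) out.push_back(v * q.scale + q.bias);
      return out;
    }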
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseLengthsWeightedSum8BitsRowwise) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .ValueLengthInputFillers( @@ -151,7 +150,6 @@ and biases. // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseLengthsWeightedMean8BitsRowwise) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .ValueLengthInputFillers( diff --git a/caffe2/operators/listwise_l2r_op.cc b/caffe2/operators/listwise_l2r_op.cc index 2706ef53632f7..fe983689eba81 100644 --- a/caffe2/operators/listwise_l2r_op.cc +++ b/caffe2/operators/listwise_l2r_op.cc @@ -109,7 +109,6 @@ float LambdaRankNdcgOp::LambdaRankNdcgSession( float loss = 0; dy_vec = 0; // in case that all docs in a session have zero ratings, no op - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (r_vec.abs().sum() < 1e-6) { return 0; } @@ -152,7 +151,6 @@ float LambdaRankNdcgOp::LambdaRankNdcgSession( dy_vec = -(lambda_mat * CWISE_SIGN(PAIRWISE_DIFF(r_vec, N)) * CWISE_SIGM( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -CWISE_SIGN(PAIRWISE_DIFF(r_vec, N)) * PAIRWISE_DIFF(y_vec, N))) .rowwise() .sum(); @@ -163,7 +161,6 @@ float LambdaRankNdcgOp::LambdaRankNdcgSession( } else { loss = -(lambda_mat * CWISE_LOG_SIGM( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CWISE_SIGN(PAIRWISE_DIFF(r_vec, N)) * PAIRWISE_DIFF(y_vec, N), 100)) .sum(); @@ -174,9 +171,7 @@ float LambdaRankNdcgOp::LambdaRankNdcgSession( // Note that normalization is mathematically correct if idcg is guaranteed to // be positive! if (use_idcg_normalization_) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dy_vec /= std::max(idcg, 1e-5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loss /= std::max(idcg, 1e-5); } return loss; diff --git a/caffe2/operators/lpnorm_op.cc b/caffe2/operators/lpnorm_op.cc index af1718fe6b550..101f40561ea07 100644 --- a/caffe2/operators/lpnorm_op.cc +++ b/caffe2/operators/lpnorm_op.cc @@ -55,7 +55,6 @@ bool LpNormGradientOp::RunOnDevice() { } else if (p_ == 2) { EigenVectorMap(dX->template mutable_data(), X.numel()) .array() = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstEigenVectorMap(X.data(), X.numel()).array() * 2.0f * ((dnorm.data())[0] / size); } diff --git a/caffe2/operators/lstm_unit_op.cc b/caffe2/operators/lstm_unit_op.cc index f5fa7d4cf923e..7d2fa438f41b5 100644 --- a/caffe2/operators/lstm_unit_op.cc +++ b/caffe2/operators/lstm_unit_op.cc @@ -5,7 +5,6 @@ namespace caffe2 { REGISTER_CPU_OPERATOR(LSTMUnit, LSTMUnitOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(LSTMUnit) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(4, 5) .NumOutputs(2) .SetDoc(R"DOC( @@ -27,7 +26,6 @@ value at X{t][n] >= seqLengths[n]. 
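The LSTMUnit and LSTMUnitGradient schemas touched here roughly correspond to one timestep of the standard LSTM cell recurrence. For orientation, this is the textbook update written out as a generic sketch with made-up names, not the operator's actual code; the real op also handles sequence lengths and a forget-gate bias.

    #include <cmath>

    struct LSTMState { float h, c; };

    float sigmoid(float x) { return 1.f / (1.f + std::exp(-x)); }

    // One timestep of a single-unit LSTM cell given pre-activation gate values
    // (i, f, o, g) and the previous cell state c_prev; returns (h_t, c_t).
    LSTMState lstm_step(float i, float f, float o, float g, float c_prev) {
      const float i_t = sigmoid(i);
      const float f_t = sigmoid(f);
      const float o_t = sigmoid(o);
      const float g_t = std::tanh(g);
      const float c_t = f_t * c_prev + i_t * g_t;
      const float h_t = o_t * std::tanh(c_t);
      return {h_t, c_t};
    }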
REGISTER_CPU_OPERATOR(LSTMUnitGradient, LSTMUnitGradientOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(LSTMUnitGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(8, 9) .NumOutputs(3) .Arg( diff --git a/caffe2/operators/mish_op.cc b/caffe2/operators/mish_op.cc index 75051d7cbcd90..627135f8b1824 100644 --- a/caffe2/operators/mish_op.cc +++ b/caffe2/operators/mish_op.cc @@ -48,9 +48,7 @@ bool MishGradientOp::DoRunWithType() { math::Tanh(N, dX_data, dX_data, &context_); dX_arr = dY_arr * (dX_arr + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) X_arr * (T(1) - dX_arr.square()) * T(0.5) * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ((X_arr * T(0.5)).tanh() + T(1))); return true; diff --git a/caffe2/operators/pow_op.cc b/caffe2/operators/pow_op.cc index 740481beb32aa..646532124aa3f 100644 --- a/caffe2/operators/pow_op.cc +++ b/caffe2/operators/pow_op.cc @@ -22,15 +22,12 @@ struct EigenPowFunctor { if (b[0] == -1.) { EigenVectorArrayMap(out, n) = ConstEigenVectorArrayMap(a, n).inverse(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (b[0] == 0.5) { EigenVectorArrayMap(out, n) = ConstEigenVectorArrayMap(a, n).sqrt(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (b[0] == -0.5) { EigenVectorArrayMap(out, n) = ConstEigenVectorArrayMap(a, n).rsqrt(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (b[0] == 2.) { EigenVectorArrayMap(out, n) = ConstEigenVectorArrayMap(a, n).square(); diff --git a/caffe2/operators/quantized/int8_roi_align_op_test.cc b/caffe2/operators/quantized/int8_roi_align_op_test.cc index 8fa3cd40a1dd1..e5a35210c19cb 100644 --- a/caffe2/operators/quantized/int8_roi_align_op_test.cc +++ b/caffe2/operators/quantized/int8_roi_align_op_test.cc @@ -10,9 +10,7 @@ TEST(Int8RoIAlign, RoIAlign) { const int H = 100; const int W = 110; auto XQ = q({N, H, W, C}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.01f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; auto X = dq(*XQ); const int n_rois = 10; @@ -29,14 +27,12 @@ TEST(Int8RoIAlign, RoIAlign) { rois_array.push_back(std::min(w1, w2)); rois_array.push_back(std::max(h1, h2)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) add_input({n_rois, 5}, rois_array, "RoIs", &ws); auto xop = CreateOperatorDef( "RoIAlign", "", {"X", "RoIs"}, {"Y"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("spatial_scale", 0.25f), MakeArgument("pooled_h", 2), MakeArgument("pooled_w", 2), @@ -47,14 +43,11 @@ TEST(Int8RoIAlign, RoIAlign) { "", {"XQ", "RoIs"}, {"YQ"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("spatial_scale", 0.25f), MakeArgument("pooled_h", 2), MakeArgument("pooled_w", 2), MakeArgument("sampling_ratio", 2), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_scale", 0.01f)}); int8Copy(ws.CreateBlob("XQ")->GetMutable(), *XQ); BlobGetMutableTensor(ws.CreateBlob("X"), CPU)->CopyFrom(*X); diff --git a/caffe2/operators/quantized/int8_test.cc b/caffe2/operators/quantized/int8_test.cc index a91e8de85d56c..0441b72cf3083 100644 --- a/caffe2/operators/quantized/int8_test.cc +++ b/caffe2/operators/quantized/int8_test.cc @@ -20,7 +20,6 @@ namespace caffe2 { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, ReLU) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 224, 224, 3}); auto X = dq(*XQ); auto xop = CreateOperatorDef("Relu", "", {"X"}, {"Y"}); @@ -46,7 +45,6 @@ TEST(Int8, ReLU) { // xplat/caffe2:caffe2_testAndroid // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, DISABLED_LeakyReLU) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 224, 224, 3}); auto X = dq(*XQ); const float alpha = 0.1; @@ -82,7 +80,6 @@ TEST(Int8, Softmax) { {"XQ"}, {"YQ"}, {MakeArgument("Y_zero_point", 0), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_scale", 1.0 / 256)}); Workspace ws; int8Copy(ws.CreateBlob("XQ")->GetMutable(), *XQ); @@ -109,7 +106,6 @@ TEST(Int8, Sigmoid) { {"XQ"}, {"YQ"}, {MakeArgument("Y_zero_point", 0), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_scale", 1.0 / 256)}); Workspace ws; int8Copy(ws.CreateBlob("XQ")->GetMutable(), *XQ); @@ -127,7 +123,6 @@ TEST(Int8, Sigmoid) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, MaxPool) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 25, 25, 16}); auto X = dq(*XQ); auto xop = CreateOperatorDef( @@ -158,7 +153,6 @@ TEST(Int8, MaxPool) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, AveragePool) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 25, 25, 16}); auto X = dq(*XQ); auto xop = CreateOperatorDef( @@ -189,7 +183,6 @@ TEST(Int8, AveragePool) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, ResizeNearest) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 25, 25, 16}); auto X = dq(*XQ); auto xop = CreateOperatorDef( @@ -223,7 +216,6 @@ TEST(Int8, ResizeNearest) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, ChannelShuffle) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({2, 25, 25, 32}); auto X = dq(*XQ); auto xop = CreateOperatorDef( @@ -263,10 +255,8 @@ TEST(Int8, ChannelShuffle) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Concat) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ0 = q({2, 25, 25, 16}); auto X0 = dq(*XQ0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ1 = q({2, 25, 25, 24}); auto X1 = dq(*XQ1); auto xop = CreateOperatorDef( @@ -305,9 +295,7 @@ TEST(Int8, Concat) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Add) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ0 = q({1, 10, 10, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ1 = q({1, 10, 10, 20}); auto X0 = dq(*XQ0); auto X1 = dq(*XQ1); @@ -335,9 +323,7 @@ TEST(Int8, Add) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, SumRelu) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ0 = q({1, 10, 10, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ1 = q({1, 10, 10, 20}); auto X0 = dq(*XQ0); auto X1 = dq(*XQ1); @@ -395,17 +381,13 @@ void biassetq(int8::Int8TensorCPU* dst, const std::vector& vs) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Conv) { auto XQ = q({2, 2, 4, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; setq( XQ.get(), 
std::vector{1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 3, 4, 1, 2, 3, 4}); auto WQ = q({3, 2, 2, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; setq( WQ.get(), @@ -444,7 +426,6 @@ TEST(Int8, Conv) { {MakeArgument("kernel", 2), MakeArgument("order", "NHWC"), MakeArgument("stride", 2), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -471,20 +452,14 @@ TEST(Int8, Conv) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Grouped1x1Conv) { auto XQ = q({1, 3, 2, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(XQ.get(), std::vector{1, 4, 3, 2, 9, 3, 8, 2, 6, 7, 8, 2, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3, 8, 1, 7, 4, 2, 1, 3, 8, 5, 3, 1}); // G = 2 auto WQ = q({4, 1, 1, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; setq(WQ.get(), {1, 2, 3, 4, -1, -2, -3, -4}); auto BQ = biasq({4}, XQ->scale * WQ->scale); @@ -508,7 +483,6 @@ TEST(Int8, Grouped1x1Conv) { {MakeArgument("kernel", 1), MakeArgument("order", "NHWC"), MakeArgument("group", 2), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -544,21 +518,15 @@ TEST(Int8, Grouped1x1Conv) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Conv2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 3, 6, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; setq( XQ.get(), std::vector{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3, 2, 1, -1, -2, -3, 4, 3, 2, -2, -3, -4, 5, 4, 3, -3, -4, -5}); auto WQ = q({1, 2, 2, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; setq(WQ.get(), {1, 2, 3, 4}); auto BQ = biasq({1}, XQ->scale * WQ->scale); @@ -584,7 +552,6 @@ TEST(Int8, Conv2) { MakeArgument("order", "NHWC"), MakeArgument("stride_w", 3), MakeArgument("stride_h", 1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -609,20 +576,14 @@ TEST(Int8, Conv2) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, DepthwiseConv) { auto XQ = q({1, 3, 2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; // setq(XQ.get(), std::vector{1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(XQ.get(), std::vector{1, 4, 3, 2, 9, 3, 8, 2, 6, 7, 8, 2}); auto WQ = q({2, 2, 2, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(WQ.get(), {1, 2, 3, 4, -9, 10, -11, 12}); auto BQ = biasq({2}, XQ->scale * WQ->scale); 
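The scale/zero_point pairs set throughout these Int8 tests follow the usual affine quantization convention, and the bias tensors are created with a scale equal to the product of the input and weight scales, as in the biasq({2}, XQ->scale * WQ->scale) call just above. A minimal sketch of that mapping, assuming uint8 activations and the 0.5 / 127 values used in the tests (illustrative helpers only, not the int8 test utilities):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // real = scale * (q - zero_point);  q = clamp(round(real / scale) + zero_point)
    uint8_t quantize(float real, float scale, int32_t zero_point) {
      const long q = std::lround(real / scale) + zero_point;
      return static_cast<uint8_t>(std::clamp<long>(q, 0, 255));
    }

    float dequantize(uint8_t q, float scale, int32_t zero_point) {
      return scale * (static_cast<int32_t>(q) - zero_point);
    }

    int main() {
      const float scale = 0.5f;      // matches XQ->scale in the tests above
      const int32_t zero_point = 127;
      for (float v : {1.f, 2.f, -3.f, 4.f}) {
        const uint8_t q = quantize(v, scale, zero_point);
        std::printf("%.1f -> q=%u -> %.1f\n",
                    v, static_cast<unsigned>(q), dequantize(q, scale, zero_point));
      }
      return 0;
    }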
biassetq(BQ.get(), {1, 2}); @@ -645,7 +606,6 @@ TEST(Int8, DepthwiseConv) { {MakeArgument("kernel", 2), MakeArgument("order", "NHWC"), MakeArgument("group", 2), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -675,27 +635,17 @@ TEST(Int8, DepthwiseConv) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, DepthwiseConv3x3) { auto XQ = q({1, 3, 3, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(XQ.get(), std::vector{1, 4, 3, 2, 9, 3, 8, 2, 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, 8, 2, 3, 4, 5, 2, 4, 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9, 8, 7, 6, 5, 4, 3, 2, 1}); auto WQ = q({3, 3, 3, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(WQ.get(), std::vector{1, -4, 3, 2, -9, 3, -8, 2, 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, 8, -2, -3, 4, -5, -2, 4, 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9, 8, -7, 6, -5, 4, 3, -2, 1}); auto BQ = biasq({3}, XQ->scale * WQ->scale); biassetq(BQ.get(), {1, 2, 3}); @@ -718,7 +668,6 @@ TEST(Int8, DepthwiseConv3x3) { {MakeArgument("kernel", 3), MakeArgument("order", "NHWC"), MakeArgument("group", 3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -747,30 +696,18 @@ TEST(Int8, DepthwiseConv3x3) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, DepthwiseConv5x5) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 5, 5, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(XQ.get(), std::vector{1, 4, 3, 2, 9, 3, 8, 2, 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, 8, 2, 3, 4, 5, 2, 4, 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9, 8, 7, 6, 5, 4, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto WQ = q({1, 5, 5, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(WQ.get(), std::vector{1, -4, 3, 2, -9, 3, -8, 2, 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, 8, -2, -3, 4, -5, -2, 4, 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) -9, 8, -7, 6, -5, 4, 3}); auto BQ = biasq({1}, XQ->scale * WQ->scale); biassetq(BQ.get(), {1}); @@ -782,7 +719,6 @@ TEST(Int8, DepthwiseConv5x5) { "", {"XT", "WT", "B"}, {"YT"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("kernel", 5), MakeArgument("order", "NCHW"), MakeArgument("group", 1)}); @@ -791,11 +727,9 @@ TEST(Int8, DepthwiseConv5x5) { "", {"XQ", "WQ", "BQ"}, {"YQ"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("kernel", 5), MakeArgument("order", "NHWC"), MakeArgument("group", 1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -824,21 +758,15 @@ TEST(Int8, DepthwiseConv5x5) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, ConvTranspose) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 3, 6, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; setq( XQ.get(), std::vector{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 3, 2, 1, -1, -2, -3, 4, 3, 2, -2, -3, -4, 5, 4, 3, -3, -4, -5}); auto WQ = q({1, 2, 2, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; setq(WQ.get(), {1, 2, 3, 4}); auto BQ = biasq({1}, XQ->scale * WQ->scale); @@ -864,7 +792,6 @@ TEST(Int8, ConvTranspose) { MakeArgument("order", "NHWC"), MakeArgument("stride_w", 1), MakeArgument("stride_h", 2), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -884,30 +811,19 @@ TEST(Int8, ConvTranspose) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, FC) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({2, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) XQ->zero_point = 127; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setq(XQ.get(), {1, 2, 3, 4, 5, 6, 7, 8, -9, -10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1, 2, 3, 4, 5, 6, 7, -8, 9, -10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto WQ = q({3, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->scale = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) WQ->zero_point = 127; setq( WQ.get(), { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 1 }); auto BQ = biasq({3}, XQ->scale * WQ->scale); @@ -921,7 +837,6 @@ TEST(Int8, FC) { "", {"XQ", "WQ", "BQ"}, {"YQ"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("Y_zero_point", 127), MakeArgument("Y_scale", 1.0)}); Workspace ws; @@ -948,7 +863,6 @@ TEST(Int8, FC) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, GivenTensorFill) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector shape = {1, 25, 25, 16}; auto XQ = q(shape); auto X = dq(*XQ); @@ -984,9 +898,7 @@ TEST(Int8, GivenTensorFill) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, GivenIntTensorFill) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector shape = {32}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = biasq(shape, 1. / 255 * 1. 
/ 255); auto X = biasdq(*XQ); vector v( @@ -1021,7 +933,6 @@ TEST(Int8, GivenIntTensorFill) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, QuantDeQuant) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector shape = {1, 25, 25, 16}; auto XQ = q(shape); auto X = dq(*XQ); @@ -1043,14 +954,12 @@ TEST(Int8, QuantDeQuant) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Reshape) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 25, 25, 16}); auto xop = CreateOperatorDef( "Int8Reshape", "", {"XQ"}, {"YQ", "old_shape"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("shape", vector{0, -1, 2000}), MakeArgument("Y_scale", XQ->scale), MakeArgument("Y_zero_point", XQ->zero_point)}); @@ -1065,7 +974,6 @@ TEST(Int8, Reshape) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Flatten) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 25, 25, 16}); auto xop = CreateOperatorDef( "Int8Flatten", @@ -1086,11 +994,9 @@ TEST(Int8, Flatten) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, Slice) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 25, 25, 16}); auto X = dq(*XQ); vector starts = {0, 3, 0, 0}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vector ends = {-1, 5, -1, -1}; auto xop = CreateOperatorDef( "Slice", @@ -1124,7 +1030,6 @@ TEST(Int8, Slice) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Int8, DISABLED_Transpose) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto XQ = q({1, 50, 25, 16}); auto xop = CreateOperatorDef( "Int8Transpose", diff --git a/caffe2/operators/reservoir_sampling.cc b/caffe2/operators/reservoir_sampling.cc index 838f80f9d5036..d89d8a31d588b 100644 --- a/caffe2/operators/reservoir_sampling.cc +++ b/caffe2/operators/reservoir_sampling.cc @@ -104,10 +104,8 @@ class ReservoirSamplingOp final : public Operator { auto output_num = std::min(numToCollect_, output_batch_size + num_to_copy); // output_num is >= output_batch_size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output->ExtendTo(output_num, 50); if (pos_to_object) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pos_to_object->ExtendTo(output_num, 50); // ExtendTo doesn't zero-initialize tensors any more, explicitly clear // the memory @@ -224,11 +222,9 @@ REGISTER_CPU_OPERATOR(ReservoirSampling, ReservoirSamplingOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(ReservoirSampling) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs({4, 7}) .NumOutputs({2, 4}) .NumInputsOutputs([](int in, int out) { return in / 3 == out / 2; }) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .EnforceInplace({{0, 0}, {1, 1}, {5, 2}, {6, 3}}) .SetDoc(R"DOC( Collect `DATA` tensor into `RESERVOIR` of size `num_to_collect`. `DATA` is @@ -265,13 +261,11 @@ This operator is thread-safe. "(Optional, int64) If provided, used for deduplicating object in the " "reservoir") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "OBJECT_TO_POS_MAP_IN", "(Optional) Auxiliary bookkeeping map. 
This should be created from " " `CreateMap` with keys of type int64 and values of type int32") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "POS_TO_OBJECT_IN", "(Optional) Tensor of type int64 used for bookkeeping in deduplication") diff --git a/caffe2/operators/resize_3d_op.cc b/caffe2/operators/resize_3d_op.cc index b241cd740f5b2..eca92e9309207 100644 --- a/caffe2/operators/resize_3d_op.cc +++ b/caffe2/operators/resize_3d_op.cc @@ -70,7 +70,6 @@ bool ResizeNearest3DOp::RunOnDeviceWithOrderNCHW() { float* Ydata = Y->template mutable_data(); // Specialized implementation for fast 2x upsampling - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (width_scale_ == 2.0 && height_scale_ == 2.0) { CAFFE_ENFORCE(temporal_scale_ == 1 || temporal_scale_ == 2, "temporal_scale must be either 1 or 2"); diff --git a/caffe2/operators/resize_op.cc b/caffe2/operators/resize_op.cc index 0014aec3ea67e..0b61a894dd6ed 100644 --- a/caffe2/operators/resize_op.cc +++ b/caffe2/operators/resize_op.cc @@ -86,7 +86,6 @@ bool ResizeNearestOp::RunOnDeviceWithOrderNCHW() { float* Ydata = Y->template mutable_data(); // Specialized implementation for fast 2x upsampling - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (width_scale_ == 2.0 && height_scale_ == 2.0) { resizeNearestNCHW2x( batch_size, num_channels, input_height, input_width, Xdata, Ydata); diff --git a/caffe2/operators/rmac_regions_op.cc b/caffe2/operators/rmac_regions_op.cc index 7a9f90142efca..891b70d693cfc 100644 --- a/caffe2/operators/rmac_regions_op.cc +++ b/caffe2/operators/rmac_regions_op.cc @@ -11,7 +11,6 @@ bool RMACRegionsOp::RunOnDevice() { // RoIs auto* output = Output( 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0, 5}, at::dtype()); // [batch_id x1 y1 x2 y2] format of ROIPoolOp @@ -28,7 +27,6 @@ bool RMACRegionsOp::RunOnDevice() { int step = 0; if (W != H) { int min_step = 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int max_step = 6; float cur_min = FLT_MAX; for (int idx = min_step; idx <= max_step; ++idx) { @@ -65,9 +63,7 @@ bool RMACRegionsOp::RunOnDevice() { (l + Hd - 1 > 0) ? ((H - region_size) / (1.0 * (l + Hd - 1))) : 0; int cur_rows = output->dim32(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output->Extend((l + Wd) * (l + Hd), 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto* outputData = output->template mutable_data() + cur_rows * 5; for (int i = 0; i < l + Wd; ++i) { @@ -99,16 +95,13 @@ bool RMACRegionsOp::RunOnDevice() { // Replicate regions for all items in batch int num_rois = output->dim32(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output->Extend((batch_size - 1) * num_rois, 50); auto* outputData = output->template mutable_data(); for (int b = 1; b < batch_size; ++b) { // Copy all rois - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::copy_n(outputData, num_rois * 5, outputData + b * num_rois * 5); // Override batch index for (int r = 0; r < num_rois; ++r) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) outputData[(b * num_rois + r) * 5] = b; } } diff --git a/caffe2/operators/rnn/recurrent_network_executor.cc b/caffe2/operators/rnn/recurrent_network_executor.cc index cf9658d08d89c..69eee50d20253 100644 --- a/caffe2/operators/rnn/recurrent_network_executor.cc +++ b/caffe2/operators/rnn/recurrent_network_executor.cc @@ -217,11 +217,9 @@ void ThreadedRecurrentNetworkExecutor::_Exec() { // Wait until threads finish. 
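The ReservoirSampling operator edited a few hunks above follows the classic fixed-size reservoir pattern: keep the first k items, then replace a random slot with probability k/i for the i-th item seen. Here is a standalone sketch of that algorithm (Algorithm R); it is illustrative only, since the operator additionally supports deduplication through the optional bookkeeping inputs described in its schema.

    #include <cstdint>
    #include <random>
    #include <vector>

    // Keep a uniform random sample of up to `k` items from a stream.
    template <typename T>
    class Reservoir {
     public:
      explicit Reservoir(size_t k) : k_(k) {}

      void add(const T& item) {
        ++seen_;
        if (sample_.size() < k_) {
          sample_.push_back(item);
        } else {
          // Replace a random slot with probability k / seen_.
          std::uniform_int_distribution<uint64_t> dist(0, seen_ - 1);
          const uint64_t j = dist(rng_);
          if (j < k_) sample_[j] = item;
        }
      }

      const std::vector<T>& sample() const { return sample_; }

     private:
      size_t k_;
      uint64_t seen_ = 0;
      std::vector<T> sample_;
      std::mt19937_64 rng_{42};
    };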
Timer t; while (!failed_ && countdown_ > 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cv_.wait_for(lk, std::chrono::seconds(30), [&] { // Log if we are still running, so that we catch deadlocks.. there // should not be any deadlocks, but... - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (t.Seconds() > 10) { LOG(INFO) << "RNN Executor still running, remaining ops: " << countdown_; diff --git a/caffe2/operators/roi_align_gradient_op.cc b/caffe2/operators/roi_align_gradient_op.cc index 5273267f0c9a3..8b46ca55a955b 100644 --- a/caffe2/operators/roi_align_gradient_op.cc +++ b/caffe2/operators/roi_align_gradient_op.cc @@ -101,14 +101,12 @@ void ROIAlignBackwardFeature( const T* offset_bottom_rois = bottom_rois + n * rois_cols; int roi_batch_ind = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (rois_cols == 5) { roi_batch_ind = offset_bottom_rois[0]; offset_bottom_rois++; } // Do not using rounding; this implementation detail is critical - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) T roi_offset = continuous_coordinate ? T(0.5) : 0; T roi_start_w = offset_bottom_rois[0] * spatial_scale - roi_offset; T roi_start_h = offset_bottom_rois[1] * spatial_scale - roi_offset; diff --git a/caffe2/operators/roi_align_op.cc b/caffe2/operators/roi_align_op.cc index decbc817a5843..83f07117d81a7 100644 --- a/caffe2/operators/roi_align_op.cc +++ b/caffe2/operators/roi_align_op.cc @@ -102,7 +102,6 @@ C10_EXPORT bool RoIAlignOp::RunOnDeviceWithOrderNCHW( for (int64_t n = 0; n < N; ++n) { const int64_t roi_batch_idx = roi_cols == 4 ? 0 : R[n * roi_cols]; const float* X_ptr = X + roi_batch_idx * C * H * W; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const float* R_ptr = R + n * roi_cols + (roi_cols == 5); float* Y_ptr = Y + n * C * pooled_h_ * pooled_w_; @@ -189,7 +188,6 @@ C10_EXPORT bool RoIAlignOp::RunOnDeviceWithOrderNHWC( for (int64_t n = 0; n < N; ++n) { const int64_t roi_batch_idx = roi_cols == 4 ? 0 : R[n * roi_cols]; const float* X_ptr = X + roi_batch_idx * C * H * W; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const float* R_ptr = R + n * roi_cols + (roi_cols == 5); float* Y_ptr = Y + n * C * pooled_h_ * pooled_w_; diff --git a/caffe2/operators/roi_align_rotated_op.cc b/caffe2/operators/roi_align_rotated_op.cc index dfc415617d349..c546af75c42b2 100644 --- a/caffe2/operators/roi_align_rotated_op.cc +++ b/caffe2/operators/roi_align_rotated_op.cc @@ -162,20 +162,17 @@ void ROIAlignRotatedForward( // roi could have 5 or 6 columns const T* offset_bottom_rois = bottom_rois + n * roi_cols; int roi_batch_ind = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (roi_cols == 6) { roi_batch_ind = offset_bottom_rois[0]; offset_bottom_rois++; } // Do not round - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) T roi_offset = continuous_coordinate ? T(0.5) : 0; T roi_center_w = offset_bottom_rois[0] * spatial_scale - roi_offset; T roi_center_h = offset_bottom_rois[1] * spatial_scale - roi_offset; T roi_width = offset_bottom_rois[2] * spatial_scale; T roi_height = offset_bottom_rois[3] * spatial_scale; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) T theta = offset_bottom_rois[4] * M_PI / 180.0; if (continuous_coordinate) { @@ -207,9 +204,7 @@ void ROIAlignRotatedForward( // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. 
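RoIAlign and its rotated variant, touched above, sample the feature map at non-integer coordinates via bilinear interpolation; the rotated version additionally converts the box angle from degrees to radians (the M_PI / 180.0 factor) and offsets sampling points from the box center. The following is a small sketch of the bilinear sampling step only, written as a hypothetical helper rather than the operators' actual code.

    #include <algorithm>
    #include <vector>

    // Bilinearly sample a single-channel H x W feature map at continuous (y, x).
    // Coordinates falling outside the map contribute zero, mirroring the usual
    // RoIAlign convention of skipping out-of-range samples.
    float bilinear_sample(const std::vector<float>& feat, int H, int W,
                          float y, float x) {
      if (y < -1.f || y > H || x < -1.f || x > W) return 0.f;
      y = std::max(y, 0.f);
      x = std::max(x, 0.f);
      int y_low = static_cast<int>(y);
      int x_low = static_cast<int>(x);
      int y_high = y_low + 1;
      int x_high = x_low + 1;
      if (y_low >= H - 1) { y_high = y_low = H - 1; y = static_cast<float>(y_low); }
      if (x_low >= W - 1) { x_high = x_low = W - 1; x = static_cast<float>(x_low); }
      const float ly = y - y_low, lx = x - x_low;
      const float hy = 1.f - ly, hx = 1.f - lx;
      // Weighted sum over the four surrounding integer grid points.
      return hy * hx * feat[y_low * W + x_low] +
             hy * lx * feat[y_low * W + x_high] +
             ly * hx * feat[y_high * W + x_low] +
             ly * lx * feat[y_high * W + x_high];
    }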
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) T roi_start_h = -roi_height / 2.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) T roi_start_w = -roi_width / 2.0; pre_calc_for_bilinear_interpolate( height, diff --git a/caffe2/operators/rsqrt_op.cc b/caffe2/operators/rsqrt_op.cc index 6f1ec3fbb66f9..8dff496159788 100644 --- a/caffe2/operators/rsqrt_op.cc +++ b/caffe2/operators/rsqrt_op.cc @@ -21,7 +21,6 @@ bool RsqrtGradientFunctor::Forward( // NOLINTNEXTLINE(modernize-use-transparent-functors) dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies()); EigenVectorMap(dX, size) = ConstEigenVectorMap(dY, size).array() * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstEigenVectorMap(Y, size).array().cube() * static_cast(-0.5); return true; } diff --git a/caffe2/operators/segment_reduction_op.cc b/caffe2/operators/segment_reduction_op.cc index 872bb93c88714..af79802c328c1 100644 --- a/caffe2/operators/segment_reduction_op.cc +++ b/caffe2/operators/segment_reduction_op.cc @@ -46,7 +46,6 @@ OpSchema::Cost CostInferenceForSparseLengths( // gradient of SparseLengthsWeightedSum // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(2); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) diff --git a/caffe2/operators/softmax_with_loss_op.cc b/caffe2/operators/softmax_with_loss_op.cc index 4137c99a0b52d..3e892638a51dd 100644 --- a/caffe2/operators/softmax_with_loss_op.cc +++ b/caffe2/operators/softmax_with_loss_op.cc @@ -244,14 +244,12 @@ bool SoftmaxWithLossOp::RunOnDevice() { "Label prob seems incorrect: label prob value must be nonnegative:", " ", label_data[i * D + j]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l += -log(std::max(Pdata[i * D + j], 1e-20f)) * label_data[i * D + j] * weight; total_prob += label_data[i * D + j]; } loss_sum += l; CAFFE_ENFORCE( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::abs(total_prob - 1.) < 1e-5f, "Label prob seems incorrect: label prob values do not sum to 1.0: ", total_prob, diff --git a/caffe2/operators/sparse_dropout_with_replacement_op.cc b/caffe2/operators/sparse_dropout_with_replacement_op.cc index 09af2c021a9c8..9e4db94fb4297 100644 --- a/caffe2/operators/sparse_dropout_with_replacement_op.cc +++ b/caffe2/operators/sparse_dropout_with_replacement_op.cc @@ -31,7 +31,6 @@ bool SparseDropoutWithReplacementOp::RunOnDevice() { int32_t total_output_length = 0; vector selected(Lengths.numel(), true); for (int i = 0; i < Lengths.numel(); ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (dist(gen) > 0.5) { output_lengths_data[i] = input_lengths_data[i]; } else { diff --git a/caffe2/operators/sparse_lp_regularizer_op.cc b/caffe2/operators/sparse_lp_regularizer_op.cc index 6c24c3a1c19d9..ff21a6402b3b3 100644 --- a/caffe2/operators/sparse_lp_regularizer_op.cc +++ b/caffe2/operators/sparse_lp_regularizer_op.cc @@ -26,7 +26,6 @@ bool SparseLpRegularizerOp::DoRunWithType() { // embedding length, e.g. 
32, 64, 128 auto block_size = Input(PARAM).size_from_dim(1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (p_ == 2.0) { // L2 regularization #ifdef LOG_FIRST_N LOG_FIRST_N(INFO, 3) diff --git a/caffe2/operators/spatial_batch_norm_gradient_op.cc b/caffe2/operators/spatial_batch_norm_gradient_op.cc index 67ac47a070855..139bde8e9508b 100644 --- a/caffe2/operators/spatial_batch_norm_gradient_op.cc +++ b/caffe2/operators/spatial_batch_norm_gradient_op.cc @@ -145,10 +145,8 @@ REGISTER_CPU_OPERATOR(SpatialBNGradient, SpatialBNGradientOp); // Output: dX, dscale, dbias // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SpatialBNGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs({5, 7}) .NumOutputs(3) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .AllowInplace({{5, 1}, {6, 2}}); namespace { diff --git a/caffe2/operators/spatial_batch_norm_op.cc b/caffe2/operators/spatial_batch_norm_op.cc index 60161f4678eed..afa4c8c5630a5 100644 --- a/caffe2/operators/spatial_batch_norm_op.cc +++ b/caffe2/operators/spatial_batch_norm_op.cc @@ -29,11 +29,8 @@ REGISTER_CPU_OPERATOR(SpatialBN, SpatialBNOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SpatialBN) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs({5, 7}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumOutputs({1, 5}) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .AllowInplace({{0, 0}, {5, 3}, {6, 4}}) .EnforceInplace({{3, 1}, {4, 2}}) .CostInferenceFunction(CostInferenceForSpatialBN) @@ -119,12 +116,10 @@ Github Links: "var", "The running variance (training) or the estimated variance (testing) as a 1-dimensional tensor of size $C$.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "sums", "*(optional)* Per-channel sums of elements to be used to determine the mean and variance for this batch.") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "sumsq", "*(optional)* Per-channel sum of elements squared per channel to be used to determine the variance for this batch.") diff --git a/caffe2/operators/spatial_softmax_with_loss_op.cc b/caffe2/operators/spatial_softmax_with_loss_op.cc index 8a0b380103b3a..b5e77ccfc477a 100644 --- a/caffe2/operators/spatial_softmax_with_loss_op.cc +++ b/caffe2/operators/spatial_softmax_with_loss_op.cc @@ -101,7 +101,6 @@ bool SpatialSoftmaxWithLossOp::RunOnDevice() { for (int y = 0; y < H; ++y) { for (int x = 0; x < W; ++x) { // Subtract max on each cell for numerical reasons - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float max_val = (-1e20f); for (int c = 0; c < D; ++c) { // TODO optimize @@ -152,7 +151,6 @@ bool SpatialSoftmaxWithLossOp::RunOnDevice() { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) float w = weights ? 
weights[label_idx] : 1.0; total_weight += w; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sum_label_xent += -log(std::max(Pdata[idx], 1e-20f)) * w; } } diff --git a/caffe2/operators/sqr_op.cc b/caffe2/operators/sqr_op.cc index d5ffa0ec5ac7c..8286cad45bade 100644 --- a/caffe2/operators/sqr_op.cc +++ b/caffe2/operators/sqr_op.cc @@ -73,7 +73,6 @@ class GetSqrGradient : public GradientMakerBase { std::vector GetGradientDefs() override { Argument scale_arg; scale_arg.set_name("scale"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scale_arg.set_f(2.0); return std::vector{CreateOperatorDef( "Scale", diff --git a/caffe2/operators/sqrt_op.cc b/caffe2/operators/sqrt_op.cc index aef67707e0baf..11fab9eae0d42 100644 --- a/caffe2/operators/sqrt_op.cc +++ b/caffe2/operators/sqrt_op.cc @@ -76,7 +76,6 @@ class GetSqrtGradient : public GradientMakerBase { std::vector GetGradientDefs() override { Argument scale_arg; scale_arg.set_name("scale"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) scale_arg.set_f(0.5); return std::vector{CreateOperatorDef( "Scale", diff --git a/caffe2/operators/string_ops_test.cc b/caffe2/operators/string_ops_test.cc index 222bb716c2dd8..e38be2b6b39c4 100644 --- a/caffe2/operators/string_ops_test.cc +++ b/caffe2/operators/string_ops_test.cc @@ -85,7 +85,6 @@ TEST_F(StringJoinOpTest, testString2DJoin) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(StringJoinOpTest, testFloat1DJoin) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input = {3.90f, 5.234f, 8.12f}; auto blob = std::make_unique(); @@ -107,9 +106,7 @@ TEST_F(StringJoinOpTest, testFloat1DJoin) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(StringJoinOpTest, testFloat2DJoin) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector> input = {{1.23f, 2.45f, 3.56f}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4.67f, 5.90f, 6.32f}}; auto blob = std::make_unique(); @@ -132,7 +129,6 @@ TEST_F(StringJoinOpTest, testFloat2DJoin) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(StringJoinOpTest, testLong2DJoin) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector> input = {{100, 200}, {1000, 2000}}; auto blob = std::make_unique(); diff --git a/caffe2/operators/stylizer_ops.cc b/caffe2/operators/stylizer_ops.cc index 718851f9eb3d9..3796e07060ce9 100644 --- a/caffe2/operators/stylizer_ops.cc +++ b/caffe2/operators/stylizer_ops.cc @@ -78,7 +78,6 @@ class PackedInt8BGRANHWCToNCHWCStylizerPreprocessOp auto* noiseBlob = ws_->CreateBlob("__CAFFE2_STYLIZER_NOISE__"); auto defaultNoiseSize = OperatorBase::GetSingleArgument( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "noise_size", 491 /* prime to avoid artifacts */); if (!BlobIsTensorType(*noiseBlob, CPU)) { @@ -126,7 +125,6 @@ class PackedInt8BGRANHWCToNCHWCStylizerPreprocessOp math::RandGaussian( size, 0.0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) OperatorBase::GetSingleArgument("noise_std", 10.0), noise->template mutable_data(), &context_); diff --git a/caffe2/operators/text_file_reader_utils.cc b/caffe2/operators/text_file_reader_utils.cc index 39b5307603643..916f9b0b69874 100644 --- a/caffe2/operators/text_file_reader_utils.cc +++ b/caffe2/operators/text_file_reader_utils.cc @@ -86,7 +86,6 @@ void Tokenizer::next(char* start, char* end, TokenizedString& tokenized) { FileReader::FileReader(const std::string& path, size_t bufferSize) : 
bufferSize_(bufferSize), buffer_(new char[bufferSize]) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fd_ = open(path.c_str(), O_RDONLY, 0777); if (fd_ < 0) { throw std::runtime_error( diff --git a/caffe2/operators/text_file_reader_utils_test.cc b/caffe2/operators/text_file_reader_utils_test.cc index 987252b920ffd..43c5ae2aabcd4 100644 --- a/caffe2/operators/text_file_reader_utils_test.cc +++ b/caffe2/operators/text_file_reader_utils_test.cc @@ -75,7 +75,6 @@ TEST(TextFileReaderUtilsTest, TokenizeTest) { range.start = nullptr; range.end = nullptr; } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t endIdx = std::min(charIdx + 10, ch.size()); range.start = &ch.front() + charIdx; range.end = &ch.front() + endIdx; @@ -109,7 +108,6 @@ TEST(TextFileReaderUtilsTest, TokenizeTest) { outFile << ch; outFile.close(); for (int numPasses = 1; numPasses <= 2; ++numPasses) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) FileReader fr(tmpname, 5); BufferedTokenizer fileTokenizer(tokenizer, &fr, numPasses); // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) diff --git a/caffe2/operators/utility_ops_test.cc b/caffe2/operators/utility_ops_test.cc index 7e09580b15253..4b1f3d355204b 100644 --- a/caffe2/operators/utility_ops_test.cc +++ b/caffe2/operators/utility_ops_test.cc @@ -34,7 +34,6 @@ TEST(UtilityOpTest, testReshapeWithScalar) { def.add_output("XNew"); def.add_output("OldShape"); def.add_arg()->CopyFrom(MakeArgument("shape", vector{1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConstInput(vector(), 3.14, "X", &ws); // execute the op unique_ptr op(CreateOperator(def, &ws)); diff --git a/caffe2/operators/weighted_sample_op.cc b/caffe2/operators/weighted_sample_op.cc index c7aac11ef8112..661154f4c97af 100644 --- a/caffe2/operators/weighted_sample_op.cc +++ b/caffe2/operators/weighted_sample_op.cc @@ -46,7 +46,6 @@ bool WeightedSampleOp::RunOnDevice() { 1, 0.0f, cum_mass_[cum_mass_.size() - 1], &r, &context_); // Makes the element in cum_mass_ slightly bigger // to compensate inaccuracy introduced due to rounding, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cum_mass_[cum_mass_.size() - 1] += 0.01f; auto lb = lower_bound(cum_mass_.begin(), cum_mass_.end(), r); CAFFE_ENFORCE(lb != cum_mass_.end(), "Cannot find ", r, " in cum_mass_."); diff --git a/caffe2/opt/backend_cutting.cc b/caffe2/opt/backend_cutting.cc index 1dbc940bee1f1..b1e0e31af8f80 100644 --- a/caffe2/opt/backend_cutting.cc +++ b/caffe2/opt/backend_cutting.cc @@ -339,7 +339,6 @@ void DumpGraph(NNGraph* g, const std::string& fname) { auto hash = std::hash{}(device_annotation->getDevice()); std::stringstream hex_stream; hex_stream << std::hex << hash; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) labelMap["color"] = "#" + hex_stream.str().substr(0, 6); labelMap["fontcolor"] = labelMap["color"]; } diff --git a/caffe2/opt/backend_cutting_test.cc b/caffe2/opt/backend_cutting_test.cc index 1ff6b133c08ae..7e233eb8bd05d 100644 --- a/caffe2/opt/backend_cutting_test.cc +++ b/caffe2/opt/backend_cutting_test.cc @@ -141,7 +141,6 @@ TEST(BackendCuttingTest, skipPath) { op->add_input("N4"); op->add_output("N5"); AddConv(&net, 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AddConv(&net, 5); op = net.add_op(); op->set_type("Concat"); diff --git a/caffe2/opt/bound_shape_inference_test.cc b/caffe2/opt/bound_shape_inference_test.cc index b23b92837082a..89660b278432b 100644 --- a/caffe2/opt/bound_shape_inference_test.cc +++ 
b/caffe2/opt/bound_shape_inference_test.cc @@ -65,9 +65,7 @@ TEST(BoundShapeInference, SparseLengthsSum) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -76,7 +74,6 @@ TEST(BoundShapeInference, SparseLengthsSum) { out_shape, "Weights", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 16}); verifyShapeInfo( out_shape, @@ -94,7 +91,6 @@ TEST(BoundShapeInference, SparseLengthsSum) { out_shape, "Out", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}); } @@ -112,10 +108,8 @@ TEST(BoundShapeInference, SparseLengthsSumSparseLookup) { "Remapping", makeTensorInfo( {TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000}, TensorProto_DataType_INT32)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); shape_map.emplace( "Indices", @@ -173,10 +167,8 @@ TEST(BoundShapeInference, SparseLengthsSumFused8BitRowwise) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 58}, TensorProto_DataType_INT8)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -185,7 +177,6 @@ TEST(BoundShapeInference, SparseLengthsSumFused8BitRowwise) { out_shape, "Weights", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 58}, TensorProto_DataType_INT8); verifyShapeInfo( @@ -204,7 +195,6 @@ TEST(BoundShapeInference, SparseLengthsSumFused8BitRowwise) { out_shape, "Out", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50}); } @@ -223,17 +213,14 @@ TEST(BoundShapeInference, SparseLengthsSum8BitRowwiseSparse) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 58}, TensorProto_DataType_INT8)); shape_map.emplace( "Mapping", makeTensorInfo( {TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2000}, TensorProto_DataType_INT32)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -242,14 +229,12 @@ TEST(BoundShapeInference, SparseLengthsSum8BitRowwiseSparse) { out_shape, "Weights", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 58}, TensorProto_DataType_INT8); verifyShapeInfo( out_shape, "Mapping", {TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2000}, TensorProto_DataType_INT32); verifyShapeInfo( @@ -268,7 +253,6 @@ TEST(BoundShapeInference, SparseLengthsSum8BitRowwiseSparse) { out_shape, "Out", {TensorBoundShape_DimType_BATCH, 
TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50}); } @@ -287,10 +271,8 @@ TEST(BoundShapeInference, SparseLengthsSumFused4BitRowwise) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 54}, TensorProto_DataType_INT8)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -299,7 +281,6 @@ TEST(BoundShapeInference, SparseLengthsSumFused4BitRowwise) { out_shape, "Weights", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 54}, TensorProto_DataType_INT8); verifyShapeInfo( @@ -318,7 +299,6 @@ TEST(BoundShapeInference, SparseLengthsSumFused4BitRowwise) { out_shape, "Out", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 100}); } @@ -329,7 +309,6 @@ TEST(BoundShapeInference, LengthsRangeFill) { CreateOperatorDef("LengthsRangeFill", "", {"X"}, {"Y"}, {})); net.add_op()->CopyFrom(CreateOperatorDef("Copy", "", {"Y"}, {"Z"}, {})); ShapeInfoMap shape_map; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -361,7 +340,6 @@ TEST(BoundShapeInference, ConstantFill) { net.add_op()->CopyFrom( CreateOperatorDef("ConstantFill", "", {"X"}, {"Y"}, {})); ShapeInfoMap shape_map; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); shape_map.emplace( @@ -369,7 +347,6 @@ TEST(BoundShapeInference, ConstantFill) { makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {20, 1024})); eng.InferBoundShapeAndType(net, shape_map, nullptr); const auto& out_shape = eng.shape_info(); @@ -377,7 +354,6 @@ TEST(BoundShapeInference, ConstantFill) { out_shape, "Y", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {20, 1024}, TensorProto_DataType_FLOAT); } @@ -386,9 +362,7 @@ TEST(BoundShapeInference, ConstantFill) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BoundShapeInference, DISABLED_ON_WINDOWS(Reshape)) { NetDef net; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector new_shape{-1, 8}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector new_shape2{2, 8}; net.add_op()->CopyFrom( CreateOperatorDef("FC", "", {"X0", "W0", "B0"}, {"X1"}, {})); @@ -412,12 +386,9 @@ TEST(BoundShapeInference, DISABLED_ON_WINDOWS(Reshape)) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 1024})); shape_map.emplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B0", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -426,20 +397,17 @@ TEST(BoundShapeInference, DISABLED_ON_WINDOWS(Reshape)) { out_shape, "X0", {TensorBoundShape_DimType_BATCH, 
TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 1024}); verifyShapeInfo( out_shape, "X1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}); verifyShapeInfo( out_shape, "Y1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, // TODO - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size * 16 / 8, 8}); EXPECT_TRUE(out_shape.find("Y2") == out_shape.end()); } @@ -453,14 +421,12 @@ TEST(BoundShapeInference, ConcatMissingInput) { {"I0", "I1"}, {"Cout", "split_info"}, {MakeArgument("axis", 1), MakeArgument("add_axis", 1)})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( "I0", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60})); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -469,7 +435,6 @@ TEST(BoundShapeInference, ConcatMissingInput) { out_shape, "I0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}); verifyShapeInfo( out_shape, @@ -477,7 +442,6 @@ TEST(BoundShapeInference, ConcatMissingInput) { {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 2, 60}); } @@ -493,7 +457,6 @@ TEST( {"I0"}, {"Cout", "split_info"}, {MakeArgument("Y_zero_point", 0), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_scale", 0.05)})); net.add_op()->CopyFrom(CreateOperatorDef( "Int8FC", @@ -501,9 +464,7 @@ TEST( {"Cout", "W0", "B0"}, {"Y"}, {MakeArgument("Y_zero_point", 0), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument("Y_scale", 0.05)})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( @@ -511,7 +472,6 @@ TEST( makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 101}, TensorProto_DataType_INT8, true)); @@ -519,7 +479,6 @@ TEST( "B0", makeTensorInfo( {TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16}, TensorProto_DataType_INT32, true)); @@ -530,13 +489,11 @@ TEST( out_shape, "I0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 101}); verifyShapeInfo( out_shape, "Cout", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 101}, TensorProto_DataType_UINT8, true); @@ -544,7 +501,6 @@ TEST( out_shape, "Y", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}, TensorProto_DataType_UINT8, true); @@ -561,24 +517,20 @@ TEST(BoundShapeInference, ConcatInferInputBackwards) { {MakeArgument("axis", 1)})); net.add_op()->CopyFrom( CreateOperatorDef("FCTransposed", "", {"Cout", "W0", "B0"}, {"Y"}, {})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( "I0", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60})); shape_map.emplace( "W0", makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {101, 16})); shape_map.emplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B0", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {16})); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -587,25 +539,21 @@ TEST(BoundShapeInference, ConcatInferInputBackwards) { out_shape, "I0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}); verifyShapeInfo( out_shape, "Cout", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 101}); verifyShapeInfo( out_shape, "Y", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}); verifyShapeInfo( out_shape, "I1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 101 - 60}); } @@ -620,20 +568,17 @@ TEST(BoundShapeInference, ElementwiseInferInputBackwards) { {"I00", "I11"}, {"Outt"}, {MakeArgument("broadcast", 1)})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( "Out", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60})); shape_map.emplace( "Outt", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50})); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -642,25 +587,21 @@ TEST(BoundShapeInference, ElementwiseInferInputBackwards) { out_shape, "I0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}); verifyShapeInfo( out_shape, "I1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}); verifyShapeInfo( out_shape, "I00", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50}); verifyShapeInfo( out_shape, "I11", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50}); } @@ -676,32 +617,27 @@ TEST(BoundShapeInference, ElementwiseOp) { {"I00", "I11"}, {"Outt"}, {MakeArgument("broadcast", 1)})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( "I0", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60})); shape_map.emplace( "I00", makeTensorInfo( 
{TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50})); shape_map.emplace( "I3", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 40})); shape_map.emplace( "I4", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 40})); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -710,31 +646,26 @@ TEST(BoundShapeInference, ElementwiseOp) { out_shape, "I1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}); verifyShapeInfo( out_shape, "Out", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}); verifyShapeInfo( out_shape, "I11", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50}); verifyShapeInfo( out_shape, "Outt", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 50}); verifyShapeInfo( out_shape, "Out3", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 40}); } @@ -746,16 +677,13 @@ TEST(BoundShapeInference, Bucketize) { "", {"In"}, {"Out"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument>("boundaries", {1.0, 2.0})})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( "In", makeTensorInfo( {TensorBoundShape_DimType_BATCH_OF_FEATURE_MAX, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60})); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -764,7 +692,6 @@ TEST(BoundShapeInference, Bucketize) { out_shape, "Out", {TensorBoundShape_DimType_BATCH_OF_FEATURE_MAX, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 60}, TensorProto_DataType_INT32); } @@ -780,7 +707,6 @@ TEST(BoundShapeInference, Split) { {"X"}, {"Y2", "Y3", "Y4"}, {MakeArgument("axis", 1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MakeArgument>("split", {4, 30, 14})})); net.add_op()->CopyFrom(CreateOperatorDef( "Split", @@ -788,14 +714,12 @@ TEST(BoundShapeInference, Split) { {"X1"}, {"Y5", "Y6"}, {MakeArgument("axis", 1), MakeArgument("add_axis", 1)})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); ShapeInfoMap shape_map; shape_map.emplace( "X", makeTensorInfo( {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 48})); shape_map.emplace( "X1", @@ -803,7 +727,6 @@ TEST(BoundShapeInference, Split) { {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 2, 48})); BoundShapeInferencer eng(spec); 
eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -812,7 +735,6 @@ TEST(BoundShapeInference, Split) { out_shape, "X", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 48}); verifyShapeInfo( out_shape, @@ -820,19 +742,16 @@ TEST(BoundShapeInference, Split) { {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 2, 48}); verifyShapeInfo( out_shape, "Y0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 48 / 2}); verifyShapeInfo( out_shape, "Y1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 48 / 2}); verifyShapeInfo( out_shape, @@ -843,25 +762,21 @@ TEST(BoundShapeInference, Split) { out_shape, "Y3", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 30}); verifyShapeInfo( out_shape, "Y4", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 14}); verifyShapeInfo( out_shape, "Y5", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 48}); verifyShapeInfo( out_shape, "Y6", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 48}); } @@ -881,20 +796,16 @@ TEST(BoundShapeInference, DISABLED_ON_WINDOWS(FC)) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 1024})); shape_map.emplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B0", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {16})); shape_map.emplace( "W1", makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 1024})); shape_map.emplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B1", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {1024})); shape_map.emplace( @@ -902,15 +813,12 @@ TEST(BoundShapeInference, DISABLED_ON_WINDOWS(FC)) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 1024})); shape_map.emplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B2", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {16})); shape_map.emplace( "quant_param", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -919,31 +827,26 @@ TEST(BoundShapeInference, DISABLED_ON_WINDOWS(FC)) { out_shape, "X0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 1024}); verifyShapeInfo( out_shape, "Out0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}); verifyShapeInfo( out_shape, "X1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}); verifyShapeInfo( out_shape, "Out1", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 1024}); verifyShapeInfo( out_shape, "X2", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 1024}, TensorProto_DataType_UINT8, true); @@ -951,7 +854,6 @@ TEST(BoundShapeInference, DISABLED_ON_WINDOWS(FC)) { out_shape, "Out2", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}, TensorProto_DataType_UINT8, true); @@ -969,12 +871,9 @@ TEST(BoundShapeInference, FC3D) { {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 1, 1024})); shape_map.emplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B0", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -983,13 +882,11 @@ TEST(BoundShapeInference, FC3D) { out_shape, "X0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 1024}); verifyShapeInfo( out_shape, "Out0", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {spec.max_batch_size, 16}); } @@ -1004,9 +901,7 @@ TEST(BoundShapeInference, Quantization) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 64})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1015,7 +910,6 @@ TEST(BoundShapeInference, Quantization) { out_shape, "Out_w", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 72}, TensorProto_DataType_UINT8); } @@ -1028,7 +922,6 @@ TEST(BoundShapeInference, Tile) { "", {"blob"}, {"blob_tile"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {MakeArgument("tiles", 32), MakeArgument("axis", 0), MakeArgument("dynamic", 1)})); @@ -1038,9 +931,7 @@ TEST(BoundShapeInference, Tile) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(32, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1049,10 +940,8 @@ TEST(BoundShapeInference, Tile) { out_shape, "blob_tile", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {32, 16}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec2(8, 1000); BoundShapeInferencer 
eng2(spec2); eng2.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1061,7 +950,6 @@ TEST(BoundShapeInference, Tile) { out_shape2, "blob_tile", {TensorBoundShape_DimType_BATCH, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {8, 16}); } @@ -1094,18 +982,15 @@ TEST(BoundShapeInference, Combo0) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1000, 16})); shape_map.emplace( "Weights1", makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {20000, 16})); shape_map.emplace( "Indices", makeTensorInfo({TensorBoundShape_DimType_CONSTANT}, {2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(20, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1133,9 +1018,7 @@ TEST(BoundShapeInference, Softmax) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(32, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1144,7 +1027,6 @@ TEST(BoundShapeInference, Softmax) { out_shape, "output", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 16}); } @@ -1163,9 +1045,7 @@ TEST(BoundShapeInference, LpNorm) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(32, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1193,9 +1073,7 @@ TEST(BoundShapeInference, Transpose) { makeTensorInfo( {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 16})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BoundShapeSpec spec(32, 1000); BoundShapeInferencer eng(spec); eng.InferBoundShapeAndType(net, shape_map, nullptr); @@ -1204,6 +1082,5 @@ TEST(BoundShapeInference, Transpose) { out_shape, "output", {TensorBoundShape_DimType_CONSTANT, TensorBoundShape_DimType_CONSTANT}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 1}); } diff --git a/caffe2/opt/bound_shape_inferencer.cc b/caffe2/opt/bound_shape_inferencer.cc index 4b1cf9b91a01d..4c13dd331e7fd 100644 --- a/caffe2/opt/bound_shape_inferencer.cc +++ b/caffe2/opt/bound_shape_inferencer.cc @@ -475,7 +475,6 @@ void BoundShapeInferencer::InferSparseLengthsSum(const OperatorDef& op) { op.type() == "SparseLengthsWeightedSumFused8BitRowwise" || op.type() == "SparseLengthsSum8BitRowwiseSparse" || op.type() == "SparseLengthsWeightedSum8BitRowwiseSparse") { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_dim1 -= 8; } // If the op is SparseLengthsSumFused4BitRowwise, we need to extract 2 bytes diff --git a/caffe2/opt/converter_nomigraph_test.cc b/caffe2/opt/converter_nomigraph_test.cc index 6b26525186e47..d8e24783abf2d 100644 --- a/caffe2/opt/converter_nomigraph_test.cc +++ b/caffe2/opt/converter_nomigraph_test.cc @@ -7,7 +7,6 @@ TEST(Converter, Basic) { using namespace caffe2::testing; caffe2::NetDef net; - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { // NOLINTNEXTLINE(clang-analyzer-security.insecureAPI.rand) if (rand() % 2) { diff --git a/caffe2/opt/device_test.cc b/caffe2/opt/device_test.cc index d8fef51c0be30..c060e276da993 100644 --- a/caffe2/opt/device_test.cc +++ b/caffe2/opt/device_test.cc @@ -16,7 +16,6 @@ using namespace nom::repr; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DeviceTest, InsertCopies) { caffe2::NetDef net; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 9; ++i) { if (i % 3 == 0) { caffe2::OperatorDef* def = net.add_op(); diff --git a/caffe2/opt/distributed_test.cc b/caffe2/opt/distributed_test.cc index fbc32430af86a..e8260cd5a7dc5 100644 --- a/caffe2/opt/distributed_test.cc +++ b/caffe2/opt/distributed_test.cc @@ -81,7 +81,6 @@ TEST(Distributed, InsertDeviceOptions) { caffe2::injectDataEdgeIndicators(&net); auto nn = caffe2::convertToNNModule(net); caffe2::DeviceOption d; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d.set_device_type(1337); caffe2::addBlobDeviceOptions({{"X", d}, {"Y", d}, {"W", d}}, &nn); @@ -102,7 +101,6 @@ TEST(Distributed, InsertDeviceOptionsFailureCase) { caffe2::injectDataEdgeIndicators(&net); auto nn = caffe2::convertToNNModule(net); caffe2::DeviceOption d; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d.set_device_type(1337); // We can only use correct blob names, expect failure otherwise // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) @@ -155,7 +153,6 @@ TEST(Converter, InjectDataEdgeIndicators) { TEST(Converter, OverloadedConvertToNNModule) { auto net = fakeNet(); caffe2::DeviceOption d; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d.set_device_type(1337); auto nn = caffe2::convertToNNModule(net, {{"X", d}, {"Y", d}, {"W", d}}); @@ -174,7 +171,6 @@ TEST(Converter, OverloadedConvertToNNModule) { TEST(Converter, OverloadedConvertToNNModuleFailure) { auto net = fakeNet(); caffe2::DeviceOption d; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d.set_device_type(1337); // We can only use correct blob names, expect failure otherwise // NOLINTNEXTLINE(hicpp-avoid-goto,cppcoreguidelines-avoid-goto) diff --git a/caffe2/opt/mobile_test.cc b/caffe2/opt/mobile_test.cc index cc8b97b58d1e9..1606426d3c54b 100644 --- a/caffe2/opt/mobile_test.cc +++ b/caffe2/opt/mobile_test.cc @@ -14,7 +14,6 @@ // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MobileTest, Convolution) { caffe2::NetDef net; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { if (i % 3) { caffe2::OperatorDef* def = net.add_op(); diff --git a/caffe2/opt/onnxifi_op.cc b/caffe2/opt/onnxifi_op.cc index fe1645a8e116d..fc70adff986bd 100644 --- a/caffe2/opt/onnxifi_op.cc +++ b/caffe2/opt/onnxifi_op.cc @@ -707,7 +707,6 @@ bool OnnxifiOp::RunOnDevice() { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) onnxStatus eventStatus; std::string message; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t messageLength = 512; message.resize(messageLength); diff --git a/caffe2/opt/onnxifi_transformer.cc b/caffe2/opt/onnxifi_transformer.cc index 282e0bd4fc446..e0aa8c2531805 100644 --- a/caffe2/opt/onnxifi_transformer.cc +++ b/caffe2/opt/onnxifi_transformer.cc @@ -186,7 +186,6 @@ void fillModelInfo(::ONNX_NAMESPACE::ModelProto* model) { model->set_producer_name("caffe2"); auto* opset_id = model->add_opset_import(); opset_id->set_domain(""); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) opset_id->set_version(7); } diff --git a/caffe2/opt/optimize_ideep.cc b/caffe2/opt/optimize_ideep.cc index c2f12d91f754c..e12d128b26d37 100644 --- a/caffe2/opt/optimize_ideep.cc +++ b/caffe2/opt/optimize_ideep.cc @@ -242,7 +242,6 @@ bool fuseConvBNAndAffCh(repr::NNModule* nn, caffe2::Workspace* ws) { } auto bnOrAffChInputs = repr::nn::getInputs(bnOrAffChNode); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int numInputs = isBN ? 5 : 3; // NOLINTNEXTLINE(clang-diagnostic-sign-compare) if (bnOrAffChInputs.size() < numInputs) { diff --git a/caffe2/opt/split_slss_test.cc b/caffe2/opt/split_slss_test.cc index 41d5a1b5dcda4..24abd87042b20 100644 --- a/caffe2/opt/split_slss_test.cc +++ b/caffe2/opt/split_slss_test.cc @@ -88,7 +88,6 @@ void check( TEST(splitSparseLengthsSumSparse, sweep) { std::vector has_weights = {true, false}; std::vector fallbacks = {true, false}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector bits = {2, 4, 8}; for (const auto has_weight : has_weights) { for (const auto bit : bits) { diff --git a/caffe2/perfkernels/embedding_lookup_avx2.cc b/caffe2/perfkernels/embedding_lookup_avx2.cc index 5f109e449881d..aee5c30963b80 100644 --- a/caffe2/perfkernels/embedding_lookup_avx2.cc +++ b/caffe2/perfkernels/embedding_lookup_avx2.cc @@ -26,7 +26,6 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma( // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) const int fused_block_size = block_size + 0; int dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -73,128 +72,82 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary 
prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -233,68 +186,46 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) 
        __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]);
        _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv));
      }
    }
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  } else if (block_size == 32) {
    // unrolling 4 times
    for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {
@@ -329,38 +260,28 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma(
        vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8);
        // skip unnecessary prefetch of (&ip_next_T0[8])
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24);
        // skip unnecessary prefetch of (&ip_next_T0[24])
      }
      if (!normalize_by_lengths || lengths[rangeIndex] == 0) {
        _mm256_storeu_ps(&op[0], vop0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[8], vop8);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[16], vop16);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[24], vop24);
      } else {
        // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
        __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]);
        _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv));
      }
    }
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  } else if (block_size == 16) {
    // unrolling 2 times
    for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {
@@ -393,19 +314,16 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma(
        vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8);
        // skip unnecessary prefetch of (&ip_next_T0[8])
      }
      if (!normalize_by_lengths || lengths[rangeIndex] == 0) {
        _mm256_storeu_ps(&op[0], vop0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[8], vop8);
      } else {
        // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
        __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]);
        _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv));
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv));
      }
    }
@@ -414,7 +332,6 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma(
    for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {
      float* op = &out[rangeIndex * block_size];
      int64_t j = 0;
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
      for (; j + 8 <= block_size; j += 8) {
        _mm256_storeu_ps(op + j, _mm256_setzero_ps());
      }
@@ -445,7 +362,6 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma(
      }
      const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size];
      j = 0;
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
      for (; j + 8 <= block_size; j += 8) {
        _mm256_storeu_ps(
            &op[j],
@@ -463,7 +379,6 @@ static bool EmbeddingLookup_int32_t_float_float__avx2_fma(
      float len_inv = 1.0f / lengths[rangeIndex];
      __m256 vlen_inv = _mm256_set1_ps(len_inv);
      j = 0;
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
      for (; j + 8 <= block_size; j += 8) {
        _mm256_storeu_ps(
            &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv));
@@ -543,7 +458,6 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma(
  const int64_t prefdist_T0 = 16;
  const int64_t fused_block_size = block_size + 0;
  int64_t dataInd = 0;
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  if (block_size == 128) {
    // unrolling 16 times
    for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {
@@ -590,128 +504,82 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma(
        vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8);
        // skip unnecessary prefetch of (&ip_next_T0[8])
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24);
        // skip unnecessary prefetch of (&ip_next_T0[24])
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40);
        // skip unnecessary prefetch of (&ip_next_T0[40])
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56);
        // skip unnecessary prefetch of (&ip_next_T0[56])
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64);
        _mm_prefetch(
            reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0);
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72);
        // skip unnecessary prefetch of (&ip_next_T0[72])
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], 
_mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -750,68 +618,46 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); 
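Editorial note: the stores around this point belong to the normalize_by_lengths branch, where every 8-float slice of the accumulated block is multiplied by the broadcast 1.0f / lengths[rangeIndex]. A hedged sketch of that step, using a hypothetical scale_block helper:

#include <immintrin.h>
#include <cstdint>

// Sketch only: mean-pool normalization of one output block (AVX2).
// Mirrors the vlen_inv pattern above; empty segments are left as plain sums.
static void scale_block(float* op, int64_t block_size, int length) {
  if (length == 0) {
    return;
  }
  const __m256 vlen_inv = _mm256_set1_ps(1.0f / static_cast<float>(length));
  for (int64_t j = 0; j + 8 <= block_size; j += 8) {
    _mm256_storeu_ps(op + j, _mm256_mul_ps(_mm256_loadu_ps(op + j), vlen_inv));
  }
}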
_mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -846,38 +692,28 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -910,19 +746,16 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // 
NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -931,7 +764,6 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -962,7 +794,6 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -980,7 +811,6 @@ static bool EmbeddingLookup_int64_t_float_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1061,7 +891,6 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) const int fused_block_size = block_size + 0; int dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1115,28 +944,24 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1144,28 +969,24 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1173,28 +994,24 @@ static bool 
EmbeddingLookup_int32_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1202,94 +1019,60 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1335,28 +1118,24 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1364,62 +1143,44 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1461,46 +1222,36 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int 
rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1540,20 +1291,17 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -1564,7 +1312,6 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -1595,7 +1342,6 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -1619,7 +1365,6 @@ static bool EmbeddingLookup_int32_t_half_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1699,7 +1444,6 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1753,28 +1497,24 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1782,28 +1522,24 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); 
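Editorial note: the at::Half hunks in this region differ from the float ones only in the load — eight fp16 values are read as a 128-bit vector and widened with _mm256_cvtph_ps before the same FMA. A sketch under that reading, with a hypothetical fmadd_half_row helper and uint16_t standing in for the at::Half storage type (requires F16C in addition to AVX2/FMA):

#include <immintrin.h>
#include <cstdint>

// Sketch only: one 8-element fp16 slice folded into an fp32 accumulator.
static inline __m256 fmadd_half_row(
    __m256 vwgt,         // broadcast weight, as in the kernels above
    const uint16_t* ip,  // 8 IEEE fp16 values (stand-in for const at::Half*)
    __m256 vop) {        // running accumulator
  const __m128i raw = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ip));
  return _mm256_fmadd_ps(vwgt, _mm256_cvtph_ps(raw), vop);
}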
// skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1811,28 +1547,24 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1840,94 +1572,60 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } 
else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1973,28 +1671,24 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -2002,62 +1696,44 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of 
(&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2099,46 +1775,36 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2178,20 +1844,17 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2202,7 +1865,6 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2233,7 +1895,6 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2257,7 +1918,6 @@ static bool EmbeddingLookup_int64_t_half_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -2338,7 +1998,6 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) const int fused_block_size = block_size + 0; int dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2397,56 +2056,48 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip 
unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -2454,122 +2105,84 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
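Editorial note: in the uint8_t kernels the per-slice load is _mm_loadl_epi64 (64 bits, eight bytes), zero-extended with _mm256_cvtepu8_epi32 and converted to float, while the bias vector vbio is added to the accumulator before the FMA. A sketch of that step, with a hypothetical fmadd_uint8_row helper:

#include <immintrin.h>
#include <cstdint>

// Sketch only: one 8-byte quantized slice folded into an fp32 accumulator.
// Computes vwgt * float(ip[k]) + (vop + vbio) per lane, matching the pattern above.
static inline __m256 fmadd_uint8_row(
    __m256 vwgt,        // broadcast (scaled) weight
    __m256 vbio,        // broadcast bias term, as in `vbio` above
    const uint8_t* ip,  // 8 quantized values
    __m256 vop) {       // running accumulator
  const __m128i raw = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ip));
  const __m256 vals = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(raw));
  return _mm256_fmadd_ps(vwgt, vals, _mm256_add_ps(vop, vbio));
}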
_mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2620,90 +2233,68 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // 
unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2750,46 +2341,36 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2834,20 +2415,17 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2856,7 +2434,6 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2892,7 +2469,6 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( 
&op[j], @@ -2913,7 +2489,6 @@ static bool EmbeddingLookup_int32_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -2993,7 +2568,6 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3052,56 +2626,48 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -3109,122 +2675,84 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3275,90 +2803,68 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3405,46 +2911,36 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if 
(block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3489,20 +2985,17 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -3511,7 +3004,6 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -3547,7 +3039,6 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -3568,7 +3059,6 @@ static bool EmbeddingLookup_int64_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); diff --git a/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_avx2.cc b/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_avx2.cc index 55728cbf3928c..67c5aed3f79d3 100644 --- a/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_avx2.cc +++ b/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_avx2.cc @@ -25,7 +25,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) const int fused_block_size = block_size + 2; int dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -72,128 +71,82 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, 
_mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -232,68 +185,46 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -328,38 +259,28 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -392,19 +313,16 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -413,7 +331,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -444,7 +361,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -462,7 +378,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_float_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -537,7 +452,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 2; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -584,128 +498,82 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -744,68 +612,46 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); 
_mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -840,38 +686,28 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -904,19 +740,16 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -925,7 +758,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -956,7 +788,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -974,7 +805,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_float_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1050,7 +880,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) const int fused_block_size = block_size + 4; int dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1104,28 +933,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + 
(24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1133,28 +958,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1162,28 +983,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1191,94 +1008,60 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1324,28 +1107,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1353,62 +1132,44 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1450,46 +1211,36 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1529,20 +1280,17 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -1553,7 +1301,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -1584,7 +1331,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -1608,7 +1354,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_half_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1683,7 +1428,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 4; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1737,28 +1481,24 @@ static bool 
Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1766,28 +1506,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1795,28 +1531,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1824,94 +1556,60 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < 
output_size; ++rangeIndex) { @@ -1957,28 +1655,24 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1986,62 +1680,44 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // 
unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2083,46 +1759,36 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2162,20 +1828,17 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2186,7 +1849,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2217,7 +1879,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2241,7 +1902,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_half_float__avx2_fma( float len_inv = 1.0f 
/ lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -2317,7 +1977,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) const int fused_block_size = block_size + 8; int dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2378,56 +2037,48 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -2435,122 +2086,84 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], 
_mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2603,90 +2216,68 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); 
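The uint8 hunks above and below repeat a single 8-lane dequantize-and-accumulate step once per 8 output floats. A minimal sketch of that one step, assuming AVX2/FMA; the names accumulate_u8_lane, ip, vwgt, vbio and acc are illustrative placeholders rather than identifiers from this file, and the reinterpret_cast target const __m128i* is an assumption (the rendered diff drops template arguments):

    #include <immintrin.h>
    #include <cstdint>

    // One 8-element lane of the fused 8-bit rowwise pattern: load 8 quantized
    // bytes, widen to int32, convert to float, then compute
    // acc = vwgt * x + (acc + vbio), where vwgt is the broadcast per-row weight
    // and vbio is that weight times the row's bias.
    static inline __m256 accumulate_u8_lane(
        const uint8_t* ip, __m256 vwgt, __m256 vbio, __m256 acc) {
      const __m128i raw = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ip));
      const __m256 x = _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(raw));
      return _mm256_fmadd_ps(vwgt, x, _mm256_add_ps(acc, vbio));
    }

Each block-size branch simply unrolls this lane (16, 8, 4 or 2 times) over consecutive 8-byte slices of the row.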
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2735,46 +2326,36 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; 
rangeIndex < output_size; ++rangeIndex) { @@ -2821,20 +2402,17 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2843,7 +2421,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2881,7 +2458,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2902,7 +2478,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int32_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -2977,7 +2552,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 8; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3038,56 +2612,48 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -3095,122 +2661,84 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3263,90 +2791,68 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch 
of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3395,46 +2901,36 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + 
(8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3481,20 +2977,17 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || lengths[rangeIndex] == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -3503,7 +2996,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -3541,7 +3033,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -3562,7 +3053,6 @@ static bool Fused8BitRowwiseEmbeddingLookup_int64_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / lengths[rangeIndex]; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; 
j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); diff --git a/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_idx_avx2.cc b/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_idx_avx2.cc index d8cc74017361a..85ed73d7cc7f2 100644 --- a/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_idx_avx2.cc +++ b/caffe2/perfkernels/embedding_lookup_fused_8bit_rowwise_idx_avx2.cc @@ -25,7 +25,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) const int fused_block_size = block_size + 2; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -76,127 +75,81 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); 
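The float kernels in this file use an even simpler lane: one unaligned 8-float load fused straight into the accumulator. A minimal sketch, assuming AVX2/FMA; accumulate_f32_lane and ip are placeholder names, not identifiers from this diff:

    #include <immintrin.h>

    // acc += vwgt * ip[0..7]; a single unaligned 8-float load fused into the
    // running sum for this slice of the output row.
    static inline __m256 accumulate_f32_lane(
        const float* ip, __m256 vwgt, __m256 acc) {
      return _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip), acc);
    }

    // The surrounding kernels interleave this with _mm_prefetch(..., _MM_HINT_T0)
    // on the corresponding slice of the next row, issued only for every other
    // 8-float slice: a 64-byte cache line already covers 16 floats, hence the
    // "skip unnecessary prefetch" comments in the hunks above.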
_mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -239,67 +192,45 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < 
output_size; ++rangeIndex) { @@ -338,37 +269,27 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -405,18 +326,15 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -425,7 +343,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -460,7 +377,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -477,7 +393,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_float_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( 
&op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -553,7 +468,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 2; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -602,127 +516,81 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); 
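Every block-size branch ends with the same epilogue, visible in the stores that follow: either write the raw accumulated sums, or, when normalize_by_lengths is set and the segment length is non-zero, scale each accumulator by the reciprocal length (mean pooling). A minimal sketch of the normalized store, with store_normalized, acc and lanes as placeholder names:

    #include <immintrin.h>

    // Mean-pooling epilogue: broadcast 1/length once, then scale and store each
    // 8-float accumulator lane. Callers fall back to storing the raw sums when
    // normalization is disabled or the segment length is zero.
    static inline void store_normalized(
        float* op, const __m256* acc, int lanes, float length) {
      const __m256 vlen_inv = _mm256_set1_ps(1.0f / length);
      for (int k = 0; k < lanes; ++k) {
        _mm256_storeu_ps(op + 8 * k, _mm256_mul_ps(acc[k], vlen_inv));
      }
    }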
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -763,67 +631,45 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); 
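The *_half_* variants earlier in this diff (and the Idx half kernels that follow) replace the plain float load with an F16C widening conversion. A minimal sketch of that lane, assuming AVX2/FMA/F16C; accumulate_f16_lane and ip are placeholder names, the half row is represented here as raw uint16_t storage, and the reinterpret_cast target const __m128i* is an assumption (the rendered diff drops template arguments):

    #include <immintrin.h>
    #include <cstdint>

    // One 8-element lane of the half-precision variant: load 8 fp16 values
    // (16 bytes), widen to fp32 with F16C, then acc += vwgt * converted.
    // Requires compiling with AVX2, FMA and F16C enabled.
    static inline __m256 accumulate_f16_lane(
        const uint16_t* ip, __m256 vwgt, __m256 acc) {
      const __m128i raw = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ip));
      return _mm256_fmadd_ps(vwgt, _mm256_cvtph_ps(raw), acc);
    }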
_mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -860,37 +706,27 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of 
(&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -925,18 +761,15 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -945,7 +778,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -978,7 +810,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -995,7 +826,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_float_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1072,7 +902,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) const int fused_block_size = block_size + 4; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // 
unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1130,28 +959,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1159,28 +984,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1188,28 +1009,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1217,93 +1034,59 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of 
(&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; 
++rangeIndex) { @@ -1353,28 +1136,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1382,61 +1161,43 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1482,45 +1243,35 @@ static bool 
Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1564,19 +1315,16 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -1587,7 +1335,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -1622,7 +1369,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -1645,7 +1391,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_half_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1720,7 +1465,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( const int64_t 
prefdist_T0 = 16; const int64_t fused_block_size = block_size + 4; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1776,28 +1520,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1805,28 +1545,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1834,28 +1570,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1863,93 +1595,59 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = 
_mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], 
_mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1997,28 +1695,24 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -2026,61 +1720,43 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2124,45 +1800,35 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2204,19 +1870,16 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2227,7 +1890,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2260,7 +1922,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2283,7 +1944,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_half_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 
8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -2359,7 +2019,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) const int fused_block_size = block_size + 8; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2424,56 +2083,48 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -2481,121 +2132,83 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = 
_mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], 
_mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2652,89 +2265,67 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2787,45 +2378,35 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2876,19 +2457,16 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], 
vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2897,7 +2475,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2939,7 +2516,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2959,7 +2535,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -3036,7 +2611,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 8; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3099,56 +2673,48 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -3156,121 +2722,83 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], 
vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3325,89 +2853,67 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3458,45 +2964,35 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3545,19 +3041,16 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -3566,7 +3059,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -3606,7 +3098,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -3626,7 +3117,6 @@ static bool Fused8BitRowwiseEmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); diff --git a/caffe2/perfkernels/embedding_lookup_idx_avx2.cc b/caffe2/perfkernels/embedding_lookup_idx_avx2.cc index 2a86f78be7bb2..674af836ba10b 100644 --- a/caffe2/perfkernels/embedding_lookup_idx_avx2.cc +++ b/caffe2/perfkernels/embedding_lookup_idx_avx2.cc @@ -26,7 +26,6 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) const int fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -77,127 +76,81 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -240,67 +193,45 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, 
_mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -339,37 +270,27 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
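// Reference sketch (illustrative only, not part of the upstream file): the
// float branches above are hand-unrolled forms of this per-row reduction,
// including the optional 1/length normalization selected by the
// `!normalize_by_lengths || length == 0` test. The parameter names below
// mirror the kernel arguments and are assumptions made for the sketch only.
static inline void scalar_embedding_lookup_row(
    float* op,              // one output row, block_size floats
    const float* input,     // embedding table, rows of block_size floats
    const int64_t* indices,
    const float* weights,   // may be nullptr (unweighted lookup)
    int64_t start,          // first position of this segment
    int64_t end,            // one past the last position of this segment
    int64_t block_size,
    bool normalize_by_lengths) {
  const int64_t length = end - start;
  for (int64_t k = 0; k < block_size; ++k) {
    op[k] = 0.0f;
  }
  for (int64_t pos = start; pos < end; ++pos) {
    const float w = weights ? weights[pos] : 1.0f;
    const float* ip = input + indices[pos] * block_size;
    for (int64_t k = 0; k < block_size; ++k) {
      op[k] += w * ip[k];  // the vopN = _mm256_fmadd_ps(vwgt, load, vopN) lanes
    }
  }
  if (normalize_by_lengths && length != 0) {
    const float len_inv = 1.0f / length;  // matches the vlen_inv branch above
    for (int64_t k = 0; k < block_size; ++k) {
      op[k] *= len_inv;
    }
  }
}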
_mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -406,18 +327,15 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -426,7 +344,6 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -461,7 +378,6 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -478,7 +394,6 @@ static bool EmbeddingLookupIdx_int32_t_float_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -558,7 +473,6 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -607,127 +521,81 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop64 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (64)), vop64); _mm_prefetch( reinterpret_cast(&ip_next_T0[64]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop72 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (72)), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop80 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (80)), vop80); _mm_prefetch( reinterpret_cast(&ip_next_T0[80]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop88 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (88)), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop96 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (96)), vop96); _mm_prefetch( reinterpret_cast(&ip_next_T0[96]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop104 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (104)), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop112 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (112)), vop112); _mm_prefetch( reinterpret_cast(&ip_next_T0[112]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop120 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (120)), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
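// Reference sketch (illustrative only, not part of the upstream file): when
// block_size is not one of the unrolled sizes, the generic branch above walks
// each row in 8-float chunks via `for (; j + 8 <= block_size; j += 8)`.
// A scalar stand-in for one such accumulation pass, with assumed names:
static inline void accumulate_in_8wide_chunks(
    float* op, const float* ip, float w, int64_t block_size) {
  int64_t j = 0;
  for (; j + 8 <= block_size; j += 8) {
    // stands in for one 256-bit step of the form
    //   _mm256_storeu_ps(&op[j],
    //       _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(&ip[j]),
    //                       _mm256_loadu_ps(&op[j])));
    for (int k = 0; k < 8; ++k) {
      op[j + k] += w * ip[j + k];
    }
  }
  // Any tail elements (when block_size is not a multiple of 8) are outside
  // this sketch.
}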
_mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -768,67 +636,45 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop32 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (32)), vop32); _mm_prefetch( reinterpret_cast(&ip_next_T0[32]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop40 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (40)), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop48 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (48)), vop48); _mm_prefetch( reinterpret_cast(&ip_next_T0[48]), _MM_HINT_T0); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop56 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (56)), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -865,37 +711,27 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop16 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (16)), vop16); _mm_prefetch( reinterpret_cast(&ip_next_T0[16]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop24 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (24)), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], 
_mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -930,18 +766,15 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( vop0 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (0)), vop0); _mm_prefetch( reinterpret_cast(&ip_next_T0[0]), _MM_HINT_T0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) vop8 = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (8)), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -950,7 +783,6 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -983,7 +815,6 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( } const float* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -1000,7 +831,6 @@ static bool EmbeddingLookupIdx_int64_t_float_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1081,7 +911,6 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) const int fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1139,28 +968,24 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1168,28 +993,24 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary 
prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1197,28 +1018,24 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1226,93 +1043,59 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1362,28 +1145,24 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1391,61 +1170,43 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
_mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1491,45 +1252,35 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], 
_mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1573,19 +1324,16 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -1596,7 +1344,6 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -1631,7 +1378,6 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -1654,7 +1400,6 @@ static bool EmbeddingLookupIdx_int32_t_half_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -1734,7 +1479,6 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -1790,28 +1534,24 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -1819,28 +1559,24 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of 
(&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (64)))), vop64); _mm_prefetch( @@ -1848,28 +1584,24 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (72)))), vop72); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (80)))), vop80); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (88)))), vop88); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (96)))), vop96); _mm_prefetch( @@ -1877,93 +1609,59 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (104)))), vop104); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (112)))), vop112); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (120)))), vop120); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2011,28 +1709,24 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (32)))), vop32); _mm_prefetch( @@ -2040,61 +1734,43 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (40)))), vop40); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (48)))), vop48); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (56)))), vop56); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2138,45 +1814,35 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (16)))), vop16); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (24)))), vop24); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2218,19 +1884,16 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtph_ps( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadu_si128(reinterpret_cast(ip + (8)))), vop8); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2241,7 +1904,6 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2274,7 +1936,6 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( } const at::Half* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2297,7 +1958,6 @@ static bool EmbeddingLookupIdx_int64_t_half_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -2378,7 +2038,6 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) const int fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2441,56 +2100,48 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), 
_mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -2498,121 +2149,83 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2667,89 +2280,67 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // 
skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2800,45 +2391,35 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( 
vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -2887,19 +2468,16 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -2908,7 +2486,6 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( for (int rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = &out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -2948,7 +2525,6 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -2968,7 +2544,6 @@ static bool EmbeddingLookupIdx_int32_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); @@ -3048,7 +2623,6 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( const int64_t prefdist_T0 = 16; const int64_t fused_block_size = block_size + 0; int64_t dataInd = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (block_size == 128) { // unrolling 16 times for (int64_t rangeIndex = 0; 
rangeIndex < output_size; ++rangeIndex) { @@ -3109,56 +2683,48 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) vop64 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (64))))), _mm256_add_ps(vop64, vbio)); _mm_prefetch( @@ -3166,121 +2732,83 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop72 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (72))))), _mm256_add_ps(vop72, vbio)); // skip unnecessary prefetch of (&ip_next_T0[72]) vop80 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (80))))), _mm256_add_ps(vop80, vbio)); // skip unnecessary prefetch of (&ip_next_T0[80]) vop88 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (88))))), _mm256_add_ps(vop88, vbio)); // skip unnecessary prefetch of (&ip_next_T0[88]) vop96 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (96))))), _mm256_add_ps(vop96, vbio)); // skip unnecessary prefetch of (&ip_next_T0[96]) vop104 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (104))))), _mm256_add_ps(vop104, vbio)); // skip unnecessary prefetch of (&ip_next_T0[104]) vop112 = 
_mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (112))))), _mm256_add_ps(vop112, vbio)); // skip unnecessary prefetch of (&ip_next_T0[112]) vop120 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (120))))), _mm256_add_ps(vop120, vbio)); // skip unnecessary prefetch of (&ip_next_T0[120]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], vop64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], vop72); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], vop80); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], vop88); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], vop96); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], vop104); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], vop112); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], vop120); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[64], _mm256_mul_ps(vop64, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[72], _mm256_mul_ps(vop72, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[80], _mm256_mul_ps(vop80, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[88], _mm256_mul_ps(vop88, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[96], _mm256_mul_ps(vop96, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[104], _mm256_mul_ps(vop104, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[112], _mm256_mul_ps(vop112, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[120], _mm256_mul_ps(vop120, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 64) { // unrolling 8 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3333,89 +2861,67 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) vop32 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (32))))), _mm256_add_ps(vop32, vbio)); // skip unnecessary prefetch of (&ip_next_T0[32]) vop40 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (40))))), _mm256_add_ps(vop40, vbio)); // skip unnecessary prefetch of (&ip_next_T0[40]) vop48 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (48))))), _mm256_add_ps(vop48, vbio)); // skip unnecessary prefetch of (&ip_next_T0[48]) vop56 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (56))))), _mm256_add_ps(vop56, vbio)); // skip unnecessary prefetch of (&ip_next_T0[56]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], vop32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], vop40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], vop48); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], vop56); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[32], _mm256_mul_ps(vop32, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[40], _mm256_mul_ps(vop40, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[48], _mm256_mul_ps(vop48, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[56], _mm256_mul_ps(vop56, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 32) { // unrolling 4 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3464,45 +2970,35 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) vop16 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (16))))), _mm256_add_ps(vop16, vbio)); // skip unnecessary prefetch of (&ip_next_T0[16]) vop24 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (24))))), _mm256_add_ps(vop24, vbio)); // skip unnecessary prefetch of (&ip_next_T0[24]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], vop16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], vop24); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[16], _mm256_mul_ps(vop16, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[24], _mm256_mul_ps(vop24, vlen_inv)); } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (block_size == 16) { // unrolling 2 times for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { @@ -3549,19 +3045,16 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( vop8 = _mm256_fmadd_ps( vwgt, _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm_loadl_epi64(reinterpret_cast(ip + (8))))), _mm256_add_ps(vop8, vbio)); // skip unnecessary prefetch of (&ip_next_T0[8]) } if (!normalize_by_lengths || length == 0) { _mm256_storeu_ps(&op[0], vop0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], vop8); } else { __m256 vlen_inv = _mm256_set1_ps(1.0f / length); _mm256_storeu_ps(&op[0], _mm256_mul_ps(vop0, vlen_inv)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_storeu_ps(&op[8], _mm256_mul_ps(vop8, vlen_inv)); } } @@ -3570,7 +3063,6 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( for (int64_t rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) { float* op = 
&out[rangeIndex * block_size]; int64_t j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps(op + j, _mm256_setzero_ps()); } @@ -3608,7 +3100,6 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( } const uint8_t* ip_next_T0 = &input[idx_pref_T0 * fused_block_size]; j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], @@ -3628,7 +3119,6 @@ static bool EmbeddingLookupIdx_int64_t_uint8_t_float__avx2_fma( float len_inv = 1.0f / length; __m256 vlen_inv = _mm256_set1_ps(len_inv); j = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; j + 8 <= block_size; j += 8) { _mm256_storeu_ps( &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv)); diff --git a/caffe2/perfkernels/fused_nbit_rowwise_conversion.cc b/caffe2/perfkernels/fused_nbit_rowwise_conversion.cc index 0b9746574d0c9..461827a3cae14 100644 --- a/caffe2/perfkernels/fused_nbit_rowwise_conversion.cc +++ b/caffe2/perfkernels/fused_nbit_rowwise_conversion.cc @@ -34,7 +34,6 @@ void FloatToFused8BitRowwiseQuantized__base( *std::max_element(input_row, input_row + input_columns); float range = maximum_element - minimum_element; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_row_scale_bias[0] = range / 255.0f; output_row_scale_bias[1] = minimum_element; const auto inverse_scale = 255.0f / (range + kEpsilon); @@ -104,7 +103,6 @@ void FloatToFusedNBitRowwiseQuantizedSBHalf__base( int input_rows, int input_columns, std::uint8_t* output) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_elem_per_byte = 8 / bit_rate; int output_columns = // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) @@ -165,7 +163,6 @@ void FusedNBitRowwiseQuantizedSBHalfToFloat__base( int input_rows, int input_columns, float* output) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_elem_per_byte = 8 / bit_rate; int output_columns = // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions) diff --git a/caffe2/perfkernels/math_cpu_avx2.cc b/caffe2/perfkernels/math_cpu_avx2.cc index 0228f43818dcb..325d9c4591ef7 100644 --- a/caffe2/perfkernels/math_cpu_avx2.cc +++ b/caffe2/perfkernels/math_cpu_avx2.cc @@ -49,9 +49,7 @@ void quantize_and_compress__avx2( 0xff, // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) 0xff, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x0c, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x08, 0x04, 0x00, @@ -79,16 +77,13 @@ void quantize_and_compress__avx2( 0xff, // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) 0xff, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x0c, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x08, 0x04, 0x00); __m256i permute_mask_v = _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint64_t data_per_byte = 8 / bitwidth; uint64_t tail = input_size % data_per_byte; tail = tail ? 
data_per_byte - tail : 0; @@ -131,13 +126,11 @@ void quantize_and_compress__avx2( _mm256_min_ps(_mm256_set1_ps(max_q), rounded_v)); __m256i qval_v = _mm256_cvtps_epi32(rounded_v); __m256i orval_v = _mm256_cvtepu8_epi32(_mm_lddqu_si128( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) reinterpret_cast(output_data + 10 + i))); orval_v = _mm256_or_si256(orval_v, _mm256_slli_epi32(qval_v, bit_start)); orval_v = _mm256_shuffle_epi8(orval_v, shuffle_mask_v); orval_v = _mm256_permutevar8x32_epi32(orval_v, permute_mask_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *reinterpret_cast(output_data + 10 + i) = _mm256_extract_epi64(orval_v, 0); } @@ -151,9 +144,7 @@ void quantize_and_compress__avx2( rounded = rounded > 0.0f ? rounded : 0.0f; uint8_t qval = rounded; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint8_t orval = output_data[10 + i]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_data[10 + i] = orval | static_cast(qval << bit_start); } bit_start += bitwidth; @@ -176,13 +167,11 @@ void quantize_and_compress__avx2( __m256i qval_v = _mm256_cvtps_epi32(_mm256_round_ps( thetimes_v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); __m256i orval_v = _mm256_cvtepu8_epi32(_mm_lddqu_si128( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) reinterpret_cast(output_data + 10 + i))); orval_v = _mm256_or_si256(orval_v, _mm256_slli_epi32(qval_v, bit_start)); orval_v = _mm256_shuffle_epi8(orval_v, shuffle_mask_v); orval_v = _mm256_permutevar8x32_epi32(orval_v, permute_mask_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *reinterpret_cast(output_data + 10 + i) = _mm256_extract_epi64(orval_v, 0); } @@ -195,9 +184,7 @@ void quantize_and_compress__avx2( thetimes = thetimes > 0.0f ? thetimes : 0.0f; uint8_t qval = nearbyint(thetimes); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint8_t orval = output_data[10 + i]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_data[10 + i] = orval | static_cast(qval << bit_start); } bit_start += bitwidth; @@ -236,7 +223,6 @@ void decompress_and_dequantize__avx2( constexpr int VLEN = 8; for (; i < stride / VLEN * VLEN; i += VLEN) { __m128i in_v = _mm_lddqu_si128( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) reinterpret_cast(input_data + 10 + i)); __m256i out_epi32_v = _mm256_and_si256( _mm256_srli_epi32(_mm256_cvtepu8_epi32(in_v), bit_start), diff --git a/caffe2/perfkernels/math_cpu_base.cc b/caffe2/perfkernels/math_cpu_base.cc index b86fcdf78bbdb..fd3ba83cd4a90 100644 --- a/caffe2/perfkernels/math_cpu_base.cc +++ b/caffe2/perfkernels/math_cpu_base.cc @@ -28,7 +28,6 @@ void quantize_and_compress__base( uint64_t bitwidth, bool random, const float* random_buffer) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint64_t data_per_byte = 8 / bitwidth; uint64_t tail = input_size % data_per_byte; tail = tail ? data_per_byte - tail : 0; @@ -68,9 +67,7 @@ void quantize_and_compress__base( rounded = rounded > 0.0f ? rounded : 0.0f; uint8_t qval = rounded; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint8_t orval = output_data[10 + i]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_data[10 + i] = orval | static_cast(qval << bit_start); } bit_start += bitwidth; @@ -89,9 +86,7 @@ void quantize_and_compress__base( thetimes = thetimes > 0.0f ? 
thetimes : 0.0f; uint8_t qval = nearbyint(thetimes); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) uint8_t orval = output_data[10 + i]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output_data[10 + i] = orval | static_cast(qval << bit_start); } bit_start += bitwidth; diff --git a/caffe2/predictor/emulator/data_filler_test.cc b/caffe2/predictor/emulator/data_filler_test.cc index 63812d43187fa..cb87d24380adf 100644 --- a/caffe2/predictor/emulator/data_filler_test.cc +++ b/caffe2/predictor/emulator/data_filler_test.cc @@ -13,7 +13,6 @@ TEST(DataFiller, FillNetInputTest) { .newOp("Concat", {"X0", "X1", "X2"}, {"concat_out", "split_info"}) .addArgument("axis", 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_dim = {30, 20}; std::vector>> input_dims = { {/* X0 */ input_dim, /* X1 */ input_dim, /* X2 */ input_dim}}; diff --git a/caffe2/predictor/predictor_test.cc b/caffe2/predictor/predictor_test.cc index 8378aa964e677..e78e9f3c322de 100644 --- a/caffe2/predictor/predictor_test.cc +++ b/caffe2/predictor/predictor_test.cc @@ -168,7 +168,6 @@ class PredictorTest : public testing::Test { public: void SetUp() override { DeviceOption op; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) op.set_random_seed(1701); ctx_ = std::make_unique(op); NetDef init, run; diff --git a/caffe2/quantization/server/activation_distribution_observer.cc b/caffe2/quantization/server/activation_distribution_observer.cc index f898ff669d080..9d50ac8f8ec3c 100644 --- a/caffe2/quantization/server/activation_distribution_observer.cc +++ b/caffe2/quantization/server/activation_distribution_observer.cc @@ -707,9 +707,7 @@ RegisterQuantizationParamsNetObserver::RegisterQuantizationParamsNetObserver( ++nwords_first_line; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bool new_format = nwords_first_line == 6; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!new_format && nwords_first_line != 5) { LOG(WARNING) << "min_max file " << min_max_file_name << " has an invalid format"; @@ -749,10 +747,8 @@ RegisterQuantizationParamsNetObserver::RegisterQuantizationParamsNetObserver( unique_ptr qfactory(GetQuantizationFactoryOf(op)); qparams = qfactory->ChooseQuantizationParams(min, max, is_weight); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qparams.scale = 0.1f; qparams.zero_point = -min / qparams.scale; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qparams.precision = 8; } @@ -804,12 +800,10 @@ RegisterQuantizationParamsWithHistogramNetObserver:: // NOLINTNEXTLINE(cppcoreguidelines-init-variables) float min, max; ist >> op_index >> op_type >> i >> tensor_name >> min >> max >> nbins; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nwords_first_line != nbins + 7) { ist.str(first_line); ist.clear(); ist >> op_index >> i >> tensor_name >> min >> max >> nbins; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nwords_first_line == nbins + 6) { new_format = false; } else { @@ -871,9 +865,7 @@ RegisterQuantizationParamsWithHistogramNetObserver:: unique_ptr qfactory(GetQuantizationFactoryOf(op)); qparams = qfactory->ChooseQuantizationParams(hist, is_weight); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qparams.scale = 0.1f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qparams.precision = 8; qparams.zero_point = (isinf(min / qparams.scale) || isnan(min / qparams.scale)) diff --git a/caffe2/quantization/server/caffe2_dnnlowp_utils.cc 
b/caffe2/quantization/server/caffe2_dnnlowp_utils.cc index 96708d8b7c78f..4923a71395515 100644 --- a/caffe2/quantization/server/caffe2_dnnlowp_utils.cc +++ b/caffe2/quantization/server/caffe2_dnnlowp_utils.cc @@ -498,12 +498,10 @@ NetDef AddScaleZeroOffsetArgumentsWithHistogram( // NOLINTNEXTLINE(cppcoreguidelines-init-variables) float min, max; ist >> op_index >> op_type >> i >> tensor_name >> min >> max >> nbins; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nwords_first_line != nbins + 7) { ist.str(first_line); ist.clear(); ist >> op_index >> i >> tensor_name >> min >> max >> nbins; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nwords_first_line == nbins + 6) { new_format = false; } else { diff --git a/caffe2/quantization/server/conv_dnnlowp_acc16_op.cc b/caffe2/quantization/server/conv_dnnlowp_acc16_op.cc index fc6afb905bddf..6d3ec0103c12b 100644 --- a/caffe2/quantization/server/conv_dnnlowp_acc16_op.cc +++ b/caffe2/quantization/server/conv_dnnlowp_acc16_op.cc @@ -26,7 +26,6 @@ C10_DECLARE_bool(caffe2_dnnlowp_shared_int32_buffer); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) C10_DEFINE_double( caffe2_dnnlowp_acc16_density_threshold, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.05, "If density of outlier is higher than this, fallback to 32-bit accumulation"); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) @@ -179,7 +178,6 @@ bool ConvDNNLowPAcc16Op::GetQuantizationParameters_() { // Separate out outliers if (!Wq_outlier_ && this->order_ == StorageOrder::NHWC && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nbits_in_non_outlier_ < 8) { CAFFE_ENFORCE(!W_quantized_.empty()); @@ -234,7 +232,6 @@ bool ConvDNNLowPAcc16Op::GetQuantizationParameters_() { if (!reason.empty()) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; C10_LOG_FIRST_N(WARNING, 10) @@ -243,10 +240,8 @@ bool ConvDNNLowPAcc16Op::GetQuantizationParameters_() { } } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8 && this->order_ != StorageOrder::NHWC) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; C10_LOG_FIRST_N(WARNING, 10) @@ -406,7 +401,6 @@ bool ConvDNNLowPAcc16Op::RunOnDeviceWithOrderNCHW() { W_quantized_.data() + (M / group_) * group_id * kernel_dim; static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; C10_LOG_FIRST_N(WARNING, 10) @@ -574,7 +568,6 @@ void ConvDNNLowPAcc16Op::DispatchFBGEMM_( M, group_); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8) { DoSpmdmOnInpBuffer< typename ReQuantizeOutput::outType, @@ -609,7 +602,6 @@ template void ConvDNNLowPAcc16Op::ConvOutlier_( const uint8_t* col_buffer, vector* Y_int32) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8) { const Tensor& X = InputTensorCPU_(INPUT); auto& filter = InputTensorCPU_(FILTER); diff --git a/caffe2/quantization/server/conv_dnnlowp_op.cc b/caffe2/quantization/server/conv_dnnlowp_op.cc index 40386d872a92b..15bf04887636f 100644 --- a/caffe2/quantization/server/conv_dnnlowp_op.cc +++ b/caffe2/quantization/server/conv_dnnlowp_op.cc @@ -86,7 +86,6 @@ template bool ConvDNNLowPOp::TakeDepthWise3x3FastPath_() { const Tensor& X = InputTensorCPU_(INPUT); return this->order_ 
== StorageOrder::NHWC && is_same::value && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) !Acc16() && group_ == X.dim32(X.dim() - 1) && group_ % 8 == 0 && this->kernel_.size() == 2 && kernel_h() == 3 && kernel_w() == 3 && stride_h() == stride_w() && (stride_h() == 1 || stride_h() == 2) && @@ -100,7 +99,6 @@ template bool ConvDNNLowPOp::TakeDepthWise3x3x3FastPath_() { const Tensor& X = InputTensorCPU_(INPUT); bool ret = this->order_ == StorageOrder::NHWC && is_same::value && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) !Acc16() && group_ == X.dim32(X.dim() - 1) && group_ % 8 == 0 && this->kernel_.size() == 3 && this->kernel_[0] == 3 && this->kernel_[1] == 3 && this->kernel_[2] == 3 && @@ -159,7 +157,6 @@ fbgemm::conv_param_t<3> ConvDNNLowPOp::GetConv3DParam_() { this->pads_[2], this->pads_[3], this->pads_[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) this->pads_[5]}); } @@ -314,7 +311,6 @@ void ConvDNNLowPOp::QuantizeBias_() { std::abs( bias_qparams.scale - in_qparams_[INPUT].scale * FilterQuantizationParams(0).scale), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1e-4); } CAFFE_ENFORCE_EQ(bias_qparams.zero_point, 0); @@ -331,7 +327,6 @@ void ConvDNNLowPOp::QuantizeBias_() { b_data[i], 0, in_qparams_[INPUT].scale * FilterQuantizationParams(g).scale, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32, true /* signed */); } @@ -464,7 +459,6 @@ void ConvDNNLowPOp::QuantizeWeight_() { if (this->template InputIsType(FILTER) && quantize_groupwise_) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "Cannot do group-wise quantization for " @@ -571,7 +565,6 @@ void ConvDNNLowPOp::QuantizeWeight_() { } if (!reason.empty()) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "Conv with weight " << this->debug_def().input(FILTER) @@ -877,7 +870,6 @@ void ConvDNNLowPOp::RunOnDeviceEpilogueNHWC_( if (!dnnlowp::HasStaticQuantization(this)) { if (quantize_groupwise_) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "Cannot do group-wise quantization without " @@ -1036,7 +1028,6 @@ void ConvDNNLowPOp::PartitionGroupedNHWCConv_( group_end, i_begin, i_end, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32); } @@ -1107,7 +1098,6 @@ const T* ConvDNNLowPOp::Im2ColNHWC_(Tensor* col_buffer) { this->pads_[2], this->pads_[3], this->pads_[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) this->pads_[5], this->stride_[0], this->stride_[1], @@ -1560,7 +1550,6 @@ void ConvDNNLowPOp::ConvNHWCCore_( this->pads_[2], this->pads_[3], this->pads_[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) this->pads_[5]}); PackAWithIm2Col packA( diff --git a/caffe2/quantization/server/dnnlowp.cc b/caffe2/quantization/server/dnnlowp.cc index 7d002765b3eca..3e42279a045f9 100644 --- a/caffe2/quantization/server/dnnlowp.cc +++ b/caffe2/quantization/server/dnnlowp.cc @@ -62,13 +62,11 @@ C10_DEFINE_string( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) C10_DEFINE_double( caffe2_dnnlowp_weight_p99_threshold, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.99, "P99 threshold to select out from the full histogram for weights."); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) 
C10_DEFINE_double( caffe2_dnnlowp_activation_p99_threshold, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.99, "P99 threshold to select out from the full histogram for activations."); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) diff --git a/caffe2/quantization/server/elementwise_linear_dnnlowp_op.cc b/caffe2/quantization/server/elementwise_linear_dnnlowp_op.cc index 50c37e86f3190..e584502427dea 100644 --- a/caffe2/quantization/server/elementwise_linear_dnnlowp_op.cc +++ b/caffe2/quantization/server/elementwise_linear_dnnlowp_op.cc @@ -66,7 +66,6 @@ bool ElementwiseLinearDNNLowPOp::RunOnDevice() { b_data[i], 0, in_qparams_[0].scale * in_qparams_[1].scale, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32, true /* signed */); } diff --git a/caffe2/quantization/server/elementwise_sum_dnnlowp_op_avx2.cc b/caffe2/quantization/server/elementwise_sum_dnnlowp_op_avx2.cc index 8e4c80da7a4d1..e3fb18129fc8e 100644 --- a/caffe2/quantization/server/elementwise_sum_dnnlowp_op_avx2.cc +++ b/caffe2/quantization/server/elementwise_sum_dnnlowp_op_avx2.cc @@ -25,7 +25,6 @@ void ElementWiseSumAVX2( float c_scale, int32_t c_zero_point) { __m256i permute_mask_v = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00); int len_aligned = len / (VLEN * 4) * (VLEN * 4); @@ -39,7 +38,6 @@ void ElementWiseSumAVX2( _mm_loadl_epi64(reinterpret_cast(input0 + j)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); in_v0 = _mm256_fmadd_ps( in_v0, @@ -51,7 +49,6 @@ void ElementWiseSumAVX2( _mm_loadl_epi64(reinterpret_cast(input1 + j)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); __m256 acc_v = _mm256_fmadd_ps(in_v1, _mm256_set1_ps(b_scale), in_v0); @@ -66,7 +63,6 @@ void ElementWiseSumAVX2( reinterpret_cast(input0 + j + VLEN)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); in_v0 = _mm256_fmadd_ps( in_v0, @@ -79,7 +75,6 @@ void ElementWiseSumAVX2( reinterpret_cast(input1 + j + VLEN)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); acc_v = _mm256_fmadd_ps(in_v1, _mm256_set1_ps(b_scale), in_v0); @@ -94,7 +89,6 @@ void ElementWiseSumAVX2( reinterpret_cast(input0 + j + 2 * VLEN)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); in_v0 = _mm256_fmadd_ps( in_v0, @@ -107,7 +101,6 @@ void ElementWiseSumAVX2( reinterpret_cast(input1 + j + 2 * VLEN)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); 
acc_v = _mm256_fmadd_ps(in_v1, _mm256_set1_ps(b_scale), in_v0); @@ -122,7 +115,6 @@ void ElementWiseSumAVX2( reinterpret_cast(input0 + j + 3 * VLEN)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); in_v0 = _mm256_fmadd_ps( in_v0, @@ -135,7 +127,6 @@ void ElementWiseSumAVX2( reinterpret_cast(input1 + j + 3 * VLEN)), // NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-narrowing-conversions) _mm_set1_epi8(0x80))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) _mm256_set1_epi32(0x80))); acc_v = _mm256_fmadd_ps(in_v1, _mm256_set1_ps(b_scale), in_v0); @@ -156,7 +147,6 @@ void ElementWiseSumAVX2( __m256i xyzw_clamped_v = _mm256_max_epu8( ReluFused ? _mm256_set1_epi8(c_zero_point) : _mm256_setzero_si256(), _mm256_min_epu8( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) xyzw_packed_v, _mm256_set1_epi8(static_cast(255)))); xyzw_clamped_v = @@ -170,7 +160,6 @@ void ElementWiseSumAVX2( float transformed_val = c_zero_point + acc / c_scale; output[j] = std::max( ReluFused ? c_zero_point : 0.0f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::min(255.0f, nearbyint(transformed_val))); } } diff --git a/caffe2/quantization/server/fb_fc_packed_op.cc b/caffe2/quantization/server/fb_fc_packed_op.cc index b10aa05feb64b..ce396cb867f03 100644 --- a/caffe2/quantization/server/fb_fc_packed_op.cc +++ b/caffe2/quantization/server/fb_fc_packed_op.cc @@ -81,7 +81,6 @@ void PackedGemmMatrixFP16ShapeFunctions::SetupExternalTensorDescriptor( blob->template Get>().get(); // setup data and type - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) desc->dataType = 10; // ONNXIFI_DATATYPE_FLOAT16 desc->buffer = reinterpret_cast(packed->pmat()); diff --git a/caffe2/quantization/server/fbgemm_pack_op.cc b/caffe2/quantization/server/fbgemm_pack_op.cc index 2e0c7f8614340..a8ac261020346 100644 --- a/caffe2/quantization/server/fbgemm_pack_op.cc +++ b/caffe2/quantization/server/fbgemm_pack_op.cc @@ -207,7 +207,6 @@ void QuantizeConvBias( CAFFE_ENFORCE_LE( std::abs( bias_qparams.scale - in_qparams.scale * filter_qparams[0].scale), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1e-4); CAFFE_ENFORCE_EQ(bias_qparams.zero_point, 0); b_quantized.resize(bias.numel()); @@ -235,7 +234,6 @@ void QuantizeConvBias( bdata[i], 0, in_qparams.scale * filter_qparams[g].scale, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32, true /* signed */); } else { @@ -295,7 +293,6 @@ bool FullyConnectedDNNLowPPackWeightOp::RunOnDevice() { } if (this->InputIsType(0) && quantize_channelwise_) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "Cannot do row-wise quantization for " @@ -313,7 +310,6 @@ bool FullyConnectedDNNLowPPackWeightOp::RunOnDevice() { K, N, W_quantized.data(), Y->qparams, *Y->column_offsets); if (this->debug_def().engine() == "DNNLOWP_ACC16") { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8) { Y->W_outlier.reset( ExtractOutlierMatrix(1, K, N, nbits_in_non_outlier_, W_quantized)); @@ -413,7 +409,6 @@ bool ConvDNNLowPPackWeightOp::TakeDepthWise3x3FastPath_() { // The number of input channels per group int C_per_group = filter.dim32(filter.dim() - 1); return this->debug_def().engine() != 
"DNNLOWP_ACC16" && group_ == M && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) C_per_group == 1 && group_ % 8 == 0 && this->kernel_.size() == 2 && kernel_h() == 3 && kernel_w() == 3 && stride_h() == stride_w() && (stride_h() == 1 || stride_h() == 2) && dilation_h() == 1 && @@ -428,7 +423,6 @@ bool ConvDNNLowPPackWeightOp::TakeDepthWise3x3x3FastPath_() { // The number of input channels per group int C_per_group = filter.dim32(filter.dim() - 1); bool ret = this->debug_def().engine() != "DNNLOWP_ACC16" && group_ == M && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) C_per_group == 1 && group_ % 8 == 0 && this->kernel_.size() == 3 && this->kernel_[0] == 3 && this->kernel_[1] == 3 && this->kernel_[2] == 3 && (this->stride_[0] == 1 || this->stride_[0] == 2) && @@ -482,7 +476,6 @@ fbgemm::conv_param_t<3> ConvDNNLowPPackWeightOp::GetConv3DParam_() { this->pads_[2], this->pads_[3], this->pads_[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) this->pads_[5]}); } @@ -540,7 +533,6 @@ bool ConvDNNLowPPackWeightOp::RunOnDevice() { if (this->InputIsType(FILTER) && quantize_groupwise_) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "Cannot do group-wise quantization for " @@ -595,7 +587,6 @@ bool ConvDNNLowPPackWeightOp::RunOnDevice() { // When nbits_in_non_outlier == 0, we fall back to acc32 if (this->debug_def().engine() == "DNNLOWP_ACC16" && !fallback_to_32_bit_accumulation) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8) { int outlier_cnt = CountOutliers( group_, kernel_dim, M, nbits_in_non_outlier_, W_quantized); diff --git a/caffe2/quantization/server/fully_connected_dnnlowp_acc16_op.cc b/caffe2/quantization/server/fully_connected_dnnlowp_acc16_op.cc index 1c2d7993eeb79..c1da34a552e02 100644 --- a/caffe2/quantization/server/fully_connected_dnnlowp_acc16_op.cc +++ b/caffe2/quantization/server/fully_connected_dnnlowp_acc16_op.cc @@ -69,10 +69,8 @@ bool FullyConnectedDNNLowPAcc16Op::RunOnDevice() { << nbits_in_non_outlier_; } } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!Wq_acc16_packed_ && nbits_in_non_outlier_ < 8) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "FC DNNLOWP_ACC16 using outlier-aware quantization"; @@ -148,7 +146,6 @@ bool FullyConnectedDNNLowPAcc16Op::RunOnDevice() { this->b_quantized_data_, N); // ncols per quant group - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8) { DoSpmdmOnInpBuffer< typename ReQuantizeOutput::outType, @@ -189,7 +186,6 @@ bool FullyConnectedDNNLowPAcc16Op::RunOnDevice() { this->b_dequantized_data_, N); // ncols per quant group - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nbits_in_non_outlier_ < 8) { DoSpmdmOnInpBuffer< typename ReQuantizeForFloat::outType, diff --git a/caffe2/quantization/server/fully_connected_dnnlowp_op.cc b/caffe2/quantization/server/fully_connected_dnnlowp_op.cc index 3617973f31042..637b2338c3d04 100644 --- a/caffe2/quantization/server/fully_connected_dnnlowp_op.cc +++ b/caffe2/quantization/server/fully_connected_dnnlowp_op.cc @@ -72,7 +72,6 @@ bool FullyConnectedDNNLowPOp::RunOnDevice() { dequantize_output_) { if (!GetCpuId().avx2()) { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { 
++log_occurences; LOG(WARNING) @@ -81,7 +80,6 @@ bool FullyConnectedDNNLowPOp::RunOnDevice() { } } else { static int log_occurences = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (log_occurences < 32) { ++log_occurences; LOG(WARNING) << "Falling back to the default Caffe2 operator because " @@ -809,7 +807,6 @@ bool FullyConnectedDNNLowPOp::GetQuantizationParameters_() { std::abs( bias_qparams.scale - in_qparams_[0].scale * filter_qparams_[0].scale), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1e-4); } CAFFE_ENFORCE_EQ(bias_qparams.zero_point, 0); @@ -831,7 +828,6 @@ bool FullyConnectedDNNLowPOp::GetQuantizationParameters_() { b_dequantized_data_[j], 0, in_qparams_[0].scale * filter_qparams_[0].scale, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32); } b_quantized_data_ = b_quantized_->data(); diff --git a/caffe2/quantization/server/fully_connected_fake_lowp_op_avx2.cc b/caffe2/quantization/server/fully_connected_fake_lowp_op_avx2.cc index 76c2cce91b29c..a95182a69c53d 100644 --- a/caffe2/quantization/server/fully_connected_fake_lowp_op_avx2.cc +++ b/caffe2/quantization/server/fully_connected_fake_lowp_op_avx2.cc @@ -32,7 +32,6 @@ void fp32_to_bfp16(const float* source, size_t size, float* dest) { __m256 wmask = _mm256_broadcast_ss(reinterpret_cast(&mask)); size_t i = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i < (size / 8) * 8; i += 8) { __m256 data = _mm256_loadu_ps(&source[i]); _mm256_storeu_ps(&dest[i], _mm256_and_ps(wmask, data)); @@ -53,7 +52,6 @@ void fp32_to_bfp24(const float* source, size_t size, float* dest) { __m256 wmask = _mm256_broadcast_ss(reinterpret_cast(&mask)); size_t i = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i < (size / 8) * 8; i += 8) { __m256 data = _mm256_loadu_ps(&source[i]); _mm256_storeu_ps(&dest[i], _mm256_and_ps(wmask, data)); @@ -74,7 +72,6 @@ void fp32_to_bfp14(const float* source, size_t size, float* dest) { __m256 wmask = _mm256_broadcast_ss((float*)(&mask)); size_t i = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i < (size / 8) * 8; i += 8) { __m256 data = _mm256_loadu_ps(&source[i]); _mm256_storeu_ps(&dest[i], _mm256_and_ps(wmask, data)); @@ -98,7 +95,6 @@ void fp32_to_bfp16_scalar(const float* source, size_t size, float* dest) { // convert to IEEE float16 void fp32_to_fp16(const float* source, size_t size, float* dest) { size_t i = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i < (size / 8) * 8; i += 8) { __m128i vin_fp16 = _mm256_cvtps_ph(_mm256_loadu_ps(&source[i]), 0); _mm256_storeu_ps(&dest[i], _mm256_cvtph_ps(vin_fp16)); @@ -122,7 +118,6 @@ void fp32_to_bfp16_round(const float* source, size_t size, float* dest) { __m256i wmask = _mm256_set1_epi32(mask); size_t i = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i < (size / 8) * 8; i += 8) { __m256i v32int = _mm256_add_epi32( _mm256_loadu_si256(reinterpret_cast(&source[i])), diff --git a/caffe2/quantization/server/group_norm_dnnlowp_op.cc b/caffe2/quantization/server/group_norm_dnnlowp_op.cc index 6f3b408a3228a..f628de8ceadf4 100644 --- a/caffe2/quantization/server/group_norm_dnnlowp_op.cc +++ b/caffe2/quantization/server/group_norm_dnnlowp_op.cc @@ -10,7 +10,6 @@ GroupNormDNNLowPOp::GroupNormDNNLowPOp( : BaseType(operator_def, ws), OP_SINGLE_ARG(bool, OpSchema::Arg_IsTest, is_test_, true), OP_SINGLE_ARG(int, "group", group_, 32), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) OP_SINGLE_ARG(float, "epsilon", epsilon_, 
1e-5), order_(StringToStorageOrder( this->template GetSingleArgument("order", "NCHW"))), @@ -106,7 +105,6 @@ void GroupNormDNNLowPOp::QuantizeGammaImpl() { gamma_dequantized_data_[i], gamma_qparams.zero_point, gamma_qparams.scale, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32); } } @@ -131,7 +129,6 @@ void GroupNormDNNLowPOp::QuantizeBeta() { CAFFE_ENFORCE_LE( std::abs( beta_qparams.scale - X_qparams.scale * gamma_qparams.scale), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1e-4); } CAFFE_ENFORCE_EQ(beta_qparams.zero_point, 0); @@ -156,7 +153,6 @@ void GroupNormDNNLowPOp::QuantizeBeta() { beta_dequantized_data_[i], beta_qparams.zero_point, beta_qparams.scale, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32); } } @@ -471,7 +467,6 @@ void GroupNormDNNLowPOp::ComputeQuantizedInvStd( #endif for (int i = 0; i < N; ++i) { rsig_quantized[i] = fbgemm::Quantize( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rsig[i], rsig_qparams_.zero_point, rsig_qparams_.scale, 32); } } @@ -494,7 +489,6 @@ void GroupNormDNNLowPOp::ComputeQuantizedFusedParams( internal_qparams_.scale = rsig_qparams_.scale * gamma_qparams.scale * X_qparams.scale; internal_qparams_.zero_point = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) internal_qparams_.precision = 32; const float real_multiplier = 1.0f / rsig_qparams_.scale; const auto beta_requantization_params = diff --git a/caffe2/quantization/server/group_norm_dnnlowp_op_avx2.cc b/caffe2/quantization/server/group_norm_dnnlowp_op_avx2.cc index 9c38b66cc372d..366bb6adf4b1b 100644 --- a/caffe2/quantization/server/group_norm_dnnlowp_op_avx2.cc +++ b/caffe2/quantization/server/group_norm_dnnlowp_op_avx2.cc @@ -44,7 +44,6 @@ void SegmentMomentsAVX2( int32_t sumsq_arr[8]; _mm256_storeu_si256(reinterpret_cast<__m256i*>(sum_arr), sum_v); _mm256_storeu_si256(reinterpret_cast<__m256i*>(sumsq_arr), sumsq_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 8; ++i) { *sum += static_cast(sum_arr[i]); *sumsq += static_cast(sumsq_arr[i]); diff --git a/caffe2/quantization/server/kl_minimization.cc b/caffe2/quantization/server/kl_minimization.cc index 6eb61c4330915..86944301b6465 100644 --- a/caffe2/quantization/server/kl_minimization.cc +++ b/caffe2/quantization/server/kl_minimization.cc @@ -164,7 +164,6 @@ TensorQuantizationParams KLDivergenceMinimization::ChooseQuantizationParams( selected_sum += bins[i]; } VLOG(2) << "best quantization range covers " - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << (double)selected_sum / total_sum * 100 << " %%"; VLOG(2) << "best start_bin " << best_start_bin << " nbins_selected " diff --git a/caffe2/quantization/server/lstm_unit_dnnlowp_op.cc b/caffe2/quantization/server/lstm_unit_dnnlowp_op.cc index 508aa1cedb189..bd957d7b25fd0 100644 --- a/caffe2/quantization/server/lstm_unit_dnnlowp_op.cc +++ b/caffe2/quantization/server/lstm_unit_dnnlowp_op.cc @@ -164,7 +164,6 @@ static void LSTMUnit( f_times_c_prev, 0, c_to_tanh_params.real_multiplier, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 32, true /*signed*/); int32_t c_temp = f_times_c_prev_rescaled + i_times_g; diff --git a/caffe2/quantization/server/norm_minimization.cc b/caffe2/quantization/server/norm_minimization.cc index c95f8a744f6fd..c429734d70e7c 100644 --- a/caffe2/quantization/server/norm_minimization.cc +++ b/caffe2/quantization/server/norm_minimization.cc @@ -87,7 +87,6 @@ TensorQuantizationParams NormMinimization::NonlinearQuantizationParamsSearch( 
CDF.push_back(sum); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double stepsize = 0.00001; // experiment on the granularity double alpha = 0.0f, beta = 1.0f; // lowerbound and upperbound int start_bin = 0; @@ -185,7 +184,6 @@ TensorQuantizationParams NormMinimization::NonlinearQuantizationParamsSearch( selected_sum += bins_f[i]; } VLOG(2) << "best quantization range covers " - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << (double)selected_sum / total * 100 << " %%"; // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) @@ -341,7 +339,6 @@ TensorQuantizationParams NormMinimization::ChooseQuantizationParams( for (int i = i_begin; i < i_end; ++i) { selected_sum += bins_f[i]; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VLOG(2) << "best quantization range covers " << selected_sum / total_sum * 100 << " %%"; diff --git a/caffe2/quantization/server/norm_minimization_avx2.cc b/caffe2/quantization/server/norm_minimization_avx2.cc index 1d4149b4fc04c..ca6d735f35664 100644 --- a/caffe2/quantization/server/norm_minimization_avx2.cc +++ b/caffe2/quantization/server/norm_minimization_avx2.cc @@ -16,10 +16,8 @@ float L2MinimizationKernelAVX2( int start_bin) { float norm = 0; constexpr int VLEN = 8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float norm_delta_default = dst_bin_width * dst_bin_width * dst_bin_width / 12; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) __m256i identity_v = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0); __m256 bin_width_v = _mm256_set1_ps(bin_width); __m256 bin_width_inverse_v = _mm256_set1_ps(1.0f / bin_width); diff --git a/caffe2/quantization/server/p99.cc b/caffe2/quantization/server/p99.cc index 5c5927cf62dc8..85f4c3567b15a 100644 --- a/caffe2/quantization/server/p99.cc +++ b/caffe2/quantization/server/p99.cc @@ -36,9 +36,7 @@ TensorQuantizationParams P99::ChooseQuantizationParams( sum += bins_f[i]; CDF[i] = (double)sum / total_sum; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CAFFE_ENFORCE(threshold_ > 0.5 && threshold_ < 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double left_quantile = (1.0f - threshold_) / 2.0f; double right_quantile = 1.0f - left_quantile; int i_begin = 0; diff --git a/caffe2/quantization/server/sigmoid.cc b/caffe2/quantization/server/sigmoid.cc index 1932f2318aa79..f29a9970ba2ec 100644 --- a/caffe2/quantization/server/sigmoid.cc +++ b/caffe2/quantization/server/sigmoid.cc @@ -14,7 +14,6 @@ Sigmoid::Sigmoid(double max_abs_err) : tanh_(max_abs_err) { in_qparams_.precision = num_in_bits_; // -2 x_sq is mapped to -127, 0 is mapped to 0, 2 x_sq is mapped to 127 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_qparams_.scale = 0.5 / ((1 << (num_out_bits_ - 1)) - 1); out_qparams_.zero_point = 0; out_qparams_.precision = num_out_bits_; diff --git a/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op.cc b/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op.cc index cda3d42958bfe..8bf9f88df9851 100644 --- a/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op.cc +++ b/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op.cc @@ -9,7 +9,6 @@ SpatialBNDNNLowPOp::SpatialBNDNNLowPOp( const OperatorDef& operator_def, Workspace* ws) : DNNLowPOp>(operator_def, ws), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) OP_SINGLE_ARG(double, "epsilon", epsilon_, 1e-5), order_(StringToStorageOrder( this->template GetSingleArgument("order", "NCHW"))) { @@ -126,7 +125,6 @@ bool 
SpatialBNDNNLowPOp::RunOnDevice() { std::max(quantized_down, out_qparams_.zero_point); } Y_data[(i * C + c) * HxW + j] = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fbgemm::clamp(quantized_down, 8); } } @@ -155,7 +153,6 @@ bool SpatialBNDNNLowPOp::RunOnDevice() { quantized_down = std::max(quantized_down, out_qparams_.zero_point); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Y_data[i * C + c] = fbgemm::clamp(quantized_down, 8); } } diff --git a/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op_avx2.cc b/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op_avx2.cc index cf33b834154a4..53cd7fadb06f1 100644 --- a/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op_avx2.cc +++ b/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op_avx2.cc @@ -140,7 +140,6 @@ void SpatialBNNHWCAVX2_uint8( if (ReluFused) { // static if quantized_down = std::max(quantized_down, out_zero_point); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Y_ptr[n + j] = fbgemm::clamp(quantized_down, 8); } } diff --git a/caffe2/quantization/server/tanh.cc b/caffe2/quantization/server/tanh.cc index adb701f75f152..5eea9e91257c0 100644 --- a/caffe2/quantization/server/tanh.cc +++ b/caffe2/quantization/server/tanh.cc @@ -80,9 +80,7 @@ Tanh::Tanh(double max_abs_err) : max_abs_err_(max_abs_err) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int i; for (i = x_pq_index_; i < in_pos_qmax; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double y_begin = tanh((i - 0.5) * in_qparams_.scale); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double y_end = tanh((i + 0.5) * in_qparams_.scale); int y_avg_q = nearbyint((y_begin + y_end) / 2 / out_qparams_.scale); diff --git a/caffe2/quantization/server/transpose.cc b/caffe2/quantization/server/transpose.cc index d5fbbe37c67ef..34c66fbc0ea43 100644 --- a/caffe2/quantization/server/transpose.cc +++ b/caffe2/quantization/server/transpose.cc @@ -9,7 +9,6 @@ void transpose_4rows(int N, const std::uint8_t* src, std::uint8_t* dst) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) int j; // vectorized loop - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (j = 0; j < N / 32 * 32; j += 32) { // a : a0 a1 ... a31 // b : b0 b1 ... 
b31 @@ -42,19 +41,15 @@ void transpose_4rows(int N, const std::uint8_t* src, std::uint8_t* dst) { // Storing with 128-bit lanes are permuted so that everything is in order _mm256_storeu_si256( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (__m256i*)(dst + j * M + 0 * 32), _mm256_permute2f128_si256(y0, y1, 0x20)); _mm256_storeu_si256( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (__m256i*)(dst + j * M + 1 * 32), _mm256_permute2f128_si256(y2, y3, 0x20)); _mm256_storeu_si256( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (__m256i*)(dst + j * M + 2 * 32), _mm256_permute2f128_si256(y0, y1, 0x31)); _mm256_storeu_si256( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (__m256i*)(dst + j * M + 3 * 32), _mm256_permute2f128_si256(y2, y3, 0x31)); } diff --git a/caffe2/queue/blobs_queue.cc b/caffe2/queue/blobs_queue.cc index eb5cc7a5b4656..4398cf8164813 100644 --- a/caffe2/queue/blobs_queue.cc +++ b/caffe2/queue/blobs_queue.cc @@ -79,7 +79,6 @@ bool BlobsQueue::blockingRead( // NOLINTNEXTLINE(clang-diagnostic-unused-variable) CAFFE_EVENT(stats_, queue_balance, -1); if (timeout_secs > 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::chrono::milliseconds timeout_ms(int(timeout_secs * 1000)); cv_.wait_for( g, timeout_ms, [this, canRead]() { return closing_ || canRead(); }); diff --git a/caffe2/serialize/inline_container.cc b/caffe2/serialize/inline_container.cc index 71da5121f314c..d74dfb4a4a1cc 100644 --- a/caffe2/serialize/inline_container.cc +++ b/caffe2/serialize/inline_container.cc @@ -189,7 +189,6 @@ size_t getPadding( padding_buf[0] = 'F'; padding_buf[1] = 'B'; padding_buf[2] = (uint8_t)padding_size; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) padding_buf[3] = (uint8_t)(padding_size >> 8); return padding_size_plus_fbxx; } @@ -262,7 +261,6 @@ std::tuple PyTorchStreamReader::getRecord(const std::string } static int64_t read_le_16(uint8_t* buf) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return buf[0] + (buf[1] << 8); } @@ -385,7 +383,6 @@ void PyTorchStreamWriter::writeEndOfFile() { // Rewrites version info std::string version = c10::to_string(version_); version.push_back('\n'); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (version_ >= 0x6L) { writeRecord(".data/version", version.c_str(), version.size()); } else { diff --git a/caffe2/serialize/inline_container_test.cc b/caffe2/serialize/inline_container_test.cc index 9fd07d94d9b61..3a9f511ee9cf9 100644 --- a/caffe2/serialize/inline_container_test.cc +++ b/caffe2/serialize/inline_container_test.cc @@ -12,7 +12,6 @@ namespace { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(PyTorchStreamWriterAndReader, SaveAndLoad) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t kFieldAlignment = 64L; std::ostringstream oss; diff --git a/caffe2/sgd/adadelta_op.cc b/caffe2/sgd/adadelta_op.cc index 98291acce3d20..48cbeb2b3ccb6 100644 --- a/caffe2/sgd/adadelta_op.cc +++ b/caffe2/sgd/adadelta_op.cc @@ -6,7 +6,6 @@ namespace caffe2 { REGISTER_CPU_OPERATOR(Adadelta, AdadeltaOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(Adadelta) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(3) .AllowInplace({{0, 0}, {1, 1}, {2, 2}}) @@ -44,7 +43,6 @@ and returns (new_param, new_moment, new_moment_delta). 
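(Editor's note: the shift-by-8 literals whose NOLINT markers are dropped in inline_container.cc above, in getPadding and read_le_16, are plain little-endian byte packing. A minimal standalone sketch of that round trip follows; write_le_16 is an illustrative helper name, not an API in the diff.)

#include <cassert>
#include <cstdint>

// Little-endian 16-bit encode/decode in the spirit of getPadding()/read_le_16().
static void write_le_16(uint8_t* buf, uint16_t v) {
  buf[0] = static_cast<uint8_t>(v & 0xff); // low byte first
  buf[1] = static_cast<uint8_t>(v >> 8);   // then high byte
}

static uint16_t read_le_16(const uint8_t* buf) {
  return static_cast<uint16_t>(buf[0] + (buf[1] << 8));
}

int main() {
  uint8_t buf[2];
  write_le_16(buf, 0x1234);
  assert(read_le_16(buf) == 0x1234);
  return 0;
}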
REGISTER_CPU_OPERATOR(SparseAdadelta, SparseAdadeltaOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdadelta) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(3) .EnforceOneToOneInplace() @@ -61,7 +59,6 @@ runs the dense AdaDelta update on (param, grad, moment[indices], .Input(2, "moment_delta", "Average of squared parameter updates") .Input(3, "indices", "Sparse indices") .Input(4, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") .Output(0, "output_param", "Updated parameters") .Output(1, "output_moment", "Updated average squared gradient") diff --git a/caffe2/sgd/adagrad_fused.cc b/caffe2/sgd/adagrad_fused.cc index 78339fe790448..7fd10a67fb6c2 100644 --- a/caffe2/sgd/adagrad_fused.cc +++ b/caffe2/sgd/adagrad_fused.cc @@ -24,7 +24,6 @@ struct adagrad_update_prefetch_inlined { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagradFusedWithSparseLengthsSumGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -49,7 +48,6 @@ SparseLengthsIndicesInGradientSumGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -71,7 +69,6 @@ REGISTER_CPU_OPERATOR( // SparseAdagradFusedWithSparseLengthsSumGradient op // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagradFusedWithSparseLengthsSumGradientApprox) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -96,7 +93,6 @@ SparseLengthsIndicesInGradientSumGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -116,7 +112,6 @@ REGISTER_CPU_OPERATOR( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagradFusedWithSparseLengthsMeanGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -141,7 +136,6 @@ SparseLengthsIndicesInGradientMeanGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -163,7 +157,6 @@ REGISTER_CPU_OPERATOR( // SparseAdagradFusedWithSparseLengthsMeanGradient op // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagradFusedWithSparseLengthsMeanGradientApprox) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -188,7 +181,6 @@ SparseLengthsIndicesInGradientMeanGradient operator. 
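(Editor's note: for context on the fused SparseAdagrad operators whose schemas are edited above, the per-element update they apply to each indexed row is roughly the scalar sketch below; names are illustrative and the production kernels are the vectorized fbgemm/perfkernels ones. The 2 multiplies, 3 adds, 1 divide and 1 sqrt per element are what the cost inference further down counts as 7 flops.)

#include <cmath>

// Scalar sketch of the SparseAdagrad update for the param slice selected by
// one index (simplified; lengths/weights handling omitted).
void sparse_adagrad_row(
    float* param_row,       // param slice for one index
    float* moment_row,      // running sum of squared gradients for that slice
    const float* grad_row,  // gradient slice
    int block_size,
    float lr,
    float epsilon) {
  for (int k = 0; k < block_size; ++k) {
    const float g = grad_row[k];
    moment_row[k] += g * g;  // 1 mul, 1 add
    param_row[k] +=
        lr * g / (std::sqrt(moment_row[k]) + epsilon);  // 1 mul, 1 div, 1 sqrt, 2 adds
  }
}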
.Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -208,7 +200,6 @@ REGISTER_CPU_OPERATOR( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagradFusedWithSparseLengthsWeightedSumGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(7) .NumOutputs(3) .EnforceInplace({{0, 0}, {1, 1}}) @@ -235,10 +226,8 @@ SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient operator. "indices", "Integer vector containing indices of the first dimension of param for the slices that are being updated") .Input(4, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -258,7 +247,6 @@ REGISTER_CPU_OPERATOR( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(7) .NumOutputs(3) .EnforceInplace({{0, 0}, {1, 1}}) @@ -288,10 +276,8 @@ SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient operator. "indices", "Integer vector containing indices of the first dimension of param for the slices that are being updated") .Input(4, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "lengths", "Non negative vector with sum of elements equal to indices length") diff --git a/caffe2/sgd/adagrad_op.cc b/caffe2/sgd/adagrad_op.cc index 2513bf7563b9d..4bbfffa84d5d8 100644 --- a/caffe2/sgd/adagrad_op.cc +++ b/caffe2/sgd/adagrad_op.cc @@ -20,7 +20,6 @@ static OpSchema::Cost CostInferenceForAdagrad( // +3: updading moments // +3: updating effective lr (including 1 sqrt) // +2: updating params - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c.flops = grad_size * 10; uint64_t bytes_written = @@ -103,7 +102,6 @@ static OpSchema::Cost CostInferenceForSparseAdagrad( // See adagrad_op.h (note that decay is 1 for SparseAdagrad). // 2 multiplications, 3 additions, 1 division, and 1 sqrt // (optimistically count sqrt as one flop). 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c.flops = grad_size * 7; c.bytes_written = grad_size * (sizeof(param.data_type()) + sizeof(moment.data_type())); @@ -120,7 +118,6 @@ REGISTER_CPU_OPERATOR(SparseAdagrad, SparseAdagradOp); REGISTER_CPU_OPERATOR_WITH_ENGINE(SparseAdagrad, SIMD, SparseAdagradOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdagrad) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(2) .EnforceOneToOneInplace() @@ -164,7 +161,6 @@ static OpSchema::Cost CostInferenceForRowWiseSparseAdagrad( // +2: applying weight decay and add to grads // +2: updading moments // +5: updating params - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c.flops = n * 9; c.bytes_written = n * (sizeof(param.data_type()) + sizeof(moment.data_type())); @@ -175,7 +171,6 @@ static OpSchema::Cost CostInferenceForRowWiseSparseAdagrad( } else { // 5 per block (not counting index transforms) // 8 for each value of a block - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c.flops = n * (5 + (block_size * 8)); c.bytes_written = n * sizeof(moment.data_type()) + n * block_size * (param.data_type()); @@ -198,7 +193,6 @@ REGISTER_CPU_OPERATOR_WITH_ENGINE( RowWiseSparseAdagradOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdagrad) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(2) .EnforceOneToOneInplace() diff --git a/caffe2/sgd/adam_op.cc b/caffe2/sgd/adam_op.cc index ce2b0902a582c..dd74a937c9c19 100644 --- a/caffe2/sgd/adam_op.cc +++ b/caffe2/sgd/adam_op.cc @@ -6,7 +6,6 @@ namespace caffe2 { REGISTER_CPU_OPERATOR(Adam, AdamOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(Adam) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(3, 4) .AllowInplace({{0, 0}, {1, 1}, {2, 2}}) @@ -16,7 +15,6 @@ OPERATOR_SCHEMA(Adam) vector in_dev(def.input_size(), op_device); vector out_dev(def.output_size(), op_device); // ITER input lives on CPU - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_dev[5] = DeviceOption(); return std::make_pair(in_dev, out_dev); }) @@ -43,7 +41,6 @@ and returns (param_o, m1_o, m2_o, grad_o), in which grad_o is an optional output .Input(2, "moment_2", "Second moment history") .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "iter", "iteration number") .Output(0, "output_param", "Updated parameters") .Output(1, "output_moment_1", "Updated first moment") @@ -57,7 +54,6 @@ and returns (param_o, m1_o, m2_o, grad_o), in which grad_o is an optional output REGISTER_CPU_OPERATOR(SparseAdam, SparseAdamOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseAdam) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(7) .NumOutputs(3, 4) .EnforceInplace({{0, 0}, {1, 1}, {2, 2}}) @@ -67,7 +63,6 @@ OPERATOR_SCHEMA(SparseAdam) vector in_dev(def.input_size(), op_device); vector out_dev(def.output_size(), op_device); // ITER input lives on CPU - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_dev[6] = DeviceOption(); return std::make_pair(in_dev, out_dev); }) @@ -85,9 +80,7 @@ OPERATOR_SCHEMA(SparseAdam) .Input(2, "moment_2", "Second moment history") .Input(3, "indices", "Sparse indices") .Input(4, "grad", "Gradient computed") - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(6, "iter", "iteration number") .Output(0, "output_param", "Updated parameters") .Output(1, "output_moment_1", "Updated first moment") @@ -104,7 +97,6 @@ REGISTER_CPU_OPERATOR( RowWiseSparseAdamOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdam) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(7) .NumOutputs(3, 4) .EnforceInplace({{0, 0}, {1, 1}, {2, 2}}) @@ -114,7 +106,6 @@ OPERATOR_SCHEMA(RowWiseSparseAdam) vector in_dev(def.input_size(), op_device); vector out_dev(def.output_size(), op_device); // ITER input lives on CPU - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_dev[6] = DeviceOption(); return std::make_pair(in_dev, out_dev); }) @@ -135,9 +126,7 @@ OPERATOR_SCHEMA(RowWiseSparseAdam) .Input(2, "moment_2", "Second moment history") .Input(3, "indices", "Sparse indices") .Input(4, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(6, "iter", "iteration number") .Output(0, "output_param", "Updated parameters") .Output(1, "output_moment_1", "Updated first moment") diff --git a/caffe2/sgd/decay_adagrad_op.cc b/caffe2/sgd/decay_adagrad_op.cc index f73cb7f1ca700..8a77525c45851 100644 --- a/caffe2/sgd/decay_adagrad_op.cc +++ b/caffe2/sgd/decay_adagrad_op.cc @@ -6,7 +6,6 @@ namespace caffe2 { REGISTER_CPU_OPERATOR(DecayAdagrad, DecayAdagradOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(DecayAdagrad) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(3) .AllowInplace({{0, 0}, {1, 1}, {2, 2}}) @@ -16,7 +15,6 @@ OPERATOR_SCHEMA(DecayAdagrad) vector in_dev(def.input_size(), op_device); vector out_dev(def.output_size(), op_device); // ITER input lives on CPU - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_dev[5] = DeviceOption(); return std::make_pair(in_dev, out_dev); }) @@ -41,7 +39,6 @@ and returns (param_o, m1_o, m2_o) .Input(2, "moment_2", "Second moment history") .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "iter", "iteration number") .Output(0, "output_param", "Updated parameters") .Output(1, "output_moment_1", "Updated first moment") diff --git a/caffe2/sgd/ftrl_op.cc b/caffe2/sgd/ftrl_op.cc index 8b1213e5e3d4d..8a9053c409e17 100644 --- a/caffe2/sgd/ftrl_op.cc +++ b/caffe2/sgd/ftrl_op.cc @@ -143,7 +143,6 @@ SHOULD_NOT_DO_GRADIENT(Ftrl); REGISTER_CPU_OPERATOR(SparseFtrl, SparseFtrlOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseFtrl) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(4, 5) .NumOutputs(2) .EnforceInplace({{0, 0}, {1, 1}}); diff --git a/caffe2/sgd/lars_op.cc b/caffe2/sgd/lars_op.cc index 4dadd71711972..d4cae4b8121c8 100644 --- a/caffe2/sgd/lars_op.cc +++ b/caffe2/sgd/lars_op.cc @@ -25,7 +25,6 @@ REGISTER_CPU_OPERATOR(Lars, LarsOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(Lars) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .SetDoc(R"DOC( diff --git a/caffe2/sgd/momentum_sgd_op.cc b/caffe2/sgd/momentum_sgd_op.cc index f53baf4611906..b45f48eacc4a8 100644 --- 
a/caffe2/sgd/momentum_sgd_op.cc +++ b/caffe2/sgd/momentum_sgd_op.cc @@ -84,7 +84,6 @@ REGISTER_CPU_OPERATOR( SparseMomentumSGDUpdateOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseMomentumSGDUpdate) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(3) .AllowInplace({{0, 0}}) diff --git a/caffe2/sgd/rowwise_adagrad_fused.cc b/caffe2/sgd/rowwise_adagrad_fused.cc index 9f9733a7f8761..655eaac357114 100644 --- a/caffe2/sgd/rowwise_adagrad_fused.cc +++ b/caffe2/sgd/rowwise_adagrad_fused.cc @@ -4,7 +4,6 @@ namespace caffe2 { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdagradFusedWithSparseLengthsSumGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -29,7 +28,6 @@ SparseLengthsSumGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -65,7 +63,6 @@ REGISTER_CPU_OPERATOR_WITH_ENGINE( // RowWiseSparseAdagradFusedWithSparseLengthsSumGradient op // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdagradFusedWithSparseLengthsSumGradientApprox) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -90,7 +87,6 @@ SparseLengthsSumGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -124,7 +120,6 @@ REGISTER_CPU_OPERATOR_WITH_ENGINE( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -149,7 +144,6 @@ SparseLengthsMeanGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -182,7 +176,6 @@ REGISTER_CPU_OPERATOR_WITH_ENGINE( // RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient op // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdagradFusedWithSparseLengthsMeanGradientApprox) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(2) .EnforceOneToOneInplace() @@ -207,7 +200,6 @@ SparseLengthsMeanGradient operator. .Input(3, "grad", "Gradient computed") .Input(4, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -241,7 +233,6 @@ REGISTER_CPU_OPERATOR_WITH_ENGINE( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(7) .NumOutputs(3) .EnforceInplace({{0, 0}, {1, 1}}) @@ -268,10 +259,8 @@ operator. 
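(Editor's note: the RowWise variants above keep a single AdaGrad moment per embedding row rather than one per element; a simplified scalar sketch under that assumption, with illustrative names, is shown here. The fused/weighted kernels in the diff add lengths and per-sample weights on top of this.)

#include <cmath>

// Row-wise sparse AdaGrad: one second-moment scalar per row (sketch only).
void rowwise_sparse_adagrad_row(
    float* param_row,
    float* moment,          // single accumulator for this row
    const float* grad_row,
    int block_size,
    float lr,
    float epsilon) {
  float sumsq = 0.0f;
  for (int k = 0; k < block_size; ++k) {
    sumsq += grad_row[k] * grad_row[k];
  }
  *moment += sumsq / block_size;  // mean squared gradient for the row
  const float step = lr / (std::sqrt(*moment) + epsilon);
  for (int k = 0; k < block_size; ++k) {
    param_row[k] += step * grad_row[k];
  }
}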
"indices", "Integer vector containing indices of the first dimension of param for the slices that are being updated") .Input(4, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "lengths", "Non negative vector with sum of elements equal to indices length") @@ -302,7 +291,6 @@ REGISTER_CPU_OPERATOR_WITH_ENGINE( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA( RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(7) .NumOutputs(3) .EnforceInplace({{0, 0}, {1, 1}}) @@ -332,10 +320,8 @@ operator. "indices", "Integer vector containing indices of the first dimension of param for the slices that are being updated") .Input(4, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "learning rate") .Input( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, "lengths", "Non negative vector with sum of elements equal to indices length") diff --git a/caffe2/sgd/storm_op.cc b/caffe2/sgd/storm_op.cc index 5a47dbafd6078..dcebba3311fa1 100644 --- a/caffe2/sgd/storm_op.cc +++ b/caffe2/sgd/storm_op.cc @@ -6,7 +6,6 @@ namespace caffe2 { REGISTER_CPU_OPERATOR(Storm, StormOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(Storm) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(3) .AllowInplace({{0, 0}, {1, 1}, {2, 2}}) @@ -46,7 +45,6 @@ for new_moment by using the gradient from the current iteration. REGISTER_CPU_OPERATOR(SparseStorm, SparseStormOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseStorm) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(6) .NumOutputs(3) .EnforceOneToOneInplace() @@ -63,7 +61,6 @@ as in the dense case. .Input(2, "grad_sq_sum", "Sum of observed squared gradients.") .Input(3, "grad", "Gradients computed.") .Input(4, "indices", "Sparse indices.") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "lr", "Learning rate, k in the original paper.") .Output(0, "output_param", "Updated parameters.") .Output(1, "output_moment", "Updated moment.") diff --git a/caffe2/sgd/wngrad_op.cc b/caffe2/sgd/wngrad_op.cc index f5fd90537b5be..c3638ab0f4319 100644 --- a/caffe2/sgd/wngrad_op.cc +++ b/caffe2/sgd/wngrad_op.cc @@ -41,7 +41,6 @@ Optionally returns effective_lr and update as well. 
REGISTER_CPU_OPERATOR(SparseWngrad, SparseWngradOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SparseWngrad) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(2) .EnforceOneToOneInplace() diff --git a/caffe2/sgd/yellowfin_op.cc b/caffe2/sgd/yellowfin_op.cc index 7be299c2dfa01..490a53d95a8e3 100644 --- a/caffe2/sgd/yellowfin_op.cc +++ b/caffe2/sgd/yellowfin_op.cc @@ -6,12 +6,9 @@ namespace caffe2 { REGISTER_CPU_OPERATOR(YellowFin, YellowFinOp); // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(YellowFin) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(10) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumOutputs(8) .AllowInplace( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}, {6, 6}, {7, 7}}) .SetDoc(R"DOC( @@ -30,26 +27,18 @@ nesterov and zero_debias for debias of moving average. .Input(2, "lr", "Learning rate") .Input(3, "mu", "Momentum coefficient") .Input(4, "curv_win", "Memory for latest curvature ranges") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(5, "g_avg", "Moving average of gradient") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(6, "g2_avg", "Moving average of squared gradient") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(7, "scalars_memory", "Memory for stateful scalars") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(8, "grad", "Gradient computed") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Input(9, "iter", "Iteration number") .Output(0, "output_param", "Parameters to be updated") .Output(1, "output_moment", "Momentum") .Output(2, "output_lr", "Output learning rate") .Output(3, "output_mu", "Output momentum coefficient") .Output(4, "output_curv_win", "Output memory for latest curvature ranges") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Output(5, "output_g_avg", "Output moving average of gradient") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Output(6, "output_g2_avg", "Output moving average of squared gradient") - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .Output(7, "output_scalars_memory", "Output memory for stateful scalars") .Arg("beta", "Default 0.999") .Arg("curv_win_width", "Default 20") diff --git a/caffe2/share/contrib/depthwise/depthwise3x3_conv_op.cc b/caffe2/share/contrib/depthwise/depthwise3x3_conv_op.cc index caa9b5e83ceb2..2f32c70ea5f30 100644 --- a/caffe2/share/contrib/depthwise/depthwise3x3_conv_op.cc +++ b/caffe2/share/contrib/depthwise/depthwise3x3_conv_op.cc @@ -532,15 +532,10 @@ class Depthwise3x3ConvOp final : public ConvPoolOpBase { size_t(stride_w()), size_t(pad_t()), gmacs, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t.Seconds() * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0 * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0 * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0 * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0 * 1E3, gflops); CAFFE_ENFORCE(ret > 0); diff --git a/caffe2/share/contrib/depthwise/depthwise3x3_conv_op_test.cc b/caffe2/share/contrib/depthwise/depthwise3x3_conv_op_test.cc index ab7d80decd3a1..86e679b54a198 100644 --- a/caffe2/share/contrib/depthwise/depthwise3x3_conv_op_test.cc +++ b/caffe2/share/contrib/depthwise/depthwise3x3_conv_op_test.cc @@ -23,17 +23,14 @@ void AddNoiseInput( 
tensor->Resize(shape); math::RandGaussian( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor->size(), 0.0f, 3.0f, tensor->mutable_data(), &context); for (auto i = 0; i < tensor->size(); ++i) { tensor->mutable_data()[i] = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::min(-5.0f, std::max(5.0f, tensor->mutable_data()[i])); } } inline float relativeError(float a, float b) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::abs(a - b) / (0.5f * (std::abs(a) + std::abs(b))); } @@ -110,7 +107,6 @@ void compare( unique_ptr referenceOp(CreateOperator(referenceOpDef, &ws)); EXPECT_NE(nullptr, referenceOp.get()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { EXPECT_TRUE(depthwiseOp->Run()); } @@ -118,7 +114,6 @@ void compare( EXPECT_NE(nullptr, depthwiseOutputBlob); auto& depthwiseOutput = depthwiseOutputBlob->Get(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { EXPECT_TRUE(referenceOp->Run()); } @@ -173,14 +168,10 @@ void runConv( int strideH, int strideW, int group = 1, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int planesIn = randInt(1, 6), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int planesOut = randInt(1, 6), int n = randInt(1, 2)) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int h = randInt(20, 100); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int w = randInt(20, 100); // This pad restriction is imposed by NNPACK int padT = std::min(randInt(0, 3), kernelH - 1); @@ -203,9 +194,7 @@ void runConv( padB, padR, group, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.05f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.1f); } diff --git a/caffe2/share/contrib/nnpack/conv_op.cc b/caffe2/share/contrib/nnpack/conv_op.cc index ce5f95dd7a9bb..271f5a2c94778 100644 --- a/caffe2/share/contrib/nnpack/conv_op.cc +++ b/caffe2/share/contrib/nnpack/conv_op.cc @@ -421,15 +421,10 @@ bool NNPACKConvOp::RunOnDeviceWithOrderNCHW() { size_t(output_subsample.width), size_t(padding.top), gmacs, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) profile.total * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) profile.input_transform * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) profile.kernel_transform * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) profile.block_multiplication * 1E3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) profile.output_transform * 1E3, gflops); CAFFE_ENFORCE(ret > 0); diff --git a/caffe2/share/contrib/nnpack/nnpack_test.cc b/caffe2/share/contrib/nnpack/nnpack_test.cc index 665d7dac0e533..317928447e975 100644 --- a/caffe2/share/contrib/nnpack/nnpack_test.cc +++ b/caffe2/share/contrib/nnpack/nnpack_test.cc @@ -23,17 +23,14 @@ void AddNoiseInput( tensor->Resize(shape); math::RandGaussian( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor->size(), 0.0f, 3.0f, tensor->mutable_data(), &context); for (auto i = 0; i < tensor->size(); ++i) { tensor->mutable_data()[i] = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::min(-5.0f, std::max(5.0f, tensor->mutable_data()[i])); } } inline float relativeError(float a, float b) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::abs(a - b) / (0.5f * (std::abs(a) + std::abs(b))); } @@ -134,7 +131,6 @@ void compare( EXPECT_NE(nullptr, activationOp.get()); } - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { EXPECT_TRUE(nnpackOp->Run()); } @@ -142,7 +138,6 @@ void compare( EXPECT_NE(nullptr, nnpackOutputBlob); auto& nnpackOutput = nnpackOutputBlob->Get(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 10; ++i) { EXPECT_TRUE(referenceOp->Run()); if (activationOp) { @@ -204,16 +199,12 @@ void runConv( int strideW, int group = 1, std::string algo = "", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int planesIn = randInt(1, 6), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int planesOut = randInt(1, 6), int n = randInt(1, 2), std::string convolutionTransformStrategy = "COMPUTE", std::string activation = "identity") { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int h = randInt(20, 100); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int w = randInt(20, 100); // This pad restriction is imposed by NNPACK int padT = std::min(randInt(0, 3), kernelH - 1); @@ -239,9 +230,7 @@ void runConv( algo, convolutionTransformStrategy, activation, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.05f, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.1f); } @@ -267,9 +256,7 @@ TEST(NNPACK, Conv_3x3s1_precompute) { 1, group, "WINOGRAD", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) group * randInt(1, 8), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) group * randInt(1, 8), 1, "PRECOMPUTE"); @@ -294,9 +281,7 @@ TEST(NNPACK, Conv_3x3s1_FP16_precompute) { 1, group, "WINOGRAD_FP16", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) group * randInt(1, 8), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) group * randInt(1, 8), 1, "PRECOMPUTE"); @@ -306,7 +291,6 @@ TEST(NNPACK, Conv_3x3s1_FP16_precompute) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NNPACK, Conv_NxNs1) { for (int i = 0; i < kIters; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernel = randInt(2, 10); runConv(kernel, kernel, 1, 1); } @@ -316,9 +300,7 @@ TEST(NNPACK, Conv_NxNs1) { TEST(NNPACK, Conv_1x1s1) { for (int i = 0; i < kIters; ++i) { auto group = randInt(1, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inChannels = randInt(1, 8) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outChannels = randInt(1, 8) * group; auto n = 1; runConv(1, 1, 1, 1, group, "DIRECT", inChannels, outChannels, n); @@ -329,9 +311,7 @@ TEST(NNPACK, Conv_1x1s1) { TEST(NNPACK, ConvRelu_1x1s1) { for (int i = 0; i < kIters; ++i) { auto group = randInt(1, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inChannels = randInt(1, 8) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outChannels = randInt(1, 8) * group; auto n = 1; runConv( @@ -353,9 +333,7 @@ TEST(NNPACK, ConvRelu_1x1s1) { TEST(NNPACK, Conv_1x1s1_precompute) { for (int i = 0; i < kIters; ++i) { auto group = randInt(1, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inChannels = randInt(1, 8) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outChannels = randInt(1, 8) * group; auto n = 1; runConv( @@ -367,11 +345,8 @@ TEST(NNPACK, Conv_1x1s1_precompute) { TEST(NNPACK, Conv_NxNs_grouped) { for (int i = 0; i < kIters; ++i) { int group = randInt(2, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int iC = randInt(1, 6) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int 
oC = randInt(1, 6) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernel = randInt(2, 10); int n = randInt(1, 2); runConv(kernel, kernel, 1, 1, group, "", iC, oC, n); @@ -382,11 +357,8 @@ TEST(NNPACK, Conv_NxNs_grouped) { TEST(NNPACK, Conv_NxNs_grouped_precompute) { for (int i = 0; i < kIters; ++i) { int group = randInt(2, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int iC = randInt(1, 6) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int oC = randInt(1, 6) * group; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernel = randInt(2, 10); int n = randInt(1, 2); runConv(kernel, kernel, 1, 1, group, "", iC, oC, n, "PRECOMPUTE"); @@ -396,7 +368,6 @@ TEST(NNPACK, Conv_NxNs_grouped_precompute) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NNPACK, Conv_NxNsW) { for (int i = 0; i < 3; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernel = randInt(3, 5); int stride = randInt(1, kernel - 1); runConv(kernel, kernel, stride, stride); @@ -406,7 +377,6 @@ TEST(NNPACK, Conv_NxNsW) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NNPACK, ConvRelu_NxNsW) { for (int i = 0; i < 3; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernel = randInt(3, 5); int stride = randInt(1, kernel - 1); runConv(kernel, kernel, stride, stride, 1, "", 1, 1, 1, "COMPUTE", "Relu"); @@ -416,9 +386,7 @@ TEST(NNPACK, ConvRelu_NxNsW) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NNPACK, Conv_HxWsHxW) { for (int i = 0; i < 3; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernelH = randInt(2, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int kernelW = randInt(2, 5); int strideH = randInt(1, kernelH - 1); int strideW = randInt(1, kernelW - 1); diff --git a/caffe2/transforms/pattern_net_transform_test.cc b/caffe2/transforms/pattern_net_transform_test.cc index 6fef8ee49376d..d1c73082c78db 100644 --- a/caffe2/transforms/pattern_net_transform_test.cc +++ b/caffe2/transforms/pattern_net_transform_test.cc @@ -92,7 +92,6 @@ TEST(PatternNetTransformTest, TestGenerateTransform) { for (int i = 0; i < 4; i++) { EXPECT_FALSE(g.is_node_active(i)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 4; i < 8; i++) { EXPECT_TRUE(g.is_node_active(i)); } @@ -101,7 +100,6 @@ TEST(PatternNetTransformTest, TestGenerateTransform) { EXPECT_TRUE(g.node(5).children.count(6)); EXPECT_TRUE(g.node(6).children.count(7)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 4; i < 8; i++) { EXPECT_EQ(g.node(i).op.input().size(), 1); EXPECT_EQ(g.node(i).op.output().size(), 1); @@ -129,7 +127,6 @@ TEST(PatternNetTransformTest, TestRepeatedTransform) { NetDef netdef; AddOp(&netdef, "DummyCounterOp1", {"in"}, {"out"}); AddOp(&netdef, "DummyCounterOp2", {"out"}, {"out"}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 99; i++) { AddOp(&netdef, "DummyCounterOp1", {"out"}, {"out"}); AddOp(&netdef, "DummyCounterOp2", {"out"}, {"out"}); @@ -155,7 +152,6 @@ TEST(PatternNetTransformTest, TestRepeatedTransform) { NetDef replaced_netdef = g.GetNetDef(); EXPECT_EQ(replaced_netdef.op_size(), 200); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 200; i++) { EXPECT_EQ(replaced_netdef.op(i).type(), "DummyCounterOp3"); } @@ -350,13 +346,11 @@ TEST(PatternNetTransformTest, TestSingularArgumentMatching) { { auto arg = 
op->add_arg(); arg->set_name("stride_w"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) arg->set_i(5); } { auto arg = op->add_arg(); arg->set_name("stride_h"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) arg->set_i(5); } diff --git a/caffe2/utils/bench_utils.cc b/caffe2/utils/bench_utils.cc index 3b285a80b3bee..27cd621de4971 100644 --- a/caffe2/utils/bench_utils.cc +++ b/caffe2/utils/bench_utils.cc @@ -82,7 +82,6 @@ uint32_t wipe_cache() { CAFFE_ENFORCE(wipe_buffer != nullptr); } uint32_t hash = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (uint32_t i = 0; i * sizeof(uint32_t) < wipe_size; i += 8) { // NOLINTNEXTLINE(clang-analyzer-core.uninitialized.Assign) hash ^= wipe_buffer[i]; diff --git a/caffe2/utils/cpuid.cc b/caffe2/utils/cpuid.cc index fc0ecd7900b32..3491af746e08b 100644 --- a/caffe2/utils/cpuid.cc +++ b/caffe2/utils/cpuid.cc @@ -73,13 +73,11 @@ CpuId::CpuId() { uint32_t f1a; __asm__("cpuid" : "=a"(f1a), "=c"(f1c_), "=d"(f1d_) : "a"(1) : "ebx"); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (n >= 7) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) uint32_t f7a; __asm__("cpuid" : "=a"(f7a), "=b"(f7b_), "=c"(f7c_) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) : "a"(7), "c"(0) : "edx"); } diff --git a/caffe2/utils/fixed_divisor_test.cc b/caffe2/utils/fixed_divisor_test.cc index eb14e73c18cc5..3555c4c5ab0e3 100644 --- a/caffe2/utils/fixed_divisor_test.cc +++ b/caffe2/utils/fixed_divisor_test.cc @@ -49,11 +49,8 @@ TEST(FixedDivisorTest, FixedDivisorInt32Test) { std::uniform_int_distribution v_dist(0, kMax); std::uniform_int_distribution q_dist(1, kMax); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::uniform_int_distribution v_small_dist(0, 1000); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::uniform_int_distribution q_small_dist(1, 1000); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10000; ++i) { auto q = q_dist(rd); auto v = v_dist(rd); diff --git a/caffe2/utils/math_cpu.cc b/caffe2/utils/math_cpu.cc index 3c06ed4fc3b77..62c570f5bbe12 100644 --- a/caffe2/utils/math_cpu.cc +++ b/caffe2/utils/math_cpu.cc @@ -2042,7 +2042,6 @@ C10_EXPORT void Im2ColNd( pad[2], pad[3], pad[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pad[5], stride[0], stride[1], @@ -2383,7 +2382,6 @@ C10_EXPORT void Im2ColNd( pad[2], pad[3], pad[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pad[5], stride[0], stride[1], @@ -2672,7 +2670,6 @@ C10_EXPORT void Col2ImNd( pad[2], pad[3], pad[4], - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pad[5], stride[0], stride[1], diff --git a/caffe2/utils/math_test.cc b/caffe2/utils/math_test.cc index 254858e9430c5..07bf1faf8e8d9 100644 --- a/caffe2/utils/math_test.cc +++ b/caffe2/utils/math_test.cc @@ -19,11 +19,8 @@ namespace caffe2 { TEST(MathTest, GemmNoTransNoTrans) { DeviceOption option; CPUContext cpu_context(option); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor X(std::vector{5, 10}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor W(std::vector{10, 6}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor Y(std::vector{5, 6}, CPU); EXPECT_EQ(X.numel(), 50); EXPECT_EQ(W.numel(), 60); @@ -45,11 +42,8 @@ TEST(MathTest, GemmNoTransNoTrans) { math::Gemm( CblasNoTrans, CblasNoTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, X.data(), @@ -65,11 +59,8 @@ TEST(MathTest, GemmNoTransNoTrans) { math::Gemm( CblasNoTrans, CblasNoTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, X.data(), @@ -85,11 +76,8 @@ TEST(MathTest, GemmNoTransNoTrans) { math::Gemm( CblasNoTrans, CblasNoTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kPointFive, X.data(), @@ -107,11 +95,8 @@ TEST(MathTest, GemmNoTransNoTrans) { TEST(MathTest, GemmNoTransTrans) { DeviceOption option; CPUContext cpu_context(option); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor X(std::vector{5, 10}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor W(std::vector{6, 10}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor Y(std::vector{5, 6}, CPU); EXPECT_EQ(X.numel(), 50); EXPECT_EQ(W.numel(), 60); @@ -133,11 +118,8 @@ TEST(MathTest, GemmNoTransTrans) { math::Gemm( CblasNoTrans, CblasTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, X.data(), @@ -153,11 +135,8 @@ TEST(MathTest, GemmNoTransTrans) { math::Gemm( CblasNoTrans, CblasTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, X.data(), @@ -172,11 +151,8 @@ TEST(MathTest, GemmNoTransTrans) { math::Gemm( CblasNoTrans, CblasTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kPointFive, X.data(), @@ -201,13 +177,10 @@ class GemmBatchedTest void SetUp() override { cpu_context_ = make_unique(option_); ReinitializeTensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) &X_, std::vector{3, 5, 10}, at::dtype().device(CPU)); ReinitializeTensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) &W_, std::vector{3, 6, 10}, at::dtype().device(CPU)); ReinitializeTensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) &Y_, std::vector{3, 5, 6}, at::dtype().device(CPU)); math::Set( X_.numel(), 1, X_.mutable_data(), cpu_context_.get()); @@ -234,11 +207,8 @@ class GemmBatchedTest trans_X_ ? CblasTrans : CblasNoTrans, trans_W_ ? CblasTrans : CblasNoTrans, 3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, alpha, X_array.data(), @@ -259,11 +229,8 @@ class GemmBatchedTest trans_X_ ? CblasTrans : CblasNoTrans, trans_W_ ? 
CblasTrans : CblasNoTrans, 3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, alpha, X_data, @@ -301,30 +268,20 @@ class GemmBatchedTest // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_P(GemmBatchedTest, GemmBatchedFloatTest) { RunGemmBatched(1.0f, 0.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(10.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RunGemmBatched(1.0f, 0.5f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(15.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RunGemmBatched(0.5f, 1.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(20.0f); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_P(GemmBatchedTest, GemmStridedBatchedFloatTest) { RunGemmStridedBatched(1.0f, 0.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(10.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RunGemmStridedBatched(1.0f, 0.5f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(15.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RunGemmStridedBatched(0.5f, 1.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) VerifyOutput(20.0f); } @@ -340,11 +297,8 @@ INSTANTIATE_TEST_CASE_P( TEST(MathTest, GemvNoTrans) { DeviceOption option; CPUContext cpu_context(option); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor A(std::vector{5, 10}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor X(std::vector{10}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor Y(std::vector{5}, CPU); EXPECT_EQ(A.numel(), 50); EXPECT_EQ(X.numel(), 10); @@ -365,9 +319,7 @@ TEST(MathTest, GemvNoTrans) { const float kZero = 0.0; math::Gemv( CblasNoTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, A.data(), @@ -381,9 +333,7 @@ TEST(MathTest, GemvNoTrans) { // Test Accumulate math::Gemv( CblasNoTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, A.data(), @@ -397,9 +347,7 @@ TEST(MathTest, GemvNoTrans) { // Test Accumulate math::Gemv( CblasNoTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kPointFive, A.data(), @@ -416,11 +364,8 @@ TEST(MathTest, GemvNoTrans) { TEST(MathTest, GemvTrans) { DeviceOption option; CPUContext cpu_context(option); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor A(std::vector{6, 10}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor X(std::vector{6}, CPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor Y(std::vector{10}, CPU); EXPECT_EQ(A.numel(), 60); EXPECT_EQ(X.numel(), 6); @@ -441,9 +386,7 @@ TEST(MathTest, GemvTrans) { const float kZero = 0.0; math::Gemv( CblasTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, A.data(), @@ -457,9 +400,7 @@ TEST(MathTest, GemvTrans) { // Test Accumulate math::Gemv( CblasTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kOne, A.data(), @@ -473,9 +414,7 @@ 
TEST(MathTest, GemvTrans) { // Test Accumulate math::Gemv( CblasTrans, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, kPointFive, A.data(), @@ -491,9 +430,7 @@ TEST(MathTest, GemvTrans) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MathTest, FloatToHalfConversion) { float a = 1.0f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float b = 1.75f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float c = 128.125f; float converted_a = static_cast(at::Half(a)); @@ -555,18 +492,14 @@ class BroadcastTest : public testing::Test { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(BroadcastTest, BroadcastFloatTest) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RunBroadcastTest({2}, {2}, {1.0f, 2.0f}, {1.0f, 2.0f}); RunBroadcastTest({1}, {2}, {1.0f}, {1.0f, 1.0f}); RunBroadcastTest({1}, {2, 2}, {1.0f}, {1.0f, 1.0f, 1.0f, 1.0f}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RunBroadcastTest({2, 1}, {2, 2}, {1.0f, 2.0f}, {1.0f, 1.0f, 2.0f, 2.0f}); RunBroadcastTest( {2, 1}, {2, 2, 2}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.0f, 2.0f}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.0f, 1.0f, 2.0f, 2.0f, 1.0f, 1.0f, 2.0f, 2.0f}); } @@ -583,10 +516,8 @@ class RandFixedSumTest : public testing::Test { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(RandFixedSumTest, UpperBound) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector l(20); math::RandFixedSum( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, 1, 1000, 1000, l.data(), cpu_context_.get()); } diff --git a/caffe2/utils/murmur_hash3.cc b/caffe2/utils/murmur_hash3.cc index 347e1f9dcef0e..28b373165c98e 100644 --- a/caffe2/utils/murmur_hash3.cc +++ b/caffe2/utils/murmur_hash3.cc @@ -45,12 +45,10 @@ #endif inline uint32_t rotl32(uint32_t x, int8_t r) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (x << r) | (x >> (32 - r)); } inline uint64_t rotl64(uint64_t x, int8_t r) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (x << r) | (x >> (64 - r)); } @@ -77,15 +75,10 @@ FORCE_INLINE uint64_t getblock64(const uint64_t* p, int i) { // Finalization mix - force all bits of a hash block to avalanche FORCE_INLINE uint32_t fmix32(uint32_t h) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h ^= h >> 16; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h *= 0x85ebca6b; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h ^= h >> 13; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h *= 0xc2b2ae35; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h ^= h >> 16; return h; @@ -94,13 +87,10 @@ FORCE_INLINE uint32_t fmix32(uint32_t h) { //---------- FORCE_INLINE uint64_t fmix64(uint64_t k) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k ^= k >> 33; k *= BIG_CONSTANT(0xff51afd7ed558ccd); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k ^= k >> 33; k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k ^= k >> 33; return k; @@ -131,7 +121,6 @@ void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) { h1 ^= k1; h1 = ROTL32(h1, 13); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h1 = h1 * 5 + 0xe6546b64; } @@ -144,11 +133,9 @@ void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) 
{
   switch (len & 3) {
     case 3:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k1 ^= tail[2] << 16;
       FALLTHROUGH_INTENDED;
     case 2:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k1 ^= tail[1] << 8;
       FALLTHROUGH_INTENDED;
     case 1:
@@ -192,7 +179,6 @@ void MurmurHash3_x86_128(
   //----------
   // body
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   const uint32_t* blocks = (const uint32_t*)(data + nblocks * 16);
   for (int i = -nblocks; i; i++) {
@@ -208,7 +194,6 @@ void MurmurHash3_x86_128(
     h1 = ROTL32(h1, 19);
     h1 += h2;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     h1 = h1 * 5 + 0x561ccd1b;
     k2 *= c2;
@@ -218,7 +203,6 @@ void MurmurHash3_x86_128(
     h2 = ROTL32(h2, 17);
     h2 += h3;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     h2 = h2 * 5 + 0x0bcaa747;
     k3 *= c3;
@@ -228,7 +212,6 @@ void MurmurHash3_x86_128(
     h3 = ROTL32(h3, 15);
     h3 += h4;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     h3 = h3 * 5 + 0x96cd1c35;
     k4 *= c4;
@@ -238,14 +221,12 @@ void MurmurHash3_x86_128(
     h4 = ROTL32(h4, 13);
     h4 += h1;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     h4 = h4 * 5 + 0x32ac3b17;
   }
   //----------
   // tail
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   const uint8_t* tail = (const uint8_t*)(data + nblocks * 16);
   uint32_t k1 = 0;
@@ -253,21 +234,14 @@ void MurmurHash3_x86_128(
   uint32_t k3 = 0;
   uint32_t k4 = 0;
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   switch (len & 15) {
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 15:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k4 ^= tail[14] << 16;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 14:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k4 ^= tail[13] << 8;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 13:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k4 ^= tail[12] << 0;
       k4 *= c4;
       k4 = ROTL32(k4, 18);
@@ -275,24 +249,16 @@ void MurmurHash3_x86_128(
       h4 ^= k4;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 12:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k3 ^= tail[11] << 24;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 11:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k3 ^= tail[10] << 16;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 10:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k3 ^= tail[9] << 8;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 9:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k3 ^= tail[8] << 0;
       k3 *= c3;
       k3 = ROTL32(k3, 17);
@@ -300,22 +266,15 @@ void MurmurHash3_x86_128(
       h3 ^= k3;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 8:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k2 ^= tail[7] << 24;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 7:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k2 ^= tail[6] << 16;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 6:
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       k2 ^= tail[5] << 8;
       FALLTHROUGH_INTENDED;
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     case 5:
       k2 ^= tail[4] << 0;
       k2 *= c2;
@@ -325,15 +284,12 @@ void MurmurHash3_x86_128(
       h2 ^= k2;
       FALLTHROUGH_INTENDED;
     case 4:
-      //
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= tail[3] << 24; FALLTHROUGH_INTENDED; case 3: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= tail[2] << 16; FALLTHROUGH_INTENDED; case 2: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= tail[1] << 8; FALLTHROUGH_INTENDED; case 1: @@ -409,7 +365,6 @@ void MurmurHash3_x64_128( h1 = ROTL64(h1, 27); h1 += h2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h1 = h1 * 5 + 0x52dce729; k2 *= c2; @@ -419,54 +374,37 @@ void MurmurHash3_x64_128( h2 = ROTL64(h2, 31); h2 += h1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h2 = h2 * 5 + 0x38495ab5; } //---------- // tail - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const uint8_t* tail = (const uint8_t*)(data + nblocks * 16); uint64_t k1 = 0; uint64_t k2 = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) switch (len & 15) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 15: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[14]) << 48; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 14: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[13]) << 40; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 13: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[12]) << 32; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 12: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[11]) << 24; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 11: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[10]) << 16; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 10: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[9]) << 8; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 9: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k2 ^= ((uint64_t)tail[8]) << 0; k2 *= c2; k2 = ROTL64(k2, 33); @@ -474,36 +412,25 @@ void MurmurHash3_x64_128( h2 ^= k2; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 8: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[7]) << 56; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 7: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[6]) << 48; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 6: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[5]) << 40; FALLTHROUGH_INTENDED; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 5: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[4]) << 32; FALLTHROUGH_INTENDED; case 4: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[3]) << 24; FALLTHROUGH_INTENDED; case 3: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[2]) << 16; FALLTHROUGH_INTENDED; case 2: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k1 ^= ((uint64_t)tail[1]) << 8; FALLTHROUGH_INTENDED; case 1: diff --git a/caffe2/utils/proto_utils.cc b/caffe2/utils/proto_utils.cc index 4760ca1a2c06b..c85aa9dd5ea44 100644 --- a/caffe2/utils/proto_utils.cc +++ 
b/caffe2/utils/proto_utils.cc @@ -210,7 +210,6 @@ C10_EXPORT bool ParseProtoFromLargeString(const string& str, Message* proto) { ::google::protobuf::io::ArrayInputStream input_stream(str.data(), str.size()); ::google::protobuf::io::CodedInputStream coded_stream(&input_stream); // Set PlanDef message size limit to 2G. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) setTotalBytesLimit(coded_stream, 2147483647, 512LL << 20); return proto->ParseFromCodedStream(&coded_stream); } @@ -229,7 +228,6 @@ C10_EXPORT void WriteProtoToTextFile( const Message& proto, const char* filename, bool throwIfError) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); FileOutputStream* output = new FileOutputStream(fd); if(!google::protobuf::TextFormat::Print(proto, output)) { @@ -258,7 +256,6 @@ C10_EXPORT bool ReadProtoFromBinaryFile( // A hack to manually allow using very large protocol buffers. #if GOOGLE_PROTOBUF_VERSION >= 3011000 // Only take one parameter since protobuf 3.11 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) coded_input->SetTotalBytesLimit(2147483647); #else // Total bytes hard limit / warning limit are set to 2GB and 512MB respectively. @@ -274,7 +271,6 @@ C10_EXPORT bool ReadProtoFromBinaryFile( C10_EXPORT void WriteProtoToBinaryFile( const MessageLite& proto, const char* filename) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644); CAFFE_ENFORCE_NE( fd, -1, "File cannot be created: ", filename, " error number: ", errno); diff --git a/caffe2/utils/simple_queue_test.cc b/caffe2/utils/simple_queue_test.cc index 0204d6caf1a67..b47dc6aba352f 100644 --- a/caffe2/utils/simple_queue_test.cc +++ b/caffe2/utils/simple_queue_test.cc @@ -30,7 +30,6 @@ TEST(SimpleQueueTest, SingleProducerSingleConsumer) { // NOLINTNEXTLINE(modernize-make-unique) gQueue.reset(new SimpleQueue()); std::thread consumer(ConsumerFunction, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { gQueue->Push(i); } @@ -44,7 +43,6 @@ TEST(SimpleQueueTest, SingleProducerDoubleConsumer) { gQueue.reset(new SimpleQueue()); std::thread consumer0(ConsumerFunction, 0); std::thread consumer1(ConsumerFunction, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { gQueue->Push(i); } @@ -58,9 +56,7 @@ TEST(SimpleQueueTest, SingleProducerDoubleConsumer) { TEST(SimpleQueueTest, DoubleProducerDoubleConsumer) { // NOLINTNEXTLINE(modernize-make-unique) gQueue.reset(new SimpleQueue()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::thread producer0(ProducerFunction, 0, 0, 10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::thread producer1(ProducerFunction, 0, 10, 10); std::thread consumer0(ConsumerFunction, 2); std::thread consumer1(ConsumerFunction, 3); diff --git a/caffe2/utils/threadpool/ThreadPool.cc b/caffe2/utils/threadpool/ThreadPool.cc index b9b09fb7ebd45..8d46f8c3f6ba7 100644 --- a/caffe2/utils/threadpool/ThreadPool.cc +++ b/caffe2/utils/threadpool/ThreadPool.cc @@ -56,12 +56,10 @@ size_t getDefaultNumThreads() { } break; #endif - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 5: /* 4+1 big.LITTLE */ numThreads = 4; break; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 6: /* 2+4 big.LITTLE */ numThreads = 2; @@ -71,7 +69,6 @@ size_t getDefaultNumThreads() { /* 4+4 big.LITTLE */ numThreads = 4; break; - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 10: /* 4+4+2 Min.Med.Max, running on Med cores */ numThreads = 4; diff --git a/modules/detectron/select_smooth_l1_loss_op.cc b/modules/detectron/select_smooth_l1_loss_op.cc index a142d8f498548..408e6b3abe487 100644 --- a/modules/detectron/select_smooth_l1_loss_op.cc +++ b/modules/detectron/select_smooth_l1_loss_op.cc @@ -68,7 +68,6 @@ tensor that encodes bounding box regression predictions. // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SelectSmoothL1LossGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .Input( diff --git a/modules/detectron/smooth_l1_loss_op.cc b/modules/detectron/smooth_l1_loss_op.cc index eccfbaf3717e6..a74baabf8f1e7 100644 --- a/modules/detectron/smooth_l1_loss_op.cc +++ b/modules/detectron/smooth_l1_loss_op.cc @@ -78,7 +78,6 @@ where N is the number of batch elements in the input predictions. // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SmoothL1LossGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .Input( diff --git a/modules/detectron/softmax_focal_loss_op.cc b/modules/detectron/softmax_focal_loss_op.cc index daf5db79df04a..e257c557b4384 100644 --- a/modules/detectron/softmax_focal_loss_op.cc +++ b/modules/detectron/softmax_focal_loss_op.cc @@ -80,7 +80,6 @@ See: https://arxiv.org/abs/1708.02002 for details. // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) OPERATOR_SCHEMA(SoftmaxFocalLossGradient) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .NumInputs(5) .NumOutputs(1) .Input(0, "scores", "See SoftmaxFocalLoss.") diff --git a/modules/observers/net_observer_reporter_print.cc b/modules/observers/net_observer_reporter_print.cc index a9538b4b41ac5..dca9cbba44bf1 100644 --- a/modules/observers/net_observer_reporter_print.cc +++ b/modules/observers/net_observer_reporter_print.cc @@ -24,7 +24,6 @@ void NetObserverReporterPrint::report( if ((p.first == "NET_DELAY") && (info.size() == 1)) { // for Net_delay perf caffe2_perf.push_back({{"type", "NET"}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"value", c10::to_string(p.second.latency * 1000)}, {"unit", "us"}, {"metric", "latency"}}); @@ -34,7 +33,6 @@ void NetObserverReporterPrint::report( c10::to_string( p.second.cpuMilliseconds / p.second.latency * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100), }, {"unit", "percent"}, @@ -45,7 +43,6 @@ void NetObserverReporterPrint::report( std::string args_str = get_op_args(p.second); std::string type = p.first; caffe2_perf.push_back({{"type", type}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"value", c10::to_string(p.second.latency * 1000)}, {"unit", "us"}, {"metric", "latency"}}); @@ -55,7 +52,6 @@ void NetObserverReporterPrint::report( c10::to_string( p.second.cpuMilliseconds / p.second.latency * - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100), }, {"unit", "percent"}, diff --git a/modules/observers/perf_observer.cc b/modules/observers/perf_observer.cc index c7bba8a7d3e8a..3bed1c0333964 100644 --- a/modules/observers/perf_observer.cc +++ b/modules/observers/perf_observer.cc @@ -132,7 +132,6 @@ double getWallClockTimeMilliseconds() { uint64_t now = mach_absolute_time(); now = now * info.numer / info.denom; // convert to nanoseconds - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return now / 1000000.0; #else return 
getClockTimeMilliseconds(CLOCK_MONOTONIC); @@ -169,13 +168,9 @@ double getCpuTimeMilliseconds() { return 0.0; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return ru.ru_utime.tv_sec * 1000.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) + ru.ru_utime.tv_usec / 1000.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) + ru.ru_stime.tv_sec * 1000.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) + ru.ru_stime.tv_usec / 1000.0; #else return getClockTimeMilliseconds(CLOCK_PROCESS_CPUTIME_ID); diff --git a/test/cpp/api/any.cpp b/test/cpp/api/any.cpp index 7c2a3e626b437..011234f8b2336 100644 --- a/test/cpp/api/any.cpp +++ b/test/cpp/api/any.cpp @@ -15,7 +15,6 @@ struct AnyModuleTest : torch::test::SeedingFixture {}; TEST_F(AnyModuleTest, SimpleReturnType) { struct M : torch::nn::Module { int forward() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 123; } }; @@ -87,7 +86,6 @@ TEST_F(AnyModuleTest, WrongArgumentType) { }; AnyModule any(M{}); ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) any.forward(5.0), "Expected argument #0 to be of type float, " "but received value of type double"); @@ -123,17 +121,14 @@ TEST_F(AnyModuleTest, WrongNumberOfArguments) { } struct M_default_arg_with_macro : torch::nn::Module { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double forward(int a, int b = 2, double c = 3.0) { return a + b + c; } protected: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) FORWARD_HAS_DEFAULT_ARGS({1, torch::nn::AnyValue(2)}, {2, torch::nn::AnyValue(3.0)}) }; struct M_default_arg_without_macro : torch::nn::Module { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double forward(int a, int b = 2, double c = 3.0) { return a + b + c; } @@ -152,7 +147,6 @@ TEST_F(AnyModuleTest, PassingArgumentsToModuleWithDefaultArgumentsInForwardMetho any.forward(), "M_default_arg_with_macro's forward() method expects at least 1 argument(s) and at most 3 argument(s), but received 0."); ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) any.forward(1, 2, 3.0, 4), "M_default_arg_with_macro's forward() method expects at least 1 argument(s) and at most 3 argument(s), but received 4."); } @@ -183,7 +177,6 @@ TEST_F(AnyModuleTest, PassingArgumentsToModuleWithDefaultArgumentsInForwardMetho "If " + module_name + "'s forward() method has default arguments, " "please make sure the forward() method is declared with a corresponding `FORWARD_HAS_DEFAULT_ARGS` macro."); ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) any.forward(1, 2, 3.0, 4), module_name + "'s forward() method expects 3 argument(s), but received 4."); } @@ -200,7 +193,6 @@ struct M : torch::nn::Module { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyModuleTest, GetWithCorrectTypeSucceeds) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AnyModule any(M{5}); ASSERT_EQ(any.get().value, 5); } @@ -212,14 +204,12 @@ TEST_F(AnyModuleTest, GetWithIncorrectTypeThrows) { return input; } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AnyModule any(M{5}); ASSERT_THROWS_WITH(any.get(), "Attempted to cast module"); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyModuleTest, PtrWithBaseClassSucceeds) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AnyModule any(M{5}); auto ptr = any.ptr(); ASSERT_NE(ptr, nullptr); @@ -228,7 +218,6 @@ TEST_F(AnyModuleTest, 
PtrWithBaseClassSucceeds) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyModuleTest, PtrWithGoodDowncastSuccceeds) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AnyModule any(M{5}); auto ptr = any.ptr(); ASSERT_NE(ptr, nullptr); @@ -242,7 +231,6 @@ TEST_F(AnyModuleTest, PtrWithBadDowncastThrows) { return input; } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AnyModule any(M{5}); ASSERT_THROWS_WITH(any.ptr(), "Attempted to cast module"); } @@ -259,7 +247,6 @@ TEST_F(AnyModuleTest, DefaultStateIsEmpty) { }; AnyModule any; ASSERT_TRUE(any.is_empty()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) any = std::make_shared(5); ASSERT_FALSE(any.is_empty()); ASSERT_EQ(any.get().value, 5); @@ -322,7 +309,6 @@ TEST_F(AnyModuleTest, ConstructsFromModuleHolder) { using torch::nn::ModuleHolder::get; }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AnyModule any(M{5}); ASSERT_EQ(any.get().value, 5); ASSERT_EQ(any.get()->value, 5); @@ -374,7 +360,6 @@ struct AnyValueTest : torch::test::SeedingFixture {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyValueTest, CorrectlyAccessesIntWhenCorrectType) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = make_value(5); ASSERT_NE(value.try_get(), nullptr); // const and non-const types have the same typeid(), @@ -422,7 +407,6 @@ TEST_F(AnyValueTest, CorrectlyAccessesReferencesWhenCorrectType) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyValueTest, TryGetReturnsNullptrForTheWrongType) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = make_value(5); ASSERT_NE(value.try_get(), nullptr); ASSERT_EQ(value.try_get(), nullptr); @@ -432,7 +416,6 @@ TEST_F(AnyValueTest, TryGetReturnsNullptrForTheWrongType) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyValueTest, GetThrowsForTheWrongType) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = make_value(5); ASSERT_NE(value.try_get(), nullptr); ASSERT_THROWS_WITH( @@ -447,7 +430,6 @@ TEST_F(AnyValueTest, GetThrowsForTheWrongType) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyValueTest, MoveConstructionIsAllowed) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = make_value(5); auto copy = make_value(std::move(value)); ASSERT_NE(copy.try_get(), nullptr); @@ -456,9 +438,7 @@ TEST_F(AnyValueTest, MoveConstructionIsAllowed) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyValueTest, MoveAssignmentIsAllowed) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = make_value(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto copy = make_value(10); copy = std::move(value); ASSERT_NE(copy.try_get(), nullptr); @@ -467,7 +447,6 @@ TEST_F(AnyValueTest, MoveAssignmentIsAllowed) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(AnyValueTest, TypeInfoIsCorrectForInt) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = make_value(5); ASSERT_EQ(value.type_info().hash_code(), typeid(int).hash_code()); } diff --git a/test/cpp/api/autograd.cpp b/test/cpp/api/autograd.cpp index 3761153966748..e6f1379f5abcd 100644 --- a/test/cpp/api/autograd.cpp +++ b/test/cpp/api/autograd.cpp @@ -91,7 +91,6 @@ TEST(AutogradAPITests, GradNonLeafTest) { Variable y = torch::randn({2, 2}, torch::requires_grad()); Variable 
grad_output = torch::ones({2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++ i) { auto res = simple_fn(x, y); auto input_grads = grad({res}, {x}, {grad_output}, {}, true); @@ -100,7 +99,6 @@ TEST(AutogradAPITests, GradNonLeafTest) { ASSERT_VARIABLE_EQ(input_grads[0], grad_x_expected); ASSERT_FALSE(x.grad().defined()); ASSERT_FALSE(y.grad().defined()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = x + 0.05 * input_grads[0]; } @@ -196,7 +194,6 @@ TEST(AutogradAPITests, AnomalyMode) { torch::autograd::DetectAnomalyGuard detect_anomaly; { WarningCapture warnings; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5.0}, torch::requires_grad()); auto y = x * x; auto z = y * y; @@ -209,7 +206,6 @@ TEST(AutogradAPITests, AnomalyMode) { WarningCapture warnings; // Double backward auto x = torch::tensor({0.0}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = x.pow(1.5); auto gr = // NOLINTNEXTLINE(bugprone-argument-comment) @@ -246,9 +242,7 @@ TEST(CustomAutogradTest, CustomFunction) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable y = torch::randn({5,5}, torch::requires_grad()); auto res = MyFunction::apply(x,2,y); auto go = torch::ones({}, torch::requires_grad()); @@ -351,7 +345,6 @@ TEST(CustomAutogradTest, NoGradCustomFunction) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({5,5}, torch::requires_grad()); { at::NoGradGuard no_grad; @@ -372,13 +365,11 @@ TEST(CustomAutogradTest, MarkDirty) { } static variable_list backward(AutogradContext *ctx, variable_list grad_output) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return { (grad_output[0]*2.0) }; } }; // Clone here because modifying leafs inplace is not allowed - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()).clone(); auto version_before = x._version(); auto out = MyFunction::apply(x); @@ -401,7 +392,6 @@ TEST(CustomAutogradTest, MarkNonDifferentiable) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()); auto mask = MyFunction::apply(x); ASSERT_FALSE(mask.requires_grad()); @@ -427,7 +417,6 @@ TEST(CustomAutogradTest, MarkNonDifferentiableMixed) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()); auto out = MyFunction::apply(x); @@ -451,7 +440,6 @@ TEST(CustomAutogradTest, MarkNonDifferentiableNone) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()); auto r = MyFunction::apply(x * x); (r * x).sum().backward(); @@ -470,9 +458,7 @@ TEST(CustomAutogradTest, ReturnLeafInplace) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable y = torch::randn({5,5}, torch::requires_grad()); auto out = Inplace::apply(x,y); @@ -497,7 +483,6 @@ TEST(CustomAutogradTest, ReturnDuplicateInplace) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()); ASSERT_THROWS_WITH(DoubleInplace::apply(x), "leaf Variable that requires grad"); @@ -520,7 +505,6 @@ TEST(CustomAutogradTest, ReturnDuplicate) { } }; - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()); auto out = DoubleDuplicate::apply(x); ASSERT_TRUE(torch::equal(out[0],out[1])); @@ -542,7 +526,6 @@ TEST(CustomAutogradTest, SaveEmptyForBackward) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}, torch::requires_grad()); auto y = MyFunction::apply(x); y.sum().backward(); @@ -557,16 +540,13 @@ TEST(CustomAutogradTest, InvalidGradients) { } static variable_list backward(AutogradContext *ctsx, variable_list grad_outputs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return {torch::randn(10, torch::dtype(torch::kFloat).requires_grad(true))}; } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::randn({5,5}, torch::dtype(torch::kFloat).requires_grad(true)); ASSERT_THROWS_WITH( MyFunction::apply(input1).sum().backward(), "expected shape"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::randn(10, torch::dtype(torch::kDouble).requires_grad(true)); } @@ -582,7 +562,6 @@ TEST(CustomAutogradTest, NoGradInput) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}, torch::requires_grad()); Variable y; { @@ -632,7 +611,6 @@ TEST(CustomAutogradTest, DepNoGrad) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(5, torch::requires_grad()); auto out = F1::apply(x); Variable &a = out[0], &b = out[1]; @@ -707,7 +685,6 @@ TEST(CustomAutogradTest, DeepReentrant) { }; // This should not stack overflow - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::tensor({8193}, torch::dtype(torch::kFloat).requires_grad(true)); DeepReenter::apply(v).sum().backward(); } @@ -749,9 +726,7 @@ TEST(CustomAutogradTest, ReentrantPriority) { } }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = MyFunction::apply(torch::tensor({6}, torch::dtype(torch::kFloat).requires_grad(true))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = Reenter::apply(torch::tensor({9}, torch::dtype(torch::kFloat).requires_grad(true))); auto v = a*b; v.backward(); @@ -768,9 +743,7 @@ TEST(CustomAutogradTest, ReentrantPriority) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CustomAutogradTest, Hooks) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::ones({5,5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable y = torch::ones({5,5})*4; y.set_requires_grad(true); @@ -787,19 +760,16 @@ TEST(CustomAutogradTest, Hooks) { auto hook_1 = z.register_hook([&bw_hook](Variable grad){ bw_hook(1, grad); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z.backward(torch::ones({5,5}), true, true); ASSERT_EQ(counter, 1); auto hook_2 = z.register_hook([&bw_hook](Variable grad){ bw_hook(2, grad); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z.backward(torch::ones({5,5}), true, true); ASSERT_EQ(counter, 4); z.remove_hook(hook_2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z.backward(torch::ones({5,5}), true, true); ASSERT_EQ(counter, 5); @@ -810,13 +780,11 @@ TEST(CustomAutogradTest, Hooks) { z.remove_hook(hook_1); z.register_hook(bw_hook_modify); y.grad().zero_(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z.backward(torch::ones({5,5}), true, false); ASSERT_VARIABLE_EQ(y.grad(), (x+1)*2); y.register_hook(bw_hook_modify); y.grad().zero_(); 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z.backward(torch::ones({5,5}), false, false); ASSERT_VARIABLE_EQ(y.grad(), (x+1)*4); @@ -842,9 +810,7 @@ TEST(CustomAutogradTest, HookNone) { was_called = true; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5,5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = torch::randn({5,5}); auto out = NoneGradientFunction::apply(x,y); @@ -858,15 +824,12 @@ TEST(CustomAutogradTest, HookNone) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CustomAutogradTest, BackwardWithInputs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable y = torch::randn({5,5}, torch::requires_grad()); Variable z = x * x + x * y + y * y; Variable x_grad_expected = 2 * x + y; Variable y_grad_expected = x + 2 * y; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z.backward(torch::ones({5, 5}), false, false, {x}); ASSERT_VARIABLE_EQ(x.grad(), x_grad_expected); @@ -875,9 +838,7 @@ TEST(CustomAutogradTest, BackwardWithInputs) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CustomAutogradTest, BackwardWithEmptyInputs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable y = torch::randn({5,5}, torch::requires_grad()); Variable z = x * x + x * y + y * y; Variable x_grad_expected = 2 * x + y; @@ -887,9 +848,7 @@ TEST(CustomAutogradTest, BackwardWithEmptyInputs) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(CustomAutogradTest, BackwardWithNonLeafInputs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable x = torch::randn({5,5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Variable y = torch::randn({5,5}, torch::requires_grad()); Variable z = x * x; Variable w = z + x * y + y * y; diff --git a/test/cpp/api/dataloader.cpp b/test/cpp/api/dataloader.cpp index 95aadab5365c6..c57f2c4aa7d6d 100644 --- a/test/cpp/api/dataloader.cpp +++ b/test/cpp/api/dataloader.cpp @@ -26,7 +26,6 @@ using namespace torch::data; // NOLINT const std::chrono::milliseconds kMillisecond(1); struct DummyDataset : datasets::Dataset { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) explicit DummyDataset(size_t size = 100) : size_(size) {} int get(size_t index) override { @@ -44,7 +43,6 @@ struct DummyDataset : datasets::Dataset { TEST(DataTest, DatasetCallsGetCorrectly) { DummyDataset d; std::vector batch = d.get_batch({0, 1, 2, 3, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expected = {1, 2, 3, 4, 5}; ASSERT_EQ(batch, expected); } @@ -172,7 +170,6 @@ TEST(DataTest, InfiniteStreamDataset) { auto data_loader = torch::data::make_data_loader( std::move(dataset), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::StreamSampler(/*epoch_size=*/39), kBatchSize); @@ -192,7 +189,6 @@ TEST(DataTest, InfiniteStreamDataset) { TEST(DataTest, NoSequencerIsIdentity) { using namespace torch::data::detail::sequencers; // NOLINT NoSequencer no_sequencer; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const auto value = no_sequencer.next([] { return 5; }).value(); ASSERT_EQ(value, 5); } @@ -283,7 +279,6 @@ TEST(DataTest, CollationReducesBatch) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, SequentialSamplerReturnsIndicesInOrder) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler sampler(10); ASSERT_EQ(sampler.next(3).value(), std::vector({0, 1, 2})); ASSERT_EQ(sampler.next(5).value(), std::vector({3, 4, 5, 6, 7})); @@ -293,7 +288,6 @@ TEST(DataTest, SequentialSamplerReturnsIndicesInOrder) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, SequentialSamplerReturnsLessValuesForLastBatch) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler sampler(5); ASSERT_EQ(sampler.next(3).value(), std::vector({0, 1, 2})); ASSERT_EQ(sampler.next(100).value(), std::vector({3, 4})); @@ -302,7 +296,6 @@ TEST(DataTest, SequentialSamplerReturnsLessValuesForLastBatch) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, SequentialSamplerResetsWell) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler sampler(5); ASSERT_EQ(sampler.next(5).value(), std::vector({0, 1, 2, 3, 4})); ASSERT_FALSE(sampler.next(2).has_value()); @@ -313,11 +306,9 @@ TEST(DataTest, SequentialSamplerResetsWell) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, SequentialSamplerResetsWithNewSizeWell) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler sampler(5); ASSERT_EQ(sampler.next(5).value(), std::vector({0, 1, 2, 3, 4})); ASSERT_FALSE(sampler.next(2).has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sampler.reset(7); ASSERT_EQ( sampler.next(7).value(), std::vector({0, 1, 2, 3, 4, 5, 6})); @@ -330,19 +321,16 @@ TEST(DataTest, SequentialSamplerResetsWithNewSizeWell) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, CanSaveAndLoadSequentialSampler) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler a(10); ASSERT_EQ(a.index(), 0); std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 0); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler a(10); a.next(3); a.next(4); @@ -350,7 +338,6 @@ TEST(DataTest, CanSaveAndLoadSequentialSampler) { std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::SequentialSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 7); @@ -359,7 +346,6 @@ TEST(DataTest, CanSaveAndLoadSequentialSampler) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, RandomSamplerReturnsIndicesInCorrectRange) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler sampler(10); std::vector indices = sampler.next(3).value(); @@ -368,7 +354,6 @@ TEST(DataTest, RandomSamplerReturnsIndicesInCorrectRange) { ASSERT_LT(i, 10); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) indices = sampler.next(5).value(); for (auto i : indices) { ASSERT_GE(i, 0); @@ -386,7 +371,6 @@ TEST(DataTest, RandomSamplerReturnsIndicesInCorrectRange) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, RandomSamplerReturnsLessValuesForLastBatch) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler sampler(5); 
ASSERT_EQ(sampler.next(3).value().size(), 3); ASSERT_EQ(sampler.next(100).value().size(), 2); @@ -395,7 +379,6 @@ TEST(DataTest, RandomSamplerReturnsLessValuesForLastBatch) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, RandomSamplerResetsWell) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler sampler(5); ASSERT_EQ(sampler.next(5).value().size(), 5); ASSERT_FALSE(sampler.next(2).has_value()); @@ -406,11 +389,9 @@ TEST(DataTest, RandomSamplerResetsWell) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, RandomSamplerResetsWithNewSizeWell) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler sampler(5); ASSERT_EQ(sampler.next(5).value().size(), 5); ASSERT_FALSE(sampler.next(2).has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sampler.reset(7); ASSERT_EQ(sampler.next(7).value().size(), 7); ASSERT_FALSE(sampler.next(2).has_value()); @@ -422,20 +403,17 @@ TEST(DataTest, RandomSamplerResetsWithNewSizeWell) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, SavingAndLoadingRandomSamplerYieldsSameSequence) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler a(10); std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler b(10); torch::load(b, stream); ASSERT_EQ(a.next(10).value(), b.next(10).value()); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler a(10); a.next(3); ASSERT_EQ(a.index(), 3); @@ -443,12 +421,10 @@ TEST(DataTest, SavingAndLoadingRandomSamplerYieldsSameSequence) { std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::RandomSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b_sequence = b.next(10).value(); ASSERT_EQ(b_sequence.size(), 7); ASSERT_EQ(a.next(10).value(), b_sequence); @@ -457,7 +433,6 @@ TEST(DataTest, SavingAndLoadingRandomSamplerYieldsSameSequence) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, StreamSamplerReturnsTheBatchSizeAndThenRemainder) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::StreamSampler sampler(/*epoch_size=*/100); ASSERT_EQ(sampler.next(10).value(), 10); ASSERT_EQ(sampler.next(2).value(), 2); @@ -468,7 +443,6 @@ TEST(DataTest, StreamSamplerReturnsTheBatchSizeAndThenRemainder) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, StreamSamplerResetsWell) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::StreamSampler sampler(/*epoch_size=*/5); ASSERT_EQ(sampler.next(5).value().size(), 5); ASSERT_FALSE(sampler.next(2).has_value()); @@ -479,11 +453,9 @@ TEST(DataTest, StreamSamplerResetsWell) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, StreamSamplerResetsWithNewSizeWell) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::StreamSampler sampler(/*epoch_size=*/5); ASSERT_EQ(sampler.next(5).value().size(), 5); ASSERT_FALSE(sampler.next(2).has_value()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sampler.reset(7); ASSERT_EQ(sampler.next(7).value().size(), 7); ASSERT_FALSE(sampler.next(2).has_value()); @@ -494,7 +466,6 @@ TEST(DataTest, StreamSamplerResetsWithNewSizeWell) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, TensorDatasetConstructsFromSingleTensor) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) datasets::TensorDataset dataset(torch::eye(5)); ASSERT_TRUE( torch::tensor({0, 0, 1, 0, 0}, torch::kFloat32).allclose(dataset.get(2))); @@ -502,7 +473,6 @@ TEST(DataTest, TensorDatasetConstructsFromSingleTensor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, TensorDatasetConstructsFromInitializerListOfTensors) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector vector = torch::eye(5).chunk(5); datasets::TensorDataset dataset(vector); ASSERT_TRUE( @@ -562,7 +532,6 @@ struct TensorStringDataset } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } }; @@ -604,14 +573,12 @@ struct DummyTensorDataset } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } }; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, NormalizeTransform) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto dataset = DummyTensorDataset().map(transforms::Normalize(0.5, 0.1)); // Works for zero (one implicit) channels @@ -630,7 +597,6 @@ TEST(DataTest, NormalizeTransform) { // Works for two channels with different moments dataset = DummyTensorDataset().map( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) transforms::Normalize({0.5, 1.5}, {0.1, 0.2})); output = dataset.get_batch(2); ASSERT_EQ(output.size(), 1); @@ -645,7 +611,6 @@ TEST(DataTest, NormalizeTransform) { << output[0].data; // Works for three channels with one moment value - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dataset = DummyTensorDataset().map(transforms::Normalize(1.5, 0.2)); output = dataset.get_batch(3); ASSERT_EQ(output.size(), 1); @@ -655,7 +620,6 @@ TEST(DataTest, NormalizeTransform) { // Works for three channels with different moments dataset = DummyTensorDataset().map( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) transforms::Normalize({0.5, 1.5, -1.5}, {0.1, 0.2, 0.2})); output = dataset.get_batch(3); ASSERT_EQ(output.size(), 1); @@ -692,7 +656,6 @@ struct UnCopyableDataset : public datasets::Dataset { } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } }; @@ -747,9 +710,7 @@ TEST(DataTest, QueuePushAndPopFromDifferentThreads) { { Queue queue; std::thread thread([&queue] { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(20 * kMillisecond); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) queue.push(123); }); ASSERT_EQ(queue.pop(), 123); @@ -834,7 +795,6 @@ struct UncopyableDataset : datasets::Dataset { return 1 + index; } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } }; @@ -917,7 +877,6 @@ struct TestIndexSampler : public samplers::Sampler { TEST(DataTest, CanUseCustomTypeAsIndexType) { const int kBatchSize = 10; auto data_loader = torch::data::make_data_loader( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TestIndexDataset(23), TestIndexSampler(23), kBatchSize); size_t i = 0; @@ -931,7 +890,6 @@ TEST(DataTest, CanUseCustomTypeAsIndexType) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, DistributedRandomSamplerSingleReplicaProduceCorrectSamples) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t sample_count = 10; samplers::DistributedRandomSampler drs(sample_count); @@ -951,7 +909,6 @@ TEST(DataTest, DistributedRandomSamplerSingleReplicaProduceCorrectSamples) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, DistributedRandomSamplerMultiReplicaProduceCorrectSamples) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t sample_count = 10; size_t num_replicas = 3; @@ -983,13 +940,11 @@ TEST(DataTest, DistributedRandomSamplerMultiReplicaProduceCorrectSamples) { for (size_t batch_size = 1; batch_size <= 3; ++batch_size) { size_t local_sample_count = static_cast(std::ceil(sample_count * 1.0 / num_replicas)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output1{0, 0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9}; test_function(true, local_sample_count, output1, batch_size); local_sample_count = static_cast(std::floor(sample_count * 1.0 / num_replicas)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output2{0, 1, 2, 3, 4, 5, 6, 7, 8}; test_function(false, local_sample_count, output2, batch_size); } @@ -998,19 +953,16 @@ TEST(DataTest, DistributedRandomSamplerMultiReplicaProduceCorrectSamples) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, CanSaveAndLoadDistributedRandomSampler) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedRandomSampler a(10); ASSERT_EQ(a.index(), 0); std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedRandomSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 0); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedRandomSampler a(10); a.next(3); a.next(4); @@ -1018,19 +970,16 @@ TEST(DataTest, CanSaveAndLoadDistributedRandomSampler) { std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedRandomSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 7); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedRandomSampler a(10); a.set_epoch(3); std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedRandomSampler b(10); torch::load(b, stream); ASSERT_EQ(b.epoch(), 3); @@ -1039,7 +988,6 @@ TEST(DataTest, CanSaveAndLoadDistributedRandomSampler) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, DistributedSequentialSamplerSingleReplicaProduceCorrectSamples) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t sample_count = 10; size_t batch_size = 3; samplers::DistributedSequentialSampler dss(sample_count); @@ -1060,7 +1008,6 @@ TEST(DataTest, DistributedSequentialSamplerSingleReplicaProduceCorrectSamples) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, DistributedSequentialSamplerMultiReplicaProduceCorrectSamples) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t sample_count = 10; size_t num_replicas = 3; @@ -1093,13 +1040,11 @@ TEST(DataTest, DistributedSequentialSamplerMultiReplicaProduceCorrectSamples) { for (size_t batch_size = 1; batch_size <= 3; ++batch_size) { size_t local_sample_count = static_cast(std::ceil(sample_count * 1.0 / num_replicas)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output1{0, 0, 1, 1, 2, 3, 4, 
5, 6, 7, 8, 9}; test_function(true, local_sample_count, output1, batch_size); local_sample_count = static_cast(std::floor(sample_count * 1.0 / num_replicas)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output2{0, 1, 2, 3, 4, 5, 6, 7, 8}; test_function(false, local_sample_count, output2, batch_size); } @@ -1108,19 +1053,16 @@ TEST(DataTest, DistributedSequentialSamplerMultiReplicaProduceCorrectSamples) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataTest, CanSaveAndLoadDistributedSequentialSampler) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedSequentialSampler a(10); ASSERT_EQ(a.index(), 0); std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedSequentialSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 0); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedSequentialSampler a(10); a.next(3); a.next(4); @@ -1128,7 +1070,6 @@ TEST(DataTest, CanSaveAndLoadDistributedSequentialSampler) { std::stringstream stream; torch::save(a, stream); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) samplers::DistributedSequentialSampler b(10); torch::load(b, stream); ASSERT_EQ(b.index(), 7); @@ -1149,7 +1090,6 @@ TEST(DataLoaderTest, DataLoaderOptionsDefaultAsExpected) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataLoaderTest, DataLoaderOptionsCoalesceOptionalValues) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto partial_options = DataLoaderOptions(32).workers(10); FullDataLoaderOptions full_options(partial_options); ASSERT_EQ(full_options.batch_size, 32); @@ -1185,7 +1125,6 @@ TEST( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataLoaderTest, IteratorsCompareEqualToThemselves) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto data_loader = torch::data::make_data_loader(DummyDataset(), 32); auto begin = data_loader->begin(); ASSERT_EQ(begin, begin); @@ -1195,7 +1134,6 @@ TEST(DataLoaderTest, IteratorsCompareEqualToThemselves) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataLoaderTest, ValidIteratorsCompareUnequalToEachOther) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto data_loader = torch::data::make_data_loader(DummyDataset(), 32); auto i = data_loader->begin(); auto j = data_loader->begin(); @@ -1206,7 +1144,6 @@ TEST(DataLoaderTest, ValidIteratorsCompareUnequalToEachOther) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataLoaderTest, SentinelIteratorsCompareEqualToEachOther) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto data_loader = torch::data::make_data_loader(DummyDataset(), 32); auto i = data_loader->end(); auto j = data_loader->end(); @@ -1279,7 +1216,6 @@ TEST(DataLoaderTest, CanUseIteratorAlgorithms) { return 1 + indices.front(); } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 10; } }; @@ -1356,7 +1292,6 @@ TEST(DataLoaderTest, DereferencingSentinelIteratorThrows) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(DataLoaderTest, YieldsCorrectBatchSize) { DummyDataset dataset; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto data_loader = torch::data::make_data_loader(dataset, 25); auto iterator = data_loader->begin(); ASSERT_EQ(iterator->size(), 25); @@ -1372,7 +1307,6 @@ 
TEST( ReturnsLastBatchWhenSmallerThanBatchSizeWhenDropLastIsFalse) { DummyDataset dataset; auto data_loader = torch::data::make_data_loader( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dataset, DataLoaderOptions(33).drop_last(false)); auto iterator = data_loader->begin(); ASSERT_EQ(iterator->size(), 33); @@ -1388,7 +1322,6 @@ TEST( DoesNotReturnLastBatchWhenSmallerThanBatchSizeWhenDropLastIsTrue) { DummyDataset dataset; auto data_loader = torch::data::make_data_loader( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dataset, DataLoaderOptions(33).drop_last(true)); auto iterator = data_loader->begin(); ASSERT_EQ(iterator->size(), 33); @@ -1408,12 +1341,10 @@ TEST(DataLoaderTest, RespectsTimeout) { D(std::shared_ptr b) : baton(std::move(b)) {} int get(size_t index) override { std::unique_lock lock(baton->mutex); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) baton->cv.wait_for(lock, 1000 * kMillisecond); return 0; } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } std::shared_ptr baton; @@ -1422,7 +1353,6 @@ TEST(DataLoaderTest, RespectsTimeout) { auto baton = std::make_shared(); auto data_loader = torch::data::make_data_loader( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) D{baton}, DataLoaderOptions().workers(1).timeout(10 * kMillisecond)); auto start = std::chrono::system_clock::now(); @@ -1589,13 +1519,11 @@ TEST(DataLoaderTest, TestExceptionsArePropagatedFromWorkers) { throw std::invalid_argument("badness"); } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } }; auto data_loader = torch::data::make_data_loader( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) D{}, samplers::RandomSampler(100), DataLoaderOptions().workers(2)); auto iterator = data_loader->begin(); @@ -1624,7 +1552,6 @@ TEST(DataLoaderTest, StatefulDatasetWithNoWorkers) { return torch::nullopt; } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } void reset() override { @@ -1637,7 +1564,6 @@ TEST(DataLoaderTest, StatefulDatasetWithNoWorkers) { auto data_loader = torch::data::make_data_loader(D{}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 10; ++i) { const auto number_of_iterations = std::distance(data_loader->begin(), data_loader->end()); @@ -1665,7 +1591,6 @@ TEST(DataLoaderTest, StatefulDatasetWithManyWorkers) { return torch::nullopt; } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } void reset() override { @@ -1681,7 +1606,6 @@ TEST(DataLoaderTest, StatefulDatasetWithManyWorkers) { torch::data::datasets::make_shared_dataset(), DataLoaderOptions().workers(kNumberOfWorkers)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 10; ++i) { const auto number_of_iterations = std::distance(data_loader->begin(), data_loader->end()); @@ -1707,7 +1631,6 @@ TEST(DataLoaderTest, StatefulDatasetWithMap) { return torch::nullopt; } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } void reset() override { @@ -1727,7 +1650,6 @@ TEST(DataLoaderTest, StatefulDatasetWithMap) { })), DataLoaderOptions{}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 10; ++i) { const auto number_of_iterations = std::distance(data_loader->begin(), data_loader->end()); @@ -1759,7 
+1681,6 @@ TEST(DataLoaderTest, StatefulDatasetWithCollate) { return torch::nullopt; } torch::optional size() const override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 100; } void reset() override { @@ -1948,7 +1869,6 @@ TEST(DataLoaderTest, ChunkDataSetGetBatchWithUnevenBatchSize) { using BatchType = datasets::ChunkDataReader::ChunkType; BatchType read_chunk(size_t chunk_index) override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchType batch_data(10, 0); return batch_data; } @@ -1986,11 +1906,9 @@ TEST(DataLoaderTest, ChunkDataSetGetBatchWithUnevenBatchSize) { ++iterator) { DummyChunkDataReader::BatchType batch = *iterator; auto batch_size = batch.size(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (batch_size == 17) { ASSERT_TRUE(batch.size() == 17 || batch.size() == 3); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (batch_size == 30) { ASSERT_TRUE(batch.size() == 20); } @@ -2239,7 +2157,6 @@ TEST(DataLoaderTest, ChunkDatasetLoad) { sampler, sampler, datasets::ChunkDatasetOptions( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) prefetch_count, batch_size, 20 /*cache size*/)); torch::load(*dataset, tempfile.name); @@ -2253,7 +2170,6 @@ TEST(DataLoaderTest, ChunkDatasetLoad) { // For the first epoch, the returned batch should be returned from the // third chunk, because the check point skipped the first two chunks. But // for the next epoch, it should start from the first batch. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int initial_value = epoch_index == 0 ? 15 : 0; for (auto iterator = data_loader->begin(); iterator != data_loader->end(); @@ -2261,7 +2177,6 @@ TEST(DataLoaderTest, ChunkDatasetLoad) { DummyChunkDataReader::BatchType batch = *iterator; std::vector expected_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t expected_size = (epoch_index > 0 && iteration_count == 3) ? 
5 : 10; expected_result.resize(expected_size); std::iota(expected_result.begin(), expected_result.end(), initial_value); diff --git a/test/cpp/api/expanding-array.cpp b/test/cpp/api/expanding-array.cpp index 2042deddc14c0..d0c09f0cc58c0 100644 --- a/test/cpp/api/expanding-array.cpp +++ b/test/cpp/api/expanding-array.cpp @@ -12,7 +12,6 @@ struct ExpandingArrayTest : torch::test::SeedingFixture {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ExpandingArrayTest, CanConstructFromInitializerList) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::ExpandingArray<5> e({1, 2, 3, 4, 5}); ASSERT_EQ(e.size(), 5); for (size_t i = 0; i < e.size(); ++i) { @@ -22,7 +21,6 @@ TEST_F(ExpandingArrayTest, CanConstructFromInitializerList) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ExpandingArrayTest, CanConstructFromVector) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::ExpandingArray<5> e(std::vector{1, 2, 3, 4, 5}); ASSERT_EQ(e.size(), 5); for (size_t i = 0; i < e.size(); ++i) { @@ -32,7 +30,6 @@ TEST_F(ExpandingArrayTest, CanConstructFromVector) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ExpandingArrayTest, CanConstructFromArray) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::ExpandingArray<5> e(std::array({1, 2, 3, 4, 5})); ASSERT_EQ(e.size(), 5); for (size_t i = 0; i < e.size(); ++i) { @@ -42,7 +39,6 @@ TEST_F(ExpandingArrayTest, CanConstructFromArray) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ExpandingArrayTest, CanConstructFromSingleValue) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::ExpandingArray<5> e(5); ASSERT_EQ(e.size(), 5); for (size_t i = 0; i < e.size(); ++i) { diff --git a/test/cpp/api/fft.cpp b/test/cpp/api/fft.cpp index 92e8da19aa0f5..eb024b135e054 100644 --- a/test/cpp/api/fft.cpp +++ b/test/cpp/api/fft.cpp @@ -34,7 +34,6 @@ torch::Tensor naive_dft(torch::Tensor x, bool forward=true) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, fft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(128, torch::kComplexDouble); auto actual = torch::fft::fft(t); auto expect = naive_dft(t); @@ -43,7 +42,6 @@ TEST(FFTTest, fft) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, fft_real) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(128, torch::kDouble); auto actual = torch::fft::fft(t); auto expect = torch::fft::fft(t.to(torch::kComplexDouble)); @@ -52,24 +50,18 @@ TEST(FFTTest, fft_real) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, fft_pad) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(128, torch::kComplexDouble); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto actual = torch::fft::fft(t, 200); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expect = torch::fft::fft(torch::constant_pad_nd(t, {0, 72})); ASSERT_TRUE(torch::allclose(actual, expect)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) actual = torch::fft::fft(t, 64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expect = torch::fft::fft(torch::constant_pad_nd(t, {0, -64})); ASSERT_TRUE(torch::allclose(actual, expect)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, fft_norm) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(128, torch::kComplexDouble); // NOLINTNEXTLINE(bugprone-argument-comment) auto unnorm = torch::fft::fft(t, /*n=*/{}, /*axis=*/-1, /*norm=*/{}); @@ -84,17 +76,14 @@ TEST(FFTTest, fft_norm) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, ifft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto T = torch::randn(128, torch::kComplexDouble); auto actual = torch::fft::ifft(T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expect = naive_dft(T, /*forward=*/false) / 128; ASSERT_TRUE(torch::allclose(actual, expect)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, fft_ifft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(77, torch::kComplexDouble); auto T = torch::fft::fft(t); ASSERT_EQ(T.size(0), 77); @@ -108,17 +97,14 @@ TEST(FFTTest, fft_ifft) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, rfft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(129, torch::kDouble); auto actual = torch::fft::rfft(t); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expect = torch::fft::fft(t.to(torch::kComplexDouble)).slice(0, 0, 65); ASSERT_TRUE(torch::allclose(actual, expect)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, rfft_irfft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(128, torch::kDouble); auto T = torch::fft::rfft(t); ASSERT_EQ(T.size(0), 65); @@ -132,21 +118,16 @@ TEST(FFTTest, rfft_irfft) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, ihfft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto T = torch::randn(129, torch::kDouble); auto actual = torch::fft::ihfft(T); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expect = torch::fft::ifft(T.to(torch::kComplexDouble)).slice(0, 0, 65); ASSERT_TRUE(torch::allclose(actual, expect)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(FFTTest, hfft_ihfft) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = torch::randn(64, torch::kComplexDouble); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t[0] = .5; // Must be purely real to satisfy hermitian symmetry - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto T = torch::fft::hfft(t, 127); ASSERT_EQ(T.size(0), 127); ASSERT_EQ(T.scalar_type(), torch::kDouble); diff --git a/test/cpp/api/functional.cpp b/test/cpp/api/functional.cpp index 7393ca1a4a875..768f3c45fb892 100644 --- a/test/cpp/api/functional.cpp +++ b/test/cpp/api/functional.cpp @@ -12,19 +12,13 @@ struct FunctionalTest : torch::test::SeedingFixture {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Conv1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(30, torch::dtype(torch::kFloat).requires_grad(true)).reshape({2, 3, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(18, torch::dtype(torch::kFloat).requires_grad(true)).reshape({2, 3, 3}); auto y = F::conv1d(x, weight, F::Conv1dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{ 312., 348., 384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 798., 915., 1032.}}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 852., 888., 924.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2553., 2670., 2787.}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -34,23 +28,15 @@ TEST_F(FunctionalTest, Conv1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Conv2dEven) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(75, torch::dtype(torch::kFloat).requires_grad(true)).reshape({1, 3, 5, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(54, torch::dtype(torch::kFloat).requires_grad(true)).reshape({2, 3, 3, 3}); auto y = F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{15219., 15570., 15921.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16974., 17325., 17676.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {18729., 19080., 19431.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{37818., 38898., 39978.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {43218., 44298., 45378.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {48618., 49698., 50778.}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -60,23 +46,15 @@ TEST_F(FunctionalTest, Conv2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Conv2dUneven) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(60, torch::dtype(torch::kFloat).requires_grad(true)).reshape({1, 3, 5, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(36, torch::dtype(torch::kFloat).requires_grad(true)).reshape({2, 3, 3, 2}); auto y = F::conv2d(x, weight, F::Conv2dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 5289., 5442., 5595.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5901., 6054., 6207.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6513., 6666., 6819.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{13227., 13704., 14181.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {15135., 15612., 16089.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {17043., 17520., 17997.}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -86,52 +64,32 @@ TEST_F(FunctionalTest, Conv2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Conv3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(375, torch::dtype(torch::kFloat).requires_grad(true)).reshape({1, 3, 5, 5, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(162, torch::dtype(torch::kFloat).requires_grad(true)).reshape({2, 3, 3, 3, 3}); auto y = F::conv3d(x, weight, F::Conv3dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{ 700704., 703944., 707184.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 716904., 720144., 723384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 733104., 736344., 739584.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 781704., 784944., 788184.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 797904., 801144., 
804384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 814104., 817344., 820584.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 862704., 865944., 869184.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 878904., 882144., 885384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 895104., 898344., 901584.}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{1724220., 1734021., 1743822.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1773225., 1783026., 1792827.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1822230., 1832031., 1841832.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1969245., 1979046., 1988847.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2018250., 2028051., 2037852.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2067255., 2077056., 2086857.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2214270., 2224071., 2233872.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2263275., 2273076., 2282877.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2312280., 2322081., 2331882.}}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -141,7 +99,6 @@ TEST_F(FunctionalTest, Conv3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MaxPool1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}); auto y = F::max_pool1d(x, F::MaxPool1dFuncOptions(3).stride(2)); @@ -152,7 +109,6 @@ TEST_F(FunctionalTest, MaxPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MaxPool2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}); auto y = F::max_pool2d(x, F::MaxPool2dFuncOptions(3).stride(2)); @@ -172,7 +128,6 @@ TEST_F(FunctionalTest, MaxPool2dBackward) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MaxPool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}); auto y = F::max_pool3d(x, F::MaxPool3dFuncOptions(3).stride(2)); @@ -183,7 +138,6 @@ TEST_F(FunctionalTest, MaxPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AvgPool1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}); auto y = F::avg_pool1d(x, F::AvgPool1dFuncOptions(3).stride(2)); @@ -194,7 +148,6 @@ TEST_F(FunctionalTest, AvgPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AvgPool2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}); auto y = F::avg_pool2d(x, F::AvgPool2dFuncOptions(3).stride(2)); @@ -205,7 +158,6 @@ TEST_F(FunctionalTest, AvgPool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AvgPool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}); auto y = F::avg_pool3d(x, F::AvgPool3dFuncOptions(3).stride(2)); @@ -216,7 +168,6 @@ TEST_F(FunctionalTest, AvgPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, FractionalMaxPool2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}); auto y = F::fractional_max_pool2d(x, F::FractionalMaxPool2dFuncOptions(3).output_size(2)); @@ -237,7 +188,6 
@@ TEST_F(FunctionalTest, FractionalMaxPool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, FractionalMaxPool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}); auto y = F::fractional_max_pool3d(x, F::FractionalMaxPool3dFuncOptions(3).output_size(2)); @@ -266,7 +216,6 @@ TEST_F(FunctionalTest, LPPool1d) { int stride = 2; int kernel_size = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}); auto y = F::lp_pool1d(x, F::LPPool1dFuncOptions(norm_type, kernel_size).stride(stride)); auto expected = (torch::pow(torch::tensor({{{1, 1}}}, torch::kFloat), norm_type) * kernel_size).pow(1. / norm_type); @@ -282,7 +231,6 @@ TEST_F(FunctionalTest, LPPool2d) { int stride = 2; std::vector kernel_size({2, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 2, 5}); auto y = F::lp_pool2d(x, F::LPPool2dFuncOptions(norm_type, kernel_size).stride(stride)); auto expected = (torch::pow(torch::tensor({{{1, 1}}}, torch::kFloat), norm_type) * (kernel_size[0] * kernel_size[1])).pow(1. / norm_type); @@ -294,26 +242,20 @@ TEST_F(FunctionalTest, LPPool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, CosineSimilarity) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{1, 2, 3}, {4, 5, 6}}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{1, 8, 3}, {2, 1, 6}}, torch::kFloat); auto output = F::cosine_similarity(input1, input2, F::CosineSimilarityFuncOptions().dim(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.8078, 0.8721}, torch::kFloat); ASSERT_TRUE(output.allclose(expected, 1e-04)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, SmoothL1LossDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = F::smooth_l1_loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.0233335, torch::kFloat); auto s = output.sum(); s.backward(); @@ -323,14 +265,11 @@ TEST_F(FunctionalTest, SmoothL1LossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, SmoothL1LossBeta) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.5, 10.0}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,bugprone-argument-comment) F::smooth_l1_loss(input, target, /*reduction=*/torch::kMean, /*beta=*/0.5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(1.67, torch::kFloat); auto s = output.sum(); s.backward(); @@ -340,14 +279,11 @@ TEST_F(FunctionalTest, SmoothL1LossBeta) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, SmoothL1LossNoReduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, 
torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = // NOLINTNEXTLINE(bugprone-argument-comment) F::smooth_l1_loss(input, target, /*reduction=*/torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.005, 0.02, 0.045}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -357,13 +293,10 @@ TEST_F(FunctionalTest, SmoothL1LossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, HuberLossDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = F::huber_loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.0233335, torch::kFloat); auto s = output.sum(); s.backward(); @@ -373,14 +306,10 @@ TEST_F(FunctionalTest, HuberLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, HuberLossDelta) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.5, 10.0}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = F::HuberLossFuncOptions().reduction(torch::kMean).delta(0.5); auto output = F::huber_loss(input, target, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(1.67 * 0.5, torch::kFloat); auto s = output.sum(); s.backward(); @@ -390,13 +319,10 @@ TEST_F(FunctionalTest, HuberLossDelta) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, HuberLossNoReduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto options = F::HuberLossFuncOptions().reduction(torch::kNone); auto output = F::huber_loss(input, target, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.005, 0.02, 0.045}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -406,12 +332,10 @@ TEST_F(FunctionalTest, HuberLossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, SoftMarginLossDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({2., 4., 1., 3.}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({-1., 1., 1., -1.}, torch::kFloat); auto output = F::soft_margin_loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({1.3767317}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -422,12 +346,10 @@ TEST_F(FunctionalTest, SoftMarginLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MultiLabelSoftMarginLossDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = 
torch::tensor({{0., 2., 2., 0.}, {2., 1., 0., 1.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{0., 0., 1., 0.}, {1., 0., 1., 1.}}, torch::kFloat); auto output = F::multilabel_soft_margin_loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.7608436}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -438,12 +360,10 @@ TEST_F(FunctionalTest, MultiLabelSoftMarginLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, SoftMarginLossNoReduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({2., 4., 1., 3.}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({-1., 1., 1., -1.}, torch::kFloat); auto output = F::soft_margin_loss(input, target, torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({2.1269281, 0.01814993, 0.3132617, 3.0485873}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -454,15 +374,12 @@ TEST_F(FunctionalTest, SoftMarginLossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MultiLabelSoftMarginLossWeightedNoReduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0., 2., 2., 0.}, {2., 1., 0., 1.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{0., 0., 1., 0.}, {1., 0., 1., 1.}}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({0.1, 0.6, 0.4, 0.8}, torch::kFloat); auto options = F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone).weight(weight); auto output = F::multilabel_soft_margin_loss(input, target, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.4876902, 0.3321295}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -473,13 +390,10 @@ TEST_F(FunctionalTest, MultiLabelSoftMarginLossWeightedNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, PairwiseDistance) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{1, 2, 3}, {4, 5, 6}}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{1, 8, 3}, {2, 1, 6}}, torch::kFloat); auto output = F::pairwise_distance(input1, input2, F::PairwiseDistanceFuncOptions().p(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({6, 6}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -487,19 +401,14 @@ TEST_F(FunctionalTest, PairwiseDistance) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, PDist) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{-1.0, -5.0, -1.0}, {2.0, 4.0, 6.0}}); auto output = F::pdist(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({11.7898}); ASSERT_TRUE(output.allclose(expected)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{1.0, -1.0}, {1.0, 3.0}, {3.0, 3.0}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = F::pdist(input, 1.5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({4.0, 4.8945, 2.0}); ASSERT_TRUE(output.allclose(expected)); 
} @@ -507,7 +416,6 @@ TEST_F(FunctionalTest, PDist) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AdaptiveMaxPool1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}); auto y = F::adaptive_max_pool1d(x, F::AdaptiveMaxPool1dFuncOptions(3)); @@ -518,7 +426,6 @@ TEST_F(FunctionalTest, AdaptiveMaxPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AdaptiveMaxPool2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}); auto y = F::adaptive_max_pool2d(x, F::AdaptiveMaxPool2dFuncOptions(3)); @@ -529,7 +436,6 @@ TEST_F(FunctionalTest, AdaptiveMaxPool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AdaptiveMaxPool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}); auto y = F::adaptive_max_pool3d(x, F::AdaptiveMaxPool3dFuncOptions(3)); @@ -540,7 +446,6 @@ TEST_F(FunctionalTest, AdaptiveMaxPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AdaptiveAvgPool1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}); auto y = F::adaptive_avg_pool1d(x, F::AdaptiveAvgPool1dFuncOptions(3)); @@ -551,7 +456,6 @@ TEST_F(FunctionalTest, AdaptiveAvgPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AdaptiveAvgPool2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}); auto y = F::adaptive_avg_pool2d(x, F::AdaptiveAvgPool2dFuncOptions(3)); @@ -562,7 +466,6 @@ TEST_F(FunctionalTest, AdaptiveAvgPool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AdaptiveAvgPool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}); auto y = F::adaptive_avg_pool3d(x, F::AdaptiveAvgPool3dFuncOptions(3)); @@ -573,9 +476,7 @@ TEST_F(FunctionalTest, AdaptiveAvgPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, L1Loss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = F::l1_loss(torch::sigmoid(input), target); auto s = output.sum(); @@ -587,9 +488,7 @@ TEST_F(FunctionalTest, L1Loss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MSELoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = F::mse_loss(torch::sigmoid(input), target); auto s = output.sum(); @@ -601,9 +500,7 @@ TEST_F(FunctionalTest, MSELoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, BCELoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = F::binary_cross_entropy(torch::sigmoid(input), target); auto s = output.sum(); @@ -616,9 +513,7 @@ TEST_F(FunctionalTest, BCELoss) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, KLDivLoss) { KLDivLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = F::kl_div(torch::sigmoid(input), target); auto s = output.sum(); @@ -630,13 +525,10 @@ TEST_F(FunctionalTest, KLDivLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, HingeEmbeddingLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{2, 22, 4}, {20, 10, 0}}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({{2, 6, 4}, {1, 10, 0}}, torch::kFloat); auto output = F::hinge_embedding_loss( input, target, F::HingeEmbeddingLossFuncOptions().margin(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({10}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); @@ -644,13 +536,10 @@ TEST_F(FunctionalTest, HingeEmbeddingLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, GridSample) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).view(std::vector({1, 1, 3, 3})); auto grid = torch::tensor({{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-2., -1.}, {-1., -1.}, {0., -1.}}, {{-1., 0.}, {0., 0.}, {1., 0.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0., 1.}, {1., 1.}, {2., 1.}} }}, torch::kFloat); @@ -660,7 +549,6 @@ TEST_F(FunctionalTest, GridSample) { .padding_mode(torch::kZeros) .align_corners(true); auto output = F::grid_sample(input, grid, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{0., 0., 1.}, {3., 4., 5.}, {7., 8., 0.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); @@ -671,7 +559,6 @@ TEST_F(FunctionalTest, GridSample) { .padding_mode(torch::kZeros) .align_corners(false); output = F::grid_sample(input, grid, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0., 0., 0.5}, {1.5, 4., 2.5}, {3.5, 2., 0.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); @@ -687,7 +574,6 @@ TEST_F(FunctionalTest, GridSample) { .padding_mode(torch::kZeros) .align_corners(true); output = F::grid_sample(input, grid, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0., 0., 1.}, {3., 4., 5.}, {7., 8., 0.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); @@ -698,7 +584,6 @@ TEST_F(FunctionalTest, GridSample) { .padding_mode(torch::kBorder) .align_corners(true); output = F::grid_sample(input, grid, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0., 0., 1.}, {3., 4., 5.}, {7., 8., 8.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); @@ -709,7 +594,6 @@ TEST_F(FunctionalTest, GridSample) { .padding_mode(torch::kReflection) .align_corners(true); output = F::grid_sample(input, grid, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{1., 0., 1.}, {3., 4., 5.}, {7., 8., 7.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); @@ -719,22 +603,17 @@ TEST_F(FunctionalTest, GridSample) { TEST_F(FunctionalTest, AffineGrid) { { // 2D affine. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto theta = torch::arange(1., 13) .view(std::vector({2, 2, 3})); auto size = std::vector({2, 3, 2, 2}); auto align_corners = true; auto output = F::affine_grid(theta, size, !align_corners); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{1.50, 1.50}, {2.50, 5.50}}, {{3.50, 6.50}, {4.50, 10.50}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{1.50, 1.50}, {8.50, 11.50}}, {{9.50, 12.50}, {16.50, 22.50}}}}); auto output_aligned = F::affine_grid(theta, size, align_corners); auto expected_aligned = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{0.0, -3.0}, {2.0, 5.0}}, {{4.0, 7.0}, {6.0, 15.0}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-6.0, -9.0}, {8.0, 11.0}}, {{10.0, 13.0}, {24.0, 33.0}}}}); ASSERT_TRUE(output.allclose(expected)); @@ -742,38 +621,25 @@ TEST_F(FunctionalTest, AffineGrid) { } { // 3D affine. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto theta = torch::arange(1., 13) .view(std::vector({1, 3, 4})); auto size = std::vector({1, 1, 3, 2, 2}); auto align_corners = true; auto output = F::affine_grid(theta, size, !align_corners); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{{0.5000, -2.1667, -4.8333}, {1.5000, 2.8333, 4.1667}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.5000, 3.8333, 5.1667}, {3.5000, 8.8333, 14.1667}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.5000, 2.5000, 2.5000}, {3.5000, 7.5000, 11.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4.5000, 8.5000, 12.5000}, {5.5000, 13.5000, 21.5000}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{4.5000, 7.1667, 9.8333}, {5.5000, 12.1667, 18.8333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{6.5000, 13.1667, 19.8333}, {7.5000, 18.1667, 28.8333}}}}}); auto output_aligned = F::affine_grid(theta, size, align_corners); auto expected_aligned = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::tensor({{{{{-2.0, -10.0, -18.0}, {0.0, 0.0, 0.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.0, 2.0, 2.0}, {4.0, 12.0, 20.0}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{1.0, -3.0, -7.0}, {3.0, 7.0, 11.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{5.0, 9.0, 13.0}, {7.0, 19.0, 31.0}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{4.0, 4.0, 4.0}, {6.0, 14.0, 22.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{8.0, 16.0, 24.0}, {10.0, 26.0, 42.0}}}}}); ASSERT_TRUE(output.allclose(expected, 1e-2)); @@ -837,16 +703,13 @@ TEST_F(FunctionalTest, AffineGrid) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MultiMarginLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({0.3, 0.3, 0.4}, torch::kFloat); auto input = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.2, 0.2, 0.6}, {0.1, 0.8, 0.1}, {0.9, 0.09, 0.01}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({2, 1, 0}, torch::kLong); auto output = F::multi_margin_loss( input, target, F::MultiMarginLossFuncOptions().margin(2).weight(weight)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.305556}, torch::kFloat); ASSERT_TRUE(output.allclose(expected, 1e-04)); @@ -854,15 
+717,11 @@ TEST_F(FunctionalTest, MultiMarginLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, CosineEmbeddingLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{2, 3, 4}, {6, 2, 4}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{2, 3, 5}, {9, 12, 0}}); auto target = torch::tensor({1, -1}); auto output = F::cosine_embedding_loss( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input1, input2, target, F::CosineEmbeddingLossFuncOptions().margin(0.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.1004}, torch::kFloat); ASSERT_TRUE(output.allclose(expected, 1e-4)); @@ -870,11 +729,9 @@ TEST_F(FunctionalTest, CosineEmbeddingLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MultiLabelMarginLossDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0.1, 0.2, 0.4, 0.8}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{3, 0, -1, 1}}, torch::kLong); auto output = F::multilabel_margin_loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.8500}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -885,12 +742,10 @@ TEST_F(FunctionalTest, MultiLabelMarginLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MultiLabelMarginLossNoReduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0.1, 0.2, 0.4, 0.8}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{3, 0, -1, 1}}, torch::kLong); auto output = F::multilabel_margin_loss( input, target, torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.8500}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -901,9 +756,7 @@ TEST_F(FunctionalTest, MultiLabelMarginLossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, TripletMarginLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto anchor = torch::tensor({{3., 3.}}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto positive = torch::tensor({{2., 2.}}, torch::kFloat); auto negative = torch::tensor({{0., 0.}}, torch::kFloat); auto output = F::triplet_margin_loss( @@ -921,7 +774,6 @@ TEST_F(FunctionalTest, TripletMarginWithDistanceLossDefaultParity) { std::vector reductions = {torch::kSum, torch::kMean, torch::kNone}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector margins = {0.5, 1.0, 1.5}; std::vector swaps = {true, false}; @@ -929,13 +781,10 @@ TEST_F(FunctionalTest, TripletMarginWithDistanceLossDefaultParity) { for (auto& margin : margins) { for (const auto& swap : swaps) { auto anchor = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto positive = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto negative = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto basicOptions = F::TripletMarginLossFuncOptions() @@ -970,18 +819,13 @@ 
TEST_F(FunctionalTest, TripletMarginWithDistanceLossDefaultParity) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, NLLLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{-0.1315, -3.1315, -2.5315}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-3.7038, -0.1038, -2.6038}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-2.3422, -1.3422, -0.4422}}, torch::kFloat); auto target = torch::tensor({1, 0, 2}, torch::kLong); auto output = F::nll_loss( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input, target, F::NLLLossFuncOptions().ignore_index(-100).reduction(torch::kMean)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(2.4258, torch::kFloat); ASSERT_TRUE(output.allclose(expected, 1e-04)); ASSERT_TRUE(F::nll_loss(input, target).allclose(expected, 1e-04)); @@ -989,13 +833,10 @@ TEST_F(FunctionalTest, NLLLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, CrossEntropy) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{3., 3.}, {2., 2.}}, torch::kFloat); auto target = torch::tensor({0, 1}, torch::kLong); auto output = F::cross_entropy( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input, target, F::CrossEntropyFuncOptions().ignore_index(-100).reduction(torch::kMean)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.6931, torch::kFloat); ASSERT_TRUE(output.allclose(expected, 1e-04)); @@ -1004,7 +845,6 @@ TEST_F(FunctionalTest, CrossEntropy) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MaxUnpool1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{2, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto indices = torch::tensor({{{1, 3, 4}}}, torch::kLong); auto y = F::max_unpool1d(x, indices, F::MaxUnpool1dFuncOptions(3)); @@ -1014,11 +854,9 @@ TEST_F(FunctionalTest, MaxUnpool1d) { y, torch::tensor({{{0, 2, 0, 4, 5, 0, 0, 0, 0}}}, torch::kFloat))); ASSERT_EQ(y.sizes(), std::vector({1, 1, 9})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = torch::tensor({{{2, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); indices = torch::tensor({{{1, 3, 4}}}, torch::kLong); y = F::max_unpool1d( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, indices, F::MaxUnpool1dFuncOptions(3).output_size(std::vector({1, 1, 9}))); ASSERT_EQ(y.ndimension(), 3); @@ -1026,7 +864,6 @@ TEST_F(FunctionalTest, MaxUnpool1d) { y, torch::tensor({{{0, 2, 0, 4, 5, 0, 0, 0, 0}}}, torch::kFloat))); ASSERT_EQ(y.sizes(), std::vector({1, 1, 9})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = torch::tensor({{{2, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); indices = torch::tensor({{{1, 3, 4}}}, torch::kLong); y = F::max_unpool1d(x, indices, F::MaxUnpool1dFuncOptions(3).stride(2).padding(1)); @@ -1040,30 +877,18 @@ TEST_F(FunctionalTest, MaxUnpool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MaxUnpool2d) { auto indices = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 
8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}}, torch::kLong); auto x = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{31, 33, 34}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {41, 43, 44}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {46, 48, 49}}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = F::max_unpool2d(x, indices, F::MaxUnpool2dFuncOptions(3).stride(2).padding(1)); @@ -1084,9 +909,7 @@ TEST_F(FunctionalTest, MaxUnpool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, MaxUnpool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto indices = torch::tensor({{{{{26}}}}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{{{26}}}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = F::max_unpool3d(x, indices, F::MaxUnpool3dFuncOptions(3)); @@ -1109,7 +932,6 @@ TEST_F(FunctionalTest, ELU) { const auto size = 3; for (const auto inplace : {false, true}) { for (const auto alpha : {0.0, 0.42, 1.0, 4.2, 42.42}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = torch::max(torch::zeros_like(x), x) + @@ -1133,7 +955,6 @@ TEST_F(FunctionalTest, SELU) { const double scale = 1.0507009873554804934193349852946; const double alpha = 1.6732632423543772848170429916717; for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5, 5}); auto expected = scale * (torch::max(torch::zeros_like(input), input) + @@ -1148,7 +969,6 @@ TEST_F(FunctionalTest, SELU) { } } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(0, 9, torch::kDouble).view({3, 3}); auto output = F::selu(input); auto expected = F::selu(input, false); @@ -1184,7 +1004,6 @@ TEST_F(FunctionalTest, GELU) { TEST_F(FunctionalTest, Hardshrink) { const auto size = 3; for (const auto lambda : {-4.2, -1.0, -0.42, 0.0, 0.42, 1.0, 4.2, 42.42}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}).set_requires_grad(true); auto y = F::hardshrink(x, F::HardshrinkFuncOptions().lambda(lambda)); @@ -1203,7 +1022,6 @@ TEST_F(FunctionalTest, Hardshrink) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, OneHot) { { // Test #1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 5, torch::kLong); auto y = F::one_hot(x % 3); auto expected = torch::tensor( @@ -1215,9 +1033,7 @@ TEST_F(FunctionalTest, OneHot) { } { // Test #2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 5, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = F::one_hot(x % 3, 5); auto expected = torch::tensor( {{1, 0, 0, 0, 0}, @@ -1233,7 +1049,6 @@ TEST_F(FunctionalTest, OneHot) { } { // Test #3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 6, torch::kLong); auto y = 
F::one_hot(x.view(std::vector({3, 2})) % 3); auto expected = torch::tensor( @@ -1254,7 +1069,6 @@ TEST_F(FunctionalTest, Hardtanh) { for (const auto min_val : {-4.2, -1.0, -0.42, 0.0}) { for (const auto max_val : {0.0, 0.42, 1.0, 4.2}) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = (x < min_val) * min_val + @@ -1280,7 +1094,6 @@ TEST_F(FunctionalTest, LeakyReLU) { const auto size = 3; for (const auto negative_slope : {0.0, 0.42, 1.0}) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = (x < 0) * x * negative_slope + (x >= 0) * x; @@ -1302,7 +1115,6 @@ TEST_F(FunctionalTest, LeakyReLU) { TEST_F(FunctionalTest, LogSigmoid) { const auto size = 3; LogSigmoid model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y = F::logsigmoid(x); @@ -1317,7 +1129,6 @@ TEST_F(FunctionalTest, LogSigmoid) { TEST_F(FunctionalTest, GumbelSoftmax) { // Test 1: No-options { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto logits = torch::randn({5}); int expected_count = 1; auto y_draw = F::gumbel_softmax(logits); @@ -1332,7 +1143,6 @@ TEST_F(FunctionalTest, GumbelSoftmax) { // Test 2: 1D shape, 0 and -1 dim for(const auto dim: {0, -1}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto logits = torch::randn({5}); int expected_count = 1; auto y_draw = F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(dim)); @@ -1346,9 +1156,7 @@ TEST_F(FunctionalTest, GumbelSoftmax) { } { // Test 3: 2D shape, 1 dim - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto logits = torch::randn({5, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int expected_count = 5; auto y_draw = F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(1)); @@ -1366,7 +1174,6 @@ TEST_F(FunctionalTest, GumbelSoftmax) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-magic-numbers) int expected[] = {5*3, 5*4}; for(auto i=0; i<2; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto logits = torch::randn({5, 4, 3}); int expected_count = expected[i]; auto y_draw = F::gumbel_softmax(logits, F::GumbelSoftmaxFuncOptions().hard(true).dim(dims[i])); @@ -1380,9 +1187,7 @@ TEST_F(FunctionalTest, GumbelSoftmax) { } { // Test 5: Straight through - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_draws = 100; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto logits = torch::tensor({{0.2, 0.8, 0.1}}); logits = logits.reshape({1, 3}); logits.requires_grad(); @@ -1412,7 +1217,6 @@ TEST_F(FunctionalTest, GumbelSoftmax) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Softmax) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(10, torch::kFloat).reshape({2, 5}); // NOLINTNEXTLINE(bugprone-argument-comment) auto output = F::softmax(input, /*dim=*/1); @@ -1426,7 +1230,6 @@ TEST_F(FunctionalTest, Softmax) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Softmin) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = 
torch::arange(10, torch::kFloat).reshape({2, 5}); // NOLINTNEXTLINE(bugprone-argument-comment) auto output = F::softmin(input, /*dim=*/1); @@ -1440,7 +1243,6 @@ TEST_F(FunctionalTest, Softmin) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, LogSoftmax) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(10, torch::kFloat).reshape({2, 5}); // NOLINTNEXTLINE(bugprone-argument-comment) auto output = F::log_softmax(input, /*dim=*/1); @@ -1465,9 +1267,7 @@ TEST_F(FunctionalTest, PReLU) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, LayerNorm) { const auto input = torch::randn({2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = F::layer_norm(input, F::LayerNormFuncOptions({2, 2}).eps(2e-5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = torch::layer_norm(input, {2, 2}, torch::Tensor(), torch::Tensor(), 2e-5); ASSERT_TRUE(torch::allclose(y, y_exp)); } @@ -1475,9 +1275,7 @@ TEST_F(FunctionalTest, LayerNorm) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, GroupNorm) { const auto input = torch::randn({2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = F::group_norm(input, F::GroupNormFuncOptions(2).eps(2e-5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = torch::group_norm(input, 2, torch::Tensor(), torch::Tensor(), 2e-5); ASSERT_TRUE(torch::allclose(y, y_exp)); } @@ -1551,7 +1349,6 @@ TEST_F(FunctionalTest, Linear) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Embedding) { const auto input = torch::tensor({{1,2,4,5}, {4,3,2,9}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::empty({10, 3}); torch::nn::init::normal_(weight); auto y = F::embedding(input, weight); @@ -1563,7 +1360,6 @@ TEST_F(FunctionalTest, Embedding) { TEST_F(FunctionalTest, EmbeddingBag) { const auto input = torch::tensor({1,2,4,5,4,3,2,9}, torch::kLong); auto offsets = torch::tensor({0,4}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::empty({10, 3}); torch::nn::init::normal_(weight); auto y = F::embedding_bag(input, weight, F::EmbeddingBagFuncOptions().mode(torch::kSum).offsets(offsets).padding_idx(4)); @@ -1580,25 +1376,20 @@ TEST_F(FunctionalTest, EmbeddingBag) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Bilinear) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{1, 2, 3}, {7, 6, 5}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{7, 4}, {8 ,9}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({{{2, 3}, {9, 7}, {8, 6}}}); auto bias = torch::tensor({1}); auto y_with_bias = F::bilinear(input1, input2, weight, bias); ASSERT_EQ(y_with_bias.ndimension(), 2); ASSERT_EQ(y_with_bias.sizes(), torch::IntArrayRef({2, 1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_with_bias_exp = torch::tensor({{449}, {1702}}).reshape({2, 1}); ASSERT_TRUE(torch::allclose(y_with_bias, y_with_bias_exp, 1e-4, 1e-7)); auto y_no_bias = F::bilinear(input1, input2, weight); ASSERT_EQ(y_no_bias.ndimension(), 2); ASSERT_EQ(y_no_bias.sizes(), torch::IntArrayRef({2, 1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_no_bias_exp = 
torch::tensor({{448, 1701}}).reshape({2, 1}); ASSERT_TRUE(torch::allclose(y_no_bias, y_no_bias_exp, 1e-4, 1e-7)); } @@ -1609,7 +1400,6 @@ TEST_F(FunctionalTest, Normalize) { {{{0.00000000, 0.10000000, 0.2000, 0.30000000, 0.40000000}, {0.14285715, 0.17142858, 0.2000, 0.22857143, 0.25714287}}}, torch::requires_grad().dtype(torch::kFloat)); { // Test #1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{{0, 1, 2, 3, 4}, {5, 6, 7, 8, 9}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto norm = F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1)); @@ -1623,9 +1413,7 @@ TEST_F(FunctionalTest, Normalize) { } { // Test #2 Check variations of optional arguments - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{{0, 1, 2, 3, 4}, {5, 6, 7, 8, 9}}}, torch::dtype(torch::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = torch::randn({1,2,5}, torch::dtype(torch::kFloat)); // non-null output argument F::normalize(input, F::NormalizeFuncOptions().p(1).dim(-1).out(output)); @@ -1648,7 +1436,6 @@ TEST_F(FunctionalTest, Normalize) { TEST_F(FunctionalTest, ReLU) { const auto size = 3; for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = (x < 0) * 0 + (x >= 0) * x; @@ -1677,7 +1464,6 @@ TEST_F(FunctionalTest, ReLU) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, ReLUDefaultOptions) { const auto size = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = (x < 0) * 0 + (x >= 0) * x; @@ -1692,10 +1478,8 @@ TEST_F(FunctionalTest, ReLUDefaultOptions) { TEST_F(FunctionalTest, ReLU6) { const auto size = 3; for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = (x < 0) * 0 + ((x >= 0) * (x <= 6)) * x + (x > 6) * 6; auto y = F::relu6(x, F::ReLU6FuncOptions().inplace(inplace)); @@ -1722,10 +1506,8 @@ TEST_F(FunctionalTest, ReLU6) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, ReLU6DefaultOptions) { const auto size = 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = (x < 0) * 0 + ((x >= 0) * (x <= 6)) * x + (x > 6) * 6; auto y = F::relu6(x); @@ -1740,7 +1522,6 @@ TEST_F(FunctionalTest, RReLU) { for (const auto lower : {0.01, 0.1, 0.2}) { for (const auto upper : {0.3, 0.4, 0.5}) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto x_copy = x.clone(); @@ -1766,7 +1547,6 @@ TEST_F(FunctionalTest, RReLUDefaultOptions) { const auto size = 3; const auto lower = 1.0 / 8.0; const auto upper = 1.0 / 3.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto x_copy = x.clone(); @@ -1784,7 +1564,6 @@ TEST_F(FunctionalTest, CELU) { const auto 
size = 3; for (const auto inplace : {false, true}) { for (const auto alpha : {0.42, 1.0, 4.2, 42.42}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = torch::max(torch::zeros_like(x), x) + @@ -1806,7 +1585,6 @@ TEST_F(FunctionalTest, CELU) { TEST_F(FunctionalTest, CELUDefaultOptions) { const auto size = 3; const auto alpha = 1.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); auto y_exp = torch::max(torch::zeros_like(x), x) + @@ -1821,21 +1599,14 @@ TEST_F(FunctionalTest, CELUDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, PixelShuffle) { auto x = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 19}, {-1, 2}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{7, 14}, {-3, 1}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0, -2}, {-12, 14}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-15, 0}, {-3, 9}}}}, torch::kFloat); auto y_exp = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 7, 19, 14}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0, -15, -2, 0}, {-1, -3, 2, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-12, -3, 14, 9}}}}, torch::kFloat); auto y = F::pixel_shuffle(x, 2); @@ -1847,17 +1618,12 @@ TEST_F(FunctionalTest, PixelShuffle) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, PixelUnshuffle) { auto x = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 7, 19, 14}, {0, -15, -2, 0}, {-1, -3, 2, 1}, {-12, -3, 14, 9}}}}, torch::kFloat); auto y_exp = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 19}, {-1, 2}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{7, 14}, {-3, 1}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0, -2}, {-12, 14}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-15, 0}, {-3, 9}}}}, torch::kFloat); auto y = F::pixel_unshuffle(x, 2); @@ -1872,7 +1638,6 @@ TEST_F(FunctionalTest, Softplus) { const auto size = 3; for (const auto beta : {0.5, 1.0, 2.0}) { for (const auto threshold : {1.0, 3.0, 5.0}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-3.0, 3.0, 61); x.resize_({size, size, size}); auto y_exp = @@ -1893,7 +1658,6 @@ TEST_F(FunctionalTest, SoftplusDefaultOptions) { const auto size = 3; const auto beta = 1.0; const auto threshold = 20.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-3.0, 3.0, 61); x.resize_({size, size, size}); auto y_exp = @@ -1911,11 +1675,8 @@ TEST_F(FunctionalTest, Fold) { auto input = torch::ones({1, 3 * 2 * 2, 2}, torch::kDouble); auto output = F::fold(input, F::FoldFuncOptions({3, 2}, {2, 2})); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}}}, torch::kDouble); @@ -1925,24 +1686,16 @@ TEST_F(FunctionalTest, Fold) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Unfold) { - 
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(0, 12, torch::kDouble).view({1, 2, 2, 3}); auto output = F::unfold(input, F::UnfoldFuncOptions({2, 2}).padding(1).stride(2)); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{0.0, 0.0, 0.0, 4.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 0.0, 3.0, 5.0}, {0.0, 1.0, 0.0, 0.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 2.0, 0.0, 0.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 0.0, 0.0, 10.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 0.0, 9.0, 11.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 7.0, 0.0, 0.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6.0, 8.0, 0.0, 0.0}}}, torch::kDouble); @@ -1954,7 +1707,6 @@ TEST_F(FunctionalTest, Unfold) { TEST_F(FunctionalTest, Softshrink) { const auto size = 3; for (const auto lambda : {0.0, 0.42, 1.0, 4.2, 42.42}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}).set_requires_grad(true); // NOLINTNEXTLINE(bugprone-argument-comment) @@ -1975,7 +1727,6 @@ TEST_F(FunctionalTest, Softshrink) { TEST_F(FunctionalTest, SoftshrinkDefaultOptions) { const auto size = 3; const auto lambda = 0.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}).set_requires_grad(true); auto y = F::softshrink(x); @@ -1991,7 +1742,6 @@ TEST_F(FunctionalTest, SoftshrinkDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Softsign) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(100) * 10; auto y_exp = x / (1 + x.abs()); auto y = F::softsign(x); @@ -2001,7 +1751,6 @@ TEST_F(FunctionalTest, Softsign) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Tanhshrink) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(100) * 10; auto y_exp = x - x.tanh(); auto y = F::tanhshrink(x); @@ -2015,7 +1764,6 @@ TEST_F(FunctionalTest, Threshold) { for (const auto threshold : {0.5, 1.0, 2.0}) { for (const auto value : {0.5, 1.0, 2.0}) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-3.0, 3.0, 61); x.resize_({size, size, size}); auto y_exp = (x <= threshold) * value + (x > threshold) * x; @@ -2036,18 +1784,12 @@ TEST_F(FunctionalTest, Threshold) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, BatchNorm1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double momentum = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({2, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto mean = torch::randn(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto variance = torch::rand(5); auto weight = torch::ones({num_features}); auto bias = torch::zeros({num_features}); @@ -2060,25 +1802,18 @@ TEST_F(FunctionalTest, BatchNorm1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, 
BatchNorm1dDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({2, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto mean = torch::randn(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto variance = torch::rand(5); auto output = F::batch_norm(input, mean, variance); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = (input - mean) / torch::sqrt(variance + 1e-5); ASSERT_TRUE(output.allclose(expected)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, BatchNorm2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double momentum = 0.1; auto input = torch::randn({2, num_features, 4, 4}); @@ -2095,9 +1830,7 @@ TEST_F(FunctionalTest, BatchNorm2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, BatchNorm2dDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; auto input = torch::randn({2, num_features, 4, 4}); @@ -2110,11 +1843,8 @@ TEST_F(FunctionalTest, BatchNorm2dDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, BatchNorm3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double momentum = 0.1; auto input = torch::randn({2, num_features, 2, 2, 2}); @@ -2131,9 +1861,7 @@ TEST_F(FunctionalTest, BatchNorm3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, BatchNorm3dDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; auto input = torch::randn({2, num_features, 2, 2, 2}); @@ -2146,18 +1874,12 @@ TEST_F(FunctionalTest, BatchNorm3dDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, InstanceNorm1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double momentum = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(40.).view({2, 5, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto mean = torch::arange(5.); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto variance = torch::arange(5.); auto weight = torch::arange((double)num_features); auto bias = torch::arange((double)num_features); @@ -2171,64 +1893,41 @@ TEST_F(FunctionalTest, InstanceNorm1d) { .momentum(momentum) .eps(eps)); auto expected = torch::tensor({{{ 0.0000, 0.0000, 0.0000, 0.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3416, 0.5528, 1.4472, 2.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6833, 1.1056, 2.8944, 4.6833}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.0249, 1.6584, 4.3416, 7.0249}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3665, 2.2112, 5.7888, 9.3665}}, 
{{ 0.0000, 0.0000, 0.0000, 0.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3416, 0.5528, 1.4472, 2.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6833, 1.1056, 2.8944, 4.6833}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.0249, 1.6584, 4.3416, 7.0249}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3665, 2.2112, 5.7888, 9.3665}}}); ASSERT_TRUE(output.allclose(expected, 2e-04)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, InstanceNorm1dDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(40.).view({2, 5, 4}); auto output = F::instance_norm(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-1.3416, -0.4472, 0.4472, 1.3416}}}); ASSERT_TRUE(output.allclose(expected, 2e-04)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, InstanceNorm2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double momentum = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* num_features * 2 * 2).view({2, num_features, 2, 2}); auto mean = torch::arange((double)num_features); auto variance = torch::arange((double)num_features); @@ -2245,106 +1944,63 @@ TEST_F(FunctionalTest, InstanceNorm2d) { .eps(eps)); auto expected = torch::tensor({{{{ 0.0000, 0.0000}, { 0.0000, 0.0000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.3416, 0.5528}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.4472, 2.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.6833, 1.1056}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.8944, 4.6833}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.0249, 1.6584}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4.3416, 7.0249}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3665, 2.2112}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5.7888, 9.3665}}}, {{{ 0.0000, 0.0000}, { 0.0000, 0.0000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.3416, 0.5528}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.4472, 2.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.6833, 1.1056}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.8944, 4.6833}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.0249, 1.6584}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4.3416, 7.0249}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3665, 2.2112}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5.7888, 9.3665}}}}); ASSERT_TRUE(output.allclose(expected, 2e-04)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, InstanceNorm2dDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* num_features * 2 * 2).view({2, num_features, 2, 2}); auto output = F::instance_norm(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}}}); ASSERT_TRUE(output.allclose(expected, 2e-04)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, InstanceNorm3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double momentum = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* num_features * 2 * 2 * 2).view({2, num_features, 2, 2, 2}); auto mean = torch::arange((double)num_features); auto variance = torch::arange((double)num_features); @@ -2363,166 +2019,91 @@ TEST_F(FunctionalTest, InstanceNorm3d) { { 0.0000, 0.0000}}, {{ 0.0000, 0.0000}, { 0.0000, 0.0000}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-0.5275, -0.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.3453, 0.7818}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 1.2182, 1.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.0911, 2.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.0550, -0.1822}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.6907, 1.5636}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 2.4364, 3.3093}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4.1822, 5.0550}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5826, -0.2733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0360, 2.3453}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 3.6547, 4.9640}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6.2733, 7.5826}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-2.1101, -0.3644}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.3814, 3.1271}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 4.8729, 6.6186}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 8.3644, 10.1101}}}}, {{{{ 0.0000, 0.0000}, { 0.0000, 0.0000}}, {{ 0.0000, 0.0000}, { 0.0000, 0.0000}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-0.5275, -0.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.3453, 0.7818}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 1.2182, 1.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.0911, 2.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.0550, -0.1822}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.6907, 1.5636}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 2.4364, 3.3093}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4.1822, 5.0550}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5826, -0.2733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0360, 2.3453}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 3.6547, 4.9640}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6.2733, 7.5826}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-2.1101, -0.3644}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.3814, 3.1271}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 4.8729, 6.6186}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 8.3644, 10.1101}}}}}); ASSERT_TRUE(output.allclose(expected, 2e-04)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, InstanceNorm3dDefaultOptions) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_features = 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 1e-05; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* num_features * 2 * 2 * 2).view({2, num_features, 2, 2, 2}); auto output = F::instance_norm(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}}}); ASSERT_TRUE(output.allclose(expected, 2e-04)); } @@ -2597,7 +2178,6 @@ TEST_F(FunctionalTest, Interpolate) { ASSERT_THROWS_WITH( F::interpolate( input, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
F::InterpolateFuncOptions().size(std::vector({3, 4, 4})).scale_factor(std::vector({0.5}))), "only one of size or scale_factor should be defined"); ASSERT_THROWS_WITH( @@ -2618,210 +2198,127 @@ TEST_F(FunctionalTest, Interpolate) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Pad) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(6, torch::kDouble).reshape({1, 2, 3}); auto output = F::pad(input, F::PadFuncOptions({1, 2}).mode(torch::kCircular)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{2., 0., 1., 2., 0., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5., 3., 4., 5., 3., 4.}}}, torch::kDouble); ASSERT_EQ(output.sizes(), std::vector({1, 2, 6})); ASSERT_TRUE(output.allclose(expected, 1e-04)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kDouble).reshape({1, 1, 3, 3}); auto output = F::pad(input, F::PadFuncOptions({3, 3, 3, 1}).mode(torch::kCircular)); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 1., 2., 0., 1., 2., 0., 1., 2.}}}}, torch::kDouble); ASSERT_EQ(output.sizes(), std::vector({1, 1, 7, 9})); ASSERT_TRUE(output.allclose(expected, 1e-04)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(12, torch::kDouble).reshape({1, 1, 2, 2, 3}); auto output = F::pad(input, F::PadFuncOptions({3, 3, 2, 1, 2, 2}).mode(torch::kCircular)); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{{ 0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 1., 2., 0., 1., 2., 0., 1., 2.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 10., 11., 9., 10., 11., 9., 10., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 10., 11., 9., 10., 11., 9., 10., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 7., 8., 6., 7., 8., 6., 7., 8.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 4., 5., 3., 4., 5., 3., 4., 
5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 1., 2., 0., 1., 2., 0., 1., 2.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 10., 11., 9., 10., 11., 9., 10., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 10., 11., 9., 10., 11., 9., 10., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 7., 8., 6., 7., 8., 6., 7., 8.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 1., 2., 0., 1., 2., 0., 1., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 4., 5., 3., 4., 5., 3., 4., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 1., 2., 0., 1., 2., 0., 1., 2.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 10., 11., 9., 10., 11., 9., 10., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 7., 8., 6., 7., 8., 6., 7., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 10., 11., 9., 10., 11., 9., 10., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 7., 8., 6., 7., 8., 6., 7., 8.}}}}}, torch::kDouble); ASSERT_EQ(output.sizes(), std::vector({1, 1, 6, 5, 9})); ASSERT_TRUE(output.allclose(expected, 1e-04)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(16, torch::kDouble).reshape({2, 2, 2, 2}); auto output = F::pad(input, F::PadFuncOptions({1, 1, 1, 1}).mode(torch::kReflect)); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{ 3., 2., 3., 2.}, { 1., 0., 1., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 2., 3., 2.}, { 1., 0., 1., 0.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 7., 6., 7., 6.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5., 4., 5., 4.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 7., 6., 7., 6.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5., 4., 5., 4.}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{11., 10., 11., 10.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 8., 9., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {11., 10., 11., 10.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 8., 9., 8.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{15., 14., 15., 14.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13., 12., 13., 12.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {15., 14., 15., 14.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13., 12., 13., 12.}}}}, torch::kDouble); ASSERT_EQ(output.sizes(), std::vector({2, 2, 4, 4})); ASSERT_TRUE(output.allclose(expected, 1e-04)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(12, torch::kDouble).reshape({1, 1, 2, 2, 3}); auto output = F::pad(input, F::PadFuncOptions({1, 2, 2, 1, 1, 2}).mode(torch::kReplicate)); auto expected = torch::tensor( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{{ 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 3., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 3., 4., 5., 5., 5.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 3., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3., 3., 4., 5., 5., 5.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 9., 10., 11., 11., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 9., 10., 11., 11., 11.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 9., 10., 11., 11., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 9., 10., 11., 11., 11.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 9., 10., 11., 11., 11.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9., 9., 10., 11., 11., 11.}}}}}, torch::kDouble); ASSERT_EQ(output.sizes(), std::vector({1, 1, 5, 5, 6})); ASSERT_TRUE(output.allclose(expected, 1e-04)); @@ -2964,22 +2461,14 @@ TEST_F(FunctionalTest, MarginRankingLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, ConvTranspose1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(20.).view({2, 2, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(18.).view({2, 3, 3}); auto y = F::conv_transpose1d(x, weight, F::ConvTranspose1dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{ 45., 104., 179., 212., 245., 188., 107.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 60., 140., 242., 293., 344., 260., 146.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 75., 176., 305., 374., 443., 332., 185.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 135., 304., 509., 542., 575., 428., 237.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 210., 460., 752., 803., 854., 620., 336.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 285., 616., 995., 1064., 1133., 812., 435.}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -2989,52 +2478,29 @@ TEST_F(FunctionalTest, ConvTranspose1d) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, ConvTranspose2dEven) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(50.).view({1, 2, 5, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(54.).view({2, 3, 3, 3}); auto y = F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 675., 1402., 2183., 2270., 2357., 1634., 849.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1560., 3240., 5044., 5236., 5428., 3760., 1952.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2685., 5574., 8673., 8988., 9303., 6438., 3339.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3180., 6594., 10248., 10563., 10878., 7518., 3894.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3675., 7614., 11823., 12138., 12453., 8598., 4449.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2820., 5832., 9040., 9268., 9496., 6544., 3380.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1605., 3314., 5129., 5252., 5375., 3698., 1907.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 900., 1870., 2912., 3053., 3194., 2210., 1146.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2100., 4356., 6772., 7072., 7372., 5092., 2636.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3630., 7518., 11670., 12147., 12624., 8706., 4500.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4395., 9078., 14055., 14532., 15009., 10326., 5325.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5160., 10638., 16440., 16917., 17394., 11946., 6150.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3900., 8028., 12388., 12724., 13060., 8956., 4604.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2190., 4502., 6938., 7115., 7292., 4994., 2564.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 1125., 2338., 3641., 3836., 4031., 2786., 1443.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2640., 5472., 8500., 8908., 9316., 6424., 3320.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4575., 9462., 14667., 15306., 15945., 10974., 5661.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5610., 11562., 17862., 18501., 19140., 13134., 6756.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6645., 13662., 21057., 21696., 22335., 15294., 7851.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4980., 10224., 15736., 16180., 16624., 11368., 5828.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2775., 5690., 8747., 8978., 9209., 6290., 3221.}}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -3044,52 +2510,29 @@ TEST_F(FunctionalTest, ConvTranspose2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, ConvTranspose2dUneven) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(40.).view({1, 2, 5, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(36.).view({2, 3, 3, 2}); auto y = F::conv_transpose2d(x, weight, F::ConvTranspose2dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 360., 758., 796., 834., 440.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 832., 
1752., 1836., 1920., 1012.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1432., 3014., 3152., 3290., 1732.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1696., 3566., 3704., 3842., 2020.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1960., 4118., 4256., 4394., 2308.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1504., 3152., 3252., 3352., 1756.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 856., 1790., 1844., 1898., 992.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 480., 1010., 1072., 1134., 596.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1120., 2352., 2484., 2616., 1372.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1936., 4058., 4268., 4478., 2344.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2344., 4898., 5108., 5318., 2776.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2752., 5738., 5948., 6158., 3208.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2080., 4328., 4476., 4624., 2404.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1168., 2426., 2504., 2582., 1340.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 600., 1262., 1348., 1434., 752.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1408., 2952., 3132., 3312., 1732.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2440., 5102., 5384., 5666., 2956.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2992., 6230., 6512., 6794., 3532.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3544., 7358., 7640., 7922., 4108.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2656., 5504., 5700., 5896., 3052.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1480., 3062., 3164., 3266., 1688.}}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -3099,46 +2542,26 @@ TEST_F(FunctionalTest, ConvTranspose2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, ConvTranspose3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(16.).view({1, 2, 2, 2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::arange(32.).view({2, 2, 2, 2, 2}); auto y = F::conv_transpose3d(x, weight, F::ConvTranspose3dFuncOptions().stride(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{ 128., 280., 154.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 304., 664., 364.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 184., 400., 218.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 352., 768., 420.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 832., 1808., 984.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 496., 1072., 580.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 256., 552., 298.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 592., 1272., 684.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 344., 736., 394.}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 192., 424., 234.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 464., 1016., 556.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 280., 608., 330.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 544., 1184., 644.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1280., 
2768., 1496.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 752., 1616., 868.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 384., 824., 442.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 880., 1880., 1004.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 504., 1072., 570.}}}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -3148,7 +2571,6 @@ TEST_F(FunctionalTest, ConvTranspose3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, AlphaDropout) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn(5000); auto input_mean = input.mean(); auto input_std = input.std(); @@ -3164,7 +2586,6 @@ TEST_F(FunctionalTest, AlphaDropout) { } } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = F::detail::alpha_dropout(input, 0.5, false, false); ASSERT_TRUE(torch::allclose(input_mean, output.mean(), 0.1)); ASSERT_TRUE(torch::allclose(input_std, output.std(), 0.1)); @@ -3172,7 +2593,6 @@ TEST_F(FunctionalTest, AlphaDropout) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, FeatureAlphaDropout) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn(5000); auto input_mean = input.mean(); auto input_std = input.std(); @@ -3195,7 +2615,6 @@ TEST_F(FunctionalTest, FeatureAlphaDropout) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Dropout) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn(5000); auto input_mean = input.mean(); auto input_std = input.std(); @@ -3213,7 +2632,6 @@ TEST_F(FunctionalTest, Dropout) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Dropout2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({50, 100}); auto input_mean = input.mean(); auto input_std = input.std(); @@ -3231,7 +2649,6 @@ TEST_F(FunctionalTest, Dropout2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(FunctionalTest, Dropout3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({50, 10, 10}); auto input_mean = input.mean(); auto input_std = input.std(); @@ -3490,9 +2907,7 @@ TEST_F(FunctionalTest, BCEWithLogitsLoss) { { // test BCE with logits gives same result as sigmoid and bce loss auto sigmoid = Sigmoid(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::rand({64, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = torch::rand({64, 4}) - 0.5; ASSERT_TRUE(torch::allclose( @@ -3511,7 +2926,6 @@ TEST_F(FunctionalTest, BCEWithLogitsLoss) { )); target = torch::zeros({4, 1}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output = torch::empty({4, 1}, torch::kFloat).fill_(-100); ASSERT_TRUE(torch::allclose( @@ -3556,7 +2970,6 @@ TEST_F(FunctionalTest, BCEWithLogitsLoss) { F::BinaryCrossEntropyWithLogitsFuncOptions().weight(weight) ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight = weight.expand({16, 4}).contiguous(); auto out2 = F::binary_cross_entropy_with_logits(output, target, F::BinaryCrossEntropyWithLogitsFuncOptions().weight(weight) @@ -3564,13 +2977,11 @@ TEST_F(FunctionalTest, BCEWithLogitsLoss) { ASSERT_TRUE(torch::allclose(out1, out2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight = torch::rand({16, 1}); out1 = 
F::binary_cross_entropy_with_logits(output, target, F::BinaryCrossEntropyWithLogitsFuncOptions().weight(weight) ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight = weight.expand({16, 4}).contiguous(); out2 = F::binary_cross_entropy_with_logits(output, target, F::BinaryCrossEntropyWithLogitsFuncOptions().weight(weight) diff --git a/test/cpp/api/init.cpp b/test/cpp/api/init.cpp index 006e1f5b4123e..d437a5e195cb4 100644 --- a/test/cpp/api/init.cpp +++ b/test/cpp/api/init.cpp @@ -32,7 +32,6 @@ void check_exact_values( auto tensor = layerParameters[p].to(torch::kFloat64); auto expectedTensor = expectedLayerParameters[p].to(torch::kFloat64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!tensor.allclose(expectedTensor, /*rtol=*/1e-3, /*atol=*/5e-4)) { std::cout << "layer " << i << ": " << tensor << " != " << expectedTensor << " (parameter " << p << ")" << std::endl; @@ -47,17 +46,14 @@ void check_initializer_against_baseline( std::vector> expected) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto layer1 = torch::nn::Linear(7, 15); initializer(layer1->weight); layer1->to(torch::kFloat64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto layer2 = torch::nn::Linear(15, 15); initializer(layer2->weight); layer2->to(torch::kFloat64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto layer3 = torch::nn::Linear(15, 2); initializer(layer3->weight); layer3->to(torch::kFloat64); diff --git a/test/cpp/api/integration.cpp b/test/cpp/api/integration.cpp index 4df984c9582f1..ec00030c76fc0 100644 --- a/test/cpp/api/integration.cpp +++ b/test/cpp/api/integration.cpp @@ -16,24 +16,17 @@ const double kPi = 3.1415926535898; class CartPole { // Translated from openai/gym's cartpole.py public: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double gravity = 9.8; double masscart = 1.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double masspole = 0.1; double total_mass = (masspole + masscart); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double length = 0.5; // actually half the pole's length; double polemass_length = (masspole * length); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double force_mag = 10.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double tau = 0.02; // seconds between state updates; // Angle at which to fail the episode - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double theta_threshold_radians = 12 * 2 * kPi / 360; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double x_threshold = 2.4; int steps_beyond_done = -1; @@ -55,7 +48,6 @@ class CartPole { } void reset() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) state = torch::empty({4}).uniform_(-0.05, 0.05); steps_beyond_done = -1; step_ = 0; @@ -78,7 +70,6 @@ class CartPole { auto temp = (force + polemass_length * theta_dot * theta_dot * sintheta) / total_mass; auto thetaacc = (gravity * sintheta - costheta * temp) / - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (length * (4.0 / 3.0 - masspole * costheta * costheta / total_mass)); auto xacc = temp - polemass_length * thetaacc * costheta / total_mass; @@ -90,7 +81,6 @@ class CartPole { done = x < -x_threshold || x > x_threshold || theta < -theta_threshold_radians || theta > theta_threshold_radians || - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) step_ > 200; if (!done) { @@ -154,7 +144,6 @@ bool test_mnist( auto result = 
std::get<1>(forward_op(images).max(/*dim=*/1)); torch::Tensor correct = (result == targets).to(torch::kFloat32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return correct.sum().item<float>() > (test_dataset.size().value() * 0.8); } @@ -164,13 +153,9 @@ struct IntegrationTest : torch::test::SeedingFixture {}; TEST_F(IntegrationTest, CartPole) { torch::manual_seed(0); auto model = std::make_shared(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto linear = model->add(Linear(4, 128), "linear"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto policyHead = model->add(Linear(128, 2), "policy"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto valueHead = model->add(Linear(128, 1), "action"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optimizer = torch::optim::Adam(model->parameters(), 1e-3); std::vector<torch::Tensor> saved_log_probs; @@ -203,13 +188,11 @@ TEST_F(IntegrationTest, CartPole) { auto R = 0.; // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) for (int i = rewards.size() - 1; i >= 0; i--) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) R = rewards[i] + 0.99 * R; rewards[i] = R; } auto r_t = torch::from_blob( rewards.data(), {static_cast<int64_t>(rewards.size())}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r_t = (r_t - r_t.mean()) / (r_t.std() + 1e-5); std::vector<torch::Tensor> policy_loss; @@ -234,13 +217,11 @@ TEST_F(IntegrationTest, CartPole) { }; auto env = CartPole(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double running_reward = 10.0; for (size_t episode = 0;; episode++) { env.reset(); auto state = env.getState(); int t = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; t < 10000; t++) { auto action = selectAction(state); env.step(action); @@ -253,7 +234,6 @@ TEST_F(IntegrationTest, CartPole) { break; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) running_reward = running_reward * 0.99 + t * 0.01; finishEpisode(); /* @@ -262,7 +242,6 @@ TEST_F(IntegrationTest, CartPole) { episode, t, running_reward); } */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (running_reward > 150) { break; } @@ -274,17 +253,11 @@ TEST_F(IntegrationTest, CartPole) { TEST_F(IntegrationTest, MNIST_CUDA) { torch::manual_seed(0); auto model = std::make_shared(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto conv1 = model->add(Conv2d(1, 10, 5), "conv1"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto conv2 = model->add(Conv2d(10, 20, 5), "conv2"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto drop = Dropout(0.3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto drop2d = Dropout2d(0.3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto linear1 = model->add(Linear(320, 50), "linear1"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto linear2 = model->add(Linear(50, 10), "linear2"); auto forward = [&](torch::Tensor x) { @@ -293,7 +266,6 @@ TEST_F(IntegrationTest, MNIST_CUDA) { x = drop2d->forward(x); x = torch::max_pool2d(x, {2, 2}).relu(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = x.view({-1, 320}); x = linear1->forward(x).clamp_min(0); x = drop->forward(x); @@ -303,7 +275,6 @@ TEST_F(IntegrationTest, MNIST_CUDA) { }; auto optimizer = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->parameters(), torch::optim::SGDOptions(1e-2).momentum(0.5)); ASSERT_TRUE(test_mnist( @@ -319,17 +290,11
@@ TEST_F(IntegrationTest, MNIST_CUDA) { TEST_F(IntegrationTest, MNISTBatchNorm_CUDA) { torch::manual_seed(0); auto model = std::make_shared(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto conv1 = model->add(Conv2d(1, 10, 5), "conv1"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto batchnorm2d = model->add(BatchNorm2d(10), "batchnorm2d"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto conv2 = model->add(Conv2d(10, 20, 5), "conv2"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto linear1 = model->add(Linear(320, 50), "linear1"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto batchnorm1 = model->add(BatchNorm1d(50), "batchnorm1"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto linear2 = model->add(Linear(50, 10), "linear2"); auto forward = [&](torch::Tensor x) { @@ -338,7 +303,6 @@ TEST_F(IntegrationTest, MNISTBatchNorm_CUDA) { x = conv2->forward(x); x = torch::max_pool2d(x, {2, 2}).relu(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = x.view({-1, 320}); x = linear1->forward(x).clamp_min(0); x = batchnorm1->forward(x); @@ -348,7 +312,6 @@ TEST_F(IntegrationTest, MNISTBatchNorm_CUDA) { }; auto optimizer = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->parameters(), torch::optim::SGDOptions(1e-2).momentum(0.5)); ASSERT_TRUE(test_mnist( diff --git a/test/cpp/api/jit.cpp b/test/cpp/api/jit.cpp index 8bd6ff5089017..d146e71653db4 100644 --- a/test/cpp/api/jit.cpp +++ b/test/cpp/api/jit.cpp @@ -119,7 +119,6 @@ TEST(TorchScriptTest, TestOptionalArgMatching) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TorchScriptTest, TestPickle) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::IValue float_value(2.3); // TODO: when tensors are stored in the pickle, delete this @@ -129,7 +128,6 @@ TEST(TorchScriptTest, TestPickle) { torch::IValue ivalue = torch::jit::unpickle(data.data(), data.size()); double diff = ivalue.toDouble() - float_value.toDouble(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double eps = 0.0001; ASSERT_TRUE(diff < eps && diff > -eps); } diff --git a/test/cpp/api/memory.cpp b/test/cpp/api/memory.cpp index db79910eb9c94..3fb21890ac84a 100644 --- a/test/cpp/api/memory.cpp +++ b/test/cpp/api/memory.cpp @@ -14,7 +14,6 @@ struct TestValue { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MakeUniqueTest, ForwardRvaluesCorrectly) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ptr = torch::make_unique<TestValue>(123); ASSERT_FALSE(ptr->lvalue_.has_value()); ASSERT_TRUE(ptr->rvalue_.has_value()); @@ -23,7 +22,6 @@ TEST(MakeUniqueTest, ForwardRvaluesCorrectly) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MakeUniqueTest, ForwardLvaluesCorrectly) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int x = 5; auto ptr = torch::make_unique<TestValue>(x); ASSERT_TRUE(ptr->lvalue_.has_value()); diff --git a/test/cpp/api/misc.cpp b/test/cpp/api/misc.cpp index 38c9db9f2667b..eb009664822c9 100644 --- a/test/cpp/api/misc.cpp +++ b/test/cpp/api/misc.cpp @@ -51,9 +51,7 @@ TEST(UtilsTest, WarnOnce) { TEST(NoGradTest, SetsGradModeCorrectly) { torch::manual_seed(0); torch::NoGradGuard guard; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::nn::Linear model(5, 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({10, 5}, torch::requires_grad()); auto y = model->forward(x);
torch::Tensor s = y.sum(); diff --git a/test/cpp/api/module.cpp b/test/cpp/api/module.cpp index e4ab3d50befae..eff3dbafcdc52 100644 --- a/test/cpp/api/module.cpp +++ b/test/cpp/api/module.cpp @@ -33,7 +33,6 @@ TEST_F(ModuleTest, CanEnableAndDisableTrainingMode) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleTest, ZeroGrad) { Linear module(3, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::ones({8, 3}, torch::requires_grad()); auto loss = module(weight).sum(); loss.backward(); @@ -56,9 +55,7 @@ TEST_F(ModuleTest, ZeroGrad) { TEST_F(ModuleTest, ZeroGradWithUndefined) { struct TestModule : torch::nn::Module { TestModule() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = register_parameter("x", torch::ones(5, torch::requires_grad())); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y = register_parameter("y", torch::ones(5, torch::requires_grad())); } torch::Tensor x, y; @@ -117,7 +114,6 @@ TEST_F(ModuleTest, ReplaceModule) { } }; auto model = std::make_shared(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->l1 = model->replace_module("l1", torch::nn::Linear(5, 6)); ASSERT_EQ(model->named_parameters()["l1.weight"].size(0), 6); ASSERT_EQ(model->l1.get(), model->named_modules()["l1"]->as()); @@ -150,7 +146,6 @@ TEST_F(ModuleTest, RegisterParameterThrowsForEmptyOrDottedName) { TEST_F(ModuleTest, RegisterParameterThrowsForDuplicateModuleName) { struct TestModel : public torch::nn::Module {}; TestModel model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model.register_parameter("p", torch::ones(5)); ASSERT_THROWS_WITH( model.register_parameter("p", torch::ones(5)), @@ -196,7 +191,6 @@ TEST_F(ModuleTest, RegisterBufferThrowsForEmptyOrDottedName) { TEST_F(ModuleTest, RegisterBufferThrowsForDuplicateModuleName) { struct TestModel : public torch::nn::Module {}; TestModel model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model.register_buffer("p", torch::ones(5)); ASSERT_THROWS_WITH( model.register_buffer("p", torch::ones(5)), "Buffer 'p' already defined"); @@ -247,7 +241,6 @@ void test_DeviceOrDtypeConversionSkipsUndefinedTensor( torch::Device to_device, torch::Dtype to_dtype) { { // Case 1: Undefined tensors as parameters - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear module(LinearOptions(10, 20).bias(false)); ASSERT_TRUE(module->weight.defined()); ASSERT_FALSE(module->bias.defined()); @@ -264,7 +257,6 @@ void test_DeviceOrDtypeConversionSkipsUndefinedTensor( } { // Case 2: Undefined tensors as buffers - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm1d module(BatchNorm1dOptions(5).track_running_stats(false).affine(true)); ASSERT_TRUE(module->weight.defined()); ASSERT_FALSE(module->running_mean.defined()); @@ -294,7 +286,6 @@ TEST_F(ModuleTest, DeviceOrDtypeConversionSkipsUndefinedTensor_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleTest, ParametersAndBuffersAccessorSkipsUndefinedTensor) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear module(LinearOptions(10, 20).bias(false)); auto params = module->parameters(); @@ -306,7 +297,6 @@ TEST_F(ModuleTest, ParametersAndBuffersAccessorSkipsUndefinedTensor) { ASSERT_TRUE(pointer_equal(named_params["weight"], module->weight)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm1d module(BatchNorm1dOptions(5).track_running_stats(false).affine(false)); auto buffers = module->buffers(); @@ 
-315,7 +305,6 @@ TEST_F(ModuleTest, ParametersAndBuffersAccessorSkipsUndefinedTensor) { ASSERT_EQ(named_buffers.size(), 0); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm1d module(BatchNorm1dOptions(5).track_running_stats(true).affine(false)); auto buffers = module->buffers(); @@ -334,7 +323,6 @@ TEST_F(ModuleTest, ParametersAndBuffersAccessorSkipsUndefinedTensor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleTest, Conversion_MultiCUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear module(128, 64); for (auto& parameter : module->parameters()) { ASSERT_EQ(parameter.device(), torch::Device(torch::kCPU)); @@ -411,11 +399,8 @@ struct TestDistinctParametersModule reset(); } void reset() override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l1 = register_module("l1", Linear(10, 3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l2 = register_module("l2", Linear(3, 5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l3 = register_module("l3", Linear(5, 100)); buffer = register_buffer("buf", torch::ones({2, 2})); } @@ -557,7 +542,6 @@ TEST_F(ModuleTest, CloneCopiesTheValuesOfVariablesOfSubmodules) { { torch::NoGradGuard no_grad; a->module->weight += 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a->module->value = 123; } @@ -581,11 +565,8 @@ TEST_F(ModuleTest, CloneToDevicePreservesTheDeviceOfParameters_CUDA) { reset(); } void reset() override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l1 = register_module("l1", Linear(10, 3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l2 = register_module("l2", Linear(3, 5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l3 = register_module("l3", Linear(5, 100)); buffer = register_buffer("buf", torch::ones({2, 2})); } @@ -621,11 +602,8 @@ TEST_F( reset(); } void reset() override { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l1 = register_module("l1", Linear(10, 3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l2 = register_module("l2", Linear(3, 5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l3 = register_module("l3", Linear(5, 100)); buffer = register_buffer("buf", torch::ones({2, 2})); } @@ -701,7 +679,6 @@ TEST_F(ModuleTest, ContainsBuffersWithTheCorrectName) { } struct AImpl : torch::nn::Module { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AImpl() : x_(123) {} AImpl(int x) : x_(x) {} int x_; @@ -722,7 +699,6 @@ TEST_F( TEST_F( ModuleTest, ValueConstructorOfModuleHolderCallsCorrectConstructorInImpl) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) A a(5); ASSERT_TRUE(a); ASSERT_FALSE(a.is_empty()); @@ -906,11 +882,8 @@ std::shared_ptr make_deeply_nested_test_container() { {TestContainer(1, {TestContainer(2), TestContainer(3)}), TestContainer(4), TestContainer( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {TestContainer(6), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TestContainer(7, {TestContainer(8), TestContainer(9)})})})); } @@ -921,15 +894,10 @@ make_key_value_pairs_for_deeply_nested_container() { {"test_prefix.0.0", 2}, {"test_prefix.0.1", 3}, {"test_prefix.1", 4}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test_prefix.2", 5}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test_prefix.2.0", 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test_prefix.2.1", 
7}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test_prefix.2.1.0", 8}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test_prefix.2.1.1", 9}}; } diff --git a/test/cpp/api/moduledict.cpp b/test/cpp/api/moduledict.cpp index c163827885348..76b50a8273aab 100644 --- a/test/cpp/api/moduledict.cpp +++ b/test/cpp/api/moduledict.cpp @@ -132,10 +132,8 @@ TEST_F(ModuleDictTest, Keys) { }; torch::OrderedDict> ordereddict = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"linear", Linear(10, 3).ptr()}, {"conv", Conv2d(1, 2, 3).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"dropout", Dropout(0.5).ptr()}, }; ModuleDict dict(ordereddict); @@ -177,16 +175,11 @@ TEST_F(ModuleDictTest, Values) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleDictTest, SanityCheckForHoldingStandardModules) { torch::OrderedDict> ordereddict = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"linear", Linear(10, 3).ptr()}, {"conv", Conv2d(1, 2, 3).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"dropout", Dropout(0.5).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"batch", BatchNorm2d(5).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"embedding", Embedding(4, 10).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"lstm", LSTM(4, 5).ptr()} }; ModuleDict dict(ordereddict); @@ -197,7 +190,6 @@ TEST_F(ModuleDictTest, HasReferenceSemantics) { torch::OrderedDict> ordereddict = { {"linear1", Linear(2, 3).ptr()}, {"linear2", Linear(3, 4).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"linear3", Linear(4, 5).ptr()}, }; ModuleDict first(ordereddict); @@ -264,10 +256,8 @@ TEST_F(ModuleDictTest, IsCloneable_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleDictTest, RegistersElementsAsSubmodules) { torch::OrderedDict> ordereddict1 = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"linear", Linear(10, 3).ptr()}, {"conv", Conv2d(1, 2, 3).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test", Dropout(0.5).ptr()}, }; ModuleDict dict(ordereddict1); @@ -279,9 +269,7 @@ TEST_F(ModuleDictTest, RegistersElementsAsSubmodules) { // Update Existing torch::OrderedDict> ordereddict2 = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"lstm", LSTM(4, 5).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"test", BatchNorm2d(5).ptr()} }; dict->update(ordereddict2); @@ -316,16 +304,11 @@ TEST_F(ModuleDictTest, CloneToDevice_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleDictTest, PrettyPrintModuleDict) { torch::OrderedDict> ordereddict = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"linear", Linear(10, 3).ptr()}, {"conv", Conv2d(1, 2, 3).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"dropout", Dropout(0.5).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"batch", BatchNorm2d(5).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"embedding", Embedding(4, 10).ptr()}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"lstm", LSTM(4, 5).ptr()} }; ModuleDict dict(ordereddict); diff --git a/test/cpp/api/modulelist.cpp b/test/cpp/api/modulelist.cpp index 6af4cbcdc1e07..df06ee4ca9ace 100644 --- a/test/cpp/api/modulelist.cpp +++ b/test/cpp/api/modulelist.cpp @@ -164,16 +164,11 @@ TEST_F(ModuleListTest, AccessWithPtr) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleListTest, SanityCheckForHoldingStandardModules) { ModuleList list( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear(10, 3), Conv2d(1, 2, 3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout(0.5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d(5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Embedding(4, 10), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM(4, 5)); } @@ -210,7 +205,6 @@ TEST_F(ModuleListTest, ExtendPushesModulesFromOtherModuleList) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleListTest, HasReferenceSemantics) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ModuleList first(Linear(2, 3), Linear(4, 4), Linear(4, 5)); ModuleList second(first); @@ -259,7 +253,6 @@ TEST_F(ModuleListTest, IsCloneable) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleListTest, RegistersElementsAsSubmodules) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ModuleList list(Linear(10, 3), Conv2d(1, 2, 3), Dropout2d(0.5)); auto modules = list->children(); @@ -292,16 +285,11 @@ TEST_F(ModuleListTest, CloneToDevice_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModuleListTest, PrettyPrintModuleList) { ModuleList list( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear(10, 3), Conv2d(1, 2, 3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout(0.5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d(5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Embedding(4, 10), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM(4, 5)); ASSERT_EQ( c10::str(list), @@ -320,7 +308,6 @@ TEST_F(ModuleListTest, RangeBasedForLoop) { torch::nn::ModuleList mlist( torch::nn::Linear(3, 4), torch::nn::BatchNorm1d(4), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::nn::Dropout(0.5) ); diff --git a/test/cpp/api/modules.cpp b/test/cpp/api/modules.cpp index 3f05533f99c60..f2c945fa800e2 100644 --- a/test/cpp/api/modules.cpp +++ b/test/cpp/api/modules.cpp @@ -16,11 +16,8 @@ using namespace torch::test; class TestModel : public torch::nn::Module { public: TestModel() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) : l1(register_module("l1", Linear(10, 3))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l2(register_module("l2", Linear(3, 5))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l3(register_module("l3", Linear(5, 100))) {} Linear l1, l2, l3; @@ -29,9 +26,7 @@ class TestModel : public torch::nn::Module { class NestedModel : public torch::nn::Module { public: NestedModel() - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) : param_(register_parameter("param", torch::empty({3, 2, 21}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l1(register_module("l1", Linear(5, 20))), t(register_module("test", std::make_shared())) {} @@ -45,19 +40,13 @@ struct ModulesTest : torch::test::SeedingFixture {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Conv1d) { Conv1d model(Conv1dOptions(3, 2, 3).stride(1).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(18, torch::dtype(torch::kFloat)).reshape({2, 3, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = 
torch::arange(30, torch::dtype(torch::kFloat).requires_grad(true)).reshape({2, 3, 5}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{ 312., 348., 384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 798., 915., 1032.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 852., 888., 924.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2553., 2670., 2787.}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -80,23 +69,15 @@ TEST_F(ModulesTest, Conv1dSameStrided) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Conv2dEven) { Conv2d model(Conv2dOptions(3, 2, 3).stride(1).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(54, torch::dtype(torch::kFloat)).reshape({2, 3, 3, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(75, torch::dtype(torch::kFloat).requires_grad(true)).reshape({1, 3, 5, 5}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{15219., 15570., 15921.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16974., 17325., 17676.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {18729., 19080., 19431.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{37818., 38898., 39978.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {43218., 44298., 45378.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {48618., 49698., 50778.}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -109,23 +90,15 @@ TEST_F(ModulesTest, Conv2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Conv2dUneven) { Conv2d model(Conv2dOptions(3, 2, {3, 2}).stride({1, 1}).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(36, torch::dtype(torch::kFloat)).reshape({2, 3, 3, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(60, torch::dtype(torch::kFloat).requires_grad(true)).reshape({1, 3, 5, 4}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 5289., 5442., 5595.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5901., 6054., 6207.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6513., 6666., 6819.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{13227., 13704., 14181.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {15135., 15612., 16089.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {17043., 17520., 17997.}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -151,51 +124,31 @@ TEST_F(ModulesTest, Conv2dSameStrided) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Conv3d) { Conv3d model(Conv3dOptions(3, 2, 3).stride(1).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(162, torch::dtype(torch::kFloat)).reshape({2, 3, 3, 3, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(375, torch::dtype(torch::kFloat).requires_grad(true)).reshape({1, 3, 5, 5, 5}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{ 700704., 703944., 707184.}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 716904., 720144., 723384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 733104., 736344., 739584.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 781704., 784944., 788184.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 797904., 801144., 804384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 814104., 817344., 820584.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 862704., 865944., 869184.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 878904., 882144., 885384.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 895104., 898344., 901584.}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{1724220., 1734021., 1743822.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1773225., 1783026., 1792827.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1822230., 1832031., 1841832.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1969245., 1979046., 1988847.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2018250., 2028051., 2037852.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2067255., 2077056., 2086857.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2214270., 2224071., 2233872.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2263275., 2273076., 2282877.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2312280., 2322081., 2331882.}}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); @@ -207,7 +160,6 @@ TEST_F(ModulesTest, Conv3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Conv3dSameStrided) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = Conv3dOptions(3, 2, {3, 4, 5}); options.stride(1).padding(torch::kSame); Conv3d model_valid(options); @@ -222,22 +174,14 @@ TEST_F(ModulesTest, Conv3dSameStrided) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConvTranspose1d) { ConvTranspose1d model(ConvTranspose1dOptions(3, 2, 3).stride(1).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(18.).view({2, 3, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(20.).reshape({2, 2, 5}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{ 45., 104., 179., 212., 245., 188., 107.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 60., 140., 242., 293., 344., 260., 146.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 75., 176., 305., 374., 443., 332., 185.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 135., 304., 509., 542., 575., 428., 237.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 210., 460., 752., 803., 854., 620., 336.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 285., 616., 995., 1064., 1133., 812., 435.}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -250,52 +194,29 @@ TEST_F(ModulesTest, ConvTranspose1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConvTranspose2dEven) { ConvTranspose2d model(ConvTranspose2dOptions(3, 2, 3).stride(1).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(54.).view({2, 3, 3, 3})); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(50.).view({1, 2, 5, 5}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 675., 1402., 2183., 2270., 2357., 1634., 849.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1560., 3240., 5044., 5236., 5428., 3760., 1952.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2685., 5574., 8673., 8988., 9303., 6438., 3339.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3180., 6594., 10248., 10563., 10878., 7518., 3894.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3675., 7614., 11823., 12138., 12453., 8598., 4449.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2820., 5832., 9040., 9268., 9496., 6544., 3380.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1605., 3314., 5129., 5252., 5375., 3698., 1907.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 900., 1870., 2912., 3053., 3194., 2210., 1146.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2100., 4356., 6772., 7072., 7372., 5092., 2636.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3630., 7518., 11670., 12147., 12624., 8706., 4500.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4395., 9078., 14055., 14532., 15009., 10326., 5325.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5160., 10638., 16440., 16917., 17394., 11946., 6150.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 3900., 8028., 12388., 12724., 13060., 8956., 4604.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2190., 4502., 6938., 7115., 7292., 4994., 2564.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 1125., 2338., 3641., 3836., 4031., 2786., 1443.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2640., 5472., 8500., 8908., 9316., 6424., 3320.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4575., 9462., 14667., 15306., 15945., 10974., 5661.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5610., 11562., 17862., 18501., 19140., 13134., 6756.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6645., 13662., 21057., 21696., 22335., 15294., 7851.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4980., 10224., 15736., 16180., 16624., 11368., 5828.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2775., 5690., 8747., 8978., 9209., 6290., 3221.}}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -308,52 +229,29 @@ TEST_F(ModulesTest, ConvTranspose2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConvTranspose2dUneven) { ConvTranspose2d model(ConvTranspose2dOptions(3, 2, {3, 2}).stride({1, 1}).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(36.).view({2, 3, 3, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(40.).view({1, 2, 5, 4}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 360., 758., 796., 834., 440.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 832., 1752., 1836., 1920., 1012.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1432., 3014., 3152., 3290., 1732.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1696., 3566., 3704., 3842., 2020.}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1960., 4118., 4256., 4394., 2308.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1504., 3152., 3252., 3352., 1756.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 856., 1790., 1844., 1898., 992.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 480., 1010., 1072., 1134., 596.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1120., 2352., 2484., 2616., 1372.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1936., 4058., 4268., 4478., 2344.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2344., 4898., 5108., 5318., 2776.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2752., 5738., 5948., 6158., 3208.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2080., 4328., 4476., 4624., 2404.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1168., 2426., 2504., 2582., 1340.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 600., 1262., 1348., 1434., 752.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1408., 2952., 3132., 3312., 1732.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2440., 5102., 5384., 5666., 2956.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2992., 6230., 6512., 6794., 3532.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3544., 7358., 7640., 7922., 4108.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2656., 5504., 5700., 5896., 3052.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1480., 3062., 3164., 3266., 1688.}}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -366,46 +264,26 @@ TEST_F(ModulesTest, ConvTranspose2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConvTranspose3d) { ConvTranspose3d model(ConvTranspose3dOptions(2, 2, 2).stride(1).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model->weight.set_data(torch::arange(32.).reshape({2, 2, 2, 2, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(16.).reshape({1, 2, 2, 2, 2}); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{ 128., 280., 154.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 304., 664., 364.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 184., 400., 218.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 352., 768., 420.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 832., 1808., 984.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 496., 1072., 580.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 256., 552., 298.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 592., 1272., 684.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 344., 736., 394.}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 192., 424., 234.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 464., 1016., 556.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 280., 608., 330.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 544., 1184., 644.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1280., 2768., 1496.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 752., 1616., 868.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 384., 824., 442.}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 880., 1880., 1004.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 504., 1072., 570.}}}}}); ASSERT_TRUE(torch::allclose(y, expected)); @@ -418,7 +296,6 @@ TEST_F(ModulesTest, ConvTranspose3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool1d) { MaxPool1d model(MaxPool1dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -433,7 +310,6 @@ TEST_F(ModulesTest, MaxPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool1dReturnIndices) { MaxPool1d model(MaxPool1dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}, torch::requires_grad()); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -449,7 +325,6 @@ TEST_F(ModulesTest, MaxPool1dReturnIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool2dEven) { MaxPool2d model(MaxPool2dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -464,7 +339,6 @@ TEST_F(ModulesTest, MaxPool2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool2dUneven) { MaxPool2d model(MaxPool2dOptions({3, 2}).stride({2, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 4}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -479,7 +353,6 @@ TEST_F(ModulesTest, MaxPool2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool2dReturnIndices) { MaxPool2d model(MaxPool2dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}, torch::requires_grad()); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -499,7 +372,6 @@ TEST_F(ModulesTest, MaxPool2dReturnIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool3d) { MaxPool3d model(MaxPool3dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -514,7 +386,6 @@ TEST_F(ModulesTest, MaxPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxPool3dReturnIndices) { MaxPool3d model(MaxPool3dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad()); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -539,7 +410,6 @@ TEST_F(ModulesTest, MaxPool3dReturnIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AvgPool1d) { AvgPool1d model(AvgPool1dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -554,7 +424,6 @@ TEST_F(ModulesTest, AvgPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AvgPool2dEven) { AvgPool2d 
model(AvgPool2dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -569,7 +438,6 @@ TEST_F(ModulesTest, AvgPool2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AvgPool2dUneven) { AvgPool2d model(AvgPool2dOptions({3, 2}).stride({2, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 4}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -584,7 +452,6 @@ TEST_F(ModulesTest, AvgPool2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AvgPool3d) { AvgPool3d model(AvgPool3dOptions(3).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -599,7 +466,6 @@ TEST_F(ModulesTest, AvgPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, FractionalMaxPool2d) { FractionalMaxPool2d model(FractionalMaxPool2dOptions(3).output_size(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -614,7 +480,6 @@ TEST_F(ModulesTest, FractionalMaxPool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, FractionalMaxPool2dReturnIndices) { FractionalMaxPool2d model(FractionalMaxPool2dOptions(3).output_size(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5}, torch::requires_grad()); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -634,7 +499,6 @@ TEST_F(ModulesTest, FractionalMaxPool2dReturnIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, FractionalMaxPool3d) { FractionalMaxPool3d model(FractionalMaxPool3dOptions(3).output_size(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -649,7 +513,6 @@ TEST_F(ModulesTest, FractionalMaxPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, FractionalMaxPool3dReturnIndices) { FractionalMaxPool3d model(FractionalMaxPool3dOptions(3).output_size(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({2, 5, 5, 5}, torch::requires_grad()); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -678,7 +541,6 @@ TEST_F(ModulesTest, LPPool1d) { int kernel_size = 3; LPPool1d model(LPPool1dOptions(norm_type, kernel_size).stride(stride)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 1, 5}); auto y = model(x); auto expected = (torch::pow(torch::tensor({{{1, 1}}}, torch::kFloat), norm_type) * kernel_size).pow(1. / norm_type); @@ -695,7 +557,6 @@ TEST_F(ModulesTest, LPPool2d) { std::vector kernel_size({2, 3}); LPPool2d model(LPPool2dOptions(norm_type, kernel_size).stride(stride)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({1, 2, 5}); auto y = model(x); auto expected = (torch::pow(torch::tensor({{{1, 1}}}, torch::kFloat), norm_type) * (kernel_size[0] * kernel_size[1])).pow(1. 
/ norm_type); @@ -721,10 +582,8 @@ TEST_F(ModulesTest, Identity) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Flatten) { Flatten flatten; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{1, 3, 4}, {2, 5, 6}}, torch::dtype(torch::kFloat).requires_grad(true)); auto output = flatten->forward(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{1, 3, 4}, {2, 5, 6}}, torch::kFloat); auto s = output.sum(); @@ -735,17 +594,13 @@ TEST_F(ModulesTest, Flatten) { // Testing with optional arguments start_dim and end_dim Flatten flatten_optional_dims(FlattenOptions().start_dim(2).end_dim(3)); input = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{9, 10}, {11, 12}}, {{13, 14}, {15, 16}}} }, torch::dtype(torch::kFloat).requires_grad(true)); // Tensor with sizes (2, 2, 2, 2) output = flatten_optional_dims->forward(input); expected = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, 2, 3, 4}, {5, 6, 7, 8}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{9, 10, 11, 12}, {13, 14, 15, 16}} }, torch::kFloat); // Tensor with sizes (2, 2, 4) @@ -789,7 +644,6 @@ TEST_F(ModulesTest, Unflatten) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool1d) { AdaptiveMaxPool1d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{1, 2, 3, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = model(x); torch::Tensor s = y.sum(); @@ -804,7 +658,6 @@ TEST_F(ModulesTest, AdaptiveMaxPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool1dReturnIndices) { AdaptiveMaxPool1d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{1, 2, 3, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -819,9 +672,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool1dReturnIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool2dEven) { AdaptiveMaxPool2d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.resize_({2, 5, 5}).set_requires_grad(true); auto y = model(x); torch::Tensor s = y.sum(); @@ -843,9 +694,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool2dUneven) { AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.resize_({2, 5, 4}).set_requires_grad(true); auto y = model(x); torch::Tensor s = y.sum(); @@ -867,9 +716,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool2dReturnIndicesEven) { AdaptiveMaxPool2d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.resize_({2, 5, 5}).set_requires_grad(true); torch::Tensor y, 
indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -904,9 +751,7 @@ TEST_F(ModulesTest, AdaptiveMaxPool2dReturnIndicesEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool2dReturnIndicesUneven) { AdaptiveMaxPool2d model(AdaptiveMaxPool2dOptions({3, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.resize_({2, 5, 4}).set_requires_grad(true); torch::Tensor y, indices; std::tie(y, indices) = model->forward_with_indices(x); @@ -941,7 +786,6 @@ TEST_F(ModulesTest, AdaptiveMaxPool2dReturnIndicesUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool3d) { AdaptiveMaxPool3d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 64); x.resize_({1, 4, 4, 4}).set_requires_grad(true); auto y = model(x); @@ -968,7 +812,6 @@ TEST_F(ModulesTest, AdaptiveMaxPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveMaxPool3dReturnIndices) { AdaptiveMaxPool3d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 64); x.resize_({1, 4, 4, 4}).set_requires_grad(true); torch::Tensor y, indices; @@ -1010,7 +853,6 @@ TEST_F(ModulesTest, AdaptiveMaxPool3dReturnIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveAvgPool1d) { AdaptiveAvgPool1d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{1, 2, 3, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = model(x); torch::Tensor s = y.sum(); @@ -1026,9 +868,7 @@ TEST_F(ModulesTest, AdaptiveAvgPool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveAvgPool2dEven) { AdaptiveAvgPool2d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.resize_({2, 5, 5}).set_requires_grad(true); auto y = model(x); torch::Tensor s = y.sum(); @@ -1051,9 +891,7 @@ TEST_F(ModulesTest, AdaptiveAvgPool2dEven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveAvgPool2dUneven) { AdaptiveAvgPool2d model(AdaptiveAvgPool2dOptions({3, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 40); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.resize_({2, 5, 4}).set_requires_grad(true); auto y = model(x); torch::Tensor s = y.sum(); @@ -1076,7 +914,6 @@ TEST_F(ModulesTest, AdaptiveAvgPool2dUneven) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AdaptiveAvgPool3d) { AdaptiveAvgPool3d model(3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 64); x.resize_({1, 4, 4, 4}).set_requires_grad(true); auto y = model(x); @@ -1103,7 +940,6 @@ TEST_F(ModulesTest, AdaptiveAvgPool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxUnpool1d) { auto indices = torch::tensor({{{1, 3, 4}}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{2, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto model = MaxUnpool1d{3}; auto y = model->forward(x, indices); @@ -1114,10 +950,8 @@ 
TEST_F(ModulesTest, MaxUnpool1d) { ASSERT_EQ(y.sizes(), std::vector({1, 1, 9})); indices = torch::tensor({{{1, 3, 4}}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = torch::tensor({{{2, 4, 5}}}, torch::dtype(torch::kFloat).requires_grad(true)); model = MaxUnpool1d{MaxUnpool1dOptions(3).stride(2).padding(1)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y = model->forward(x, indices, std::vector({1, 1, 5})); ASSERT_EQ(y.dim(), 3); @@ -1130,7 +964,6 @@ TEST_F(ModulesTest, MaxUnpool1d) { TEST_F(ModulesTest, MaxPool1d_MaxUnpool1d) { MaxPool1d pool {MaxPool1dOptions(2).stride(2)}; MaxUnpool1d unpool {MaxUnpool1dOptions(2).stride(2)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{{1, 2, 3, 4, 5, 6, 7, 8}}}, torch::kFloat); torch::Tensor output, indices; std::tie(output, indices) = pool->forward_with_indices(input); @@ -1139,7 +972,6 @@ TEST_F(ModulesTest, MaxPool1d_MaxUnpool1d) { torch::tensor({{{0, 2, 0, 4, 0, 6, 0, 8}}} , torch::kFloat))); // Example showcasing the use of output_size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input = torch::tensor({{{1, 2, 3, 4, 5, 6, 7, 8, 9}}}, torch::kFloat); std::tie(output, indices) = pool->forward_with_indices(input); ASSERT_TRUE(torch::allclose( @@ -1153,30 +985,18 @@ TEST_F(ModulesTest, MaxPool1d_MaxUnpool1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxUnpool2d) { auto indices = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}}, torch::kLong); auto x = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{31, 33, 34}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {41, 43, 44}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {46, 48, 49}}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto model = MaxUnpool2d{MaxUnpool2dOptions(3).stride(2).padding(1)}; auto y = model->forward(x, indices); @@ -1201,11 +1021,8 @@ TEST_F(ModulesTest, MaxPool2d_MaxUnpool2d) { MaxPool2d pool {MaxPool2dOptions(2).stride(2)}; MaxUnpool2d unpool {MaxUnpool2dOptions(2).stride(2)}; auto input = torch::tensor({{{{ 1, 2, 3, 4}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5, 6, 7, 8}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9, 10, 11, 12}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13, 14, 15, 16}}}}, torch::kFloat); torch::Tensor output, indices; std::tie(output, indices) = pool->forward_with_indices(input); @@ -1227,9 +1044,7 @@ TEST_F(ModulesTest, MaxPool2d_MaxUnpool2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxUnpool3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto indices = torch::tensor({{{{{26}}}}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{{{26}}}}}, 
torch::dtype(torch::kFloat).requires_grad(true)); auto model = MaxUnpool3d{3}; auto y = model->forward(x, indices); @@ -1251,22 +1066,14 @@ TEST_F(ModulesTest, MaxUnpool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MaxUnpool3dOutputSize) { auto indices = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{{21, 23}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {29, 31}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{53, 55}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {61, 63}}}}}, torch::kLong); auto x = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{{21, 23}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {29, 31}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{53, 55}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {61, 63}}}}}, torch::dtype(torch::kFloat).requires_grad(true)); auto model = MaxUnpool3d{MaxUnpool3dOptions(3).stride(2).padding(1)}; auto y = model->forward(x, indices, std::vector({1, 1, 4, 4, 4})); @@ -1296,7 +1103,6 @@ TEST_F(ModulesTest, MaxUnpool3dOutputSize) { TEST_F(ModulesTest, MaxPool3d_MaxUnpool3d) { MaxPool3d pool {MaxPool3dOptions(3).stride(2)}; MaxUnpool3d unpool {MaxUnpool3dOptions(3).stride(2)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({20, 16, 51, 33, 15}); torch::Tensor output, indices; std::tie(output, indices) = pool->forward_with_indices(input); @@ -1307,9 +1113,7 @@ TEST_F(ModulesTest, MaxPool3d_MaxUnpool3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Linear) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear model(5, 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({10, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -1326,9 +1130,7 @@ TEST_F(ModulesTest, Linear) { ASSERT_TRUE(torch::allclose(y, y_exp)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear model(LinearOptions(5, 2).bias(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({10, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -1390,11 +1192,9 @@ TEST_F(ModulesTest, LocalResponseNorm) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, LayerNorm) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LayerNorm model(LayerNormOptions({2, 2}).eps(2e-5)); auto x = torch::randn({2, 2}, torch::requires_grad()); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = torch::layer_norm(x, {2, 2}, model->weight, model->bias, 2e-5); torch::Tensor s = y.sum(); @@ -1411,11 +1211,9 @@ TEST_F(ModulesTest, LayerNorm) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, GroupNorm) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) GroupNorm model(GroupNormOptions(2, 2).eps(2e-5)); auto x = torch::randn({2, 2}, torch::requires_grad()); auto y = model(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = torch::group_norm(x, 2, model->weight, model->bias, 2e-5); torch::Tensor s = y.sum(); @@ -1432,11 +1230,8 @@ TEST_F(ModulesTest, GroupNorm) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Bilinear) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Bilinear 
model(5, 3, 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x1 = torch::randn({10, 5}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x2 = torch::randn({10, 3}, torch::requires_grad()); auto y = model(x1, x2); torch::Tensor s = y.sum(); @@ -1457,11 +1252,8 @@ TEST_F(ModulesTest, Fold) { auto input = torch::ones({1, 3 * 2 * 2, 2}, torch::requires_grad()); auto output = model(input); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.0, 1.0}, {2.0, 2.0}, {1.0, 1.0}}}}, torch::kFloat); auto s = output.sum(); @@ -1473,7 +1265,6 @@ TEST_F(ModulesTest, Fold) { } { // input wrong dimension - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Fold model(FoldOptions({8, 8}, {3, 3})); ASSERT_THROWS_WITH( model(torch::randn({1, 3, 16, 16})), @@ -1485,25 +1276,16 @@ TEST_F(ModulesTest, Fold) { TEST_F(ModulesTest, Unfold) { { Unfold model(UnfoldOptions({2, 2}).padding(1).stride(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2., 14, torch::requires_grad()).view({1, 2, 2, 3}); auto output = model(input); auto expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{0.0, 0.0, 0.0, 6.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 0.0, 5.0, 7.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 3.0, 0.0, 0.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.0, 4.0, 0.0, 0.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 0.0, 0.0, 12.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 0.0, 11.0, 13.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0, 9.0, 0.0, 0.0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {8.0, 10.0, 0.0, 0.0}}}, torch::kFloat); auto s = output.sum(); @@ -1534,14 +1316,10 @@ TEST_F(ModulesTest, Unfold) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, SimpleContainer) { auto model = std::make_shared(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto l1 = model->add(Linear(10, 3), "l1"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto l2 = model->add(Linear(3, 5), "l2"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto l3 = model->add(Linear(5, 100), "l3"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({1000, 10}, torch::requires_grad()); x = l1(x).clamp_min(0); x = l2(x).clamp_min(0); @@ -1565,7 +1343,6 @@ TEST_F(ModulesTest, EmbeddingBasic) { // Cannot get gradients to change indices (input) - only for embedding // params - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::full({10}, dict_size - 1, torch::kInt64); auto y = model(x); torch::Tensor s = y.sum(); @@ -1581,9 +1358,7 @@ TEST_F(ModulesTest, EmbeddingBasic) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, EmbeddingList) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Embedding model(6, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::full({2, 3}, 5, torch::kInt64); auto y = model(x); torch::Tensor s = y.sum(); @@ -1597,7 +1372,6 @@ TEST_F(ModulesTest, EmbeddingList) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, EmbeddingFromPretrained) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({{1., 2.3, 3.}, {4., 5.1, 6.3}}); Embedding embedding = torch::nn::Embedding::from_pretrained(weight); auto input = torch::tensor({1}, torch::kLong); @@ -1606,7 +1380,6 @@ TEST_F(ModulesTest, EmbeddingFromPretrained) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, EmbeddingBagFromPretrained) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({{1., 2.3, 3.}, {4., 5.1, 6.3}}); EmbeddingBag embeddingbag = torch::nn::EmbeddingBag::from_pretrained(weight); auto input = torch::zeros({{1, 2}}, torch::kLong); @@ -1616,9 +1389,7 @@ TEST_F(ModulesTest, EmbeddingBagFromPretrained) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, AlphaDropout) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AlphaDropout alpha_dropout(0.5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor x = torch::ones(100, torch::requires_grad()); torch::Tensor y = alpha_dropout(x); @@ -1637,9 +1408,7 @@ TEST_F(ModulesTest, AlphaDropout) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, FeatureAlphaDropout) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) FeatureAlphaDropout feature_alpha_dropout(0.5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor x = torch::ones({10, 10}, torch::requires_grad()); torch::Tensor y = feature_alpha_dropout(x); @@ -1660,9 +1429,7 @@ TEST_F(ModulesTest, FeatureAlphaDropout) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Dropout) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout dropout(DropoutOptions(0.5).inplace(inplace)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor x = torch::ones(100); if (!inplace) { x.requires_grad_(true); @@ -1680,7 +1447,6 @@ TEST_F(ModulesTest, Dropout) { } dropout->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y = dropout(torch::ones(100)); ASSERT_EQ(y.sum().item(), 100); } @@ -1689,9 +1455,7 @@ TEST_F(ModulesTest, Dropout) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Dropout2d) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout2d dropout(Dropout2dOptions(0.5).inplace(inplace)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor x = torch::ones({10, 10}); if (!inplace) { x.requires_grad_(true); @@ -1710,7 +1474,6 @@ TEST_F(ModulesTest, Dropout2d) { } dropout->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y = dropout(torch::ones({10, 10})); ASSERT_EQ(y.sum().item(), 100); } @@ -1719,9 +1482,7 @@ TEST_F(ModulesTest, Dropout2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Dropout3d) { for (const auto inplace : {false, true}) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout3d dropout(Dropout3dOptions(0.5).inplace(inplace)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor x = torch::ones({4, 5, 5}); if (!inplace) { x.requires_grad_(true); @@ -1741,7 +1502,6 @@ TEST_F(ModulesTest, Dropout3d) { } dropout->eval(); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) y = dropout(torch::ones({4, 5, 5})); ASSERT_EQ(y.sum().item(), 100); } @@ -1775,14 +1535,12 @@ TEST_F(ModulesTest, FunctionalCallsSuppliedFunction) { was_called = true; return input; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = functional(torch::ones(5, torch::requires_grad())); ASSERT_TRUE(was_called); ASSERT_TRUE(output.equal(torch::ones(5, torch::requires_grad()))); was_called = false; // Use the call operator overload here. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output = functional(torch::ones(5, torch::requires_grad())); ASSERT_TRUE(was_called); ASSERT_TRUE(output.equal(torch::ones(5, torch::requires_grad()))); @@ -1805,7 +1563,6 @@ TEST_F(ModulesTest, FunctionalArgumentBinding) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm1dStateful) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm1d bn(5); ASSERT_TRUE(bn->options.track_running_stats()); @@ -1834,7 +1591,6 @@ TEST_F(ModulesTest, BatchNorm1dStateful) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm1dStateless) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm1d bn(BatchNorm1dOptions(5).track_running_stats(false).affine(false)); ASSERT_FALSE(bn->running_mean.defined()); @@ -1846,31 +1602,20 @@ TEST_F(ModulesTest, BatchNorm1dStateless) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm1d bn(5); bn->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. * 5 * 2).view({2, 5, 2}).requires_grad_(); auto output = bn->forward(input); auto expected = torch::tensor({{{ 0.0000, 1.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.0000, 3.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4.0000, 5.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6.0000, 7.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 8.0000, 9.0000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{10.0000, 10.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {11.9999, 12.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13.9999, 14.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {15.9999, 16.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {17.9999, 18.9999}}}); ASSERT_TRUE(output.allclose(expected)); auto s = output.sum(); @@ -1881,7 +1626,6 @@ TEST_F(ModulesTest, BatchNorm1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm2dStateful) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d bn(5); ASSERT_TRUE(bn->options.track_running_stats()); @@ -1910,7 +1654,6 @@ TEST_F(ModulesTest, BatchNorm2dStateful) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm2dStateless) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d bn(BatchNorm2dOptions(5).track_running_stats(false).affine(false)); ASSERT_FALSE(bn->running_mean.defined()); @@ -1922,51 +1665,30 @@ TEST_F(ModulesTest, BatchNorm2dStateless) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d 
bn(5); bn->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. * 5 * 2 * 2).view({2, 5, 2, 2}).requires_grad_(); auto output = bn->forward(input); auto expected = torch::tensor({{{{ 0.0000, 1.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.0000, 3.0000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 4.0000, 5.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6.0000, 7.0000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 8.0000, 9.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {10.0000, 10.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11.9999, 12.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13.9999, 14.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{15.9999, 16.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {17.9999, 18.9999}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{19.9999, 20.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21.9999, 22.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{23.9999, 24.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {25.9999, 26.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{27.9999, 28.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {29.9998, 30.9998}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{31.9998, 32.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {33.9998, 34.9998}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{35.9998, 36.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {37.9998, 38.9998}}}}); ASSERT_TRUE(output.allclose(expected)); auto s = output.sum(); @@ -1977,7 +1699,6 @@ TEST_F(ModulesTest, BatchNorm2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm3dStateful) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm3d bn(5); ASSERT_TRUE(bn->options.track_running_stats()); @@ -2006,7 +1727,6 @@ TEST_F(ModulesTest, BatchNorm3dStateful) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm3dStateless) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm3d bn(BatchNorm3dOptions(5).track_running_stats(false).affine(false)); ASSERT_FALSE(bn->running_mean.defined()); @@ -2018,91 +1738,50 @@ TEST_F(ModulesTest, BatchNorm3dStateless) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BatchNorm3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm3d bn(5); bn->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* 5 * 2 * 2 * 2).view({2, 5, 2, 2, 2}).requires_grad_(); auto output = bn->forward(input); auto expected = torch::tensor({{{{{ 0.0000, 1.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 2.0000, 3.0000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 4.0000, 5.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6.0000, 7.0000}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 8.0000, 9.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {10.0000, 10.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11.9999, 12.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13.9999, 14.9999}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{15.9999, 16.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {17.9999, 18.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{19.9999, 20.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21.9999, 22.9999}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{23.9999, 24.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {25.9999, 26.9999}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{27.9999, 28.9999}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {29.9998, 30.9998}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{31.9998, 32.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {33.9998, 34.9998}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{35.9998, 36.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {37.9998, 38.9998}}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{39.9998, 40.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {41.9998, 42.9998}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{43.9998, 44.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {45.9998, 46.9998}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{47.9998, 48.9998}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {49.9997, 50.9997}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{51.9997, 52.9997}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {53.9997, 54.9997}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{55.9997, 56.9997}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {57.9997, 58.9997}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{59.9997, 60.9997}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {61.9997, 62.9997}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{63.9997, 64.9997}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {65.9997, 66.9997}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{67.9997, 68.9997}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {69.9996, 70.9996}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{71.9996, 72.9996}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {73.9996, 74.9996}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{75.9996, 76.9996}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {77.9996, 78.9996}}}}}); ASSERT_TRUE(output.allclose(expected)); auto s = output.sum(); @@ -2113,7 +1792,6 @@ TEST_F(ModulesTest, BatchNorm3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm1dStateful) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm1d instance_norm(InstanceNorm1dOptions(5).track_running_stats(true).affine(true)); ASSERT_TRUE(instance_norm->options.track_running_stats()); @@ -2142,7 +1820,6 @@ TEST_F(ModulesTest, InstanceNorm1dStateful) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm1dStateless) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm1d instance_norm(InstanceNorm1dOptions(5).track_running_stats(false).affine(false)); ASSERT_FALSE(instance_norm->running_mean.defined()); @@ -2154,11 +1831,9 @@ TEST_F(ModulesTest, InstanceNorm1dStateless) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm1d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm1d instance_norm(5); instance_norm->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. * 5 * 2).view({2, 5, 2}).requires_grad_(); auto output = instance_norm->forward(input); auto expected = torch::tensor({{{-1.0000, 1.0000}, @@ -2180,7 +1855,6 @@ TEST_F(ModulesTest, InstanceNorm1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm2dStateful) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm2d instance_norm(InstanceNorm2dOptions(5).track_running_stats(true).affine(true)); ASSERT_TRUE(instance_norm->options.track_running_stats()); @@ -2209,7 +1883,6 @@ TEST_F(ModulesTest, InstanceNorm2dStateful) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm2dStateless) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm2d instance_norm(InstanceNorm2dOptions(5).track_running_stats(false).affine(false)); ASSERT_FALSE(instance_norm->running_mean.defined()); @@ -2221,52 +1894,30 @@ TEST_F(ModulesTest, InstanceNorm2dStateless) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm2d instance_norm(5); instance_norm->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* 5 * 2 * 2).view({2, 5, 2, 2}).requires_grad_(); auto output = instance_norm->forward(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-1.3416, -0.4472}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0.4472, 1.3416}}}}); ASSERT_TRUE(output.allclose(expected, 1e-3)); auto s = output.sum(); @@ -2277,7 +1928,6 @@ TEST_F(ModulesTest, InstanceNorm2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm3dStateful) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm3d instance_norm(InstanceNorm3dOptions(5).track_running_stats(true).affine(true)); ASSERT_TRUE(instance_norm->options.track_running_stats()); @@ -2306,7 +1956,6 @@ TEST_F(ModulesTest, InstanceNorm3dStateful) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm3dStateless) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm3d instance_norm(InstanceNorm3dOptions(5).track_running_stats(false).affine(false)); ASSERT_FALSE(instance_norm->running_mean.defined()); @@ -2318,92 +1967,50 @@ TEST_F(ModulesTest, InstanceNorm3dStateless) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, InstanceNorm3d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) InstanceNorm3d instance_norm(5); instance_norm->eval(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(2. 
* 5 * 2 * 2 * 2).view({2, 5, 2, 2, 2}).requires_grad_(); auto output = instance_norm->forward(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-1.5275, -1.0911}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6547, -0.2182}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 0.2182, 0.6547}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 1.0911, 1.5275}}}}}); ASSERT_TRUE(output.allclose(expected, 1e-3)); auto s = output.sum(); @@ -2414,11 +2021,9 @@ TEST_F(ModulesTest, InstanceNorm3d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) 
TEST_F(ModulesTest, Linear_CUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear model(5, 2); model->to(torch::kCUDA); auto x = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({10, 5}, torch::device(torch::kCUDA).requires_grad(true)); auto y = model(x); torch::Tensor s = y.sum(); @@ -2434,11 +2039,9 @@ TEST_F(ModulesTest, Linear_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Linear2_CUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear model(5, 2); model->to(torch::kCUDA); model->to(torch::kCPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({10, 5}, torch::requires_grad()); auto y = model(x); torch::Tensor s = y.sum(); @@ -2455,9 +2058,7 @@ TEST_F(ModulesTest, Linear2_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, L1Loss) { L1Loss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = loss->forward(torch::sigmoid(input), target); auto s = output.sum(); @@ -2470,9 +2071,7 @@ TEST_F(ModulesTest, L1Loss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MSELoss) { MSELoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = loss->forward(torch::sigmoid(input), target); auto s = output.sum(); @@ -2485,9 +2084,7 @@ TEST_F(ModulesTest, MSELoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, BCELoss) { BCELoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = loss->forward(torch::sigmoid(input), target); auto s = output.sum(); @@ -2500,9 +2097,7 @@ TEST_F(ModulesTest, BCELoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, KLDivLoss) { KLDivLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5,6}, torch::requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::empty({5,6}).random_(2); auto output = loss->forward(torch::sigmoid(input), target); auto s = output.sum(); @@ -2515,12 +2110,9 @@ TEST_F(ModulesTest, KLDivLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, HingeEmbeddingLoss) { HingeEmbeddingLoss loss(HingeEmbeddingLossOptions().margin(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{2, 22, 4}, {20, 10, 0}}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({{2, 6, 4}, {1, 10, 0}}, torch::kFloat); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({10}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2531,14 +2123,11 @@ TEST_F(ModulesTest, HingeEmbeddingLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, 
MultiMarginLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({0.3, 0.3, 0.4}, torch::kFloat); MultiMarginLoss loss(MultiMarginLossOptions().margin(2).weight(weight)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0.2, 0.2, 0.6}, {0.1, 0.8, 0.1}, {0.9, 0.09, 0.01}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({2, 1, 0}, torch::kLong); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.305556}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2549,15 +2138,11 @@ TEST_F(ModulesTest, MultiMarginLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, CosineEmbeddingLoss) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CosineEmbeddingLoss cos(CosineEmbeddingLossOptions().margin(0.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{2, 3, 4}, {6, 2, 4}}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{2, 3, 5}, {9, 12, 0}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({1, -1}); auto output = cos(input1, input2, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.1004}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2570,12 +2155,9 @@ TEST_F(ModulesTest, CosineEmbeddingLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, SmoothL1LossDefaultOptions) { SmoothL1Loss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.0233335, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2587,12 +2169,9 @@ TEST_F(ModulesTest, SmoothL1LossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, HuberLossDefaultOptions) { HuberLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.0233335, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2604,11 +2183,9 @@ TEST_F(ModulesTest, HuberLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MultiLabelMarginLossDefaultOptions) { MultiLabelMarginLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0.1, 0.2, 0.4, 0.8}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{3, 0, -1, 1}}, torch::kLong); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.8500}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2620,12 +2197,9 @@ TEST_F(ModulesTest, 
MultiLabelMarginLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, SmoothL1LossNoReduction) { SmoothL1Loss loss(/*reduction=*/torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.005, 0.02, 0.045}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2637,12 +2211,9 @@ TEST_F(ModulesTest, SmoothL1LossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, HuberLossNoReduction) { HuberLoss loss(/*reduction=*/torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.005, 0.02, 0.045}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2654,11 +2225,9 @@ TEST_F(ModulesTest, HuberLossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MultiLabelMarginLossNoReduction) { MultiLabelMarginLoss loss(torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0.1, 0.2, 0.4, 0.8}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{3, 0, -1, 1}}, torch::kLong); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.8500}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2669,15 +2238,11 @@ TEST_F(ModulesTest, MultiLabelMarginLossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, SmoothL1LossBeta) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = SmoothL1LossOptions().beta(0.2); SmoothL1Loss loss(options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.108333, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2688,15 +2253,11 @@ TEST_F(ModulesTest, SmoothL1LossBeta) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, HuberLossDelta) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = HuberLossOptions().delta(0.2); HuberLoss loss(options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({0.1, 1.2, 4.7}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::tensor({0., 1., 5.}, torch::kFloat); auto output = loss(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.0216666, torch::kFloat); auto s = output.sum(); 
s.backward(); @@ -2708,9 +2269,7 @@ TEST_F(ModulesTest, HuberLossDelta) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, TripletMarginLoss) { TripletMarginLoss loss(TripletMarginLossOptions().margin(1.0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto anchor = torch::tensor({{3., 3.}}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto positive = torch::tensor({{2., 2.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto negative = torch::tensor({{0., 0.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto output = loss->forward(anchor, positive, negative); @@ -2730,7 +2289,6 @@ TEST_F(ModulesTest, TripletMarginWithDistanceLossDefaultParity) { std::vector reductions = {torch::kSum, torch::kMean, torch::kNone}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector margins = {0.5, 1.0, 1.5}; std::vector swaps = {true, false}; @@ -2738,13 +2296,10 @@ TEST_F(ModulesTest, TripletMarginWithDistanceLossDefaultParity) { for (auto& margin : margins) { for (const auto swap : swaps) { auto anchor = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto positive = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto negative = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto basicOptions = TripletMarginLossOptions() @@ -2795,7 +2350,6 @@ TEST_F(ModulesTest, TripletMarginWithDistanceLossFunctionalParity) { std::vector reductions = {torch::kSum, torch::kMean, torch::kNone}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector margins = {0.5, 1.0, 1.5}; std::vector swaps = {true, false}; @@ -2817,13 +2371,10 @@ TEST_F(ModulesTest, TripletMarginWithDistanceLossFunctionalParity) { .swap(swap); auto anchor = torch::randn( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto positive = torch::randn( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); auto negative = torch::randn( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {100, 128}, torch::dtype(torch::kFloat).requires_grad(true)); TripletMarginWithDistanceLoss distanceLoss(moduleOptions); @@ -2844,16 +2395,12 @@ TEST_F(ModulesTest, TripletMarginWithDistanceLossFunctionalParity) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, NLLLoss) { NLLLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{-0.1315, -3.1315, -2.5315}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-3.7038, -0.1038, -2.6038}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-2.3422, -1.3422, -0.4422}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({1, 0, 2}, torch::kLong); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(2.4258, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2867,11 +2414,9 @@ TEST_F(ModulesTest, NLLLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, CrossEntropyLoss) { CrossEntropyLoss loss; - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{3., 3.}, {2., 2.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({0, 1}, torch::kLong); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor(0.6931, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2886,12 +2431,9 @@ TEST_F(ModulesTest, CrossEntropyLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, CosineSimilarity) { CosineSimilarity cos(CosineSimilarityOptions().dim(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{1, 2, 3}, {4, 5, 6}}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{1, 8, 3}, {2, 1, 6}}, torch::dtype(torch::kFloat).requires_grad(true)); auto output = cos->forward(input1, input2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.8078, 0.8721}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2903,11 +2445,9 @@ TEST_F(ModulesTest, CosineSimilarity) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, SoftMarginLossDefaultOptions) { SoftMarginLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({2., 4., 1., 3.}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({-1., 1., 1., -1.}, torch::kFloat); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({1.3767317}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2919,11 +2459,9 @@ TEST_F(ModulesTest, SoftMarginLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MultiLabelSoftMarginLossDefaultOptions) { MultiLabelSoftMarginLoss loss; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0., 2., 2., 0.}, {2., 1., 0., 1.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({{0., 0., 1., 0.}, {1., 0., 1., 1.}}, torch::kFloat); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.7608436}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2935,11 +2473,9 @@ TEST_F(ModulesTest, MultiLabelSoftMarginLossDefaultOptions) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, SoftMarginLossNoReduction) { SoftMarginLoss loss(torch::kNone); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({2., 4., 1., 3.}, torch::dtype(torch::kFloat).requires_grad(true)); auto target = torch::tensor({-1., 1., 1., -1.}, torch::kFloat); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({2.1269281, 0.01814993, 0.3132617, 3.0485873}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2950,15 +2486,12 @@ TEST_F(ModulesTest, SoftMarginLossNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, MultiLabelSoftMarginLossWeightedNoReduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({{0., 2., 2., 0.}, {2., 1., 0., 1.}}, torch::dtype(torch::kFloat).requires_grad(true)); auto target 
= torch::tensor({{0., 0., 1., 0.}, {1., 0., 1., 1.}}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({0.1, 0.6, 0.4, 0.8}, torch::kFloat); auto options = MultiLabelSoftMarginLossOptions().reduction(torch::kNone).weight(weight); MultiLabelSoftMarginLoss loss = MultiLabelSoftMarginLoss(options); auto output = loss->forward(input, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({0.4876902, 0.3321295}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2970,12 +2503,9 @@ TEST_F(ModulesTest, MultiLabelSoftMarginLossWeightedNoReduction) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, PairwiseDistance) { PairwiseDistance dist(PairwiseDistanceOptions().p(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input1 = torch::tensor({{1, 2, 3}, {4, 5, 6}}, torch::dtype(torch::kFloat).requires_grad(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input2 = torch::tensor({{1, 8, 3}, {2, 1, 6}}, torch::dtype(torch::kFloat).requires_grad(true)); auto output = dist->forward(input1, input2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({6, 6}, torch::kFloat); auto s = output.sum(); s.backward(); @@ -2990,7 +2520,6 @@ TEST_F(ModulesTest, ELU) { for (const auto alpha : {0.0, 0.42, 1.0, 4.2, 42.42}) { for (const auto inplace : {false, true}) { ELU model {ELUOptions().alpha(alpha).inplace(inplace)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3020,7 +2549,6 @@ TEST_F(ModulesTest, ELU) { TEST_F(ModulesTest, SELU) { for (const auto inplace : {false, true}) { SELU model(inplace); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::randn({5, 5}); if (!inplace) { input.requires_grad_(true); @@ -3050,7 +2578,6 @@ TEST_F(ModulesTest, Hardshrink) { const auto size = 3; for (const auto lambda : {-4.2, -1.0, -0.42, 0.0, 0.42, 1.0, 4.2, 42.42}) { Hardshrink model {HardshrinkOptions().lambda(lambda)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}).set_requires_grad(true); auto y = model(x); @@ -3072,7 +2599,6 @@ TEST_F(ModulesTest, Hardtanh) { for (const auto max_val : {0.42, 1.0, 4.2}) { for (const auto inplace : {false, true}) { Hardtanh model {HardtanhOptions().min_val(min_val).max_val(max_val).inplace(inplace)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3101,19 +2627,14 @@ TEST_F(ModulesTest, Hardtanh) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, HardtanhMinValGEMaxVal) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(Hardtanh{HardtanhOptions().min_val(0.42).max_val(0.42)}, "max_val must be greater than min_val"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(Hardtanh{HardtanhOptions().min_val(0.42).max_val(-0.42)}, "max_val must be greater than min_val"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Hardtanh ht {HardtanhOptions().min_val(-0.42).max_val(0.42)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ht->options.min_val(0.42); ASSERT_THROWS_WITH(ht->reset(), "max_val must 
be greater than min_val"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ht->options.max_val(-0.42); ASSERT_THROWS_WITH(ht->reset(), "max_val must be greater than min_val"); } @@ -3124,7 +2645,6 @@ TEST_F(ModulesTest, LeakyReLU) { for (const auto inplace : {false, true}) { for (const auto negative_slope : {0.0, 0.42, 1.0}) { LeakyReLU model {LeakyReLUOptions().negative_slope(negative_slope).inplace(inplace)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3152,7 +2672,6 @@ TEST_F(ModulesTest, LeakyReLU) { TEST_F(ModulesTest, LogSigmoid) { const auto size = 3; LogSigmoid model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}).set_requires_grad(true); auto y = model(x); @@ -3170,7 +2689,6 @@ TEST_F(ModulesTest, LogSigmoid) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Softmax) { Softmax m(/*dim=*/1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(10, torch::kFloat).reshape({2, 5}); auto output = m(input); auto sum = torch::sum(torch::exp(input), 1); @@ -3184,7 +2702,6 @@ TEST_F(ModulesTest, Softmax) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Softmin) { Softmin m(/*dim=*/1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(10, torch::kFloat).reshape({2, 5}); auto output = m(input); auto sum = torch::sum(torch::exp(-input), 1); @@ -3198,7 +2715,6 @@ TEST_F(ModulesTest, Softmin) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, LogSoftmax) { LogSoftmax m(/*dim=*/1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(10, torch::kFloat).reshape({2, 5}); auto output = m(input); auto sum = torch::sum(torch::exp(input), 1); @@ -3213,18 +2729,14 @@ TEST_F(ModulesTest, LogSoftmax) { TEST_F(ModulesTest, AdaptiveLogSoftmaxWithLoss) { { // log_probs actually returns log_proba - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdaptiveLogSoftmaxWithLoss asfm(AdaptiveLogSoftmaxWithLossOptions(8, 4, {2}).div_value(2.)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({4, 8}); auto logprob_out = asfm->log_prob(x); ASSERT_TRUE(torch::allclose(torch::exp(logprob_out).data().sum(1), torch::ones(4))); } { // test predict - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdaptiveLogSoftmaxWithLoss asfm(AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}).div_value(2.).head_bias(true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({64, 8}); auto logprob_out = asfm->log_prob(x); auto predict_out = asfm->predict(x); @@ -3232,20 +2744,15 @@ TEST_F(ModulesTest, AdaptiveLogSoftmaxWithLoss) { } { // cluster sizes - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdaptiveLogSoftmaxWithLoss asfm(AdaptiveLogSoftmaxWithLossOptions(16, 20, {4, 10, 15}).div_value(2.)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(100, 132, torch::kFloat).reshape({2, 16}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = torch::tensor({0, 17}, torch::kLong); auto asm_out = asfm(x, y); ASSERT_EQ(asm_out.output.sizes(), std::vector({2})); } { // forward returns the same thing as log_probs - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdaptiveLogSoftmaxWithLoss asfm(AdaptiveLogSoftmaxWithLossOptions(8, 4, {2}).div_value(2.)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({4, 8}); auto logprob_out = asfm->log_prob(x); NLLLoss nll_loss; @@ -3266,7 +2773,6 @@ TEST_F(ModulesTest, AdaptiveLogSoftmaxWithLoss) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Softmax2d) { Softmax2d m; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(24, torch::kFloat).reshape({1, 2, 3, 4}); auto output = m(input); auto sum = torch::sum(torch::exp(input), 1); @@ -3312,7 +2818,6 @@ TEST_F(ModulesTest, ReLU) { for (const auto inplace : {false, true}) { const auto size = 3; ReLU model(inplace); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3340,7 +2845,6 @@ TEST_F(ModulesTest, ReLU6) { for (const auto inplace : {false, true}) { const auto size = 3; ReLU6 model(inplace); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3353,7 +2857,6 @@ TEST_F(ModulesTest, ReLU6) { ASSERT_EQ(s.ndimension(), 0); ASSERT_EQ(y.ndimension(), 3); ASSERT_EQ(y.sizes(), std::vector({size, size, size})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y_exp = (x_orig < 0) * 0 + ((x_orig >= 0) * (x_orig <= 6)) * x_orig + (x_orig > 6) * 6; ASSERT_TRUE(torch::allclose(y, y_exp)); if (inplace) { @@ -3371,7 +2874,6 @@ TEST_F(ModulesTest, RReLU) { for (const auto upper : {0.3, 0.4, 0.5}) { for (const auto inplace : {false, true}) { RReLU model {RReLUOptions().lower(lower).upper(upper).inplace(inplace)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3403,7 +2905,6 @@ TEST_F(ModulesTest, CELU) { for (const auto inplace : {false, true}) { for (const auto alpha : {0.42, 1.0, 4.2, 42.42}) { CELU model {CELUOptions().alpha(alpha).inplace(inplace)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}); if (!inplace) { @@ -3460,7 +2961,6 @@ TEST_F(ModulesTest, GELU) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Sigmoid) { Sigmoid model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(100) * 10; auto y_exp = 1 / (1 + torch::exp(-x)); auto y = model(x); @@ -3472,21 +2972,14 @@ TEST_F(ModulesTest, Sigmoid) { TEST_F(ModulesTest, PixelShuffle) { PixelShuffle module(/*upscale_factor=*/2); auto x = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 19}, {-1, 2}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{7, 14}, {-3, 1}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0, -2}, {-12, 14}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-15, 0}, {-3, 9}}}}, torch::kFloat); auto y_exp = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 7, 19, 14}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0, -15, -2, 0}, {-1, -3, 2, 1}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-12, -3, 14, 9}}}}, torch::kFloat); auto y = module(x); @@ -3499,17 +2992,12 @@ 
TEST_F(ModulesTest, PixelShuffle) { TEST_F(ModulesTest, PixelUnshuffle) { PixelUnshuffle module(/*downscale_factor=*/2); auto x = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 7, 19, 14}, {0, -15, -2, 0}, {-1, -3, 2, 1}, {-12, -3, 14, 9}}}}, torch::kFloat); auto y_exp = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{-17, 19}, {-1, 2}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{7, 14}, {-3, 1}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0, -2}, {-12, 14}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-15, 0}, {-3, 9}}}}, torch::kFloat); auto y = module(x); @@ -3525,7 +3013,6 @@ TEST_F(ModulesTest, Softplus) { for (const auto beta : {0.5, 1.0, 2.0}) { for (const auto threshold : {1.0, 3.0, 5.0}) { Softplus model {SoftplusOptions().beta(beta).threshold(threshold)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-3.0, 3.0, 61); x.resize_({size, size, size}); auto y_exp = @@ -3545,7 +3032,6 @@ TEST_F(ModulesTest, Softshrink) { const auto size = 3; for (const auto lambda : {0.0, 0.42, 1.0, 4.2, 42.42}) { Softshrink model {/*lambda=*/lambda}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-10.0, 10.0, size * size * size); x.resize_({size, size, size}).set_requires_grad(true); auto y = model(x); @@ -3564,7 +3050,6 @@ TEST_F(ModulesTest, Softshrink) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Softsign) { Softsign model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(100) * 10; auto y_exp = x / (1 + x.abs()); auto y = model(x); @@ -3575,7 +3060,6 @@ TEST_F(ModulesTest, Softsign) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Tanh) { Tanh model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(100) * 10; auto y_exp = (x.exp() - (-x).exp()) / (x.exp() + (-x).exp()); auto y = model(x); @@ -3586,7 +3070,6 @@ TEST_F(ModulesTest, Tanh) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, Tanhshrink) { Tanhshrink model; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn(100) * 10; auto y_exp = x - x.tanh(); auto y = model(x); @@ -3601,7 +3084,6 @@ TEST_F(ModulesTest, Threshold) { for (const auto value : {0.5, 1.0, 2.0}) { for (const auto inplace : {false, true}) { Threshold model {ThresholdOptions(threshold, value).inplace(inplace)}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::linspace(-3.0, 3.0, 61); x.resize_({size, size, size}); auto x_orig = x.clone(); @@ -3661,11 +3143,9 @@ TEST_F(ModulesTest, Upsampling1D) { .scale_factor(std::vector({3})) .mode(torch::kLinear) .align_corners(false)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::zeros({1, 1, 9}); input.narrow(2, 0, 4).normal_(); auto output = model->forward(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = model->forward(input.narrow(2, 0, 5)); ASSERT_TRUE(torch::allclose(output.narrow(2, 0, 15), expected)); @@ -3828,7 +3308,6 @@ TEST_F(ModulesTest, MarginRankingLoss) { )); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MarginRankingLoss loss {MarginRankingLossOptions().margin(0.5).reduction(torch::kSum)}; const auto input1 = torch::randn(15) * 10; const auto input2 = torch::randn(15) * 10; @@ -3840,7 +3319,6 @@ 
TEST_F(ModulesTest, MarginRankingLoss) { )); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) MarginRankingLoss loss {MarginRankingLossOptions().margin(0.5).reduction(torch::kMean)}; const auto input1 = torch::randn(15) * 10; const auto input2 = torch::randn(15) * 10; @@ -3876,9 +3354,7 @@ TEST_F(ModulesTest, BCEWithLogitsLoss) { { // test BCE with logits gives same result as sigmoid and bce loss auto sigmoid = Sigmoid(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto target = torch::rand({64, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = torch::rand({64, 4}) - 0.5; ASSERT_TRUE(torch::allclose( @@ -3897,7 +3373,6 @@ TEST_F(ModulesTest, BCEWithLogitsLoss) { )); target = torch::zeros({4, 1}, torch::kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) output = torch::empty({4, 1}, torch::kFloat).fill_(-100); ASSERT_TRUE(torch::allclose( @@ -3941,7 +3416,6 @@ TEST_F(ModulesTest, BCEWithLogitsLoss) { BCEWithLogitsLossOptions().weight(weight) )(output, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight = weight.expand({16, 4}).contiguous(); auto out2 = BCEWithLogitsLoss( BCEWithLogitsLossOptions().weight(weight) @@ -3949,13 +3423,11 @@ TEST_F(ModulesTest, BCEWithLogitsLoss) { ASSERT_TRUE(torch::allclose(out1, out2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight = torch::rand({16, 1}); out1 = BCEWithLogitsLoss( BCEWithLogitsLossOptions().weight(weight) )(output, target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight = weight.expand({16, 4}).contiguous(); out2 = BCEWithLogitsLoss( BCEWithLogitsLossOptions().weight(weight) @@ -4114,12 +3586,9 @@ namespace detail { bool saved_kv = false, bool same_embed_dim = false) { std::random_device device; std::mt19937 generator(device()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::uniform_int_distribution d_2_10(2, 10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::uniform_int_distribution d_3_10(3, 10); bool registration_checked = false; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; i++) { const auto batch_sz = d_2_10(generator); const auto seq_len = d_2_10(generator); @@ -4131,7 +3600,6 @@ namespace detail { if (same_embed_dim) { kv_dim = d_model; } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::uniform_int_distribution d(5, 20); kv_dim = d(generator); while (kv_dim == d_model) { @@ -4465,23 +3933,17 @@ TEST_F(ModulesTest, PrettyPrintUnflatten) { TEST_F(ModulesTest, ReflectionPad1d) { { ReflectionPad1d m(ReflectionPad1dOptions(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 2, 4}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{2., 1., 0., 1., 2., 3., 2., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 5., 4., 5., 6., 7., 6., 5.}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { ReflectionPad1d m(ReflectionPad1dOptions({3, 1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 2, 4}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{3., 2., 1., 0., 1., 2., 3., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7., 6., 5., 4., 5., 6., 7., 6.}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } 
@@ -4491,39 +3953,25 @@ TEST_F(ModulesTest, ReflectionPad1d) { TEST_F(ModulesTest, ReflectionPad2d) { { ReflectionPad2d m(ReflectionPad2dOptions(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{8., 7., 6., 7., 8., 7., 6.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5., 4., 3., 4., 5., 4., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 1., 0., 1., 2., 1., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5., 4., 3., 4., 5., 4., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {8., 7., 6., 7., 8., 7., 6.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5., 4., 3., 4., 5., 4., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 1., 0., 1., 2., 1., 0.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { ReflectionPad2d m(ReflectionPad2dOptions({1, 1, 2, 0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{7., 6., 7., 8., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 3., 4., 5., 4.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1., 0., 1., 2., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 3., 4., 5., 4.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7., 6., 7., 8., 7.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4533,23 +3981,17 @@ TEST_F(ModulesTest, ReflectionPad2d) { TEST_F(ModulesTest, ReplicationPad1d) { { ReplicationPad1d m(ReplicationPad1dOptions(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 2, 4}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{0., 0., 0., 1., 2., 3., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 4., 5., 6., 7., 7., 7.}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { ReplicationPad1d m(ReplicationPad1dOptions({3, 1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 2, 4}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{0., 0., 0., 0., 1., 2., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 4., 4., 5., 6., 7., 7.}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4559,39 +4001,25 @@ TEST_F(ModulesTest, ReplicationPad1d) { TEST_F(ModulesTest, ReplicationPad2d) { { ReplicationPad2d m(ReplicationPad2dOptions(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{0., 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 0., 1., 2., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3., 3., 3., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 6., 
7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 6., 7., 8., 8., 8.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 6., 7., 8., 8., 8.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { ReplicationPad2d m(ReplicationPad2dOptions({1, 1, 2, 0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{0., 0., 1., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 1., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 1., 2., 2.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3., 3., 4., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 8., 8.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4601,89 +4029,54 @@ TEST_F(ModulesTest, ReplicationPad2d) { TEST_F(ModulesTest, ReplicationPad3d) { { ReplicationPad3d m(ReplicationPad3dOptions(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 1, 2, 2, 2}); auto output = m(input); auto expected = torch::tensor({{{{{0., 0., 1., 1.}, {0., 0., 1., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3.}}, {{0., 0., 1., 1.}, {0., 0., 1., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4., 4., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4., 4., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7.}}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { ReplicationPad3d m(ReplicationPad3dOptions({1, 2, 1, 2, 1, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 1, 2, 2, 2}); auto output = m(input); auto expected = torch::tensor({{{{{0., 0., 1., 1., 1.}, {0., 0., 1., 1., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3., 3.}}, {{0., 0., 1., 1., 1.}, {0., 0., 1., 1., 1.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3., 3.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2., 2., 3., 3., 3.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4., 4., 5., 5., 5.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6., 6., 7., 7., 7.}}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4693,16 +4086,12 @@ TEST_F(ModulesTest, ReplicationPad3d) { TEST_F(ModulesTest, ZeroPad2d) { { ZeroPad2d m(ZeroPad2dOptions(2)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3}); auto output = m(input); auto expected = torch::tensor({{{{0., 0., 0., 0., 0., 0., 0.}, {0., 0., 0., 0., 0., 0., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 0., 1., 2., 0., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 3., 4., 5., 0., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 6., 7., 8., 0., 0.}, {0., 0., 0., 0., 0., 0., 0.}, {0., 0., 0., 0., 0., 0., 0.}}}}, torch::kFloat); @@ -4710,16 +4099,12 @@ TEST_F(ModulesTest, ZeroPad2d) { } { ZeroPad2d m(ZeroPad2dOptions({1, 1, 2, 0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat).reshape({1, 1, 3, 3}); auto output = m(input); auto expected = torch::tensor({{{{0., 0., 0., 0., 0.}, {0., 0., 0., 0., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 0., 1., 2., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 3., 4., 5., 0.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0., 6., 7., 8., 0.}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4728,26 +4113,18 @@ TEST_F(ModulesTest, ZeroPad2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConstantPad1d) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstantPad1d m(ConstantPad1dOptions(2, 3.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 2, 4}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{3.5000, 3.5000, 0.0000, 1.0000, 2.0000, 3.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 4.0000, 5.0000, 6.0000, 7.0000, 3.5000, 3.5000}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstantPad1d m(ConstantPad1dOptions({3, 1}, 3.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(6, torch::kFloat).reshape({1, 2, 3}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{3.5000, 3.5000, 3.5000, 0.0000, 1.0000, 2.0000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 
3.5000, 3.5000, 3.0000, 4.0000, 5.0000, 3.5000}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4756,38 +4133,25 @@ TEST_F(ModulesTest, ConstantPad1d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConstantPad2d) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstantPad2d m(ConstantPad2dOptions(2, 3.5)); auto input = torch::arange(4, torch::kFloat).reshape({1, 2, 2}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 0.0000, 1.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 2.0000, 3.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstantPad2d m(ConstantPad2dOptions({3, 0, 2, 1}, 3.5)); auto input = torch::arange(4, torch::kFloat).reshape({1, 2, 2}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 0.0000, 1.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 2.0000, 3.0000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4796,100 +4160,55 @@ TEST_F(ModulesTest, ConstantPad2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, ConstantPad3d) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstantPad3d m(ConstantPad3dOptions(1, 3.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 1, 2, 2, 2}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 0.0000, 1.0000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 2.0000, 3.0000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 4.0000, 5.0000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 6.0000, 7.0000, 3.5000}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000}}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ConstantPad3d m(ConstantPad3dOptions({1, 2, 1, 2, 1, 2}, 3.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(8, torch::kFloat).reshape({1, 1, 2, 2, 2}); auto output = m(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 0.0000, 1.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 2.0000, 3.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 4.0000, 5.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 6.0000, 7.0000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.5000, 3.5000, 3.5000, 3.5000, 3.5000}}}}}, torch::kFloat); ASSERT_TRUE(output.allclose(expected)); } @@ -4898,19 +4217,12 @@ TEST_F(ModulesTest, ConstantPad3d) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, CrossMapLRN2d) { /// size 3, default options - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::arange(9, torch::kFloat32).view({1, 1, 3, 3}).requires_grad_(true); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{0.00000000, 0.99997497, 1.99980010}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.99932500, 3.99840070, 4.99687700}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5.99460600, 6.99143740, 7.98722360}}}}, torch::kFloat32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto grad_expected = torch::tensor({{{{1.00000000, 0.99992496, 0.99970007}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.99932520, 0.99880093, 0.99812720}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.99730474, 0.99633380, 0.99521490}}}}, torch::kFloat32); auto crossmaplrn2d = CrossMapLRN2d(3); auto output = crossmaplrn2d(input); @@ -4920,50 +4232,34 @@ TEST_F(ModulesTest, CrossMapLRN2d) { ASSERT_TRUE(output.allclose(expected)); /// size change - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) crossmaplrn2d = CrossMapLRN2d(CrossMapLRN2dOptions(4).alpha(1e-4).beta(0.75).k(1)); output = crossmaplrn2d(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0.00000000, 0.99998120, 1.99985000}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.99949400, 3.99880050, 4.99765800}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5.99595300, 6.99357600, 7.99041300}}}}, torch::kFloat32); ASSERT_TRUE(output.allclose(expected)); /// alpha change - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) crossmaplrn2d = CrossMapLRN2d(CrossMapLRN2dOptions(3).alpha(1e-3).beta(0.75).k(1)); output = crossmaplrn2d(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0.00000000, 0.99975010, 1.99800230}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.99326750, 3.98407440, 4.96897600}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5.94656100, 6.91545720, 7.87434340}}}}, torch::kFloat32); ASSERT_TRUE(output.allclose(expected)); /// beta change - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) crossmaplrn2d = CrossMapLRN2d(CrossMapLRN2dOptions(3).alpha(1e-4).beta(0.95).k(1)); output = crossmaplrn2d(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0.00000000, 0.99996830, 1.99974680}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.99914500, 3.99797440, 4.99604460}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {5.99316840, 6.98915600, 7.98382000}}}}, torch::kFloat32); ASSERT_TRUE(output.allclose(expected)); /// k change - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) crossmaplrn2d = CrossMapLRN2d(CrossMapLRN2dOptions(3).alpha(1e-4).beta(0.75).k(2)); output = crossmaplrn2d(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{{{0.00000000, 0.59459610, 1.18914770}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.78361000, 2.37793870, 2.97208900}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3.56601700, 4.15967700, 4.75302650}}}}, torch::kFloat32); ASSERT_TRUE(output.allclose(expected)); } @@ -4975,20 +4271,14 @@ TEST_F(ModulesTest, RNNCell) { auto input = torch::randn({3, 1}); auto hx = torch::randn({3, 2}); 
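The RNNCell check that begins here (and the LSTMCell/GRUCell checks that follow) seeds the RNG, runs one forward pass, and compares against hard-coded reference tensors with loose tolerances (rtol 1e-05, atol 2e-04). A minimal self-contained sketch of that pattern, with assumed RNNCell sizes (1 input feature, 2 hidden units) matching the {3, 1} input and {3, 2} hidden state used in the test:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Seed, run one forward pass, and compare with loose tolerances --
  // the same pattern the cell tests use. Sizes mirror the test shapes.
  torch::manual_seed(0);
  torch::nn::RNNCell rnn(1, 2);          // assumed: input_size=1, hidden_size=2
  auto input = torch::randn({3, 1});     // batch of 3, 1 feature
  auto hx = torch::randn({3, 2});        // batch of 3, 2 hidden units
  auto output = rnn(input, hx);
  // The tests hard-code the reference numbers; here a repeated forward pass
  // stands in for the reference, since the computation is deterministic.
  auto reference = rnn(input, hx);
  std::cout << std::boolalpha
            << torch::allclose(output, reference, /*rtol=*/1e-05, /*atol=*/2e-04)
            << "\n";  // true
  return 0;
}
```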
auto output = rnn(input, hx); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{-0.5078, 0.4380}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.7215, 0.2969}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1304, 0.0653}}); ASSERT_TRUE(torch::allclose(output, expected, 1e-05, 2e-04)); output = rnn(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{-0.0775, 0.6688}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.0734, 0.4759}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.0725, 0.4225}}); ASSERT_TRUE(torch::allclose(output, expected, 1e-05, 2e-04)); } @@ -5003,17 +4293,11 @@ TEST_F(ModulesTest, LSTMCell) { auto output = rnn(input, std::make_tuple(hx, cx)); auto output_hx = std::get<0>(output); auto output_cx = std::get<1>(output); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected_hx = torch::tensor({{-0.2462, 0.0810}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.2206, 0.1867}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.0146, 0.0429}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected_cx = torch::tensor({{-0.4480, 0.1071}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.6245, 0.2687}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.0322, 0.0518}}); ASSERT_TRUE(torch::allclose(output_hx, expected_hx, 1e-05, 2e-04)); ASSERT_TRUE(torch::allclose(output_cx, expected_cx, 1e-05, 2e-04)); @@ -5021,17 +4305,11 @@ TEST_F(ModulesTest, LSTMCell) { output = rnn(input); output_hx = std::get<0>(output); output_cx = std::get<1>(output); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected_hx = torch::tensor({{-0.1331, 0.1634}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1494, 0.2869}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1428, 0.2263}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected_cx = torch::tensor({{-0.2679, 0.2180}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3049, 0.3493}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.2896, 0.2853}}); ASSERT_TRUE(torch::allclose(output_hx, expected_hx, 1e-05, 2e-04)); ASSERT_TRUE(torch::allclose(output_cx, expected_cx, 1e-05, 2e-04)); @@ -5044,20 +4322,14 @@ TEST_F(ModulesTest, GRUCell) { auto input = torch::randn({3, 1}); auto hx = torch::randn({3, 2}); auto output = rnn(input, hx); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{ 1.0243, 0.3227}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.5659, 0.0330}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.4030, -0.2800}}); ASSERT_TRUE(torch::allclose(output, expected, 1e-05, 2e-04)); output = rnn(input); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{-0.0085, 0.1095}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1291, 0.2675}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1339, 0.2725}}); ASSERT_TRUE(torch::allclose(output, expected, 1e-05, 2e-04)); } @@ -5561,10 +4833,8 @@ TEST_F(ModulesTest, PrettyPrintTripletMarginWithDistanceLoss) { auto distanceOptions = TripletMarginWithDistanceLossOptions() .distance_function([&](const torch::Tensor& x, const torch::Tensor& y) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return torch::pairwise_distance(x, y, 2.0, 1e-6); }) - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .margin(1.5) .swap(true) .reduction(torch::kMean); @@ -5696,7 +4966,6 @@ TEST_F(ModulesTest, PrettyPrintNestedModel) { InnerTestModule() : torch::nn::Module("InnerTestModule"), fc(register_module("fc", torch::nn::Linear(3, 4))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) table(register_module("table", torch::nn::Embedding(10, 2))) {} torch::nn::Linear fc; @@ -5706,9 +4975,7 @@ TEST_F(ModulesTest, PrettyPrintNestedModel) { struct TestModule : torch::nn::Module { TestModule() : torch::nn::Module("TestModule"), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fc(register_module("fc", torch::nn::Linear(4, 5))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) table(register_module("table", torch::nn::Embedding(EmbeddingOptions(10, 2)))), inner(register_module("inner", std::make_shared())) { } @@ -6001,7 +5268,6 @@ TEST_F(ModulesTest, PrettyPrintGRUCell) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(ModulesTest, PrettyPrintAdaptiveLogSoftmaxWithLoss) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdaptiveLogSoftmaxWithLoss asfm(AdaptiveLogSoftmaxWithLossOptions(8, 4, {2}).div_value(2.)); ASSERT_EQ( c10::str(asfm), @@ -6016,7 +5282,6 @@ TEST_F(ModulesTest, PrettyPrintAdaptiveLogSoftmaxWithLoss) { ")"); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdaptiveLogSoftmaxWithLoss asfm(AdaptiveLogSoftmaxWithLossOptions(8, 10, {4, 8}).div_value(2.).head_bias(true)); ASSERT_EQ( c10::str(asfm), diff --git a/test/cpp/api/nn_utils.cpp b/test/cpp/api/nn_utils.cpp index 3caebea52a8ab..019f9c0c34a82 100644 --- a/test/cpp/api/nn_utils.cpp +++ b/test/cpp/api/nn_utils.cpp @@ -18,7 +18,6 @@ struct PackedSequenceTest : torch::test::SeedingFixture {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(NNUtilsTest, ClipGradNorm) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto l = Linear(10, 10); float max_norm = 2; auto compute_norm = [&](float norm_type) -> float { @@ -52,19 +51,13 @@ TEST_F(NNUtilsTest, ClipGradNorm) { }; std::vector grads = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(1.0, 101).view({10, 10}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::ones({10}).div(1000), }; std::vector norm_types = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 2.0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4.0, std::numeric_limits::infinity(), }; @@ -84,9 +77,7 @@ TEST_F(NNUtilsTest, ClipGradNorm) { } // Small gradients should be left unchanged grads = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::rand({10, 10}).div(10000), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::ones(10).div(500), }; for (auto norm_type : norm_types) { @@ -104,11 +95,8 @@ TEST_F(NNUtilsTest, ClipGradNorm) { ASSERT_EQ(scaled[0].item().toFloat(), 1); } // should accept a single tensor as input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p1 = torch::randn({10, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p2 = torch::randn({10, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto g = torch::arange(1., 101).view({10, 10}); p1.mutable_grad() = g.clone(); p2.mutable_grad() = g.clone(); @@ -128,15 +116,10 @@ TEST_F(NNUtilsTest, 
ClipGradNormErrorIfNonfinite) { using Vector = std::vector; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector norms_pos = {0.1, 1, 2, 3.5, inf}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector norms_neg = {-0.1, -1, -2, -3.5}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector norms_neg_plus_0 = {0, -0.1, -1, -2, -3.5}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector norms_except_0 = {0.1, 1, 2, 3.5, inf, -0.1, -1, -2, -3.5}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector norms_all = {0, 0.1, 1, 2, 3.5, inf, -0.1, -1, -2, -3.5}; // Each entry in test_cases has the following values, in this order: @@ -169,35 +152,23 @@ TEST_F(NNUtilsTest, ClipGradNormErrorIfNonfinite) { std::make_tuple(true, true, Vector({nan}), norms_except_0, Vector({0})), // Test a grad that should never error - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::make_tuple(false, false, Vector({2e22, -2e22}), Vector(), norms_all), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::make_tuple(false, true, Vector({2e22, -2e22}), Vector(), norms_all), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::make_tuple(true, false, Vector({2e22, -2e22}), Vector(), norms_all), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::make_tuple(true, true, Vector({2e22, -2e22}), Vector(), norms_all), // Test a grad that will overflow to inf for only some norm orders std::make_tuple( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) false, false, Vector({2e200, -2e200}), Vector({3.5, 2, -2, -3.5}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector({inf, 1, 0.1, 0, -1, -0.1})), std::make_tuple( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) false, true, Vector({2e200, -2e200}), Vector({3.5, 2}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector({inf, 1, 0.1, 0, -1, -0.1, -2, -3.5})), std::make_tuple( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) true, false, Vector({2e200, -2e200}), Vector({3.5, 2}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector({inf, 1, 0.1, 0, -1, -0.1, -2, -3.5})), std::make_tuple( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) false, true, Vector({2e200, -2e200}), Vector({3.5, 2}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Vector({inf, 1, 0.1, 0, -1, -0.1, -2, -3.5})), }); @@ -206,7 +177,6 @@ TEST_F(NNUtilsTest, ClipGradNormErrorIfNonfinite) { bool grad_only_one_elem, bool prefix_finite_grad_param, torch::DeviceType device_type) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto param = torch::ones(10, torch::TensorOptions().dtype(torch::kDouble).device(device_type).requires_grad(true)); if (grad_only_one_elem) { param[1].mul(scalar).sum().backward(); @@ -310,14 +280,10 @@ TEST_F(NNUtilsTest, ClipGradNormErrorIfNonfinite) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(NNUtilsTest, ClipGradValue) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto l = Linear(10, 10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float clip_value = 2.5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor grad_w = torch::arange(-50., 50).view({10, 10}).div_(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor grad_b = torch::ones({10}).mul_(2); std::vector> grad_lists = { {grad_w, grad_b}, {grad_w, torch::Tensor()}}; @@ -340,11 +306,8 @@ TEST_F(NNUtilsTest, ClipGradValue) { } 
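ClipGradValue clamps every gradient entry to [-clip_value, clip_value] and, as the following lines note, also accepts a single tensor. A minimal sketch of that clamping path, assuming the torch::nn::utils::clip_grad_value_ helper these tests exercise; the sizes and the 2.5 clip value mirror the test:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  // Assign a synthetic gradient, then clamp it element-wise.
  auto p = torch::randn({10, 10});
  p.mutable_grad() = torch::arange(-50., 50).view({10, 10}).div_(5);  // grads in [-10, 10)
  std::vector<torch::Tensor> params = {p};
  torch::nn::utils::clip_grad_value_(params, /*clip_value=*/2.5);
  // Every gradient entry now lies in [-2.5, 2.5].
  std::cout << p.grad().abs().max().item<float>() << "\n";
  return 0;
}
```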
// Should accept a single Tensor as input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p1 = torch::randn({10, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto p2 = torch::randn({10, 10}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto g = torch::arange(-50., 50).view({10, 10}).div_(5); p1.mutable_grad() = g.clone(); p2.mutable_grad() = g.clone(); @@ -356,31 +319,22 @@ TEST_F(NNUtilsTest, ClipGradValue) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(NNUtilsTest, ConvertParameters) { std::vector parameters{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(9, torch::kFloat32), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(9, torch::kFloat32).view({3, 3}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(8, torch::kFloat32).view({2, 2, 2}) }; auto expected = torch::cat({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(9, torch::kFloat32), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(9, torch::kFloat32).view(-1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::arange(8, torch::kFloat32).view(-1) }); auto vector = utils::parameters_to_vector(parameters); ASSERT_TRUE(vector.allclose(expected)); std::vector zero_parameters{ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::zeros({9}, torch::kFloat32), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::zeros({9}, torch::kFloat32).view({3, 3}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::zeros({8}, torch::kFloat32).view({2, 2, 2}) }; @@ -390,9 +344,7 @@ TEST_F(NNUtilsTest, ConvertParameters) { } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto conv1 = Conv2d(3, 10, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto fc1 = Linear(10, 20); auto model = Sequential(conv1, fc1); @@ -400,13 +352,10 @@ TEST_F(NNUtilsTest, ConvertParameters) { ASSERT_EQ(vec.size(0), 980); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto conv1 = Conv2d(3, 10, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto fc1 = Linear(10, 20); auto model = Sequential(conv1, fc1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto vec = torch::arange(0., 980); utils::vector_to_parameters(vec, model->parameters()); @@ -429,7 +378,6 @@ std::vector PackedSequenceTest_ordered_sequence(torch::ScalarType }, tensor_type)); } for (auto& s : seqs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s.random_(-128, 128); } sort( @@ -473,9 +421,7 @@ void assert_is_same_packed_sequence(const rnn_utils::PackedSequence& a, const rn // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(PackedSequenceTest, WrongOrder) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = torch::ones({25, 300}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = torch::ones({22, 300}); auto b_a = rnn_utils::pad_sequence({b, a}); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) @@ -576,12 +522,9 @@ TEST_F(NNUtilsTest, PackSequence) { // single dimensional auto a = torch::tensor({1, 2, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = torch::tensor({4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = torch::tensor({6}); rnn_utils::PackedSequence packed = rnn_utils::pack_sequence({a, b, c}, /*enforce_sorted=*/false); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({1, 4, 6, 2, 5, 3}); ASSERT_TRUE(torch::allclose(packed.batch_sizes(), torch::tensor({3, 2, 1}))); ASSERT_TRUE(torch::allclose(packed.data(), expected)); @@ -610,7 +553,6 @@ TEST_F(NNUtilsTest, PackSequence) { "You can pass `enforce_sorted=False`"); // more dimensions - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t maxlen = 9; for (int64_t num_dim : std::vector{0, 1, 2, 3}) { std::vector sequences; @@ -619,7 +561,6 @@ TEST_F(NNUtilsTest, PackSequence) { for (int64_t i = maxlen; i > 0; i--) { int64_t seq_len = i * i; lengths_vec.emplace_back(seq_len); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector tensor_sizes{seq_len, 5}; tensor_sizes.insert( tensor_sizes.end(), @@ -683,7 +624,6 @@ TEST_F(NNUtilsTest, PackPaddedSequence) { std::vector tensors_to_be_cat; for (int64_t i = 1; i < sorted_lengths.size() + 1; i++) { int64_t l = sorted_lengths.at(i-1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensors_to_be_cat.emplace_back(pad(i * 100 + torch::arange(1., 5 * l + 1).view({l, 1, 5}), max_length)); } auto padded = torch::cat(tensors_to_be_cat, 1); @@ -691,7 +631,6 @@ TEST_F(NNUtilsTest, PackPaddedSequence) { for (int64_t n = 0; n < batch_sizes.size(0); n++) { int64_t batch_size = batch_sizes[n].item(); for (int64_t i = 0; i < batch_size; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected_data_vec.emplace_back(torch::arange(1., 6) + (i + 1) * 100 + 5 * n); } } @@ -723,11 +662,8 @@ TEST_F(NNUtilsTest, PackPaddedSequence) { std::vector, bool>> test_cases = { // sorted_lengths, should_shuffle - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{10, 8, 4, 2, 2, 2, 1}, false}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 10, 8, 6, 4, 3, 1}, false}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 10, 8, 6, 4, 3, 1}, true} }; @@ -780,7 +716,6 @@ TEST_F(NNUtilsTest, PackPaddedSequence) { ASSERT_TRUE(torch::allclose( padded.grad().narrow(0, 0, l).select(1, i), grad_output.narrow(0, 0, l).select(1, i))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (l < 10) { ASSERT_EQ( padded.grad().narrow(0, l, padded.grad().size(0) - l).select(1, i).abs().sum().item(), @@ -811,15 +746,12 @@ TEST_F(NNUtilsTest, PadSequence) { // single dimensional auto a = torch::tensor({1, 2, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = torch::tensor({4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = torch::tensor({6}); torch::Tensor expected, padded; // batch_first = true - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{4, 5, 0}, {1, 2, 3}, {6, 0, 0}}); padded = rnn_utils::pad_sequence({b, a, c}, true); ASSERT_TRUE(padded.allclose(expected)); @@ -829,26 +761,22 @@ TEST_F(NNUtilsTest, PadSequence) { ASSERT_TRUE(padded.allclose(expected.transpose(0, 1))); // pad with non-zero value - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{4, 5, 1}, {1, 2, 3}, {6, 1, 1}}); padded = rnn_utils::pad_sequence({b, a, c}, true, 1); ASSERT_TRUE(padded.allclose(expected)); // Test pad sorted sequence - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) expected = torch::tensor({{1, 2, 3}, {4, 5, 0}, {6, 0, 0}}); padded = rnn_utils::pad_sequence({a, b, c}, true); ASSERT_TRUE(padded.allclose(expected)); // more dimensions - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t maxlen = 9; for 
(int64_t num_dim : std::vector{0, 1, 2, 3}) { std::vector sequences; std::vector trailing_dims(num_dim, 4); for (int64_t i = 1; i < maxlen + 1; i++) { int64_t seq_len = i * i; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector tensor_sizes{seq_len, 5}; tensor_sizes.insert( tensor_sizes.end(), diff --git a/test/cpp/api/optim.cpp b/test/cpp/api/optim.cpp index f7c7e0c1805bf..2122c8819dd7b 100644 --- a/test/cpp/api/optim.cpp +++ b/test/cpp/api/optim.cpp @@ -21,10 +21,8 @@ bool test_optimizer_xor(Options options) { torch::manual_seed(0); Sequential model( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear(2, 8), Functional(torch::sigmoid), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear(8, 1), Functional(torch::sigmoid)); @@ -35,7 +33,6 @@ bool test_optimizer_xor(Options options) { float running_loss = 1; int epoch = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) while (running_loss > 0.1) { auto inputs = torch::empty({kBatchSize, 2}); auto labels = torch::empty({kBatchSize}); @@ -104,20 +101,15 @@ void check_exact_values( assign_parameter( parameters, "0.weight", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::tensor({-0.2109, -0.4976, -0.1413, -0.3420, -0.2524, 0.6976}, torch::kFloat64)); assign_parameter( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) parameters, "0.bias", torch::tensor({-0.1085, -0.2979, 0.6892}, torch::kFloat64)); assign_parameter( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) parameters, "2.weight", torch::tensor({-0.0508, -0.3941, -0.2843}, torch::kFloat64)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assign_parameter(parameters, "2.bias", torch::tensor({-0.0711}, torch::kFloat64)); auto optimizer = OptimizerClass(parameters.values(), options); torch::Tensor input = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::tensor({0.1, 0.2, 0.3, 0.4, 0.5, 0.6}, torch::kFloat64).reshape({3, 2}); for (size_t i = 0; i < kIterations; ++i) { @@ -126,7 +118,6 @@ void check_exact_values( auto loss = output.sum(); loss.backward(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto closure = []() { return torch::tensor({10}); }; optimizer.step(closure); @@ -138,7 +129,6 @@ void check_exact_values( // Always compare using double dtype, regardless of the original dtype of the tensors auto computed = parameters[p]->flatten().to(torch::kFloat64); auto expected = expected_parameters.at(i / kSampleEvery).at(p).to(torch::kFloat64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!computed.allclose(expected, /*rtol=*/1e-3, /*atol=*/5e-4)) { std::cout << "Iteration " << i << ": " << computed << " != " << expected << " (parameter " << p << ")" @@ -155,7 +145,6 @@ TEST(OptimTest, OptimizerAccessors) { auto options = AdagradOptions(1.0); std::vector params; for (size_t i = 0; i < 3; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) params.push_back(torch::randn(10)); } auto optimizer = Adagrad(params, options); @@ -301,7 +290,6 @@ TEST(OptimTest, ProducesPyTorchValues_Adam) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_AdamWithWeightDecay) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdamOptions(1.0).weight_decay(1e-2), expected_parameters::Adam_with_weight_decay()); } @@ -309,7 +297,6 @@ TEST(OptimTest, ProducesPyTorchValues_AdamWithWeightDecay) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) 
TEST(OptimTest, ProducesPyTorchValues_AdamWithWeightDecayAndAMSGrad) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdamOptions(1.0).weight_decay(1e-6).amsgrad(true), expected_parameters::Adam_with_weight_decay_and_amsgrad()); } @@ -353,7 +340,6 @@ TEST(OptimTest, ProducesPyTorchValues_Adagrad) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_AdagradWithWeightDecay) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdagradOptions(1.0).weight_decay(1e-2), expected_parameters::Adagrad_with_weight_decay()); } @@ -361,7 +347,6 @@ TEST(OptimTest, ProducesPyTorchValues_AdagradWithWeightDecay) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_AdagradWithWeightDecayAndLRDecay) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdagradOptions(1.0).weight_decay(1e-6).lr_decay(1e-3), expected_parameters::Adagrad_with_weight_decay_and_lr_decay()); } @@ -369,14 +354,12 @@ TEST(OptimTest, ProducesPyTorchValues_AdagradWithWeightDecayAndLRDecay) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_RMSprop) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RMSpropOptions(0.1), expected_parameters::RMSprop()); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_RMSpropWithWeightDecay) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RMSpropOptions(0.1).weight_decay(1e-2), expected_parameters::RMSprop_with_weight_decay()); } @@ -384,7 +367,6 @@ TEST(OptimTest, ProducesPyTorchValues_RMSpropWithWeightDecay) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_RMSpropWithWeightDecayAndCentered) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RMSpropOptions(0.1).weight_decay(1e-6).centered(true), expected_parameters::RMSprop_with_weight_decay_and_centered()); } @@ -394,7 +376,6 @@ TEST( OptimTest, ProducesPyTorchValues_RMSpropWithWeightDecayAndCenteredAndMomentum) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RMSpropOptions(0.1).weight_decay(1e-6).centered(true).momentum(0.9), expected_parameters:: RMSprop_with_weight_decay_and_centered_and_momentum()); @@ -402,14 +383,12 @@ TEST( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_SGD) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) check_exact_values(SGDOptions(0.1), expected_parameters::SGD()); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_SGDWithWeightDecay) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SGDOptions(0.1).weight_decay(1e-2), expected_parameters::SGD_with_weight_decay()); } @@ -417,7 +396,6 @@ TEST(OptimTest, ProducesPyTorchValues_SGDWithWeightDecay) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_SGDWithWeightDecayAndMomentum) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SGDOptions(0.1).weight_decay(1e-2).momentum(0.9), expected_parameters::SGD_with_weight_decay_and_momentum()); } @@ -425,7 +403,6 @@ TEST(OptimTest, ProducesPyTorchValues_SGDWithWeightDecayAndMomentum) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(OptimTest, ProducesPyTorchValues_SGDWithWeightDecayAndNesterovMomentum) { check_exact_values( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SGDOptions(0.1).weight_decay(1e-6).momentum(0.9).nesterov(true), expected_parameters::SGD_with_weight_decay_and_nesterov_momentum()); } @@ -448,16 +425,13 @@ TEST(OptimTest, ProducesPyTorchValues_LBFGS_with_line_search) { TEST(OptimTest, ZeroGrad) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear model(2, 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SGD optimizer(model->parameters(), 0.1); for (const auto& parameter : model->parameters()) { ASSERT_FALSE(parameter.grad().defined()); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto output = model->forward(torch::ones({5, 2})); auto loss = output.sum(); loss.backward(); @@ -502,7 +476,6 @@ TEST(OptimTest, ExternalVectorOfParameters) { TEST(OptimTest, AddParameter_LBFGS) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector parameters = {torch::randn({5, 5})}; std::vector original_parameters = {parameters[0].clone()}; @@ -553,7 +526,6 @@ void check_lr_change( TEST(OptimTest, CheckLRChange_StepLR_Adam) { torch::Tensor parameters = torch::zeros({1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optimizer = Adam({parameters}, AdamOptions().lr(1e-3)); const unsigned step_size = 20; diff --git a/test/cpp/api/parallel_benchmark.cpp b/test/cpp/api/parallel_benchmark.cpp index df21becba074c..70567edf2eeaf 100644 --- a/test/cpp/api/parallel_benchmark.cpp +++ b/test/cpp/api/parallel_benchmark.cpp @@ -79,12 +79,10 @@ void AtLaunch_WithData(int32_t numIters, int32_t vecSize) { } int main(int argc, char** argv) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int32_t N = 1000000; AtLaunch_Base(N); AtLaunch_WithData(N, 0); AtLaunch_WithData(N, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AtLaunch_WithData(N, 256); return 0; } diff --git a/test/cpp/api/parameterdict.cpp b/test/cpp/api/parameterdict.cpp index 363c66495da9a..03427df193839 100644 --- a/test/cpp/api/parameterdict.cpp +++ b/test/cpp/api/parameterdict.cpp @@ -80,16 +80,11 @@ TEST_F(ParameterDictTest, SimpleUpdate) { ParameterDict wrongDict; ParameterDict rightDict; dict->insert("A", torch::tensor({1.0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict->insert("B", torch::tensor({2.0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dict->insert("C", torch::tensor({3.0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) wrongDict->insert("A", torch::tensor({5.0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) wrongDict->insert("D", torch::tensor({5.0})); ASSERT_THROWS_WITH(dict->update(*wrongDict), "Parameter 'D' is not defined"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) rightDict->insert("A", torch::tensor({5.0})); dict->update(*rightDict); ASSERT_EQ(dict->size(), 3); @@ -100,9 +95,7 @@ TEST_F(ParameterDictTest, SimpleUpdate) { TEST_F(ParameterDictTest, Keys) { torch::OrderedDict params = { {"a", torch::tensor({1.0})}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"b", torch::tensor({2.0})}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"c", torch::tensor({1.0, 2.0})}}; auto dict = torch::nn::ParameterDict(params); std::vector keys = dict->keys(); @@ -147,11 +140,8 @@ TEST_F(ParameterDictTest, Get) { 
TEST_F(ParameterDictTest, PrettyPrintParameterDict) { torch::OrderedDict params = { {"a", torch::tensor({1.0})}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"b", torch::tensor({2.0, 1.0})}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"c", torch::tensor({{3.0}, {2.1}})}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"d", torch::tensor({{3.0, 1.3}, {1.2, 2.1}})}}; auto dict = torch::nn::ParameterDict(params); ASSERT_EQ( diff --git a/test/cpp/api/rnn.cpp b/test/cpp/api/rnn.cpp index 36d5bc66261eb..135a797b3c663 100644 --- a/test/cpp/api/rnn.cpp +++ b/test/cpp/api/rnn.cpp @@ -11,7 +11,6 @@ template bool test_RNN_xor(Func&& model_maker, bool cuda = false) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto nhid = 32; auto model = std::make_shared(); auto l1 = model->add(Linear(1, nhid), "l1"); @@ -23,7 +22,6 @@ bool test_RNN_xor(Func&& model_maker, bool cuda = false) { } auto lo = model->add(Linear(nout, 1), "lo"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::optim::Adam optimizer(model->parameters(), 1e-2); auto forward_op = [&](torch::Tensor x) { auto T = x.size(0); @@ -41,13 +39,9 @@ bool test_RNN_xor(Func&& model_maker, bool cuda = false) { float running_loss = 1; int epoch = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto max_epoch = 1500; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) while (running_loss > 1e-2) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto bs = 16U; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto nlen = 5U; const auto backend = cuda ? torch::kCUDA : torch::kCPU; @@ -134,10 +128,8 @@ struct RNNTest : torch::test::SeedingFixture {}; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(RNNTest, CheckOutputSizes) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM model(LSTMOptions(128, 64).num_layers(3).dropout(0.2)); // Input size is: sequence length, batch size, input size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({10, 16, 128}, torch::requires_grad()); auto output = model->forward(x); auto y = x.mean(); @@ -163,10 +155,8 @@ TEST_F(RNNTest, CheckOutputSizes) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(RNNTest, CheckOutputSizesProj) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM model(LSTMOptions(128, 64).num_layers(3).dropout(0.2).proj_size(32)); // Input size is: sequence length, batch size, input size - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({10, 16, 128}, torch::requires_grad()); auto output = model->forward(x); auto y = x.mean(); @@ -220,11 +210,8 @@ TEST_F(RNNTest, CheckOutputValuesMatchPyTorch) { auto flat = std::get<0>(out).view(3 * 4 * 2); // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) float c_out[] = {0.4391, 0.5402, 0.4330, 0.5324, 0.4261, 0.5239, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.4183, 0.5147, 0.6822, 0.8064, 0.6726, 0.7968, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.6620, 0.7860, 0.6501, 0.7741, 0.7889, 0.9003, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.7769, 0.8905, 0.7635, 0.8794, 0.7484, 0.8666}; for (size_t i = 0; i < 3 * 4 * 2; i++) { ASSERT_LT(std::abs(flat[i].item() - c_out[i]), 1e-3); @@ -243,41 +230,24 @@ TEST_F(RNNTest, CheckOutputValuesMatchPyTorch) { ASSERT_EQ(cx.size(1), 4); 
ASSERT_EQ(cx.size(2), 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) flat = torch::cat({hx, cx}, 0).view(16); // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) float h_out[] = {0.7889, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.9003, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.7769, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.8905, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.7635, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.8794, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.7484, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0.8666, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.1647, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.6106, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.1425, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.5726, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.1187, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.5329, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.0931, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1.4911}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 16; i++) { ASSERT_LT(std::abs(flat[i].item() - h_out[i]), 1e-3); } @@ -316,11 +286,9 @@ TEST_F(RNNTest, EndToEndRNNTanh) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(RNNTest, Sizes_CUDA) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM model(LSTMOptions(128, 64).num_layers(3).dropout(0.2)); model->to(torch::kCUDA); auto x = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({10, 16, 128}, torch::requires_grad().device(torch::kCUDA)); auto output = model->forward(x); auto y = x.mean(); @@ -347,11 +315,9 @@ TEST_F(RNNTest, Sizes_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(RNNTest, SizesProj_CUDA) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM model(LSTMOptions(128, 64).num_layers(3).dropout(0.2).proj_size(32)); model->to(torch::kCUDA); auto x = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::randn({10, 16, 128}, torch::requires_grad().device(torch::kCUDA)); auto output = model->forward(x); auto y = x.mean(); @@ -426,7 +392,6 @@ TEST_F(RNNTest, PrettyPrintRNNs) { // https://github.com/pytorch/pytorch/issues/19545 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(RNNTest, BidirectionalFlattenParameters) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) GRU gru(GRUOptions(100, 256).num_layers(2).bidirectional(true)); gru->flatten_parameters(); } @@ -465,9 +430,7 @@ std::tuple> lstm_output_ void BidirectionalGRUReverseForward(bool cuda) { auto opt = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false) .device(cuda ? 
torch::kCUDA : torch::kCPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({1, 2, 3, 4, 5}, opt).reshape({5, 1, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_reversed = torch::tensor({5, 4, 3, 2, 1}, opt).reshape({5, 1, 1}); auto gru_options = GRUOptions(1, 1).num_layers(1).batch_first(false); @@ -518,9 +481,7 @@ TEST_F(RNNTest, BidirectionalGRUReverseForward_CUDA) { void BidirectionalLSTMReverseForwardTest(bool cuda) { auto opt = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(false) .device(cuda ? torch::kCUDA : torch::kCPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::tensor({1, 2, 3, 4, 5}, opt).reshape({5, 1, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_reversed = torch::tensor({5, 4, 3, 2, 1}, opt).reshape({5, 1, 1}); auto lstm_opt = LSTMOptions(1, 1).num_layers(1).batch_first(false); @@ -593,10 +554,8 @@ TEST_F(RNNTest, BidirectionalMultilayerGRU_CPU_vs_CUDA) { // Create the same inputs auto input_opt = torch::TensorOptions() .dtype(torch::kFloat32).requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_cpu = torch::tensor({1, 2, 3, 4, 5, 6}, input_opt) .reshape({3, 1, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_cuda = torch::tensor({1, 2, 3, 4, 5, 6}, input_opt) .reshape({3, 1, 2}).to(torch::kCUDA); @@ -645,10 +604,8 @@ TEST_F(RNNTest, BidirectionalMultilayerLSTM_CPU_vs_CUDA) { auto options = torch::TensorOptions() .dtype(torch::kFloat32).requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_cpu = torch::tensor({1, 2, 3, 4, 5, 6}, options) .reshape({3, 1, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_cuda = torch::tensor({1, 2, 3, 4, 5, 6}, options) .reshape({3, 1, 2}).to(torch::kCUDA); @@ -697,10 +654,8 @@ TEST_F(RNNTest, BidirectionalMultilayerLSTMProj_CPU_vs_CUDA) { auto options = torch::TensorOptions() .dtype(torch::kFloat32).requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_cpu = torch::tensor({1, 2, 3, 4, 5, 6}, options) .reshape({3, 1, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input_cuda = torch::tensor({1, 2, 3, 4, 5, 6}, options) .reshape({3, 1, 2}).to(torch::kCUDA); @@ -734,11 +689,8 @@ TEST_F(RNNTest, UsePackedSequenceAsInput) { torch::nn::utils::rnn::PackedSequence packed_input = torch::nn::utils::rnn::pack_sequence({torch::ones({3, 2})}); auto rnn_output = m->forward_with_packed_input(packed_input); auto expected_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.0645, -0.7274, 0.4531}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3970, -0.6950, 0.6009}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3877, -0.7310, 0.6806}}); ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output).data(), expected_output, 1e-05, 2e-04)); @@ -752,11 +704,8 @@ TEST_F(RNNTest, UsePackedSequenceAsInput) { torch::nn::utils::rnn::PackedSequence packed_input = torch::nn::utils::rnn::pack_sequence({torch::ones({3, 2})}); auto rnn_output = m->forward_with_packed_input(packed_input); auto expected_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.2693, -0.1240, 0.0744}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3889, -0.1919, 0.1183}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.4425, -0.2314, 0.1386}}); 
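The packed-sequence checks here build a PackedSequence with pack_sequence and feed it to the module through forward_with_packed_input. A small self-contained sketch of that call path, with assumed LSTM sizes (2 input features, 3 hidden units) chosen to match the three-column expected outputs above:

```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::manual_seed(0);
  namespace rnn_utils = torch::nn::utils::rnn;
  // Assumed sizes: input_size=2, hidden_size=3.
  torch::nn::LSTM lstm(torch::nn::LSTMOptions(2, 3));
  // One sequence of length 3 with 2 features per step.
  auto packed_input = rnn_utils::pack_sequence({torch::ones({3, 2})});
  auto rnn_output = lstm->forward_with_packed_input(packed_input);
  // std::get<0>(...) is a PackedSequence; .data() holds the flattened step outputs.
  std::cout << std::get<0>(rnn_output).data().sizes() << "\n";  // [3, 3]
  return 0;
}
```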
ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output).data(), expected_output, 1e-05, 2e-04)); @@ -770,11 +719,8 @@ TEST_F(RNNTest, UsePackedSequenceAsInput) { torch::nn::utils::rnn::PackedSequence packed_input = torch::nn::utils::rnn::pack_sequence({torch::ones({3, 2})}); auto rnn_output = m->forward_with_packed_input(packed_input); auto expected_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.1134, 0.0467, 0.2336}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1189, 0.0502, 0.2960}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1138, 0.0484, 0.3110}}); ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output).data(), expected_output, 1e-05, 2e-04)); diff --git a/test/cpp/api/sequential.cpp b/test/cpp/api/sequential.cpp index c119e1fe7bac4..77014ff245bcd 100644 --- a/test/cpp/api/sequential.cpp +++ b/test/cpp/api/sequential.cpp @@ -247,7 +247,6 @@ TEST_F(SequentialTest, CallingForwardChainsCorrectly) { TEST_F(SequentialTest, CallingForwardWithTheWrongReturnTypeThrows) { struct M : public torch::nn::Module { int forward() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 5; } }; @@ -275,10 +274,8 @@ TEST_F(SequentialTest, TheReturnTypeOfForwardDefaultsToTensor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(SequentialTest, ForwardReturnsTheLastValue) { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Sequential sequential(Linear(10, 3), Linear(3, 5), Linear(5, 100)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({1000, 10}, torch::requires_grad()); auto y = sequential->forward(x); ASSERT_EQ(y.ndimension(), 2); @@ -289,16 +286,11 @@ TEST_F(SequentialTest, ForwardReturnsTheLastValue) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(SequentialTest, SanityCheckForHoldingStandardModules) { Sequential sequential( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear(10, 3), Conv2d(1, 2, 3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout(0.5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d(5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Embedding(4, 10), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM(4, 5)); } @@ -351,7 +343,6 @@ TEST_F(SequentialTest, ExtendPushesModulesFromOtherSequential) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(SequentialTest, HasReferenceSemantics) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Sequential first(Linear(2, 3), Linear(4, 4), Linear(4, 5)); Sequential second(first); @@ -400,7 +391,6 @@ TEST_F(SequentialTest, IsCloneable) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(SequentialTest, RegistersElementsAsSubmodules) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Sequential sequential(Linear(10, 3), Conv2d(1, 2, 3), Dropout2d(0.5)); auto modules = sequential->children(); @@ -426,16 +416,11 @@ TEST_F(SequentialTest, CloneToDevice_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST_F(SequentialTest, PrettyPrintSequential) { Sequential sequential( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Linear(10, 3), Conv2d(1, 2, 3), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dropout(0.5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BatchNorm2d(5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
Embedding(4, 10), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LSTM(4, 5)); ASSERT_EQ( c10::str(sequential), @@ -449,16 +434,11 @@ TEST_F(SequentialTest, PrettyPrintSequential) { ")"); Sequential sequential_named({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"linear", Linear(10, 3)}, {"conv2d", Conv2d(1, 2, 3)}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"dropout", Dropout(0.5)}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"batchnorm2d", BatchNorm2d(5)}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"embedding", Embedding(4, 10)}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {"lstm", LSTM(4, 5)} }); ASSERT_EQ( @@ -477,120 +457,74 @@ TEST_F(SequentialTest, PrettyPrintSequential) { TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { { Sequential sequential(Identity(), ConvTranspose1d(ConvTranspose1dOptions(3, 2, 3).stride(1).bias(false))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::dynamic_pointer_cast(sequential[1])->weight.set_data(torch::arange(18.).reshape({3, 2, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(30.).reshape({2, 3, 5}); auto y = sequential->forward(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{ 150., 333., 552., 615., 678., 501., 276.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 195., 432., 714., 804., 894., 654., 357.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 420., 918., 1497., 1560., 1623., 1176., 636.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 600., 1287., 2064., 2154., 2244., 1599., 852.}}}); ASSERT_TRUE(torch::allclose(y, expected)); } { Sequential sequential(Identity(), ConvTranspose2d(ConvTranspose2dOptions(3, 2, 3).stride(1).bias(false))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::dynamic_pointer_cast(sequential[1])->weight.set_data(torch::arange(54.).reshape({3, 2, 3, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(75.).reshape({1, 3, 5, 5}); auto y = sequential->forward(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{ 2250., 4629., 7140., 7311., 7482., 5133., 2640.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4995., 10272., 15837., 16206., 16575., 11364., 5841.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 8280., 17019., 26226., 26820., 27414., 18783., 9648.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9225., 18954., 29196., 29790., 30384., 20808., 10683.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {10170., 20889., 32166., 32760., 33354., 22833., 11718.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 7515., 15420., 23721., 24144., 24567., 16800., 8613.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 4140., 8487., 13044., 13269., 13494., 9219., 4722.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 2925., 6006., 9246., 9498., 9750., 6672., 3423.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 6480., 13296., 20454., 20985., 21516., 14712., 7542.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {10710., 21960., 33759., 34596., 35433., 24210., 12402.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {12060., 24705., 37944., 38781., 39618., 27045., 13842.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {13410., 27450., 42129., 
42966., 43803., 29880., 15282.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 9810., 20064., 30768., 31353., 31938., 21768., 11124.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 5355., 10944., 16770., 17076., 17382., 11838., 6045.}}}}); ASSERT_TRUE(torch::allclose(y, expected)); } { Sequential sequential(Identity(), ConvTranspose3d(ConvTranspose3dOptions(2, 2, 2).stride(1).bias(false))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::dynamic_pointer_cast(sequential[1])->weight.set_data(torch::arange(32.).reshape({2, 2, 2, 2, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(16.).reshape({1, 2, 2, 2, 2}); auto y = sequential->forward(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{{{ 128., 280., 154.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 304., 664., 364.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 184., 400., 218.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 352., 768., 420.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 832., 1808., 984.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 496., 1072., 580.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 256., 552., 298.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 592., 1272., 684.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 344., 736., 394.}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 192., 424., 234.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 464., 1016., 556.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 280., 608., 330.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 544., 1184., 644.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1280., 2768., 1496.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 752., 1616., 868.}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{ 384., 824., 442.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 880., 1880., 1004.}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 504., 1072., 570.}}}}}); ASSERT_TRUE(torch::allclose(y, expected)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto weight = torch::tensor({{1., 2.3, 3.}, {4., 5.1, 6.3}}); Sequential sequential(Identity(), EmbeddingBag::from_pretrained(weight)); auto x = torch::tensor({{1, 0}}, torch::kLong); auto y = sequential->forward(x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({2.5000, 3.7000, 4.6500}); ASSERT_TRUE(torch::allclose(y, expected)); } { torch::manual_seed(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t embed_dim = 8; int64_t num_heads = 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t batch_size = 8; int64_t src_len = 3; int64_t tgt_len = 1; @@ -605,106 +539,68 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { auto attn_output = std::get<0>(output); auto attn_output_expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.0674, -0.0056, 0.1324, 0.0922, 0.0160, -0.0934, -0.1700, 0.1663}}}); ASSERT_TRUE(torch::allclose(attn_output, attn_output_expected, 1e-05, 2e-04)); auto attn_output_weights = std::get<1>(output); auto attn_output_weights_expected = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.3333, 0.3333, 0.3333}}}); ASSERT_TRUE(torch::allclose(attn_output_weights, attn_output_weights_expected, 1e-05, 2e-04)); } { auto indices = torch::tensor({{{1, 3, 4}}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{2, 4, 5}}}, torch::dtype(torch::kFloat)); Sequential sequential(MaxUnpool1d(3)); auto y = sequential->forward(x, indices); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = torch::tensor({{{0, 2, 0, 4, 5, 0, 0, 0, 0}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); } { auto indices = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}}, torch::kLong); auto x = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{ 6, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {16, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {21, 23, 24}}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{31, 33, 34}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {41, 43, 44}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {46, 48, 49}}}}, torch::dtype(torch::kFloat)); Sequential sequential(MaxUnpool2d(MaxUnpool2dOptions(3).stride(2).padding(1))); auto y = sequential->forward(x, indices); auto expected = torch::tensor( {{{{ 0, 0, 0, 0, 0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 6, 0, 8, 9}, { 0, 0, 0, 0, 0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 16, 0, 18, 19}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 21, 0, 23, 24}}}, {{{ 0, 0, 0, 0, 0}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 31, 0, 33, 34}, { 0, 0, 0, 0, 0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 41, 0, 43, 44}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 46, 0, 48, 49}}}} , torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto indices = torch::tensor({{{{{26}}}}}, torch::kLong); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({{{{{26}}}}}, torch::dtype(torch::kFloat).requires_grad(true)); Sequential sequential(MaxUnpool3d(3)); auto y = sequential->forward(x, indices); @@ -717,7 +613,6 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { { 0, 0, 0}}, {{ 0, 0, 0}, { 0, 0, 0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) { 0, 0, 26}}}}}, torch::kFloat); ASSERT_TRUE(torch::allclose(y, expected)); } @@ -727,17 +622,11 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { auto x = torch::ones({2, 3, 2}); auto rnn_output = sequential->forward>(x); auto expected_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-0.0645, -0.7274, 0.4531}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.0645, -0.7274, 0.4531}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.0645, -0.7274, 0.4531}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.3970, -0.6950, 0.6009}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3970, -0.6950, 0.6009}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3970, -0.6950, 0.6009}}}); ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output), expected_output, 1e-05, 2e-04)); } @@ -747,17 +636,11 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { auto x = torch::ones({2, 3, 2}); auto rnn_output = sequential->forward>>(x); auto expected_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-0.2693, -0.1240, 0.0744}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.2693, -0.1240, 0.0744}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.2693, -0.1240, 0.0744}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.3889, -0.1919, 0.1183}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3889, -0.1919, 0.1183}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.3889, -0.1919, 0.1183}}}); ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output), expected_output, 1e-05, 2e-04)); } @@ -767,17 +650,11 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { auto x = torch::ones({2, 3, 2}); auto rnn_output = sequential->forward>(x); auto expected_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{-0.1134, 0.0467, 0.2336}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1134, 0.0467, 0.2336}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1134, 0.0467, 0.2336}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{-0.1189, 0.0502, 0.2960}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1189, 0.0502, 0.2960}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {-0.1189, 0.0502, 0.2960}}}); ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output), expected_output, 1e-05, 2e-04)); } @@ -787,9 +664,7 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) { auto x = torch::ones({2, 2}); auto rnn_output = sequential->forward(x); auto expected_output = torch::tensor( - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        {{-0.0645, -0.7274, 0.4531},
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
         {-0.0645, -0.7274, 0.4531}});
   ASSERT_TRUE(torch::allclose(rnn_output, expected_output, 1e-05, 2e-04));
 }
@@ -799,9 +674,7 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) {
     auto x = torch::ones({2, 2});
     auto rnn_output = sequential->forward>(x);
     auto expected_output = torch::tensor(
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        {{-0.2693, -0.1240, 0.0744},
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
         {-0.2693, -0.1240, 0.0744}});
     ASSERT_TRUE(torch::allclose(std::get<0>(rnn_output), expected_output, 1e-05, 2e-04));
   }
@@ -811,9 +684,7 @@ TEST_F(SequentialTest, ModuleForwardMethodOptionalArg) {
     auto x = torch::ones({2, 2});
     auto rnn_output = sequential->forward(x);
     auto expected_output = torch::tensor(
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
        {{-0.1134, 0.0467, 0.2336},
-        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
         {-0.1134, 0.0467, 0.2336}});
     ASSERT_TRUE(torch::allclose(rnn_output, expected_output, 1e-05, 2e-04));
   }
diff --git a/test/cpp/api/serialize.cpp b/test/cpp/api/serialize.cpp
index dc769de585321..e969a5ae0ebdf 100644
--- a/test/cpp/api/serialize.cpp
+++ b/test/cpp/api/serialize.cpp
@@ -20,10 +20,8 @@ using namespace torch::optim;
 namespace {
 Sequential xor_model() {
   return Sequential(
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       Linear(2, 8),
       Functional(at::sigmoid),
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       Linear(8, 1),
       Functional(at::sigmoid));
 }
@@ -67,11 +65,8 @@ void is_optimizer_state_equal(
 template
 void test_serialize_optimizer(DerivedOptimizerOptions options, bool only_has_global_state = false) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto model1 = Linear(5, 2);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto model2 = Linear(5, 2);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto model3 = Linear(5, 2);
   // Models 1, 2, 3 will have the same parameters.
@@ -99,14 +94,12 @@ void test_serialize_optimizer(DerivedOptimizerOptions options, bool only_has_glo
   auto optim3_2 = OptimizerClass(
       model3->parameters(), options);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::ones({10, 5});
   auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) {
     optimizer.zero_grad();
     auto y = model->forward(x).sum();
     y.backward();
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     auto closure = []() { return torch::tensor({10}); };
     optimizer.step(closure);
   };
@@ -244,7 +237,6 @@ TEST(SerializeTest, TryReadFunc) {
 TEST(SerializeTest, Basic) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::randn({5, 5});
   auto y = save_and_load(x);
@@ -257,7 +249,6 @@ TEST(SerializeTest, Basic) {
 TEST(SerializeTest, BasicToFile) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::randn({5, 5});
   auto tempfile = c10::make_tempfile();
@@ -275,7 +266,6 @@ TEST(SerializeTest, BasicToFile) {
 TEST(SerializeTest, BasicViaFunc) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::randn({5, 5});
   std::string serialized;
@@ -308,9 +298,7 @@ TEST(SerializeTest, BasicViaFunc) {
 TEST(SerializeTest, Resized) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::randn({11, 5});
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   x.resize_({5, 5});
   auto y = save_and_load(x);
@@ -323,9 +311,7 @@ TEST(SerializeTest, Resized) {
 TEST(SerializeTest, Sliced) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::randn({11, 5});
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   x = x.slice(0, 1, 5);
   auto y = save_and_load(x);
@@ -338,7 +324,6 @@ TEST(SerializeTest, Sliced) {
 TEST(SerializeTest, NonContiguous) {
   torch::manual_seed(0);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto x = torch::randn({11, 5});
   x = x.slice(1, 1, 4);
   auto y = save_and_load(x);
@@ -352,7 +337,6 @@ TEST(SerializeTest, NonContiguous) {
 TEST(SerializeTest, ErrorOnMissingKey) {
   struct B : torch::nn::Module {
     B(const std::string& name_c) {
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       register_buffer(name_c, torch::ones(5, torch::kFloat));
     }
   };
@@ -402,14 +386,11 @@ TEST(SerializeTest, XOR) {
   auto model3 = xor_model();
   auto optimizer = torch::optim::SGD(
       model->parameters(),
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       torch::optim::SGDOptions(1e-1).momentum(0.9).nesterov(true).weight_decay(
-          // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
           1e-6));
   float running_loss = 1;
   int epoch = 0;
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   while (running_loss > 0.1) {
     torch::Tensor loss = getLoss(model, 4);
     optimizer.zero_grad();
@@ -426,18 +407,14 @@ TEST(SerializeTest, XOR) {
   torch::save(model, tempfile.name);
   torch::load(model2, tempfile.name);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto loss = getLoss(model2, 100);
   ASSERT_LT(loss.item(), 0.1);
 }
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(SerializeTest, Optim) {
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto model1 = Linear(5, 2);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto model2 = Linear(5, 2);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto model3 = Linear(5, 2);
   // Models 1, 2, 3 will have the same parameters.
@@ -456,22 +433,16 @@ TEST(SerializeTest, Optim) { // Make some optimizers with momentum (and thus state) auto optim1 = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model1->parameters(), torch::optim::SGDOptions(1e-1).momentum(0.9)); auto optim2 = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model2->parameters(), torch::optim::SGDOptions(1e-1).momentum(0.9)); auto optim2_2 = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model2->parameters(), torch::optim::SGDOptions(1e-1).momentum(0.9)); auto optim3 = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model3->parameters(), torch::optim::SGDOptions(1e-1).momentum(0.9)); auto optim3_2 = torch::optim::SGD( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model3->parameters(), torch::optim::SGDOptions(1e-1).momentum(0.9)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { @@ -512,17 +483,13 @@ TEST(SerializeTest, Optim) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(SerializeTest, Optim_Adagrad) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_serialize_optimizer(AdagradOptions(1e-1)); // bc compatibility check - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto model1 = Linear(5, 2); auto optim1 = torch::optim::Adagrad( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model1->parameters(), torch::optim::AdagradOptions(1e-1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { optimizer.zero_grad(); @@ -531,7 +498,6 @@ TEST(SerializeTest, Optim_Adagrad) { optimizer.step(); }; step(optim1, model1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optim1_2 = Adagrad(model1->parameters(), torch::optim::AdagradOptions(1e-1)); // fill up with optim1 sum_buffers @@ -558,19 +524,15 @@ TEST(SerializeTest, Optim_Adagrad) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(SerializeTest, Optim_SGD) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_serialize_optimizer(SGDOptions(1e-1).momentum(0.9)); // bc compatibility check - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto model1 = Linear(5, 2); auto model1_params = model1->parameters(); // added a tensor for lazy init check - when all params do not have a momentum buffer entry model1_params.emplace_back(torch::randn({2,3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optim1 = torch::optim::SGD(model1_params, torch::optim::SGDOptions(0.01).momentum(0.9)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { optimizer.zero_grad(); @@ -598,7 +560,6 @@ TEST(SerializeTest, Optim_SGD) { write_tensors_to_archive(output_archive, "momentum_buffers", momentum_buffers); write_int_value(output_archive, "iteration_", iteration_); output_archive.save_to(optim_tempfile_old_format.name); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optim1_2 = SGD(model1_params, torch::optim::SGDOptions(1e-1).momentum(0.9)); OLD_SERIALIZATION_LOGIC_WARNING_CHECK(torch::load, optim1_2, optim_tempfile_old_format.name); is_optimizer_state_equal(optim1.state(), optim1_2.state()); @@ -606,19 +567,15 @@ 
TEST(SerializeTest, Optim_SGD) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(SerializeTest, Optim_Adam) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_serialize_optimizer(AdamOptions().lr(0.99999).amsgrad(true).weight_decay(0.5)); // bc compatibility check - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto model1 = Linear(5, 2); auto model1_params = model1->parameters(); // added a tensor for lazy init check - when all params do not have entry in buffers model1_params.emplace_back(torch::randn({2,3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optim1 = torch::optim::Adam(model1_params, torch::optim::AdamOptions().weight_decay(0.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { optimizer.zero_grad(); @@ -661,19 +618,15 @@ TEST(SerializeTest, Optim_Adam) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(SerializeTest, Optim_AdamW) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) test_serialize_optimizer(AdamWOptions().lr(0.99999).amsgrad(true).betas(std::make_tuple(0.999, 0.1))); // bc compatibility check - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto model1 = Linear(5, 2); auto model1_params = model1->parameters(); // added a tensor for lazy init check - when all params do not have entry in buffers model1_params.emplace_back(torch::randn({2,3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto optim1 = torch::optim::AdamW(model1_params, torch::optim::AdamWOptions().weight_decay(0.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { optimizer.zero_grad(); @@ -716,12 +669,10 @@ TEST(SerializeTest, Optim_AdamW) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(SerializeTest, Optim_RMSprop) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = RMSpropOptions(0.1).momentum(0.9).centered(true); test_serialize_optimizer(options); // bc compatibility check - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto model1 = Linear(5, 2); auto model1_params = model1->parameters(); @@ -729,7 +680,6 @@ TEST(SerializeTest, Optim_RMSprop) { model1_params.emplace_back(torch::randn({2,3})); auto optim1 = torch::optim::RMSprop(model1_params, options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { optimizer.zero_grad(); @@ -785,20 +735,17 @@ TEST(SerializeTest, Optim_RMSprop) { TEST(SerializeTest, Optim_LBFGS) { test_serialize_optimizer(LBFGSOptions(), true); // bc compatibility check - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto model1 = Linear(5, 2); auto model1_params = model1->parameters(); // added a tensor for lazy init check - when all params do not have entry in buffers model1_params.emplace_back(torch::randn({2,3})); auto optim1 = torch::optim::LBFGS(model1_params, torch::optim::LBFGSOptions()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::ones({10, 5}); auto step = [&x](torch::optim::Optimizer& optimizer, Linear model) { optimizer.zero_grad(); auto y = model->forward(x).sum(); y.backward(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto closure = []() { return torch::tensor({10}); }; 
    optimizer.step(closure);
  };
@@ -872,14 +819,11 @@ TEST(SerializeTest, XOR_CUDA) {
   auto model3 = xor_model();
   auto optimizer = torch::optim::SGD(
       model->parameters(),
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       torch::optim::SGDOptions(1e-1).momentum(0.9).nesterov(true).weight_decay(
-          // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
           1e-6));
   float running_loss = 1;
   int epoch = 0;
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   while (running_loss > 0.1) {
     torch::Tensor loss = getLoss(model, 4);
     optimizer.zero_grad();
@@ -896,12 +840,10 @@ TEST(SerializeTest, XOR_CUDA) {
   torch::save(model, tempfile.name);
   torch::load(model2, tempfile.name);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto loss = getLoss(model2, 100);
   ASSERT_LT(loss.item(), 0.1);
   model2->to(torch::kCUDA);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   loss = getLoss(model2, 100, true);
   ASSERT_LT(loss.item(), 0.1);
@@ -909,7 +851,6 @@ TEST(SerializeTest, XOR_CUDA) {
   torch::save(model2, tempfile2.name);
   torch::load(model3, tempfile2.name);
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   loss = getLoss(model3, 100, true);
   ASSERT_LT(loss.item(), 0.1);
 }
@@ -920,7 +861,6 @@ TEST(
     CanSerializeModulesWithIntermediateModulesWithoutParametersOrBuffers) {
   struct C : torch::nn::Module {
     C() {
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       register_buffer("foo", torch::ones(5, torch::kInt32));
     }
   };
@@ -1016,7 +956,6 @@ TEST(SerializeTest, UnserializableSubmoduleIsIgnoredWhenLoadingModule) {
   struct B : torch::nn::Module {
     B() {
       register_module("relu1", torch::nn::Functional(torch::relu));
-      // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
       register_buffer("foo", torch::zeros(5, torch::kInt32));
     }
   };
diff --git a/test/cpp/api/special.cpp b/test/cpp/api/special.cpp
index d6dc24817e028..43c07a06c8f7a 100644
--- a/test/cpp/api/special.cpp
+++ b/test/cpp/api/special.cpp
@@ -9,7 +9,6 @@
 // properly in C++
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(SpecialTest, special) {
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto t = torch::randn(128, torch::kDouble);
   torch::special::gammaln(t);
 }
diff --git a/test/cpp/api/static.cpp b/test/cpp/api/static.cpp
index 7ebdba6d35bc3..9bf1105a8a62d 100644
--- a/test/cpp/api/static.cpp
+++ b/test/cpp/api/static.cpp
@@ -51,7 +51,6 @@ TEST(TestStatic, EnableIfModule) {
 struct A : torch::nn::Module {
   int forward() {
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     return 5;
   }
 };
@@ -64,7 +63,6 @@ struct B : torch::nn::Module {
 struct C : torch::nn::Module {
   float forward(torch::Tensor& tensor) {
-    // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
     return 5.0;
   }
 };
@@ -100,7 +98,6 @@ TEST(TestStatic, ReturnTypeOfForward) {
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(TestStatic, Apply) {
   std::vector v;
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   torch::apply([&v](int x) { v.push_back(x); }, 1, 2, 3, 4, 5);
   ASSERT_EQ(v.size(), 5);
   for (size_t i = 0; i < v.size(); ++i) {
diff --git a/test/cpp/api/tensor.cpp b/test/cpp/api/tensor.cpp
index ec4ffea3c5928..a0c227e3c71d8 100644
--- a/test/cpp/api/tensor.cpp
+++ b/test/cpp/api/tensor.cpp
@@ -158,49 +158,41 @@ TEST(TensorTest, ToDoesNotCopyWhenOptionsAreAllTheSame) {
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
 TEST(TensorTest, AtTensorCtorScalar) {
-  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
   auto tensor = at::tensor(123);
ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kInt); ASSERT_EQ(tensor[0].item(), 123); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(123.456f); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kFloat); ASSERT_TRUE(almost_equal(tensor[0], 123.456f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(123.456); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kDouble); ASSERT_TRUE(almost_equal(tensor[0], 123.456)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(123, at::dtype(at::kFloat)) + 0.5; ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kFloat); ASSERT_TRUE(almost_equal(tensor[0], 123.5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(c10::complex(1.0, 2.0)) + 0.5; ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kComplexFloat); ASSERT_TRUE(almost_equal(tensor[0], c10::complex(1.5, 2.0))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(c10::complex(1.0, 2.0), at::dtype(at::kComplexFloat)) + 0.5; ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kComplexFloat); ASSERT_TRUE(almost_equal(tensor[0], c10::complex(1.5, 2.0))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(c10::complex(1.0, 2.0)) + 0.5; ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); ASSERT_TRUE(almost_equal(tensor[0], c10::complex(1.5, 2.0))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(c10::complex(1.0, 2.0), at::dtype(at::kComplexDouble)) + 0.5; ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); @@ -223,7 +215,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(exactly_equal(tensor[1], 2)); ASSERT_TRUE(exactly_equal(tensor[2], 3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor({1.5, 2.25, 3.125}); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kDouble); @@ -231,7 +222,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(almost_equal(tensor[1], 2.25)); ASSERT_TRUE(almost_equal(tensor[2], 3.125)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor({c10::complex(1.5, 0.15), c10::complex(1.5, 0.15), c10::complex(3.125, 0.3125)}); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kComplexFloat); @@ -239,7 +229,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(1.5, 0.15))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.125, 0.3125))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor({c10::complex(1.5, 0.15), c10::complex(1.5, 0.15), c10::complex(3.125, 0.3125)}); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); @@ -247,7 +236,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(1.5, 0.15))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.125, 0.3125))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor({1.1, 2.2, 3.3}, at::dtype(at::kInt)); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kInt); @@ -256,7 +244,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(exactly_equal(tensor[1], 2)); ASSERT_TRUE(exactly_equal(tensor[2], 3)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(std::vector({1.5, 2.25, 3.125})); ASSERT_EQ(tensor.numel(), 3); 
ASSERT_EQ(tensor.dtype(), at::kDouble); @@ -264,7 +251,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(almost_equal(tensor[1], 2.25)); ASSERT_TRUE(almost_equal(tensor[2], 3.125)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(std::vector>({c10::complex(1.5, 0.15), c10::complex(1.5, 0.15), c10::complex(3.125, 0.3125)})); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kComplexFloat); @@ -272,7 +258,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(1.5, 0.15))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.125, 0.3125))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(std::vector>({c10::complex(1.5, 0.15), c10::complex(1.5, 0.15), c10::complex(3.125, 0.3125)})); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); @@ -280,7 +265,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(1.5, 0.15))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.125, 0.3125))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; tensor = at::tensor(v); ASSERT_EQ(tensor.numel(), v.size()); @@ -289,7 +273,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { ASSERT_TRUE(exactly_equal(tensor[i], v.at(i))); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector w = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0}; tensor = at::tensor(w); ASSERT_EQ(tensor.numel(), w.size()); @@ -299,9 +282,7 @@ TEST(TensorTest, AtTensorCtorSingleDim) { } std::vector> x = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.1, -1.1}, {2.2, -2.2}, {3.3, -3.3}, {4.4, -4.4}, {5.5, -5.5}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6.6, -6.6}, {7.7, -7.7}, {8.8, -8.8}, {9.9, -9.9}, {10.0, -10.0} }; tensor = at::tensor(x); @@ -314,7 +295,6 @@ TEST(TensorTest, AtTensorCtorSingleDim) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, AtTensorCastRealToComplex) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = at::tensor(std::vector({1.5, 2.5, 3.5}), at::kComplexDouble); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); @@ -322,7 +302,6 @@ TEST(TensorTest, AtTensorCastRealToComplex) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(2.5))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.5))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor({1.5, 2.5, 3.5}, at::kComplexDouble); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); @@ -330,7 +309,6 @@ TEST(TensorTest, AtTensorCastRealToComplex) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(2.5))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.5))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = at::tensor(1.5, at::kComplexDouble); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), at::kComplexDouble); @@ -340,17 +318,14 @@ TEST(TensorTest, AtTensorCastRealToComplex) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, AtTensorCastComplexToRealErrorChecks) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(at::tensor(c10::complex(0.1, 0.2), at::kFloat), "\"tensor_cpu\" not implemented for 'Float'"); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(at::tensor({c10::complex(0.1, 0.2)}, at::kFloat), "\"tensor_cpu\" not 
implemented for 'Float'"); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(at::tensor(std::vector>{c10::complex(0.1, 0.2)}, at::kFloat), "\"tensor_cpu\" not implemented for 'Float'"); } @@ -358,7 +333,6 @@ TEST(TensorTest, AtTensorCastComplexToRealErrorChecks) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, TorchTensorCtorScalarIntegralType) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::tensor(123); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.sizes(), std::vector({})); @@ -369,21 +343,18 @@ TEST(TensorTest, TorchTensorCtorScalarIntegralType) { void test_TorchTensorCtorScalarFloatingType_expected_dtype(c10::ScalarType default_dtype) { AutoDefaultDtypeMode dtype_mode(default_dtype); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::tensor(123.456f); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.sizes(), std::vector({})); ASSERT_EQ(tensor.dtype(), default_dtype); ASSERT_TRUE(almost_equal(tensor, 123.456f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor(123.456); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.sizes(), std::vector({})); ASSERT_EQ(tensor.dtype(), default_dtype); ASSERT_TRUE(almost_equal(tensor, 123.456)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor({123.456}); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.sizes(), std::vector({1})); @@ -458,7 +429,6 @@ TEST(TensorTest, TorchTensorCtorSingleDimIntegralType) { void test_TorchTensorCtorSingleDimFloatingType_expected_dtype(c10::ScalarType default_dtype) { AutoDefaultDtypeMode dtype_mode(default_dtype); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::tensor({1.5, 2.25, 3.125}); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.sizes(), std::vector({3})); @@ -467,7 +437,6 @@ void test_TorchTensorCtorSingleDimFloatingType_expected_dtype(c10::ScalarType de ASSERT_TRUE(almost_equal(tensor[1], 2.25)); ASSERT_TRUE(almost_equal(tensor[2], 3.125)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor({1.5f, 2.25f, 3.125f}); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.sizes(), std::vector({3})); @@ -476,7 +445,6 @@ void test_TorchTensorCtorSingleDimFloatingType_expected_dtype(c10::ScalarType de ASSERT_TRUE(almost_equal(tensor[1], 2.25f)); ASSERT_TRUE(almost_equal(tensor[2], 3.125f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor(at::ArrayRef({1.5f, 2.25f, 3.125f})); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), default_dtype); @@ -484,7 +452,6 @@ void test_TorchTensorCtorSingleDimFloatingType_expected_dtype(c10::ScalarType de ASSERT_TRUE(almost_equal(tensor[1], 2.25)); ASSERT_TRUE(almost_equal(tensor[2], 3.125)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor(std::vector({1.5f, 2.25f, 3.125f})); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.sizes(), std::vector({3})); @@ -493,7 +460,6 @@ void test_TorchTensorCtorSingleDimFloatingType_expected_dtype(c10::ScalarType de ASSERT_TRUE(almost_equal(tensor[1], 2.25)); ASSERT_TRUE(almost_equal(tensor[2], 3.125)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor(at::ArrayRef({1.5, 2.25, 3.125})); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), default_dtype); @@ -501,7 +467,6 @@ void test_TorchTensorCtorSingleDimFloatingType_expected_dtype(c10::ScalarType de ASSERT_TRUE(almost_equal(tensor[1], 2.25)); 
ASSERT_TRUE(almost_equal(tensor[2], 3.125)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor(std::vector({1.5, 2.25, 3.125})); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.sizes(), std::vector({3})); @@ -592,7 +557,6 @@ TEST(TensorTest, TorchTensorCtorMultiDimIntegralType) { void test_TorchTensorCtorMultiDimFloatingType_expected_dtype(c10::ScalarType default_dtype) { AutoDefaultDtypeMode dtype_mode(default_dtype); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::tensor({{1.0, 2.0}}); ASSERT_EQ(tensor.dtype(), default_dtype); ASSERT_EQ(tensor.sizes(), std::vector({1, 2})); @@ -600,7 +564,6 @@ void test_TorchTensorCtorMultiDimFloatingType_expected_dtype(c10::ScalarType def ASSERT_FALSE(tensor.requires_grad()); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::tensor({{{{{{{{1.0, 2.0, 3.0}}}}}, {{{{{4.0, 5.0, 6.0}}}}}, {{{{{7.0, 8.0, 9.0}}}}}}}}); ASSERT_EQ(tensor.dtype(), default_dtype); ASSERT_EQ(tensor.sizes(), std::vector({1, 1, 3, 1, 1, 1, 1, 3})); @@ -664,12 +627,10 @@ TEST(TensorTest, TorchTensorCtorMultiDimErrorChecks) { "Expected all sub-lists to have sizes: 2 (e.g. {5, 6}), but got sub-list {7} with sizes: 1"); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(torch::tensor({{{1, 2.0}, {1, 2.0}}}), "Expected all elements of the tensor to have the same scalar type: Int, but got element of scalar type: Double"); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(torch::tensor({{{true, 2.0, 3}, {true, 2.0, 3}}}), "Expected all elements of the tensor to have the same scalar type: Bool, but got element of scalar type: Double"); } @@ -685,7 +646,6 @@ TEST(TensorTest, TorchTensorCtorMultiDimErrorChecks) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, TorchTensorCastRealToComplex) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::tensor(std::vector({1.5, 2.5, 3.5}), torch::kComplexDouble); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), torch::kComplexDouble); @@ -693,7 +653,6 @@ TEST(TensorTest, TorchTensorCastRealToComplex) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(2.5))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.5))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor({1.5, 2.5, 3.5}, torch::kComplexDouble); ASSERT_EQ(tensor.numel(), 3); ASSERT_EQ(tensor.dtype(), torch::kComplexDouble); @@ -701,7 +660,6 @@ TEST(TensorTest, TorchTensorCastRealToComplex) { ASSERT_TRUE(almost_equal(tensor[1], c10::complex(2.5))); ASSERT_TRUE(almost_equal(tensor[2], c10::complex(3.5))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = torch::tensor(1.5, torch::kComplexDouble); ASSERT_EQ(tensor.numel(), 1); ASSERT_EQ(tensor.dtype(), torch::kComplexDouble); @@ -711,17 +669,14 @@ TEST(TensorTest, TorchTensorCastRealToComplex) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, TorchTensorCastComplexToRealErrorChecks) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(torch::tensor(c10::complex(0.1, 0.2), torch::kFloat), "value cannot be converted to type float without overflow"); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(torch::tensor({c10::complex(0.1, 0.2), c10::complex(0.3, 0.4)}, torch::kFloat), "value cannot be converted to type float without overflow"); } { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ASSERT_THROWS_WITH(torch::tensor(std::vector>{c10::complex(0.1, 0.2), c10::complex(0.3, 0.4)}, torch::kFloat), "can not do torch::tensor(complex, dtype=non-complex) because complex can not be casted to real number without loss of information"); } @@ -731,7 +686,6 @@ void test_TorchTensorCtorMultiDim_CUDA_expected_dtype(c10::ScalarType default_dt AutoDefaultDtypeMode dtype_mode(default_dtype); auto tensor = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{{{{{{1.0, 2.0, 3.0}}}}}, {{{{{4.0, 5.0, 6.0}}}}}, {{{{{7.0, 8.0, 9.0}}}}}}}}, torch::dtype(default_dtype).device(torch::kCUDA)); ASSERT_TRUE(tensor.device().is_cuda()); @@ -859,7 +813,6 @@ void test_Arange_expected_dtype(c10::ScalarType default_dtype) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, Arange) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 5); ASSERT_EQ(x.dtype(), torch::kLong); } @@ -920,31 +873,25 @@ TEST(TensorTest, PrettyPrintTensorDataContainer) { TEST(TensorTest, TensorDataContainerCallingAccessorOfWrongType) { { ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::detail::TensorDataContainer(1.1).init_list(), "Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`"); ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::detail::TensorDataContainer(1.1).tensor(), "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`"); } { ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::detail::TensorDataContainer({1.1, 2.2}).scalar(), "Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`"); ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::detail::TensorDataContainer({1.1, 2.2}).tensor(), "Can only call `tensor()` on a TensorDataContainer that has `is_tensor() == true`"); } { ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::detail::TensorDataContainer(at::ArrayRef({1.1, 2.2})).scalar(), "Can only call `scalar()` on a TensorDataContainer that has `is_scalar() == true`"); ASSERT_THROWS_WITH( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::detail::TensorDataContainer(at::ArrayRef({1.1, 2.2})).init_list(), "Can only call `init_list()` on a TensorDataContainer that has `is_init_list() == true`"); } @@ -952,7 +899,6 @@ TEST(TensorTest, TensorDataContainerCallingAccessorOfWrongType) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, FromBlob) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector v = {1.0, 2.0, 3.0}; auto tensor = torch::from_blob( v.data(), v.size(), torch::dtype(torch::kFloat64).requires_grad(true)); @@ -985,9 +931,7 @@ TEST(TensorTest, FromBlobWithStrides) { // clang-format off std::vector v = { 1, 2, 3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 4, 5, 6, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 7, 8, 9 }; // clang-format on @@ -1011,13 +955,11 @@ TEST(TensorTest, FromBlobWithStrides) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, Item) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::tensor(3.14); torch::Scalar scalar = tensor.item(); ASSERT_NEAR(scalar.to(), 3.14, 1e-5); } { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::tensor(123); torch::Scalar scalar = tensor.item(); ASSERT_EQ(scalar.to(), 123); @@ -1027,13 +969,11 @@ TEST(TensorTest, Item) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, Item_CUDA) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::tensor(3.14, torch::kCUDA); torch::Scalar scalar = tensor.item(); ASSERT_NEAR(scalar.to(), 3.14, 1e-5); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::tensor(123, torch::kCUDA); torch::Scalar scalar = tensor.item(); ASSERT_EQ(scalar.to(), 123); @@ -1056,7 +996,6 @@ TEST(TensorTest, Data) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, BackwardAndGrad) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = x * x; y.backward(); @@ -1073,7 +1012,6 @@ TEST(TensorTest, BackwardCreatesOnesGrad) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, BackwardNonScalarOutputs) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5, 5}, torch::requires_grad()); auto y = x * x; ASSERT_THROWS_WITH(y.backward(), @@ -1082,7 +1020,6 @@ TEST(TensorTest, BackwardNonScalarOutputs) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, IsLeaf) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = x * x; ASSERT_TRUE(x.is_leaf()); @@ -1091,7 +1028,6 @@ TEST(TensorTest, IsLeaf) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, OutputNr) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = x * x; ASSERT_EQ(x.output_nr(), 0); @@ -1110,7 +1046,6 @@ TEST(TensorTest, Version) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, Detach) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = x * x; const auto y_detached = y.detach(); @@ -1121,7 +1056,6 @@ TEST(TensorTest, Detach) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, DetachInplace) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5}, torch::dtype(torch::kFloat).requires_grad(true)); auto y = x * x; auto y_detached = y.detach_(); @@ -1133,9 +1067,7 @@ TEST(TensorTest, DetachInplace) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, SetData) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = torch::randn({5}); ASSERT_FALSE(torch::equal(x, y)); ASSERT_NE(x.data_ptr(), y.data_ptr()); @@ -1147,7 +1079,6 @@ TEST(TensorTest, SetData) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorTest, RequiresGradInplace) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor({5.0}); x.requires_grad_(true); ASSERT_TRUE(x.requires_grad()); diff --git a/test/cpp/api/tensor_indexing.cpp b/test/cpp/api/tensor_indexing.cpp index 912617495c14d..95cc5140da7d2 100644 --- 
a/test/cpp/api/tensor_indexing.cpp +++ b/test/cpp/api/tensor_indexing.cpp @@ -73,9 +73,7 @@ TEST(TensorIndexingTest, TensorIndex) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestNoIndices) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::randn({20, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor value = torch::randn({20, 20}); std::vector indices; @@ -91,33 +89,23 @@ TEST(TensorIndexingTest, TestNoIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestAdvancedIndexingWithListOfTensor) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::randn({20, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor index = torch::arange(10, torch::kLong).cpu(); torch::Tensor result = at::index(tensor, {index}); torch::Tensor result_with_init_list = tensor.index({index}); ASSERT_TRUE(result.equal(result_with_init_list)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::randn({20, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor index = torch::arange(10, torch::kLong).cpu(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor result = at::index_put_(tensor, {index}, torch::ones({20})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor result_with_init_list = tensor.index_put_({index}, torch::ones({20})); ASSERT_TRUE(result.equal(result_with_init_list)); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor tensor = torch::randn({20, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor index = torch::arange(10, torch::kLong).cpu(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor result = at::index_put_(tensor, {index}, torch::ones({1, 20})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor result_with_init_list = tensor.index_put_({index}, torch::ones({1, 20})); ASSERT_TRUE(result.equal(result_with_init_list)); } @@ -125,14 +113,12 @@ TEST(TensorIndexingTest, TestAdvancedIndexingWithListOfTensor) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestSingleInt) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); ASSERT_EQ(v.index({4}).sizes(), torch::IntArrayRef({7, 3})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestMultipleInt) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); ASSERT_EQ(v.index({4}).sizes(), torch::IntArrayRef({7, 3})); ASSERT_EQ(v.index({4, Slice(), 1}).sizes(), torch::IntArrayRef({7})); @@ -144,7 +130,6 @@ TEST(TensorIndexingTest, TestMultipleInt) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestNone) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); ASSERT_EQ(v.index({None}).sizes(), torch::IntArrayRef({1, 5, 7, 3})); ASSERT_EQ(v.index({Slice(), None}).sizes(), torch::IntArrayRef({5, 1, 7, 3})); @@ -154,25 +139,18 @@ TEST(TensorIndexingTest, TestNone) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestStep) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::arange(10); 
assert_tensor_equal(v.index({Slice(None, None, 1)}), v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(v.index({Slice(None, None, 2)}), torch::tensor({0, 2, 4, 6, 8})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(v.index({Slice(None, None, 3)}), torch::tensor({0, 3, 6, 9})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(v.index({Slice(None, None, 11)}), torch::tensor({0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(v.index({Slice(1, 6, 2)}), torch::tensor({1, 3, 5})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestStepAssignment) { auto v = torch::zeros({4, 4}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) v.index_put_({0, Slice(1, None, 2)}, torch::tensor({3., 4.})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(v.index({0}), torch::tensor({0., 3., 0., 4.})); assert_tensor_equal(v.index({Slice(1, None)}).sum(), torch::tensor(0)); } @@ -180,7 +158,6 @@ TEST(TensorIndexingTest, TestStepAssignment) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestBoolIndices) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); auto boolIndices = torch::tensor({true, false, true, true, false}, torch::kBool); ASSERT_EQ(v.index({boolIndices}).sizes(), torch::IntArrayRef({3, 7, 3})); @@ -205,19 +182,15 @@ TEST(TensorIndexingTest, TestBoolIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestBoolIndicesAccumulate) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto mask = torch::zeros({10}, torch::kBool); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = torch::ones({10, 10}); y.index_put_({mask}, {y.index({mask})}, /*accumulate=*/true); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(y, torch::ones({10, 10})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestMultipleBoolIndices) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); // note: these broadcast together and are transposed to the first dim auto mask1 = torch::tensor({1, 0, 1, 1, 0}, torch::kBool); @@ -228,7 +201,6 @@ TEST(TensorIndexingTest, TestMultipleBoolIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestByteMask) { { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); auto mask = torch::tensor({1, 0, 1, 1, 0}, torch::kByte); { @@ -248,15 +220,12 @@ TEST(TensorIndexingTest, TestByteMask) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestByteMaskAccumulate) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto mask = torch::zeros({10}, torch::kUInt8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = torch::ones({10, 10}); { WarningCapture warnings; y.index_put_({mask}, y.index({mask}), /*accumulate=*/true); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(y, torch::ones({10, 10})); ASSERT_EQ(count_substr_occurrences(warnings.str(), "indexing with dtype torch.uint8 is now deprecated"), 2); @@ -265,7 +234,6 @@ TEST(TensorIndexingTest, TestByteMaskAccumulate) { // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestMultipleByteMask) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); // note: these broadcast together and are transposed to the first dim auto mask1 = torch::tensor({1, 0, 1, 1, 0}, torch::kByte); @@ -281,9 +249,7 @@ TEST(TensorIndexingTest, TestMultipleByteMask) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestByteMask2d) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = torch::randn({5, 7}); int64_t num_ones = (c > 0).sum().item().to(); auto r = v.index({c > 0}); @@ -292,7 +258,6 @@ TEST(TensorIndexingTest, TestByteMask2d) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestIntIndices) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::randn({5, 7, 3}); ASSERT_EQ(v.index({torch::tensor({0, 4, 2})}).sizes(), torch::IntArrayRef({3, 7, 3})); ASSERT_EQ(v.index({Slice(), torch::tensor({0, 4, 2})}).sizes(), torch::IntArrayRef({5, 3, 3})); @@ -303,29 +268,24 @@ TEST(TensorIndexingTest, TestIntIndices) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestIntIndices2d) { // From the NumPy indexing example - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 12, torch::kLong).view({4, 3}); auto rows = torch::tensor({{0, 0}, {3, 3}}); auto columns = torch::tensor({{0, 2}, {0, 2}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(x.index({rows, columns}), torch::tensor({{0, 2}, {9, 11}})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestIntIndicesBroadcast) { // From the NumPy indexing example - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 12, torch::kLong).view({4, 3}); auto rows = torch::tensor({0, 3}); auto columns = torch::tensor({0, 2}); auto result = x.index({rows.index({Slice(), None}), columns}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(result, torch::tensor({{0, 2}, {9, 11}})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestEmptyIndex) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 12).view({4, 3}); auto idx = torch::tensor({}, torch::kLong); ASSERT_EQ(x.index({idx}).numel(), 0); @@ -344,23 +304,18 @@ TEST(TensorIndexingTest, TestEmptyIndex) { TEST(TensorIndexingTest, TestEmptyNdimIndex) { torch::Device device(torch::kCPU); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5}, device); assert_tensor_equal( torch::empty({0, 2}, device), x.index({torch::empty({0, 2}, torch::TensorOptions(torch::kInt64).device(device))})); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({2, 3, 4, 5}, device); assert_tensor_equal( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::empty({2, 0, 6, 4, 5}, device), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.index({Slice(), torch::empty({0, 6}, torch::TensorOptions(torch::kInt64).device(device))})); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::empty({10, 0}); ASSERT_EQ(x.index({torch::tensor({1, 2})}).sizes(), 
torch::IntArrayRef({2, 0})); ASSERT_EQ(x.index({torch::tensor({}, torch::kLong), torch::tensor({}, torch::kLong)}).sizes(), torch::IntArrayRef({0})); @@ -372,19 +327,15 @@ TEST(TensorIndexingTest, TestEmptyNdimIndex) { TEST(TensorIndexingTest, TestEmptyNdimIndex_CUDA) { torch::Device device(torch::kCUDA); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5}, device); assert_tensor_equal( torch::empty({0, 2}, device), x.index({torch::empty({0, 2}, torch::TensorOptions(torch::kInt64).device(device))})); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({2, 3, 4, 5}, device); assert_tensor_equal( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::empty({2, 0, 6, 4, 5}, device), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.index({Slice(), torch::empty({0, 6}, torch::TensorOptions(torch::kInt64).device(device))})); } } @@ -392,7 +343,6 @@ TEST(TensorIndexingTest, TestEmptyNdimIndex_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestEmptyNdimIndexBool) { torch::Device device(torch::kCPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5}, device); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(x.index({torch::empty({0, 2}, torch::TensorOptions(torch::kUInt8).device(device))}), c10::Error); @@ -401,7 +351,6 @@ TEST(TensorIndexingTest, TestEmptyNdimIndexBool) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestEmptyNdimIndexBool_CUDA) { torch::Device device(torch::kCUDA); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({5}, device); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(x.index({torch::empty({0, 2}, torch::TensorOptions(torch::kUInt8).device(device))}), c10::Error); @@ -410,7 +359,6 @@ TEST(TensorIndexingTest, TestEmptyNdimIndexBool_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestEmptySlice) { torch::Device device(torch::kCPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({2, 3, 4, 5}, device); auto y = x.index({Slice(), Slice(), Slice(), 1}); auto z = y.index({Slice(), Slice(1, 1), Slice()}); @@ -423,7 +371,6 @@ TEST(TensorIndexingTest, TestEmptySlice) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestEmptySlice_CUDA) { torch::Device device(torch::kCUDA); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::randn({2, 3, 4, 5}, device); auto y = x.index({Slice(), Slice(), Slice(), 1}); auto z = y.index({Slice(), Slice(1, 1), Slice()}); @@ -472,12 +419,10 @@ TEST(TensorIndexingTest, TestIndexSetitemBoolsSlices) { auto neg_ones_expanded = neg_ones.unsqueeze(0).unsqueeze(0); a.index_put_({true}, neg_ones_expanded); assert_tensor_equal(a, neg_ones); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.index_put_({false}, 5); assert_tensor_equal(a, neg_ones); a.index_put_({true_tensor}, neg_ones_expanded * 2); assert_tensor_equal(a, neg_ones * 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.index_put_({false_tensor}, 5); assert_tensor_equal(a, neg_ones * 2); a.index_put_({None}, neg_ones_expanded * 3); @@ -526,7 +471,6 @@ TEST(TensorIndexingTest, TestSetitemExpansionError) { auto true_tensor = torch::tensor(true); auto a = torch::randn({2, 3}); // check prefix with non-1s 
doesn't work - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector tensor_sizes{5, 1}; tensor_sizes.insert( tensor_sizes.end(), @@ -579,7 +523,6 @@ TEST(TensorIndexingTest, TestSetitemScalars) { a_set_with_number.index_put_({0}, b); a_set_with_scalar.index_put_({zero}, b); assert_tensor_equal(a_set_with_number, a_set_with_scalar); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.index_put_({1, zero}, 7.7); ASSERT_TRUE(a.index({1, 0}).allclose(torch::tensor(7.7))); @@ -589,7 +532,6 @@ TEST(TensorIndexingTest, TestSetitemScalars) { ASSERT_THROW(r.index_put_({Slice()}, 8.8), c10::Error); // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(r.index_put_({zero}, 8.8), c10::Error); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r.index_put_({"..."}, 9.9); ASSERT_TRUE(r.allclose(torch::tensor(9.9))); } @@ -597,10 +539,8 @@ TEST(TensorIndexingTest, TestSetitemScalars) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestBasicAdvancedCombined) { // From the NumPy indexing example - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 12).to(torch::kLong).view({4, 3}); assert_tensor_equal(x.index({Slice(1, 2), Slice(1, 3)}), x.index({Slice(1, 2), torch::tensor({1, 2})})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(x.index({Slice(1, 2), Slice(1, 3)}), torch::tensor({{4, 5}})); // Check that it is a copy @@ -622,27 +562,21 @@ TEST(TensorIndexingTest, TestBasicAdvancedCombined) { TEST(TensorIndexingTest, TestIntAssignment) { { auto x = torch::arange(0, 4).to(torch::kLong).view({2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.index_put_({1}, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(x, torch::tensor({{0, 1}, {5, 5}})); } { auto x = torch::arange(0, 4).to(torch::kLong).view({2, 2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x.index_put_({1}, torch::arange(5, 7).to(torch::kLong)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(x, torch::tensor({{0, 1}, {5, 6}})); } } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestByteTensorAssignment) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0., 16).to(torch::kFloat).view({4, 4}); auto b = torch::tensor({true, false, true, false}, torch::kByte); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto value = torch::tensor({3., 4., 5., 6.}); { @@ -654,16 +588,13 @@ TEST(TensorIndexingTest, TestByteTensorAssignment) { } assert_tensor_equal(x.index({0}), value); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(x.index({1}), torch::arange(4, 8).to(torch::kLong)); assert_tensor_equal(x.index({2}), value); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(x.index({3}), torch::arange(12, 16).to(torch::kLong)); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestVariableSlicing) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 16).view({4, 4}); auto indices = torch::tensor({0, 1}, torch::kInt); int i = indices[0].item(); @@ -673,22 +604,17 @@ TEST(TensorIndexingTest, TestVariableSlicing) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestEllipsisTensor) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 9).to(torch::kLong).view({3, 3}); auto idx = torch::tensor({0, 2}); assert_tensor_equal(x.index({"...", idx}), torch::tensor({{0, 2}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {3, 5}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6, 8}})); assert_tensor_equal(x.index({idx, "..."}), torch::tensor({{0, 1, 2}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {6, 7, 8}})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestOutOfBoundIndex) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::arange(0, 100).view({2, 5, 10}); ASSERT_THROWS_WITH(x.index({0, 5}), "index 5 is out of bounds for dimension 1 with size 5"); ASSERT_THROWS_WITH(x.index({4, 5}), "index 4 is out of bounds for dimension 0 with size 2"); @@ -698,7 +624,6 @@ TEST(TensorIndexingTest, TestOutOfBoundIndex) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorIndexingTest, TestZeroDimIndex) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = torch::tensor(10); auto runner = [&]() -> torch::Tensor { @@ -767,9 +692,7 @@ TEST(NumpyTests, TestEmptyFancyIndex) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestEllipsisIndex) { auto a = torch::tensor({{1, 2, 3}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4, 5, 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7, 8, 9}}); ASSERT_FALSE(a.index({"..."}).is_same(a)); assert_tensor_equal(a.index({"..."}), a); @@ -796,13 +719,10 @@ TEST(NumpyTests, TestEllipsisIndex) { TEST(NumpyTests, TestSingleIntIndex) { // Single integer index selects one row auto a = torch::tensor({{1, 2, 3}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4, 5, 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7, 8, 9}}); assert_tensor_equal(a.index({0}), torch::tensor({1, 2, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(a.index({-1}), torch::tensor({7, 8, 9})); // Index out of bounds produces IndexError @@ -819,9 +739,7 @@ TEST(NumpyTests, TestSingleIntIndex) { TEST(NumpyTests, TestSingleBoolIndex) { // Single boolean index auto a = torch::tensor({{1, 2, 3}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4, 5, 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7, 8, 9}}); assert_tensor_equal(a.index({true}), a.index({None})); @@ -830,7 +748,6 @@ TEST(NumpyTests, TestSingleBoolIndex) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestBooleanShapeMismatch) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto arr = torch::ones({5, 4, 3}); auto index = torch::tensor({true}); @@ -882,25 +799,19 @@ TEST(NumpyTests, TestBooleanIndexingTwodim) { // Indexing a 2-dimensional array with // 2-dimensional boolean array auto a = torch::tensor({{1, 2, 3}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4, 5, 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7, 8, 9}}); auto b = torch::tensor({{true, false, true}, {false, true, false}, {true, false, true}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(a.index({b}), torch::tensor({1, 3, 5, 7, 9})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(a.index({b.index({1})}), torch::tensor({{4, 5, 6}})); 
assert_tensor_equal(a.index({b.index({0})}), a.index({b.index({2})})); // boolean assignment a.index_put_({b}, 0); assert_tensor_equal(a, torch::tensor({{0, 2, 0}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4, 0, 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0, 8, 0}})); } @@ -939,24 +850,19 @@ TEST(NumpyTests, TestBooleanListIndexing) { // Indexing a 2-dimensional array with // boolean lists auto a = torch::tensor({{1, 2, 3}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {4, 5, 6}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {7, 8, 9}}); auto b = torch::tensor({true, false, false}); auto c = torch::tensor({true, true, false}); assert_tensor_equal(a.index({b}), torch::tensor({{1, 2, 3}})); assert_tensor_equal(a.index({b, b}), torch::tensor({1})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(a.index({c}), torch::tensor({{1, 2, 3}, {4, 5, 6}})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assert_tensor_equal(a.index({c, c}), torch::tensor({1, 5})); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestEverythingReturnsViews) { // Before `...` would return a itself. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = torch::tensor({5}); ASSERT_FALSE(a.is_same(a.index({"..."}))); @@ -965,7 +871,6 @@ TEST(NumpyTests, TestEverythingReturnsViews) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestBroaderrorsIndexing) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = torch::zeros({5, 5}); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(a.index({torch::tensor({0, 1}), torch::tensor({0, 1, 2})}), c10::Error); @@ -975,19 +880,14 @@ TEST(NumpyTests, TestBroaderrorsIndexing) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestTrivialFancyOutOfBounds) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = torch::zeros({5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ind = torch::ones({20}, torch::kInt64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ind.index_put_({-1}, 10); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(a.index({ind}), c10::Error); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(a.index_put_({ind}, 0), c10::Error); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ind = torch::ones({20}, torch::kInt64); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ind.index_put_({0}, 11); // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto) ASSERT_THROW(a.index({ind}), c10::Error); @@ -998,9 +898,7 @@ TEST(NumpyTests, TestTrivialFancyOutOfBounds) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestIndexIsLarger) { // Simple case of fancy index broadcasting of the index. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = torch::zeros({5, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a.index_put_({torch::tensor({{0}, {1}, {2}}), torch::tensor({0, 1, 2})}, torch::tensor({2., 3., 4.})); ASSERT_TRUE((a.index({Slice(None, 3), Slice(None, 3)}) == torch::tensor({2., 3., 4.})).all().item()); @@ -1008,14 +906,10 @@ TEST(NumpyTests, TestIndexIsLarger) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(NumpyTests, TestBroadcastSubspace) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = torch::zeros({100, 100}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto v = torch::arange(0., 100).index({Slice(), None}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = torch::arange(99, -1, -1).to(torch::kLong); a.index_put_({b}, v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto expected = b.to(torch::kDouble).unsqueeze(1).expand({100, 100}); assert_tensor_equal(a, expected); } diff --git a/test/cpp/api/tensor_options.cpp b/test/cpp/api/tensor_options.cpp index a3d746a304f74..2c3b6e4aa0ee5 100644 --- a/test/cpp/api/tensor_options.cpp +++ b/test/cpp/api/tensor_options.cpp @@ -70,23 +70,19 @@ TEST(TensorOptionsTest, ConstructsWellFromCPUTypes) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorOptionsTest, ConstructsWellFromCPUTensors) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = empty(5, kDouble).options(); REQUIRE_OPTIONS(kCPU, -1, kDouble, kStrided); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) options = empty(5, getDeprecatedTypeProperties(Backend::SparseCPU, kByte)).options(); REQUIRE_OPTIONS(kCPU, -1, kByte, kSparse); } // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorOptionsTest, ConstructsWellFromVariables) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = torch::empty(5).options(); REQUIRE_OPTIONS(kCPU, -1, kFloat, kStrided); ASSERT_FALSE(options.requires_grad()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) options = torch::empty(5, at::requires_grad()).options(); REQUIRE_OPTIONS(kCPU, -1, kFloat, kStrided); ASSERT_FALSE(options.requires_grad()); @@ -156,18 +152,15 @@ TEST(DefaultDtypeTest, NewTensorsHaveCorrectDefaultDtype) { AutoDefaultDtypeMode dtype_mode(kFloat); set_default_dtype(caffe2::TypeMeta::Make()); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::ones(5); ASSERT_EQ(tensor.dtype(), kInt); } set_default_dtype(caffe2::TypeMeta::Make()); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::ones(5); ASSERT_EQ(tensor.dtype(), kDouble); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto tensor = torch::ones(5, kFloat); ASSERT_EQ(tensor.dtype(), kFloat); } diff --git a/test/cpp/api/tensor_options_cuda.cpp b/test/cpp/api/tensor_options_cuda.cpp index 831f2b7a014ad..2806268cd8e35 100644 --- a/test/cpp/api/tensor_options_cuda.cpp +++ b/test/cpp/api/tensor_options_cuda.cpp @@ -57,11 +57,9 @@ TEST(TensorOptionsTest, ConstructsWellFromCUDATypes_CUDA) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(TensorOptionsTest, ConstructsWellFromCUDATensors_MultiCUDA) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto options = empty(5, device(kCUDA).dtype(kDouble)).options(); REQUIRE_OPTIONS(kCUDA, 0, kDouble, kStrided); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
options = empty(5, getDeprecatedTypeProperties(Backend::SparseCUDA, kByte)).options(); REQUIRE_OPTIONS(kCUDA, 0, kByte, kSparse); @@ -69,7 +67,6 @@ TEST(TensorOptionsTest, ConstructsWellFromCUDATensors_MultiCUDA) { Tensor tensor; { DeviceGuard guard(CUDADevice(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = empty(5, device(kCUDA)); } options = tensor.options(); @@ -77,7 +74,6 @@ TEST(TensorOptionsTest, ConstructsWellFromCUDATensors_MultiCUDA) { { DeviceGuard guard(CUDADevice(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tensor = empty(5, device(kCUDA).layout(kSparse)); } options = tensor.options(); diff --git a/test/cpp/api/transformer.cpp b/test/cpp/api/transformer.cpp index c65cc783c8393..037a8711e178c 100644 --- a/test/cpp/api/transformer.cpp +++ b/test/cpp/api/transformer.cpp @@ -23,7 +23,6 @@ template T_LAYER get_a_test_layer(const torch::TensorOptions& tensor_options) { int64_t d_model = 4; int64_t nhead = 2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t dim_feedforward = 16; double dropout = 0.0; @@ -48,10 +47,8 @@ void transformer_encoder_layer_test_helper(bool is_cuda) { get_a_test_layer(tensor_options); // relu test case 1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor encoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); torch::Tensor result = model(encoder_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor ref_output = torch::tensor({{{2.258703, 0.127985, -0.697881, 0.170862}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); @@ -68,13 +65,10 @@ void transformer_encoder_layer_test_helper(bool is_cuda) { ASSERT_TRUE(torch::isnan(result).all().item().to()); // relu test case 2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) encoder_input = torch::tensor({{{1, 2, 3, 4}}, {{5, 6, 7, 8}}}, tensor_options); result = model(encoder_input).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.272644, 0.119035, -0.691669, 0.153486}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.272644, 0.119035, -0.691669, 0.153486}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); @@ -89,42 +83,29 @@ void transformer_encoder_layer_test_helper(bool is_cuda) { mask = torch::tensor({{1, 0}}, tensor_options) == 1; result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.301516, 0.092249, -0.679101, 0.103088}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.301516, 0.092249, -0.679101, 0.103088}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); // relu test case 3 encoder_input = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.7462, 0.6653, 0.5679, 0.4891}, {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 
0.3154, 0.1733}, {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(encoder_input).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428589, 0.020835, -0.602055, -0.085249}, {2.427987, 0.021213, -0.602496, -0.084103}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.424689, 0.019155, -0.604793, -0.085672}, {2.413863, 0.022211, -0.612486, -0.072490}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.433774, 0.021598, -0.598343, -0.087548}, {2.425104, 0.019748, -0.604515, -0.084839}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.436185, 0.022682, -0.596625, -0.087261}, {2.433556, 0.021891, -0.598509, -0.086832}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.416246, 0.017512, -0.610712, -0.082961}, {2.422901, 0.024187, -0.606178, -0.074929}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); // all 0 values are NOT masked - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mask = torch::zeros({2, 5}, tensor_options) == 1; result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ASSERT_EQ(result.sizes(), ref_output.sizes()); @@ -136,52 +117,35 @@ void transformer_encoder_layer_test_helper(bool is_cuda) { mask[1][4] = 1; result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.429026, 0.020793, -0.601741, -0.085642}, {2.428811, 0.021445, -0.601912, -0.084252}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.425009, 0.019155, -0.604566, -0.085899}, {2.415408, 0.02249 , -0.611415, -0.073}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.434199, 0.021682, -0.598039, -0.087699}, {2.42598, 0.019941, -0.603896, -0.085091}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.436457, 0.022736, -0.59643 , -0.08736}, {2.434021, 0.022093, -0.598179, -0.08679}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.416531, 0.017498, -0.610513, -0.083181}, {2.4242, 0.024653, -0.605266, -0.074959}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); // gelu test case 1 model.get()->options.activation(torch::kGELU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) encoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); result = model(encoder_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.249815, 0.131006, -0.702199, 0.177868}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); // gelu test case 2 encoder_input = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.7462, 0.6653, 0.5679, 0.4891}, {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, {0.8924, 0.2872, 0.6692, 0.2944}}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(encoder_input); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42163188, 0.03227153, -0.60714219, -0.05908082}, {2.42151276, 0.03302179, -0.60722523, -0.05762651}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.41926761, 0.02974034, -0.60879519, -0.0621269}, {2.41626395, 0.03539356, -0.61087842, -0.04978623}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42382808, 0.03218872, -0.6055963, -0.06073591}, {2.41983477, 0.03085259, -0.60840145, -0.06046414}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42500749, 0.03328855, -0.60476388, -0.0595334}, {2.4237977, 0.03290575, -0.60561789, -0.05940082}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.41383916, 0.02686345, -0.61256377, -0.06380707}, {2.42000277, 0.03800944, -0.60824798, -0.04754947}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); @@ -209,15 +173,12 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ TransformerDecoderLayerOptions>(tensor_options); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor decoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor memory_input = torch::tensor({{{60, 70, 80, 90}}}, tensor_options); torch::Tensor result = model(decoder_input, memory_input).detach(); torch::Tensor ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.314351, 0.094805, -0.671322, 0.101977}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -225,15 +186,11 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ /*equal_nan=*/true)); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); memory_input = torch::tensor({{{1, 2, 3, 4}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.422245, 0.051716, -0.606338, -0.024756}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.422245, 0.051716, -0.606338, -0.024756}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -242,16 +199,11 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ // deterministic input decoder_input = torch::tensor({{{1, 2, 3, 4}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{5, 6, 7, 8}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.343536, 0.085561, -0.654954, 0.074991}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.343536, 0.085561, -0.654954, 0.074991}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -260,52 +212,30 @@ void 
transformer_decoder_layer_test_helper(bool is_cuda){ // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430065, 0.027862, -0.601136, -0.073096}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431935, 0.028907, -0.599809, -0.072488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428457, 0.027053, -0.602275, -0.073462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431970, 0.029387, -0.599789, -0.071621}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431934, 0.028196, -0.599802, -0.073809}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432306, 0.028858, -0.599542, -0.072846}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -318,17 +248,11 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ torch::Tensor key_padding_mask = torch::zeros({2, 3}, tensor_options) == 1; result = model(decoder_input, memory_input, t_mask, m_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430065, 0.027862, -0.601136, -0.073096}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431935, 0.028907, -0.599809, -0.072488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428457, 0.027053, -0.602275, -0.073462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431970, 0.029387, -0.599789, -0.071621}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431934, 0.028196, -0.599802, -0.073809}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432306, 0.028858, -0.599542, -0.072846}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -341,17 +265,11 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ key_padding_mask[1][2] = 1; result = 
model(decoder_input, memory_input, t_mask, m_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430025, 0.027643, -0.601164, -0.073476}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.4323, 0.029375, -0.599553, -0.071881}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428523, 0.026838, -0.602226, -0.07391}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432634, 0.029842, -0.599318, -0.071253}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.432278, 0.028152, -0.599555, -0.074139}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432659, 0.029244, -0.599294, -0.072382}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -360,21 +278,14 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ // memory_key_padding_mask torch::Tensor t_key_padding_mask = {}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) key_padding_mask = torch::zeros({2, 5}, tensor_options) == 1; result = model(decoder_input, memory_input, t_mask, m_mask, t_key_padding_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430065, 0.027862, -0.601136, -0.073096}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431935, 0.028907, -0.599809, -0.072488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428457, 0.027053, -0.602275, -0.073462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431970, 0.029387, -0.599789, -0.071621}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431934, 0.028196, -0.599802, -0.073809}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432306, 0.028858, -0.599542, -0.072846}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -387,17 +298,11 @@ void transformer_decoder_layer_test_helper(bool is_cuda){ key_padding_mask[1][4] = 1; result = model(decoder_input, memory_input, t_mask, m_mask, t_key_padding_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.429757, 0.027358, -0.601351, -0.073816}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432692, 0.028583, -0.599263, -0.073634}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428247, 0.02662, -0.602419, -0.074123}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432657, 0.029055, -0.599293, -0.072732}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431515, 0.027687, -0.600096, -0.074459}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.433075, 0.028543, -0.598987, -0.073985}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -428,15 +333,12 @@ void transformer_decoder_layer_test_helper_gelu(bool is_cuda) { model.get()->options.activation(torch::kGELU); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor decoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor memory_input = torch::tensor({{{60, 70, 80, 90}}}, tensor_options); torch::Tensor result = model(decoder_input, memory_input).detach(); torch::Tensor ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.306435, 0.095946, -0.675796, 0.10687}}}, tensor_options); 
ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -444,16 +346,12 @@ void transformer_decoder_layer_test_helper_gelu(bool is_cuda) { /*equal_nan=*/true)); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); memory_input = torch::tensor({{{1, 2, 3, 4}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.415448, 0.054389, -0.610932, -0.0156613}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.415448, 0.054389, -0.610932, -0.0156613}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -462,18 +360,13 @@ void transformer_decoder_layer_test_helper_gelu(bool is_cuda) { // deterministic input decoder_input = torch::tensor({{{1, 2, 3, 4}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{5, 6, 7, 8}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.338531, 0.087709, -0.65776, 0.080646}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.338531, 0.087709, -0.65776, 0.080646}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -482,53 +375,31 @@ void transformer_decoder_layer_test_helper_gelu(bool is_cuda) { // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.42049104, 0.03443088, -0.60793706, 
-0.05436271}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42210631, 0.03546578, -0.60679895, -0.05357488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.41907674, 0.0336104, -0.60892977, -0.05490462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42216881, 0.03586554, -0.6067524, -0.05289126}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42205716, 0.03488046, -0.60683681, -0.05460596}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42240309, 0.0354595, -0.60659063, -0.05378816}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -561,33 +432,22 @@ void transformer_encoder_test_helper(bool is_cuda) { } torch::Tensor encoder_input = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.7462, 0.6653, 0.5679, 0.4891}, {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); torch::Tensor result = model(encoder_input).detach(); torch::Tensor ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428589, 0.020835, -0.602055, -0.085249}, {2.427987, 0.021213, -0.602496, -0.084103}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.424689, 0.019155, -0.604793, -0.085672}, {2.413863, 0.022211, -0.612486, -0.072490}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.433774, 0.021598, -0.598343, -0.087548}, {2.425104, 0.019748, -0.604515, -0.084839}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.436185, 0.022682, -0.596625, -0.087261}, {2.433556, 0.021891, -0.598509, -0.086832}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.416246, 0.017512, -0.610712, -0.082961}, {2.422901, 0.024187, -0.606178, -0.074929}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); // all 0 values are NOT masked - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor mask = torch::zeros({2, 5}, tensor_options) == 1; result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ASSERT_EQ(result.sizes(), ref_output.sizes()); @@ -599,15 +459,10 @@ void transformer_encoder_test_helper(bool is_cuda) { mask[1][4] = 1; result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.429026, 0.020793, -0.601741, -0.085642}, {2.428811, 0.021445, -0.601912, -0.084252}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.425009, 0.019155, -0.604566, -0.085899}, {2.415408, 0.02249 , -0.611415, -0.073}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.434199, 0.021682, -0.598039, -0.087699}, {2.42598, 0.019941, -0.603896, -0.085091}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.436457, 0.022736, -0.59643 , -0.08736}, {2.434021, 0.022093, -0.598179, -0.08679}}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.416531, 0.017498, -0.610513, -0.083181}, {2.4242, 0.024653, -0.605266, -0.074959}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); @@ -619,35 +474,24 @@ void transformer_encoder_test_helper(bool is_cuda) { } result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419051, 0.017446, -0.608738, -0.085003}, {2.419102, 0.017452, -0.608703, -0.085026}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419043, 0.017445, -0.608744, -0.084999}, {2.419052, 0.017446, -0.608738, -0.085004}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419067, 0.017448, -0.608727, -0.085010}, {2.419098, 0.017452, -0.608706, -0.085024}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419072, 0.017449, -0.608724, -0.085012}, {2.419119, 0.017455, -0.608691, -0.085034}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419019, 0.017442, -0.608761, -0.084989}, {2.419075, 0.017449, -0.608722, -0.085014}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model = TransformerEncoder(TransformerEncoderOptions(encoder_layer, 6)); if (is_cuda) { model->to(torch::kCUDA); } result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419101, 0.017453, -0.608703, -0.085025}, {2.419101, 0.017453, -0.608704, -0.085025}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419101, 0.017453, -0.608703, -0.085025}, {2.419101, 0.017453, -0.608704, -0.085025}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419101, 0.017453, -0.608703, -0.085025}, {2.419101, 0.017453, -0.608704, -0.085025}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419101, 0.017453, -0.608703, -0.085025}, {2.419101, 0.017453, -0.608704, -0.085025}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.419101, 0.017453, -0.608703, -0.085025}, {2.419101, 0.017453, -0.608704, -0.085025}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); @@ -660,35 +504,24 @@ void transformer_encoder_test_helper(bool is_cuda) { } result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695949, -0.357635, -0.893077, -0.445238}, {1.695955, -0.357639, -0.893050, -0.445266}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695948, -0.357634, -0.893082, -0.445233}, {1.695950, -0.357635, -0.893077, -0.445238}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695951, -0.357636, -0.893069, -0.445246}, {1.695955, -0.357639, -0.893052, -0.445264}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695952, -0.357636, -0.893066, -0.445249}, {1.695957, -0.357641, -0.893041, -0.445276}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695946, -0.357632, -0.893095, -0.445220}, {1.695952, -0.357637, -0.893065, -0.445251}}}, 
tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model = TransformerEncoder(TransformerEncoderOptions(encoder_layer, 6).norm(AnyModule(norm))); if (is_cuda) { model->to(torch::kCUDA); } result = model(encoder_input, /*src_mask=*/torch::Tensor{}, /*src_key_padding_mask=*/mask).detach(); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695955, -0.357639, -0.893051, -0.445265}, {1.695955, -0.357639, -0.893051, -0.445265}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695955, -0.357639, -0.893051, -0.445265}, {1.695955, -0.357639, -0.893051, -0.445265}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695955, -0.357639, -0.893051, -0.445265}, {1.695955, -0.357639, -0.893051, -0.445265}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695955, -0.357639, -0.893051, -0.445265}, {1.695955, -0.357639, -0.893051, -0.445265}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.695955, -0.357639, -0.893051, -0.445265}, {1.695955, -0.357639, -0.893051, -0.445265}}}, tensor_options); ASSERT_EQ(result.sizes(), ref_output.sizes()); ASSERT_TRUE(torch::allclose(result, ref_output, 1e-7, 1e-5, /*equal_nan=*/true)); @@ -800,15 +633,12 @@ void transformer_decoder_test_helper(bool is_cuda) { } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor decoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) torch::Tensor memory_input = torch::tensor({{{60, 70, 80, 90}}}, tensor_options); torch::Tensor result = model(decoder_input, memory_input).detach(); torch::Tensor ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.314351, 0.094805, -0.671322, 0.101977}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -816,15 +646,11 @@ void transformer_decoder_test_helper(bool is_cuda) { /*equal_nan=*/true)); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); memory_input = torch::tensor({{{1, 2, 3, 4}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.422245, 0.051716, -0.606338, -0.024756}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.422245, 0.051716, -0.606338, -0.024756}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -833,16 +659,11 @@ void transformer_decoder_test_helper(bool is_cuda) { // deterministic input decoder_input = torch::tensor({{{1, 2, 3, 4}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{5, 6, 7, 8}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.343536, 0.085561, -0.654954, 0.074991}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.343536, 0.085561, -0.654954, 0.074991}}}, tensor_options); 
ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -851,52 +672,30 @@ void transformer_decoder_test_helper(bool is_cuda) { // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430065, 0.027862, -0.601136, -0.073096}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431935, 0.028907, -0.599809, -0.072488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428457, 0.027053, -0.602275, -0.073462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431970, 0.029387, -0.599789, -0.071621}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431934, 0.028196, -0.599802, -0.073809}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432306, 0.028858, -0.599542, -0.072846}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -909,17 +708,11 @@ void transformer_decoder_test_helper(bool is_cuda) { torch::Tensor key_padding_mask = torch::zeros({2, 3}, tensor_options) == 1; result = model(decoder_input, memory_input, t_mask, m_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430065, 0.027862, -0.601136, -0.073096}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431935, 0.028907, -0.599809, -0.072488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428457, 0.027053, -0.602275, -0.073462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431970, 0.029387, -0.599789, -0.071621}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431934, 0.028196, -0.599802, -0.073809}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432306, 0.028858, -0.599542, -0.072846}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -932,17 +725,11 @@ void 
transformer_decoder_test_helper(bool is_cuda) { key_padding_mask[1][2] = 1; result = model(decoder_input, memory_input, t_mask, m_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430025, 0.027643, -0.601164, -0.073476}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.4323, 0.029375, -0.599553, -0.071881}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428523, 0.026838, -0.602226, -0.07391}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432634, 0.029842, -0.599318, -0.071253}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.432278, 0.028152, -0.599555, -0.074139}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432659, 0.029244, -0.599294, -0.072382}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -951,21 +738,14 @@ void transformer_decoder_test_helper(bool is_cuda) { // memory_key_padding_mask torch::Tensor t_key_padding_mask = {}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) key_padding_mask = torch::zeros({2, 5}, tensor_options) == 1; result = model(decoder_input, memory_input, t_mask, m_mask, t_key_padding_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.430065, 0.027862, -0.601136, -0.073096}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431935, 0.028907, -0.599809, -0.072488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428457, 0.027053, -0.602275, -0.073462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.431970, 0.029387, -0.599789, -0.071621}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431934, 0.028196, -0.599802, -0.073809}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432306, 0.028858, -0.599542, -0.072846}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -978,17 +758,11 @@ void transformer_decoder_test_helper(bool is_cuda) { key_padding_mask[1][4] = 1; result = model(decoder_input, memory_input, t_mask, m_mask, t_key_padding_mask, key_padding_mask).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.429757, 0.027358, -0.601351, -0.073816}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432692, 0.028583, -0.599263, -0.073634}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.428247, 0.02662, -0.602419, -0.074123}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.432657, 0.029055, -0.599293, -0.072732}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.431515, 0.027687, -0.600096, -0.074459}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.433075, 0.028543, -0.598987, -0.073985}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1001,13 +775,10 @@ void transformer_decoder_test_helper(bool is_cuda) { model->to(torch::kCUDA); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{60, 70, 80, 90}}}, tensor_options); result = model(decoder_input, memory_input).detach(); ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.31316, 0.0950293, -0.671995, 0.102802}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); 
@@ -1015,58 +786,35 @@ void transformer_decoder_test_helper(bool is_cuda) { /*equal_nan=*/true)); // multiple layers no norm - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model = TransformerDecoder(TransformerDecoderOptions(decoder_layer, 6)); if (is_cuda) { model->to(torch::kCUDA); } // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.42794, 0.026164, -0.60263, -0.0747591}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.43113, 0.0279516, -0.600376, -0.0736896}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42794, 0.026164, -0.60263, -0.0747591}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.43113, 0.0279516, -0.600376, -0.0736896}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42794, 0.026164, -0.60263, -0.0747591}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.43113, 0.0279516, -0.600376, -0.0736896}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1082,13 +830,10 @@ void transformer_decoder_test_helper(bool is_cuda) { model->to(torch::kCUDA); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{60, 70, 80, 90}}}, tensor_options); result = model(decoder_input, memory_input).detach(); ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{1.66166, -0.326986, -1.01466, -0.320017}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1097,58 +842,35 @@ void transformer_decoder_test_helper(bool is_cuda) { // multiple layers with norm model = TransformerDecoder( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
TransformerDecoderOptions(decoder_layer, 6).norm(AnyModule(norm))); if (is_cuda) { model->to(torch::kCUDA); } // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{1.69559, -0.357291, -0.894741, -0.443553}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.69571, -0.357363, -0.894154, -0.444196}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.69559, -0.357291, -0.894741, -0.443553}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.69571, -0.357363, -0.894154, -0.444196}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.69559, -0.357291, -0.894741, -0.443553}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.69571, -0.357363, -0.894154, -0.444196}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1163,15 +885,12 @@ void transformer_decoder_test_helper(bool is_cuda) { } // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{20, 30, 40, 50}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{60, 70, 80, 90}}}, tensor_options); result = model(decoder_input, memory_input).detach(); ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.306435, 0.095946, -0.675796, 0.10687}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1179,16 +898,12 @@ void transformer_decoder_test_helper(bool is_cuda) { /*equal_nan=*/true)); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); memory_input = torch::tensor({{{1, 2, 3, 4}}}, tensor_options); result = 
model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.415448, 0.054389, -0.610932, -0.0156613}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.415448, 0.054389, -0.610932, -0.0156613}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1197,18 +912,13 @@ void transformer_decoder_test_helper(bool is_cuda) { // deterministic input decoder_input = torch::tensor({{{1, 2, 3, 4}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{5, 6, 7, 8}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{9, 10, 11, 12}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{11, 12, 13, 14}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.338531, 0.087709, -0.65776, 0.080646}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.338531, 0.087709, -0.65776, 0.080646}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1216,53 +926,31 @@ void transformer_decoder_test_helper(bool is_cuda) { /*equal_nan=*/true)); // deterministic input - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); ref_output = torch::tensor( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{{2.42049104, 0.03443088, -0.60793706, -0.05436271}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42210631, 0.03546578, -0.60679895, -0.05357488}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.41907674, 0.0336104, -0.60892977, -0.05490462}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42216881, 0.03586554, -0.6067524, -0.05289126}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.42205716, 0.03488046, -0.60683681, -0.05460596}, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42240309, 0.0354595, -0.60659063, -0.05378816}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1270,57 +958,34 @@ void transformer_decoder_test_helper(bool is_cuda) { /*equal_nan=*/true)); // Multiple layers no norm - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model = TransformerDecoder(TransformerDecoderOptions(decoder_layer, 6)); if (is_cuda) { model->to(torch::kCUDA); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{2.41859, 0.0328114, -0.609269, -0.0560386}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42138, 0.034598, -0.607316, -0.0546574}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.41859, 0.0328114, -0.609269, -0.0560386}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42138, 0.034598, -0.607316, -0.0546574}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.41859, 0.0328114, -0.609269, -0.0560386}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {2.42138, 0.034598, -0.607316, -0.0546574}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1329,58 +994,35 @@ void transformer_decoder_test_helper(bool is_cuda) { // Multiple layers with norm norm = LayerNorm(LayerNormOptions({decoder_layer.get()->options.d_model()})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) model = TransformerDecoder(TransformerDecoderOptions(decoder_layer, 6).norm(AnyModule(norm))); if (is_cuda) { model->to(torch::kCUDA); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) decoder_input = torch::tensor({{{0.4517, 0.6793, 0.5313, 0.0034}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.2678, 0.3677, 0.4459, 0.7166}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8100, 0.3716, 0.4096, 
0.1976}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.6958, 0.8844, 0.6081, 0.8315}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.0494, 0.9343, 0.5955, 0.3830}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5404, 0.3464, 0.9378, 0.6200}}}, tensor_options); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memory_input = torch::tensor({{{0.7462, 0.6653, 0.5679, 0.4891}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.5387, 0.1655, 0.3565, 0.0471}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8335, 0.2799, 0.5031, 0.2947}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.1402, 0.0318, 0.7636, 0.1346}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.6333, 0.9344, 0.1376, 0.9938}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8924, 0.2872, 0.6692, 0.2944}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.9897, 0.6915, 0.3154, 0.1733}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.8645, 0.3513, 0.3064, 0.0767}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{0.8117, 0.2366, 0.4838, 0.7881}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {0.3718, 0.4945, 0.9511, 0.0864}}}, tensor_options); result = model(decoder_input, memory_input).detach(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref_output = torch::tensor({{{1.69298, -0.355163, -0.906375, -0.431439}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.69305, -0.355195, -0.906062, -0.431791}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.69298, -0.355163, -0.906375, -0.431439}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.69305, -0.355195, -0.906062, -0.431791}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.69298, -0.355163, -0.906375, -0.431439}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.69305, -0.355195, -0.906062, -0.431791}}}, tensor_options); ASSERT_EQ(result.sizes().size(),ref_output.sizes().size()); @@ -1460,7 +1102,6 @@ void transformer_test_helper(bool is_cuda) { .nhead(2) .num_encoder_layers(2) .num_decoder_layers(1) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .dim_feedforward(16) .dropout(0.0) .activation(torch::kReLU)); @@ -1473,12 +1114,10 @@ void transformer_test_helper(bool is_cuda) { // transformer with customized encoder/decoder LayerNorm enorm(LayerNormOptions({4})); TransformerEncoder encoder(TransformerEncoderOptions( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TransformerEncoderLayerOptions(4, 2).dim_feedforward(16).dropout(0.0), 2).norm(AnyModule(enorm))); LayerNorm dnorm(LayerNormOptions({4})); TransformerDecoder decoder(TransformerDecoderOptions( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TransformerDecoderLayerOptions(4, 2).dim_feedforward(16).dropout(0.0), 1).norm(AnyModule(dnorm))); Transformer model_cus(TransformerOptions() @@ -1494,23 +1133,16 @@ void transformer_test_helper(bool is_cuda) { // test cases torch::Tensor src = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1.0, 2.0, 3.0, 4.0}, {5.0, 6.0, 7.0, 8.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{9.0, 10.0, 11.0, 12.0}, {13.0, 14.0, 15.0, 16.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{17.0, 18.0, 19.0, 20.0}, {21.0, 22.0, 23.0, 24.0}}}, tensor_options); torch::Tensor tgt = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
{{1.0, 2.0, 3.0, 4.0}, {5.0, 6.0, 7.0, 8.0}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{9.0, 10.0, 11.0, 12.0}, {13.0, 14.0, 15.0, 16.0}}}, tensor_options); torch::Tensor ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.695875, 0.347114, -0.044355, -0.549541}, {2.696091, 0.347015, -0.044770, -0.548522}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.695875, 0.347114, -0.044355, -0.549541}, {2.696091, 0.347015, -0.044770, -0.548522}}}, tensor_options); torch::Tensor result = model(src, tgt); torch::Tensor result_cus = model_cus(src, tgt); @@ -1520,9 +1152,7 @@ void transformer_test_helper(bool is_cuda) { torch::Tensor src_mask = Transformer::Impl::generate_square_subsequent_mask(src.size(0)).to(tensor_options); ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.695875, 0.347114, -0.044355, -0.549541}, {2.696091, 0.347015, -0.044770, -0.548522}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.695875, 0.347114, -0.044355, -0.549541}, {2.696091, 0.347015, -0.044770, -0.548522}}}, tensor_options); result = model(src, tgt, src_mask); result_cus = model_cus(src, tgt, src_mask); @@ -1534,9 +1164,7 @@ void transformer_test_helper(bool is_cuda) { tgt_key_padding_mask[0][0] = 1; tgt_key_padding_mask[1][1] = 1; ref_output = torch::tensor({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.696114, 0.347004, -0.044813, -0.548417}, {2.696091, 0.347015, -0.044770, -0.548522}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2.696114, 0.347004, -0.044813, -0.548417}, {2.696091, 0.347015, -0.044770, -0.548522}}}, tensor_options); result = model(src, tgt, src_mask, torch::Tensor(), torch::Tensor(), torch::Tensor(), tgt_key_padding_mask); result_cus = model_cus(src, tgt, src_mask, torch::Tensor(), torch::Tensor(), torch::Tensor(), tgt_key_padding_mask); @@ -1562,7 +1190,6 @@ TEST_F(TransformerTest, TransformerArgsCorrectness) { .nhead(2) .num_encoder_layers(2) .num_decoder_layers(1) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .dim_feedforward(16) .dropout(0.0) .activation(torch::kReLU)); diff --git a/test/cpp/jit/test_argument_spec.cpp b/test/cpp/jit/test_argument_spec.cpp index 78509807c4477..3021d8b8f5b3a 100644 --- a/test/cpp/jit/test_argument_spec.cpp +++ b/test/cpp/jit/test_argument_spec.cpp @@ -56,7 +56,6 @@ TEST(ArgumentSpecTest, CompleteArgumentSpec_CUDA) { {var(CF, {1}, true), var(CD, {1, 2}, false), var(GF, {}, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) var(GD, {4, 5, 6}, false), undef()}); @@ -68,7 +67,6 @@ TEST(ArgumentSpecTest, CompleteArgumentSpec_CUDA) { {var(CF, {1}, true), var(CD, {1, 2}, false), var(GF, {}, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) var(GD, {4, 5, 6}, false), undef()}); list2[1].toTensor().transpose_(0, 1); @@ -153,7 +151,6 @@ TEST(ArgumentSpecTest, Basic_CUDA) { {var(CF, {1}, true), var(CD, {1, 2}, false), var(GF, {}, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) var(GD, {4, 5, 6}, false), undef()}); @@ -165,7 +162,6 @@ TEST(ArgumentSpecTest, Basic_CUDA) { {var(CF, {1}, true), var(CD, {1, 2}, false), var(GF, {}, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) var(GD, {4, 5, 6}, false), undef()}); list2[1].toTensor().transpose_(0, 1); diff --git a/test/cpp/jit/test_autodiff.cpp b/test/cpp/jit/test_autodiff.cpp index cecffb348cf4f..1d561960d9386 100644 --- a/test/cpp/jit/test_autodiff.cpp +++ 
b/test/cpp/jit/test_autodiff.cpp @@ -184,7 +184,6 @@ TEST(AutodiffTest, Differentiate) { // Note: can't use IRParser for this test due to issue #23989 auto graph = std::make_shared(); std::vector sizes{2, 3, 4}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector strides{12, 4, 1}; const auto type = TensorType::create( at::ScalarType::Float, @@ -216,7 +215,6 @@ TEST(AutodiffTest, Differentiate) { auto grad_spec = differentiate(graph); std::vector expected_captured_inputs = {0, 1}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector expected_captured_outputs = {1, 2, 3, 4, 5, 6, 7}; std::vector expected_input_vjps = {0, 1}; std::vector expected_output_vjps = {0, 1}; diff --git a/test/cpp/jit/test_backend.cpp b/test/cpp/jit/test_backend.cpp index 5cb375b15850c..fcd626d920908 100644 --- a/test/cpp/jit/test_backend.cpp +++ b/test/cpp/jit/test_backend.cpp @@ -23,7 +23,6 @@ TEST(BackendTest, ToBackend) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.emplace_back(2.0 * torch::ones({})); inputs.emplace_back(1.0 * torch::ones({})); auto ref = m.forward(inputs).toTuple()->elements(); @@ -96,7 +95,6 @@ TEST(BackendTest, ToBackendNotAvailable) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.emplace_back(2.0 * torch::ones({})); inputs.emplace_back(1.0 * torch::ones({})); auto ref = m.forward(inputs).toTuple()->elements(); @@ -125,7 +123,6 @@ TEST(BackendTest, TestCompiler) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.emplace_back(2.0 * torch::ones({})); inputs.emplace_back(1.0 * torch::ones({})); auto ref = m.forward(inputs); diff --git a/test/cpp/jit/test_backend_compiler_lib.cpp b/test/cpp/jit/test_backend_compiler_lib.cpp index 42d97944c007c..3a4543ddd3722 100644 --- a/test/cpp/jit/test_backend_compiler_lib.cpp +++ b/test/cpp/jit/test_backend_compiler_lib.cpp @@ -88,7 +88,6 @@ class BackendWithCompiler : public PyTorchBackendInterface { instruction.size() > 15, "Constant value is expected in ", instruction); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto sub = instruction.substr(15); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) const_val = stod(sub); diff --git a/test/cpp/jit/test_custom_class_registrations.cpp b/test/cpp/jit/test_custom_class_registrations.cpp index 5e4fbb4ee005a..35d35f4cf640f 100644 --- a/test/cpp/jit/test_custom_class_registrations.cpp +++ b/test/cpp/jit/test_custom_class_registrations.cpp @@ -382,7 +382,6 @@ TORCH_LIBRARY(_TorchScriptTesting, m) { .def(torch::init>()) .def_pickle( [](c10::intrusive_ptr self) { // __getstate__ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::vector{1, 3, 3, 7}; }, [](std::vector state) { // __setstate__ diff --git a/test/cpp/jit/test_custom_operators.cpp b/test/cpp/jit/test_custom_operators.cpp index 929e20a6a1bf5..a9ca9189e5e25 100644 --- a/test/cpp/jit/test_custom_operators.cpp +++ b/test/cpp/jit/test_custom_operators.cpp @@ -31,7 +31,6 @@ TEST(CustomOperatorTest, InferredSchema) { ASSERT_EQ(op->schema().returns()[0].type()->kind(), TypeKind::TensorType); Stack stack; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(stack, 2.0f, at::ones(5)); op->getOperation()(&stack); at::Tensor output; @@ -63,7 +62,6 @@ TEST(CustomOperatorTest, ExplicitSchema) { ASSERT_EQ(op->schema().returns()[0].type()->kind(), TypeKind::TensorType); Stack stack; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(stack, 
2.0f, at::ones(5)); op->getOperation()(&stack); at::Tensor output; @@ -108,14 +106,11 @@ TEST(CustomOperatorTest, ListParameters) { Stack stack; push(stack, c10::List({1, 2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(stack, c10::List({1.0, 2.0})); push( stack, c10::List>( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {c10::complex(2.4, -5.5), c10::complex(-1.3, 2)})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(stack, c10::List({at::ones(5)})); op->getOperation()(&stack); c10::List output; @@ -148,7 +143,6 @@ TEST(CustomOperatorTest, ListParameters2) { op->schema().returns()[0].type()->isSubtypeOf(ListType::ofTensors())); Stack stack; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(stack, c10::List({at::ones(5)})); op->getOperation()(&stack); c10::List output; @@ -261,7 +255,6 @@ TEST(TestCustomOperator, OperatorGeneratorBasic) { ASSERT_EQ(op->schema().returns()[0].type()->kind(), TypeKind::TensorType); Stack stack; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(stack, 2.0f, at::ones(5)); op->getOperation()(&stack); at::Tensor output; diff --git a/test/cpp/jit/test_fuser.cpp b/test/cpp/jit/test_fuser.cpp index 88217932b908a..00b8c01bf888e 100644 --- a/test/cpp/jit/test_fuser.cpp +++ b/test/cpp/jit/test_fuser.cpp @@ -112,7 +112,6 @@ TEST(FuserTest, TestOne_CUDA) { // with the "wrong" dimensions, and then use transpose to get an // appropriately sized view. for (const auto i : c10::irange(graph.inputs().size())) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector dims = {128, 128, 32}; std::swap(dims[ti], dims[tj]); inputs.push_back(at::rand(dims, at::kCUDA).transpose(ti, tj)); @@ -164,9 +163,7 @@ TEST(FuserTest, FusedConcat_CUDA) { %3 : Tensor = prim::FusedConcat[dim=2](%0, %2) return (%2, %3))IR"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({3, 4, 5}, at::kCUDA); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({4, 3, 5}, at::kCUDA).transpose(0, 1); const auto o_r = a * b; diff --git a/test/cpp/jit/test_ir.cpp b/test/cpp/jit/test_ir.cpp index d2e5bf6e7bc05..ac8724ee79ba4 100644 --- a/test/cpp/jit/test_ir.cpp +++ b/test/cpp/jit/test_ir.cpp @@ -15,7 +15,6 @@ TEST(IRTest, Attributes) { auto four = attr::perm; Node* n = g.create(Symbol::fromQualString("foo::bar")); Node& attr = *n; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) attr.f_(one, 3.4)->i_(two, 5)->s_(three, "what"); ASSERT_EQ(attr.f(one), 3.4); ASSERT_EQ(attr.s(three), "what"); @@ -31,7 +30,6 @@ TEST(IRTest, Attributes) { Node& attr2 = *n2; attr2.copyAttributes(attr); ASSERT_EQ(attr2.s(one), "no"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) attr2.f_(one, 5); ASSERT_EQ(attr.s(one), "no"); ASSERT_EQ(attr2.f(one), 5); diff --git a/test/cpp/jit/test_lite_interpreter.cpp b/test/cpp/jit/test_lite_interpreter.cpp index 150bef351ffaa..0c6104e7bfb16 100644 --- a/test/cpp/jit/test_lite_interpreter.cpp +++ b/test/cpp/jit/test_lite_interpreter.cpp @@ -32,9 +32,7 @@ TEST(LiteInterpreterTest, UpsampleNearest2d) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.emplace_back(torch::rand({1, 3, 128, 128})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.emplace_back(at::Scalar(2.0)); auto ref = m.forward(inputs); @@ -123,9 +121,7 @@ TEST(LiteInterpreterTest, Conv) { std::vector inputs; Module m("m"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("weight", 
torch::ones({20, 1, 5, 5}), false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("bias", torch::ones({20}), false); m.define(R"( def forward(self, input): @@ -168,7 +164,6 @@ TEST(LiteInterpreterTest, Inline) { mobile::Module bc = _load_for_mobile(ss); std::vector inputs({torch::ones({})}); auto output = bc.get_method("foo3")(inputs); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AT_ASSERT(output.toTensor().item() == 7.0); } @@ -239,7 +234,6 @@ TEST(LiteInterpreterTest, Prim) { )JIT"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto minput = 3.5 * torch::ones({}); inputs.emplace_back(minput); auto ref = m.run_method("forward", minput); @@ -268,7 +262,6 @@ TEST(LiteInterpreterTest, PrimScalar) { )JIT"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto minput = 3.5 * torch::ones({}); inputs.emplace_back(minput); auto ref = m.run_method("forward", minput); @@ -315,7 +308,6 @@ TEST(LiteInterpreterTest, WrongMethodName) { m._save_for_mobile(ss); mobile::Module bc = _load_for_mobile(ss); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto minput = 5 * torch::ones({}); inputs.emplace_back(minput); ASSERT_THROWS_WITH_MESSAGE( @@ -337,7 +329,6 @@ TEST(LiteInterpreterTest, SetState) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto minput = 5 * torch::ones({}); inputs.emplace_back(minput); @@ -966,7 +957,6 @@ TEST(LiteInterpreterTest, FindAndRunMethod) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto minput = 5 * torch::ones({}); inputs.emplace_back(minput); auto ref = m.get_method("add_it")(inputs); @@ -997,7 +987,6 @@ TEST(LiteInterpreterTest, RunMethodVariadic) { )"); std::vector inputs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inputx = 5 * torch::ones({}); auto inputy = 4 * torch::ones({}); auto ref = m.run_method("add_three", inputx, inputy); @@ -1073,7 +1062,6 @@ TEST(LiteInterpreterTest, ExtraFiles) { for (auto& file_name : all_files) { if (file_name.find("extra/") == 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loaded_extra_files[file_name.substr(6)] = ""; } } @@ -1086,9 +1074,7 @@ TEST(LiteInterpreterTest, ExtraFiles) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(LiteInterpreterTest, OpNameExportFetchRootOperators) { torch::jit::Module m("m"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("weight", torch::ones({20, 1, 5, 5}), false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.register_parameter("bias", torch::ones({20}), false); m.define(R"( def forward(self, input): diff --git a/test/cpp/jit/test_lite_trainer.cpp b/test/cpp/jit/test_lite_trainer.cpp index 59673d9cfedbf..7255857d4f0be 100644 --- a/test/cpp/jit/test_lite_trainer.cpp +++ b/test/cpp/jit/test_lite_trainer.cpp @@ -26,9 +26,7 @@ TEST(LiteTrainerTest, Params) { b = 1.0 return self.foo * x + b )"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double learning_rate = 0.1, momentum = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int n_epoc = 10; // init: y = x + 1; // target: y = 2 x + 1 @@ -198,9 +196,7 @@ TEST(LiteTrainerTest, SGD) { b = 1.0 return self.foo * x + b )"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double learning_rate = 0.1, momentum = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int n_epoc = 10; // 
init: y = x + 1; // target: y = 2 x + 1 @@ -252,7 +248,6 @@ TEST(LiteTrainerTest, SGD) { namespace { struct DummyDataset : torch::data::datasets::Dataset { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) explicit DummyDataset(size_t size = 100) : size_(size) {} int get(size_t index) override { @@ -273,7 +268,6 @@ TEST(LiteTrainerTest, SequentialSampler) { const int kBatchSize = 10; auto data_loader = torch::data::make_data_loader( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) DummyDataset(25), kBatchSize); int i = 1; diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp index 1729eea9d6994..83073a84dff32 100644 --- a/test/cpp/jit/test_misc.cpp +++ b/test/cpp/jit/test_misc.cpp @@ -141,9 +141,7 @@ TEST(FromQualStringTest, Basic) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(THNNConvTest, Basic) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_size = {4, 3, 15, 17}; // B x C x H x W - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector kernel_size = {3, 5}; std::vector stride = {1, 2}; std::vector padding = {2, 1}; @@ -242,12 +240,9 @@ TEST(ATenNativeBatchNormTest, Basic) { // aten::native_batch_norm(Tensor input, Tensor weight, Tensor bias, Tensor // running_mean, Tensor running_var, bool training, float momentum, float eps) // -> (Tensor, Tensor, Tensor) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_size = {4, 3, 15, 17}; // B x C x H x W bool training = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float momentum = 0.9; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float eps = 1e-5; // make inputs @@ -673,7 +668,6 @@ TEST(TopologicalIndexTest, Reindex) { auto anchor = graph.create(prim::AutogradZero); graph.appendNode(anchor); // Inserting to the same place a lot will trigger reindexing - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 100; ++i) { auto n = graph.create(prim::AutogradZero); n->insertAfter(anchor); @@ -681,9 +675,7 @@ TEST(TopologicalIndexTest, Reindex) { } // Nodes should be in reverse order - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 100; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto j = i + 1; j < 100; ++j) { ASSERT_TRUE(nodes[i]->isAfter(nodes[j])); } @@ -983,12 +975,10 @@ TEST(RecordFunctionTest, SampledCallbacks) { addGlobalCallback(RecordFunctionCallback(nonSampledCallback)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto handle = setup_sampled_callback(0.5); auto run_test_function = []() { auto t = torch::randn({1, 2, 3}, at::kCPU); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto k = 0; k < 1000; k++) { invokeTestRecordFunction(t); } @@ -1150,7 +1140,6 @@ TEST(RecordFunctionTest, Callbacks) { [](const RecordFunction& /* unused */) -> std::unique_ptr { auto ctx = std::make_unique(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ctx->a = 123; ctx->b = "test_str"; ids.push_back(1); @@ -1177,7 +1166,6 @@ TEST(RecordFunctionTest, Callbacks) { [](const RecordFunction& /* unused */) -> std::unique_ptr { auto ctx = std::make_unique(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ctx->a = 234; ctx->b = "test_thread_str"; ids.push_back(2); @@ -1342,11 +1330,9 @@ TEST(ThreadLocalDebugInfoTest, Basic) { TORCH_CHECK( c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::TEST_INFO) == nullptr); auto debug_info = std::make_shared(); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) debug_info->setModelId(42); { c10::DebugInfoGuard guard(c10::DebugInfoKind::TEST_INFO, debug_info); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO, 42); } @@ -1356,7 +1342,6 @@ TEST(ThreadLocalDebugInfoTest, Basic) { { c10::DebugInfoGuard guard(c10::DebugInfoKind::TEST_INFO, debug_info); at::launch([]() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO, 42); done = true; }); @@ -1370,7 +1355,6 @@ TEST(ThreadLocalDebugInfoTest, Basic) { done = false; auto handle = addGlobalCallback(RecordFunctionCallback( [](const RecordFunction&) -> std::unique_ptr { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO, 42); done = true; return nullptr; @@ -1391,23 +1375,17 @@ TEST(ThreadLocalDebugInfoTest, Basic) { { c10::DebugInfoGuard guard(c10::DebugInfoKind::TEST_INFO, debug_info); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO, 42); { auto debug_info = std::make_shared(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) debug_info->setModelId(314); c10::DebugInfoGuard guard(c10::DebugInfoKind::TEST_INFO_2, debug_info); { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO, 42); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO_2, 314); done = false; at::launch([]() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO, 42); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) checkDebugInfo(c10::DebugInfoKind::TEST_INFO_2, 314); done = true; }); @@ -1580,7 +1558,6 @@ graph(%a): return (%a))IR", &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stack = {IValue(torch::randn({22}, at::kCPU))}; auto run = [&](std::shared_ptr& graph, std::vector stack) { GraphExecutor executor(graph, ""); @@ -1745,7 +1722,6 @@ TEST(LoopPeelerTest, LoopWithTerminationCondition) { // peeling 5 iterations should update the termination // condition to false { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LoopsPeeler peeler(true_pred, 5); auto copy = f.graph()->copy(); peeler.run(copy); @@ -1802,7 +1778,6 @@ TEST(LoopPeelerTest, SimpleNestedLoops) { } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LoopsPeeler peeler(true_pred, 5); auto copy = f.graph()->copy(); peeler.run(copy); @@ -1843,7 +1818,6 @@ TEST(LoopPeelerTest, SimpleNestedLoops2) { } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LoopsPeeler peeler(true_pred, 5); auto copy = f.graph()->copy(); peeler.run(copy); @@ -1988,9 +1962,7 @@ TEST(ProfilerTest, Basic) { auto mm = std::find_if(begin, end, [](Node* n) { return n->kind() == aten::add; }); ASSERT_NE(mm, end); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector mm_expected{4, 2048}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector eltwise{4, 512}; checkShape(mm->inputs().at(0)->node()->ty(attr::profiled_type), mm_expected); auto mul_n = @@ -2037,7 +2009,6 @@ def foo(x): ASSERT_EQ(std::get<0>(callstack_vector[0]), &cu->get_function("bar")); break; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 7: { // Const 7 comes from function 'ham', which gets inlined to 'baz', // which is then inlined to 'foo'. 
The callstack for the corresponding @@ -2049,7 +2020,6 @@ def foo(x): ASSERT_EQ(std::get<0>(callstack_vector[1]), &cu->get_function("ham")); break; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 11: { // Const 11 comes from function 'foo', which is not inlined anywhere // and thus it should not have a callstack. @@ -2239,7 +2209,6 @@ TEST(FuturesTest, Basic) { int32_t sat1 = 0; int32_t sat2 = 0; f1->addCallback([&](Future& /* unused */) { ++sat1; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) f1->markCompleted(43); ASSERT_TRUE(f1->completed()); ASSERT_TRUE(f1->hasValue()); @@ -2297,7 +2266,6 @@ TEST(FuturesTest, Then) { done = true; }); ASSERT_FALSE(done); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) f1->markCompleted(42); ASSERT_TRUE(done); } @@ -2323,7 +2291,6 @@ TEST(FuturesTest, CollectAll) { futures.push_back(s1); auto c2 = collectAll(futures); ASSERT_FALSE(c2->completed()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1->markCompleted(5); ASSERT_TRUE(c2->completed()); ASSERT_EQ(c2->value().toList().size(), 1); @@ -2343,10 +2310,8 @@ TEST(FuturesTest, CollectAll) { futures.push_back(s3); auto c4 = collectAll(futures); ASSERT_FALSE(c4->completed()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s3->markCompleted(7); ASSERT_FALSE(c4->completed()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s2->markCompleted(6); ASSERT_TRUE(c4->completed()); ASSERT_EQ(c4->value().toList().size(), 3); @@ -2388,7 +2353,6 @@ TEST(FuturesTest, CollectAny) { futures.push_back(s1); auto c2 = collectAny(futures); ASSERT_FALSE(c2->completed()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1->markCompleted(5); ASSERT_TRUE(c2->completed()); ASSERT_TRUE(c2->value().isInt()); @@ -2408,7 +2372,6 @@ TEST(FuturesTest, CollectAny) { futures.push_back(s3); auto c4 = collectAny(futures); ASSERT_FALSE(c4->completed()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s3->markCompleted(7); ASSERT_TRUE(c4->completed()); ASSERT_EQ(c4->value().toInt(), 7); @@ -2544,9 +2507,7 @@ TEST(ComputeFlopsTest, Basic) { // Test aten::conv2d extra_args.clear(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_size = {4, 5, 6, 7}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector weight_size = {3, 5, 2, 1}; std::vector padding = {1, 0}; std::vector stride = {1, 1}; @@ -2561,9 +2522,7 @@ TEST(ComputeFlopsTest, Basic) { ASSERT_EQ(flops, 13440); // Test aten::conv2d fail - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input_size = {4, 5, 6, 7}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight_size = {4, 5, 6}; extra_args["input_size"] = at::IValue(at::IntArrayRef(input_size)); extra_args["weight_size"] = at::IValue(at::IntArrayRef(weight_size)); @@ -2571,7 +2530,6 @@ TEST(ComputeFlopsTest, Basic) { ASSERT_EQ(flops, 0); // Test aten::conv2d fail 2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) weight_size = {3, 5, 2, 1}; stride = {0, 0}; extra_args["weight_size"] = at::IValue(at::IntArrayRef(input_size)); @@ -2581,7 +2539,6 @@ TEST(ComputeFlopsTest, Basic) { // Test aten::conv2d fail 3 extra_args.clear(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input_size = {4, 5, 6, 7}; extra_args["input_size"] = at::IValue(at::IntArrayRef(input_size)); flops = computeFlops(std::string("aten::conv2d"), extra_args); @@ -2589,9 +2546,7 @@ TEST(ComputeFlopsTest, Basic) { // Test aten::mm extra_args.clear(); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector mat1_sizes = {3, 4, 5, 6}; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector mat2_sizes = {6, 5, 4, 3}; extra_args["mat1_size"] = at::IValue(at::IntArrayRef(mat1_sizes)); extra_args["mat2_size"] = at::IValue(at::IntArrayRef(mat2_sizes)); @@ -2605,7 +2560,6 @@ TEST(ComputeFlopsTest, Basic) { // Test aten::add.Tensor extra_args.clear(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector mat_sizes = {3, 4, 5, 6}; extra_args["mat_size"] = at::IValue(at::IntArrayRef(mat_sizes)); flops = computeFlops(std::string("aten::add"), extra_args); @@ -2613,7 +2567,6 @@ TEST(ComputeFlopsTest, Basic) { // Test aten::mul.Tensor extra_args.clear(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mat_sizes = {3, 4, 5, 6}; extra_args["mat_size"] = at::IValue(at::IntArrayRef(mat_sizes)); flops = computeFlops(std::string("aten::mul"), extra_args); diff --git a/test/cpp/jit/test_module_api.cpp b/test/cpp/jit/test_module_api.cpp index e3b62f76d1c1c..3c670e41546ac 100644 --- a/test/cpp/jit/test_module_api.cpp +++ b/test/cpp/jit/test_module_api.cpp @@ -202,11 +202,9 @@ TEST(ModuleAPITest, DeepCopy) { cls->addAttribute(tensor_attr, TensorType::get()); cls->addAttribute(tensor_list_attr, ListType::ofTensors()); Module m(cu, cls); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::List list({at::rand(5), at::rand(5)}); m.setattr(int_attr, IValue(2)); m.setattr(str_attr, IValue("str")); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) m.setattr(tensor_attr, at::randn(5)); m.setattr(tensor_list_attr, list); @@ -282,13 +280,9 @@ TEST(ModuleAPITest, DeepCopyPreservesAliasing) { cls->addAttribute(attr3, TensorType::get()); cls->addAttribute(attr4, TensorType::get()); Module m(cu, cls); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t1 = at::rand(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t2 = at::rand(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t3 = at::rand(5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t4 = at::rand({5, 2}); c10::List list1({t1, t2}); c10::List list2({t1, t3}); diff --git a/test/cpp/jit/test_save_load.cpp b/test/cpp/jit/test_save_load.cpp index 24630e5f53fa3..d942562600226 100644 --- a/test/cpp/jit/test_save_load.cpp +++ b/test/cpp/jit/test_save_load.cpp @@ -94,12 +94,10 @@ TEST(SerializationTest, ExtraFileHooksWithSecret) { TEST(SerializationTest, TypeTags) { auto list = c10::List>(); list.push_back(c10::List({1, 2, 3})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) list.push_back(c10::List({4, 5, 6})); auto dict = c10::Dict(); dict.insert("Hello", torch::ones({2, 2})); auto dict_list = c10::List>(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 5; i++) { auto another_dict = c10::Dict(); another_dict.insert("Hello" + std::to_string(i), torch::ones({2, 2})); diff --git a/test/cpp/jit/test_schema_matching.cpp b/test/cpp/jit/test_schema_matching.cpp index 6d9814c292e54..20cae59f43dee 100644 --- a/test/cpp/jit/test_schema_matching.cpp +++ b/test/cpp/jit/test_schema_matching.cpp @@ -32,7 +32,6 @@ TEST(SchemaMatchingTest, VarType) { return torch.test_vartype(a, 2.0) )"); auto result = m.run_method("test"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_INTERNAL_ASSERT(result.toDouble() == 2.0); const std::string error_example = R"JIT( @@ -73,7 +72,6 @@ TEST(SchemaMatchingTest, VarType2) { return torch.test_vartype2(3.0, 
a) )JIT"); auto result = m.run_method("test"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_INTERNAL_ASSERT(result.toDouble() == 3.0); static const auto error_exam2 = R"JIT( diff --git a/test/cpp/jit/test_subgraph_utils.cpp b/test/cpp/jit/test_subgraph_utils.cpp index df93f6f86422b..c526e785b0b30 100644 --- a/test/cpp/jit/test_subgraph_utils.cpp +++ b/test/cpp/jit/test_subgraph_utils.cpp @@ -139,12 +139,10 @@ graph(%a : Tensor, %b : Tensor, %c : Tensor): parse_map); std::string ref_full_name = "graph_tanh_mul_div_mul_tanh_tanh_tanh_tanh"; std::string full_name = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SubgraphUtils::generateNameForGraph(graph, 80, "graph"); ASSERT_EQ(full_name, ref_full_name); std::string truncated_name = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) SubgraphUtils::generateNameForGraph(graph, 10, "graph"); ASSERT_LE(truncated_name.size(), ref_full_name.size()); diff --git a/test/cpp/jit/test_utils.cpp b/test/cpp/jit/test_utils.cpp index 256733367bb2c..54faafdae64a5 100644 --- a/test/cpp/jit/test_utils.cpp +++ b/test/cpp/jit/test_utils.cpp @@ -196,7 +196,6 @@ bool checkRtol(const at::Tensor& diff, const std::vector inputs) { for (auto& tensor : inputs) { maxValue = fmax(tensor.abs().max().item(), maxValue); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return diff.abs().max().item() < 2e-6 * maxValue; } bool almostEqual(const at::Tensor& a, const at::Tensor& b) { diff --git a/test/cpp/tensorexpr/test_aten.cpp b/test/cpp/tensorexpr/test_aten.cpp index 79c98b3421e07..1059b1fcc09c5 100644 --- a/test/cpp/tensorexpr/test_aten.cpp +++ b/test/cpp/tensorexpr/test_aten.cpp @@ -319,7 +319,6 @@ TEST(ATen, addcmulInt) { a_v(i) = i; b_v(i) = 2 * i + 1; c_v(i) = 3 * i + 2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d_v(i) = 5 * i + 3; } @@ -363,7 +362,6 @@ TEST(ATen, addcmulFloat) { a_v(i) = i; b_v(i) = 2 * i + 1; c_v(i) = 3 * i + 2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d_v(i) = 5 * i + 3; } @@ -686,7 +684,6 @@ TEST(ATen, reluInt) { PaddedBuffer b_v(kTotalSize); for (int i = 0; i < kTotalSize; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(i) = i - 64; } @@ -717,7 +714,6 @@ TEST(ATen, reluFloat) { PaddedBuffer b_v(kTotalSize); for (int i = 0; i < kTotalSize; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(i) = i - 64; } @@ -746,7 +742,6 @@ TEST(ATen, logFloat) { PaddedBuffer b_v(kTotalSize); for (int i = 0; i < kTotalSize; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(i) = i + 10; } @@ -875,7 +870,6 @@ TEST(ATen, log10Float) { PaddedBuffer b_v(kTotalSize); for (int i = 0; i < kTotalSize; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(i) = i + 10; } @@ -904,7 +898,6 @@ TEST(ATen, log2Float) { PaddedBuffer b_v(kTotalSize); for (int i = 0; i < kTotalSize; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(i) = i + 10; } @@ -1038,9 +1031,7 @@ TEST(ATen, geInt) { Placeholder a(BufHandle("A", {N}, kInt)); Placeholder b(BufHandle("B", {N}, kInt)); Placeholder c(BufHandle("C", {N}, kInt)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buffer(N, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_buffer(N, 5); std::vector c_buffer(N, 0); @@ -1067,7 +1058,6 @@ TEST(ATen, gtInt) { Placeholder a(BufHandle("A", {N}, kInt)); Placeholder b(BufHandle("B", {N}, kInt)); Placeholder c(BufHandle("C", {N}, kInt)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buffer(N, 6); std::vector b_buffer(N, 3); std::vector c_buffer(N, 0); @@ -1095,9 +1085,7 @@ TEST(ATen, leInt) { Placeholder a(BufHandle("A", {N}, kInt)); Placeholder b(BufHandle("B", {N}, kInt)); Placeholder c(BufHandle("C", {N}, kInt)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buffer(N, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_buffer(N, 5); std::vector c_buffer(N, 0); @@ -1124,9 +1112,7 @@ TEST(ATen, ltInt) { Placeholder a(BufHandle("A", {N}, kInt)); Placeholder b(BufHandle("B", {N}, kInt)); Placeholder c(BufHandle("C", {N}, kInt)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buffer(N, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_buffer(N, 5); std::vector c_buffer(N, 1); diff --git a/test/cpp/tensorexpr/test_boundsinference.cpp b/test/cpp/tensorexpr/test_boundsinference.cpp index a33ef46dbf765..1e69286ec09b7 100644 --- a/test/cpp/tensorexpr/test_boundsinference.cpp +++ b/test/cpp/tensorexpr/test_boundsinference.cpp @@ -48,7 +48,6 @@ TEST(BoundsInference, _1) { // For this loop bounds inference should yield the following: // {{b, kStore, 0, 99}, {a, kLoad, 0, 99}} KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle n(100); Placeholder a(BufHandle("a", {n}, kFloat)); Tensor* b = @@ -60,12 +59,10 @@ TEST(BoundsInference, _1) { ASSERT_EQ(bounds_info.size(), 2); ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{0, 99}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 99}}); } @@ -103,12 +100,9 @@ TEST(BoundsInference, _3) { // For this loop bounds inference should yield the following: // {{b, kStore, 0, 99}, {a, kLoad, 0, 109}} KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle n(100); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {n + 10}, kFloat)); Tensor* b = Compute("b", {{n, "i"}}, [&](const VarHandle& i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a.load(i) * a.load(i + 10); }); LoopNest l({b}); @@ -118,12 +112,10 @@ TEST(BoundsInference, _3) { ASSERT_EQ(bounds_info.size(), 2); ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{0, 109}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 99}}); } @@ -138,9 +130,7 @@ TEST(BoundsInference, _4) { // for x in 0..320: // c[y,x] = a[y,x] * b[y,x] KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle W(320); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle H(200); Placeholder a(BufHandle("a", {H, W}, kFloat)); Tensor* b = Compute( @@ -161,17 +151,14 @@ TEST(BoundsInference, _4) { ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{0, 199}, {0, 319}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 199}, {0, 319}}); ASSERT_EQ(bounds_info.at(c->buf()).size(), 1); ASSERT_EQ(bounds_info.at(c->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(c->buf())[0], {{0, 199}, {0, 319}}); } { @@ -181,17 +168,14 @@ TEST(BoundsInference, _4) { ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{-1, -1}, {0, 319}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{-1, -1}, {0, 319}}); ASSERT_EQ(bounds_info.at(c->buf()).size(), 1); ASSERT_EQ(bounds_info.at(c->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(c->buf())[0], {{-1, -1}, {0, 319}}); } { @@ -227,7 +211,6 @@ TEST(BoundsInference, _5) { // for i_tail in 0..100%16: // b[i_tail + (100/16)*16] = a[i_tail + (100/16)*16]; KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle n(100); Placeholder a(BufHandle("a", {n}, kFloat)); Tensor* b = @@ -241,7 +224,6 @@ TEST(BoundsInference, _5) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* tail; std::vector loops = l.getLoopStmtsFor(b); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.splitWithTail(loops[0], 16, &outer, &inner, &tail); { @@ -251,12 +233,10 @@ TEST(BoundsInference, _5) { ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{0, 95}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 95}}); } { @@ -266,12 +246,10 @@ TEST(BoundsInference, _5) { ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{96, 99}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{96, 99}}); } } @@ -287,13 +265,9 @@ TEST(BoundsInference, _6) { // for x in 0..32: // c[y,x] = a[y+100,x+100] * b[y*2,x*5] KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle W(320); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle H(200); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle CW(32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle CH(20); Placeholder a(BufHandle("a", {H, W}, kFloat)); Tensor* b = Compute( @@ -302,7 +276,6 @@ TEST(BoundsInference, _6) { }); Tensor* c = Compute( "c", {{CH, "y"}, {CW, "x"}}, [&](const VarHandle& y, const VarHandle& x) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a.load(y + 100, x + 100) * b->load(y * 2, x * 5); }); LoopNest l({c}); @@ -315,17 +288,14 @@ TEST(BoundsInference, _6) { ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{100, 119}, {100, 131}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 38}, {0, 155}}); ASSERT_EQ(bounds_info.at(c->buf()).size(), 1); ASSERT_EQ(bounds_info.at(c->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(c->buf())[0], {{0, 19}, {0, 31}}); } { @@ -335,17 +305,14 @@ TEST(BoundsInference, _6) { ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{-1, -1}, {100, 131}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{-1, -1}, {0, 155}}); ASSERT_EQ(bounds_info.at(c->buf()).size(), 1); ASSERT_EQ(bounds_info.at(c->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(c->buf())[0], {{-1, -1}, {0, 31}}); } { @@ -370,9 +337,7 @@ TEST(BoundsInference, _6) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BoundsInference, Adjacent) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle H(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {20}, kFloat)); Tensor* b = Compute("b", {{H, "x"}}, [&](const VarHandle& x) { return a.load(x); }); @@ -389,12 +354,10 @@ TEST(BoundsInference, Adjacent) { // reads from a[0:5], writes to b[0:5] ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{0, 5}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 5}}); } { @@ -405,12 +368,10 @@ TEST(BoundsInference, Adjacent) { // reads from a[0+6:5+6], writes to c[0:5] ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{6, 11}}); ASSERT_EQ(bounds_info.at(c->buf()).size(), 1); ASSERT_EQ(bounds_info.at(c->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(c->buf())[0], {{0, 5}}); } { @@ -422,17 +383,14 @@ TEST(BoundsInference, Adjacent) { // merged. 
ASSERT_EQ(bounds_info.at(a.data()).size(), 1); ASSERT_EQ(bounds_info.at(a.data())[0].kind, kLoad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(a.data())[0], {{0, 11}}); ASSERT_EQ(bounds_info.at(b->buf()).size(), 1); ASSERT_EQ(bounds_info.at(b->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(b->buf())[0], {{0, 5}}); ASSERT_EQ(bounds_info.at(c->buf()).size(), 1); ASSERT_EQ(bounds_info.at(c->buf())[0].kind, kStore); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bounds_info.at(c->buf())[0], {{0, 5}}); } } @@ -440,22 +398,15 @@ TEST(BoundsInference, Adjacent) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BoundsInference, MultipleTopLoopLoad) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {100}, kFloat)); Tensor* b = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("b", {{64, "x"}}, [&](const VarHandle& x) { return a.load(x); }); Tensor* c = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "c", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{32, "x"}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [&](const VarHandle& x) { return a.load(x + 10); }); Tensor* d = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "d", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{96, "x"}}, [&](const VarHandle& x) { return a.load(x + 2); }); LoopNest l({b, c, d}); @@ -475,7 +426,6 @@ TEST(BoundsInference, MultipleTopLoopLoad) { // start: Min of the 3 load bounds = Min of loop starts + offset = 0+0 (b). // stop: Max of the 3 load bounds = Max of loop stops + offset - 1 = // 96 + 2 - 1 (d). - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{0, 97}}); } @@ -486,7 +436,6 @@ TEST(BoundsInference, MultipleTopLoopLoad) { auto bound = bounds[0]; ASSERT_EQ(bound.kind, TensorAccessKind::kStore); // Just the loop extents for b. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{0, 63}}); } { @@ -495,7 +444,6 @@ TEST(BoundsInference, MultipleTopLoopLoad) { auto bound = bounds[0]; ASSERT_EQ(bound.kind, TensorAccessKind::kStore); // Just the loop extents for c. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{0, 31}}); } { @@ -504,7 +452,6 @@ TEST(BoundsInference, MultipleTopLoopLoad) { auto bound = bounds[0]; ASSERT_EQ(bound.kind, TensorAccessKind::kStore); // Just the loop extents for d. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{0, 95}}); } } @@ -512,24 +459,17 @@ TEST(BoundsInference, MultipleTopLoopLoad) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BoundsInference, MultipleTopLoopStore) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("a", {100}, kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("b", {100}, kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("c", {100}, kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle d("d", {100}, kFloat); VarHandle x("x", kInt); // Same as above but the offsets are on the Store now. // Can't do this through ComputeAPI without transforms we don't have yet. 
Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 64, Store::make(b, {x}, Load::make(a, {x}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 32, Store::make(c, {x + 10}, Load::make(a, {x}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 96, Store::make(d, {x + 2}, Load::make(a, {x})))}); auto bounds_info = inferBounds(stmt); @@ -544,7 +484,6 @@ TEST(BoundsInference, MultipleTopLoopStore) { auto bound = bounds[0]; ASSERT_EQ(bound.kind, TensorAccessKind::kLoad); // Bounds: there are no offsets, so this is just the max loop bounds. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{0, 95}}); } @@ -556,7 +495,6 @@ TEST(BoundsInference, MultipleTopLoopStore) { ASSERT_EQ(bound.kind, TensorAccessKind::kStore); // This should be equivalent to {offset, extent + offset} for the b loop. // b loop has no offset, so just the loop extents. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{0, 63}}); } { @@ -566,7 +504,6 @@ TEST(BoundsInference, MultipleTopLoopStore) { ASSERT_EQ(bound.kind, TensorAccessKind::kStore); // This should be equivalent to {offset, extent + offset} for the c loop. // Offset is 10, extent is 32-1. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{10, 41}}); } { @@ -576,7 +513,6 @@ TEST(BoundsInference, MultipleTopLoopStore) { ASSERT_EQ(bound.kind, TensorAccessKind::kStore); // This should be equivalent to {offset, extent + offset} for the d loop. // Offset is 2, extent is 96-1. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) verifyConstBounds(bound, {{2, 97}}); } } @@ -586,27 +522,19 @@ TEST(BoundsInference, CacheReads) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 30, j + 3); }); Tensor* C = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "C", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 10, j + 20) + A->load(i + 30, j + 40); }); @@ -665,7 +593,6 @@ TEST(BoundsInference, Flattened) { KernelScope kernel_scope; Tensor* b = Compute( "b", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{3, "z"}, {4, "y"}, {5, "x"}}, [&](const VarHandle& z, const VarHandle& y, const VarHandle& x) { return x * y + z; @@ -692,11 +619,8 @@ TEST(BoundsInference, Flattened) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(BoundsInference, GetPotentialHazards) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -714,7 +638,6 @@ TEST(BoundsInference, GetPotentialHazards) { Store* 
store1 = Store::make(a, {0}, Load::make(b, {0})); Store* store2 = Store::make(b, {0}, 3); Store* store3 = Store::make(a, {0}, Load::make(b, {0})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store* store4 = Store::make(c, {0}, 5); Stmt* stmt = Block::make({store1, store2, store3, store4}); @@ -751,15 +674,11 @@ TEST(BoundsInference, GetPotentialHazardsLoopNoHazard) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return (i + 1) * (j + 1); }); @@ -784,18 +703,13 @@ TEST(BoundsInference, GetPotentialHazardsLoopCall) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i, j) + 5; }); @@ -819,9 +733,7 @@ TEST(BoundsInference, GetPotentialHazardsLoopSplit) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); @@ -831,7 +743,6 @@ TEST(BoundsInference, GetPotentialHazardsLoopSplit) { // Splitting with tail by something offset creates a tail which also writes to // A. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.splitWithTail(l.getLoopStmtsFor(A)[0], 5, &outer, &inner, &tail); using namespace analysis; @@ -854,14 +765,11 @@ TEST(BoundsInference, HasConflictingOverlapSameBufferWithPartialOverlap) { // for (int k = 10; k < 100; k++) { // A[k-1] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {200}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(k, 10, 100, Store::make(a_buf, {k - 1}, Mul::make(20, k))); auto par = Block::make({forJ, forK}); @@ -882,13 +790,10 @@ TEST(BoundsInference, HasConflictingOverlapSameBufferWithFullOverlap) { // for (int k = 10; k < 100; k++) { // A[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {200}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 10, 100, Store::make(a_buf, {k}, Mul::make(20, k))); auto par = Block::make({forJ, forK}); @@ -909,16 +814,12 @@ TEST(BoundsInference, HasConflictingOverlapSameBufferWithFullOverlapRAW) { // for (int k = 10; k < 100; k++) { // B[k] = A[k]; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {200}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {200}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(k, 10, 100, Store::make(b_buf, {k}, Load::make(a_buf, {k}))); auto par = Block::make({forJ, forK}); @@ -939,14 +840,11 @@ TEST(BoundsInference, HasConflictingOverlapSameBufferNotOverlapping) { // for (int k = 10; k < 100; k++) { // A[k+100] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {200}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(k, 10, 100, Store::make(a_buf, {k + 100}, Mul::make(20, k))); auto par = Block::make({forJ, forK}); @@ -971,26 +869,18 @@ TEST(BoundsInference, HasConflictingOverlap2DBufferWithOverlap) { // A[m+1,n] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 50}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle m("m", kInt); VarHandle n("n", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA1 = Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, storeA1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto storeA2 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {m + 1, 
n}, Add::make(m, Mul::make(n, 100))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forN = For::make(n, 0, 50, storeA2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forM = For::make(m, 0, 20, forN); auto par = Block::make({forI, forM}); @@ -1021,26 +911,18 @@ TEST(BoundsInference, HasConflictingOverlap2DBufferWithNoOverlap) { // A[m+20,n+100] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 50}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle m("m", kInt); VarHandle n("n", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA1 = Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, storeA1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto storeA2 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {m + 20, n + 100}, Add::make(m, Mul::make(n, 100))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forN = For::make(n, 0, 50, storeA2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forM = For::make(m, 0, 20, forN); auto par = Block::make({forI, forM}); @@ -1071,25 +953,17 @@ TEST(BoundsInference, HasConflictingOverlapDifferentBuffers) { // B[m,n] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 50}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle m("m", kInt); VarHandle n("n", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA1 = Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, storeA1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA2 = Store::make(b_buf, {m, n}, Add::make(m, Mul::make(n, 100))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forN = For::make(n, 0, 50, storeA2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forM = For::make(m, 0, 20, forN); auto par = Block::make({forI, forM}); @@ -1116,24 +990,18 @@ TEST(BoundsInference, HasConflictingOverlapDueToRAWDependence) { // for (int k = 0; k < 100; k++) { // B[k] = 20 * A[99-k]; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_buf, {k}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k})))); auto par = Block::make({forJ, forK}); @@ -1154,24 +1022,18 @@ TEST(BoundsInference, HasConflictingOverlapDueToWARDependence) { // for (int j = 0; j < 100; j++) { // A[j] = 10 * j; // } - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_buf, {k}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k})))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto par = Block::make({forK, forJ}); @@ -1192,33 +1054,23 @@ TEST(BoundsInference, HasConflictingOverlapWithLoads) { // for (int j = 10; j < 100; j++) { // C[j] = 10 * A[j]; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c_buf("C", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto forK = For::make( k, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_buf, {k}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k})))); auto forJ = For::make( j, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(c_buf, {j}, Mul::make(10, Load::make(a_buf, {j})))); auto par = Block::make({forK, forJ}); @@ -1240,32 +1092,20 @@ TEST(BoundsInference, IsOverlapping) { // A[i + 50] = i * 50; // storeA2 // A[i + 150] = i * 150; // storeA3 // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {300}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c_buf("C", {100}, kInt); VarHandle i("i", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA1 = Store::make(a_buf, {i}, i * 10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto loadA1 = Load::make(a_buf, {ExprHandle(99) - i}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeB = Store::make(b_buf, {i}, Mul::make(loadA1, 20)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto loadA2 = Load::make(a_buf, {i + 100}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeC = Store::make(c_buf, {i}, Mul::make(loadA2, 10)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA2 = Store::make(a_buf, {i + 50}, i * 50); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA3 = Store::make(a_buf, {i + 150}, i * 150); auto forI = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) i, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Block::make({storeA1, storeB, storeC, storeA2, storeA3})); tensorexpr::analysis::MemDependencyChecker analyzer; diff --git a/test/cpp/tensorexpr/test_conv.cpp b/test/cpp/tensorexpr/test_conv.cpp index 7d3c343de8a19..8e6b017ad92d8 100644 --- a/test/cpp/tensorexpr/test_conv.cpp +++ b/test/cpp/tensorexpr/test_conv.cpp @@ -15,7 
+15,6 @@ namespace F = torch::nn::functional; // Generate test data with few bits of precision, to minimize error // accumulation from floating-point reordering. static at::Tensor genTestData(c10::IntArrayRef args) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::trunc(at::randn(args) * 256.0f) / 256.0f; } @@ -210,9 +209,7 @@ TEST(Conv, Conv2D) { auto const& oh = v[2]; auto const& ow = v[3]; auto const& c = v[4]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto const& r = v[5]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto const& s = v[6]; // FIXME: We have to use `call` and construct a `std::vector` here // because the `operator()` overload is only specialized for a small diff --git a/test/cpp/tensorexpr/test_cpp_codegen.cpp b/test/cpp/tensorexpr/test_cpp_codegen.cpp index 57eeca27546ce..97e8c8109e282 100644 --- a/test/cpp/tensorexpr/test_cpp_codegen.cpp +++ b/test/cpp/tensorexpr/test_cpp_codegen.cpp @@ -36,9 +36,7 @@ TEST(CppPrinter, AllocateOnStackThenFree) { TEST(CppPrinter, AllocateOnHeapThenFree) { KernelScope kernel_scope; std::vector dims = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new IntImm(20), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new IntImm(50), new IntImm(3)}; const Buf* buf = new Buf("y", dims, kLong); diff --git a/test/cpp/tensorexpr/test_expr.cpp b/test/cpp/tensorexpr/test_expr.cpp index b680e24e2bc36..30bb8d3853ae9 100644 --- a/test/cpp/tensorexpr/test_expr.cpp +++ b/test/cpp/tensorexpr/test_expr.cpp @@ -35,13 +35,9 @@ TEST(Expr, BasicValueTest) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Expr, BasicValueTest02) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(4.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle d(5.0f); ExprHandle f = (a + b) - (c + d); SimpleIRExprEval eval(f); @@ -52,10 +48,8 @@ TEST(Expr, BasicValueTest02) { TEST(Expr, LetTest01) { KernelScope kernel_scope; VarHandle x("x", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(2.f) + (x * ExprHandle(3.f) + ExprHandle(4.f)); SimpleIRExprEval eval(body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(3.f)); ASSERT_EQ(eval.value(), 2 + (3 * 3 + 4)); } @@ -66,12 +60,9 @@ TEST(Expr, LetTest02) { VarHandle x("x", kFloat); VarHandle y("y", kFloat); ExprHandle body = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(2.f) + (x * ExprHandle(3.f) + ExprHandle(4.f) * y); SimpleIRExprEval eval(body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(3.f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(y, ExprHandle(6.f)); ASSERT_EQ(eval.value(), 2 + (3 * 3 + 4 * 6)); } @@ -94,12 +85,10 @@ TEST(Expr, LetStmtTest01) { PaddedBuffer b_v(1); PaddedBuffer b_ref(1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(0) = 23; b_ref(0) = a_v(0); eval(a_v, b_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(b_v, b_ref, 1e-5); } @@ -117,10 +106,8 @@ TEST(Expr, IntTest) { TEST(Expr, FloatTest) { KernelScope kernel_scope; VarHandle x("x", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(2.f) + (x * ExprHandle(3.f) + 
ExprHandle(4.f)); SimpleIRExprEval eval(body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(3.f)); ASSERT_EQ(eval.value(), 2 + (3 * 3 + 4)); } @@ -235,7 +222,6 @@ TEST(Expr, VectorAdd01) { } SimpleIREvaluator ir_eval(stmt, {a_buf, b_buf, c_buf}); ir_eval(a_v, b_v, c_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(c_v, c_ref, 1e-5); } @@ -289,7 +275,6 @@ TEST(Expr, CompareSelectDtypes) { std::vector a_buffer(N, 1); std::vector b_buffer(N, 1); std::vector c_buffer(N, 0.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_ref(N, 3.14f); VarHandle i("i", kInt); @@ -304,9 +289,7 @@ TEST(Expr, CompareSelectDtypes) { CompareSelect::make( a.load(i), b.load(i), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) FloatImm::make(3.14f), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) FloatImm::make(2.78f), CompareSelectOperation::kEQ))); @@ -319,7 +302,6 @@ TEST(Expr, CompareSelectDtypes) { assertAllEqual(a_buffer, 1); assertAllEqual(b_buffer, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(c_buffer, c_ref, 1e-7); } @@ -329,10 +311,8 @@ TEST(Expr, IntrinsicsDtypes) { constexpr int N = 256; Placeholder a(BufHandle("A", {N}, kDouble)); Placeholder b(BufHandle("B", {N}, kDouble)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buffer(N, -10.0); std::vector b_buffer(N, 0.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(N, 10.0); VarHandle i("i", kInt); @@ -344,9 +324,7 @@ TEST(Expr, IntrinsicsDtypes) { ASSERT_EQ(a_buffer.size(), N); ASSERT_EQ(b_buffer.size(), N); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(a_buffer, -10.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(b_buffer, b_ref, 1e-7); } @@ -358,12 +336,9 @@ TEST(Expr, Substitute01) { const Expr* e = new Mul(new Sub(x, new FloatImm(1.0f)), new Add(x, y)); const Var* z = new Var("z", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const Expr* e2 = Substitute(e, {{x, new Add(z, new FloatImm(5.0f))}}); const Expr* e2_ref = new Mul( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new Sub(new Add(z, new FloatImm(5.0f)), new FloatImm(1.0f)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new Add(new Add(z, new FloatImm(5.0f)), y)); std::ostringstream oss; oss << *e2; @@ -476,7 +451,6 @@ TEST(Expr, BinaryMath01) { for (const TestConfig& test_config : test_configs) { const float v1 = 0.8765f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float v2 = 1.2345f; ExprHandle v_expr = test_config.func(ExprHandle(v1), ExprHandle(v2)); float v_ref = test_config.ref_func(v1, v2); @@ -601,11 +575,8 @@ TEST(Expr, LogicalOps03) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Expr, BitwiseOps) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(59); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(11); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(101); ExprHandle d(2); ExprHandle f = (((a ^ (b << 1)) & c) >> 2) | d; @@ -625,17 +596,13 @@ TEST(Expr, DynamicShapeAdd) { VarHandle i("i", kInt); Stmt* s = For::make(i, 0, n, c.store({i}, a.load(i) + b.load(i))); std::vector aData(size, 1.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector bData(size, 2.0f); std::vector cData(size, 0.0f); SimpleIREvaluator(s, {a, b, c, 
n})(aData, bData, cData, size); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(cData, std::vector(size, 3.0f), 1e-7); }; testWithSize(1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithSize(16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithSize(37); } @@ -660,13 +627,11 @@ void testCond01() { a_ref(i) = i * 3; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(a_v, a_ref, 1e-5); } void testIfThenElse01() { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle v = ifThenElse(ExprHandle(1), ExprHandle(1.0f), ExprHandle(2.0f)); std::ostringstream oss; @@ -679,7 +644,6 @@ void testIfThenElse01() { void testIfThenElse02() { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle v = ifThenElse(ExprHandle(0), ExprHandle(1.0f), ExprHandle(2.0f)); std::ostringstream oss; @@ -693,7 +657,6 @@ void testIfThenElse02() { void testIfThenElse03() { KernelScope kernel_scope; ExprHandle v = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ifThenElse(BoolImm::make(false), ExprHandle(1.0f), ExprHandle(2.0f)); std::ostringstream oss; @@ -710,7 +673,6 @@ void testStmtClone() { Placeholder a_buf("a", kInt, {N}); VarHandle index = VarHandle("index", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* body = a_buf.store({index}, 5); Stmt* loop = For::make(index, 0, N, body); @@ -720,14 +682,11 @@ void testStmtClone() { SimpleIREvaluator(loop, {a_buf})(orig_loop_results); SimpleIREvaluator(cloned_loop, {a_buf})(cloned_loop_results); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(orig_loop_results, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(cloned_loop_results, 5); // Let's add another assign to the body in the cloned loop and verify that the // original statement hasn't changed while the cloned one has. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* body_addition = a_buf.store({index}, 33); Block* cloned_body = static_cast(static_cast(cloned_loop)->body()); @@ -738,9 +697,7 @@ void testStmtClone() { SimpleIREvaluator(loop, {a_buf})(orig_loop_results_after_mutation); SimpleIREvaluator(cloned_loop, {a_buf})(cloned_loop_results_after_mutation); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(orig_loop_results_after_mutation, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(cloned_loop_results_after_mutation, 33); } diff --git a/test/cpp/tensorexpr/test_external_calls.cpp b/test/cpp/tensorexpr/test_external_calls.cpp index ce4d17a1c78d8..e3c7150ff4b79 100644 --- a/test/cpp/tensorexpr/test_external_calls.cpp +++ b/test/cpp/tensorexpr/test_external_calls.cpp @@ -21,13 +21,9 @@ using namespace torch::jit::tensorexpr; TEST(ExternalCall, Conv2d_float) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Input("Input", kFloat, {1, 3, 224, 224}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Weight("Weight", kFloat, {16, 3, 3, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Bias("Bias", kFloat, {16}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle ResultBuf("Result", {1, 16, 112, 112}, kFloat); int64_t stride = 2; int64_t pad = 1; @@ -52,11 +48,8 @@ TEST(ExternalCall, Conv2d_float) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor input = at::ones({1, 3, 224, 224}, options) * 5.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor weight = at::ones({16, 3, 3, 3}, options) * 6.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor bias = at::ones({16}, options) * 11.f; at::Tensor ref = at::conv2d( input, @@ -68,13 +61,9 @@ TEST(ExternalCall, Conv2d_float) { groups); at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_buf(1 * 3 * 224 * 224, 5.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector weight_buf(16 * 3 * 3 * 3, 6.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector bias_buf(16, 11.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector result_buf(1 * 16 * 112 * 112, -1.f); #ifdef TORCH_ENABLE_LLVM @@ -88,7 +77,6 @@ TEST(ExternalCall, Conv2d_float) { SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Weight, Bias, Result}); ir_eval.call({input_buf, weight_buf, bias_buf, result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); } @@ -98,13 +86,9 @@ TEST(ExternalCall, Conv2d_int) { // A similar test, but now using kInt tensors KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Input("Input", kInt, {1, 3, 224, 224}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Weight("Weight", kInt, {16, 3, 3, 3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Bias("Bias", kInt, {16}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle ResultBuf("Result", {1, 16, 112, 112}, kInt); int64_t stride = 2; int64_t pad = 1; @@ -129,11 +113,8 @@ TEST(ExternalCall, Conv2d_int) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor input = at::ones({1, 3, 224, 224}, options) * 5; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor weight = at::ones({16, 3, 3, 3}, options) * 6; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor bias = at::ones({16}, options) * 11; at::Tensor ref = at::conv2d( input, @@ -145,13 +126,9 @@ TEST(ExternalCall, Conv2d_int) { groups); at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_buf(1 * 3 * 224 * 224, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector weight_buf(16 * 3 * 3 * 3, 6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector bias_buf(16, 11); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector result_buf(1 * 16 * 112 * 112, -1); #ifdef TORCH_ENABLE_LLVM @@ -165,7 +142,6 @@ TEST(ExternalCall, Conv2d_int) { SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Weight, Bias, Result}); ir_eval.call({input_buf, weight_buf, bias_buf, result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); } @@ -174,11 +150,8 @@ TEST(ExternalCall, Conv2d_int) { TEST(ExternalCall, Conv2d_nobias_noargs) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Input("Input", kFloat, {1, 16, 112, 112}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Weight("Weight", kFloat, {16, 16, 1, 1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle ResultBuf("Result", {1, 16, 112, 112}, kFloat); Tensor* Result = new Tensor( @@ -197,18 +170,13 @@ TEST(ExternalCall, Conv2d_nobias_noargs) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor input = at::ones({1, 16, 112, 112}, options) * 5.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor weight = at::ones({16, 16, 1, 1}, options) * 6.f; at::Tensor ref = at::conv2d(input, weight); at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_buf(1 * 16 * 112 * 112, 5.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector weight_buf(16 * 16 * 1 * 1, 6.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector result_buf(1 * 16 * 112 * 112, -1.f); #ifdef TORCH_ENABLE_LLVM @@ -222,7 +190,6 @@ TEST(ExternalCall, Conv2d_nobias_noargs) { SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Weight, Result}); ir_eval.call({input_buf, weight_buf, result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); } @@ -231,13 +198,9 @@ TEST(ExternalCall, Conv2d_nobias_noargs) { TEST(ExternalCall, Addmm_float) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Input("Input", kFloat, {100, 300}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Mat1("Mat1", kFloat, {100, 200}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder Mat2("Mat2", kFloat, {200, 300}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle ResultBuf("Result", {100, 300}, kFloat); int64_t beta = 2; int64_t alpha = 2; @@ -260,29 +223,21 @@ TEST(ExternalCall, Addmm_float) { 
.layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor input = at::ones({100, 300}, options) * 5.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor mat1 = at::ones({100, 200}, options) * 6.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor mat2 = at::ones({200, 300}, options) * 11.f; at::Tensor ref = at::addmm(input, mat1, mat2, beta, alpha); at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_buf(100 * 300, 5.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector mat1_buf(100 * 200, 6.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector mat2_buf(200 * 300, 11.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector result_buf(100 * 300, -1.f); #ifdef TORCH_ENABLE_LLVM LLVMCodeGen llvm_codegen(l.root_stmt(), {Input, Mat1, Mat2, Result}); llvm_codegen.call({input_buf, mat1_buf, mat2_buf, result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {100, 300}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); #endif @@ -290,7 +245,6 @@ TEST(ExternalCall, Addmm_float) { SimpleIREvaluator ir_eval(l.root_stmt(), {Input, Mat1, Mat2, Result}); ir_eval.call({input_buf, mat1_buf, mat2_buf, result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {100, 300}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); } @@ -307,12 +261,9 @@ TEST(ExternalCall, BinaryFloat) { std::string>; std::vector tests = {}; tests.push_back( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Test{{100, 200}, {200, 300}, {100, 300}, at::matmul, "nnc_aten_matmul"}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tests.push_back(Test{{100, 300}, {300}, {100}, at::mv, "nnc_aten_mv"}); tests.push_back( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Test{{100, 200}, {200, 300}, {100, 300}, at::mm, "nnc_aten_mm"}); for (auto curTest : tests) { std::vector aShape, bShape, resShape; @@ -343,9 +294,7 @@ TEST(ExternalCall, BinaryFloat) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::ones(c10::IntArrayRef(aShape), options) * 5.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor b = at::ones(c10::IntArrayRef(bShape), options) * 6.f; at::Tensor ref = torchFunc(a, b); @@ -355,9 +304,7 @@ TEST(ExternalCall, BinaryFloat) { }; at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buf(prod(aShape), 5.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_buf(prod(bShape), 6.f); std::vector result_buf(prod(resShape), -1.f); @@ -395,18 +342,14 @@ TEST(ExternalCall, UnaryFloat) { std::vector tests = {}; tests.push_back(Test{// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 64, 8, 9}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1, 64, 5, 7}, [](at::Tensor x) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::adaptive_avg_pool2d(x, {5, 7}); }, "nnc_aten_adaptive_avg_pool2d", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) toExprHandleVec({5, 7})}); tests.push_back(Test{// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {100, 200}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {100}, [](at::Tensor x) { return 
at::mean(x, {1}); }, "nnc_aten_mean", @@ -434,7 +377,6 @@ TEST(ExternalCall, UnaryFloat) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::ones(c10::IntArrayRef(aShape), options) * 5.f; at::Tensor ref = torchFunc(a); @@ -444,7 +386,6 @@ TEST(ExternalCall, UnaryFloat) { }; at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_buf(prod(aShape), 5.f); std::vector result_buf(prod(resShape), -1.f); @@ -471,28 +412,22 @@ TEST(ExternalCall, ComputeInterop) { // use Tensors built with Compute API. KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle ConvResultBuf("ConvResult", {1, 16, 112, 112}, kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle MatmulResultBuf("MatmulResult", {1, 16, 112, 112}, kFloat); Tensor* Input = Compute( "Input", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, "n"}, {16, "c"}, {112, "h"}, {112, "w"}}, [&](const VarHandle& n, const VarHandle& c, const VarHandle& h, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const VarHandle& w) { return FloatImm::make(5.0f); }); Tensor* Weight = Compute( "Weight", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{16, "n"}, {16, "c"}, {1, "kh"}, {1, "kw"}}, [&](const VarHandle& n, const VarHandle& c, const VarHandle& h, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const VarHandle& w) { return FloatImm::make(6.0f); }); Tensor* ConvResult = new Tensor( @@ -511,7 +446,6 @@ TEST(ExternalCall, ComputeInterop) { {})); Tensor* Result = Compute( "Result", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, "n"}, {16, "c"}, {112, "h"}, {112, "w"}}, [&](const VarHandle& n, const VarHandle& c, @@ -534,24 +468,17 @@ TEST(ExternalCall, ComputeInterop) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor input = at::ones({1, 16, 112, 112}, options) * 5.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor weight = at::ones({16, 16, 1, 1}, options) * 6.f; at::Tensor t = at::conv2d(input, weight); at::Tensor t2 = at::matmul(t, t); at::Tensor ref = t + t2; at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector input_buf(1 * 16 * 112 * 112, 5.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector weight_buf(16 * 16 * 1 * 1, 6.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector conv_result_buf(1 * 16 * 112 * 112, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector matmul_result_buf(1 * 16 * 112 * 112, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector result_buf(1 * 16 * 112 * 112, -1.f); #ifdef TORCH_ENABLE_LLVM @@ -569,7 +496,6 @@ TEST(ExternalCall, ComputeInterop) { ir_eval.call( {input_buf, weight_buf, conv_result_buf, matmul_result_buf, result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {1, 16, 112, 112}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); } @@ -580,25 +506,18 @@ TEST(ExternalCall, Inlining) { // can use Tensors built with Compute API. 
KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle MatmulResultBuf("MatmulResult", {8, 8}, kFloat); Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{8, "i"}, {8, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return FloatImm::make(5.0f); }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{8, "i"}, {8, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return FloatImm::make(4.0f); }); Tensor* MatmulResult = new Tensor( @@ -610,10 +529,8 @@ TEST(ExternalCall, Inlining) { {})); Tensor* Result = Compute( "Result", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{8, "i"}, {8, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return MatmulResult->load(i, j) + FloatImm::make(3.0f); }); @@ -633,16 +550,12 @@ TEST(ExternalCall, Inlining) { .layout(at::kStrided) .device(at::kCPU) .requires_grad(false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor a = at::ones({8, 8}, options) * 5.f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor b = at::ones({8, 8}, options) * 4.f; at::Tensor t = at::matmul(a, b); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Tensor ref = t + 3.f; at::Tensor nnc_result; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector result_buf(8 * 8); #ifdef TORCH_ENABLE_LLVM @@ -656,7 +569,6 @@ TEST(ExternalCall, Inlining) { SimpleIREvaluator ir_eval(l.root_stmt(), {Result}); ir_eval.call({result_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nnc_result = at::from_blob(result_buf.data(), {8, 8}, options); ASSERT_TRUE(at::allclose(nnc_result, ref)); } diff --git a/test/cpp/tensorexpr/test_ir_printer.cpp b/test/cpp/tensorexpr/test_ir_printer.cpp index 1e6b21f70943a..e02885418052f 100644 --- a/test/cpp/tensorexpr/test_ir_printer.cpp +++ b/test/cpp/tensorexpr/test_ir_printer.cpp @@ -30,13 +30,9 @@ TEST(IRPrinter, BasicValueTest) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(IRPrinter, BasicValueTest02) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(4.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle d(5.0f); ExprHandle f = (a + b) - (c + d); @@ -50,9 +46,7 @@ TEST(IRPrinter, CastTest) { KernelScope kernel_scope; VarHandle x("x", kHalf); VarHandle y("y", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(2.f) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (Cast::make(kFloat, x) * ExprHandle(3.f) + ExprHandle(4.f) * y); std::stringstream ss; @@ -64,7 +58,6 @@ TEST(IRPrinter, CastTest) { TEST(IRPrinter, FunctionName) { KernelScope kernel_scope; int M = 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int N = 20; Tensor* producer = Compute( diff --git a/test/cpp/tensorexpr/test_ir_verifier.cpp b/test/cpp/tensorexpr/test_ir_verifier.cpp index f5efc65dcbe75..899531ea85798 100644 --- a/test/cpp/tensorexpr/test_ir_verifier.cpp +++ 
b/test/cpp/tensorexpr/test_ir_verifier.cpp @@ -52,7 +52,6 @@ TEST(IRVerifier, BitwiseOps) { TEST(IRVerifier, CompareSelect) { KernelScope kernel_scope; const Expr* X = new IntImm(1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const Expr* Y = new FloatImm(3.14f); { auto a = new CompareSelect(X, X, X, Y, kEQ); @@ -84,7 +83,6 @@ TEST(IRVerifier, Load) { const Var* I = new Var("i", kInt); const Var* J = new Var("j", kLong); const Var* K = new Var("k", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const Buf* B = new Buf("b", {new IntImm(10), new IntImm(20)}, kFloat); { // Indices with different int dtypes (kInt, kLong) are ok @@ -151,7 +149,6 @@ TEST(IRVerifier, For) { TEST(IRVerifier, Block) { KernelScope kernel_scope; const Var* I = new Var("i", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const Buf* B = new Buf("B", {new IntImm(10)}, kInt); { Stmt* store = new Store(B, {I}, I); @@ -172,7 +169,6 @@ TEST(IRVerifier, Store) { const Var* I = new Var("i", kInt); const Var* J = new Var("j", kLong); const Var* K = new Var("k", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const Buf* B = new Buf("b", {new IntImm(10), new IntImm(20)}, kFloat); { // Indices with different int dtypes (kInt, kLong) are ok diff --git a/test/cpp/tensorexpr/test_kernel.cpp b/test/cpp/tensorexpr/test_kernel.cpp index 4ab06fc8c9387..c041d28fd9f78 100644 --- a/test/cpp/tensorexpr/test_kernel.cpp +++ b/test/cpp/tensorexpr/test_kernel.cpp @@ -101,11 +101,8 @@ TEST_F(Kernel, _1) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = a * (a * b); TensorExprKernel k(graph); @@ -126,7 +123,6 @@ TEST_F(Kernel, _1) { std::vector stack = fmap(inputs); k.run(stack); o = stack[0].toTensor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 5 * 3; i++) { CHECK_EQ(((float*)o.data_ptr())[i], ((float*)ref.data_ptr())[i]); } @@ -145,12 +141,9 @@ TEST_F(Kernel, _2) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto b = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({3, 5}, TensorOptions(kCPU).dtype(at::kFloat)).transpose(0, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = a * (a * b); TensorExprKernel k(graph); @@ -171,7 +164,6 @@ TEST_F(Kernel, _2) { std::vector stack = fmap(inputs); k.run(stack); o = stack[0].toTensor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 5 * 3; i++) { CHECK_EQ(((float*)o.data_ptr())[i], ((float*)ref.data_ptr())[i]); } @@ -190,12 +182,9 @@ TEST_F(Kernel, _3) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({10, 6}, TensorOptions(kCPU).dtype(at::kFloat)) .index({Slice(None, None, 2), Slice(None, None, 
2)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = a * (a * b); TensorExprKernel k(graph); @@ -216,7 +205,6 @@ TEST_F(Kernel, _3) { std::vector stack = fmap(inputs); k.run(stack); o = stack[0].toTensor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 5 * 3; i++) { CHECK_EQ(((float*)o.data_ptr())[i], ((float*)ref.data_ptr())[i]); } @@ -240,12 +228,9 @@ TEST_F(Kernel, DISABLED_Shape_Inference) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({10, 6}, TensorOptions(kCPU).dtype(at::kFloat)) .index({Slice(None, None, 2), Slice(None, None, 2)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = a * (a * b); TensorExprKernel k(graph); @@ -266,7 +251,6 @@ TEST_F(Kernel, DISABLED_Shape_Inference) { std::vector stack = fmap(inputs); k.run(stack); o = stack[0].toTensor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 5 * 3; i++) { CHECK_EQ(((float*)o.data_ptr())[i], ((float*)ref.data_ptr())[i]); } @@ -284,11 +268,8 @@ TEST_F(Kernel, DISABLED_Shape_Inference) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({8, 8}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({8, 8}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({8, 4}, TensorOptions(kCPU).dtype(at::kFloat)); auto t = torch::chunk(a * b, 2, 1); auto ref = t[0] * t[1]; @@ -310,7 +291,6 @@ TEST_F(Kernel, DISABLED_Shape_Inference) { o = stack[0].toTensor(); CHECK_EQ(o.sizes()[0], 8); CHECK_EQ(o.sizes()[1], 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 8 * 4; i++) { CHECK_EQ(((float*)o.data_ptr())[i], ((float*)ref.data_ptr())[i]); } @@ -393,13 +373,9 @@ TEST_F(Kernel, DISABLED_Shape_Inference) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 7, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = at::rand({5, 9, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({5, 19, 2}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = at::cat({a, b, c}, 1); @@ -501,11 +477,8 @@ TEST_F(Kernel, CatInputTypesPromotion) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 7, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = at::rand({5, 9, 2}, TensorOptions(kCPU).dtype(at::kDouble)); auto ref = at::cat({a, b, c}, 1); @@ -581,11 +554,8 @@ TEST_F(Kernel, CatWoConditionals) { # CHECK-NEXT: aten_cat)IR"; 
torch::jit::testing::FileCheck().run(verification_pattern, oss.str()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 7, 2}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto c = at::rand({5, 9, 2}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = at::cat({a, b, c}, 1); @@ -649,7 +619,6 @@ TEST_F(Kernel, DISABLED_SumAllAxes) { %1 : ${dtype} %2 : Tensor = aten::sum(%0, %1) return (%2))IR"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = iotaTensor({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); for (auto scalar_type : {ScalarType::Undefined, ScalarType::Double}) { @@ -713,7 +682,6 @@ TEST_F(Kernel, SumOneAxis) { %3 : ${dtype} %4 : ${out_dtype}(${size}, strides=[${strides}], device=cpu) = aten::sum(%0, %1, %2, %3) return (%4))IR"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = iotaTensor({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); for (int dim = -a.dim(); dim < a.dim(); ++dim) { @@ -844,7 +812,6 @@ TEST_F(Kernel, Softmax2D) { %3 : Float(${size}, strides=[${strides}]) = aten::${op}(%0, %1, %2) return (%3))IR"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); const std::string& verification_template = @@ -913,7 +880,6 @@ TEST_F(Kernel, Softmax3D) { %3 : Float(${size}, strides=[${strides}]) = aten::${op}(%0, %1, %2) return (%3))IR"; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({3, 4, 5}, TensorOptions(kCPU).dtype(at::kFloat)); const std::string& verification_template = @@ -1100,9 +1066,7 @@ TEST_F(Kernel, DISABLED_InlineProducerIntoReduction) { # CHECK-NOT: for)IR"; torch::jit::testing::FileCheck().run(verification_pattern, oss.str()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); std::vector inputs = {a, b}; std::vector stack = fmap(inputs); @@ -1149,9 +1113,7 @@ TEST_F(Kernel, DISABLED_InlineReductionIntoConsumer) { # CHECK-NOT: for)IR"; torch::jit::testing::FileCheck().run(verification_pattern, oss.str()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); std::vector inputs = {a, b}; std::vector stack = fmap(inputs); @@ -1175,9 +1137,7 @@ TEST_F(Kernel, SanitizeNames_CUDA) { graph->inputs().at(0)->setDebugName("aten::add:"); graph->inputs().at(1)->setDebugName("aten::add_"); TensorExprKernel k(graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCUDA).dtype(at::kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto b = at::rand({5, 3}, TensorOptions(kCUDA).dtype(at::kFloat)); auto ref = a * (a * b); std::vector inputs = {a, b}; @@ -1206,13 +1166,11 @@ TEST_F(Kernel, ConstantTensors) { TensorExprKernel k(graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = at::rand({16, 16}, TensorOptions(kCPU).dtype(at::kFloat)); std::vector inputs = {x}; std::vector stack = fmap(inputs); k.run(stack); auto o = stack[0].toTensor(); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = at::ones({16, 16}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = x * y; ASSERT_TRUE(at::allclose(o, ref)); @@ -1242,15 +1200,12 @@ TEST_F(Kernel, ConstantTensorsNonContiguous) { TensorExprKernel k(graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = at::rand({16, 16}, TensorOptions(kCPU).dtype(at::kFloat)); std::vector inputs = {x}; std::vector stack = fmap(inputs); k.run(stack); auto o = stack[0].toTensor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = at::arange(0, 256, TensorOptions(kCPU).dtype(at::kFloat)) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) .view({16, 16}) .t(); auto ref = x * y; @@ -1271,18 +1226,14 @@ TEST_F(Kernel, RunFast) { auto graph = std::make_shared(); parseIR(graph_string, &*graph); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = at::rand({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto b = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::rand({3, 5}, TensorOptions(kCPU).dtype(at::kFloat)).transpose(0, 1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto o = at::zeros({5, 3}, TensorOptions(kCPU).dtype(at::kFloat)); auto ref = a * (a * b); TensorExprKernel k(graph); k.runFast({a.data_ptr(), b.data_ptr()}, {o.data_ptr()}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 0; i < 5 * 3; i++) { CHECK_EQ(((float*)o.data_ptr())[i], ((float*)ref.data_ptr())[i]); } diff --git a/test/cpp/tensorexpr/test_loopnest.cpp b/test/cpp/tensorexpr/test_loopnest.cpp index 38a21f27ddb9c..5a60faa78c5e7 100644 --- a/test/cpp/tensorexpr/test_loopnest.cpp +++ b/test/cpp/tensorexpr/test_loopnest.cpp @@ -32,9 +32,7 @@ void checkIR(Stmt* s, const std::string& pattern) { TEST(LoopNest, ExprSimple01) { KernelScope kernel_scope; Tensor* tensor = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "f", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{16, "X"}, {5, "y"}}, [](const VarHandle& x, const VarHandle& y) { return ExprHandle(1.0f) + cast(x) * x + cast(y) * y; @@ -56,9 +54,7 @@ TEST(LoopNest, ExprSimple01) { TEST(LoopNest, ExprLower01) { KernelScope kernel_scope; Tensor* tensor = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "f", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{16, "x"}, {5, "y"}}, [](const VarHandle& x, const VarHandle& y) { return ExprHandle(1.0f) + cast(x) * x + cast(y) * y; @@ -77,7 +73,6 @@ TEST(LoopNest, ExprSimple02) { auto func = [](const ExprHandle& x, const ExprHandle& y) { return ExprHandle(1.0f) + cast(x) * x + cast(y) * y; }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{26, "x"}, {5, "y"}}, func); LoopNest l({tensor}); std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); @@ -96,10 +91,8 @@ TEST(LoopNest, ExprSimple02) { VarHandle x_inner("x_inner", kInt); VarHandle y("y", kInt); VarHandle x_tail("x_tail", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle f("f", {26, 5}, kFloat); ExprHandle x_1 = x_outer * 4 + x_inner; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle x_outer_end = (ExprHandle(26) - 0) / 4; For* stmt1 = For::make( x_outer, @@ -109,15 +102,12 @@ TEST(LoopNest, ExprSimple02) { x_inner, 0, 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(y, 0, 5, Store::make(f, {x_1, y}, func(x_1, y))))); ExprHandle x_2 = x_tail + x_outer_end * 4; For* stmt2 = For::make( 
x_tail, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (ExprHandle(26) - 0) % 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(y, 0, 5, Store::make(f, {x_2, y}, func(x_2, y)))); Stmt* stmt = Block::make({stmt1, stmt2}); @@ -127,24 +117,19 @@ TEST(LoopNest, ExprSimple02) { } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer f_v(26, 5, "f_v"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer f_ref(26, 5, "f_res"); stmt = FlattenIndexes(stmt); SimpleIREvaluator ir_eval(stmt, {tensor}); ir_eval(f_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int x = 0; x < 26; x++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int y = 0; y < 5; y++) { f_ref(x, y) = 1 + x * x + y * y; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(f_v, f_ref, 1e-5); } } @@ -183,7 +168,6 @@ TEST(LoopNest, ExprSliceHeadWithLoopOptions) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -195,7 +179,6 @@ TEST(LoopNest, ExprSliceHeadWithLoopOptions) { l.sliceHead(loops[0], 2, &head, &tail); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 2}, {0, 8}}); ASSERT_TRUE(tail->loop_options().is_gpu_block_index()); @@ -210,7 +193,6 @@ TEST(LoopNest, ExprSliceTailWithLoopOptions) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -228,7 +210,6 @@ TEST(LoopNest, ExprSliceTailWithLoopOptions) { l.sliceTail(tail, 2, &tail_head, &tail_tail); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 6}, {0, 2}, {8, 10}}); ASSERT_TRUE(tail_head->loop_options().is_gpu_block_index()); @@ -246,7 +227,6 @@ TEST(LoopNest, ExprSliceHeadWhenFactorEqualsSize) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -254,14 +234,12 @@ TEST(LoopNest, ExprSliceHeadWhenFactorEqualsSize) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* tail; std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.sliceHead(loops[0], 10, &head, &tail); ASSERT_EQ(head, loops[0]); ASSERT_EQ(tail, nullptr); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 10}}); } @@ -271,7 +249,6 @@ TEST(LoopNest, ExprSliceHeadWhenFactorLargerThanSize) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -279,14 +256,12 @@ TEST(LoopNest, ExprSliceHeadWhenFactorLargerThanSize) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* tail; std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.sliceHead(loops[0], 100, &head, &tail); ASSERT_EQ(head, loops[0]); ASSERT_EQ(tail, nullptr); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 10}}); } @@ -296,7 +271,6 @@ TEST(LoopNest, ExprSliceHead) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -312,7 +286,6 @@ TEST(LoopNest, ExprSliceHead) { ASSERT_NE(tail, loops[0]); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 4}, {4, 10}}); } @@ -322,7 +295,6 @@ TEST(LoopNest, ExprSliceHeadWithNonZeroStart) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); @@ -340,7 +312,6 @@ TEST(LoopNest, ExprSliceHeadWithNonZeroStart) { // tail_tail: [8, 10) Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 6}, {6, 8}, {8, 10}}); } @@ -352,7 +323,6 @@ TEST(LoopNest, ExprSliceTailWhenFactorEqualsSize) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -360,14 +330,12 @@ TEST(LoopNest, ExprSliceTailWhenFactorEqualsSize) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* tail; std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.sliceTail(loops[0], 10, &head, &tail); ASSERT_EQ(head, nullptr); ASSERT_EQ(tail, loops[0]); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 10}}); } @@ -379,7 +347,6 @@ TEST(LoopNest, ExprSliceTailWhenFactorLargerThanSize) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -387,14 +354,12 @@ TEST(LoopNest, ExprSliceTailWhenFactorLargerThanSize) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* tail; std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.sliceTail(loops[0], 100, &head, &tail); ASSERT_EQ(head, nullptr); ASSERT_EQ(tail, loops[0]); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 10}}); } @@ -404,7 +369,6 @@ TEST(LoopNest, ExprSliceTail) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -420,7 +384,6 @@ TEST(LoopNest, ExprSliceTail) { ASSERT_NE(tail, loops[0]); Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
assertForRanges(body, {{0, 6}, {6, 10}}); } @@ -433,7 +396,6 @@ TEST(LoopNest, ExprSplitAndSlice) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{100, "x"}}, func); LoopNest l({tensor}); @@ -447,7 +409,6 @@ TEST(LoopNest, ExprSplitAndSlice) { // outer: [0, 4) // inner: [0, 21) // tail: [84, 100) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.splitWithTail(loops[0], 21, &outer, &inner, &tail); l.sliceTail(inner, 2); l.sliceHead(outer, 2); @@ -472,17 +433,14 @@ TEST(LoopNest, ExprSplitAndSlice) { // f[x_tail + 84] = 1.f + float(x_tail + 84); // } Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 2}, {2, 4}, {0, 16}}); auto biter = body->begin(); For* loop = dynamic_cast(*biter++); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(loop->body(), {{0, 19}, {19, 21}}); loop = dynamic_cast(*biter); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(loop->body(), {{0, 19}, {19, 21}}); } @@ -494,7 +452,6 @@ TEST(LoopNest, ExprSliceAndNormalize) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{10, "x"}}, func); LoopNest l({tensor}); std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); @@ -511,7 +468,6 @@ TEST(LoopNest, ExprSliceAndNormalize) { // normalized_tail: [0, 8) Block* body = getSimplifiedBody(l); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRanges(body, {{0, 2}, {0, 8}}); } @@ -558,9 +514,7 @@ TEST(LoopNest, ExprSliceWithVariableDimension) { testWithDimension(2, {{0, 2}, {2, 2}, {2, 2}}); testWithDimension(3, {{0, 2}, {2, 2}, {2, 3}}); testWithDimension(4, {{0, 2}, {2, 2}, {2, 4}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithDimension(5, {{0, 2}, {2, 3}, {3, 5}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithDimension(10, {{0, 2}, {2, 8}, {8, 10}}); } @@ -570,7 +524,6 @@ TEST(LoopNest, ExprSplitWithTail) { auto func = [](const ExprHandle& x) { return ExprHandle(1.0f) + cast(x); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{199, "x"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -580,9 +533,7 @@ TEST(LoopNest, ExprSplitWithTail) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* x_tail; std::vector loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.splitWithTail(loops[0], 17, &x_outer, &x_inner, &x_tail); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.splitWithTail(x_outer, 7); Stmt* stmt = l.root_stmt(); @@ -593,14 +544,12 @@ TEST(LoopNest, ExprSplitWithTail) { // Verify that the split loops are ordered correctly. 
For* loop = dynamic_cast(*biter++); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRange(loop, 0, 7); loop = dynamic_cast(*biter++); assertForRange(loop, 0, 4); loop = dynamic_cast(*biter); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertForRange(loop, 0, 12); } @@ -610,7 +559,6 @@ TEST(LoopNest, ExprSplitWithTailNone) { auto func = [](const ExprHandle& x, const ExprHandle& y) { return ExprHandle(1.0f) + cast(x) * x + cast(y) * y; }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Compute("f", {{24, "x"}, {5, "y"}}, func); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -637,7 +585,6 @@ TEST(LoopNest, ExprSplitWithTailNone) { // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks,cppcoreguidelines-avoid-magic-numbers) BufHandle f("f", {24, 5}, kFloat); ExprHandle x_1 = x_outer * 4 + x_inner; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle x_outer_end = (ExprHandle(24) - 0) / 4; Stmt* stmt = new Block({For::make( x_outer, @@ -647,7 +594,6 @@ TEST(LoopNest, ExprSplitWithTailNone) { x_inner, 0, 4, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(y, 0, 5, Store::make(f, {x_1, y}, func(x_1, y)))))}); std::ostringstream oss_ref; @@ -656,23 +602,18 @@ TEST(LoopNest, ExprSplitWithTailNone) { } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer f_v(24, 5, "f_v"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer f_ref(24, 5, "f_res"); SimpleIREvaluator ir_eval(stmt, {tensor}); ir_eval(f_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int x = 0; x < 24; x++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int y = 0; y < 5; y++) { f_ref(x, y) = 1 + x * x + y * y; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(f_v, f_ref, 1e-5); } } @@ -709,7 +650,6 @@ TEST(LoopNest, ExprSplitWithMask01) { SimpleIREvaluator(stmt, {a_buf, b_buf, tensor})(a_v, b_v, c_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(c_v, c_ref, 1e-5); } @@ -822,7 +762,6 @@ TEST(LoopNest, ScheduleBroadcastAddBuffer) { PaddedBuffer a_v(M, N, "a_v"); for (int m = 0; m < M; m++) { for (int n = 0; n < N; n++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_v(m, n) = 7 * m * n; } } @@ -831,7 +770,6 @@ TEST(LoopNest, ScheduleBroadcastAddBuffer) { PaddedBuffer b_v(N, K, "b_v"); for (int n = 0; n < N; n++) { for (int k = 0; k < K; k++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_v(n, k) = 11 * n * k; } } @@ -847,12 +785,10 @@ TEST(LoopNest, ScheduleBroadcastAddBuffer) { for (int m = 0; m < M; m++) { for (int n = 0; n < N; n++) { for (int k = 0; k < K; k++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c_ref(m, n, k) = 7 * m * n + 11 * n * k; } } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(c_v, c_ref, 1e-5); } @@ -911,7 +847,6 @@ TEST(LoopNest, ScheduleFunctionCall01) { SimpleIREvaluator eval(stmt, {a_buf, b_buf, d}); eval(a_v, b_v, d_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(d_v, d_ref, 1e-5); } @@ -983,7 +918,6 @@ TEST(LoopNest, ScheduleInlineSimple) { eval1(a_v, b_v, c_v, d_v, y_1); eval2(a_v, b_v, c_v, d_v, y_2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(y_1, y_2, 1e-5); std::ostringstream oss1, oss2; oss1 << *stmt1; @@ -1083,7 +1017,6 @@ void InlineFunc01Helper(const std::vector& inline_order) { SimpleIREvaluator eval(stmt, 
{a_buf, b_buf, c_buf, d_buf, z}); eval(a_v, b_v, c_v, d_v, z_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(z_v, z_ref, 1e-5); } @@ -1130,7 +1063,6 @@ TEST(LoopNest, ScheduleInlineRandom) { "x", {{M, "m1"}, {N, "n1"}, {K, "k1"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return Mod::make(Intrinsics::make(kRand, kInt), 5); }); Tensor* y = Compute( @@ -1203,7 +1135,6 @@ TEST(LoopNest, ScheduleInlineRandomLowerDimensions) { const int K = 6; Tensor* x = Compute("x", {{M, "m1"}}, [&](const VarHandle& m) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return Mod::make(Intrinsics::make(kRand, kInt), 5); }); Tensor* y = Compute( @@ -1284,7 +1215,6 @@ TEST(LoopNest, ScheduleInlineIntrinsics) { eval1(a_v, b_v, y_1); eval2(a_v, b_v, y_2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(y_1, y_2, 1e-5); std::ostringstream oss1, oss2; oss1 << *stmt1; @@ -1332,10 +1262,8 @@ TEST(LoopNest, ScheduleInlineRandWithIntrinsics) { TEST(LoopNest, ScheduleSplitAThenInline) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); Tensor* b = Compute("b", {{2, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); @@ -1350,11 +1278,8 @@ TEST(LoopNest, ScheduleSplitAThenInline) { TEST(LoopNest, ScheduleSplitBThenInline) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{6, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); @@ -1365,12 +1290,10 @@ TEST(LoopNest, ScheduleSplitBThenInline) { l.prepareForCodegen(); Stmt* s = IRSimplifier::simplify(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output(6, 0); SimpleIREvaluator eval(s, {b}); eval(output); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { ASSERT_EQ(output[i], (i + 8) * (i + 8)); } @@ -1381,10 +1304,8 @@ TEST(LoopNest, ScheduleSplitBThenInline) { TEST(LoopNest, ScheduleSplitTwiceThenInline) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); Tensor* b = Compute("b", {{2, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -1404,11 +1325,8 @@ TEST(LoopNest, ScheduleSplitTwiceThenInline) { TEST(LoopNest, ScheduleInlineThenSplit) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{6, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); @@ -1419,12 +1337,10 @@ TEST(LoopNest, ScheduleInlineThenSplit) { l.splitWithMask(loops.back(), 3); l.prepareForCodegen(); Stmt* s = IRSimplifier::simplify(l.root_stmt()); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output(6, 0); SimpleIREvaluator eval(s, {b}); eval(output); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { ASSERT_EQ(output[i], (i + 8) * (i + 8)); } @@ -1435,11 +1351,8 @@ TEST(LoopNest, ScheduleInlineThenSplit) { TEST(LoopNest, ScheduleSplitInlineThenSplit) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{16, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); @@ -1452,12 +1365,10 @@ TEST(LoopNest, ScheduleSplitInlineThenSplit) { l.splitWithMask(loops.front(), 2); l.prepareForCodegen(); Stmt* s = IRSimplifier::simplify(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector output(16, 0); SimpleIREvaluator eval(s, {b}); eval(output); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; ++i) { ASSERT_EQ(output[i], (i + 8) * (i + 8)); } @@ -1467,7 +1378,6 @@ TEST(LoopNest, ScheduleSplitInlineThenSplit) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(LoopNest, ScheduleSplitInlineSimplify) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* a = Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return ExprHandle(4) * i - ExprHandle(2) * i; }); @@ -1486,11 +1396,8 @@ TEST(LoopNest, ScheduleSplitInlineSimplify) { TEST(LoopNest, ScheduleInlineThreeMixedOnce) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{6, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); Tensor* c = Compute( @@ -1520,11 +1427,8 @@ TEST(LoopNest, ScheduleInlineThreeMixedOnce) { TEST(LoopNest, ScheduleInlineThreeMixedTwice) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{6, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); Tensor* c = Compute( @@ -1555,11 +1459,8 @@ TEST(LoopNest, ScheduleInlineThreeMixedTwice) { TEST(LoopNest, ScheduleInlineThreeMixedInner) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{6, "j"}}, [&](const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); Tensor* c = Compute( @@ -1589,11 +1490,8 @@ TEST(LoopNest, ScheduleInlineThreeMixedInner) { TEST(LoopNest, ScheduleInlineThreeMixedSplit) { KernelScope kernel_scope; Tensor* a = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Compute("a", {{18, "i"}}, [&](const VarHandle& i) { return i * i; }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* b = Compute("b", {{6, "j"}}, [&](const 
VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a->load(j + ExprHandle(8)); }); Tensor* c = Compute( @@ -1663,7 +1561,6 @@ TEST(LoopNest, ScheduleFuserStyle) { Tensor* b = Compute( "f", {{kTotalSize, "i"}}, [&](const std::vector& axes) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a_buf.load(axes[0]) + 11.0f; }); @@ -1676,7 +1573,6 @@ TEST(LoopNest, ScheduleFuserStyle) { l.prepareForCodegen(); Stmt* s = l.root_stmt(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_data(kTotalSize, 7.0f); std::vector b_data(kTotalSize, 0.0f); std::vector c_data(kTotalSize, 0.0f); @@ -1717,11 +1613,8 @@ TEST(LoopNest, ScheduleFuserThreeArg) { Stmt* s = l.root_stmt(); std::vector a_data(kTotalSize, 1.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(kTotalSize, 2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_data(kTotalSize, 3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector d_data(kTotalSize, 4.0f); std::vector g_data(kTotalSize, 0.0f); SimpleIREvaluator(s, {a, b, c, d, g})(a_data, b_data, c_data, d_data, g_data); @@ -1747,18 +1640,13 @@ TEST(LoopNest, ScheduleDynamicShape2D) { Stmt* s = l.root_stmt(); SimpleIREvaluator cg(s, {a, b, c, m, n}); std::vector aData(M * N, 1.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector bData(M * N, 2.0f); std::vector cData(M * N, 0.0f); cg.call({aData, bData, cData, M, N}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(cData, std::vector(M * N, 3.0f), 1e-7); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithSize(1, 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithSize(16, 32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) testWithSize(37, 11); } @@ -1799,15 +1687,11 @@ TEST(LoopNest, LoopNestComputeAt_1) { # CHECK: Free(temp))IR"); // Now check that the loop still produces the correct result. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(100, 0); SimpleIREvaluator cg(s, {B, N}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cg.call({b_data, 100}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(100, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; i++) { b_ref[i] = i * i; } @@ -2146,7 +2030,6 @@ TEST(LoopNest, DISABLED_Conv1d_NH) { KernelScope kernel_scope; int N = 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int H = 256; int R = 3; int Pad = 1; @@ -2247,7 +2130,6 @@ TEST(LoopNest, LoopNestReorderAxis1) { LoopNest l({tensor}); Stmt* stmt1 = Stmt::clone(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt1_output(6, 0); SimpleIREvaluator cg(stmt1, {tensor}); cg.call({stmt1_output}); @@ -2264,12 +2146,10 @@ TEST(LoopNest, LoopNestReorderAxis1) { ASSERT_EQ(order1, "x,y,"); ASSERT_EQ(order2, "y,x,"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt2_output(6, 0); SimpleIREvaluator cg2(stmt2, {tensor}); cg.call({stmt2_output}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { ASSERT_EQ(stmt1_output[i], stmt2_output[i]); } @@ -2306,7 +2186,6 @@ TEST(LoopNest, LoopNestReorderPartialAxes) { Stmt* stmt1 = Stmt::clone(l.root_stmt()); ASSERT_EQ(loopOrderHelper.getOrder(stmt1), "x,y,z,"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt1_output(24, 0); SimpleIREvaluator cg(stmt1, {tensor}); cg.call({stmt1_output}); @@ -2317,12 +2196,10 @@ TEST(LoopNest, LoopNestReorderPartialAxes) { Stmt* stmt2 = Stmt::clone(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt2_output(24, 0); SimpleIREvaluator cg2(stmt2, {tensor}); cg2.call({stmt2_output}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 24; ++i) { ASSERT_EQ(stmt1_output[i], stmt2_output[i]); } @@ -2333,12 +2210,10 @@ TEST(LoopNest, LoopNestReorderPartialAxes) { Stmt* stmt3 = Stmt::clone(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt3_output(24, 0); SimpleIREvaluator cg3(stmt3, {tensor}); cg3.call({stmt3_output}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 24; ++i) { ASSERT_EQ(stmt1_output[i], stmt3_output[i]); } @@ -2363,7 +2238,6 @@ TEST(LoopNest, LoopNestReorderInternalAxis) { Stmt* stmt1 = Stmt::clone(l.root_stmt()); ASSERT_EQ(loopOrderHelper.getOrder(stmt1), "w,x,y,z,"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt1_output(24, 0); SimpleIREvaluator cg(stmt1, {tensor}); cg.call({stmt1_output}); @@ -2374,12 +2248,10 @@ TEST(LoopNest, LoopNestReorderInternalAxis) { Stmt* stmt2 = l.root_stmt(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt2_output(24, 0); SimpleIREvaluator cg2(stmt2, {tensor}); cg2.call({stmt2_output}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 24; ++i) { ASSERT_EQ(stmt1_output[i], stmt2_output[i]); } @@ -2403,7 +2275,6 @@ TEST(LoopNest, LoopNestReorderEnclosingAxis) { LoopOrderHelper loopOrderHelper; Stmt* stmt1 = Stmt::clone(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt1_output(24, 0); SimpleIREvaluator cg(stmt1, {tensor}); cg.call({stmt1_output}); @@ -2414,12 +2285,10 @@ TEST(LoopNest, LoopNestReorderEnclosingAxis) { Stmt* stmt2 = l.root_stmt(); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector stmt2_output(24, 0); SimpleIREvaluator cg2(stmt2, {tensor}); cg2.call({stmt2_output}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 24; ++i) { ASSERT_EQ(stmt1_output[i], stmt2_output[i]); } @@ -2468,7 +2337,6 @@ TEST(LoopNest, LoopNestReorderExtraStatements) { }); LoopNest l({tensor}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder extra(BufHandle("res", {6, 3}, kFloat)); auto loops = l.getAllLoopNestsWritingToBuf(tensor->buf()).at(0); @@ -2476,10 +2344,8 @@ TEST(LoopNest, LoopNestReorderExtraStatements) { VarHandle i = VarHandle(loops[0]->var()); Stmt* store_1 = Store::make(BufHandle(extra.data()), {i, 0}, ExprHandle(1.f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* store_2 = Store::make(BufHandle(extra.data()), {i, 1}, ExprHandle(2.f)); // stmt 3 is the Function body. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* store_3 = Store::make(BufHandle(extra.data()), {i, 2}, ExprHandle(4.f)); loops[0]->body()->prepend_stmt(store_1); @@ -2487,9 +2353,7 @@ TEST(LoopNest, LoopNestReorderExtraStatements) { loops[1]->body()->append_stmt(store_3); Stmt* stmt1 = Stmt::clone(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector extra1(6, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector res1(24, 0); SimpleIREvaluator cg(stmt1, {tensor, extra}); cg.call({res1, extra1}); @@ -2527,18 +2391,14 @@ TEST(LoopNest, LoopNestReorderExtraStatements) { # CHECK: res[x, 2] = 4 )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector extra2(6, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector res2(24, 0); SimpleIREvaluator cg2(stmt2, {tensor, extra}); cg2.call({res2, extra2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 24; ++i) { ASSERT_EQ(res1[i], res2[i]); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { ASSERT_EQ(extra1[i], extra2[i]); } @@ -2581,18 +2441,14 @@ TEST(LoopNest, LoopNestReorderExtraStatements) { # CHECK: res[x, 2] = 4 )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector extra3(6, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector res3(24, 0); SimpleIREvaluator cg3(stmt3, {tensor, extra}); cg3.call({res3, extra3}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 24; ++i) { ASSERT_EQ(res1[i], res3[i]); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 6; ++i) { ASSERT_EQ(extra1[i], extra3[i]); } @@ -2611,7 +2467,6 @@ void LoopNestReorderTestHelper( [](const std::vector&) { return -1; }); LoopNest l({c}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder extra(BufHandle("extra", {5}, kInt)); auto loops = l.getAllLoopNestsWritingToBuf(c->buf()).at(0); @@ -2634,7 +2489,6 @@ void LoopNestReorderTestHelper( Stmt* stmt1 = Stmt::clone(l.root_stmt()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector extra1(5, 0); std::vector res1(2 * 3 * 2 * 3 * 2, 0); SimpleIREvaluator cg(stmt1, {c, extra}); @@ -2649,7 +2503,6 @@ void LoopNestReorderTestHelper( if (append) { expected_loops++; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { expected_loops *= loopExtents[i]; ASSERT_EQ(extra1[i], expected_loops); @@ -2664,7 +2517,6 @@ void LoopNestReorderTestHelper( oss2 << *stmt2; 
ASSERT_NE(oss.str(), oss2.str()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector extra2(5, 0); std::vector res2(2 * 3 * 2 * 3 * 2, 0); SimpleIREvaluator cg2(stmt2, {c, extra}); @@ -2678,7 +2530,6 @@ void LoopNestReorderTestHelper( expected_loops++; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { expected_loops *= loopExtents[i]; ASSERT_EQ(extra2[i], expected_loops); @@ -2691,9 +2542,7 @@ void LoopNestReorderTestHelper( // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(LoopNest, LoopNestReorderLongStringOfPreOrphans) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 5; ++j) { // skip noops, since we check the loop isn't the same after reordering. if (i != j) { @@ -2705,9 +2554,7 @@ TEST(LoopNest, LoopNestReorderLongStringOfPreOrphans) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(LoopNest, LoopNestReorderLongStringOfPostOrphans) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 5; ++j) { // skip noops, since we check the loop isn't the same after reordering. if (i != j) { @@ -2719,9 +2566,7 @@ TEST(LoopNest, LoopNestReorderLongStringOfPostOrphans) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(LoopNest, LoopNestReorderLongStringFull) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 5; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 5; ++j) { // skip noops, since we check the loop isn't the same after reordering. 
if (i != j) { @@ -2829,7 +2674,6 @@ TEST(LoopNest, LoopNestReorderInternalLoopNest) { SimpleIREvaluator eval(stmt, {a_buf, b_buf, c_buf, d_buf, z}); eval(a_v, b_v, c_v, d_v, z_v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(z_v, z_ref, 1e-5); } } @@ -2838,9 +2682,7 @@ TEST(LoopNest, LoopNestReorderInternalLoopNest) { TEST(LoopNest, OuterLoopVectorization) { KernelScope kernel_scope; Tensor* tensor = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "f", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{8, "X"}, {8, "y"}}, [](const VarHandle& x, const VarHandle& y) { return ExprHandle(1.0f) + cast(x) * x + cast(y) * y; @@ -2989,7 +2831,6 @@ TEST(LoopNest, UnrollNonLiteralConstantBounds) { auto outer_for = For::make( i, IntImm::make(2) - IntImm::make(1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IntImm::make(12) / IntImm::make(3), inner_for); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) @@ -3047,7 +2888,6 @@ TEST(LoopNest, UnrollWithLet) { 0, kTotalSize, Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {Let::make(e, 7), Store::make(a_buf, {x}, e), Store::make(b_buf, {x}, e + 1)})); @@ -3094,7 +2934,6 @@ TEST(LoopNest, NormalizeStartPositive) { auto for_body = Block::make( {Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x})), Store::make(b_buf, {x}, x * 2)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, 50, 100, for_body); Block::make({for_stmt}); @@ -3126,11 +2965,8 @@ TEST(LoopNest, NormalizeStartNegative) { BufHandle b_buf("B", {ExprHandle(kTotalSize)}, kInt); VarHandle x("x", kInt); auto for_body = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {Store::make(a_buf, {x + 50}, Load::make(kInt, b_buf, {x + 50})), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b_buf, {x + 50}, x * 2)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, -50, 100, for_body); Block::make({for_stmt}); @@ -3166,7 +3002,6 @@ TEST(LoopNest, NormalizeStartZero) { auto for_body = Block::make( {Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x})), Store::make(b_buf, {x}, x * 2)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, 0, 100, for_body); Block::make({for_stmt}); @@ -3202,7 +3037,6 @@ TEST(LoopNest, NormalizeStartVariable) { auto for_body = Block::make( {Store::make(a_buf, {x}, Load::make(kInt, b_buf, {x})), Store::make(b_buf, {x}, x * 2)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, y, 100, for_body); Block::make({for_stmt}); @@ -3231,17 +3065,13 @@ TEST(LoopNest, NormalizeOnNestedOuterLoop) { // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {ExprHandle(50)}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {ExprHandle(100)}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); auto inner_for_body = Store::make( a_buf, {x}, Load::make(a_buf, {x}) + Load::make(b_buf, {y}) + y * 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for = For::make(y, 10, 100, inner_for_body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, 50, 100, inner_for); Block::make({for_stmt}); @@ -3270,17 +3100,13 @@ TEST(LoopNest, NormalizeOnNestedInnerLoop) { // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {ExprHandle(50)}, kInt); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {ExprHandle(100)}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); auto inner_for_body = Store::make( a_buf, {x}, Load::make(a_buf, {x}) + Load::make(b_buf, {y}) + y * 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for = For::make(y, 10, 100, inner_for_body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, 50, 100, inner_for); Block::make({for_stmt}); @@ -3303,7 +3129,6 @@ TEST(LoopNest, NormalizeAndSplitWithTail) { KernelScope kernel_scope; // Create a dummy tensor to construct LoopNest. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle n(100); Placeholder a(BufHandle("a", {n}, kFloat)); Tensor* b = @@ -3317,7 +3142,6 @@ TEST(LoopNest, NormalizeAndSplitWithTail) { const int kTotalSize = 5; BufHandle a_buf("A", {ExprHandle(kTotalSize)}, kInt); VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for_stmt = For::make(x, 5, 10, Store::make(a_buf, {x}, x * 2)); Block::make({for_stmt}); @@ -3329,7 +3153,6 @@ TEST(LoopNest, NormalizeAndSplitWithTail) { For* x_inner; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* x_tail; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) l.splitWithTail(for_stmt, 10, &x_outer, &x_inner, &x_tail); auto x_outer_result = IRSimplifier::simplify(x_outer); @@ -3363,14 +3186,11 @@ TEST(LoopNest, FlattenSimpleLoopNest2D) { // A[i,j] = i * j; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 5}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); auto for_body = Block::make({Store::make(a_buf, {i, j}, i * j)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for = For::make(j, 0, 5, for_body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for = For::make(i, 0, 10, inner_for); Block::make({outer_for}); @@ -3391,14 +3211,11 @@ TEST(LoopNest, FlattenSimpleLoopNest2D) { { SimpleIREvaluator eval1(loops[0], {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp1(10, 5); eval1(inp1); SimpleIREvaluator eval2(flattened, {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp2(10, 5); eval2(inp2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(inp1, inp2, 1e-5); } } @@ -3415,17 +3232,13 @@ TEST(LoopNest, FlattenSimpleLoopNest3D) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 5, 7}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto for_body = Block::make({Store::make(a_buf, {i, j, k}, i + j * k)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for1 = For::make(k, 0, 7, for_body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for2 = For::make(j, 0, 5, for1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto for3 = For::make(i, 0, 10, for2); Block::make({for3}); @@ -3446,14 +3259,11 @@ TEST(LoopNest, FlattenSimpleLoopNest3D) { { SimpleIREvaluator eval1(loops[0], {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp1(10, 5, 7); eval1(inp1); SimpleIREvaluator eval2(flattened, {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp2(10, 5, 7); eval2(inp2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(inp1, inp2, 1e-5); } } @@ -3468,14 +3278,11 @@ TEST(LoopNest, 
FlattenLoopNestAfterNormalize) { // A[i - 2,j - 3] = i * j; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {8, 12}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); auto for_body = Block::make({Store::make(a_buf, {i - 2, j - 3}, i * j)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for = For::make(j, 3, 15, for_body); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for = For::make(i, 2, 10, inner_for); Block::make({outer_for}); @@ -3496,14 +3303,11 @@ TEST(LoopNest, FlattenLoopNestAfterNormalize) { { SimpleIREvaluator eval1(loops[0], {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp1(8, 12); eval1(inp1); SimpleIREvaluator eval2(flattened, {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp2(8, 12); eval2(inp2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(inp1, inp2, 1e-5); } } @@ -3518,16 +3322,13 @@ TEST(LoopNest, FlattenLoopNestWithNonLiteralConstantBounds) { // A[i,j] = i * j; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 5}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); auto for_body = Block::make({Store::make(a_buf, {i, j}, i * j)}); auto inner_for = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(j, 0, IntImm::make(20) / IntImm::make(4), for_body); auto outer_for = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(i, 0, IntImm::make(15) - IntImm::make(5), inner_for); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto b = Block::make({outer_for}); @@ -3545,14 +3346,11 @@ TEST(LoopNest, FlattenLoopNestWithNonLiteralConstantBounds) { { SimpleIREvaluator eval1(loops[0], {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp1(10, 5); eval1(inp1); SimpleIREvaluator eval2(flattened, {a_buf}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) PaddedBuffer inp2(10, 5); eval2(inp2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(inp1, inp2, 1e-5); } } @@ -3570,18 +3368,14 @@ TEST(LoopNest, FlattenImperfectLoopNest) { // } // Do not flatten. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 15}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); auto for_body = Block::make({Store::make(a_buf, {i, j}, i * j)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for = For::make(j, 0, 15, for_body); auto outer_for = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) i, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a_buf, {i, i}, 0), inner_for})); auto par = Block::make({outer_for}); @@ -3609,18 +3403,14 @@ TEST(LoopNest, FlattenReductionLoopNest) { // } // Do not flatten. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 15}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle s_buf("S", {10}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); auto for_body = Block::make({Store::make( s_buf, {i}, Load::make(s_buf, {i}) + Load::make(a_buf, {i, j}))}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for = For::make(j, 0, 15, for_body); auto outer_for = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(i, 0, 10, Block::make({Store::make(s_buf, {i}, 0), inner_for})); auto par = Block::make({outer_for}); HashProvider hasher; @@ -3672,22 +3462,17 @@ TEST(LoopNest, FlattenIncorrectLoopsAsInput) { // } // Flatten({For_i, For_y}) => should not succeed - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 5}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); auto for_body1 = Block::make({Store::make(a_buf, {i, j}, i * j)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for1 = For::make(j, 0, 5, for_body1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for1 = For::make(i, 0, 10, inner_for1); auto for_body2 = Block::make( {Store::make(a_buf, {x, y}, Load::make(a_buf, {x, y}) + x + y)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for2 = For::make(y, 0, 5, for_body2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for2 = For::make(x, 0, 10, inner_for2); auto par = Block::make({outer_for1, outer_for2}); HashProvider hasher; @@ -3725,27 +3510,19 @@ TEST(LoopNest, CacheReadsSimple) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 30, j + 3); }); Tensor* C = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "C", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 10, j + 20) + A->load(i + 30, j + 40); }); @@ -3782,25 +3559,17 @@ TEST(LoopNest, CacheReadsSimple) { #CHECK: Free(A); )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_data(200, 0); SimpleIREvaluator cg(l.root_stmt(), {B, C}); cg.call({b_data, c_data}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 20; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_ref[i * 10 + j] = (i + 30) * (j + 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c_ref[i * 10 + j] = (i + 10) * (j + 20) + (i + 30) * (j + 40); } } @@ -3814,27 +3583,19 @@ TEST(LoopNest, 
CacheReadsOuter) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 30, j + 40) + A->load(i + 31, j + 41); }); Tensor* C = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "C", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 10, j + 20) + A->load(i + 30, j + 40); }); @@ -3851,25 +3612,17 @@ TEST(LoopNest, CacheReadsOuter) { #CHECK: B[10 * i_2 + j_2] = (A_local[(j_2 + 11 * i_2) + 12]) + (A_local[j_2 + 11 * i_2]); )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_data(200, 0); SimpleIREvaluator cg(l.root_stmt(), {B, C}); cg.call({b_data, c_data}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 20; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_ref[i * 10 + j] = (i + 30) * (j + 40) + (i + 31) * (j + 41); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c_ref[i * 10 + j] = (i + 10) * (j + 20) + (i + 30) * (j + 40); } } @@ -3883,27 +3636,19 @@ TEST(LoopNest, CacheReadsInternal) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 30, j + 40) + A->load(i + 31, j + 41); }); Tensor* C = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "C", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 10, j + 20) + A->load(i + 30, j + 40); }); @@ -3919,25 +3664,17 @@ TEST(LoopNest, CacheReadsInternal) { #CHECK: B[10 * i_1 + j_2] = (A_local[j_2 + 12]) + (A_local[j_2]); )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_data(200, 0); SimpleIREvaluator cg(l.root_stmt(), {B, C}); cg.call({b_data, c_data}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int 
i = 0; i < 20; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_ref[i * 10 + j] = (i + 30) * (j + 40) + (i + 31) * (j + 41); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c_ref[i * 10 + j] = (i + 10) * (j + 20) + (i + 30) * (j + 40); } } @@ -3951,28 +3688,20 @@ TEST(LoopNest, CacheReadsInner) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); // note im changing the offset of the first arg of the first call to A. Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 34, j + 40) + A->load(i + 30, j + 41); }); Tensor* C = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "C", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 10, j + 20) + A->load(i + 30, j + 40); }); @@ -3988,25 +3717,17 @@ TEST(LoopNest, CacheReadsInner) { #CHECK: B[10 * i_1 + j_1] = (A_local[1]) + (A_local[8]); )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_data(200, 0); SimpleIREvaluator cg(l.root_stmt(), {B, C}); cg.call({b_data, c_data}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 20; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_ref[i * 10 + j] = (i + 34) * (j + 40) + (i + 30) * (j + 41); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c_ref[i * 10 + j] = (i + 10) * (j + 20) + (i + 30) * (j + 40); } } @@ -4020,27 +3741,19 @@ TEST(LoopNest, CacheWritesSimple) { KernelScope kernel_scope; Tensor* A = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "A", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {64, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i * j; }); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 30, j + 40) + A->load(i + 31, j + 41); }); Tensor* C = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "C", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{20, "i"}, {10, "j"}}, [&](const VarHandle& i, const VarHandle& j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return A->load(i + 10, j + 20) + A->load(i + 30, j + 40); }); @@ -4061,25 +3774,17 @@ TEST(LoopNest, CacheWritesSimple) { #CHECK-NOT: A_local )IR"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(200, 0); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_data(200, 0); SimpleIREvaluator cg(l.root_stmt(), {B, C}); cg.call({b_data, c_data}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector c_ref(200, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 20; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_ref[i * 10 + j] = (i + 30) * (j + 40) + (i + 31) * (j + 41); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c_ref[i * 10 + j] = (i + 10) * (j + 20) + (i + 30) * (j + 40); } } @@ -4093,22 +3798,17 @@ TEST(LoopNest, DeadStoreElimination) { KernelScope kernel_scope; VarHandle y("y", kInt); VarHandle x("x_tail", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle f("f", {26, 5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle g("g", {26, 5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle x_outer_end = 5; ExprHandle x_2 = x + x_outer_end * 4; For* stmt1 = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, Block::make({ Store::make(f, {x_2, y}, (x_2 + y)), @@ -4141,28 +3841,20 @@ TEST(LoopNest, DeadStoreEliminationWithIntermediates) { VarHandle x("x", kInt); VarHandle y("y", kInt); VarHandle z("z", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle f("f", {26 * 5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle g("g", {26 * 5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle h("h", {26, 5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle x_outer_end = 5; ExprHandle x_2 = x + x_outer_end * 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For* stmt1 = For::make(x, 0, 26 * 5, Store::make(f, {x}, x)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For* stmt2 = For::make(z, 0, 26 * 5, Store::make(g, {z}, z + 1)); For* stmt3 = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, Block::make({ Store::make(h, {x, y}, Load::make(f, {x * y})), @@ -4195,22 +3887,17 @@ TEST(LoopNest, DeadStoreEliminationWithIntermediates) { TEST(LoopNest, CompoundTensorSimple) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 5}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); auto for_body1 = Block::make({Store::make(a_buf, {i, j}, i * j)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for1 = For::make(j, 0, 5, for_body1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for1 = For::make(i, 0, 10, inner_for1); auto for_body2 = Block::make( {Store::make(a_buf, {x, y}, Load::make(a_buf, {x, y}) + x + y)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for2 = For::make(y, 0, 5, for_body2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for2 = For::make(x, 0, 10, inner_for2); Block* body = Block::make({outer_for1, outer_for2}); @@ -4219,20 +3906,15 @@ TEST(LoopNest, CompoundTensorSimple) { LoopNest l({A}); l.prepareForCodegen(); - 
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_data(50, 0); Stmt* s = IRSimplifier::simplify(l.root_stmt()); SimpleIREvaluator cg(s, {A}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_ref(50, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 5; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a_ref[i * 5 + j] = (i * j) + i + j; } } @@ -4268,30 +3950,23 @@ TEST(LoopNest, InlineConstantIndex) { TEST(LoopNest, CompoundTensorUsed) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {10, 5}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); auto for_body1 = Block::make({Store::make(a_buf, {i, j}, i * j)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for1 = For::make(j, 0, 5, for_body1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for1 = For::make(i, 0, 10, inner_for1); auto for_body2 = Block::make( {Store::make(a_buf, {x, y}, Load::make(a_buf, {x, y}) + x + y)}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_for2 = For::make(y, 0, 5, for_body2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto outer_for2 = For::make(x, 0, 10, inner_for2); Block* body = Block::make({outer_for1, outer_for2}); Tensor* A = new Tensor(a_buf.node(), body); Tensor* B = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "B", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{10, "i"}, {3, "j"}}, [&](const VarHandle& i, const VarHandle& j) { return A->load(i, j + 1) + A->load(i, j + 2); @@ -4301,19 +3976,15 @@ TEST(LoopNest, CompoundTensorUsed) { ASSERT_FALSE(l.computeInline(A->buf())); l.prepareForCodegen(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_data(50, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_data(50, 0); Stmt* s = IRSimplifier::simplify(l.root_stmt()); SimpleIREvaluator cg(s, {B}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_ref(50, 0); auto AT = [](int i, int j) { return i * j + i + j; }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 10; ++i) { for (int j = 0; j < 3; ++j) { b_ref[i * 3 + j] = AT(i, j + 1) + AT(i, j + 2); @@ -4421,11 +4092,9 @@ static void checkColReduce(Stmt* s, Placeholder& p, Tensor* t) { b(i) = 0.0f; } for (int i = 0; i < N; i++) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ref(i) = 76.0f; } SimpleIREvaluator(s, {p, t}).call({a, b}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(b, ref, 1e-5); } @@ -4512,18 +4181,13 @@ TEST(LoopNest, ReorderAxisWithMultipleConds) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {i}, Mul::make(i, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto inner_cond = Cond::make(CompareSelect::make(i, 10, kLT), forJ, nullptr); auto outer_cond = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Cond::make(CompareSelect::make(i, 5, kGT), inner_cond, nullptr); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 
20, outer_cond); Stmt* par = Block::make({forI}); LoopNest l(par, {a_buf.node()}); @@ -4553,10 +4217,8 @@ TEST(LoopNest, VectorizeUse) { Tensor* b = Compute( "b", {{N, "n"}}, [&](const VarHandle& n) { return a.load(n) + 1.0f; }); Tensor* c = Compute( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) "c", {{N, "n"}}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) [&](const VarHandle& n) { return b->load(n) + 2.0f; }); LoopNest nest({c}, {b, c}); auto loops = nest.getAllLoopNestsWritingToBuf(b->buf())[0]; @@ -4631,9 +4293,7 @@ TEST(LoopNest, DistributeLoopWithAllStmtsAsPivots) { // B[i] = B[i] + i * k; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -4642,7 +4302,6 @@ TEST(LoopNest, DistributeLoopWithAllStmtsAsPivots) { auto forJ = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( a_buf, {i}, Add::make(Load::make(a_buf, {i}), Mul::make(i, j)))); @@ -4650,11 +4309,9 @@ TEST(LoopNest, DistributeLoopWithAllStmtsAsPivots) { auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, Store::make( b_buf, {i}, Add::make(Load::make(b_buf, {i}), Mul::make(i, k)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({initA, forJ, initB, forK})); auto par = Block::make({forI}); @@ -4699,9 +4356,7 @@ TEST(LoopNest, DistributeLoopWithOneStmtAsPivot) { // B[i] = B[i] + i * k; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -4710,7 +4365,6 @@ TEST(LoopNest, DistributeLoopWithOneStmtAsPivot) { auto forJ = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( a_buf, {i}, Add::make(Load::make(a_buf, {i}), Mul::make(i, j)))); @@ -4718,11 +4372,9 @@ TEST(LoopNest, DistributeLoopWithOneStmtAsPivot) { auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, Store::make( b_buf, {i}, Add::make(Load::make(b_buf, {i}), Mul::make(i, k)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({initA, forJ, initB, forK})); auto par = Block::make({forI}); @@ -4764,9 +4416,7 @@ TEST(LoopNest, DistributeLoopWithoutAnyPivot) { // B[i] = B[i] + i * k; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -4775,7 +4425,6 @@ TEST(LoopNest, DistributeLoopWithoutAnyPivot) { auto forJ = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( a_buf, {i}, Add::make(Load::make(a_buf, {i}), Mul::make(i, j)))); @@ -4783,11 +4432,9 @@ TEST(LoopNest, DistributeLoopWithoutAnyPivot) { auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, Store::make( b_buf, {i}, Add::make(Load::make(b_buf, {i}), Mul::make(i, k)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({initA, forJ, initB, forK})); auto par = Block::make({forI}); @@ -4832,9 +4479,7 @@ TEST(LoopNest, 
DistributeLoopOverInnerLoops) { // B[i] = B[i] + i * k; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -4843,7 +4488,6 @@ TEST(LoopNest, DistributeLoopOverInnerLoops) { auto forJ = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( a_buf, {i}, Add::make(Load::make(a_buf, {i}), Mul::make(i, j)))); @@ -4851,11 +4495,9 @@ TEST(LoopNest, DistributeLoopOverInnerLoops) { auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, Store::make( b_buf, {i}, Add::make(Load::make(b_buf, {i}), Mul::make(i, k)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({initA, forJ, initB, forK})); auto par = Block::make({forI}); @@ -4893,15 +4535,11 @@ TEST(LoopNest, fuseLoopsSimple) { // for (int k = 0; k < 100; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 100, Store::make(b_buf, {k}, Mul::make(20, k))); auto par = Block::make({forJ, forK}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -4937,19 +4575,14 @@ TEST(LoopNest, fuseLoopsMultiple) { // for (int k = 0; k < 100; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {200}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto forI = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(i, 0, 100, Store::make(a_buf, {i + 100}, Add::make(20, i))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 100, Store::make(b_buf, {k}, Mul::make(20, k))); auto par = Block::make({forI, forJ, forK}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -4989,9 +4622,7 @@ TEST(LoopNest, fuseLoopsNested) { // B[n] = B[n] + n * k; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 100}, kInt); VarHandle m("m", kInt); VarHandle n("n", kInt); @@ -5001,7 +4632,6 @@ TEST(LoopNest, fuseLoopsNested) { auto forJ = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( a_buf, {m}, Add::make(Load::make(a_buf, {m}), Mul::make(m, j)))); @@ -5009,13 +4639,10 @@ TEST(LoopNest, fuseLoopsNested) { auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, Store::make( b_buf, {n}, Add::make(Load::make(b_buf, {n}), Mul::make(n, k)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forM = For::make(m, 0, 20, Block::make({initA, forJ})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forN = For::make(n, 0, 20, 
Block::make({initB, forK})); auto par = Block::make({forM, forN}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -5056,9 +4683,7 @@ TEST(LoopNest, fuseLoopsNested2D) { // B[m,n] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 100}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -5067,26 +4692,20 @@ TEST(LoopNest, fuseLoopsNested2D) { auto forI = For::make( i, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)))); auto forM = For::make( m, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, For::make( n, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b_buf, {m, n}, Add::make(m, Mul::make(n, 100))))); auto par = Block::make({forI, forM}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -5123,30 +4742,21 @@ TEST(LoopNest, fuseLoopsNested2DInner) { // B[i,n] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 100}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle n("n", kInt); auto forJ = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500))); auto forN = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) n, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b_buf, {i, n}, Add::make(i, Mul::make(n, 100)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({forJ, forN})); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* fused_loop; @@ -5179,15 +4789,11 @@ TEST(LoopNest, fuseLoopsDifferentStopBounds) { // for (int k = 0; k < 50; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 50, Store::make(b_buf, {j}, Mul::make(20, k))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forJ, forK}); @@ -5207,15 +4813,11 @@ TEST(LoopNest, fuseLoopsDifferentStartBounds) { // for (int k = 50; k < 100; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 50, 100, Store::make(b_buf, {j}, Mul::make(20, k))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forJ, forK}); @@ -5236,16 +4838,12 @@ TEST(LoopNest, fuseLoopsNotContiguous) { // for (int k = 50; k < 100; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto initB = Store::make(b_buf, {0}, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 50, 100, Store::make(b_buf, {j}, Mul::make(20, k))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forJ, initB, forK}); @@ -5268,19 +4866,14 @@ TEST(LoopNest, fuseLoopsWithDifferentParents) { // for (int k = 50; k < 100; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {50, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {i, j}, Mul::make(i, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 50, forJ); auto initB = Store::make(b_buf, {0}, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 50, 100, Store::make(b_buf, {j}, Mul::make(20, k))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forI, initB, forK}); @@ -5300,14 +4893,11 @@ TEST(LoopNest, fuseLoopsWithVariableBounds) { // for (int k = 0; k < N; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); VarHandle N("N", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, N, Store::make(a_buf, {j}, Mul::make(10, j))); // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks,cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, N, Store::make(b_buf, {j}, Mul::make(20, k))); @@ -5342,17 +4932,13 @@ TEST(LoopNest, fuseLoopsWithExprBounds) { // for (int k = 0; k < M + N; k++) { // B[k] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); VarHandle M("M", kInt); VarHandle N("N", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, M + N, Store::make(a_buf, {j}, Mul::make(10, j))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, M + N, Store::make(b_buf, {j}, Mul::make(20, k))); auto par = Block::make({forJ, forK}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -5385,15 +4971,12 @@ TEST(LoopNest, fuseLoopsWithDifferentExprBounds) { // for (int k = M; k < N + N; k++) { // B[k] = 20 * k; // } - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); VarHandle M("M", kInt); VarHandle N("N", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, M, N * 2, Store::make(a_buf, {j}, Mul::make(10, j))); // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks,cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, M, N + N, Store::make(b_buf, {j}, Mul::make(20, k))); @@ -5428,14 +5011,11 @@ TEST(LoopNest, fuseLoopsWithNonOverlappingBufferAccesses) { // for (int k = 10; k < 100; k++) { // A[k+100] = 30 * k // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {200}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(k, 10, 100, Store::make(a_buf, {k + 100}, Mul::make(30, k))); auto par = Block::make({forJ, forK}); @@ -5473,26 +5053,18 @@ TEST(LoopNest, fuseLoopsWithNonOverlapping2DBufferAccesses) { // A[m+20,n+100] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 50}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle m("m", kInt); VarHandle n("n", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto storeA1 = Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, storeA1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto storeA2 = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {m + 20, n + 100}, Add::make(m, Mul::make(n, 100))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forN = For::make(n, 0, 50, storeA2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forM = For::make(m, 0, 20, forN); auto par = Block::make({forI, forM}); @@ -5531,11 +5103,8 @@ TEST(LoopNest, fuseLoopsWithReductions) { // for (int m = 0; m < 20; m++) { // C[m] = A[m]; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c_buf("C", {20}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -5543,12 +5112,9 @@ TEST(LoopNest, fuseLoopsWithReductions) { auto initA = Store::make(a_buf, {i}, 0); auto sumA = Store::make( a_buf, {i}, Add::make(Load::make(a_buf, {i}), Load::make(b_buf, {i, j}))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, sumA); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({initA, forJ})); auto forM = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(m, 0, 20, Store::make(c_buf, {m}, Load::make(a_buf, {m}))); auto par = Block::make({forI, forM}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -5583,14 +5149,11 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies1) { // for (int k = 10; k < 100; k++) { 
// A[k-1] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(k, 10, 100, Store::make(a_buf, {k - 1}, Mul::make(20, k))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forJ, forK}); @@ -5610,14 +5173,11 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies2) { // for (int k = 10; k < 100; k++) { // A[k+50] = 20 * k; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {150}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 10, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(k, 10, 100, Store::make(a_buf, {k + 50}, Mul::make(20, k))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forJ, forK}); @@ -5643,9 +5203,7 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies3) { // B[n] = B[n] + n * k; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {25, 100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {20, 50}, kInt); VarHandle m("m", kInt); VarHandle n("n", kInt); @@ -5655,7 +5213,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies3) { auto forJ = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( a_buf, {m}, Add::make(Load::make(a_buf, {m}), Mul::make(m, j)))); @@ -5663,13 +5220,10 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies3) { auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, Store::make( b_buf, {n}, Add::make(Load::make(b_buf, {n}), Mul::make(n, k)))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forM = For::make(m, 0, 20, Block::make({initA, forJ})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forN = For::make(n, 0, 20, Block::make({initB, forK})); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forM, forN}); @@ -5693,7 +5247,6 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies4) { // A[m+1,n] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {30, 100}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -5702,26 +5255,20 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies4) { auto forI = For::make( i, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500)))); auto forM = For::make( m, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 20, For::make( n, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 50, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {m + 1, n}, Add::make(m, Mul::make(n, 100))))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forI, forM}); @@ -5743,25 +5290,19 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies5) { // A[i,n+1] = m + n * 100; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
BufHandle a_buf("A", {20, 200}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle n("n", kInt); auto forJ = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {i, j}, Mul::make(Mul::make(i, j), 500))); auto forN = For::make( n, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a_buf, {i, n + 1}, Add::make(i, Mul::make(n, 100)))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, Block::make({forJ, forN})); @@ -5781,24 +5322,18 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies6) { // for (int k = 0; k < 100; k++) { // B[k] = 20 * A[99-k]; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_buf, {k}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k})))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forJ, forK}); @@ -5818,24 +5353,18 @@ TEST(LoopNest, fuseLoopsThatViolateDependencies7) { // for (int j = 0; j < 100; j++) { // A[j] = 10 * j; // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b_buf("B", {100}, kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto forK = For::make( k, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) b_buf, {k}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Mul::make(20, Load::make(a_buf, {ExprHandle(99) - k})))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 100, Store::make(a_buf, {j}, Mul::make(10, j))); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forK, forJ}); @@ -5856,17 +5385,13 @@ TEST(LoopNest, areLoopsPerfectlyNested) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto store = Store::make(a_buf, {i, j, k}, Mul::make(Mul::make(i, j), k)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 40, store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, forK); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forI}); @@ -5903,14 +5428,11 @@ TEST(LoopNest, reorderNestedLoops2D) { // A[i,j] = i * j; // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); auto store = Store::make(a_buf, {i, j}, Mul::make(i, j)); - 
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto par = Block::make({forI}); @@ -5935,17 +5457,13 @@ TEST(LoopNest, reorderNestedLoops3D) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto store = Store::make(a_buf, {i, j, k}, Mul::make(Mul::make(i, j), k)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 40, store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, forK); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto par = Block::make({forI}); @@ -5973,7 +5491,6 @@ TEST(LoopNest, reorderNestedLoops4D) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40, 50}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); @@ -5982,15 +5499,10 @@ TEST(LoopNest, reorderNestedLoops4D) { auto store = Store::make( a_buf, {i, j, k, l}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Mul::make(Mul::make(Mul::make(Mul::make(i, j), k), l), 500)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forL = For::make(l, 0, 50, store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 40, forL); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, forK); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto par = Block::make({forI}); @@ -6017,17 +5529,13 @@ TEST(LoopNest, reorderTrivialPermutation) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto store = Store::make(a_buf, {i, j, k}, Mul::make(Mul::make(i, j), k)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 40, store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, forK); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); auto par = Block::make({forI}); @@ -6053,17 +5561,13 @@ TEST(LoopNest, reorderInvalidPermutations) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto store = Store::make(a_buf, {i, j, k}, Mul::make(Mul::make(i, j), k)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 40, store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, forK); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forI}); @@ -6098,17 +5602,13 @@ TEST(LoopNest, reorderInvalidLoopNest) { // } // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a_buf("A", {20, 30, 40}, kInt); VarHandle i("i", kInt); VarHandle j("j", kInt); VarHandle k("k", kInt); auto store = Store::make(a_buf, {i, j, k}, Mul::make(Mul::make(i, j), k)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK = For::make(k, 0, 40, 
store); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 30, forK); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 20, forJ); // NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores) auto par = Block::make({forI}); @@ -6146,26 +5646,21 @@ TEST(LoopNest, compressBufferSimple) { // B[i,j] = A[i,j] + A[i, j+1] // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* A = new Buf("A", {new IntImm(100), new IntImm(200)}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* B = new Buf("B", {new IntImm(100), new IntImm(200)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); VarHandle i("i", kInt); VarHandle j("j", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ1 = For::make(j, 0, 200, Store::make(a_buf, {i, j}, sin(i * j))); auto forJ2 = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 199, Store::make( b_buf, {i, j}, Add::make(Load::make(a_buf, {i, j}), Load::make(a_buf, {i, j + 1})))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 100, Block::make({forJ1, forJ2})); auto par = Block::make({forI}); LoopNest::compressBuffer(A, par); @@ -6198,9 +5693,7 @@ TEST(LoopNest, compressBufferMultipleDims) { // B[i,j] = A[i,j] + A[i,j] // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* A = new Buf("A", {new IntImm(100), new IntImm(200)}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* B = new Buf("B", {new IntImm(100), new IntImm(200)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); @@ -6211,9 +5704,7 @@ TEST(LoopNest, compressBufferMultipleDims) { b_buf, {i, j}, Add::make(Load::make(a_buf, {i, j}), Load::make(a_buf, {i, j}))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 200, Block::make({store1, store2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 100, forJ); auto par = Block::make({forI}); LoopNest::compressBuffer(A, par); @@ -6250,10 +5741,8 @@ TEST(LoopNest, compressBufferMultipleDims2) { // } // } Buf* A = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new Buf("A", {new IntImm(100), new IntImm(200), new IntImm(300)}, kInt); Buf* B = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new Buf("B", {new IntImm(100), new IntImm(200), new IntImm(300)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); @@ -6261,18 +5750,14 @@ TEST(LoopNest, compressBufferMultipleDims2) { VarHandle j("j", kInt); VarHandle k("k", kInt); auto store1 = Store::make(a_buf, {i, j, k}, sin(i * j * k)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK1 = For::make(k, 0, 300, store1); auto store2 = Store::make( b_buf, {i, j, k}, Add::make( Load::make(a_buf, {i, j, k}), Load::make(a_buf, {i, j, k + 1}))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forK2 = For::make(k, 0, 299, store2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ = For::make(j, 0, 200, Block::make({forK1, forK2})); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 100, forJ); auto par = Block::make({forI}); LoopNest::compressBuffer(A, par); @@ -6309,26 +5794,21 @@ TEST(LoopNest, compressBufferDifferentOrderIndices) { // B[i, j] = A[j, i] + A[j+1, 0] // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* A = new Buf("A", {new IntImm(100), new IntImm(200)}, kInt); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* B = new Buf("B", {new IntImm(100), new IntImm(200)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); VarHandle i("i", kInt); VarHandle j("j", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ1 = For::make(j, 0, 200, Store::make(a_buf, {j, i}, sin(i * j))); auto forJ2 = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 99, Store::make( b_buf, {i, j}, Add::make(Load::make(a_buf, {j, i}), Load::make(a_buf, {j + 1, i})))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 100, Block::make({forJ1, forJ2})); auto par = Block::make({forI}); LoopNest::compressBuffer(A, par); @@ -6363,9 +5843,7 @@ TEST(LoopNest, compressBufferVariableBounds) { // B[i,j] = A[i,j] + A[i, j+1] // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* A = new Buf("A", {new IntImm(100), new IntImm(200)}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* B = new Buf("B", {new IntImm(100), new IntImm(200)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); @@ -6419,28 +5897,22 @@ TEST(LoopNest, compressBufferNoCommonParentLoops) { // B[i,j] = A[i,j] + A[i, j+1] // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* A = new Buf("A", {new IntImm(100), new IntImm(200)}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* B = new Buf("B", {new IntImm(100), new IntImm(200)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); VarHandle i("i", kInt); VarHandle j("j", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ1 = For::make(j, 0, 200, Store::make(a_buf, {i, j}, sin(i * j))); auto forJ2 = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 199, Store::make( b_buf, {i, j}, Add::make(Load::make(a_buf, {i, j}), Load::make(a_buf, {i, j + 1})))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI1 = For::make(i, 0, 100, forJ1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI2 = For::make(i, 0, 100, forJ2); auto par = Block::make({forI1, forI2}); LoopNest::compressBuffer(A, par); @@ -6477,20 +5949,16 @@ TEST(LoopNest, compressBufferIndicesMixed) { // B[i,j] = A[i + j, j] + A[i + j, j+1] // } // } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* A = new Buf("A", {new IntImm(300), new IntImm(200)}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Buf* B = new Buf("B", {new IntImm(100), new IntImm(200)}, kInt); BufHandle a_buf(A); BufHandle b_buf(B); VarHandle i("i", kInt); VarHandle j("j", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forJ1 = For::make(j, 0, 200, Store::make(a_buf, {i + j, j}, sin(i * j))); auto forJ2 = For::make( j, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 199, Store::make( b_buf, @@ -6498,7 +5966,6 @@ TEST(LoopNest, compressBufferIndicesMixed) { Add::make( Load::make(a_buf, {i + j, j}), Load::make(a_buf, {i + j, j + 1})))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto forI = For::make(i, 0, 100, Block::make({forJ1, forJ2})); auto par = Block::make({forI}); LoopNest::compressBuffer(A, par); diff --git a/test/cpp/tensorexpr/test_memdependency.cpp b/test/cpp/tensorexpr/test_memdependency.cpp index 34198f3bd16a3..24e11e6d9ba40 100644 --- a/test/cpp/tensorexpr/test_memdependency.cpp +++ b/test/cpp/tensorexpr/test_memdependency.cpp @@ -481,7 +481,6 @@ TEST(MemDependency, MemDependencyCheckerOverlap) { */ Store* aStore = 
Store::make(a, {0}, 3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store* a2Store = Store::make(a, {0}, 6); Store* bStore = Store::make(b, {0}, Add::make(Load::make(a, {0}), 1)); @@ -520,7 +519,6 @@ TEST(MemDependency, MemDependencyCheckerLoop) { */ Store* aStore = Store::make(a, {x}, x); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* loop = For::make(x, 0, 10, aStore); Store* bStore = Store::make(b, {0}, Add::make(Load::make(a, {4}), 1)); @@ -548,9 +546,7 @@ TEST(MemDependency, MemDependencyCheckerLoop) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopReduce) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); @@ -570,7 +566,6 @@ TEST(MemDependency, MemDependencyCheckerLoopReduce) { ExprHandle reduce = ExprHandle(Sum()(a.node(), ExprHandle(1), {x.node()}, {x.node()})); Store* aReduce = Store::make(a, {0}, reduce); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* loop = For::make(x, 0, 10, aReduce); Store* bStore = Store::make(b, {0}, Load::make(a, {0})); @@ -611,9 +606,7 @@ TEST(MemDependency, MemDependencyCheckerLoopReduce) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopReduceExpanded) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); @@ -632,7 +625,6 @@ TEST(MemDependency, MemDependencyCheckerLoopReduceExpanded) { Store* aInit = Store::make(a, {0}, 0); ExprHandle aLoad = Load::make(a, {x}); Store* aReduce = Store::make(a, {0}, Add::make(aLoad, 1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* loop = For::make(x, 0, 10, aReduce); Store* bStore = Store::make(b, {0}, Load::make(a, {0})); @@ -669,9 +661,7 @@ TEST(MemDependency, MemDependencyCheckerLoopReduceExpanded) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerInputsOutputs) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); @@ -687,7 +677,6 @@ TEST(MemDependency, MemDependencyCheckerInputsOutputs) { ExprHandle aLoad = Load::make(a, {x}); Store* bStore = Store::make(b, {x}, Max::make(aLoad, 0, true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* loop = For::make(x, 0, 10, bStore); Stmt* stmt = Block::make({loop}); @@ -726,9 +715,7 @@ TEST(MemDependency, MemDependencyCheckerInputsOutputs) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerOutputDoesntDepend) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); @@ -743,7 +730,6 @@ TEST(MemDependency, MemDependencyCheckerOutputDoesntDepend) { */ Store* bStore = Store::make(b, {x}, Max::make(x, 0, true)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* loop = For::make(x, 0, 10, bStore); Stmt* stmt = Block::make({loop}); @@ 
-769,11 +755,8 @@ TEST(MemDependency, MemDependencyCheckerOutputDoesntDepend) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopBounds) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {10}, kInt); VarHandle x("x", kInt); using namespace analysis; @@ -800,17 +783,13 @@ TEST(MemDependency, MemDependencyCheckerLoopBounds) { */ std::vector stmts( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 1, 10, Store::make(b, {x}, Load::make(a, {x}))), For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 1, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9, Store::make(b, {x}, Mul::make(Load::make(b, {x}), 2))), For::make(x, 3, 4, Store::make(c, {x}, Load::make(a, {x}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(c, {x}, Load::make(b, {x})))}); Stmt* stmt = Block::make(stmts); @@ -960,9 +939,7 @@ TEST(MemDependency, MemDependencyCheckerLoopBounds) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopBoundsIndexShift) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); @@ -993,31 +970,21 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsIndexShift) { */ Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 1, 10, Store::make(a, {x}, Load::make(a, {x - 1}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 9, Store::make(a, {x}, Load::make(a, {x + 1}))), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 9, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {ExprHandle(9) - x}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Load::make(a, {ExprHandle(8) - x}))), For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x}, Load::make(a, {ExprHandle(9) - x}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(b, {x}, Load::make(a, {x})))}); stmt->accept(&analyzer); @@ -1159,9 +1126,7 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsIndexShift) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1187,7 +1152,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {y}, Add::make(Load::make(a, {y}), 1))})); @@ -1208,7 +1172,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( y, 0, - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {y + 1}, Add::make(Load::make(a, {y + 1}), 1))})); @@ -1230,7 +1193,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {0}, Add::make(Load::make(a, {0}), x))})); stmt->accept(&analyzer); @@ -1251,7 +1213,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {0}, Add::make(Load::make(b, {0}), x))})); stmt->accept(&analyzer); @@ -1271,7 +1232,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {y}, Add::make(Load::make(a, {y}), x))})); stmt->accept(&analyzer); @@ -1290,7 +1250,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x}, Load::make(a, {x + 1}))); stmt->accept(&analyzer); @@ -1309,7 +1268,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { analyzer.allowLoopExecutionOrderAnalysis(); Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x}, Load::make(a, {x + 1}))); stmt->accept(&analyzer); @@ -1327,7 +1285,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 1, 10, Store::make(a, {x}, Load::make(a, {x - 1}))); stmt->accept(&analyzer); @@ -1344,7 +1301,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { analyzer.allowLoopExecutionOrderAnalysis(); Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 1, 10, Store::make(a, {x}, Load::make(a, {x - 1}))); stmt->accept(&analyzer); @@ -1368,14 +1324,10 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {ExprHandle(9) - x}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Load::make(a, {ExprHandle(8) - x}))); stmt->accept(&analyzer); @@ -1398,14 +1350,10 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {ExprHandle(8) - x}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Load::make(a, {ExprHandle(9) - x}))); stmt->accept(&analyzer); @@ -1425,14 +1373,10 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 3, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {ExprHandle(9) - x}, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Load::make(a, {ExprHandle(8) - x}))); stmt->accept(&analyzer); @@ -1451,7 +1395,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { 
analyzer.allowLoopExecutionOrderAnalysis(); Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 3, 10, Store::make(a, {x - 2}, Load::make(a, {x - 1}))); stmt->accept(&analyzer); @@ -1473,7 +1416,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // distinct. Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 2}))); stmt->accept(&analyzer); @@ -1496,10 +1438,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // distinct. Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 1}))); stmt->accept(&analyzer); @@ -1517,10 +1457,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 1, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 - 1}))); stmt->accept(&analyzer); @@ -1538,10 +1476,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 2}))); stmt->accept(&analyzer); @@ -1559,10 +1495,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 1, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 - 2}))); stmt->accept(&analyzer); @@ -1580,12 +1514,9 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // of stride. MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x * 2}, Load::make(a, {x * 2 + 7}))); stmt->accept(&analyzer); @@ -1601,10 +1532,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // Works with offsets which are multiples of the stride. 
MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x * 2}, Load::make(a, {x * 2 + 4}))); stmt->accept(&analyzer); @@ -1623,12 +1552,9 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x * 6}, Load::make(a, {x * 6 + 5}))); stmt->accept(&analyzer); @@ -1646,7 +1572,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x * 2}, Load::make(a, {x * 6}))); stmt->accept(&analyzer); @@ -1663,7 +1588,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x * 4}, Load::make(a, {x * 2}))); stmt->accept(&analyzer); @@ -1681,12 +1605,9 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x * 2}, Load::make(a, {x * 6 + 1}))); stmt->accept(&analyzer); @@ -1703,12 +1624,9 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x * 2}, Load::make(a, {x * 6 + 4}))); stmt->accept(&analyzer); @@ -1725,12 +1643,9 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x * 2 + 3}, Load::make(a, {x * 6}))); stmt->accept(&analyzer); @@ -1746,10 +1661,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // If they have strides with no common muliple > 1, they overlap. MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x * 2}, Load::make(a, {x * 3 + 1}))); stmt->accept(&analyzer); @@ -1767,7 +1680,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x}, Load::make(a, {x + 10}))); stmt->accept(&analyzer); @@ -1783,12 +1695,9 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // If they have different execution orders they may overlap. 
MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x}, Load::make(a, {ExprHandle(9) - x}))); stmt->accept(&analyzer); @@ -1806,9 +1715,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x * 2}, Load::make(a, {ExprHandle(19) - x * 2}))); stmt->accept(&analyzer); @@ -1825,7 +1732,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { MemDependencyChecker analyzer; Stmt* stmt = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x / 2}, Load::make(a, {x / 2}))); stmt->accept(&analyzer); @@ -1841,10 +1747,8 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // If the stride is not monotonic, they overlap - even with an offset. MemDependencyChecker analyzer; Stmt* stmt = For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x / 2}, Load::make(a, {x / 2 + 1}))); stmt->accept(&analyzer); @@ -1864,7 +1768,6 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {Mod::make(x, 2)}, Load::make(a, {Mod::make(x, 2)}))); stmt->accept(&analyzer); @@ -1907,9 +1810,7 @@ TEST(MemDependency, MemDependencyCheckerLoopSelfDependency) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopDistinctStrides) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {20}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {20}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1918,13 +1819,10 @@ TEST(MemDependency, MemDependencyCheckerLoopDistinctStrides) { MemDependencyChecker analyzer({a.node()}, {b.node()}); Stmt* stmt = Block::make( {For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(b, {x * 2 + 1}, Load::make(a, {x * 2 + 1}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(b, {x * 2}, Load::make(a, {x * 2}))) }); @@ -1973,11 +1871,8 @@ TEST(MemDependency, MemDependencyCheckerLoopDistinctStrides) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1999,10 +1894,8 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(c, {x}, Load::make(a, {x}))), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), 
Store::make(c, {0}, Add::make(Load::make(b, {0}), 1)), Store::make(c, {0}, Add::make(Load::make(b, {1}), 1)))}); @@ -2038,17 +1931,13 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(c, {x}, Load::make(a, {x}))), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(c, {x}, Load::make(b, {x}))), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(c, {x}, Add::make(Load::make(b, {x}), 1))))}); @@ -2083,15 +1972,12 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(c, {x}, Load::make(a, {x}))), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(c, {x}, Add::make(Load::make(b, {x}), 1))), nullptr)}); @@ -2124,16 +2010,13 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(c, {x}, Load::make(a, {x}))), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), nullptr, For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(c, {x}, Add::make(Load::make(b, {x}), 1))))}); @@ -2164,16 +2047,12 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { Store* initStore = Store::make(c, {x}, Load::make(a, {x})); ExprHandle conditionalLoad = Load::make(c, {0}); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, initStore), Cond::make( CompareSelect::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) conditionalLoad, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, CompareSelectOperation::kLT), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(c, {0}, 5), nullptr)}); @@ -2191,11 +2070,8 @@ TEST(MemDependency, MemDependencyCheckerLoopBoundsCond) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerIfThenElse) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -2216,12 +2092,10 @@ TEST(MemDependency, MemDependencyCheckerIfThenElse) { c, {0}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), Add::make(Load::make(b, {0}), 1), Add::make(Load::make(b, {1}), 1))); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(c, {x}, Load::make(a, {x}))), ifStore}); @@ -2258,13 +2132,10 @@ TEST(MemDependency, MemDependencyCheckerIfThenElse) { c, 
{0}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), Add::make(Load::make(b, {0}), 1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 42)); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(c, {x}, Load::make(a, {x}))), ifStore}); @@ -2293,11 +2164,9 @@ TEST(MemDependency, MemDependencyCheckerIfThenElse) { c, {0}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(y, 5, CompareSelectOperation::kLT), Load::make(b, {x}), Load::make(a, {x}))); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Stmt* stmt = Block::make({For::make(x, 0, 10, ifStore)}); stmt->accept(&analyzer); @@ -2312,9 +2181,7 @@ TEST(MemDependency, MemDependencyCheckerIfThenElse) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerCutLoop) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); @@ -2331,9 +2198,7 @@ TEST(MemDependency, MemDependencyCheckerCutLoop) { MemDependencyChecker analyzer({a}, {b}); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 0, 10, Store::make(b, {x}, Load::make(a, {x}))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b, {5}, 100)}); stmt->accept(&analyzer); @@ -2364,20 +2229,15 @@ TEST(MemDependency, MemDependencyCheckerCutLoop) { MemDependencyChecker analyzer({a}, {b}); For* firstLoop = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(b, {x}, Load::make(a, {x}))); Store* secondStore = Store::make(b, {x}, Add::make(Load::make(b, {x}), 1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For* secondLoop = For::make(x, 4, 7, secondStore); Stmt* stmt = Block::make( {firstLoop, secondLoop, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b, {4}, 100), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b, {5}, 101), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(b, {6}, 102)}); stmt->accept(&analyzer); @@ -2403,11 +2263,8 @@ TEST(MemDependency, MemDependencyCheckerCutLoop) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerDynamicShapes) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {100}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {100}, kInt); VarHandle x("x", kInt); @@ -2511,10 +2368,8 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) { */ MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make({For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(c, {x}, Load::make(a, {Load::make(b, {x})})))}); @@ -2550,7 +2405,6 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) { // The load from A has bounds B[0] to B[9]. 
ExprHandle loadFromB0 = Load::make(b, {0}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle loadFromB9 = Load::make(b, {9}); ASSERT_TRUE(EQ(history[3]->bounds(), {CB(loadFromB0, loadFromB9)})); } @@ -2562,10 +2416,8 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) { */ MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make({For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(c, {Load::make(b, {x})}, Load::make(a, {x})))}); @@ -2611,10 +2463,8 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) { */ MemDependencyChecker analyzer({a, b}, {c}); Stmt* stmt = Block::make({For::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(c, {Load::make(b, {Load::make(a, {x})})}, x))}); @@ -2651,7 +2501,6 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) { ASSERT_TRUE(EQ(history[2]->bounds(), {CB(0, 9)})); // The load from B as bounds A[0] to A[9]. ExprHandle loadFromA0 = Load::make(a, {0}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle loadFromA9 = Load::make(a, {9}); ASSERT_TRUE(EQ(history[3]->bounds(), {CB(loadFromA0, loadFromA9)})); @@ -2666,7 +2515,6 @@ TEST(MemDependency, MemDependencyCheckerDynamicShapes) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerMultiDim) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int M = 10, N = 9, K = 12; BufHandle a("A", {M, N, K}, kInt); BufHandle b("B", {M, N, K}, kInt); @@ -2746,17 +2594,14 @@ TEST(MemDependency, MemDependencyCheckerMultiDim) { Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, For::make( z, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, Store::make(b, {x, y, z}, Load::make(a, {x, y, z})))))}); @@ -2837,7 +2682,6 @@ TEST(MemDependency, MemDependencyCheckerMultiDim) { For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 100, For::make( z, @@ -2959,20 +2803,16 @@ TEST(MemDependency, MemDependencyCheckerComputeAPI) { */ // Can determine if 2 loops created by Compute are dependent. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a_buf("a", kFloat, {4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b_buf("b", kFloat, {5, 6}); Tensor* c = Compute( "broadcast_add", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4, "m"}, {5, "n"}, {6, "k"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { return a_buf.load(m, n) + b_buf.load(n, k); }); Tensor* d = Compute( "d", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4, "m"}, {5, "n"}, {6, "k"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { return c->load(m, n, k) + 1; @@ -3011,20 +2851,16 @@ TEST(MemDependency, MemDependencyCheckerComputeInline) { // Check inlining affects the number of accesses returned. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a_buf("a", kFloat, {4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b_buf("b", kFloat, {5, 6}); Tensor* c = Compute( "broadcast_add", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4, "m"}, {5, "n"}, {6, "k"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { return a_buf.load(m, n) + b_buf.load(n, k); }); Tensor* d = Compute( "d", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4, "m"}, {5, "n"}, {6, "k"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { return c->load(m, n, k) + 1; @@ -3053,13 +2889,10 @@ TEST(MemDependency, MemDependencyCheckerComputeSplit) { using namespace analysis; // Split an axis, so the number of loops != the number of dimensions. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a_buf("a", kFloat, {4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b_buf("b", kFloat, {5, 6}); Tensor* c = Compute( "broadcast_add", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4, "m"}, {5, "n"}, {6, "k"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { return a_buf.load(m, n) + b_buf.load(n, k); @@ -3106,13 +2939,10 @@ TEST(MemDependency, MemDependencyCheckerComputeReorder) { using namespace analysis; // Reorder an axis, so the loop order doesn't match the indexing order. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a_buf("a", kFloat, {4, 5}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b_buf("b", kFloat, {5, 6}); Tensor* c = Compute( "broadcast_add", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{4, "m"}, {5, "n"}, {6, "k"}}, [&](const VarHandle& m, const VarHandle& n, const VarHandle& k) { return a_buf.load(m, n) + b_buf.load(n, k); @@ -3178,19 +3008,15 @@ TEST(MemDependency, MemDependencyCheckerComputeReduce) { // Can determine dependencies of a Reduction. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {2, 3, 6}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {2, 3, 6}, kFloat)); Tensor* c = Compute( "scale", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2, "l2"}, {3, "n1"}, {6, "m1"}}, [&](const VarHandle& l, const VarHandle& n, const VarHandle& m) { return b.load(l, n, m) * a.load(l, n, m); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* d = Reduce("sum", {{2, "l1"}}, Sum(), c, {{3, "n1"}, {6, "m1"}}); LoopNest l({d}, {c, d}); @@ -3216,11 +3042,8 @@ TEST(MemDependency, MemDependencyCheckerComputeReduce) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(MemDependency, MemDependencyCheckerComputeGEMM) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int M = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int K = 2048; using namespace analysis; @@ -3252,7 +3075,6 @@ TEST(MemDependency, MemDependencyCheckerComputeGEMM) { For* no; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* ni; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithMask(n, 16, &no, &ni); } // mo, mi, no, ni, k -> diff --git a/test/cpp/tensorexpr/test_reductions.cpp b/test/cpp/tensorexpr/test_reductions.cpp index 64ce76fd0e004..5d2c0f2a8a0ab 100644 --- a/test/cpp/tensorexpr/test_reductions.cpp +++ b/test/cpp/tensorexpr/test_reductions.cpp @@ -28,18 +28,14 @@ using namespace torch::jit::tensorexpr; TEST(Reductions, ReduceSum1D) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {10}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector<float> in(10); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { in[j] = j; } std::vector<float> out(1, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* c = Reduce("sum", {}, Sum(), b, {{10, "m"}}); LoopNest loop({c}); loop.prepareForCodegen(); @@ -80,7 +76,6 @@ TEST(Reductions, ReduceSum2D) { SimpleIREvaluator cg(s, {b, c, n, m}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cg.call({in, out, 5, 7}); float expected = 0; @@ -114,7 +109,6 @@ TEST(Reductions, ReduceSum3D) { SimpleIREvaluator cg(s, {b, c, m}); std::vector<float> bData(2 * 3 * M, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector<float> cData(2 * 3, 6.0f); std::vector<float> dData(2, 1.0f); std::vector<float> eData(2, 1.0f); @@ -250,18 +244,14 @@ TEST(Reductions, ReduceProduct) { TEST(Reductions, ReduceMax) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in_(BufHandle("b", {10}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector<float> in(10); std::vector<float> out(1, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { in[j] = j; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* dm1 = Reduce("max", {}, Maximum(kFloat), in_, {{10, "m"}}); LoopNest loop({dm1}); @@ -274,11 +264,9 @@ TEST(Reductions, ReduceMax) { ASSERT_EQ(out[0], 9); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in2_(BufHandle("b", {2, 5}, kFloat)); std::vector<float> out2(2, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* m2d = Reduce("max", {{2, "n"}}, Maximum(kFloat), in2_, {{5,
"m"}}); LoopNest loop2({m2d}); @@ -299,15 +287,11 @@ TEST(Reductions, ReduceMinCustomInitializer) { KernelScope kernel_scope; VarHandle minInit("minInit", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in_(BufHandle("b", {10}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in(10); std::vector out(1, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 10; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in[j] = 10 + j; } @@ -316,7 +300,6 @@ TEST(Reductions, ReduceMinCustomInitializer) { {}, Minimum(ExprHandle(minInit)), [&](ParameterList& v) { return in_.load(v); }, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{10, "m"}}); LoopNest loop({min}); @@ -332,7 +315,6 @@ TEST(Reductions, ReduceMinCustomInitializer) { ASSERT_EQ(out[0], 10); // With an initalizer lower than the min, that's the min. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cg.call({in, out, 5.f}); ASSERT_EQ(out[0], 5); } @@ -344,7 +326,6 @@ TEST(Reductions, ReduceAnyAll) { KernelScope kernel_scope; VarHandle searchValue("searchValue", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {4, 10}, kInt)); Reducer anyEqSV(ExprHandle(0), [](ExprHandle a, ExprHandle b) { @@ -358,7 +339,6 @@ TEST(Reductions, ReduceAnyAll) { [&](const auto& i, const auto& j) { return CompareSelect::make(b.load(i, j), searchValue, kEQ); }, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{10, "j"}}); LoopNest loop({any}); @@ -368,12 +348,10 @@ TEST(Reductions, ReduceAnyAll) { SimpleIREvaluator cg(s, {b, any, searchValue}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in(40, 0); std::vector out(4, 0); // input has 0-39 in 4 rows. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 40; ++i) { in[i] = i; } @@ -385,7 +363,6 @@ TEST(Reductions, ReduceAnyAll) { ASSERT_EQ(out[2], 0); ASSERT_EQ(out[3], 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cg.call({in, out, 15}); // 15 in the 3rd row @@ -405,7 +382,6 @@ TEST(Reductions, ReduceAnyAll) { [&](const auto& i, const auto& j) { return CompareSelect::make(b.load(i, j), searchValue, kGT); }, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{10, "j"}}); LoopNest loop2({allGreaterThan}); @@ -415,7 +391,6 @@ TEST(Reductions, ReduceAnyAll) { SimpleIREvaluator cg2(s, {b, allGreaterThan, searchValue}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cg2.call({in, out, 11}); // 11 is in row 2. 
@@ -440,12 +415,9 @@ TEST(Reductions, ReduceMatmul2D) { Placeholder tA(BufHandle("tA", {3, 2}, kFloat)); Placeholder tB(BufHandle("tB", {2, 3}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector tA_(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector tB_(6); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out(9, -1.f); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 2; ++j) { @@ -472,10 +444,8 @@ TEST(Reductions, ReduceMatmul2D) { cg.call({tA_, tB_, out}); std::vector expected( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {1.f, 3.f, 5.f, 3.f, 13.f, 23.f, 5.f, 23.f, 41.f}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 9; ++i) { ASSERT_EQ(out[i], expected[i]); } @@ -485,23 +455,17 @@ TEST(Reductions, ReduceMatmul2D) { TEST(Reductions, ReduceRfactorLike) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {10, 10}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_(100); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; ++i) { in_[i] = i; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_rf_(10, -2.f); std::vector out(1, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* l1 = Reduce("l1", {{10, "i"}}, Sum(), in, {{10, "j"}}); Placeholder in_rf(BufHandle(l1->buf())); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* l2 = Reduce("l2", {}, Sum(), in_rf, {{10, "i"}}); LoopNest loop({l1, l2}); @@ -541,11 +505,9 @@ TEST(Reductions, ReduceAsProducer) { std::vector aData(2 * 3, 0); std::vector bData(2 * 3 * M, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector dData(2 * 3, 6.0f); for (int i = 0; i < 2 * 3; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) aData[i] = 6 - i; for (int j = 0; j < M; ++j) { bData[i * M + j] = j; @@ -589,13 +551,11 @@ TEST(Reductions, ReduceAsConsumer) { std::vector aData(2 * 3 * M, 0); std::vector bData(2 * 3 * M, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector dData(2, 6.0f); for (int i = 0; i < 2 * 3; ++i) { for (int j = 0; j < M; ++j) { bData[i * M + j] = j + 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) aData[i * M + j] = 6 - i; } } @@ -621,23 +581,16 @@ TEST(Reductions, ReduceAsConsumer) { TEST(Reductions, SplitReduceAxis) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {16, 8}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_(16 * 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 8; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_[i * 8 + j] = i; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out(16, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Reduce("sum", {{16, "m"}}, Sum(), in, {{8, "n"}}); LoopNest l({tensor}); std::vector loops = l.getLoopStmtsFor(tensor); @@ -651,7 +604,6 @@ TEST(Reductions, SplitReduceAxis) { SimpleIREvaluator cg(s, {in, tensor}); cg.call({in_, out}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; ++i) { ASSERT_EQ(out[i], i * 8); } @@ -661,22 +613,15 @@ TEST(Reductions, SplitReduceAxis) { 
TEST(Reductions, SplitNonReduceAxis) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {16, 8}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_(16 * 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 8; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_[i * 8 + j] = i; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out(16, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Reduce("sum", {{16, "m"}}, Sum(), in, {{8, "n"}}); LoopNest l({tensor}); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -704,7 +649,6 @@ TEST(Reductions, SplitNonReduceAxis) { SimpleIREvaluator cg(s, {in, tensor}); cg.call({in_, out}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; ++i) { ASSERT_EQ(out[i], i * 8); } @@ -720,12 +664,9 @@ TEST(Reductions, ReorderedReductionInitializer) { SumOp(c(k, n), 0, a(k, m, n), {m}) */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {1, 12, 6}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_(12 * 6, 1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor_ = Reduce("sum", {{1, "k"}, {12, "n"}}, Sum(), in, {{6, "m"}}); LoopNest l_({tensor_}); @@ -733,7 +674,6 @@ TEST(Reductions, ReorderedReductionInitializer) { Stmt* s_ = Stmt::clone(l_.root_stmt()); s_ = IRSimplifier::simplify(s_); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Reduce("sum", {{1, "k"}, {12, "n"}}, Sum(), in, {{6, "m"}}); LoopNest l({tensor}); @@ -752,17 +692,14 @@ TEST(Reductions, ReorderedReductionInitializer) { s = l.root_stmt(); s = IRSimplifier::simplify(s); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out1(16, -1.f); SimpleIREvaluator cg(s_, {in, tensor_}); cg.call({in_, out1}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out2(16, -1.f); SimpleIREvaluator cg2(s, {in, tensor}); cg2.call({in_, out2}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 16; ++i) { ASSERT_EQ(out1[i], out2[i]); } @@ -880,7 +817,6 @@ TEST(Reductions, Reduce3DRfactorOuter) { TEST(Reductions, ReduceRepeatedInternalRfactor) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in_(BufHandle("in_", {2, 3, 4, 5, 6}, kFloat)); const int InputSize = 2 * 3 * 4 * 5 * 6; @@ -893,12 +829,10 @@ TEST(Reductions, ReduceRepeatedInternalRfactor) { {}, Sum(), in_, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{2, "a"}, {3, "b"}, {4, "c"}, {5, "d"}, {6, "e"}}); LoopNest orig_loop({c}); // Try rfactoring N outer loops - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int rfac_number = 1; rfac_number < 5; rfac_number++) { LoopNest refloop(orig_loop); LoopNest loop(orig_loop); @@ -949,7 +883,6 @@ TEST(Reductions, ReduceSplitTail) { Tensor* c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}, {K, "k"}}); LoopNest loop({c}); std::vector loops = loop.getLoopStmtsFor(c); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithTail(loops[i], 8); loop.prepareForCodegen(); @@ -983,7 +916,6 @@ TEST(Reductions, ReduceSplitNoTail) { Tensor* c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}, {K, "k"}}); LoopNest loop({c}); 
std::vector loops = loop.getLoopStmtsFor(c); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithTail(loops[i], 5); loop.prepareForCodegen(); @@ -1019,7 +951,6 @@ TEST(Reductions, ReduceOverSplitTail) { Tensor* c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}, {K, "k"}}); LoopNest loop({c}); std::vector loops = loop.getLoopStmtsFor(c); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithTail(loops[i], 16); loop.prepareForCodegen(); @@ -1054,7 +985,6 @@ TEST(Reductions, ReduceSplitMask) { Tensor* c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}, {K, "k"}}); LoopNest loop({c}); std::vector loops = loop.getLoopStmtsFor(c); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithMask(loops[i], 8); loop.prepareForCodegen(); @@ -1088,7 +1018,6 @@ TEST(Reductions, ReduceSplitNoMask) { Tensor* c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}, {K, "k"}}); LoopNest loop({c}); std::vector loops = loop.getLoopStmtsFor(c); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithMask(loops[i], 5); loop.prepareForCodegen(); @@ -1123,7 +1052,6 @@ TEST(Reductions, ReduceOverSplitMask) { Tensor* c = Reduce("sum", {{M, "m"}}, Sum(), b, {{N, "n"}, {K, "k"}}); LoopNest loop({c}); std::vector loops = loop.getLoopStmtsFor(c); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) loop.splitWithMask(loops[i], 16); loop.prepareForCodegen(); @@ -1326,7 +1254,6 @@ TEST(Reductions, ReduceInlineConsumer) { eval1(a_v, b_v, y_1); eval2(a_v, b_v, y_2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(y_1, y_2, 1e-5); std::ostringstream oss1, oss2; oss1 << *stmt1; @@ -1386,7 +1313,6 @@ TEST(Reductions, ReduceInlineReducerInternal) { eval1(a_v, b_v, y_1); eval2(a_v, b_v, y_2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExpectAllNear(y_1, y_2, 1e-5); std::ostringstream oss1, oss2; oss1 << *stmt1; @@ -1498,22 +1424,17 @@ TEST(Reductions, ReductionCacheAccessesInner) { TEST(Reductions, ReductionCacheBodyAccess) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {24, 32, 12}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {24, 32, 12}, kFloat)); Tensor* c = Compute( "scale", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{24, "l2"}, {32, "n1"}, {12, "m1"}}, [&](const VarHandle& l, const VarHandle& n, const VarHandle& m) { return b.load(l, n, m) * a.load(l, n, m); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* d = Reduce("sum", {{24, "l1"}}, Sum(), c, {{32, "n1"}, {12, "m1"}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* e = Compute("scale", {{24, "l"}}, [&](const VarHandle& l) { return b.load(0, 0, l) * d->load(l); }); @@ -1545,22 +1466,17 @@ TEST(Reductions, ReductionCacheBodyAccess) { TEST(Reductions, ReductionCacheConsumerAccess) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {24, 32, 12}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {24, 32, 12}, kFloat)); Tensor* c = Compute( "scale", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{24, "l2"}, {32, "n1"}, {12, "m1"}}, [&](const VarHandle& l, const VarHandle& n, const VarHandle& m) { return b.load(l, n, m) * a.load(l, n, m); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* d = Reduce("sum", {{24, "l1"}}, Sum(), c, {{32, "n1"}, 
{12, "m1"}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* e = Compute("scale", {{24, "l"}}, [&](const VarHandle& l) { return b.load(0, 0, l) * d->load(l); }); @@ -1592,22 +1508,17 @@ TEST(Reductions, ReductionCacheConsumerAccess) { TEST(Reductions, ReductionSplitCacheConsumerAccess) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {24, 32, 12}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {24, 32, 12}, kFloat)); Tensor* c = Compute( "scale", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{24, "l2"}, {32, "n1"}, {12, "m1"}}, [&](const VarHandle& l, const VarHandle& n, const VarHandle& m) { return b.load(l, n, m) * a.load(l, n, m); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* d = Reduce("sum", {{24, "l1"}}, Sum(), c, {{32, "n1"}, {12, "m1"}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* e = Compute("scale", {{24, "l"}}, [&](const VarHandle& l) { return b.load(0, 0, l) * d->load(l); }); @@ -1648,22 +1559,17 @@ TEST(Reductions, ReductionSplitCacheConsumerAccess) { TEST(Reductions, ReductionReorderCacheConsumerAccess) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a(BufHandle("a", {24, 32, 12}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder b(BufHandle("b", {24, 32, 12}, kFloat)); Tensor* c = Compute( "scale", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{24, "l2"}, {32, "n1"}, {12, "m1"}}, [&](const VarHandle& l, const VarHandle& n, const VarHandle& m) { return b.load(l, n, m) * a.load(l, n, m); }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* d = Reduce("sum", {{24, "l1"}}, Sum(), c, {{32, "n1"}, {12, "m1"}}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* e = Compute("scale", {{24, "l"}}, [&](const VarHandle& l) { return b.load(0, 0, l) * d->load(l); }); @@ -1842,25 +1748,17 @@ TEST(Reductions, ReductionRfactorCacheTempInner) { TEST(Reductions, ReductionVectorize) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_(8 * 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 8; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 8; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_[i * 8 + j] = i; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out_before(8, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector out_after(8, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {8, 8}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Reduce("sum", {{8, "m"}}, Sum(), in, {{8, "n"}}); LoopNest l_before({tensor}); LoopNest l(l_before); @@ -1889,7 +1787,6 @@ TEST(Reductions, ReductionVectorize) { s = IRSimplifier::simplify(l.root_stmt()); SimpleIREvaluator cg_after(s, {in, tensor}); cg_after.call({in_, out_after}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 8; ++i) { ASSERT_EQ(out_before[i], out_after[i]); } @@ -1899,10 +1796,8 @@ TEST(Reductions, ReductionVectorize) { TEST(Reductions, ReductionVectorizeInner) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {8, 8}, kFloat)); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Reduce("sum", {{8, "m"}}, Sum(), in, {{8, "n"}}); LoopNest l({tensor}); @@ -1914,23 +1809,17 @@ TEST(Reductions, ReductionVectorizeInner) { TEST(Reductions, ReductionVectorizeRfactor) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector in_(8 * 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 8; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int j = 0; j < 8; ++j) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in_[i * 8 + j] = i; } } std::vector out_before(1, -1.f); std::vector out_after(1, -1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder in(BufHandle("in", {8, 8}, kFloat)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor* tensor = Reduce("sum", {}, Sum(), in, {{8, "m"}, {8, "n"}}); LoopNest l_before({tensor}); diff --git a/test/cpp/tensorexpr/test_registerizer.cpp b/test/cpp/tensorexpr/test_registerizer.cpp index 63d25ffdbc326..facbf8c3a09ec 100644 --- a/test/cpp/tensorexpr/test_registerizer.cpp +++ b/test/cpp/tensorexpr/test_registerizer.cpp @@ -22,7 +22,6 @@ TEST(Registerizer, RegisterizerSimple) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), x))}))}); @@ -62,7 +61,6 @@ TEST(Registerizer, RegisterizerSimple) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerLoop) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( @@ -70,7 +68,6 @@ TEST(Registerizer, RegisterizerLoop) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {x}, Add::make(Load::make(a, {x}), x))}))}); @@ -119,7 +116,6 @@ TEST(Registerizer, RegisterizerLoopFixedLoad) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {x}, Add::make(Load::make(a, {0}), x))}))}); @@ -166,7 +162,6 @@ TEST(Registerizer, RegisterizerLoopInternal) { Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {x}, Add::make(Load::make(a, {x}), x)), @@ -214,9 +209,7 @@ TEST(Registerizer, RegisterizerLoopInternal) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerLoopInternalLoadOverlap) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -224,7 +217,6 @@ TEST(Registerizer, RegisterizerLoopInternalLoadOverlap) { Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {x}, Add::make(Load::make(b, {y}), Load::make(b, {z}))))}); stmt = IRSimplifier::simplify(stmt); @@ -256,7 +248,6 @@ TEST(Registerizer, RegisterizerLoopInternalRepeated) { {For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {1}), x)), @@ -264,7 +255,6 @@ TEST(Registerizer, RegisterizerLoopInternalRepeated) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( 
{Store::make(a, {0}, Add::make(Load::make(a, {1}), x)), @@ -331,7 +321,6 @@ TEST(Registerizer, RegisterizerLoopInternalRepeatedOverlapLoopVar) { {For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {x}), x)), @@ -339,7 +328,6 @@ TEST(Registerizer, RegisterizerLoopInternalRepeatedOverlapLoopVar) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {x}), x)), @@ -381,7 +369,6 @@ TEST(Registerizer, RegisterizerLoopInternalRepeatedOverlapOther) { {For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(x, Load::make(a, {y}))), @@ -389,7 +376,6 @@ TEST(Registerizer, RegisterizerLoopInternalRepeatedOverlapOther) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(x, Load::make(a, {y}))), @@ -432,7 +418,6 @@ TEST(Registerizer, RegisterizerMultiVar) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), x)), @@ -483,18 +468,15 @@ TEST(Registerizer, RegisterizerMultiVar) { TEST(Registerizer, RegisterizerVariableLoad) { KernelScope kernel_scope; BufHandle a("A", {1}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); VarHandle x2("x", kInt); Stmt* stmt = Block::make( {Store::make(a, {0}, 0), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(b, {x}, x)), For::make( x2, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make( a, {0}, Add::make(Load::make(a, {0}), Load::make(b, {x2})))}))}); @@ -551,7 +533,6 @@ TEST(Registerizer, RegisterizerSymbolicIndices) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {i}, Add::make(Load::make(a, {i}), x))}))}); @@ -599,12 +580,10 @@ TEST(Registerizer, RegisterizerMultiLoop) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make( a, @@ -659,7 +638,6 @@ TEST(Registerizer, RegisterizerRepeated) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), x)), @@ -717,7 +695,6 @@ TEST(Registerizer, RegisterizerNoLoads) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {0}, Add::make(x, 1))}))}); @@ -757,7 +734,6 @@ TEST(Registerizer, RegisterizerNoLoads) { TEST(Registerizer, RegisterizerNoRepeatedStores) { KernelScope kernel_scope; BufHandle a("A", {1}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( @@ -765,7 +741,6 @@ TEST(Registerizer, RegisterizerNoRepeatedStores) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(b, {x}, Add::make(Load::make(a, {0}), x))}))}); @@ -816,7 +791,6 @@ TEST(Registerizer, RegisterizerMultiVarOverlap) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {x}, Add::make(Load::make(a, {0}), x)), @@ -853,7 +827,6 @@ TEST(Registerizer, RegisterizerAllocs) { 
For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(b, {0}, Add::make(Load::make(b, {0}), x)), @@ -914,7 +887,6 @@ TEST(Registerizer, RegisterizerNoInitializer) { Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {0}, Add::make(Load::make(a, {0}), x))}))}); @@ -956,7 +928,6 @@ TEST(Registerizer, RegisterizerNoInitializerLoopVar) { Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {x}, Add::make(Load::make(a, {x}), x))}))}); stmt = IRSimplifier::simplify(stmt); @@ -988,7 +959,6 @@ TEST(Registerizer, RegisterizerLoadThenStore) { Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(b, {0}, Add::make(Load::make(a, {0}), x)), @@ -1044,7 +1014,6 @@ TEST(Registerizer, RegisterizerParallelized) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make(a, {0}, Add::make(Load::make(a, {0}), x))}), loopOpts)}); @@ -1066,11 +1035,8 @@ TEST(Registerizer, RegisterizerParallelized) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionAfter) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); @@ -1078,7 +1044,6 @@ TEST(Registerizer, RegisterizerConditionAfter) { {Store::make(a, {x}, Load::make(b, {x})), Store::make(c, {x}, Load::make(a, {x})), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), nullptr)}); @@ -1121,17 +1086,13 @@ TEST(Registerizer, RegisterizerConditionAfter) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionBefore) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), nullptr), @@ -1178,11 +1139,8 @@ TEST(Registerizer, RegisterizerConditionBefore) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionInside) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); @@ -1190,7 +1148,6 @@ TEST(Registerizer, RegisterizerConditionInside) { {Store::make(a, {x}, Load::make(b, {x})), Store::make(c, {x}, Load::make(a, {x})), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make(a, {x}, 
Add::make(Load::make(a, {x}), 1)), nullptr), @@ -1243,11 +1200,8 @@ TEST(Registerizer, RegisterizerConditionInside) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionInsideOverlap1) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1257,7 +1211,6 @@ TEST(Registerizer, RegisterizerConditionInsideOverlap1) { {Store::make(a, {x}, Load::make(b, {x})), Store::make(c, {x}, Load::make(a, {x})), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make({ Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), @@ -1308,11 +1261,8 @@ TEST(Registerizer, RegisterizerConditionInsideOverlap1) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionInsideOverlap2) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1323,7 +1273,6 @@ TEST(Registerizer, RegisterizerConditionInsideOverlap2) { Store::make(a, {x}, Load::make(b, {x + 1})), Store::make(c, {x}, Load::make(a, {x})), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make({ Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), @@ -1399,22 +1348,17 @@ TEST(Registerizer, RegisterizerConditionInsideOverlap2) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionHidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kGT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), nullptr)}); @@ -1447,23 +1391,18 @@ TEST(Registerizer, RegisterizerConditionHidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionUnhidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), nullptr), Store::make(a, {x}, Add::make(Load::make(a, 
{x}), 1)), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kGT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), nullptr)}); @@ -1514,11 +1453,8 @@ TEST(Registerizer, RegisterizerConditionUnhidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerCondCondition) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); @@ -1528,7 +1464,6 @@ TEST(Registerizer, RegisterizerCondCondition) { Cond::make( CompareSelect::make( Load::make(a, {x}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, CompareSelectOperation::kLT), Store::make(c, {x}, Add::make(Load::make(c, {x}), 1)), @@ -1572,19 +1507,14 @@ TEST(Registerizer, RegisterizerCondCondition) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerCondConditionUnhidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make({Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(Load::make(a, {x}), 5, CompareSelectOperation::kLT), Store::make(a, {x}, Add::make(Load::make(a, {x}), 1)), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x}, Add::make(Load::make(a, {x}), 10)))}); /* @@ -1627,11 +1557,8 @@ TEST(Registerizer, RegisterizerCondConditionUnhidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseHidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1641,7 +1568,6 @@ TEST(Registerizer, RegisterizerIfThenElseHidden) { b, {y}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), 1), Add::make(Load::make(a, {x + 1}), 2))), @@ -1649,7 +1575,6 @@ TEST(Registerizer, RegisterizerIfThenElseHidden) { b, {y + 1}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), 1), Add::make(Load::make(a, {x + 1}), 2)))}); @@ -1675,11 +1600,8 @@ TEST(Registerizer, RegisterizerIfThenElseHidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseUnhidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -1690,7 +1612,6 @@ TEST(Registerizer, 
RegisterizerIfThenElseUnhidden) { b, {y}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), 1), Add::make(Load::make(a, {x + 1}), 2))), @@ -1698,7 +1619,6 @@ TEST(Registerizer, RegisterizerIfThenElseUnhidden) { b, {y + 1}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), 1), Add::make(Load::make(a, {x + 1}), 2))), @@ -1736,13 +1656,9 @@ TEST(Registerizer, RegisterizerIfThenElseUnhidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseNested) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle d("D", {5}, kInt); VarHandle x("x", kInt); @@ -1756,7 +1672,6 @@ TEST(Registerizer, RegisterizerIfThenElseNested) { Load::make(d, {x}), Load::make(b, {x})), IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kEQ), Load::make(c, {x}), Load::make(d, {x}))))}); @@ -1787,9 +1702,7 @@ TEST(Registerizer, RegisterizerIfThenElseNested) { TEST(Registerizer, RegisterizerIfThenElseInternal) { KernelScope kernel_scope; // Making these floats so they don't get simplified to a single access. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kFloat); VarHandle x("x", kInt); @@ -1868,11 +1781,8 @@ TEST(Registerizer, RegisterizerIfThenElseInternal) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseCondition) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); @@ -1884,7 +1794,6 @@ TEST(Registerizer, RegisterizerIfThenElseCondition) { IfThenElse::make( CompareSelect::make( Load::make(a, {x}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, CompareSelectOperation::kLT), Load::make(b, {0}), @@ -1921,11 +1830,8 @@ TEST(Registerizer, RegisterizerIfThenElseCondition) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseConditionUnhidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); @@ -1935,11 +1841,9 @@ TEST(Registerizer, RegisterizerIfThenElseConditionUnhidden) { IfThenElse::make( CompareSelect::make( Load::make(a, {x}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), 1), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Add::make(Load::make(a, {x}), 10)))}); /* @@ -1969,36 +1873,28 @@ 
TEST(Registerizer, RegisterizerIfThenElseConditionUnhidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerConditionBranchOnly) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({ Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make( a, {x}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), x), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Add::make(Load::make(a, {x - 5}), x))), Store::make( a, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {x - 5}, IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Add::make(Load::make(a, {x}), x), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Add::make(Load::make(a, {x - 5}), x)))), }))}); stmt = IRSimplifier::simplify(stmt); @@ -2029,11 +1925,8 @@ TEST(Registerizer, RegisterizerConditionBranchOnly) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerCondIfThenElse) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {5}, kInt); VarHandle x("x", kInt); @@ -2042,7 +1935,6 @@ TEST(Registerizer, RegisterizerCondIfThenElse) { IfThenElse::make( CompareSelect::make( Load::make(a, {x}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, CompareSelectOperation::kLT), Load::make(a, {x}), @@ -2086,9 +1978,7 @@ TEST(Registerizer, RegisterizerCondIfThenElse) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseLoop) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -2096,7 +1986,6 @@ TEST(Registerizer, RegisterizerIfThenElseLoop) { Stmt* stmt = For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( a, @@ -2140,9 +2029,7 @@ TEST(Registerizer, RegisterizerIfThenElseLoop) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerIfThenElseLoopCut) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {5}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -2150,7 +2037,6 @@ TEST(Registerizer, RegisterizerIfThenElseLoopCut) { Stmt* stmt = Block::make({For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( a, @@ -2190,11 +2076,9 @@ TEST(Registerizer, RegisterizerPartialAfter) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), x))})), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
For::make(x, 1, 10, Store::make(a, {x}, Load::make(a, {x - 1})))}); /* @@ -2246,13 +2130,11 @@ TEST(Registerizer, RegisterizerPartialBefore) { BufHandle a("A", {1}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {For::make(x, 1, 10, Store::make(a, {x}, Load::make(a, {x - 1}))), Store::make(a, {0}, 0), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), x))}))}); @@ -2312,15 +2194,12 @@ TEST(Registerizer, RegisterizerPartialInside) { For::make( x1, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x1))), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x2, 1, 10, Store::make(a, {x2}, Load::make(a, {x2 - 1}))), For::make( x3, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x3)))}); @@ -2390,18 +2269,15 @@ TEST(Registerizer, RegisterizerPartialCondition) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x))), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Store::make(a, {x}, Load::make(a, {x - 1})), nullptr), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), x)))}); @@ -2469,12 +2345,10 @@ TEST(Registerizer, RegisterizerPartialConditionInternalCut) { {Store::make(a, {0}, 1), Store::make(a, {0}, 3), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make({Store::make(a, {x}, 1), Store::make(a, {x}, 3)}), nullptr), Store::make(a, {0}, 4), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {0}, 6)}); /* @@ -2535,12 +2409,10 @@ TEST(Registerizer, RegisterizerPartialConditionInternalStart) { {Store::make(a, {0}, 1), Store::make(a, {0}, 3), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make({Store::make(a, {x}, 1), Store::make(a, {x}, 3)}), nullptr), Store::make(a, {x}, 4), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Store::make(a, {x}, 6)}); /* @@ -2602,7 +2474,6 @@ TEST(Registerizer, RegisterizerPartialOverlapsTwo) { {Store::make(a, {1}, Load::make(a, {0})), Store::make(a, {0}, Load::make(a, {1})), Store::make(a, {0}, Load::make(a, {1})), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 1, 10, Store::make(a, {x}, x)), Store::make(a, {1}, Load::make(a, {0})), Store::make(a, {0}, Load::make(a, {1})), @@ -2727,7 +2598,6 @@ TEST(Registerizer, RegisterizerNestedConditions) { BufHandle a("A", {1}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make({Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), 1)), @@ -2787,7 +2657,6 @@ TEST(Registerizer, RegisterizerNestedConditionsUnhidden) { Stmt* stmt = Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), 1)), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make( {Store::make(a, {1}, 1), @@ -2848,7 +2717,6 @@ TEST(Registerizer, 
RegisterizerNestedConditionsHiddenFirst) { Store::make(a, {0}, Add::make(Load::make(a, {0}), 1)), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make({Cond::make( CompareSelect::make(x, 2, CompareSelectOperation::kEQ), @@ -2889,7 +2757,6 @@ TEST(Registerizer, RegisterizerNestedConditionsHiddenSecond) { VarHandle x("x", kInt); Stmt* stmt = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make({Cond::make( CompareSelect::make(x, 2, CompareSelectOperation::kEQ), @@ -2937,7 +2804,6 @@ TEST(Registerizer, RegisterizerNestedConditionsCut) { Stmt* stmt = Block::make( {Store::make(a, {0}, Add::make(Load::make(a, {0}), 1)), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), Block::make( {Store::make(a, {x}, 1), @@ -2973,9 +2839,7 @@ TEST(Registerizer, RegisterizerNestedConditionsCut) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerNestedConditionLoopHidden) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( @@ -2986,7 +2850,6 @@ TEST(Registerizer, RegisterizerNestedConditionLoopHidden) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(b, {x}, 0), @@ -3024,9 +2887,7 @@ TEST(Registerizer, RegisterizerNestedConditionLoopHidden) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerNestedConditionThreeDeep) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( @@ -3133,12 +2994,10 @@ TEST(Registerizer, RegisterizerNestedLoopSimple) { Stmt* stmt = Block::make({For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {y}, Add::make(Load::make(a, {y}), x))})))}); @@ -3185,9 +3044,7 @@ TEST(Registerizer, RegisterizerNestedLoopSimple) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerHiddenAccessYes) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3198,7 +3055,6 @@ TEST(Registerizer, RegisterizerHiddenAccessYes) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(b, {x}, 0), @@ -3208,7 +3064,6 @@ TEST(Registerizer, RegisterizerHiddenAccessYes) { For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( a, {0}, Add::make(Load::make(a, {0}), 1))), @@ -3273,9 +3128,7 @@ TEST(Registerizer, RegisterizerHiddenAccessYes) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerHiddenAccessNo) { KernelScope kernel_scope; - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3284,7 +3137,6 @@ TEST(Registerizer, RegisterizerHiddenAccessNo) { Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(b, {x}, 0), @@ -3294,7 +3146,6 @@ TEST(Registerizer, RegisterizerHiddenAccessNo) { For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))), nullptr)}))}), @@ -3358,9 +3209,7 @@ TEST(Registerizer, RegisterizerHiddenAccessNo) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerHiddenAccessMultiLoop) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3371,12 +3220,10 @@ TEST(Registerizer, RegisterizerHiddenAccessMultiLoop) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Cond::make( CompareSelect::make(y, 3, CompareSelectOperation::kEQ), @@ -3449,22 +3296,18 @@ TEST(Registerizer, RegisterizerTwoConditionalLoops) { VarHandle x("x", kInt); Stmt* stmt = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kGT), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))), nullptr)}); @@ -3532,24 +3375,19 @@ TEST(Registerizer, RegisterizerTwoConditionalLoopsCut) { VarHandle x("x", kInt); Stmt* stmt = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kLT), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))), nullptr), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) For::make(x, 0, 10, Store::make(a, {x}, 1)), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(x, 5, CompareSelectOperation::kGT), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make(a, {0}, Add::make(Load::make(a, {0}), 1))), nullptr)}); @@ -3622,17 +3460,14 @@ TEST(Registerizer, RegisterizerTwoConditionalLoopsCut) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerLoopLetVar) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); Stmt* stmt = Block::make({For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {Let::make(y, 30), Store::make(a, {y}, Add::make(x, Load::make(a, {y})))}))}); @@ -3660,17 +3495,14 @@ TEST(Registerizer, 
RegisterizerLoopLetVar) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerLoopLetVarOuter) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); Stmt* stmt = Block::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {Let::make(y, 30), For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make( {Store::make(a, {y}, Add::make(x, Load::make(a, {y})))}))}); @@ -3712,7 +3544,6 @@ TEST(Registerizer, RegisterizerLoopLetVarOuter) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerMultiDim) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {3, 4, 5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( @@ -3720,7 +3551,6 @@ TEST(Registerizer, RegisterizerMultiDim) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make( a, {0, 1, 2}, Add::make(Load::make(a, {0, 1, 2}), x))}))}); @@ -3761,7 +3591,6 @@ TEST(Registerizer, RegisterizerMultiDim) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerMultiDimPartial) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {3, 4, 5}, kInt); VarHandle x("x", kInt); Stmt* stmt = Block::make( @@ -3769,7 +3598,6 @@ TEST(Registerizer, RegisterizerMultiDimPartial) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make( a, {0, 2, 2}, Add::make(Load::make(a, {0, 1, 4}), x))}))}); @@ -3812,7 +3640,6 @@ TEST(Registerizer, RegisterizerMultiDimPartial) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerMultiDimOverlap) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {3, 4, 5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3821,7 +3648,6 @@ TEST(Registerizer, RegisterizerMultiDimOverlap) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make( a, {0, x, 2}, Add::make(Load::make(a, {y, 2, 2}), x))}))}); @@ -3850,7 +3676,6 @@ TEST(Registerizer, RegisterizerMultiDimOverlap) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerMultiDimPartialOverlap) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {3, 4, 5}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3859,7 +3684,6 @@ TEST(Registerizer, RegisterizerMultiDimPartialOverlap) { For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Block::make({Store::make( a, {0, x, 2}, Add::make(Load::make(a, {y, 2, 4}), x))}))}); @@ -3899,11 +3723,8 @@ TEST(Registerizer, RegisterizerMultiDimPartialOverlap) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerMultiDim3DReduction1) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10, 10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {10, 10, 10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3911,17 +3732,14 @@ 
TEST(Registerizer, RegisterizerMultiDim3DReduction1) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, For::make( z, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( c, @@ -3978,11 +3796,8 @@ TEST(Registerizer, RegisterizerMultiDim3DReduction1) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Registerizer, RegisterizerMultiDim3DReduction2) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle a("A", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle b("B", {10}, kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle c("C", {10}, kInt); VarHandle x("x", kInt); VarHandle y("y", kInt); @@ -3990,18 +3805,15 @@ TEST(Registerizer, RegisterizerMultiDim3DReduction2) { Stmt* stmt = For::make( x, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) For::make( y, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, For::make( z, 0, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, Store::make( c, diff --git a/test/cpp/tensorexpr/test_simplify.cpp b/test/cpp/tensorexpr/test_simplify.cpp index e89df8524e257..d641f11d3f8f8 100644 --- a/test/cpp/tensorexpr/test_simplify.cpp +++ b/test/cpp/tensorexpr/test_simplify.cpp @@ -75,9 +75,7 @@ using SimpleIRExprEval = ExprEval; // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldSimple) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); ExprHandle f = (a + b); @@ -92,13 +90,9 @@ TEST(Simplify, ConstantFoldSimple) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldTwoLayer) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(4.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle d(5.0f); ExprHandle f = (a + b) - (c + d); @@ -113,7 +107,6 @@ TEST(Simplify, ConstantFoldTwoLayer) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldShifts) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(7); ExprHandle b(2); ExprHandle c(3); @@ -130,11 +123,8 @@ TEST(Simplify, ConstantFoldShifts) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldBitwise) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(59); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(22); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(101); ExprHandle f = (a ^ b) & c; @@ -149,17 +139,11 @@ TEST(Simplify, ConstantFoldBitwise) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldMultiOp) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(4.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle d(5.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle e(6.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle f(7.0f); ExprHandle fn = ((a / e) - (c + d)) * (f / b); @@ -175,11 +159,8 @@ TEST(Simplify, ConstantFoldMultiOp) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldMinMax) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(12.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(15.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(17.0f); // x = max(12, min(15, 17)). @@ -199,11 +180,8 @@ TEST(Simplify, ConstantFoldMinMax) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConstantFoldIntrinsics) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(4.0f); ExprHandle powHandle = Intrinsics::make(kPow, a, b); ExprHandle sinHandle = Intrinsics::make(kSin, powHandle); @@ -250,7 +228,6 @@ TEST(Simplify, ConstantFoldWithVar) { { VarHandle x("x", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = x * (ExprHandle(2.f) + ExprHandle(4.f)); ExprHandle newF = IRSimplifier::simplify(body); @@ -259,7 +236,6 @@ TEST(Simplify, ConstantFoldWithVar) { ASSERT_NE(dynamic_cast(root->rhs()), nullptr); SimpleIRExprEval eval(newF); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(3.f)); ASSERT_EQ(eval.value(), 3 * (2 + 4)); } @@ -268,11 +244,8 @@ TEST(Simplify, ConstantFoldWithVar) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConditionalSelectFoldSimple) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(4.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(3.0f); { ExprHandle f = (a > b); @@ -319,11 +292,8 @@ TEST(Simplify, ConditionalSelectFoldSimple) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, ConditionalSelectFoldTwoLayer) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(2.0f); ExprHandle d(1.0f); { @@ -372,7 +342,6 @@ TEST(Simplify, ConditionalSelectFoldTwoLayer) { TEST(Simplify, ConditionalSelectFoldWithVar) { KernelScope kernel_scope; VarHandle x("x", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle f = x < 4.f; ExprHandle newF = IRSimplifier::simplify(f); @@ -381,13 +350,11 @@ TEST(Simplify, ConditionalSelectFoldWithVar) { { SimpleIRExprEval eval(newF); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(3.f)); ASSERT_EQ(eval.value(), 1); } { SimpleIRExprEval eval(newF); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(5.f)); ASSERT_EQ(eval.value(), 0); } @@ -398,7 +365,6 @@ TEST(Simplify, UnFoldableExpr) { KernelScope 
kernel_scope; VarHandle x("x", kFloat); VarHandle y("y", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (ExprHandle(3) * x) + (ExprHandle(5) * y); ExprHandle newF = IRSimplifier::simplify(body); @@ -408,9 +374,7 @@ TEST(Simplify, UnFoldableExpr) { ASSERT_EQ(dynamic_cast(root->rhs()), nullptr); SimpleIRExprEval eval(newF); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(x, ExprHandle(3.f)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) eval.bindVar(y, ExprHandle(2.f)); ASSERT_EQ(eval.value(), 9 + 10); } @@ -419,9 +383,7 @@ TEST(Simplify, UnFoldableExpr) { TEST(Simplify, HashSimple) { KernelScope kernel_scope; VarHandle x("x", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); ExprHandle f = a + b * x; @@ -502,11 +464,8 @@ TEST(Simplify, HashEquivalenceRand) { TEST(Simplify, HashEquivalenceAfterFolding) { KernelScope kernel_scope; VarHandle x("x", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle a(2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle b(3.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle c(5.0f); ExprHandle f1 = ((a + b) * x); @@ -554,7 +513,6 @@ TEST(Simplify, HashDifferenceTypes) { } // But coerced immediates are if they are the same type: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle f1 = ExprHandle(2.f) + CharImm::make(1); ExprHandle f2 = Cast::make(kFloat, IntImm::make(3)); @@ -767,7 +725,6 @@ TEST(Simplify, SimplifyMultiVar) { KernelScope kernel_scope; VarHandle x("x", kInt); VarHandle y("y", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = x * 24 + y * 34; ExprHandle simplified = IRSimplifier::simplify(body); @@ -1114,7 +1071,6 @@ TEST(Simplify, SimplifySubs) { { // (x + y + 5) * (x - x) => 0 // Cancelling out one side of Mul cancels both. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x + y + 5) * (x - x); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1206,7 +1162,6 @@ TEST(Simplify, SimplifyMod) { { // Constant folding works. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(10) % 8; ExprHandle simplified = IRSimplifier::simplify(body); // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) @@ -1299,7 +1254,6 @@ TEST(Simplify, SimplifyMod) { { // Sanity check true with scalars that are multiples. // 12 * x % 4 => 0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x * 12) % 4; ExprHandle simplified = IRSimplifier::simplify(body); IS_IMM_WITH_VAL(Int, simplified.node(), 0); @@ -1308,7 +1262,6 @@ TEST(Simplify, SimplifyMod) { { // Sanity check not true if the smaller scalar is on LHS. // 4 * x % 12 => 4 * x % 12 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x * 4) % 12; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mod, simplified.node(), mod); @@ -1321,7 +1274,6 @@ TEST(Simplify, SimplifyMod) { { // Both scalar and symbolic in multiple. 
// (6 * x * y) % (3 * x * y) => 0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (ExprHandle(6) * x * y) % (x * y * 3); ExprHandle simplified = IRSimplifier::simplify(body); IS_IMM_WITH_VAL(Int, simplified.node(), 0); @@ -1486,7 +1438,6 @@ TEST(Simplify, SimplifyFactorization) { { // Factorization attempt without a common divider. // (2 * x) + (5 * y) => (5 * y) + (2 * x) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (ExprHandle(2) * x + ExprHandle(5) * y); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1505,7 +1456,6 @@ TEST(Simplify, SimplifyFactorization) { // Factorization after merging. // (2 * x) + (4 * y) + (8 * x + 6 * y) => 10 * (x + y) ExprHandle body = (ExprHandle(2) * x + ExprHandle(4) * y) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (ExprHandle(8) * x + ExprHandle(6) * y); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1563,12 +1513,9 @@ TEST(Simplify, SimplifyFactorization) { VarHandle g("g", kInt); VarHandle h("h", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(0) + (ExprHandle(1024) * a) + (ExprHandle(-1) * b) + (ExprHandle(-1) * c) + (ExprHandle(1) * d) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (ExprHandle(1) * e) + (ExprHandle(32) * f) + (ExprHandle(-1024) * g) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (ExprHandle(-32) * h); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1672,7 +1619,6 @@ TEST(Simplify, SimplifyIfComponents) { VarHandle x("x", kInt); VarHandle y("y", kInt); ExprHandle body = IfThenElse::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ((ExprHandle(5) - ExprHandle(4)) * x) > y, ExprHandle(2) * x - x, ExprHandle(2) * y - y); @@ -1724,7 +1670,6 @@ TEST(Simplify, SimplifySymbolicMinMax) { { // Minimum with constant difference between terms. VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Min::make(x + 3, x + 7, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1736,7 +1681,6 @@ TEST(Simplify, SimplifySymbolicMinMax) { { // Maximum with constant difference between terms. VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Max::make(x + 3, x + 7, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1749,7 +1693,6 @@ TEST(Simplify, SimplifySymbolicMinMax) { // Can't simplify multiples because of signedness of variable component. // TODO: maybe we could for unsigned types? 
VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Max::make(x * 3, x * 7, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1892,7 +1835,6 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(5, Max(x, 8)) => Max(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Max::make(5, Max::make(x, 8, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1902,7 +1844,6 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(8, Max(x, 5)) => Max(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Max::make(8, Max::make(x, 5, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1912,7 +1853,6 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(Max(x, 8), 5) => Max(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Max::make(Max::make(x, 8, true), 5, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1922,7 +1862,6 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(Max(x, 5), 8) => Max(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Max::make(Max::make(x, 5, true), 8, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1933,9 +1872,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(5, Max(x, Max(y, Max(z, 8)))) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(x, Max::make(y, Max::make(z, 8, true), true), true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1951,9 +1888,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(8, Max(Max(y, Max(z, 5)), x)) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(y, Max::make(z, 5, true), true), x, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1969,9 +1904,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(5, Max(Max(Max(z, 8), y), x)) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(Max::make(z, 8, true), y, true), x, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -1987,9 +1920,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(Max(x, Max(y, Max(5, z))), 8) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(x, Max::make(y, Max::make(5, z, true), true), true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2005,9 +1936,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(Max(Max(y, Max(8, z)), x), 5) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(y, Max::make(z, 8, true), true), x, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2023,9 +1952,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(Max(Max(Max(5, z), y), x), 8) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(Max::make(z, 5, true), y, true), x, true), - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2043,9 +1970,7 @@ TEST(Simplify, SimplifyNestedMax) { // Do not simplify when all the Max ops do not have the same // propagate_nans. ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(Max::make(z, 5, true), y, false), x, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, false); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2066,9 +1991,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(8, Max(Max(x, 5), Max(y, z))) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(x, 5, true), Max::make(y, z, true), true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2084,9 +2007,7 @@ TEST(Simplify, SimplifyNestedMax) { { // Max(Max(Max(x, 5), Max(y, z)), 8) => Max(Max(Max(x, 8), y), z) ExprHandle body = Max::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Max::make(Max::make(x, 5, true), Max::make(y, z, true), true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2235,7 +2156,6 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(5, Min(x, 8)) => Min(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Min::make(5, Min::make(x, 8, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2245,7 +2165,6 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(8, Min(x, 5)) => Min(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Min::make(8, Min::make(x, 5, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2255,7 +2174,6 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(Min(x, 8), 5) => Min(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Min::make(Min::make(x, 8, true), 5, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2265,7 +2183,6 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(Min(x, 5), 8) => Min(x, 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = Min::make(Min::make(x, 5, true), 8, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2276,9 +2193,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(5, Min(x, Min(y, Min(z, 8)))) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(x, Min::make(y, Min::make(z, 8, true), true), true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2294,9 +2209,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(5, Min(Min(y, Min(z, 8)), x)) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(y, Min::make(z, 8, true), true), x, true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2312,9 +2225,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(5, Min(Min(Min(z, 8), y), x)) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(Min::make(z, 8, true), y, true), x, true), true); 
ExprHandle simplified = IRSimplifier::simplify(body); @@ -2330,9 +2241,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(Min(x, Min(y, Min(8, z))), 5) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(x, Min::make(y, Min::make(8, z, true), true), true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2348,9 +2257,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(Min(Min(y, Min(8, z)), x), 5) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(y, Min::make(z, 8, true), true), x, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2366,9 +2273,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(Min(Min(Min(8, z), y), x), 5) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(Min::make(z, 8, true), y, true), x, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 5, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2386,9 +2291,7 @@ TEST(Simplify, SimplifyNestedMin) { // Do not simplify when all the Min ops do not have the same // propagate_nans. ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(Min::make(z, 5, true), y, false), x, true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, false); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2409,9 +2312,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(8, Min(Min(x, 5), Min(y, z))) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(x, 5, true), Min::make(y, z, true), true), true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2427,9 +2328,7 @@ TEST(Simplify, SimplifyNestedMin) { { // Min(Min(Min(x, 5), Min(y, z)), 8) => Min(Min(Min(x, 5), y), z) ExprHandle body = Min::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Min::make(Min::make(x, 5, true), Min::make(y, z, true), true), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 8, true); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2516,9 +2415,7 @@ TEST(Simplify, SimplifyWontReorderFloat) { VarHandle x("x", kInt); VarHandle y("y", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(3.f) * (ExprHandle(3) * x) - - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(3) * (ExprHandle(3.f) * y); ExprHandle simplified = IRSimplifier::simplify(body); @@ -2801,9 +2698,7 @@ TEST(Simplify, SimplifyRoundModPatternFactorization) { // Factorization requiring constant folding. // 20 * (x / (16 / 2)) * 2 + (11 % 6) * (x % (7+1)) => 5 * x. 
VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = ExprHandle(40) * (x / (ExprHandle(16) / 2)) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (ExprHandle(11) % 6) * (x % (ExprHandle(7) + 1)); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mul, simplified.node(), mul); @@ -2813,7 +2708,6 @@ TEST(Simplify, SimplifyRoundModPatternFactorization) { { VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x / 5) * 10 + ExprHandle(2) * (x % 5); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mul, simplified.node(), mul); @@ -2823,7 +2717,6 @@ TEST(Simplify, SimplifyRoundModPatternFactorization) { { VarHandle x("x", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x / 10) * 0 + x % 5; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mod, simplified.node(), mod); @@ -2841,9 +2734,7 @@ TEST(Simplify, SimplifyRoundModPatternMultivar) { // (x/8) * 8 + (y/5)*5 + x%8 + y%5 => y + x. VarHandle x("x", kInt); VarHandle y("y", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x / ExprHandle(8) * ExprHandle(8)) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (y / ExprHandle(5) * ExprHandle(5)) + (x % 8) + (y % 5); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Add, simplified.node(), add); @@ -2858,7 +2749,6 @@ TEST(Simplify, SimplifyRoundModPatternMultivar) { VarHandle y("y", kInt); VarHandle z("z", kInt); ExprHandle body = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (y / ExprHandle(8) * ExprHandle(8)) + (x % 8) + (y % 8) + (z % 8); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Add, simplified.node(), add); @@ -2880,9 +2770,7 @@ TEST(Simplify, SimplifyRoundModPatternMultivar) { VarHandle y("y", kInt); VarHandle z("z", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = x + (z + ExprHandle(512) * y) % ExprHandle(16) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(16) * ((z + ExprHandle(512) * y) / ExprHandle(16)); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Add, simplified.node(), add); @@ -2902,7 +2790,6 @@ TEST(Simplify, SimplifyModRoundModPattern) { { // t/7 % 9 * 7 + t % 7 => t%63 VarHandle t("t", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / 7 % 9) * 7 + t % 7; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mod, simplified.node(), mod); @@ -2913,7 +2800,6 @@ TEST(Simplify, SimplifyModRoundModPattern) { { // 2*t/7 % 9 * 7 + 2*t % 7 => 2*t % 63 VarHandle t("t", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (ExprHandle(2) * t / 7 % 9) * 7 + ExprHandle(2) * t % 7; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mod, simplified.node(), mod); @@ -3000,7 +2886,6 @@ TEST(Simplify, SimplifyModRoundModPatternFactorization) { // 2 * (t /7 % 9 * 7) + 2 * (t % 7) => 2 * (t % 63) VarHandle t("t", kInt); ExprHandle body = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(2) * ((t / 7 % 9) * 7) + ExprHandle(2) * (t % 7); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mul, simplified.node(), mul); @@ -3013,7 +2898,6 @@ TEST(Simplify, SimplifyModRoundModPatternFactorization) { { // t /7 % 9 * 14 + 2* (t % 7) => 2* (t % 63) VarHandle t("t", 
kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / 7 % 9) * 14 + ExprHandle(2) * (t % 7); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mul, simplified.node(), mul); @@ -3026,7 +2910,6 @@ TEST(Simplify, SimplifyModRoundModPatternFactorization) { { // t/14 % 9 * 7 + t/2 % 7 => t/2 % 63 VarHandle t("t", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / 14 % 9) * 7 + t / 2 % 7; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mod, simplified.node(), mod); @@ -3039,9 +2922,7 @@ TEST(Simplify, SimplifyModRoundModPatternFactorization) { { // t/(7*3) % 9 * 7*3 + t % (7*3) => t % 189 VarHandle t("t", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / (ExprHandle(7) * ExprHandle(3)) % 9) * 7 * 3 + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t % (ExprHandle(7) * ExprHandle(3)); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Mod, simplified.node(), mod); @@ -3074,7 +2955,6 @@ TEST(Simplify, SimplifyModRoundModPatternMultivar) { { // t/7 % 9 * 7 + t % 7 + t => t % 63 + t VarHandle t("t", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / 7 % 9) * 7 + t % 7 + t; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Add, simplified.node(), add); @@ -3087,7 +2967,6 @@ TEST(Simplify, SimplifyModRoundModPatternMultivar) { { // t/7 % 9 * 7 + t/8 % 9 * 8 + t % 7 + t % 8 => t % 63 + t % 72 VarHandle t("t", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / 7 % 9) * 7 + (t / 8 % 9) * 8 + t % 7 + t % 8; ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Add, simplified.node(), add); @@ -3145,7 +3024,6 @@ TEST(Simplify, SimplifyModRoundModPatternMultivar) { // => io_flat VarHandle t("io_flat", kInt); ExprHandle body = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(7) * (t / 7 % 9) + t % 7 + ExprHandle(63) * (t / 63); ExprHandle simplified = IRSimplifier::simplify(body); IS_VAR_WITH_NAME(simplified.node(), "io_flat"); @@ -3157,11 +3035,8 @@ TEST(Simplify, SimplifyModRoundModPatternMultivar) { // (i0_flat / 7 % 9) * 7 + // i0_flat % 7 => io_flat VarHandle t("io_flat", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (t / (ExprHandle(11) * 10 * 9 * 7)) * (7 * 9 * 10 * 11) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (t / (ExprHandle(10) * 9 * 7) % 11) * 7 * 9 * 10 + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (t / (ExprHandle(9) * 7) % 10) * 7 * 9 + (t / 7 % 9) * 7 + t % 7; ExprHandle simplified = IRSimplifier::simplify(body); IS_VAR_WITH_NAME(simplified.node(), "io_flat"); @@ -3205,7 +3080,6 @@ TEST(Simplify, SimplifyDivisionScalarFactorization) { // 8x / 4y => 2x / y. VarHandle x("x", kInt); VarHandle y("y", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x * 8) / (y * 4); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Div, simplified.node(), div); @@ -3219,7 +3093,6 @@ TEST(Simplify, SimplifyDivisionScalarFactorization) { // Don't change anything if we can't factorize. 
VarHandle x("x", kInt); VarHandle y("y", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x * 7) / (y * 4); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Div, simplified.node(), div); @@ -3235,7 +3108,6 @@ TEST(Simplify, SimplifyDivisionScalarFactorization) { // Don't reorder floats. VarHandle x("x", kFloat); VarHandle y("y", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = (x * 8) / (y * 4); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(Div, simplified.node(), div); @@ -3515,7 +3387,6 @@ TEST(Simplify, SimplifyConstantComparisons) { ComparisonTest(2, 1, kNE, 1); // With specified results: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = CompareSelect::make(2, 2, 5, 42, kNE); ExprHandle simplified = IRSimplifier::simplify(body); IS_IMM_WITH_VAL(Int, simplified.node(), 42); @@ -3563,7 +3434,6 @@ TEST(Simplify, SimplifySymbolicComparisons) { { // x == 5 => x == 5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) body = CompareSelect::make(x, 5, kEQ); ExprHandle simplified = IRSimplifier::simplify(body); IS_NODE_WITH_NAME(CompareSelect, simplified.node(), cmp); @@ -3802,7 +3672,6 @@ TEST(Simplify, SimplifyForWontLoseLoopOptions) { BufHandle c("C", {4}, kInt); VarHandle i("i", kInt); LoopOptions options; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) options.set_gpu_block_index(12); auto body = For::make(i, 0, 1, Store::make(c, {i}, Load::make(a, {i})), options); @@ -3878,13 +3747,11 @@ TEST(Simplify, SimplifyForCleansUp) { KernelScope kernel_scope; { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder a("a", kFloat, {1, 12, 1}); VarHandle x("x", kInt); Tensor* b = Compute( // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) "x", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{1, "i"}, {12, "m"}, {1, "n"}}, [](const VarHandle& i, const VarHandle& m, const VarHandle& n) { return i + m + n; @@ -3913,10 +3780,8 @@ TEST(Simplify, SimplifyEliminateEmptyFor) { { // Flatten many layers around an empty block to an empty block. Stmt* last = new Block({}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 11; ++i) { VarHandle loopVar("loopVar", kInt); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) last = For::make(loopVar, 0, 10, last); } @@ -4002,7 +3867,6 @@ TEST(Simplify, SimplifyFlattenBlock) { { // Flatten many layers around an empty block to an empty block. 
Stmt* last = new Block({}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 11; ++i) { last = new Block({last}); } @@ -4139,7 +4003,6 @@ TEST(Simplify, SimplifyReorderForCond) { 0, 4, Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(c, {i}, Load::make(a, {i})), nullptr)); @@ -4157,7 +4020,6 @@ TEST(Simplify, SimplifyReorderForCond) { 0, 4, Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(c, {i}, Load::make(a, {i})), nullptr)); @@ -4177,7 +4039,6 @@ TEST(Simplify, SimplifyReorderForCond) { Cond::make( CompareSelect::make( Load::make(c, {0}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, CompareSelectOperation::kLT), Store::make(c, {0}, Load::make(a, {i})), @@ -4197,7 +4058,6 @@ TEST(Simplify, SimplifyReorderForCond) { Cond::make( CompareSelect::make( Load::make(b, {0}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, CompareSelectOperation::kLT), Store::make(c, {0}, Load::make(a, {i})), @@ -4218,7 +4078,6 @@ TEST(Simplify, SimplifyReorderForCond) { Cond::make( CompareSelect::make( Load::make(a, {0}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, CompareSelectOperation::kLT), Store::make(c, {0}, Load::make(a, {i})), @@ -4239,7 +4098,6 @@ TEST(Simplify, SimplifyReorderForCond) { Block::make( {Let::make(j, 3), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(c, {0}, Load::make(a, {i})), nullptr)})); @@ -4260,11 +4118,9 @@ TEST(Simplify, SimplifyReorderForCond) { Cond::make( CompareSelect::make( Load::make(a, {0}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, CompareSelectOperation::kLT), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kEQ), Store::make(c, {0}, Load::make(a, {i})), nullptr), @@ -4288,11 +4144,9 @@ TEST(Simplify, SimplifyReorderForCond) { Cond::make( CompareSelect::make( Load::make(a, {0}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, CompareSelectOperation::kLT), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kEQ), Store::make(c, {0}, Load::make(a, {i})), nullptr), @@ -4314,7 +4168,6 @@ TEST(Simplify, SimplifyReorderForCond) { 0, 4, Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(c, {0}, Load::make(a, {i})), Store::make(c, {0}, 0))); @@ -4335,7 +4188,6 @@ TEST(Simplify, SimplifyReorderForCond) { Cond::make( CompareSelect::make( Load::make(c, {0}), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, CompareSelectOperation::kLT), Store::make(c, {1}, Load::make(a, {i})), @@ -4360,12 +4212,10 @@ TEST(Simplify, SimplifyFuseConditions) { // if (A) { X }; if (A) { Y }; => if (A) { X; Y } auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, i), nullptr)}); @@ -4384,12 +4234,10 @@ TEST(Simplify, SimplifyFuseConditions) { // Can't fuse, conditions are not identical in lhs (i != j). 
auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(a, {1}, i), nullptr)}); @@ -4412,12 +4260,10 @@ TEST(Simplify, SimplifyFuseConditions) { // Can't fuse, conditions are not identical in rhs (10 != 11). auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 11, CompareSelectOperation::kLT), Store::make(a, {1}, i), nullptr)}); @@ -4441,12 +4287,10 @@ TEST(Simplify, SimplifyFuseConditions) { // Can't fuse, conditions are not identical in operation (LT vs GT). auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kGT), Store::make(a, {1}, i), nullptr)}); @@ -4474,7 +4318,6 @@ TEST(Simplify, SimplifyFuseConditions) { {Cond::make( CompareSelect::make( i, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, new IntImm(1), new IntImm(0), @@ -4484,7 +4327,6 @@ TEST(Simplify, SimplifyFuseConditions) { Cond::make( CompareSelect::make( j, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 10, new IntImm(2), new IntImm(0), @@ -4511,12 +4353,10 @@ TEST(Simplify, SimplifyFuseConditions) { // Can fuse with false stmt only. auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), nullptr, Store::make(a, {0}, i)), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), nullptr, Store::make(a, {1}, i))}); @@ -4534,12 +4374,10 @@ TEST(Simplify, SimplifyFuseConditions) { // Can fuse with both true and false stmt. 
auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), Store::make(b, {0}, i)), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, i), Store::make(b, {1}, i))}); @@ -4558,12 +4396,10 @@ TEST(Simplify, SimplifyFuseConditions) { // Can fuse with mismatched true / false stmt existing auto body = Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), nullptr, Store::make(b, {1}, i))}); @@ -4598,22 +4434,18 @@ TEST(Simplify, SimplifyFuseConditions) { auto body = Block::make({ Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(a, {0}, j), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 11, CompareSelectOperation::kLT), Store::make(a, {1}, j), nullptr), @@ -4633,22 +4465,18 @@ TEST(Simplify, SimplifyFuseConditions) { // Can fuse longer sequences of identical conditions. auto body = Block::make({ Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, j), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, j), nullptr), @@ -4666,23 +4494,19 @@ TEST(Simplify, SimplifyFuseConditions) { // Can't fuse through a non condition. 
auto body = Block::make({ Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, j), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {0}, i), nullptr), Store::make(b, {1}, i + j), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, i), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(i, 10, CompareSelectOperation::kLT), Store::make(a, {1}, j), nullptr), @@ -4711,7 +4535,6 @@ TEST(Simplify, SimplifyFuseConditions) { {Cond::make( CompareSelect::make( i * 2, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(87) % ExprHandle(11), CompareSelectOperation::kLT), Store::make(a, {0}, i), @@ -4719,7 +4542,6 @@ TEST(Simplify, SimplifyFuseConditions) { Cond::make( CompareSelect::make( i * 2, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle(300) / ExprHandle(30), CompareSelectOperation::kLT), Store::make(a, {1}, i), @@ -4783,12 +4605,10 @@ TEST(Simplify, SimplifyFuseConditions) { 4, Block::make( {Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(a, {1}, Load::make(b, {0})), nullptr), Cond::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) CompareSelect::make(j, 10, CompareSelectOperation::kLT), Store::make(a, {2}, Load::make(b, {0})), nullptr)})); @@ -4903,9 +4723,7 @@ TEST(Simplify, SimplifySyncThreads) { TEST(Simplify, SimplifyRampSubBroadcast) { KernelScope kernel_scope; int num_lanes = 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle ramp = Ramp::make(ExprHandle(0), ExprHandle(6), num_lanes); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle broadcast = Broadcast::make(ExprHandle(-5), num_lanes); ExprHandle simplified = IRSimplifier::simplify(ramp - broadcast); Ramp* newRamp = simplified.AsNode(); @@ -4919,7 +4737,6 @@ TEST(Simplify, SimplifyRampSubBroadcast) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) TEST(Simplify, SimplifyBroadcastTermExpander) { KernelScope kernel_scope; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int num_lanes = 8; ExprHandle bc0 = Broadcast::make(ExprHandle(0), num_lanes); ExprHandle bc1 = Broadcast::make(ExprHandle(1), num_lanes); diff --git a/test/cpp/tensorexpr/test_train.cpp b/test/cpp/tensorexpr/test_train.cpp index 74ecadccfe481..54902425ae92f 100644 --- a/test/cpp/tensorexpr/test_train.cpp +++ b/test/cpp/tensorexpr/test_train.cpp @@ -68,15 +68,11 @@ TEST(Train, TrainBasic) { SimpleIREvaluator cg( s, {inputs.at(A), inputs.at(B), bindings.at(C), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_vec(N, 21.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_vec(N, 2.0f); std::vector c_vec(N, 0.0f); cg.call({a_vec.data(), b_vec.data(), c_vec.data(), N}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(c_vec, 42.0f); } { @@ -107,17 +103,13 @@ TEST(Train, TrainBasic) { bindings.at(dA), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 
std::vector a_vec(N, 21.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_vec(N, 2.0f); std::vector ones_vec(N, 1.0f); std::vector da_vec(N, 0.0f); cg.call({a_vec.data(), b_vec.data(), ones_vec.data(), da_vec.data(), N}); // 2*A*B^2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(da_vec, 168.0f); } // T wrapper @@ -139,15 +131,11 @@ TEST(Train, TrainBasic) { SimpleIREvaluator cg( s, {inputs.at(A), inputs.at(B), bindings.at(C), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_vec(N, 21.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_vec(N, 2.0f); std::vector c_vec(N, 0.0f); cg.call({a_vec.data(), b_vec.data(), c_vec.data(), N}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(c_vec, 23.0f); } { @@ -178,17 +166,13 @@ TEST(Train, TrainBasic) { bindings.at(dA), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_vec(N, 21.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_vec(N, 2.0f); std::vector ones_vec(N, 1.0f); std::vector da_vec(N, 0.0f); cg.call({a_vec.data(), b_vec.data(), ones_vec.data(), da_vec.data(), N}); // 2*A*B^2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(da_vec, 168.0f); } // division gradient @@ -217,17 +201,13 @@ TEST(Train, TrainBasic) { inputs.at(ones), bindings.at(dC), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector a_vec(N, 2.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector b_vec(N, 3.0f); std::vector ones_vec(N, 1.0f); std::vector dc_vec(N, 0.0f); cg.call({a_vec.data(), b_vec.data(), ones_vec.data(), dc_vec.data(), N}); // -2 A^4 / B^3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(dc_vec, -1.185185185185f); } { @@ -243,13 +223,10 @@ TEST(Train, TrainBasic) { KernelScope kernel_scope; std::tie(s, inputs, bindings, vbindings) = to_tensorexpr(g, {Y}); SimpleIREvaluator cg(s, {inputs.at(X), bindings.at(Y), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector X_vec(N, 2.0f); std::vector Y_vec(1, 0.0f); cg.call({X_vec.data(), Y_vec.data(), N}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(Y_vec, 2048.f); } @@ -267,13 +244,10 @@ TEST(Train, TrainBasic) { KernelScope kernel_scope; std::tie(s, inputs, bindings, vbindings) = to_tensorexpr(g, {Z}); SimpleIREvaluator cg(s, {inputs.at(X), bindings.at(Z), vbindings.at("K")}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto N = 1024; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector X_vec(N, 2.0f); std::vector Z_vec(N, 0.0f); cg.call({X_vec.data(), Z_vec.data(), N}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) assertAllEqual(Z_vec, 2048.f); } @@ -329,17 +303,14 @@ TEST(Train, TrainBasic) { std::vector X_(N, 0.0f); // Generate a random target vector - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector W_ref_(N, 3.0f); std::generate(W_ref_.begin(), W_ref_.end(), gen); std::vector W_(N, 0.0f); std::vector one_(1, 1.0f); std::vector K_(N, 1.0f); - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector LR_(1, 0.1f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto i = 0; i < 100; ++i) { std::generate(X_.begin(), X_.end(), gen); cg.call( diff --git a/test/cpp/tensorexpr/test_train_impl.cpp b/test/cpp/tensorexpr/test_train_impl.cpp index a57d4d5e9901b..f66ea5c6feb64 100644 --- a/test/cpp/tensorexpr/test_train_impl.cpp +++ b/test/cpp/tensorexpr/test_train_impl.cpp @@ -429,10 +429,8 @@ to_tensorexpr(const VGraph& graph, std::vector outputs) { std::stringstream ss; auto k = unique_name_map.size() + 1; while (k) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto n = k % 26; ss << "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[n - 1]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) k /= 26; } auto name = ss.str(); diff --git a/test/cpp/tensorexpr/test_type.cpp b/test/cpp/tensorexpr/test_type.cpp index fa6d9d5ed74a9..a7e357641136f 100644 --- a/test/cpp/tensorexpr/test_type.cpp +++ b/test/cpp/tensorexpr/test_type.cpp @@ -16,10 +16,8 @@ TEST(Type, Test01) { ASSERT_EQ(dt1, kInt); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dtype dt2_a(kInt, 8); Dtype dt2_b(kInt, 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dtype dt2_c(ScalarType::Int, 8); ASSERT_EQ(dt2_a, dt2_c); ASSERT_NE(dt2_a, dt2_b); @@ -36,9 +34,7 @@ TEST(Type, Test01) { ASSERT_EQ(kBool, ToDtype()); } { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dtype int32x8(kInt, 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Dtype float32x8(kFloat, 8); ASSERT_NE(int32x8, float32x8); ASSERT_EQ(float32x8, BinaryOpDtype(int32x8, float32x8)); @@ -82,7 +78,6 @@ TEST(Type, BitCasting) { constexpr int16_t ref16 = 1337; constexpr int32_t ref32 = 1337; constexpr int64_t ref64 = 1337; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::Half reff16 = 1337.0f; constexpr float reff32 = 1337.0f; constexpr double reff64 = 1337.0f; @@ -170,9 +165,7 @@ TEST(Type, Propagation) { KernelScope kernel_scope; VarHandle x("x", kFloat); VarHandle y("y", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle body = FloatImm::make(2.f) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (x * FloatImm::make(3.f) + FloatImm::make(4.f) * y); ASSERT_EQ(body.dtype(), kFloat); } @@ -182,7 +175,6 @@ TEST(Type, Propagation) { VarHandle x("x", kShort); VarHandle y("y", kLong); ExprHandle body = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ShortImm::make(2.f) + (x * ShortImm::make(3) + ShortImm::make(4) * y); ASSERT_EQ(body.dtype(), kLong); } @@ -192,7 +184,6 @@ TEST(Type, Propagation) { VarHandle x("x", kHalf); VarHandle y("y", kDouble); ExprHandle body = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) HalfImm::make(2.f) + (x * HalfImm::make(3) + HalfImm::make(4) * y); ASSERT_EQ(body.dtype(), kDouble); } diff --git a/test/cpp/tensorexpr/tutorial.cpp b/test/cpp/tensorexpr/tutorial.cpp index ef5cb6c8393ab..6d2f21f895841 100644 --- a/test/cpp/tensorexpr/tutorial.cpp +++ b/test/cpp/tensorexpr/tutorial.cpp @@ -71,7 +71,6 @@ int main(int argc, char* argv[]) { // also be a 'Mul' or some other expression. 
// // Let's construct a simple TE: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Expr* lhs = new IntImm(5); Expr* rhs = new Var("x", kInt); Expr* mul = new Mul(lhs, rhs); @@ -98,7 +97,6 @@ int main(int argc, char* argv[]) { ExprHandle a = Var::make("a", kInt); ExprHandle b = Var::make("b", kFloat); ExprHandle c = Var::make("c", kFloat); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle x = ExprHandle(5) * a + b / (sigmoid(c) - 3.0f); std::cout << "Tensor expression: " << *x.node() << std::endl; // Prints: Tensor expression: float(5 * a) + b / ((sigmoid(c)) - 3.f) @@ -111,7 +109,6 @@ int main(int argc, char* argv[]) { // placeholder similar to Var, but with dimensions info. // // Let's construct a simple load: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) BufHandle A("A", {ExprHandle(64), ExprHandle(32)}, kInt); ExprHandle i = Var::make("i", kInt), j = Var::make("j", kInt); ExprHandle load = Load::make(A.dtype(), A, {i, j}); @@ -131,7 +128,6 @@ int main(int argc, char* argv[]) { // First, let's specify the sizes: std::vector dims = { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new IntImm(64), new IntImm(32)}; // IntImm stands for Integer Immediate // and represents an integer constant @@ -177,7 +173,6 @@ int main(int argc, char* argv[]) { // dimensions, and a lambda specifying the computation body: Tensor* Z = Compute( "Z", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {32, "j"}}, [](const VarHandle& i, const VarHandle& j) { return i / j; }); std::cout << "Tensor computation: " << *Z << std::endl; @@ -191,11 +186,9 @@ int main(int argc, char* argv[]) { // Tensors might access other tensors and external placeholders in their // expressions. It can be done like so: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder P("P", kInt, {64, 32}); Tensor* R = Compute( "R", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {32, "j"}}, [&](const VarHandle& i, const VarHandle& j) { return Z->load(i, j) * P.load(i, j); @@ -229,20 +222,16 @@ int main(int argc, char* argv[]) { // the computation (how to compute?). // // Let's create a simple tensor expression and construct a loop nest for it. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder A("A", kFloat, {64, 32}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder B("B", kFloat, {64, 32}); Tensor* X = Compute( "X", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {32, "j"}}, [&](const VarHandle& i, const VarHandle& j) { return A.load(i, j) + B.load(i, j); }); Tensor* Y = Compute( "Y", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {32, "j"}}, [&](const VarHandle& i, const VarHandle& j) { return sigmoid(X->load(i, j)); @@ -330,7 +319,6 @@ int main(int argc, char* argv[]) { For* j_inner; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) For* j_tail; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int split_factor = 9; loopnest.splitWithTail( loops[1], // loops[0] is the outer loop, loops[1] is inner @@ -367,13 +355,10 @@ int main(int argc, char* argv[]) { // section we would look at how we can bridge that gap. 
// Let's start by constructing a simple computation for us to work with: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder A("A", kInt, {64, 32}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Placeholder B("B", kInt, {64, 32}); Tensor* X = Compute( "X", - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) {{64, "i"}, {32, "j"}}, [&](const VarHandle& i, const VarHandle& j) { return A.load(i, j) + B.load(i, j); @@ -419,11 +404,8 @@ int main(int argc, char* argv[]) { // computation everything is ready. // Let's now create some inputs and run our computation with them: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector data_A(64 * 32, 3); // This will be the input A - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector data_B(64 * 32, 5); // This will be the input B - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::vector data_X(64 * 32, 0); // This will be used for the result // Now let's invoke our codegen to perform the computation on our data. We @@ -440,13 +422,10 @@ int main(int argc, char* argv[]) { // Let's print one of the elements from each array to verify that the // computation did happen: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::cout << "A[10] = " << data_A[10] << std::endl - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << "B[10] = " << data_B[10] << std::endl - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << "X[10] = A[10] + B[10] = " << data_X[10] << std::endl; // Prints: // A[10] = 3 diff --git a/test/mobile/nnc/test_context.cpp b/test/mobile/nnc/test_context.cpp index c5e30511bce38..3022695f51399 100644 --- a/test/mobile/nnc/test_context.cpp +++ b/test/mobile/nnc/test_context.cpp @@ -80,22 +80,15 @@ TEST(Function, Serialization) { Function f; f.set_name("test_function"); f.set_nnc_kernel_id("test_kernel"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) f.set_input_specs({create_test_input_spec({1, 3, 224, 224})}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) f.set_output_spec({create_test_output_spec({1000})}); f.set_parameters({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::ones({1, 16, 3, 3}, at::kFloat), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::ones({16, 32, 1, 1}, at::kFloat), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) at::ones({32, 1, 3, 3}, at::kFloat) }); f.set_memory_plan(create_test_memory_plan({ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sizeof(float) * 1024, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sizeof(float) * 2048, })); diff --git a/third_party/miniz-2.0.8/miniz.c b/third_party/miniz-2.0.8/miniz.c index d08434ce86233..9a1ff5f67f320 100755 --- a/third_party/miniz-2.0.8/miniz.c +++ b/third_party/miniz-2.0.8/miniz.c @@ -28,7 +28,6 @@ typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 
1 : -1]; #ifdef __cplusplus @@ -41,13 +40,11 @@ mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-init-variables) mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; @@ -55,22 +52,16 @@ mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 += ptr[5], s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 += ptr[6], s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) block_len = 5552; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return (s2 << 16) + s1; } @@ -198,7 +189,6 @@ const char *mz_version(void) int mz_deflateInit(mz_streamp pStream, int level) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } @@ -210,7 +200,6 @@ int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, if (!pStream) return MZ_STREAM_ERROR; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; @@ -337,7 +326,6 @@ int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char memset(&stream, 0, sizeof(stream)); /* In case mz_ulong is 64-bits (argh I hate longs). */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; @@ -558,7 +546,6 @@ int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char memset(&stream, 0, sizeof(stream)); /* In case mz_ulong is 64-bits (argh I hate longs). 
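The bare constants left behind in the mz_adler32 loop earlier in this hunk are the standard Adler-32 parameters: 65521 is the largest prime below 2^16, and 5552 is the largest block length for which the two unsigned 32-bit running sums cannot overflow before the modulo is taken. For comparison, a naive reference implementation (a sketch only, not miniz's unrolled version):

#include <cstddef>
#include <cstdint>

std::uint32_t adler32_reference(const std::uint8_t* data, std::size_t len) {
  std::uint32_t s1 = 1;  // Adler-32 starts from s1 = 1, s2 = 0
  std::uint32_t s2 = 0;
  for (std::size_t i = 0; i < len; ++i) {
    s1 = (s1 + data[i]) % 65521u;  // 65521: largest prime below 2^16
    s2 = (s2 + s1) % 65521u;
  }
  return (s2 << 16) | s1;
}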
*/ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; @@ -744,29 +731,21 @@ static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *p for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) hist[freq & 0xFF]++; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) hist[256 + ((freq >> 8) & 0xFF)]++; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) const mz_uint32 *pHist = &hist[pass << 8]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint offsets[256], cur_ofs = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; @@ -993,22 +972,16 @@ static void tdefl_start_dynamic_block(tdefl_compressor *d) int num_lit_codes, num_dist_codes, num_bit_lengths; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_huff_count[0][256] = 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--) if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--) if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break; @@ -1027,7 +1000,6 @@ static void tdefl_start_dynamic_block(tdefl_compressor *d) if (!code_size) { TDEFL_RLE_PREV_CODE_SIZE(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (++rle_z_count == 138) { TDEFL_RLE_ZERO_CODE_SIZE(); @@ -1042,7 +1014,6 @@ static void tdefl_start_dynamic_block(tdefl_compressor *d) d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1); packed_code_sizes[num_packed_code_sizes++] = code_size; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) else if (++rle_repeat_count == 6) { TDEFL_RLE_PREV_CODE_SIZE(); @@ -1059,7 +1030,6 @@ static void tdefl_start_dynamic_block(tdefl_compressor *d) TDEFL_RLE_ZERO_CODE_SIZE(); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE); TDEFL_PUT_BITS(2, 2); @@ -1067,7 +1037,6 @@ static void tdefl_start_dynamic_block(tdefl_compressor *d) TDEFL_PUT_BITS(num_lit_codes - 257, 5); TDEFL_PUT_BITS(num_dist_codes - 1, 
5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--) if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break; @@ -1081,7 +1050,6 @@ static void tdefl_start_dynamic_block(tdefl_compressor *d) mz_uint code = packed_code_sizes[packed_code_sizes_index++]; MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2); TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (code >= 16) // NOLINTNEXTLINE(bugprone-signed-char-misuse) TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]); @@ -1094,28 +1062,18 @@ static void tdefl_start_static_block(tdefl_compressor *d) mz_uint i; mz_uint8 *p = &d->m_huff_code_sizes[0][0]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i <= 143; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i <= 255; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 9; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i <= 279; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i <= 287; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 8; memset(d->m_huff_code_sizes[1], 5, 32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE); TDEFL_PUT_BITS(1, 2); @@ -1229,13 +1187,11 @@ static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) { if (flags == 1) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) flags = *pLZ_codes++ | 0x100; if (flags & 1) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint sym, num_extra_bits; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8)); pLZ_codes += 3; @@ -1243,7 +1199,6 @@ static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]); TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (match_dist < 512) { sym = s_tdefl_small_dist_sym[match_dist]; @@ -1251,9 +1206,7 @@ static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) sym = s_tdefl_large_dist_sym[match_dist >> 8]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8]; } MZ_ASSERT(d->m_huff_code_sizes[1][sym]); @@ -1295,7 +1248,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? 
((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf; d->m_pOutput_buf = pOutput_buf_start; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16; MZ_ASSERT(!d->m_output_flush_remaining); @@ -1303,7 +1255,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) d->m_output_flush_remaining = 0; *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_pLZ_code_buf -= (d->m_num_flags_left == 8); if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) @@ -1319,7 +1270,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) saved_bits_in = d->m_bits_in; if (!use_raw_block) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48)); /* If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. */ @@ -1335,7 +1285,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) { TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16); @@ -1368,7 +1317,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) for (i = 0; i < 4; i++) { TDEFL_PUT_BITS((a >> 24) & 0xFF, 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) a <<= 8; } } @@ -1382,7 +1330,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) { TDEFL_PUT_BITS(0, 8 - d->m_bits_in); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 2; i; --i, z ^= 0xFFFF) { TDEFL_PUT_BITS(z & 0xFFFF, 16); @@ -1397,7 +1344,6 @@ static int tdefl_flush_block(tdefl_compressor *d, int flush) d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_num_flags_left = 8; d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes; d->m_total_lz_bytes = 0; @@ -1506,7 +1452,6 @@ static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahe { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) const mz_uint8 *s = d->m_dict + pos, *p, *q; @@ -1730,7 +1675,6 @@ static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 li *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); if (--d->m_num_flags_left == 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } @@ -1749,26 +1693,19 @@ static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN); match_dist -= 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8); d->m_pLZ_code_buf += 3; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); if (--d->m_num_flags_left == 0) { - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_num_flags_left = 8; d->m_pLZ_flags = d->m_pLZ_code_buf++; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s0 = s_tdefl_small_dist_sym[match_dist & 511]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) @@ -1857,7 +1794,6 @@ static mz_bool tdefl_compress_normal(tdefl_compressor *d) { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; @@ -1867,7 +1803,6 @@ static mz_bool tdefl_compress_normal(tdefl_compressor *d) if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); @@ -1890,7 +1825,6 @@ static mz_bool tdefl_compress_normal(tdefl_compressor *d) } else if (!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); @@ -1908,9 +1842,7 @@ static mz_bool tdefl_compress_normal(tdefl_compressor *d) d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); /* Check if it's time to flush the current LZ codes to the internal output buffer. 
*/ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) @@ -2027,10 +1959,8 @@ tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_fun d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); @@ -2038,7 +1968,6 @@ tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_fun d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; @@ -2194,7 +2123,6 @@ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { @@ -2202,7 +2130,6 @@ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int return NULL; } /* write dummy header */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); /* compress image data */ @@ -2220,54 +2147,33 @@ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int return NULL; } /* write real header */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x0a, 0x1a, 0x0a, 0x00, 0x00, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x00, 0x0d, 0x49, 0x48, 0x44, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x52, 0x00, 0x00, 0x00, 0x00, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x00, 0x00, 0x49, 0x44, 0x41, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 0x54 }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[18] = (mz_uint8)(w >> 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[19] = (mz_uint8)w; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[22] = (mz_uint8)(h >> 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[23] = (mz_uint8)h; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[25] = 
chans[num_chans]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[33] = (mz_uint8)(*pLen_out >> 24); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[34] = (mz_uint8)(*pLen_out >> 16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[35] = (mz_uint8)(*pLen_out >> 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pnghdr[36] = (mz_uint8)*pLen_out; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i < 4; ++i, c <<= 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } /* write footer (IDAT CRC-32, followed by IEND chunk) */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; @@ -2275,14 +2181,10 @@ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int MZ_FREE(out_buf.m_pBuf); return NULL; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i < 4; ++i, c <<= 8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); /* compute final size of file, grab compressed data buffer and return */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; @@ -2290,7 +2192,6 @@ void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { /* Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's where #defined out) */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } @@ -2527,7 +2428,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) // NOLINTNEXTLINE(bugprone-misplaced-widening-cast,cppcoreguidelines-avoid-magic-numbers) @@ -2552,7 +2452,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); @@ -2597,26 +2496,16 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex mz_uint8 *p = r->m_tables[0].m_code_size; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint i; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r->m_table_sizes[0] = 288; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 
5, 32); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i <= 143; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i <= 255; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 9; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i <= 279; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (; i <= 287; ++i) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) *p++ = 8; } else @@ -2634,7 +2523,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) @@ -2653,13 +2541,11 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED); @@ -2675,7 +2561,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { @@ -2715,13 +2600,11 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); @@ -2729,7 +2612,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,bugprone-signed-char-misuse) num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; @@ -2751,7 +2633,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) @@ -2767,12 +2648,10 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint code_len; #if TINFL_USE_64BIT_BITBUF - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) num_bits += 32; } #else @@ -2784,7 +2663,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex } #endif if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) code_len = sym2 >> 9; else { @@ -2797,7 +2675,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex counter = sym2; bit_buf >>= code_len; num_bits -= code_len; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (counter & 256) break; @@ -2810,7 +2687,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex } #endif if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) code_len = sym2 >> 9; else { @@ -2824,7 +2700,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (sym2 & 256) { pOut_buf_cur++; @@ -2835,13 +2710,10 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex pOut_buf_cur += 2; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((counter &= 511) == 256) break; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) num_extra = s_length_extra[counter - 257]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) counter = s_length_base[counter - 257]; if (num_extra) { @@ -2928,11 +2800,9 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */ /* I'm being super conservative here. A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. 
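The 143/255/279/287 boundaries with code sizes 8/9/7/8, which appear both in tdefl_start_static_block and in the decompressor's static-table setup above, are DEFLATE's fixed Huffman code lengths from RFC 1951 §3.2.6; all 32 distance codes use 5 bits, hence the memset(..., 5, 32). The same table restated as a self-contained sketch:

#include <array>
#include <cstdint>

std::array<std::uint8_t, 288> fixed_literal_length_code_sizes() {
  std::array<std::uint8_t, 288> sizes{};
  for (int i = 0;   i <= 143; ++i) sizes[i] = 8;  // literals 0..143
  for (int i = 144; i <= 255; ++i) sizes[i] = 9;  // literals 144..255
  for (int i = 256; i <= 279; ++i) sizes[i] = 7;  // end-of-block and length codes 257..279
  for (int i = 280; i <= 287; ++i) sizes[i] = 8;  // length codes 280..287 (286/287 unused)
  return sizes;
}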
*/ TINFL_SKIP_BITS(32, num_bits & 7); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8)) { --pIn_buf_cur; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) num_bits -= 8; } bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1); @@ -2948,7 +2818,6 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } @@ -2962,11 +2831,9 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop. */ if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS)) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8)) { --pIn_buf_cur; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) num_bits -= 8; } } @@ -2984,11 +2851,9 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex size_t buf_len = *pOut_buf_size; // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,cppcoreguidelines-init-variables) mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t block_len = buf_len % 5552; while (buf_len) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; @@ -2996,22 +2861,16 @@ tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_nex s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 += ptr[5], s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 += ptr[6], s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) block_len = 5552; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) r->m_check_adler32 = (s2 << 16) + s1; if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32)) status = TINFL_STATUS_ADLER32_MISMATCH; @@ -3045,9 +2904,7 @@ void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, siz if (status == TINFL_STATUS_DONE) break; new_out_buf_capacity = out_buf_capacity * 2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (new_out_buf_capacity < 128) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) new_out_buf_capacity = 128; pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity); if (!pNew_buf) @@ -3673,7 +3530,6 @@ static mz_bool mz_zip_reader_locate_header_sig(mz_zip_archive *pZip, mz_uint32 r { // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_int64 cur_file_ofs; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; @@ -3726,7 +3582,6 @@ static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flag // 
NOLINTNEXTLINE(cppcoreguidelines-init-variables) const mz_uint8 *p; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); @@ -3790,7 +3645,6 @@ static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flag mz_uint64 zip64_size_of_end_of_central_dir_record = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS); mz_uint64 zip64_size_of_central_directory = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_SIZE_OFS); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (zip64_size_of_end_of_central_dir_record < (MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - 12)) return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED); @@ -4267,7 +4121,6 @@ mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_ind /* Bugfix: This code was also checking if the internal attribute was non-zero, which wasn't correct. */ /* Most/all zip writers (hopefully) set DOS file/directory attributes in the low 16-bits, so check for the DOS directory flag and ignore the source OS ID in the created by field. */ /* FIXME: Remove this check? Is it necessary - we already check the filename. */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) attribute_mapping_id = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS) >> 8; (void)attribute_mapping_id; @@ -4641,7 +4494,6 @@ mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file { /* Temporarily allocate a read buffer. */ read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF)) return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); @@ -4740,7 +4592,6 @@ void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, si uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS); alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? 
comp_size : uncomp_size; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) { mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR); @@ -5442,7 +5293,6 @@ mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint f local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS); local_header_crc32 = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_CRC32_OFS); local_header_bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) has_data_descriptor = (local_header_bit_flags & 8) != 0; if (local_header_filename_len != strlen(file_stat.m_filename)) @@ -5524,7 +5374,6 @@ mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint f /* I've seen zips in the wild with the data descriptor bit set, but proper local header values and bogus data descriptors */ if ((has_data_descriptor) && (!local_header_comp_size) && (!local_header_crc32)) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint8 descriptor_buf[32]; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_bool has_id; @@ -5534,7 +5383,6 @@ mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint f mz_uint32 file_crc32; mz_uint64 comp_size = 0, uncomp_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint32 num_descriptor_uint32s = ((pState->m_zip64) || (found_zip64_ext_data_in_ldir)) ? 6 : 4; if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size, descriptor_buf, sizeof(mz_uint32) * num_descriptor_uint32s) != (sizeof(mz_uint32) * num_descriptor_uint32s)) @@ -5743,23 +5591,18 @@ mz_bool mz_zip_validate_file_archive(const char *pFilename, mz_uint flags, mz_zi static MZ_FORCEINLINE void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) p[1] = (mz_uint8)(v >> 8); } static MZ_FORCEINLINE void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) p[1] = (mz_uint8)(v >> 8); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) p[2] = (mz_uint8)(v >> 16); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) p[3] = (mz_uint8)(v >> 24); } static MZ_FORCEINLINE void mz_write_le64(mz_uint8 *p, mz_uint64 v) { mz_write_le32(p, (mz_uint32)v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_write_le32(p + sizeof(mz_uint32), (mz_uint32)(v >> 32)); } @@ -5777,7 +5620,6 @@ static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const vo return 0; /* An allocation this big is likely to just fail on 32-bit systems, so don't even go there. 
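The repeated guard of the form (sizeof(size_t) == sizeof(mz_uint32)) && (size > 0x7FFFFFFF) in these extraction and write paths rejects buffers larger than 2^31 - 1 bytes when size_t is only 32 bits wide. The same intent spelled out against <limits> (a sketch, not the miniz code):

#include <cstddef>
#include <cstdint>
#include <limits>

bool too_large_for_32bit_size_t(std::uint64_t n) {
  // 0x7FFFFFFF == std::numeric_limits<std::int32_t>::max() == 2^31 - 1
  return sizeof(std::size_t) == sizeof(std::uint32_t) &&
         n > static_cast<std::uint64_t>(std::numeric_limits<std::int32_t>::max());
}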
*/ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)) { mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE); @@ -6242,7 +6084,6 @@ static mz_bool mz_zip_writer_add_to_central_dir(mz_zip_archive *pZip, const char if (!pZip->m_pState->m_zip64) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (local_header_ofs > 0xFFFFFFFF) return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE); } @@ -6298,7 +6139,6 @@ static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_ar static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) @@ -6349,7 +6189,6 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME)) bit_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); @@ -6370,7 +6209,6 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n pState->m_zip64 = MZ_TRUE; /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */ } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) { pState->m_zip64 = MZ_TRUE; @@ -6424,7 +6262,6 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + user_extra_data_len + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + user_extra_data_central_len -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) + MZ_ZIP_DATA_DESCRIPTER_SIZE32) > 0xFFFFFFFF) { pState->m_zip64 = MZ_TRUE; @@ -6551,7 +6388,6 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { @@ -6949,7 +6785,6 @@ mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array *pNew_ext, mz_zip_archive *pZip, const mz_uint8 *pExt, uint32_t ext_len, mz_uint64 *pComp_size, mz_uint64 *pUncomp_size, mz_uint64 *pLocal_header_ofs, mz_uint32 *pDisk_start) { /* + 64 should be enough for any new zip64 data */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (!mz_zip_array_reserve(pZip, pNew_ext, ext_len + 64, MZ_FALSE)) return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED); @@ -6957,7 +6792,6 @@ static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array *pNew_ext if ((pUncomp_size) || (pComp_size) || (pLocal_header_ofs) || (pDisk_start)) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint8 new_ext_block[64]; mz_uint8 *pDst = new_ext_block; mz_write_le16(pDst, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID); @@ -7080,7 +6914,6 @@ mz_bool 
mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive * src_central_dir_following_data_size = src_filename_len + src_ext_len + src_comment_len; /* TODO: We don't support central dir's >= MZ_UINT32_MAX bytes right now (+32 fudge factor in case we need to add more extra data) */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if ((pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + 32) >= MZ_UINT32_MAX) return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE); @@ -7194,7 +7027,6 @@ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive * /* Try to detect if the new archive will most likely wind up too big and bail early (+(sizeof(mz_uint32) * 4) is for the optional descriptor which could be present, +64 is a fudge factor). */ /* We also check when the archive is finalized so this doesn't need to be perfect. */ mz_uint64 approx_new_archive_size = cur_dst_file_ofs + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + src_archive_bytes_remaining + (sizeof(mz_uint32) * 4) + - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 64; if (approx_new_archive_size >= MZ_UINT32_MAX) @@ -7245,7 +7077,6 @@ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive * /* Now deal with the optional data descriptor */ bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (bit_flags & 8) { /* Copy data descriptor */ @@ -7258,14 +7089,12 @@ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive * /* crc 1 */ /* comp_size 2 */ /* uncomp_size 2 */ - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, (sizeof(mz_uint32) * 6)) != (sizeof(mz_uint32) * 6)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID) ? 
6 : 5); } else @@ -7295,7 +7124,6 @@ mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive * mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 2, src_comp_size); mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 4, src_uncomp_size); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) n = sizeof(mz_uint32) * 6; } else @@ -7419,7 +7247,6 @@ mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) mz_zip_internal_state *pState; // NOLINTNEXTLINE(cppcoreguidelines-init-variables) mz_uint64 central_dir_ofs, central_dir_size; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) mz_uint8 hdr[256]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) diff --git a/torch/csrc/api/include/torch/data/datasets/chunk.h b/torch/csrc/api/include/torch/data/datasets/chunk.h index 638cedbeefb66..2acac0cf0b6a9 100644 --- a/torch/csrc/api/include/torch/data/datasets/chunk.h +++ b/torch/csrc/api/include/torch/data/datasets/chunk.h @@ -251,7 +251,6 @@ struct ChunkDatasetOptions { ChunkDatasetOptions( size_t preloader_count, size_t batch_size, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t cache_size = 2048, size_t cross_chunk_shuffle_count = 1) : preloader_count_(preloader_count), @@ -283,7 +282,6 @@ struct ChunkDatasetOptions { TORCH_ARG(size_t, batch_size); /// The capacity of the queue for batch caching. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(size_t, cache_size) = 2048; // The number of chunks to perfrom cross-chunk shuffling. Default to 1 meaning diff --git a/torch/csrc/api/include/torch/linalg.h b/torch/csrc/api/include/torch/linalg.h index 0f656a15fc369..7b1fa112855c9 100644 --- a/torch/csrc/api/include/torch/linalg.h +++ b/torch/csrc/api/include/torch/linalg.h @@ -353,12 +353,10 @@ inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) { /// Computes pseudo-inverse /// /// See https://pytorch.org/docs/master/linalg.html#torch.linalg.pinv -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inline Tensor pinv(const Tensor& input, double rcond=1e-15, bool hermitian=false) { return detail::pinv(input, rcond, hermitian); } -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inline Tensor& pinv_out(Tensor& result, const Tensor& input, double rcond=1e-15, bool hermitian=false) { return detail::pinv_out(result, input, rcond, hermitian); } diff --git a/torch/csrc/api/include/torch/nn/functional/distance.h b/torch/csrc/api/include/torch/nn/functional/distance.h index 6743ae1773991..41d2d6fa11d16 100644 --- a/torch/csrc/api/include/torch/nn/functional/distance.h +++ b/torch/csrc/api/include/torch/nn/functional/distance.h @@ -82,7 +82,6 @@ inline Tensor pairwise_distance( /// Computes the p-norm distance between every pair of row vectors in the input. /// This function will be faster if the rows are contiguous. -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inline Tensor pdist(const Tensor& input, double p = 2.0) { return torch::pdist(input, p); } diff --git a/torch/csrc/api/include/torch/nn/functional/loss.h b/torch/csrc/api/include/torch/nn/functional/loss.h index 7fc3944c4d9b0..ea2f6066ddf15 100644 --- a/torch/csrc/api/include/torch/nn/functional/loss.h +++ b/torch/csrc/api/include/torch/nn/functional/loss.h @@ -310,7 +310,6 @@ inline Tensor cosine_embedding_loss( inline Tensor _smooth_l1_loss(const Tensor& input, const Tensor& target, double beta = 1.) 
{ auto t = torch::abs(input - target); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return torch::where(t < beta, 0.5 * torch::pow(t, 2) / beta, t - 0.5 * beta); } diff --git a/torch/csrc/api/include/torch/nn/functional/padding.h b/torch/csrc/api/include/torch/nn/functional/padding.h index 78da6462aeeb3..431ffd852eff8 100644 --- a/torch/csrc/api/include/torch/nn/functional/padding.h +++ b/torch/csrc/api/include/torch/nn/functional/padding.h @@ -21,9 +21,7 @@ inline Tensor _pad_circular(Tensor input, IntArrayRef padding) { } if (padding_size > 4) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input = torch::cat({input, _narrow_with_range(input, 4, 0, padding[-5 + padding_size])}, /*dim=*/4); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) input = torch::cat({_narrow_with_range(input, 4, -(padding[-5 + padding_size] + padding[-6 + padding_size]), -padding[-5 + padding_size]), input}, /*dim=*/4); } @@ -68,7 +66,6 @@ inline Tensor pad(const Tensor& input, } else { TORCH_CHECK(false, "NotImplementedError"); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (input.dim() == 5) { TORCH_CHECK(pad.size() == 6, "5D tensors expect 6 values for padding"); if (c10::get_if(&mode)) { diff --git a/torch/csrc/api/include/torch/nn/functional/upsampling.h b/torch/csrc/api/include/torch/nn/functional/upsampling.h index 0a74ca508659a..237b3acc69105 100644 --- a/torch/csrc/api/include/torch/nn/functional/upsampling.h +++ b/torch/csrc/api/include/torch/nn/functional/upsampling.h @@ -112,7 +112,6 @@ inline Tensor interpolate( } else if (input.dim() == 4 && c10::get_if(&mode)) { return torch::upsample_nearest2d(input, _interp_output_size(2, closed_over_args), scale_factor_list.at(0), scale_factor_list.at(1)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (input.dim() == 5 && c10::get_if(&mode)) { return torch::upsample_nearest3d(input, _interp_output_size(3, closed_over_args), scale_factor_list.at(0), scale_factor_list.at(1), scale_factor_list.at(2)); @@ -120,7 +119,6 @@ inline Tensor interpolate( return detail::adaptive_avg_pool1d(input, _interp_output_size(1, closed_over_args)); } else if (input.dim() == 4 && c10::get_if(&mode)) { return detail::adaptive_avg_pool2d(input, _interp_output_size(2, closed_over_args)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (input.dim() == 5 && c10::get_if(&mode)) { return detail::adaptive_avg_pool3d(input, _interp_output_size(3, closed_over_args)); } else if (input.dim() == 3 && c10::get_if(&mode)) { @@ -138,13 +136,10 @@ inline Tensor interpolate( scale_factor_list.at(0), scale_factor_list.at(1)); } else if (input.dim() == 4 && c10::get_if(&mode)) { TORCH_CHECK(false, "Got 4D input, but trilinear mode needs 5D input"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (input.dim() == 5 && c10::get_if(&mode)) { TORCH_CHECK(false, "Got 5D input, but linear mode needs 3D input"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (input.dim() == 5 && c10::get_if(&mode)) { TORCH_CHECK(false, "Got 5D input, but bilinear mode needs 4D input"); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (input.dim() == 5 && c10::get_if(&mode)) { TORCH_INTERNAL_ASSERT(align_corners != c10::nullopt); return torch::upsample_trilinear3d(input, _interp_output_size(3, closed_over_args), *align_corners, diff --git a/torch/csrc/api/include/torch/nn/functional/vision.h b/torch/csrc/api/include/torch/nn/functional/vision.h index 
47598d6d35bb8..a19866fed6c64 100644 --- a/torch/csrc/api/include/torch/nn/functional/vision.h +++ b/torch/csrc/api/include/torch/nn/functional/vision.h @@ -25,7 +25,6 @@ inline Tensor affine_grid( size, ". Got ", theta.sizes(), "."); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (size.size() == 5) { TORCH_CHECK( theta.dim() == 3 && theta.size(-2) == 3 && theta.size(-1) == 4, diff --git a/torch/csrc/api/include/torch/nn/init.h b/torch/csrc/api/include/torch/nn/init.h index e5934295edfaa..1d6b5ec0acbba 100644 --- a/torch/csrc/api/include/torch/nn/init.h +++ b/torch/csrc/api/include/torch/nn/init.h @@ -34,7 +34,6 @@ namespace nn { namespace init { /// Return the recommended gain value for the given nonlinearity function. -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_API double calculate_gain(NonlinearityType nonlinearity, double param = 0.01); /// Fills the given `tensor` with the provided `value` in-place, and returns it. @@ -73,7 +72,6 @@ TORCH_API Tensor orthogonal_(Tensor tensor, double gain = 1.0); /// value between 0 and 1 that controls the fraction of elements in each column /// to be set to zero. /// No gradient will be recorded for this operation. -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_API Tensor sparse_(Tensor tensor, double sparsity, double std = 0.01); /// Fills the given 2-dimensional `matrix` with values drawn from a uniform diff --git a/torch/csrc/api/include/torch/nn/options/activation.h b/torch/csrc/api/include/torch/nn/options/activation.h index ff4bb20fc06e3..4cd66ff443cde 100644 --- a/torch/csrc/api/include/torch/nn/options/activation.h +++ b/torch/csrc/api/include/torch/nn/options/activation.h @@ -102,7 +102,6 @@ using GLUFuncOptions = GLUOptions; /// Hardshrink model(HardshrinkOptions().lambda(42.42)); /// ``` struct TORCH_API HardshrinkOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /* implicit */ HardshrinkOptions(double lambda = 0.5); /// the `lambda` value for the Hardshrink formulation. Default: 0.5 @@ -166,7 +165,6 @@ using HardtanhFuncOptions = HardtanhOptions; /// ``` struct TORCH_API LeakyReLUOptions { /// Controls the angle of the negative slope. Default: 1e-2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, negative_slope) = 1e-2; /// can optionally do the operation in-place. Default: False @@ -321,7 +319,6 @@ struct TORCH_API PReLUOptions { TORCH_ARG(int64_t, num_parameters) = 1; /// the initial value of `a`. Default: 0.25 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, init) = 0.25; }; @@ -393,11 +390,9 @@ using ReLU6FuncOptions = ReLU6Options; /// ``` struct TORCH_API RReLUOptions { /// lower bound of the uniform distribution. Default: 1/8 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, lower) = 1.0 / 8.0; /// upper bound of the uniform distribution. Default: 1/3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, upper) = 1.0 / 3.0; /// can optionally do the operation in-place. Default: False @@ -417,11 +412,9 @@ namespace functional { /// ``` struct TORCH_API RReLUFuncOptions { /// lower bound of the uniform distribution. Default: 1/8 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, lower) = 1.0 / 8.0; /// upper bound of the uniform distribution. 
Default: 1/3 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, upper) = 1.0 / 3.0; TORCH_ARG(bool, training) = false; @@ -475,7 +468,6 @@ struct TORCH_API SoftplusOptions { TORCH_ARG(double, beta) = 1.0; /// values above this revert to a linear function. Default: 20 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, threshold) = 20.0; }; @@ -502,7 +494,6 @@ using SoftplusFuncOptions = SoftplusOptions; /// Softshrink model(SoftshrinkOptions(42.42)); /// ``` struct TORCH_API SoftshrinkOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /* implicit */ SoftshrinkOptions(double lambda = 0.5); /// the `lambda` value for the Softshrink formulation. Default: 0.5 diff --git a/torch/csrc/api/include/torch/nn/options/adaptive.h b/torch/csrc/api/include/torch/nn/options/adaptive.h index ff375f412812f..7f73dbbd5a258 100644 --- a/torch/csrc/api/include/torch/nn/options/adaptive.h +++ b/torch/csrc/api/include/torch/nn/options/adaptive.h @@ -26,7 +26,6 @@ struct TORCH_API AdaptiveLogSoftmaxWithLossOptions { TORCH_ARG(std::vector, cutoffs); /// value used as an exponent to compute sizes of the clusters. Default: 4.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, div_value) = 4.; /// If ``true``, adds a bias term to the 'head' of diff --git a/torch/csrc/api/include/torch/nn/options/batchnorm.h b/torch/csrc/api/include/torch/nn/options/batchnorm.h index c60c98c8eb5e1..ea81afdff2f79 100644 --- a/torch/csrc/api/include/torch/nn/options/batchnorm.h +++ b/torch/csrc/api/include/torch/nn/options/batchnorm.h @@ -17,12 +17,10 @@ struct TORCH_API BatchNormOptions { /// The epsilon value added for numerical stability. /// Changing this parameter after construction __is effective__. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; /// A momentum multiplier for the mean and variance. /// Changing this parameter after construction __is effective__. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(c10::optional, momentum) = 0.1; /// Whether to learn a scale and bias that are applied in an affine @@ -80,12 +78,10 @@ struct TORCH_API BatchNormFuncOptions { /// A momentum multiplier for the mean and variance. /// Changing this parameter after construction __is effective__. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(c10::optional, momentum) = 0.1; /// The epsilon value added for numerical stability. /// Changing this parameter after construction __is effective__. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; }; diff --git a/torch/csrc/api/include/torch/nn/options/distance.h b/torch/csrc/api/include/torch/nn/options/distance.h index f7e1f46a9119f..debb8c8c41480 100644 --- a/torch/csrc/api/include/torch/nn/options/distance.h +++ b/torch/csrc/api/include/torch/nn/options/distance.h @@ -17,7 +17,6 @@ struct TORCH_API CosineSimilarityOptions { /// Dimension where cosine similarity is computed. Default: 1 TORCH_ARG(int64_t, dim) = 1; /// Small value to avoid division by zero. Default: 1e-8 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-8; }; @@ -45,10 +44,8 @@ using CosineSimilarityFuncOptions = CosineSimilarityOptions; /// ``` struct TORCH_API PairwiseDistanceOptions { /// The norm degree. Default: 2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 2.0; /// Small value to avoid division by zero. 
Default: 1e-6 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-6; /// Determines whether or not to keep the vector dimension. Default: false TORCH_ARG(bool, keepdim) = false; diff --git a/torch/csrc/api/include/torch/nn/options/dropout.h b/torch/csrc/api/include/torch/nn/options/dropout.h index 3b36f43d4707d..511690cd79907 100644 --- a/torch/csrc/api/include/torch/nn/options/dropout.h +++ b/torch/csrc/api/include/torch/nn/options/dropout.h @@ -14,11 +14,9 @@ namespace nn { /// Dropout model(DropoutOptions().p(0.42).inplace(true)); /// ``` struct TORCH_API DropoutOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) /* implicit */ DropoutOptions(double p = 0.5); /// The probability of an element to be zeroed. Default: 0.5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 0.5; /// can optionally do the operation in-place. Default: False @@ -68,7 +66,6 @@ namespace functional { /// ``` struct TORCH_API DropoutFuncOptions { /// The probability of an element to be zeroed. Default: 0.5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 0.5; TORCH_ARG(bool, training) = true; @@ -103,7 +100,6 @@ using Dropout3dFuncOptions = DropoutFuncOptions; /// F::alpha_dropout(input, F::AlphaDropoutFuncOptions().p(0.5).training(false)); /// ``` struct TORCH_API AlphaDropoutFuncOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 0.5; TORCH_ARG(bool, training) = false; @@ -119,7 +115,6 @@ struct TORCH_API AlphaDropoutFuncOptions { /// F::feature_alpha_dropout(input, F::FeatureAlphaDropoutFuncOptions().p(0.5).training(false)); /// ``` struct TORCH_API FeatureAlphaDropoutFuncOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 0.5; TORCH_ARG(bool, training) = false; diff --git a/torch/csrc/api/include/torch/nn/options/embedding.h b/torch/csrc/api/include/torch/nn/options/embedding.h index 4dbbdd65fceea..5f2e7ea9fb4c0 100644 --- a/torch/csrc/api/include/torch/nn/options/embedding.h +++ b/torch/csrc/api/include/torch/nn/options/embedding.h @@ -30,7 +30,6 @@ struct TORCH_API EmbeddingOptions { /// If given, each embedding vector with norm larger than `max_norm` is renormalized to have norm `max_norm`. TORCH_ARG(c10::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``false``. TORCH_ARG(bool, scale_grad_by_freq) = false; @@ -54,7 +53,6 @@ struct TORCH_API EmbeddingFromPretrainedOptions { /// If given, each embedding vector with norm larger than `max_norm` is renormalized to have norm `max_norm`. TORCH_ARG(c10::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``false``. TORCH_ARG(bool, scale_grad_by_freq) = false; @@ -81,7 +79,6 @@ struct TORCH_API EmbeddingFuncOptions { /// If given, each embedding vector with norm larger than `max_norm` is renormalized to have norm `max_norm`. TORCH_ARG(c10::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``false``. TORCH_ARG(bool, scale_grad_by_freq) = false; @@ -111,7 +108,6 @@ struct TORCH_API EmbeddingBagOptions { /// If given, each embedding vector with norm larger than `max_norm` is renormalized to have norm `max_norm`. TORCH_ARG(c10::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``false``. /// Note: this option is not supported when ``mode="kMax"``. @@ -146,7 +142,6 @@ struct TORCH_API EmbeddingBagFromPretrainedOptions { /// If given, each embedding vector with norm larger than `max_norm` is renormalized to have norm `max_norm`. TORCH_ARG(c10::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``false``. /// Note: this option is not supported when ``mode="kMax"``. @@ -186,7 +181,6 @@ struct TORCH_API EmbeddingBagFuncOptions { /// If given, each embedding vector with norm larger than `max_norm` is renormalized to have norm `max_norm`. TORCH_ARG(c10::optional, max_norm) = c10::nullopt; /// The p of the p-norm to compute for the `max_norm` option. Default ``2``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, norm_type) = 2.; /// If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``false``. /// Note: this option is not supported when ``mode="kMax"``. diff --git a/torch/csrc/api/include/torch/nn/options/instancenorm.h b/torch/csrc/api/include/torch/nn/options/instancenorm.h index 5fb8258b52819..0ef2f7b8b1a51 100644 --- a/torch/csrc/api/include/torch/nn/options/instancenorm.h +++ b/torch/csrc/api/include/torch/nn/options/instancenorm.h @@ -16,11 +16,9 @@ struct TORCH_API InstanceNormOptions { TORCH_ARG(int64_t, num_features); /// The epsilon value added for numerical stability. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; /// A momentum multiplier for the mean and variance. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, momentum) = 0.1; /// Whether to learn a scale and bias that are applied in an affine @@ -76,10 +74,8 @@ struct TORCH_API InstanceNormFuncOptions { TORCH_ARG(bool, use_input_stats) = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, momentum) = 0.1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; }; diff --git a/torch/csrc/api/include/torch/nn/options/loss.h b/torch/csrc/api/include/torch/nn/options/loss.h index b0b222446818b..d8ffd15c8660a 100644 --- a/torch/csrc/api/include/torch/nn/options/loss.h +++ b/torch/csrc/api/include/torch/nn/options/loss.h @@ -362,9 +362,7 @@ struct TORCH_API TripletMarginLossOptions { /// reach in order to incur zero loss. Default: 1 TORCH_ARG(double, margin) = 1.0; /// Specifies the norm degree for pairwise distance. 
Default: 2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 2.0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-6; /// The distance swap is described in detail in the paper Learning shallow /// convolutional feature descriptors with triplet losses by V. Balntas, @@ -559,7 +557,6 @@ struct TORCH_API PoissonNLLLossOptions { TORCH_ARG(bool, full) = false; /// Small value to avoid evaluation of `log(0)` when `log_input = false`. /// Default: 1e-8 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-8; /// Specifies the reduction to apply to the output. Default: Mean TORCH_ARG(reduction_t, reduction) = torch::kMean; @@ -627,7 +624,6 @@ struct TORCH_API NLLLossOptions { TORCH_ARG(Tensor, weight) = {}; /// Specifies a target value that is ignored /// and does not contribute to the input gradient. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, ignore_index) = -100; /// Specifies the reduction to apply to the output. Default: Mean TORCH_ARG(reduction_t, reduction) = torch::kMean; @@ -663,7 +659,6 @@ struct TORCH_API CrossEntropyLossOptions { TORCH_ARG(Tensor, weight) = {}; /// Specifies a target value that is ignored /// and does not contribute to the input gradient. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, ignore_index) = -100; /// Specifies the reduction to apply to the output. Default: Mean TORCH_ARG(reduction_t, reduction) = torch::kMean; diff --git a/torch/csrc/api/include/torch/nn/options/normalization.h b/torch/csrc/api/include/torch/nn/options/normalization.h index 9ab8a99648648..c47f19129f70d 100644 --- a/torch/csrc/api/include/torch/nn/options/normalization.h +++ b/torch/csrc/api/include/torch/nn/options/normalization.h @@ -19,7 +19,6 @@ struct TORCH_API LayerNormOptions { /// input shape from an expected input. TORCH_ARG(std::vector, normalized_shape); /// a value added to the denominator for numerical stability. ``Default: 1e-5``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; /// a boolean value that when set to ``true``, this module /// has learnable per-element affine parameters initialized to ones (for weights) @@ -48,7 +47,6 @@ struct TORCH_API LayerNormFuncOptions { TORCH_ARG(Tensor, bias) = {}; /// a value added to the denominator for numerical stability. ``Default: 1e-5``. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; }; @@ -68,11 +66,9 @@ struct TORCH_API LocalResponseNormOptions { TORCH_ARG(int64_t, size); /// multiplicative factor. Default: 1e-4 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, alpha) = 1e-4; /// exponent. Default: 0.75 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, beta) = 0.75; /// additive factor. Default: 1 @@ -106,10 +102,8 @@ struct TORCH_API CrossMapLRN2dOptions { TORCH_ARG(int64_t, size); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, alpha) = 1e-4; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, beta) = 0.75; TORCH_ARG(int64_t, k) = 1; @@ -129,12 +123,10 @@ namespace functional { /// ``` struct TORCH_API NormalizeFuncOptions { /// The exponent value in the norm formulation. Default: 2.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, p) = 2.0; /// The dimension to reduce. 
Default: 1 TORCH_ARG(int64_t, dim) = 1; /// Small value to avoid division by zero. Default: 1e-12 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-12; /// the output tensor. If `out` is used, this /// operation won't be differentiable. @@ -159,7 +151,6 @@ struct TORCH_API GroupNormOptions { /// number of channels expected in input TORCH_ARG(int64_t, num_channels); /// a value added to the denominator for numerical stability. Default: 1e-5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; /// a boolean value that when set to ``true``, this module /// has learnable per-channel affine parameters initialized to ones (for weights) @@ -189,7 +180,6 @@ struct TORCH_API GroupNormFuncOptions { TORCH_ARG(Tensor, bias) = {}; /// a value added to the denominator for numerical stability. Default: 1e-5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-5; }; diff --git a/torch/csrc/api/include/torch/nn/options/transformer.h b/torch/csrc/api/include/torch/nn/options/transformer.h index 87ffb04ad6755..1ea09b6fff847 100644 --- a/torch/csrc/api/include/torch/nn/options/transformer.h +++ b/torch/csrc/api/include/torch/nn/options/transformer.h @@ -29,27 +29,21 @@ struct TORCH_API TransformerOptions { TransformerOptions(int64_t d_model, int64_t nhead, int64_t num_encoder_layers, int64_t num_decoder_layers); /// the number of expected features in the encoder/decoder inputs (default=512) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, d_model) = 512; /// the number of heads in the multiheadattention models (default=8) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, nhead) = 8; /// the number of sub-encoder-layers in the encoder (default=6) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, num_encoder_layers) = 6; /// the number of sub-decoder-layers in the decoder (default=6) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, num_decoder_layers) = 6; /// the dimension of the feedforward network model (default=2048) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, dim_feedforward) = 2048; /// the dropout value (default=0.1) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, dropout) = 0.1; /// the activation function of encoder/decoder intermediate layer (default=``torch::kReLU``) diff --git a/torch/csrc/api/include/torch/nn/options/transformerlayer.h b/torch/csrc/api/include/torch/nn/options/transformerlayer.h index f86486af82856..d78517b5e0e01 100644 --- a/torch/csrc/api/include/torch/nn/options/transformerlayer.h +++ b/torch/csrc/api/include/torch/nn/options/transformerlayer.h @@ -27,11 +27,9 @@ struct TORCH_API TransformerEncoderLayerOptions { TORCH_ARG(int64_t, nhead); /// the dimension of the feedforward network model, default is 2048 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, dim_feedforward) = 2048; /// the dropout value, default is 0.1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, dropout) = 0.1; /// the activation function of intermediate layer, either ``torch::kReLU`` or ``torch::GELU``, default is ``torch::kReLU`` @@ -60,11 +58,9 @@ struct TORCH_API TransformerDecoderLayerOptions { TORCH_ARG(int64_t, nhead); /// dimension of the feedforward network model. 
Default: 2048 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, dim_feedforward) = 2048; /// dropout value. Default: 1 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, dropout) = 0.1; /// activation function of intermediate layer, can be either ``torch::kGELU`` or ``torch::kReLU``. Default: ``torch::kReLU`` diff --git a/torch/csrc/api/include/torch/nn/utils/clip_grad.h b/torch/csrc/api/include/torch/nn/utils/clip_grad.h index c325aaa5356e8..4074d8bd6b9d6 100644 --- a/torch/csrc/api/include/torch/nn/utils/clip_grad.h +++ b/torch/csrc/api/include/torch/nn/utils/clip_grad.h @@ -13,7 +13,6 @@ namespace utils { inline double clip_grad_norm_( std::vector parameters, double max_norm, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double norm_type = 2.0, bool error_if_nonfinite = false) { std::vector params_with_grad; @@ -54,7 +53,6 @@ inline double clip_grad_norm_( "error_if_nonfinite=false will be required to retain the old behavior."); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto clip_coef = max_norm / (total_norm + 1e-6); if (clip_coef < 1) { for (auto& param : params_with_grad) { @@ -69,7 +67,6 @@ inline double clip_grad_norm_( inline double clip_grad_norm_( std::initializer_list parameters, double max_norm, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double norm_type = 2.0, bool error_if_nonfinite = false) { return clip_grad_norm_(std::vector(parameters), max_norm, norm_type, error_if_nonfinite); @@ -80,7 +77,6 @@ inline double clip_grad_norm_( inline double clip_grad_norm_( Tensor parameter, double max_norm, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double norm_type = 2.0, bool error_if_nonfinite = false) { std::vector params = {parameter}; diff --git a/torch/csrc/api/include/torch/optim/adagrad.h b/torch/csrc/api/include/torch/optim/adagrad.h index 541f372b2d1c7..f010083eafca1 100644 --- a/torch/csrc/api/include/torch/optim/adagrad.h +++ b/torch/csrc/api/include/torch/optim/adagrad.h @@ -20,14 +20,11 @@ namespace torch { namespace optim { struct TORCH_API AdagradOptions : public OptimizerCloneableOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdagradOptions(double lr = 1e-2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, lr) = 1e-2; TORCH_ARG(double, lr_decay) = 0; TORCH_ARG(double, weight_decay) = 0; TORCH_ARG(double, initial_accumulator_value) = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-10; public: void serialize(torch::serialize::InputArchive& archive) override; diff --git a/torch/csrc/api/include/torch/optim/adam.h b/torch/csrc/api/include/torch/optim/adam.h index 80d6376f79b7a..1fbfcdbfcdcbb 100644 --- a/torch/csrc/api/include/torch/optim/adam.h +++ b/torch/csrc/api/include/torch/optim/adam.h @@ -18,14 +18,10 @@ namespace torch { namespace optim { struct TORCH_API AdamOptions : public OptimizerCloneableOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdamOptions(double lr = 1e-3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, lr) = 1e-3; typedef std::tuple betas_t; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-8; TORCH_ARG(double, weight_decay) = 0; TORCH_ARG(bool, amsgrad) = false; diff --git a/torch/csrc/api/include/torch/optim/adamw.h 
b/torch/csrc/api/include/torch/optim/adamw.h index 8dd1278255f7d..2c1f842510c4e 100644 --- a/torch/csrc/api/include/torch/optim/adamw.h +++ b/torch/csrc/api/include/torch/optim/adamw.h @@ -18,16 +18,11 @@ namespace torch { namespace optim { struct TORCH_API AdamWOptions : public OptimizerCloneableOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AdamWOptions(double lr = 1e-3); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, lr) = 1e-3; typedef std::tuple betas_t; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(betas_t, betas) = std::make_tuple(0.9, 0.999); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-8; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, weight_decay) = 1e-2; TORCH_ARG(bool, amsgrad) = false; public: diff --git a/torch/csrc/api/include/torch/optim/lbfgs.h b/torch/csrc/api/include/torch/optim/lbfgs.h index a94244bd970f5..893600c01c95e 100644 --- a/torch/csrc/api/include/torch/optim/lbfgs.h +++ b/torch/csrc/api/include/torch/optim/lbfgs.h @@ -16,14 +16,10 @@ namespace optim { struct TORCH_API LBFGSOptions : public OptimizerCloneableOptions { LBFGSOptions(double lr = 1); TORCH_ARG(double, lr) = 1; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, max_iter) = 20; TORCH_ARG(c10::optional, max_eval) = c10::nullopt; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, tolerance_grad) = 1e-7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, tolerance_change) = 1e-9; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(int64_t, history_size) = 100; TORCH_ARG(c10::optional, line_search_fn) = c10::nullopt; public: @@ -64,7 +60,6 @@ class TORCH_API LBFGS : public Optimizer { LBFGSOptions defaults = {}) : Optimizer(std::move(param_groups), std::make_unique(defaults)) { TORCH_CHECK(param_groups_.size() == 1, "LBFGS doesn't support per-parameter options (parameter groups)"); if (defaults.max_eval() == c10::nullopt) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto max_eval_val = (defaults.max_iter() * 5) / 4; static_cast(param_groups_[0].options()).max_eval(max_eval_val); static_cast(*defaults_.get()).max_eval(max_eval_val); diff --git a/torch/csrc/api/include/torch/optim/rmsprop.h b/torch/csrc/api/include/torch/optim/rmsprop.h index d5aaa901b5235..e969978f976bd 100644 --- a/torch/csrc/api/include/torch/optim/rmsprop.h +++ b/torch/csrc/api/include/torch/optim/rmsprop.h @@ -22,13 +22,9 @@ namespace torch { namespace optim { struct TORCH_API RMSpropOptions : public OptimizerCloneableOptions { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) RMSpropOptions(double lr = 1e-2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, lr) = 1e-2; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, alpha) = 0.99; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) TORCH_ARG(double, eps) = 1e-8; TORCH_ARG(double, weight_decay) = 0; TORCH_ARG(double, momentum) = 0; diff --git a/torch/csrc/api/src/data/datasets/mnist.cpp b/torch/csrc/api/src/data/datasets/mnist.cpp index 2bc833173d431..f416009c61866 100644 --- a/torch/csrc/api/src/data/datasets/mnist.cpp +++ b/torch/csrc/api/src/data/datasets/mnist.cpp @@ -31,9 +31,7 @@ bool check_is_little_endian() { } constexpr uint32_t flip_endianness(uint32_t value) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return ((value & 0xffu) << 24u) 
| ((value & 0xff00u) << 8u) | - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ((value & 0xff0000u) >> 8u) | ((value & 0xff000000u) >> 24u); } @@ -79,7 +77,6 @@ Tensor read_images(const std::string& root, bool train) { auto tensor = torch::empty({count, 1, kImageRows, kImageColumns}, torch::kByte); images.read(reinterpret_cast(tensor.data_ptr()), tensor.numel()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return tensor.to(torch::kFloat32).div_(255); } diff --git a/torch/csrc/api/src/nn/init.cpp b/torch/csrc/api/src/nn/init.cpp index 3ec07197d058b..99707cb60e60d 100644 --- a/torch/csrc/api/src/nn/init.cpp +++ b/torch/csrc/api/src/nn/init.cpp @@ -90,7 +90,6 @@ Tensor dirac_(Tensor tensor) { case 4: // Spatial convolution tensor[d][d][sizes[2] / 2][sizes[3] / 2] = 1; break; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case 5: // Volumetric convolution tensor[d][d][sizes[2] / 2][sizes[3] / 2][sizes[4] / 2] = 1; break; diff --git a/torch/csrc/autograd/FunctionsManual.cpp b/torch/csrc/autograd/FunctionsManual.cpp index 5b1c614a4b6e6..9d701608cf776 100644 --- a/torch/csrc/autograd/FunctionsManual.cpp +++ b/torch/csrc/autograd/FunctionsManual.cpp @@ -178,7 +178,6 @@ Tensor norm_backward(const Tensor& grad, const Tensor& self, const optional & p_, Tensor norm, IntArrayRef dim, bool keepdim) { size_t ndim = self.sizes().size(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) double p = p_.value_or(2.0).toDouble(); Tensor self_scaled; Tensor scale_v; @@ -192,7 +191,6 @@ Tensor norm_backward(Tensor grad, const Tensor& self, const optional & p return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } else if (p == 1.0) { return self.sgn() * grad; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p == 2.0) { self_scaled = self; scale_v = grad / norm; @@ -204,7 +202,6 @@ Tensor norm_backward(Tensor grad, const Tensor& self, const optional & p nb_max = unsqueeze_multiple(nb_max, dim, ndim); } scale_v = grad / nb_max; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (p < 2.0) { self_scaled = self.sgn() * self.abs().pow(p - 1); scale_v = grad / norm.pow(p - 1); @@ -219,7 +216,6 @@ Tensor norm_backward(Tensor grad, const Tensor& self, const optional & p Tensor linalg_vector_norm_backward(Tensor grad, const Tensor& self, const Scalar& scalar_ord, Tensor norm, const optional& opt_dim, bool keepdim) { size_t ndim = self.sizes().size(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto ord = scalar_ord.toDouble(); auto dim = opt_dim.value_or(IntArrayRef({})); Tensor self_scaled; @@ -234,7 +230,6 @@ Tensor linalg_vector_norm_backward(Tensor grad, const Tensor& self, const Scalar return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } else if (ord == 1.0) { return self.sgn() * grad; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (ord == 2.0) { self_scaled = self; scale_v = grad / norm; @@ -254,7 +249,6 @@ Tensor linalg_vector_norm_backward(Tensor grad, const Tensor& self, const Scalar nb_max = unsqueeze_multiple(nb_max, dim, ndim); } scale_v = grad / nb_max; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (ord < 2.0) { self_scaled = self.sgn() * self.abs().pow(ord - 1); scale_v = grad / norm.pow(ord - 1); @@ -334,7 +328,6 @@ Tensor angle_backward(Tensor grad, const Tensor& self) { } Tensor mvlgamma_backward(Tensor grad, const Tensor & self, int64_t p) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) Tensor args = at::arange(-p / 2. 
+ 0.5, 0.5, 0.5, self.options()); args = args.add(self.unsqueeze(-1)); return grad * args.digamma_().sum(-1); @@ -848,7 +841,6 @@ Tensor renorm_backward(const Tensor & grad, const Tensor & self, const Scalar& p auto norm_flat = self_flat.norm(p, 1, true); auto grad_output = (self_flat * grad_flat).sum(1, true); auto nb = norm_backward(grad_output, self_flat, p, norm_flat, 1, true); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto invnorm = (norm_flat + 1e-7).reciprocal(); auto grad_norm = unflatten(maxnorm * invnorm * (grad_flat - invnorm * nb)); auto norm = unflatten(norm_flat.expand_as(self_flat)); @@ -1049,11 +1041,9 @@ Tensor cholesky_backward(Tensor grad, bool upper, Tensor L) { } auto L_inverse = std::get<0>(at::triangular_solve(at::eye(L.size(-1), L.options()), L, /*upper=*/false)); auto phi = at::matmul(L.transpose(-1, -2).conj(), grad); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) phi.tril_().diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).mul_(0.5); auto grad_input = at::matmul(at::matmul(L_inverse.transpose(-1, -2).conj(), phi), L_inverse); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return grad_input.add(grad_input.transpose(-1, -2).conj()).mul_(0.5); // Symmetrizing the gradient } @@ -1273,7 +1263,6 @@ Tensor log_softmax_double_backward(const Tensor & grad, const Tensor & grad_outp // write a batching rule for it. Tensor binary_cross_entropy_double_backward(const Tensor & grad_output, const Tensor & grad, const Tensor & input, const Tensor & target, const c10::optional& weight, int64_t reduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto eps = 1e-12; auto inp_pl_eps = input + eps; auto one_m_inp_pl_eps = 1 - input + eps; @@ -1297,7 +1286,6 @@ Tensor binary_cross_entropy_double_backward(const Tensor & grad_output, const Te } Tensor binary_cross_entropy_double_backward_grad_output(const Tensor & grad, const Tensor & input, const Tensor & target, const c10::optional& weight, int64_t reduction) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto eps = 1e-12; // gradient wrt grad_output auto ggO = (input - target) / ((input + eps) * (1 - input + eps)); @@ -2379,7 +2367,6 @@ Tensor symeig_backward(const std::vector &grads, cons result = result + glambda_term; } } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return result.add(result.conj().transpose(-2, -1)).mul_(0.5); } @@ -2426,7 +2413,6 @@ Tensor linalg_qr_backward(const std::vector &grads, c // Compute M = (tril(M) + tril(M).conj().transpose(-2, -1)) * 0.5 Identity Tensor M_tril = at::tril(M); M = M_tril + M_tril.conj().transpose(-2, -1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) M.diagonal(0, -2, -1).mul_(0.5); Tensor rhs_term; @@ -2865,7 +2851,6 @@ std::tuple batchnorm_double_backward( auto input_sub_mu = input - mu; auto sigma2_eps_neg_1_2 = unsqueeze_dim1( training ? toNonOptTensor(save_invstd).to(input.scalar_type()) - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) : toNonOptTensor(running_var).add(Scalar(eps)).pow(-0.5), input); auto sigma2_eps_neg_1 = sigma2_eps_neg_1_2.pow(2); @@ -2881,7 +2866,6 @@ std::tuple batchnorm_double_backward( auto ggI_sum = sum_exclude_dim1(ggI); auto ggIinmu_sum = sum_exclude_dim1(ggI * input_sub_mu); auto all_sub = ((ggI_sum * gO_sum).div_(M)).sub_(sum_exclude_dim1(gO * ggI)).add_( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) (sigma2_eps_neg_1 * gOinmu_sum * ggIinmu_sum).mul_(3. 
/ M)); auto gI_0t = (input_mu_sigma2_neg_3_2 * all_sub).div_(M); auto gI_1t = (ggIinmu_sum * sigma2_eps_neg_3_2).div_(M) * (gO_sum.div(M) - gO); @@ -2998,7 +2982,6 @@ infinitely_differentiable_native_layer_norm_backward( Tensor dvar; if (drstd.defined()) { var = ((rstd_tensor * rstd_tensor).reciprocal_() - eps).clamp_min(0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dvar = -0.5 * rstd_cube * drstd.view({M, 1}); } Tensor ds; @@ -3099,7 +3082,6 @@ infinitely_differentiable_native_group_norm_backward( const Tensor rstd_cube = rstd_tensor * rstd_tensor * rstd_tensor; Tensor dvar; if (drstd.defined()) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) dvar = -0.5 * rstd_cube * drstd.view({N, G, 1, 1}); } if (dY.defined()) { diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp index de44e9d54e793..5a70738cbd351 100644 --- a/torch/csrc/autograd/engine.cpp +++ b/torch/csrc/autograd/engine.cpp @@ -263,7 +263,6 @@ void Engine::stop() { using namespace std::chrono_literals; // Set a deadline for how long it is OK to wait device threads to shutdown - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto wait_deadline = std::chrono::steady_clock::now() + wait_duration * 1.0s; std::unique_lock lk(non_reentrant_device_thread_mutex_); while(non_reentrant_device_thread_count_.load() != 0) { diff --git a/torch/csrc/autograd/profiler_legacy.h b/torch/csrc/autograd/profiler_legacy.h index 1a4da5b05133a..ba1c2d36b800d 100644 --- a/torch/csrc/autograd/profiler_legacy.h +++ b/torch/csrc/autograd/profiler_legacy.h @@ -91,7 +91,6 @@ inline int64_t getTime(bool allow_monotonic = false) { mode = CLOCK_MONOTONIC; } clock_gettime(mode, &t); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return static_cast(t.tv_sec) * 1000000000 + static_cast(t.tv_nsec); #endif } @@ -202,12 +201,10 @@ struct TORCH_API LegacyEvent { } void setCpuUs(int64_t cpu_us) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) cpu_ns_ = cpu_us * 1000.0; } double cpuUs() const { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return cpu_ns_ / (1000.0); } diff --git a/torch/csrc/autograd/utils/wrap_outputs.h b/torch/csrc/autograd/utils/wrap_outputs.h index 9b6675909aeec..fc63ae65af65c 100644 --- a/torch/csrc/autograd/utils/wrap_outputs.h +++ b/torch/csrc/autograd/utils/wrap_outputs.h @@ -142,7 +142,6 @@ inline PyObject* wrap(std::tuple tensors } inline PyObject* wrap(std::tuple tensors) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto r = THPObjectPtr{PyTuple_New(5)}; if (!r) throw python_error(); PyTuple_SET_ITEM(r.get(), 0, wrap(std::move(std::get<0>(tensors)))); @@ -154,7 +153,6 @@ inline PyObject* wrap(std::tuple tensors) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto r = THPObjectPtr{PyTuple_New(5)}; if (!r) throw python_error(); PyTuple_SET_ITEM(r.get(), 0, wrap(std::move(std::get<0>(tensors)))); @@ -178,7 +176,6 @@ inline PyObject* wrap(std::tuple } inline PyObject* wrap(std::tuple tensors) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto r = THPObjectPtr{PyTuple_New(5)}; if (!r) throw python_error(); PyTuple_SET_ITEM(r.get(), 0, wrap(std::move(std::get<0>(tensors)))); diff --git a/torch/csrc/cuda/Module.cpp b/torch/csrc/cuda/Module.cpp index 86073d15a5ea1..ca44fa81d2ce9 100644 --- a/torch/csrc/cuda/Module.cpp +++ b/torch/csrc/cuda/Module.cpp @@ -262,7 +262,6 @@ PyObject * THCPModule_cudaLockMutex(PyObject *module, PyObject *noargs) break; { pybind11::gil_scoped_release no_gil; - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::this_thread::sleep_for(std::chrono::microseconds(10)); } } @@ -449,7 +448,6 @@ static void registerCudaDeviceProperties(PyObject* module) { .def("__repr__", [](const cudaDeviceProp &prop) { std::ostringstream stream; stream << "_CudaDeviceProperties(name='" << prop.name << "', major=" << prop.major - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << ", minor=" << prop.minor << ", total_memory=" << prop.totalGlobalMem / (1024 * 1024) << "MB, multi_processor_count=" << prop.multiProcessorCount << ")"; return stream.str(); diff --git a/torch/csrc/deploy/deploy.cpp b/torch/csrc/deploy/deploy.cpp index d69fdefe2615b..121f4474bb7fd 100644 --- a/torch/csrc/deploy/deploy.cpp +++ b/torch/csrc/deploy/deploy.cpp @@ -124,7 +124,6 @@ int LoadBalancer::acquire() { } uint64_t prev = 0; bool acquired = __atomic_compare_exchange_n( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) &uses_[8 * last], &prev, 1ULL, @@ -146,13 +145,11 @@ int LoadBalancer::acquire() { // we failed to find a completely free interpreter. heuristically use the // one with the least number of user (note that this may have changed since // then, so this is only a heuristic). - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) __atomic_fetch_add(&uses_[8 * min_idx], 1ULL, __ATOMIC_SEQ_CST); return min_idx; } void LoadBalancer::free(int where) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) __atomic_fetch_sub(&uses_[8 * where], 1ULL, __ATOMIC_SEQ_CST); } diff --git a/torch/csrc/deploy/deploy.h b/torch/csrc/deploy/deploy.h index 7bf1fda193f9c..be9ef8cf4daa2 100644 --- a/torch/csrc/deploy/deploy.h +++ b/torch/csrc/deploy/deploy.h @@ -75,10 +75,8 @@ class TORCH_API Interpreter { struct Package; struct TORCH_API LoadBalancer { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) LoadBalancer(size_t n) : uses_(new uint64_t[8 * n]), allocated_(n), n_(n) { // 8*... 
to avoid false sharing of atomics on the same cache line - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) memset(uses_.get(), 0, 8 * n_ * sizeof(uint64_t)); } void setResourceLimit(size_t n) { diff --git a/torch/csrc/deploy/example/benchmark.cpp b/torch/csrc/deploy/example/benchmark.cpp index 4bd9af1cd4d00..693b65b7fc364 100644 --- a/torch/csrc/deploy/example/benchmark.cpp +++ b/torch/csrc/deploy/example/benchmark.cpp @@ -167,7 +167,6 @@ struct Benchmark { std::string strategy, // NOLINTNEXTLINE(modernize-pass-by-value) std::string file_to_run, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t n_seconds = 5) : manager_(manager), n_threads_(n_threads), @@ -301,7 +300,6 @@ int main(int argc, char* argv[]) { I.global("sys", "path").attr("append")({"torch/csrc/deploy/example"}); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto n_threads = {1, 2, 4, 8, 16, 32, 40}; for (int i = 4; i < argc; ++i) { std::string model_file = argv[i]; diff --git a/torch/csrc/deploy/test_deploy.cpp b/torch/csrc/deploy/test_deploy.cpp index d505e0a834f85..34fbcc8e5db7a 100644 --- a/torch/csrc/deploy/test_deploy.cpp +++ b/torch/csrc/deploy/test_deploy.cpp @@ -69,7 +69,6 @@ TEST(TorchpyTest, MultiSerialSimpleModel) { auto model = p.load_pickle("model", "model.pkl"); auto ref_model = torch::jit::load(path("SIMPLE_JIT", simple_jit)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::ones({10, 20}); size_t ninterp = 3; std::vector outputs; @@ -108,7 +107,6 @@ TEST(TorchpyTest, ThreadedSimpleModel) { auto model = p.load_pickle("model", "model.pkl"); auto ref_model = torch::jit::load(path("SIMPLE_JIT", simple_jit)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::ones({10, 20}); std::vector outputs; @@ -116,9 +114,7 @@ TEST(TorchpyTest, ThreadedSimpleModel) { std::vector> futures; for (size_t i = 0; i < nthreads; i++) { futures.push_back(std::async(std::launch::async, [&model]() { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto input = torch::ones({10, 20}); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (int i = 0; i < 100; ++i) { model({input}).toTensor(); } diff --git a/torch/csrc/distributed/rpc/rpc_agent.h b/torch/csrc/distributed/rpc/rpc_agent.h index b094b0eb5c5e7..dd42368711664 100644 --- a/torch/csrc/distributed/rpc/rpc_agent.h +++ b/torch/csrc/distributed/rpc/rpc_agent.h @@ -95,14 +95,11 @@ struct TORCH_API RpcRetryOptions { // sendWithRetries function. 
RpcRetryOptions() = default; // Maximum number of times we will retry the RPC - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int maxRetries{5}; // Initial duration between consecutive RPC send attempts - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::chrono::milliseconds rpcRetryDuration{std::chrono::milliseconds(1000)}; // Constant for exponential backoff used while calculating future wait // durations - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) float retryBackoff{1.5}; }; diff --git a/torch/csrc/generic/Storage.cpp b/torch/csrc/generic/Storage.cpp index 4dc29c8624437..8306a908c1888 100644 --- a/torch/csrc/generic/Storage.cpp +++ b/torch/csrc/generic/Storage.cpp @@ -122,7 +122,6 @@ static PyObject * THPStorage_(pynew)(PyTypeObject *type, PyObject *args, PyObjec return (PyObject*)self.release(); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) THPUtils_invalidArguments(args, kwargs, THPStorageStr " constructor", 6, "no arguments", "(int size)", diff --git a/torch/csrc/generic/serialization.cpp b/torch/csrc/generic/serialization.cpp index 05a4b271f9a37..769071902c977 100644 --- a/torch/csrc/generic/serialization.cpp +++ b/torch/csrc/generic/serialization.cpp @@ -52,7 +52,6 @@ void THPStorage_(writeFileRaw)(THWStorage *self, io fd, bool save_size) torch::utils::THPByteOrder::THP_LITTLE_ENDIAN) { doWrite(fd, data, size_bytes); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t buffer_size = std::min(numel, (int64_t)5000); // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) std::unique_ptr le_buffer(new uint8_t[buffer_size * sizeof(scalar_t)]); @@ -70,7 +69,6 @@ void THPStorage_(writeFileRaw)(THWStorage *self, io fd, bool save_size) (const int32_t*)data + i, torch::utils::THPByteOrder::THP_LITTLE_ENDIAN, to_convert); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (sizeof(scalar_t) == 8) { torch::utils::THP_encodeInt64Buffer( (uint8_t*)le_buffer.get(), @@ -135,7 +133,6 @@ THWStorage * THPStorage_(readFileRaw)(io file, THWStorage *_storage) torch::utils::THPByteOrder::THP_LITTLE_ENDIAN) { doRead(file, data, storage->nbytes()); } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t buffer_size = std::min(size, (int64_t)5000); // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) std::unique_ptr le_buffer(new uint8_t[buffer_size * sizeof(scalar_t)]); @@ -157,7 +154,6 @@ THWStorage * THPStorage_(readFileRaw)(io file, THWStorage *_storage) le_buffer.get(), torch::utils::THP_nativeByteOrder(), to_convert); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (sizeof(scalar_t) == 8) { torch::utils::THP_decodeInt64Buffer( (int64_t*)data + i, diff --git a/torch/csrc/jit/codegen/cuda/executor_kernel_arg.h b/torch/csrc/jit/codegen/cuda/executor_kernel_arg.h index 31c6754fbdf6d..25f85ff5bf24d 100644 --- a/torch/csrc/jit/codegen/cuda/executor_kernel_arg.h +++ b/torch/csrc/jit/codegen/cuda/executor_kernel_arg.h @@ -133,21 +133,13 @@ std::unique_ptr getTensorArg(int nDims) { return std::make_unique>>(); case (4): return std::make_unique>>(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case (5): - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::make_unique>>(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case (6): - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::make_unique>>(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case (7): - // 
NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::make_unique>>(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) case (8): - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return std::make_unique>>(); default: TORCH_INTERNAL_ASSERT( diff --git a/torch/csrc/jit/codegen/cuda/kernel_cache.h b/torch/csrc/jit/codegen/cuda/kernel_cache.h index 5decc94674bcb..c396643eadaa3 100644 --- a/torch/csrc/jit/codegen/cuda/kernel_cache.h +++ b/torch/csrc/jit/codegen/cuda/kernel_cache.h @@ -29,7 +29,6 @@ namespace cuda { class TORCH_CUDA_CU_API InputsIdLookup { public: //! constructor where maximum cache size is fixed during init - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) explicit InputsIdLookup(size_t max_cache_size = 10) : max_cache_size_(max_cache_size){}; diff --git a/torch/csrc/jit/codegen/fuser/codegen.cpp b/torch/csrc/jit/codegen/fuser/codegen.cpp index 53e62cbb3e1a2..b4bfefd06c893 100644 --- a/torch/csrc/jit/codegen/fuser/codegen.cpp +++ b/torch/csrc/jit/codegen/fuser/codegen.cpp @@ -56,7 +56,6 @@ static std::string scalarValue(const double v) { out << "POS_INFINITY"; } } else { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) out << std::setprecision(16) << v; } return out.str(); diff --git a/torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp b/torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp index 6aedded4dc789..73daa194d8369 100644 --- a/torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp +++ b/torch/csrc/jit/codegen/fuser/cuda/fused_kernel.cpp @@ -46,34 +46,21 @@ void codegenOutputQuery( // based on the NVRTC version major = prop->major; minor = prop->minor; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nvrtc_major <= 7 && prop->major > 5) { // 7 supports 2-5.x - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) major = 5; minor = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_major <= 8 && prop->major > 6) { // 8 supports 2-6.x - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) major = 6; minor = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_major <= 9 && prop->major >= 7) { // 9 supports 3-7.2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) major = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) minor = (prop->major == 7 && prop->minor <= 2) ? prop->minor : 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_major <= 10 && prop->major >= 7) { // 10 supports 3-7.5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) major = 7; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) minor = (prop->major == 7 && prop->minor <= 5) ? prop->minor : 0; } else if ( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) nvrtc_major == 11 && nvrtc_minor == 0 && - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) prop->major >= 8) { // 11.0 supports 3.5-8.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) major = 8; minor = 0; } diff --git a/torch/csrc/jit/codegen/fuser/executor.cpp b/torch/csrc/jit/codegen/fuser/executor.cpp index 545696eed1624..b260e48b16c3f 100644 --- a/torch/csrc/jit/codegen/fuser/executor.cpp +++ b/torch/csrc/jit/codegen/fuser/executor.cpp @@ -35,7 +35,6 @@ static c10::optional> getMapSize( // should be straightforward. 
// Note: left unitialized since empty shape is broadcastable to any shape std::vector map_size; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) map_size.reserve(8); for (const auto arg_idx : arg_subset) { auto& arg = args.at(arg_idx); diff --git a/torch/csrc/jit/frontend/lexer.cpp b/torch/csrc/jit/frontend/lexer.cpp index e2eca7077a5b4..dcb8d20b3a48a 100644 --- a/torch/csrc/jit/frontend/lexer.cpp +++ b/torch/csrc/jit/frontend/lexer.cpp @@ -85,7 +85,6 @@ C10_EXPORT int stringToKind(const std::string& str) { } C10_EXPORT std::string kindToString(int kind) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (kind < 256) return std::string(1, kind); switch (kind) { diff --git a/torch/csrc/jit/frontend/parse_string_literal.h b/torch/csrc/jit/frontend/parse_string_literal.h index 012b1a3c596e4..2ca1f150aacdd 100644 --- a/torch/csrc/jit/frontend/parse_string_literal.h +++ b/torch/csrc/jit/frontend/parse_string_literal.h @@ -17,7 +17,6 @@ inline c10::optional parseOctal(const std::string& str, size_t pos) { if (pos + 3 >= str.size()) return c10::nullopt; size_t c = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) { // NOLINTNEXTLINE(bugprone-signed-char-misuse) int d = str[pos + i]; @@ -25,7 +24,6 @@ inline c10::optional parseOctal(const std::string& str, size_t pos) { return c10::nullopt; c += b * (d - '0'); } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (c >= 256) return c10::nullopt; return c; diff --git a/torch/csrc/jit/frontend/script_type_parser.cpp b/torch/csrc/jit/frontend/script_type_parser.cpp index cf367394486f1..5726cfb0e7601 100644 --- a/torch/csrc/jit/frontend/script_type_parser.cpp +++ b/torch/csrc/jit/frontend/script_type_parser.cpp @@ -185,7 +185,6 @@ c10::optional> ScriptTypeParser::parseBroadcastList( const char* len_c = len.c_str(); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) char* end; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t len_v = strtoull(len_c, &end, 10); if (end != len_c + len.size()) { throw ErrorReport(subscript.subscript_exprs().range()) diff --git a/torch/csrc/jit/frontend/tree.h b/torch/csrc/jit/frontend/tree.h index b4ae847d18366..073151c4f0eb0 100644 --- a/torch/csrc/jit/frontend/tree.h +++ b/torch/csrc/jit/frontend/tree.h @@ -168,7 +168,6 @@ struct Compound : public Tree { // tree pretty printer struct pretty_tree { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {} const TreeRef& tree; size_t col; diff --git a/torch/csrc/jit/passes/batch_mm.cpp b/torch/csrc/jit/passes/batch_mm.cpp index 7789e61f4ded8..b59b50a9c9192 100644 --- a/torch/csrc/jit/passes/batch_mm.cpp +++ b/torch/csrc/jit/passes/batch_mm.cpp @@ -103,7 +103,6 @@ bool shape_is_fast_for_reduce(const at::Tensor& lhs, const at::Tensor& rhs) { size_t m = lhs.size(1); size_t r = rhs.size(1); // Numbers obtained by some simple benchmarks of fp32 gemms on a TITAN V - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return m < 512 || ((l < 256 && r < 256) || (l > 256 && r > 256)); } @@ -313,7 +312,6 @@ void BatchMMTreeReduce(Block* block) { bool shape_is_fast_for_side(const at::Tensor& other_side_input) { // Cutoff chosed by benchmarking on a TITAN V - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return other_side_input.numel() <= 1024 * 2048; } diff --git a/torch/csrc/jit/passes/create_functional_graphs.cpp b/torch/csrc/jit/passes/create_functional_graphs.cpp index 
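// The parseOctal helper shown above reduces a three-digit octal escape to its byte
// value with positional weights 64, 8, 1 and rejects anything that does not fit in a
// byte. A minimal standalone sketch of that arithmetic (a hypothetical free function
// for illustration, not the torch::jit helper itself):
#include <optional>
#include <string>

std::optional<unsigned char> parse_octal_escape(const std::string& str, size_t pos) {
  // Expects str[pos] == '\\' followed by exactly three octal digits, e.g. "\\101" -> 65 ('A').
  if (pos + 3 >= str.size())
    return std::nullopt;
  size_t c = 0;
  for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) { // digit weights: 64, 8, 1
    int d = str[pos + i];
    if (d < '0' || d > '7')
      return std::nullopt;
    c += b * (d - '0');
  }
  if (c >= 256) // escapes above "\377" do not fit in a byte
    return std::nullopt;
  return static_cast<unsigned char>(c);
}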
2759a708561af..d5d85f6f5b2ac 100644 --- a/torch/csrc/jit/passes/create_functional_graphs.cpp +++ b/torch/csrc/jit/passes/create_functional_graphs.cpp @@ -193,7 +193,6 @@ struct FunctionalGraphSlicer { std::unordered_set mutated_values_; std::shared_ptr graph_; std::unique_ptr aliasDb_ = nullptr; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t minSubgraphSize_ = 6; }; diff --git a/torch/csrc/jit/passes/decompose_ops.cpp b/torch/csrc/jit/passes/decompose_ops.cpp index 5b4336c2b5bdc..d617b88c7a0d2 100644 --- a/torch/csrc/jit/passes/decompose_ops.cpp +++ b/torch/csrc/jit/passes/decompose_ops.cpp @@ -63,7 +63,6 @@ RegisterOperators reg_ops( [](Stack* stack) { const int64_t ndim = pop(stack).toInt(); auto self = pop(stack).toTensor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SmallVector sizes(ndim, 1); AT_ASSERT(self.dim() == 1); sizes.at(1) = self.size(0); @@ -77,7 +76,6 @@ RegisterOperators reg_ops( auto input_shape = pop(stack).toIntList(); auto self = pop(stack).toTensor(); const int64_t input_ndim = input_shape.size(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) c10::SmallVector sizes(input_ndim, 1); for (int i = 0; i < input_ndim - normalized_ndim; ++i) { sizes.at(i) = input_shape.get(i); diff --git a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp index 703bc2adc0c97..09990b0172e16 100644 --- a/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp +++ b/torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp @@ -684,7 +684,6 @@ void ComputeSubgraphInMKLDNN(Node* subgraph_node) { } if (body_node->kind() == aten::relu6) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) clamp_node_creator(body_node, prim::MKLDNNHardTanh, 0., 6.); continue; } diff --git a/torch/csrc/jit/passes/graph_fuser.cpp b/torch/csrc/jit/passes/graph_fuser.cpp index 1e57384b12f8a..b89bf1dbc86bc 100644 --- a/torch/csrc/jit/passes/graph_fuser.cpp +++ b/torch/csrc/jit/passes/graph_fuser.cpp @@ -139,7 +139,6 @@ struct GraphFuser { // limit here. // This limit is also applied to other devices in the fuser by default. 
// Change with setInputArgLimit - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t subgraph_arg_limit_ = 128; GraphFuser(AliasDb* aliasDb, Block* block, bool strict_fuser_check) diff --git a/torch/csrc/jit/passes/guard_elimination.cpp b/torch/csrc/jit/passes/guard_elimination.cpp index 40b7b23f4903b..c24e89ff41934 100644 --- a/torch/csrc/jit/passes/guard_elimination.cpp +++ b/torch/csrc/jit/passes/guard_elimination.cpp @@ -375,7 +375,6 @@ struct GuardElimination { case aten::conv1d: case aten::conv2d: case aten::conv3d: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return checkInputs(n, std::unordered_set{2, 6}, false); case aten::slice: return !n->input(0)->type()->expectRef().isSummarized() && @@ -400,7 +399,6 @@ struct GuardElimination { // check that the dilation is constant n->input(4)->node()->kind() == prim::Constant && // check that the ceil_mode is constant - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) n->input(5)->node()->kind() == prim::Constant; case aten::unsqueeze: // check that the dimension argument is constant diff --git a/torch/csrc/jit/passes/inline_autodiff_subgraphs.h b/torch/csrc/jit/passes/inline_autodiff_subgraphs.h index 0d267d7147f79..8edc81224a073 100644 --- a/torch/csrc/jit/passes/inline_autodiff_subgraphs.h +++ b/torch/csrc/jit/passes/inline_autodiff_subgraphs.h @@ -9,7 +9,6 @@ TORCH_API bool canRunWithAutograd(Node* node); TORCH_API void InlineAutodiffSubgraphs( std::shared_ptr& graph, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t threshold = 5); } // namespace jit diff --git a/torch/csrc/jit/passes/liveness.h b/torch/csrc/jit/passes/liveness.h index 3ea18db290324..970a5bf7c07a9 100644 --- a/torch/csrc/jit/passes/liveness.h +++ b/torch/csrc/jit/passes/liveness.h @@ -13,7 +13,6 @@ namespace torch { namespace jit { -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) using SparseBitVector = ::c10::SparseBitVector<256>; // BuildLivenessSets computes "bailout" liveness which is equivalent to diff --git a/torch/csrc/jit/passes/onnx/peephole.cpp b/torch/csrc/jit/passes/onnx/peephole.cpp index 88bbd30a6f404..9a38925ec6d71 100644 --- a/torch/csrc/jit/passes/onnx/peephole.cpp +++ b/torch/csrc/jit/passes/onnx/peephole.cpp @@ -471,11 +471,9 @@ void fixDefaultRnnHiddenState(Block* b, int opset_version) { } // Hidden state is the sixth input for RNN, LSTM, GRU. // See https://pytorch.org/docs/master/nn.html#torch.nn.RNN - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (n->inputs().size() < 6) { continue; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fixDefaultRNNState(b->owningGraph(), n, 5, opset_version); } } @@ -492,11 +490,9 @@ void fixDefaultLstmCellState(Block* b, int opset_version) { } // Cell state is the seventh input for LSTM. 
// See https://pytorch.org/docs/master/nn.html#torch.nn.LSTM - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (n->inputs().size() < 7) { continue; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) fixDefaultRNNState(b->owningGraph(), n, 6, opset_version); } } diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp index 494a424d0eff3..dbeedde663972 100644 --- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp +++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp @@ -721,11 +721,9 @@ void ProcessSliceNode(Node* n, int opset_version) { if (shape_size_0.rank().has_value()) { auto input0_shape_value = shape_size_0.sizes().value(); auto valid = true; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (opset_version >= 10) { valid = ConstantValueMap::HasValue(n->input(1)->debugName()) && ConstantValueMap::HasValue(n->input(2)->debugName()); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (auto input_idx = 3; input_idx < 5; ++input_idx) { if (n->inputs().size() > input_idx) { valid = valid && @@ -750,7 +748,6 @@ void ProcessSliceNode(Node* n, int opset_version) { } std::vector step_vector; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (opset_version < 10) { start_vector = n->is(attr::starts); end_vector = n->is(attr::ends); @@ -1001,7 +998,6 @@ void ComputeConstant(Node* n, int opset_version) { auto value = input_node->t(attr::value).toType(at::ScalarType::Float); auto value_a = value.accessor(); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (value_a.size(0) == 1 && std::abs(value_a[0]) > 1e-6) { if (ConstantValueMap::HasShape(n->input()->debugName())) { auto shape_size_0 = diff --git a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp index 9945ba77de84d..a6af6079ede57 100644 --- a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp +++ b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp @@ -81,7 +81,6 @@ double getScaleFromInput(Node* input_node) { } else if (input_name == "aten::sigmoid") { // For the _caffe2::Int8Sigmoid op output scale is 1.0/256 // And output zero_point is set to 0 (quint8 type). - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 1.0L / 256; } // For the ops below the scale is not part of the op signature, so we traverse @@ -297,7 +296,6 @@ void unpackQuantizedWeightsHelper( // Create caffe2::Int8GivenTensorFill node std::ostringstream os; for (int64_t i = 0; i < wt_numel; ++i) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) os << static_cast(inp_data[i] + 128); } diff --git a/torch/csrc/jit/passes/peephole_alias_sensitive.cpp b/torch/csrc/jit/passes/peephole_alias_sensitive.cpp index 114d378f6ec14..0a4324daf3985 100644 --- a/torch/csrc/jit/passes/peephole_alias_sensitive.cpp +++ b/torch/csrc/jit/passes/peephole_alias_sensitive.cpp @@ -50,7 +50,6 @@ struct PeepholeOptimizeAliasSensitiveImpl { } auto kind = node->kind(); int64_t output_size = - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) kind == aten::conv1d ? 3 : (kind == aten::conv2d ? 
4 : 5); // this is to handle potential resize_ calls, however unlikely // if we add more checks related to resize_ in the graph, diff --git a/torch/csrc/jit/passes/quantization/helper.cpp b/torch/csrc/jit/passes/quantization/helper.cpp index 9fca5f8746206..0fa61fb76ce1d 100644 --- a/torch/csrc/jit/passes/quantization/helper.cpp +++ b/torch/csrc/jit/passes/quantization/helper.cpp @@ -329,7 +329,6 @@ bool isBiasOfConvOrLinear(Value* v) { bool isEmbeddingBagNonInput(Value* v) { bool result = matchArgPattern( v, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) AtenFuncArgs({{"embedding_bag", 2}, {"embedding_bag", 6}}), CallFuncArgs({})); return result; diff --git a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp index 0fd6d4eea1629..f298c6ec02bff 100644 --- a/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp +++ b/torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp @@ -367,7 +367,6 @@ Node* insertEmbeddingBagOps(Node* observer, const std::string& op_name) { // The sparse field in the float operator denotes sparse gradients. // For inference this stands for pruned weights. We currently don't support // pruning in graph mode API so we set the field to 0 for inference. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) qembedding_bag_inputs[5] = pruned_const; } else { TORCH_CHECK( @@ -376,12 +375,10 @@ Node* insertEmbeddingBagOps(Node* observer, const std::string& op_name) { qembedding_bag_inputs.push_back(embedding_bag_inputs[1]); // indices qembedding_bag_inputs.push_back(embedding_bag_inputs[3]); // offsets qembedding_bag_inputs.push_back( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) embedding_bag_inputs[6]); // scale_grad_by_freq qembedding_bag_inputs.push_back(zero); // mode qembedding_bag_inputs.push_back(pruned_const); // pruned_weights qembedding_bag_inputs.push_back( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) embedding_bag_inputs[9]); // per_sample_weights } diff --git a/torch/csrc/jit/passes/utils/memory_dag.h b/torch/csrc/jit/passes/utils/memory_dag.h index 640c94dce04e3..7e86fa112ee6d 100644 --- a/torch/csrc/jit/passes/utils/memory_dag.h +++ b/torch/csrc/jit/passes/utils/memory_dag.h @@ -12,7 +12,6 @@ #include // Uses a compressed index representation for faster comparisons -// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) typedef c10::SparseBitVector<256> MemoryLocations; namespace torch { namespace jit { diff --git a/torch/csrc/jit/passes/utils/subgraph_utils.h b/torch/csrc/jit/passes/utils/subgraph_utils.h index 3118e8aaabc8c..9c84eea471776 100644 --- a/torch/csrc/jit/passes/utils/subgraph_utils.h +++ b/torch/csrc/jit/passes/utils/subgraph_utils.h @@ -55,7 +55,6 @@ std::shared_ptr getSubgraph(Node* n); TORCH_API std::string generateNameForGraph( const std::shared_ptr& graph, - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t maxlen = 40, const std::string& prefix = "fused"); diff --git a/torch/csrc/jit/runtime/autodiff.cpp b/torch/csrc/jit/runtime/autodiff.cpp index 25323ba72d236..917a17c978f76 100644 --- a/torch/csrc/jit/runtime/autodiff.cpp +++ b/torch/csrc/jit/runtime/autodiff.cpp @@ -275,9 +275,7 @@ class GradientHelper { inputs.at(4), outputs.at(1), outputs.at(2), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.at(5), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) inputs.at(7), graph->insertConstant(c10::List({true, true, true}))}); // graph->insert returns a tuple automatically if 
multiple outputs are diff --git a/torch/csrc/jit/runtime/logging.cpp b/torch/csrc/jit/runtime/logging.cpp index eb543827d9663..62c56a5bdd303 100644 --- a/torch/csrc/jit/runtime/logging.cpp +++ b/torch/csrc/jit/runtime/logging.cpp @@ -64,7 +64,6 @@ JITTimePoint timePoint() { void recordDurationSince(const std::string& name, const JITTimePoint& tp) { auto end = std::chrono::high_resolution_clock::now(); // Measurement in microseconds. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto seconds = std::chrono::duration(end - tp.point).count() * 1e9; logging::getLogger()->addStatValue(name, seconds); } diff --git a/torch/csrc/jit/runtime/register_ops_utils.h b/torch/csrc/jit/runtime/register_ops_utils.h index 96a8d927be83a..5844f8160ede8 100644 --- a/torch/csrc/jit/runtime/register_ops_utils.h +++ b/torch/csrc/jit/runtime/register_ops_utils.h @@ -66,7 +66,6 @@ inline void noop(Stack* n) {} // result will always be a even number. Due to symmetricity, it also applies to // negative cases. inline double round_to_even(double a) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return a - std::floor(a) == 0.5 ? (std::round(a * 0.5) * 2.0) : std::round(a); } diff --git a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp index 602030cdac3fa..5e1368daa9608 100644 --- a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp @@ -408,7 +408,6 @@ RegisterOperators reg( [](Stack* stack) { auto num_inputs = pop(stack).toInt(); std::vector size; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size.reserve(8); for (auto i = 0; i < num_inputs; ++i) { size = @@ -867,7 +866,6 @@ RegisterOperators reg2({ ss << "-"; i = -i; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::string str = std::bitset<8 * sizeof(i)>(i).to_string(); str.erase(0, std::min(str.find_first_not_of('0'), str.size() - 1)); ss << "0b" << str; diff --git a/torch/csrc/jit/runtime/static/impl.cpp b/torch/csrc/jit/runtime/static/impl.cpp index 13e2fbf6ee44f..6db4062f17feb 100644 --- a/torch/csrc/jit/runtime/static/impl.cpp +++ b/torch/csrc/jit/runtime/static/impl.cpp @@ -802,7 +802,6 @@ void StaticRuntime::benchmark( float time_per_iter = benchmark_model(args, kwargs, warmup_runs, main_runs); std::cout << "Static runtime ms per iter: " << time_per_iter - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) << ". Iters per second: " << 1000.0 / time_per_iter << std::endl; IndividualMetrics results = @@ -826,13 +825,11 @@ void StaticRuntime::benchmark( for (const auto& p : time_per_node_type_vec) { const std::string& kind = p.first; const double ms = p.second; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::cout << std::setw(15) << ms << " ms. " << std::setw(10) << results.percent_per_node_type[kind] << "%. " << kind << " (" << results.instances_per_node_type[kind] << " nodes)" << std::endl; } - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::cout << std::setw(15) << results.total_time << " ms. 
in Total" << std::endl; std::cout << "StaticRuntime setup time: " << results.setup_time << " ms" @@ -985,7 +982,6 @@ StaticRuntime::IndividualMetrics StaticRuntime::benchmark_individual_ops( results.output_dealloc_time /= static_cast(main_runs); for (const auto& p : results.time_per_node_type) { const std::string& kind = p.first; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) results.percent_per_node_type[kind] = p.second / results.total_time * 100; } return results; diff --git a/torch/csrc/jit/serialization/import_source.cpp b/torch/csrc/jit/serialization/import_source.cpp index 57ed4bb562aab..2915d108e89b9 100644 --- a/torch/csrc/jit/serialization/import_source.cpp +++ b/torch/csrc/jit/serialization/import_source.cpp @@ -71,7 +71,6 @@ struct ConstantTableValue : public SugaredValue { const char* field_s = field.c_str(); // NOLINTNEXTLINE(cppcoreguidelines-init-variables) char* end; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t offset = strtoll(field_s + 1, &end, 10); if (field.size() < 2 || *end != 0) throw ErrorReport(loc) << "invalid constant specifier: " << field; diff --git a/torch/csrc/jit/serialization/pickler.cpp b/torch/csrc/jit/serialization/pickler.cpp index cd0d32e80f250..c1b697921fabf 100644 --- a/torch/csrc/jit/serialization/pickler.cpp +++ b/torch/csrc/jit/serialization/pickler.cpp @@ -240,7 +240,6 @@ void Pickler::pushInt(int64_t n) { } else { // Push 8 byte integer push(PickleOpCode::LONG1); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) push(8); push(n); } diff --git a/torch/csrc/jit/serialization/unpickler.h b/torch/csrc/jit/serialization/unpickler.h index e72efd8383105..359c768b80041 100644 --- a/torch/csrc/jit/serialization/unpickler.h +++ b/torch/csrc/jit/serialization/unpickler.h @@ -120,7 +120,6 @@ class TORCH_API Unpickler { // remember the position. Don't call reader_ directly. std::function reader_; // Small buffer to avoid calling reader_ on a per-byte basis. 
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) std::array buffer_; size_t buffer_pos_{0}; size_t buffer_remaining_{0}; diff --git a/torch/csrc/jit/tensorexpr/block_codegen.h b/torch/csrc/jit/tensorexpr/block_codegen.h index efd37cdcfe9e1..2bb440b5e4416 100644 --- a/torch/csrc/jit/tensorexpr/block_codegen.h +++ b/torch/csrc/jit/tensorexpr/block_codegen.h @@ -58,7 +58,6 @@ class BlockAnalysis : public IRVisitor { std::unordered_map map_input_to_tensor_bufs_; std::unordered_set store_targets_; std::unordered_set loads_; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int block_size_ = 32; }; diff --git a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp index 7e3ac55af810c..497ff9736b7a4 100644 --- a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp +++ b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp @@ -84,26 +84,16 @@ static void codegenOutputQuery( CudaVersion dev_version = CudaVersion(prop->major, prop->minor); CudaVersion max_dev_version(dev_version); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (nvrtc_version.first <= 7) { // 7 supports 2-5.x - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) max_dev_version = CudaVersion(5, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_version.first <= 8) { // 8 supports 2-6.x - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) max_dev_version = CudaVersion(6, 0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_version.first <= 9) { // 9 supports 3-7.2 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) max_dev_version = CudaVersion(7, 2); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_version.first <= 10) { // 10 supports 3-7.5 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) max_dev_version = CudaVersion(7, 5); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (nvrtc_version.first == 11 && nvrtc_version.second == 0) { // 11.0 supports 3-8.0 - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) max_dev_version = CudaVersion(8, 0); } if (dev_version > max_dev_version) { @@ -1154,7 +1144,6 @@ void CudaCodeGen::call(const std::vector& args) { if (has_random_) { auto gen = at::cuda::detail::getDefaultCUDAGenerator(); // TODO: total hack. Switch to numel when it is available. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t total_elements_per_thread = (1LL << 28); { std::lock_guard lock(gen.mutex()); diff --git a/torch/csrc/jit/tensorexpr/expr.cpp b/torch/csrc/jit/tensorexpr/expr.cpp index b430da495b1b3..4c0e81fb80e91 100644 --- a/torch/csrc/jit/tensorexpr/expr.cpp +++ b/torch/csrc/jit/tensorexpr/expr.cpp @@ -151,37 +151,24 @@ ExprHandle fast_tanh(const ExprHandle& v) { Dtype dtype = v.dtype(); // TODO: use a dedicated bind-var to make sure v is not evalualted multiple // times. 
Clamp the input expression to [-9, 9] - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle plus_9 = FloatImm::make(9.0f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle minus_9 = FloatImm::make(-9.0f); ExprHandle v1 = Min::make(v, plus_9, false); v1 = Max::make(v1, minus_9, false); // The coefficients for the numerator - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_1 = FloatImm::make(4.89352455891786e-03f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_3 = FloatImm::make(6.37261928875436e-04f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_5 = FloatImm::make(1.48572235717979e-05f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_7 = FloatImm::make(5.12229709037114e-08f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_9 = FloatImm::make(-8.60467152213735e-11f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_11 = FloatImm::make(2.00018790482477e-13f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle alpha_13 = FloatImm::make(-2.76076847742355e-16f); // The coeffecients for the denominator - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle beta_0 = FloatImm::make(4.89352518554385e-03f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle beta_2 = FloatImm::make(2.26843463243900e-03f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle beta_4 = FloatImm::make(1.18534705686654e-04f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle beta_6 = FloatImm::make(1.19825839466702e-06f); // numerator @@ -206,7 +193,6 @@ ExprHandle fast_tanh(const ExprHandle& v) { ExprHandle fast_sigmoid(const ExprHandle& x) { // sigmoid(x) = (tanh(x / 2) + 1) / 2 ExprHandle one_v = FloatImm::make(1.f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ExprHandle half_v = FloatImm::make(0.5f); ExprHandle x2 = x * half_v; ExprHandle y{fast_tanh(x2)}; @@ -220,17 +206,13 @@ ExprHandle fast_log(const ExprHandle& v) { // to generate coefficients, this tool is provided // https://github.com/shibatch/sleef/blob/master/src/gencoef/gencoef.txt auto ilogb2kf = [](ExprHandle x) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto y = (bitcast(x) >> IntImm::make(23)) & IntImm::make(0xff); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return y - IntImm::make(0x7f); }; auto ldexp3kf = [](ExprHandle x, ExprHandle e) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return bitcast(bitcast(x) + (e << IntImm::make(23))); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto e = ilogb2kf(v * FloatImm::make(1.0 / 0.75)); auto m = ldexp3kf(v, IntImm::make(-1) * e); auto one = FloatImm::make(1.0f); @@ -241,17 +223,11 @@ ExprHandle fast_log(const ExprHandle& v) { return x * y + FloatImm::make(z); }; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = FloatImm::make(0.2392828464508056640625); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(t, x2, 0.28518211841583251953125); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(t, x2, 0.400005877017974853515625); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(t, x2, 0.666666686534881591796875); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(t, x2, 2.0); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) x = x * t + 
FloatImm::make(0.693147180559945286226764) * e; auto zero = FloatImm::make(0); @@ -268,45 +244,29 @@ ExprHandle log_vml(const ExprHandle& v) { }; auto in = bitcast(v); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto a = in - IntImm::make(0x3f2aaaab); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto e = cast(a >> IntImm::make(23)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto x = (a & IntImm::make(0x7fffff)) + IntImm::make(0x3f2aaaab); x = bitcast(x) - 1.0f; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto t = FloatImm::make(-0.12891686f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, 0.139844373f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, -0.121842608f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, 0.140058696f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, -0.16680488f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, 0.200104058f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, -0.249997973f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, 0.333332151f); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) t = mlaf(x, t, -0.5f); t = x * t; t = x * t + x; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto z = e * FloatImm::make(1.42860677e-06f) + t; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) z = e * FloatImm::make(0.693145752f) + z; return CompareSelect::make( - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) IntImm::make(0x1000000), - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) in + IntImm::make(0x800000), log(v), z, diff --git a/torch/csrc/jit/tensorexpr/external_functions.cpp b/torch/csrc/jit/tensorexpr/external_functions.cpp index e47eeb7692f14..0a64b21b11502 100644 --- a/torch/csrc/jit/tensorexpr/external_functions.cpp +++ b/torch/csrc/jit/tensorexpr/external_functions.cpp @@ -76,9 +76,7 @@ void nnc_aten_conv2d( int64_t paddingH = extra_args[2]; int64_t paddingW = extra_args[3]; int64_t dilationH = extra_args[4]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t dilationW = extra_args[5]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t groups = extra_args[6]; try { diff --git a/torch/csrc/jit/tensorexpr/hash_provider.h b/torch/csrc/jit/tensorexpr/hash_provider.h index a393625753b06..18064a146ce03 100644 --- a/torch/csrc/jit/tensorexpr/hash_provider.h +++ b/torch/csrc/jit/tensorexpr/hash_provider.h @@ -151,24 +151,20 @@ class TORCH_API HashProvider : public IRVisitor { // Hash funcs for various types, numbers are random. template void _hash_combine(SimplifierHashType& seed, const T& val) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); } void _hash_combine(SimplifierHashType& seed, const char* val) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); } // at:::Half doesn't have a prime_number_hash, so cast to short. 
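Each _hash_combine overload in this hunk folds a value into the running seed with the same mix: XOR the seed with the value's hash plus the constant 0x1f752c19 plus two shifted copies of the seed. A minimal free-standing version of that step, using std::hash in place of te_hash purely for illustration:

#include <cstddef>
#include <functional>
#include <string>

// Same mixing step as the overloads in this hunk, with std::hash standing in
// for te_hash; the constant and the 7/4 shifts are copied from the pattern above.
template <typename T>
void hash_combine(std::size_t& seed, const T& val) {
  seed ^= std::hash<T>{}(val) + 0x1f752c19 + (seed << 7) + (seed >> 4);
}

// Usage sketch: fold several fields into one hash value.
// std::size_t seed = 0;
// hash_combine(seed, std::string("aten::add"));
// hash_combine(seed, 42);

Seeding with zero and combining each field in order mirrors the shape of the hashing in hash_provider.h; the constant and shift amounts are arbitrary bit-scattering values rather than quantities with a meaning of their own.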
void _hash_combine(SimplifierHashType& seed, const at::Half& val) { seed._h ^= - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); } void _hash_combine(SimplifierHashType& seed, const Dtype& val) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4); } @@ -203,15 +199,12 @@ class TORCH_API HashProvider : public IRVisitor { size_t te_hash(int64_t val) { // put the thing down. - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) size_t h = val ^ 0x647AA4D20C0B; // bit flip it. size_t h2 = ~h; // and reverse byte order. size_t h3 = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (unsigned int i = 0; i < 64; i += 8) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) h3 |= ((h2 >> i) & 0xFF) << (64 - i - 8); } return h3; @@ -243,13 +236,11 @@ class TORCH_API HashProvider : public IRVisitor { // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions) int s = val.size() - 1; while (s >= 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) for (unsigned int i = 0; i < 8; ++i) { if (s < 0) break; // NOLINTNEXTLINE(bugprone-signed-char-misuse) int64_t c = val.data()[s]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) intval |= (c << (i * 8)); s--; diff --git a/torch/csrc/jit/tensorexpr/ir.h b/torch/csrc/jit/tensorexpr/ir.h index 55953df39bd18..be696ccfc6a1e 100644 --- a/torch/csrc/jit/tensorexpr/ir.h +++ b/torch/csrc/jit/tensorexpr/ir.h @@ -40,35 +40,26 @@ inline int getPrecedence(IRNodeType ty) { return 2; case kAdd: case kSub: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 6; case kMul: case kDiv: case kMod: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 5; case kMax: case kMin: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 99; case kAnd: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 11; case kOr: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 13; case kLshift: case kRshift: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 7; case kXor: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 12; case kCompareSelect: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 16; default: - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return 99; } } diff --git a/torch/csrc/jit/tensorexpr/kernel.cpp b/torch/csrc/jit/tensorexpr/kernel.cpp index a91cb19e59d2c..9fa28c900b375 100644 --- a/torch/csrc/jit/tensorexpr/kernel.cpp +++ b/torch/csrc/jit/tensorexpr/kernel.cpp @@ -220,9 +220,7 @@ bool conv2dIsSupportedJit(const torch::jit::Node* node) { auto const& bias = getTensorInfoJit(node->input(2)); auto const& stride = toIValue(node->input(3)); auto const& pad = toIValue(node->input(4)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto const& dilation = toIValue(node->input(5)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto const& groups = toIValue(node->input(6)); // Everything should be statically known. @@ -1472,7 +1470,6 @@ Tensor* computeMatmul( // an aten::matmul. // Native, even naive, lowering is beneficial when the sizes are small because // it allows to eliminate dispatch overhead. 
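The comment above describes the heuristic applied next: when the output is small (this pass checks total_size < 1000), a plain inlined loop nest beats calling an optimized kernel because it skips dispatch overhead. A hedged sketch of that two-path decision follows; the stub kernel, the flat row-major layout, and the use of the output element count as the size measure are assumptions of the sketch, only the threshold and the small-vs-large split come from the pass.

#include <cstddef>
#include <vector>

using Matrix = std::vector<float>; // row-major, illustrative only

// Naive triple loop: no dispatch, no packing; fine when the output is tiny.
static void naive_matmul(const Matrix& a, const Matrix& b, Matrix& c,
                         std::size_t m, std::size_t k, std::size_t n) {
  for (std::size_t i = 0; i < m; ++i) {
    for (std::size_t j = 0; j < n; ++j) {
      float acc = 0.f;
      for (std::size_t p = 0; p < k; ++p) {
        acc += a[i * k + p] * b[p * n + j];
      }
      c[i * n + j] = acc;
    }
  }
}

// Stand-in for the optimized library kernel the large-size path would call.
static void library_matmul(const Matrix& a, const Matrix& b, Matrix& c,
                           std::size_t m, std::size_t k, std::size_t n) {
  naive_matmul(a, b, c, m, k, n); // placeholder only
}

void matmul(const Matrix& a, const Matrix& b, Matrix& c,
            std::size_t m, std::size_t k, std::size_t n) {
  // Small outputs take the inlined naive path, echoing the cutoff used here.
  if (m * n < 1000) {
    naive_matmul(a, b, c, m, k, n);
  } else {
    library_matmul(a, b, c, m, k, n);
  }
}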
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) if (total_size && total_size->value() < 1000) { return Reduce( "nnc_matmul", @@ -1507,7 +1504,6 @@ Tensor* computeConv2d( auto padding = _pair_int(inputs[4]); auto dilation = _pair_int(inputs[5]); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int groups = c10::get(inputs[6]); auto inpInfo = getTensorInfo(inp); @@ -1887,7 +1883,6 @@ Tensor* tensorexpr::computeOperandValue( tensorOrConstant(inputs[0], indices), // input tensorOrConstant(inputs[3], {c}), // mean tensorOrConstant(inputs[4], {c}), // var - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) constant(inputs[7]) // eps }; @@ -1911,7 +1906,6 @@ Tensor* tensorexpr::computeOperandValue( } // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) if (hasBias) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) bias = exprInputs[5]; } diff --git a/torch/csrc/jit/tensorexpr/operators/conv2d.cpp b/torch/csrc/jit/tensorexpr/operators/conv2d.cpp index 5eaefbd3c698d..f83b34bbacb90 100644 --- a/torch/csrc/jit/tensorexpr/operators/conv2d.cpp +++ b/torch/csrc/jit/tensorexpr/operators/conv2d.cpp @@ -56,9 +56,7 @@ Tensor* conv2d_depthwise_static( auto const& oh = v[2]; auto const& ow = v[3]; auto const& c = v[4]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto const& r = v[5]; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto const& s = v[6]; auto cond = CompareSelect::make(oh * stride - pad + r, 0, 1, 0, kLT); cond = CompareSelect::make(ow * stride - pad + s, 0, 1, cond, kLT); diff --git a/torch/csrc/serialization.cpp b/torch/csrc/serialization.cpp index c14630edd6219..715e39d5af4bf 100644 --- a/torch/csrc/serialization.cpp +++ b/torch/csrc/serialization.cpp @@ -119,7 +119,6 @@ void doRead(io fildes, void* raw_buf, size_t nbytes) { errno = 0; // doPartialRead may not set errno // we read in 1GB blocks to avoid bugs on Mac OS X Lion // see https://github.com/pytorch/pytorch/issues/1031 for more details - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ssize_t r = doPartialRead(fildes, buf, std::min(nbytes, 1073741824)); if (r < 0) { int err = errno; @@ -152,7 +151,6 @@ void doWrite(io fildes, void* raw_buf, size_t nbytes) { errno = 0; // doPartialWrite may not set errno // we write in 1GB blocks to avoid bugs on Mac OS X Lion // see https://github.com/pytorch/pytorch/issues/1031 for more details - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ssize_t r = doPartialWrite(fildes, buf, std::min(nbytes, 1073741824)); if (r < 0) { int err = errno; diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp index 6edce7a9fdce4..836fecc1b7d8e 100644 --- a/torch/csrc/utils/invalid_arguments.cpp +++ b/torch/csrc/utils/invalid_arguments.cpp @@ -131,7 +131,6 @@ std::unique_ptr _buildType(std::string type_name, bool is_nullable) { } else if (type_name == "int") { result = torch::make_unique(MultiType{"int", "long"}); } else if (type_name.find("tuple[") == 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto type_list = type_name.substr(6); type_list.pop_back(); std::vector> types; @@ -139,7 +138,6 @@ std::unique_ptr _buildType(std::string type_name, bool is_nullable) { types.emplace_back(_buildType(type, false)); result = torch::make_unique(std::move(types)); } else if (type_name.find("sequence[") == 0) { - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto subtype = type_name.substr(9); subtype.pop_back(); result = torch::make_unique(_buildType(subtype, false)); 
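In _buildType above, the 6 and 9 are simply the lengths of the "tuple[" and "sequence[" prefixes: the code strips the prefix with substr, drops the trailing "]", and then builds a type for what is left. A small standalone sketch of that prefix arithmetic, with a made-up helper name and main for the example:

#include <iostream>
#include <string>

// Strip a known prefix and the closing ']' from strings like "sequence[int]".
// Returns the inner type list, or an empty string when the shape does not match.
static std::string innerTypes(const std::string& type_name, const std::string& prefix) {
  if (type_name.compare(0, prefix.size(), prefix) != 0 || type_name.back() != ']') {
    return "";
  }
  // substr(prefix.size()) plays the role of substr(6) / substr(9) above,
  // then pop the trailing ']'.
  std::string inner = type_name.substr(prefix.size());
  inner.pop_back();
  return inner;
}

int main() {
  std::cout << innerTypes("tuple[int, int]", "tuple[") << "\n";    // int, int
  std::cout << innerTypes("sequence[Tensor]", "sequence[") << "\n"; // Tensor
}

The real function goes on to build a type object for each inner element via _buildType; only the substring bookkeeping behind the former NOLINT lines is shown here.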
@@ -327,7 +325,6 @@ std::string format_invalid_args( std::vector args; std::unordered_map kwargs; std::string error_msg; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) error_msg.reserve(2000); error_msg += function_name; error_msg += " received an invalid combination of arguments - "; diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp index cb104f6c2a1d3..8322ade80e7b0 100644 --- a/torch/csrc/utils/tensor_new.cpp +++ b/torch/csrc/utils/tensor_new.cpp @@ -408,7 +408,6 @@ Tensor legacy_sparse_tensor_new(c10::DispatchKey dispatch_key, at::ScalarType sc "new(IntArrayRef size, *, Device? device=None)", }); check_base_legacy_new(dispatch_key, c10::kSparse); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ParsedArgs<5> parsed_args; auto r = parser.parse(args, kwargs, parsed_args); if (r.idx == 0) { @@ -510,7 +509,6 @@ Tensor legacy_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_t return legacy_new_from_sequence(options, scalar_type, deviceOptional, r.pyobject(0)); } return new_with_sizes(options, scalar_type, r.deviceOptional(1), r.intlist(0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (r.idx == 5) { auto deviceOptional = r.deviceOptional(1); check_legacy_ctor_device(dispatch_key, deviceOptional); @@ -570,7 +568,6 @@ Tensor legacy_tensor_new(c10::DispatchKey dispatch_key, at::ScalarType scalar_ty return legacy_new_from_sequence(options, scalar_type, deviceOptional, r.pyobject(0)); } return new_with_sizes(options, scalar_type, r.deviceOptional(1), r.intlist(0)); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) } else if (r.idx == 5) { auto deviceOptional = r.deviceOptional(1); check_legacy_ctor_device(dispatch_key, deviceOptional); @@ -687,7 +684,6 @@ Tensor sparse_coo_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scal "sparse_coo_tensor(IntArrayRef size, *, ScalarType dtype=None, Device? 
device=None, bool requires_grad=False)", }); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) ParsedArgs<6> parsed_args; auto r = parser.parse(args, kwargs, parsed_args); if (r.idx == 0) { @@ -716,7 +712,6 @@ Tensor sparse_coo_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scal Tensor indices = internal_new_from_data(values.options(), kLong, r.deviceOptional(4), r.pyobject(0), /*copy_variables=*/false, /*copy_numpy=*/true, /*type_inference=*/false); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) return at::sparse_coo_tensor(indices, values, r.intlist(2), values.options().layout(at::kSparse)).set_requires_grad(r.toBool(5)); } else if (r.idx == 2) { const auto inferred_options = typeIdWithDefault(r, 2, dispatch_key); @@ -805,7 +800,6 @@ Tensor tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, Py /*copy_numpy=*/true, /*type_inference=*/type_inference, pin_memory); - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) auto names = r.toDimnameListOptional(5); if (names) { at::namedinference::propagate_names(new_tensor, *names, /*validate_names=*/true); diff --git a/torch/csrc/utils/throughput_benchmark-inl.h b/torch/csrc/utils/throughput_benchmark-inl.h index 3fdda7b12090f..009c4f5191370 100644 --- a/torch/csrc/utils/throughput_benchmark-inl.h +++ b/torch/csrc/utils/throughput_benchmark-inl.h @@ -131,7 +131,6 @@ BenchmarkExecutionStats BenchmarkHelper::benchmark( float total_time_ms = std::chrono::duration_cast( end_time - start_time) .count() / - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) 1000.0 / 1000.0; // We use config.num_iters instead of num_attempted_iters as it is // repsesatative of the real work done. Last attempted iteration on each diff --git a/torch/csrc/utils/throughput_benchmark.h b/torch/csrc/utils/throughput_benchmark.h index 45175d5651c0a..09e25d38629f7 100644 --- a/torch/csrc/utils/throughput_benchmark.h +++ b/torch/csrc/utils/throughput_benchmark.h @@ -52,7 +52,6 @@ struct BenchmarkConfig { int num_warmup_iters{1}; // Number of iterations the benchmark should run with. This number is separate // from the warmup iterations - // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers) int64_t num_iters{100}; // If set autograd profiler will be enabled. I.e. this variable would be created // before the main benchmark loop (but after the warmup):
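BenchmarkConfig above separates a short warmup from the measured iterations, and the inline helper converts the raw duration count to milliseconds by dividing by 1000.0 twice. A minimal harness with the same shape follows; the names, the nanosecond clock reading, and the toy workload are all inventions of the sketch rather than the real ThroughputBenchmark code.

#include <chrono>
#include <cstdint>
#include <iostream>

// Illustrative config mirroring the warmup/measured split described above.
struct SketchBenchmarkConfig {
  int num_warmup_iters{1};
  std::int64_t num_iters{100};
};

template <typename Fn>
void run_benchmark(Fn&& fn, const SketchBenchmarkConfig& config) {
  for (int i = 0; i < config.num_warmup_iters; ++i) {
    fn(); // warmup, excluded from the measurement
  }
  auto start = std::chrono::high_resolution_clock::now();
  for (std::int64_t i = 0; i < config.num_iters; ++i) {
    fn();
  }
  auto end = std::chrono::high_resolution_clock::now();
  // Nanoseconds to milliseconds via two divisions by 1000, echoing the
  // conversion style in the benchmark helper above.
  double total_ms =
      std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count() /
      1000.0 / 1000.0;
  double ms_per_iter = total_ms / config.num_iters;
  std::cout << "ms per iter: " << ms_per_iter
            << ". Iters per second: " << 1000.0 / ms_per_iter << std::endl;
}

int main() {
  volatile double sink = 0;
  run_benchmark([&] { sink = sink + 1.0; }, SketchBenchmarkConfig{});
}

The defaults of one warmup iteration and 100 measured iterations match the fields shown above; everything else is illustrative.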