From 97db9fde6960136cacc786975915fcb7fe1afb6d Mon Sep 17 00:00:00 2001 From: Aaron Gokaslan Date: Tue, 27 Dec 2022 07:34:12 +0000 Subject: [PATCH] =?UTF-8?q?Fix=20header-filter=20for=20clang-tidy=20c10=20?= =?UTF-8?q?and=20apply=20some=20fixes=20to=20c10=20and=20=E2=80=A6=20(#911?= =?UTF-8?q?78)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …c10d Fixes a broken header filter from #90699 and applies a few more clang-tidy fixes that are relevant from c10 and c10d. The header filter pattern was actually broken and the clang-tidy include pattern was redundant. Also fixed a few bugs in torch/distributed/c10d Pull Request resolved: https://github.com/pytorch/pytorch/pull/91178 Approved by: https://github.com/ezyang --- .clang-tidy | 2 +- .lintrunner.toml | 1 - c10/mobile/CPUCachingAllocator.cpp | 4 +-- c10/util/Exception.cpp | 9 +++--- c10/util/Exception.h | 2 +- c10/util/SmallVector.cpp | 2 +- c10/util/int128.cpp | 2 +- c10/util/numa.cpp | 2 +- c10/util/signal_handler.cpp | 10 +++---- c10/util/signal_handler.h | 2 +- torch/csrc/distributed/c10d/Backend.cpp | 2 +- torch/csrc/distributed/c10d/FileStore.cpp | 29 +++++++++++-------- torch/csrc/distributed/c10d/FileStore.hpp | 2 +- .../distributed/c10d/GlooDeviceFactory.cpp | 2 +- torch/csrc/distributed/c10d/HashStore.cpp | 4 +-- torch/csrc/distributed/c10d/PrefixStore.cpp | 7 ++--- torch/csrc/distributed/c10d/PrefixStore.hpp | 2 +- torch/csrc/distributed/c10d/ProcessGroup.cpp | 2 +- .../distributed/c10d/ProcessGroupGloo.cpp | 6 ++-- .../c10d/ProcessGroupRoundRobin.cpp | 2 +- .../distributed/c10d/ProcessGroupWrapper.cpp | 11 +++---- torch/csrc/distributed/c10d/Store.cpp | 2 +- torch/csrc/distributed/c10d/TCPStore.cpp | 6 ++-- torch/csrc/distributed/c10d/Work.cpp | 5 ++-- torch/csrc/distributed/c10d/init.cpp | 2 +- torch/csrc/distributed/c10d/logger.cpp | 4 +-- torch/csrc/distributed/c10d/sequence_num.cpp | 2 +- .../csrc/profiler/kineto_client_interface.cpp | 1 + 28 files 
changed, 67 insertions(+), 60 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 839dabac69dbdf..ec43eca88f2ed3 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -38,7 +38,7 @@ performance-*, -performance-noexcept-move-constructor, -performance-unnecessary-value-param, ' -HeaderFilterRegex: '(c10/(?!test)/|torch/csrc/(?!deploy/interpreter/cpython)).*' +HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$' AnalyzeTemporaryDtors: false WarningsAsErrors: '*' CheckOptions: diff --git a/.lintrunner.toml b/.lintrunner.toml index 33d114bcc1d700..073ab891c4fe3e 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -211,7 +211,6 @@ command = [ [[linter]] code = 'CLANGTIDY' include_patterns = [ - 'c10/core/*.cpp', 'c10/core/**/*.cpp', 'torch/csrc/fx/**/*.cpp', 'torch/csrc/generic/**/*.cpp', diff --git a/c10/mobile/CPUCachingAllocator.cpp b/c10/mobile/CPUCachingAllocator.cpp index 683cfe14553af5..e59de3775867bb 100644 --- a/c10/mobile/CPUCachingAllocator.cpp +++ b/c10/mobile/CPUCachingAllocator.cpp @@ -97,8 +97,8 @@ CPUCachingAllocator* GetThreadLocalCachingAllocator() { } WithCPUCachingAllocatorGuard::WithCPUCachingAllocatorGuard( - CPUCachingAllocator* allocator) { - prev_caching_allocator_ptr_ = GetThreadLocalCachingAllocator(); + CPUCachingAllocator* allocator) + : prev_caching_allocator_ptr_(GetThreadLocalCachingAllocator()) { caching_allocator_ptr = allocator; } diff --git a/c10/util/Exception.cpp b/c10/util/Exception.cpp index 6ab4895558de8f..5041147fd9c197 100644 --- a/c10/util/Exception.cpp +++ b/c10/util/Exception.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace c10 { @@ -183,11 +184,11 @@ void warn(const Warning& warning) { Warning::Warning( warning_variant_t type, const SourceLocation& source_location, - const std::string& msg, + std::string msg, const bool verbatim) : type_(type), source_location_(source_location), - msg_(msg), + msg_(std::move(msg)), verbatim_(verbatim) {} Warning::Warning( @@ -195,7 +196,7 @@ 
Warning::Warning( SourceLocation source_location, detail::CompileTimeEmptyString msg, const bool verbatim) - : Warning(type, std::move(source_location), "", verbatim) {} + : Warning(type, source_location, "", verbatim) {} Warning::Warning( warning_variant_t type, @@ -203,7 +204,7 @@ Warning::Warning( const char* msg, const bool verbatim) : type_(type), - source_location_(std::move(source_location)), + source_location_(source_location), msg_(std::string(msg)), verbatim_(verbatim) {} diff --git a/c10/util/Exception.h b/c10/util/Exception.h index 773107f668ae1b..5f3fccf8a94244 100644 --- a/c10/util/Exception.h +++ b/c10/util/Exception.h @@ -123,7 +123,7 @@ class C10_API Warning { Warning( warning_variant_t type, const SourceLocation& source_location, - const std::string& msg, + std::string msg, bool verbatim); Warning( diff --git a/c10/util/SmallVector.cpp b/c10/util/SmallVector.cpp index d57f4d97b999eb..ddece6801f6ab2 100644 --- a/c10/util/SmallVector.cpp +++ b/c10/util/SmallVector.cpp @@ -126,7 +126,7 @@ void SmallVectorBase::grow_pod( size_t MinSize, size_t TSize) { size_t NewCapacity = getNewCapacity(MinSize, TSize, this->capacity()); - void* NewElts; + void* NewElts = nullptr; if (BeginX == FirstEl) { NewElts = std::malloc(NewCapacity * TSize); if (NewElts == nullptr) { diff --git a/c10/util/int128.cpp b/c10/util/int128.cpp index f83dba49983363..329452d9c2e73e 100644 --- a/c10/util/int128.cpp +++ b/c10/util/int128.cpp @@ -129,7 +129,7 @@ std::ostream& operator<<(std::ostream& o, const uint128& b) { // Select a divisor which is the largest power of the base < 2^64. 
uint128 div; - std::streamsize div_base_log; + std::streamsize div_base_log = 0; switch (flags & std::ios::basefield) { case std::ios::hex: div = (uint64_t)0x1000000000000000u; // 16^15 diff --git a/c10/util/numa.cpp b/c10/util/numa.cpp index 43716b7e3b8f4d..0d8822e6f011da 100644 --- a/c10/util/numa.cpp +++ b/c10/util/numa.cpp @@ -49,7 +49,7 @@ int GetNUMANode(const void* ptr) { TORCH_CHECK( get_mempolicy( &numa_node, - NULL, + nullptr, 0, const_cast(ptr), MPOL_F_NODE | MPOL_F_ADDR) == 0, diff --git a/c10/util/signal_handler.cpp b/c10/util/signal_handler.cpp index 7eda700979fa8f..ab40b594a0b054 100644 --- a/c10/util/signal_handler.cpp +++ b/c10/util/signal_handler.cpp @@ -57,7 +57,7 @@ void hookupHandler() { if (hookedUpCount++) { return; } - struct sigaction sa; + struct sigaction sa {}; // Setup the handler sa.sa_handler = &handleSignal; // Restart the system call, if at all possible @@ -78,7 +78,7 @@ void unhookHandler() { if (--hookedUpCount > 0) { return; } - struct sigaction sa; + struct sigaction sa {}; // Setup the sighub handler sa.sa_handler = SIG_DFL; // Restart the system call, if at all possible @@ -106,7 +106,7 @@ FatalSignalHandler& FatalSignalHandler::getInstance() { return *handler; } -FatalSignalHandler::~FatalSignalHandler() {} +FatalSignalHandler::~FatalSignalHandler() = default; FatalSignalHandler::FatalSignalHandler() : fatalSignalHandlersInstalled(false), @@ -205,7 +205,7 @@ void FatalSignalHandler::fatalSignalHandler(int signum) { if (procDir) { pid_t pid = getpid(); pid_t currentTid = syscall(SYS_gettid); - struct dirent* entry; + struct dirent* entry = nullptr; pthread_mutex_lock(&writingMutex); while ((entry = readdir(procDir)) != nullptr) { if (entry->d_name[0] == '.') { @@ -263,7 +263,7 @@ void FatalSignalHandler::installFatalSignalHandlers() { return; } fatalSignalHandlersInstalled = true; - struct sigaction sa; + struct sigaction sa {}; sigemptyset(&sa.sa_mask); // Since we'll be in an exiting situation it's possible there's memory // 
corruption, so make our own stack just in case. diff --git a/c10/util/signal_handler.h b/c10/util/signal_handler.h index 6313e5d8b9b73e..2dafaf46835403 100644 --- a/c10/util/signal_handler.h +++ b/c10/util/signal_handler.h @@ -78,7 +78,7 @@ class TORCH_API FatalSignalHandler { bool fatalSignalHandlersInstalled; // We need to hold a reference to call the previous SIGUSR2 handler in case // we didn't signal it - struct sigaction previousSigusr2; + struct sigaction previousSigusr2 {}; // Flag dictating whether the SIGUSR2 handler falls back to previous handlers // or is intercepted in order to print a stack trace. std::atomic fatalSignalReceived; diff --git a/torch/csrc/distributed/c10d/Backend.cpp b/torch/csrc/distributed/c10d/Backend.cpp index 0819506c29fe79..9382c9c501ed22 100644 --- a/torch/csrc/distributed/c10d/Backend.cpp +++ b/torch/csrc/distributed/c10d/Backend.cpp @@ -9,7 +9,7 @@ Backend::Backend(int rank, int size) C10_LOG_API_USAGE_ONCE("c10d.backend"); } -Backend::~Backend() {} +Backend::~Backend() = default; void Backend::init() { C10_LOG_API_USAGE_ONCE(fmt::format("c10d.backend_{}", getBackendName())); diff --git a/torch/csrc/distributed/c10d/FileStore.cpp b/torch/csrc/distributed/c10d/FileStore.cpp index 0aa681c5caa144..8e364e0e420716 100644 --- a/torch/csrc/distributed/c10d/FileStore.cpp +++ b/torch/csrc/distributed/c10d/FileStore.cpp @@ -1,9 +1,9 @@ #include -#include #include -#include #include +#include +#include #ifdef _WIN32 #include @@ -22,6 +22,7 @@ #include #include #include +#include #include @@ -125,7 +126,7 @@ class Lock { #ifdef _WIN32 auto rv = syscall(std::bind(::flock_, fd_, operation)); #else - auto rv = syscall(std::bind(::flock, fd_, operation)); + auto rv = syscall([this, operation] { return ::flock(fd_, operation); }); #endif SYSASSERT(rv, "flock"); } @@ -143,7 +144,9 @@ class File { fd_ = syscall(std::bind( ::open, path.c_str(), flags | _O_BINARY, _S_IREAD | _S_IWRITE)); #else - fd_ = syscall(std::bind(::open, path.c_str(), flags, 
0644)); + fd_ = syscall([capture0 = path.c_str(), flags] { + return ::open(capture0, flags, 0644); + }); #endif // Only retry when the file doesn't exist, since we are waiting for the // file to be created in this case to address the following issue: @@ -174,13 +177,14 @@ class File { } off_t seek(off_t offset, int whence) { - auto rv = syscall(std::bind(lseek, fd_, offset, whence)); + auto rv = + syscall([this, offset, whence] { return lseek(fd_, offset, whence); }); SYSASSERT(rv, "lseek"); return rv; } off_t tell() { - auto rv = syscall(std::bind(lseek, fd_, 0, SEEK_CUR)); + auto rv = syscall([this] { return lseek(fd_, 0, SEEK_CUR); }); SYSASSERT(rv, "lseek"); return rv; } @@ -194,7 +198,8 @@ class File { void write(const void* buf, size_t count) { while (count > 0) { - auto rv = syscall(std::bind(::write, fd_, buf, count)); + auto rv = + syscall([this, buf, count] { return ::write(fd_, buf, count); }); SYSASSERT(rv, "write"); buf = (uint8_t*)buf + rv; count -= rv; @@ -203,7 +208,7 @@ class File { void read(void* buf, size_t count) { while (count > 0) { - auto rv = syscall(std::bind(::read, fd_, buf, count)); + auto rv = syscall([this, buf, count] { return ::read(fd_, buf, count); }); SYSASSERT(rv, "read"); buf = (uint8_t*)buf + rv; count -= rv; @@ -225,7 +230,7 @@ class File { } void read(std::string& str) { - uint32_t len; + uint32_t len = 0; read(&len, sizeof(len)); std::vector buf(len); read(buf.data(), len); @@ -233,7 +238,7 @@ class File { } void read(std::vector& data) { - uint32_t len; + uint32_t len = 0; read(&len, sizeof(len)); data.resize(len); read(data.data(), len); @@ -270,9 +275,9 @@ off_t refresh( } // namespace -FileStore::FileStore(const std::string& path, int numWorkers) +FileStore::FileStore(std::string path, int numWorkers) : Store(), - path_(path), + path_(std::move(path)), pos_(0), numWorkers_(numWorkers), cleanupKey_("cleanup/"), diff --git a/torch/csrc/distributed/c10d/FileStore.hpp b/torch/csrc/distributed/c10d/FileStore.hpp index 
bb364a76a686ad..826c94f302f1f4 100644 --- a/torch/csrc/distributed/c10d/FileStore.hpp +++ b/torch/csrc/distributed/c10d/FileStore.hpp @@ -11,7 +11,7 @@ namespace c10d { class TORCH_API FileStore : public Store { public: - explicit FileStore(const std::string& path, int numWorkers); + explicit FileStore(std::string path, int numWorkers); virtual ~FileStore(); diff --git a/torch/csrc/distributed/c10d/GlooDeviceFactory.cpp b/torch/csrc/distributed/c10d/GlooDeviceFactory.cpp index 181e7deb439aab..3441c38be32ab9 100644 --- a/torch/csrc/distributed/c10d/GlooDeviceFactory.cpp +++ b/torch/csrc/distributed/c10d/GlooDeviceFactory.cpp @@ -2,7 +2,7 @@ #ifdef USE_C10D_GLOO -#include +#include #include diff --git a/torch/csrc/distributed/c10d/HashStore.cpp b/torch/csrc/distributed/c10d/HashStore.cpp index 41a4d46ff6175d..e426a3c58b49f3 100644 --- a/torch/csrc/distributed/c10d/HashStore.cpp +++ b/torch/csrc/distributed/c10d/HashStore.cpp @@ -1,8 +1,8 @@ #include -#include -#include #include +#include +#include #include #include diff --git a/torch/csrc/distributed/c10d/PrefixStore.cpp b/torch/csrc/distributed/c10d/PrefixStore.cpp index a27db02c1e3a7f..4489b785b25cd7 100644 --- a/torch/csrc/distributed/c10d/PrefixStore.cpp +++ b/torch/csrc/distributed/c10d/PrefixStore.cpp @@ -1,11 +1,10 @@ #include +#include namespace c10d { -PrefixStore::PrefixStore( - const std::string& prefix, - c10::intrusive_ptr store) - : prefix_(prefix), store_(store) {} +PrefixStore::PrefixStore(std::string prefix, c10::intrusive_ptr store) + : prefix_(std::move(prefix)), store_(std::move(store)) {} std::string PrefixStore::joinKey(const std::string& key) { return prefix_ + "/" + key; diff --git a/torch/csrc/distributed/c10d/PrefixStore.hpp b/torch/csrc/distributed/c10d/PrefixStore.hpp index 143d20b80435c7..d2955456c10cf6 100644 --- a/torch/csrc/distributed/c10d/PrefixStore.hpp +++ b/torch/csrc/distributed/c10d/PrefixStore.hpp @@ -8,7 +8,7 @@ namespace c10d { class TORCH_API PrefixStore : public Store { 
public: explicit PrefixStore( - const std::string& prefix, + std::string prefix, c10::intrusive_ptr store); virtual ~PrefixStore(){}; diff --git a/torch/csrc/distributed/c10d/ProcessGroup.cpp b/torch/csrc/distributed/c10d/ProcessGroup.cpp index fb56797eec19db..a018789a2e49d1 100644 --- a/torch/csrc/distributed/c10d/ProcessGroup.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroup.cpp @@ -147,7 +147,7 @@ ProcessGroup::ProcessGroup( ProcessGroup::ProcessGroup(int rank, int size) : rank_(rank), size_(size), backendType_(BackendType::UNDEFINED) {} -ProcessGroup::~ProcessGroup() {} +ProcessGroup::~ProcessGroup() = default; void ProcessGroup::init() { C10_LOG_API_USAGE_ONCE( diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp index e41d3ec3f1fad0..1d68523204c712 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp @@ -626,16 +626,16 @@ void socketInitialize() { // gracefully fall back to an alternative if it doesn't. 
bool doesHostnameResolveToUsableAddress(const std::string& hostname) { socketInitialize(); - struct addrinfo hints; + struct addrinfo hints {}; memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; - struct addrinfo* result; + struct addrinfo* result = nullptr; auto rv = getaddrinfo(hostname.c_str(), nullptr, &hints, &result); if (rv < 0) { return false; } - struct addrinfo* rp; + struct addrinfo* rp = nullptr; for (rp = result; rp != nullptr; rp = rp->ai_next) { auto fd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); if (fd == -1) { diff --git a/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp b/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp index e4c0c8b4d65155..801d97bb1ddc91 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupRoundRobin.cpp @@ -18,7 +18,7 @@ ProcessGroupRoundRobin::ProcessGroupRoundRobin( iterator_ = processGroups_.begin(); } -ProcessGroupRoundRobin::~ProcessGroupRoundRobin() {} +ProcessGroupRoundRobin::~ProcessGroupRoundRobin() = default; c10::intrusive_ptr ProcessGroupRoundRobin::broadcast( std::vector& tensors, diff --git a/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp b/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp index 8b235c935a181b..aee97133daff9b 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp @@ -13,6 +13,7 @@ #include #include #include +#include namespace c10d { @@ -23,7 +24,7 @@ struct CollectiveFingerPrint { // Current collective's operation type. 
OpType op_type_; // Number of input tensors - std::size_t num_tensors_; + std::size_t num_tensors_{}; // input tensor data types std::vector tensor_dtypes_; // input tensor device types @@ -52,9 +53,9 @@ struct CollectiveFingerPrint { std::vector tensor_device_types, std::vector> tensor_sizes) : op_type_(op_type), - tensor_dtypes_(tensor_dtypes), - tensor_device_types_(tensor_device_types), - tensor_sizes_(tensor_sizes) {} + tensor_dtypes_(std::move(tensor_dtypes)), + tensor_device_types_(std::move(tensor_device_types)), + tensor_sizes_(std::move(tensor_sizes)) {} // Logs collective information in case of a failure. friend std::ostream& operator<<( @@ -267,7 +268,7 @@ ProcessGroupWrapper::ProcessGroupWrapper( c10::intrusive_ptr glooBackend) : Backend(backend->getRank(), backend->getSize()), backend_(backend), - glooBackend_(glooBackend) { + glooBackend_(std::move(glooBackend)) { // Set the sequence number for the underlying process group. backend_->setSequenceNumberForGroup(); } diff --git a/torch/csrc/distributed/c10d/Store.cpp b/torch/csrc/distributed/c10d/Store.cpp index 9632176adda5d7..3df496270f682b 100644 --- a/torch/csrc/distributed/c10d/Store.cpp +++ b/torch/csrc/distributed/c10d/Store.cpp @@ -6,7 +6,7 @@ constexpr std::chrono::milliseconds Store::kDefaultTimeout; constexpr std::chrono::milliseconds Store::kNoTimeout; // Define destructor symbol for abstract base class. 
-Store::~Store() {} +Store::~Store() = default; const std::chrono::milliseconds& Store::getTimeout() const noexcept { return timeout_; diff --git a/torch/csrc/distributed/c10d/TCPStore.cpp b/torch/csrc/distributed/c10d/TCPStore.cpp index 371f509e5c2667..ff16f0710cdd15 100644 --- a/torch/csrc/distributed/c10d/TCPStore.cpp +++ b/torch/csrc/distributed/c10d/TCPStore.cpp @@ -434,7 +434,7 @@ void TCPStoreMasterDaemon::deleteHandler(int socket) { } void TCPStoreMasterDaemon::checkHandler(int socket) const { - SizeType nargs; + SizeType nargs = 0; tcputil::recvBytes(socket, &nargs, 1); std::vector keys(nargs); for (const auto i : c10::irange(nargs)) { @@ -449,7 +449,7 @@ void TCPStoreMasterDaemon::checkHandler(int socket) const { } void TCPStoreMasterDaemon::waitHandler(int socket) { - SizeType nargs; + SizeType nargs = 0; tcputil::recvBytes(socket, &nargs, 1); std::vector keys(nargs); for (const auto i : c10::irange(nargs)) { @@ -741,7 +741,7 @@ void TCPStoreWorkerDaemon::run() { } // if connection is closed gracefully by master, peeked data will return 0 - char data; + char data = 0; int ret = recv(fds[1].fd, &data, 1, MSG_PEEK); if (ret == 0) { continue; diff --git a/torch/csrc/distributed/c10d/Work.cpp b/torch/csrc/distributed/c10d/Work.cpp index 7ad90b7425570c..4842c52e0283af 100644 --- a/torch/csrc/distributed/c10d/Work.cpp +++ b/torch/csrc/distributed/c10d/Work.cpp @@ -1,6 +1,7 @@ #include #include +#include namespace c10d { @@ -129,9 +130,9 @@ void Work::finishAndThrow(std::exception_ptr exception) { class FutureWrappingWork : public Work { public: FutureWrappingWork(c10::intrusive_ptr fut) - : Work(), _fut(fut) {} + : Work(), _fut(std::move(fut)) {} - ~FutureWrappingWork() {} + ~FutureWrappingWork() override = default; bool isCompleted() override { return _fut->completed(); diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp index 1b15ad94c3b298..e90b15b1b079da 100644 --- a/torch/csrc/distributed/c10d/init.cpp +++ 
b/torch/csrc/distributed/c10d/init.cpp @@ -258,7 +258,7 @@ static PyMethodDef reduceopmeta_methods[] = { (PyCFunction)reduceopmeta___instancecheck__, METH_O, "Custom `__instancecheck__` for ReduceOp"}, - {NULL, NULL}}; + {nullptr, nullptr}}; PyTypeObject* GetReduceOpMetaclass() { static auto* metaclass = [] { PyTypeObject* base_metaclass = diff --git a/torch/csrc/distributed/c10d/logger.cpp b/torch/csrc/distributed/c10d/logger.cpp index 7ba18dbc71117e..ca3919eb034b45 100644 --- a/torch/csrc/distributed/c10d/logger.cpp +++ b/torch/csrc/distributed/c10d/logger.cpp @@ -49,8 +49,8 @@ std::ostream& operator<<(std::ostream& output, const Logger& logger) { return output << loggerInfo; } -Logger::Logger(std::shared_ptr reducer) { - reducer_ = reducer; +Logger::Logger(std::shared_ptr reducer) + : reducer_(std::move(reducer)) { ddp_logging_data_ = std::make_unique(); } diff --git a/torch/csrc/distributed/c10d/sequence_num.cpp b/torch/csrc/distributed/c10d/sequence_num.cpp index 1405084a383d0a..6ea35820179e7d 100644 --- a/torch/csrc/distributed/c10d/sequence_num.cpp +++ b/torch/csrc/distributed/c10d/sequence_num.cpp @@ -31,7 +31,7 @@ void SequenceNum::increment() { // Implemented without above get() and increment() so we don't repeatedly lock // and unblock. uint64_t SequenceNum::getAndIncrement() { - uint64_t curVal; + uint64_t curVal = 0; std::lock_guard lock(lock_); TORCH_CHECK(num_ != c10::nullopt); curVal = *num_; diff --git a/torch/csrc/profiler/kineto_client_interface.cpp b/torch/csrc/profiler/kineto_client_interface.cpp index caaad47ab28466..76e77360259f11 100644 --- a/torch/csrc/profiler/kineto_client_interface.cpp +++ b/torch/csrc/profiler/kineto_client_interface.cpp @@ -46,6 +46,7 @@ class LibKinetoClient : public libkineto::ClientInterface { (void)disableProfiler(); } + // NOLINTNEXTLINE(modernize-use-override) void set_withstack(bool withStack) override { withStack_ = withStack; }