From 6786a24fd2829c395c20840b558c6f9f3fff2d21 Mon Sep 17 00:00:00 2001
From: cyy
Date: Wed, 1 Mar 2023 23:27:28 +0000
Subject: [PATCH] fix some tiny code issues (#95757)

This PR tries to:
1. fix a misspelled NDEBUG preprocessing condition.
2. get rid of all writable-strings warnings.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/95757
Approved by: https://github.com/soulitzer
---
 cmake/public/utils.cmake                       |  1 -
 functorch/csrc/dim/dim.cpp                     |  8 ++++----
 setup.py                                       |  1 -
 torch/CMakeLists.txt                           |  6 ------
 torch/csrc/StorageMethods.cpp                  |  6 +++---
 torch/csrc/Stream.cpp                          |  2 +-
 torch/csrc/autograd/python_engine.cpp          | 18 +++++++++---------
 torch/csrc/autograd/python_legacy_variable.cpp |  2 +-
 torch/csrc/cuda/Event.cpp                      |  2 +-
 torch/csrc/cuda/Module.cpp                     |  2 +-
 torch/csrc/cuda/Stream.cpp                     |  2 +-
 .../autograd/engine/dist_engine.cpp            |  4 ++--
 torch/csrc/jit/codegen/cuda/interface.cpp      |  2 +-
 torch/csrc/jit/tensorexpr/llvm_jit.h           |  3 ++-
 14 files changed, 26 insertions(+), 33 deletions(-)

diff --git a/cmake/public/utils.cmake b/cmake/public/utils.cmake
index 0ce0f3b080c95..745bbc8545036 100644
--- a/cmake/public/utils.cmake
+++ b/cmake/public/utils.cmake
@@ -439,7 +439,6 @@ function(torch_compile_options libname)
         -Wno-unused-function
         -Wno-unused-result
         -Wno-missing-field-initializers
-        -Wno-write-strings
         -Wno-unknown-pragmas
         -Wno-type-limits
         -Wno-array-bounds
diff --git a/functorch/csrc/dim/dim.cpp b/functorch/csrc/dim/dim.cpp
index 332e6a935c5b0..f2041671bb151 100644
--- a/functorch/csrc/dim/dim.cpp
+++ b/functorch/csrc/dim/dim.cpp
@@ -223,10 +223,10 @@ std::ostream& operator<<(std::ostream& ss, DimEntry entry) {
 
 static int Dim_init(py::hdl<Dim> self, PyObject *args, PyObject *kwds) {
     PY_BEGIN
-    static char* kwlist[] = {"name", "size", nullptr};
+    static constexpr const char* kwlist[] = {"name", "size", nullptr};
     py::handle name;
     py::handle size = nullptr;
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist, &name, &size)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", const_cast<char**>(kwlist), &name, &size)) {
         return -1;
     }
     self->init(py::object::borrow(name), (size.ptr() && !py::is_none(size)) ? py::to_int(size) : -1);
@@ -561,10 +561,10 @@ PyTypeObject DimList::Type = {
 
 static int DimList_init(DimList *self, PyObject *args, PyObject *kwds) {
     PY_BEGIN
-    static char* kwlist[] = {"len_or_dims", "name", nullptr};
+    static constexpr const char* kwlist[] = {"len_or_dims", "name", nullptr};
     py::handle len_or_dims = nullptr;
     PyObject* name = nullptr;
-    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, &len_or_dims, &name)) {
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", const_cast<char**>(kwlist), &len_or_dims, &name)) {
         return -1;
     }
     self->init(py::object::borrow(name ? name : Py_None));
diff --git a/setup.py b/setup.py
index d4b64795cbd94..5b3be7c1df63a 100644
--- a/setup.py
+++ b/setup.py
@@ -834,7 +834,6 @@ def configure_extension_build():
             '-Wno-strict-overflow',
             '-Wno-unused-parameter',
             '-Wno-missing-field-initializers',
-            '-Wno-write-strings',
             '-Wno-unknown-pragmas',
             # This is required for Python 2 declarations that are deprecated in 3.
             '-Wno-deprecated-declarations',
diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt
index fb98cda761197..c6c23f25a417f 100644
--- a/torch/CMakeLists.txt
+++ b/torch/CMakeLists.txt
@@ -109,15 +109,9 @@ elseif(APPLE)
 else()
     list(APPEND TORCH_PYTHON_COMPILE_OPTIONS
         -fno-strict-aliasing
-        -Wno-write-strings
         -Wno-strict-aliasing)
 endif()
 
-if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
-  list(APPEND TORCH_PYTHON_COMPILE_OPTIONS
-    -Wno-writable-strings)
-endif()
-
 if(USE_ITT)
   list(APPEND TORCH_PYTHON_SRCS
     ${TORCH_SRC_DIR}/csrc/itt.cpp
diff --git a/torch/csrc/StorageMethods.cpp b/torch/csrc/StorageMethods.cpp
index 410b044ba283c..e0a5a48d7e236 100644
--- a/torch/csrc/StorageMethods.cpp
+++ b/torch/csrc/StorageMethods.cpp
@@ -175,9 +175,9 @@ static PyObject* THPStorage_fromBuffer(
   c10::ScalarType scalar_type = at::kByte;
   Py_buffer buffer = {};
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-  constexpr char* kwlist[] = {
+  constexpr const char* kwlist[] = {
       "buffer", "byte_order", "count", "offset", "dtype", nullptr};
-  constexpr char* argtypes = "O|snnO";
+  constexpr const char* argtypes = "O|snnO";
 
   if (!PyArg_ParseTupleAndKeywords(
           args,
@@ -337,7 +337,7 @@ static PyObject* THPStorage_fromFile(
   Py_ssize_t nbytes = 0;
   int shared = 0;
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-  constexpr char* kwlist[] = {"filename", "shared", "nbytes", nullptr};
+  constexpr const char* kwlist[] = {"filename", "shared", "nbytes", nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
           keywds,
diff --git a/torch/csrc/Stream.cpp b/torch/csrc/Stream.cpp
index 398e7b34af789..a4b80abcaae4a 100644
--- a/torch/csrc/Stream.cpp
+++ b/torch/csrc/Stream.cpp
@@ -17,7 +17,7 @@ static PyObject* THPStream_pynew(
   int64_t device_index = 0;
   int64_t device_type = 0;
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-  constexpr char* kwlist[] = {
+  constexpr const char* kwlist[] = {
       "stream_id", "device_index", "device_type", nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
diff --git a/torch/csrc/autograd/python_engine.cpp b/torch/csrc/autograd/python_engine.cpp
index 04aaa85c6c46d..22bab19f0ca6b 100644
--- a/torch/csrc/autograd/python_engine.cpp
+++ b/torch/csrc/autograd/python_engine.cpp
@@ -179,15 +179,15 @@ PyObject* THPEngine_run_backward(
   unsigned char allow_unreachable = 0;
   unsigned char accumulate_grad =
       0; // Indicate whether to accumulate grad into leaf Tensors or capture
-  constexpr char* accepted_kwargs[] = {// NOLINT
-      "tensors",
-      "grad_tensors",
-      "keep_graph",
-      "create_graph",
-      "inputs",
-      "allow_unreachable",
-      "accumulate_grad",
-      nullptr};
+  constexpr const char* accepted_kwargs[] = {// NOLINT
+      "tensors",
+      "grad_tensors",
+      "keep_graph",
+      "create_graph",
+      "inputs",
+      "allow_unreachable",
+      "accumulate_grad",
+      nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
           kwargs,
diff --git a/torch/csrc/autograd/python_legacy_variable.cpp b/torch/csrc/autograd/python_legacy_variable.cpp
index 2ae6d646be68f..e70a40d92ad16 100644
--- a/torch/csrc/autograd/python_legacy_variable.cpp
+++ b/torch/csrc/autograd/python_legacy_variable.cpp
@@ -26,7 +26,7 @@ static PyObject* THPVariable_pynew(
   const char* name = nullptr;
 
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-  constexpr char* accepted_args[] = {
+  constexpr const char* accepted_args[] = {
       "data", "requires_grad", "volatile", "_grad_fn", "name", nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
diff --git a/torch/csrc/cuda/Event.cpp b/torch/csrc/cuda/Event.cpp
index 426064c9e8236..980e3cda0faa1 100644
--- a/torch/csrc/cuda/Event.cpp
+++ b/torch/csrc/cuda/Event.cpp
@@ -25,7 +25,7 @@ static PyObject* THCPEvent_pynew(
   unsigned char interprocess = 0;
 
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-  constexpr char* kwlist[] = {
+  constexpr const char* kwlist[] = {
       "enable_timing", "blocking", "interprocess", nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
diff --git a/torch/csrc/cuda/Module.cpp b/torch/csrc/cuda/Module.cpp
index 6b1c44091d391..1dcb685f0e028 100644
--- a/torch/csrc/cuda/Module.cpp
+++ b/torch/csrc/cuda/Module.cpp
@@ -230,7 +230,7 @@ PyObject* THCPModule_setStream_wrap(
   int64_t device_type = 0;
 
   // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
-  constexpr char* kwlist[] = {
+  constexpr const char* kwlist[] = {
       "stream_id", "device_index", "device_type", nullptr};
   if (!PyArg_ParseTupleAndKeywords(
           args,
diff --git a/torch/csrc/cuda/Stream.cpp b/torch/csrc/cuda/Stream.cpp
index 936af674c24de..18c30dfdd1fe2 100644
--- a/torch/csrc/cuda/Stream.cpp
+++ b/torch/csrc/cuda/Stream.cpp
@@ -28,7 +28,7 @@ static PyObject* THCPStream_pynew(
   uint64_t stream_ptr = 0;
 
   // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
-  constexpr char* kwlist[] = {
+  constexpr const char* kwlist[] = {
       "priority",
       "stream_id",
       "device_index",
diff --git a/torch/csrc/distributed/autograd/engine/dist_engine.cpp b/torch/csrc/distributed/autograd/engine/dist_engine.cpp
index 97a73919edfec..6ca5c3d148b92 100644
--- a/torch/csrc/distributed/autograd/engine/dist_engine.cpp
+++ b/torch/csrc/distributed/autograd/engine/dist_engine.cpp
@@ -26,8 +26,8 @@ using torch::autograd::ReadyQueue;
 using torch::autograd::validate_outputs;
 using torch::autograd::variable_list;
 
-static constexpr char* kNumBackwardPasses = "num_current_backward_passes";
-static constexpr char* kNumAutogradContexts = "num_autograd_contexts";
+static constexpr const char* kNumBackwardPasses = "num_current_backward_passes";
+static constexpr const char* kNumAutogradContexts = "num_autograd_contexts";
 
 // This hook does 3 things:
 //   1. Call pre hooks of the original AccumulateGrad to modify the input grad.
diff --git a/torch/csrc/jit/codegen/cuda/interface.cpp b/torch/csrc/jit/codegen/cuda/interface.cpp
index 6d4cdc0560d69..e22e7575716ae 100644
--- a/torch/csrc/jit/codegen/cuda/interface.cpp
+++ b/torch/csrc/jit/codegen/cuda/interface.cpp
@@ -44,7 +44,7 @@ class LoadingNvfuserLibrary {
     try {
      nvfuserLib_ = std::make_shared<at::DynamicLibrary>(library_name.c_str());
     } catch (const c10::DynamicLibraryError& e) {
-#if defined(BUILD_NVFUSER) || !defined(NODEBUG)
+#if defined(BUILD_NVFUSER) || !defined(NDEBUG)
       TORCH_WARN("Loading nvfuser library failed with: ", e.msg());
 #endif
     }
diff --git a/torch/csrc/jit/tensorexpr/llvm_jit.h b/torch/csrc/jit/tensorexpr/llvm_jit.h
index a00ec2468c4af..4aca55a9abf47 100644
--- a/torch/csrc/jit/tensorexpr/llvm_jit.h
+++ b/torch/csrc/jit/tensorexpr/llvm_jit.h
@@ -21,7 +21,8 @@ namespace jit {
 namespace tensorexpr {
 
 inline std::string formatError(llvm::Error&& err, const char* msg) {
-  static constexpr char* defaultErrorMsg = "Unexpected failure in LLVM JIT";
+  static constexpr const char* defaultErrorMsg =
+      "Unexpected failure in LLVM JIT";
   std::string errorMsg(msg ? msg : defaultErrorMsg);
   llvm::raw_string_ostream ss(errorMsg);
   ss << ": " << err;
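
Note on the kwlist changes: every PyArg_ParseTupleAndKeywords call site in this
patch follows the same pattern. String literals have type `const char[]`, so
initializing a `char*` array from them is exactly what triggers -Wwrite-strings
(GCC) / -Wwritable-strings (Clang). Keeping the array const-correct and casting
only at the call boundary confines the cast to the legacy `char**` parameter of
PyArg_ParseTupleAndKeywords. A minimal standalone sketch (illustrative only;
`parse_example` is not a function from this patch):

    #include <Python.h>

    // Parses `f(name, size=None)`-style arguments, mirroring the Dim_init
    // call site above. Returns nonzero on success, 0 on failure.
    static int parse_example(PyObject* args, PyObject* kwds) {
      PyObject* name = nullptr;
      PyObject* size = nullptr;
      // const-correct: no writable-strings warning is emitted here,
      // unlike `static char* kwlist[] = {...}`.
      static constexpr const char* kwlist[] = {"name", "size", nullptr};
      // The const_cast is needed only because the CPython API declares the
      // keyword list as `char**` for historical reasons; the strings are
      // never written through it.
      return PyArg_ParseTupleAndKeywords(
          args, kwds, "O|O", const_cast<char**>(kwlist), &name, &size);
    }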