Skip to content

Commit

Permalink
fix some tiny code issues (pytorch#95757)
Browse files Browse the repository at this point in the history
This PR tries to fix:
1. a misspelled NDEBUG preprocessing condition.
2. get rid of all writable-strings warnings.

Pull Request resolved: pytorch#95757
Approved by: https://github.com/soulitzer
  • Loading branch information
cyyever authored and pytorchmergebot committed Mar 1, 2023
1 parent f7b26bd commit 6786a24
Show file tree
Hide file tree
Showing 14 changed files with 26 additions and 33 deletions.
1 change: 0 additions & 1 deletion cmake/public/utils.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -439,7 +439,6 @@ function(torch_compile_options libname)
-Wno-unused-function
-Wno-unused-result
-Wno-missing-field-initializers
-Wno-write-strings
-Wno-unknown-pragmas
-Wno-type-limits
-Wno-array-bounds
Expand Down
8 changes: 4 additions & 4 deletions functorch/csrc/dim/dim.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -223,10 +223,10 @@ std::ostream& operator<<(std::ostream& ss, DimEntry entry) {

static int Dim_init(py::hdl<Dim> self, PyObject *args, PyObject *kwds) {
PY_BEGIN
static char* kwlist[] = {"name", "size", nullptr};
static constexpr const char* kwlist[] = {"name", "size", nullptr};
py::handle name;
py::handle size = nullptr;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", kwlist, &name, &size)) {
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O", const_cast<char **>(kwlist), &name, &size)) {
return -1;
}
self->init(py::object::borrow(name), (size.ptr() && !py::is_none(size)) ? py::to_int(size) : -1);
Expand Down Expand Up @@ -561,10 +561,10 @@ PyTypeObject DimList::Type = {

static int DimList_init(DimList *self, PyObject *args, PyObject *kwds) {
PY_BEGIN
static char* kwlist[] = {"len_or_dims", "name", nullptr};
static constexpr const char* kwlist[] = {"len_or_dims", "name", nullptr};
py::handle len_or_dims = nullptr;
PyObject* name = nullptr;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, &len_or_dims, &name)) {
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", const_cast<char**>(kwlist), &len_or_dims, &name)) {
return -1;
}
self->init(py::object::borrow(name ? name : Py_None));
Expand Down
1 change: 0 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -834,7 +834,6 @@ def configure_extension_build():
'-Wno-strict-overflow',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
'-Wno-write-strings',
'-Wno-unknown-pragmas',
# This is required for Python 2 declarations that are deprecated in 3.
'-Wno-deprecated-declarations',
Expand Down
6 changes: 0 additions & 6 deletions torch/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -109,15 +109,9 @@ elseif(APPLE)
else()
list(APPEND TORCH_PYTHON_COMPILE_OPTIONS
-fno-strict-aliasing
-Wno-write-strings
-Wno-strict-aliasing)
endif()

if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
list(APPEND TORCH_PYTHON_COMPILE_OPTIONS
-Wno-writable-strings)
endif()

if(USE_ITT)
list(APPEND TORCH_PYTHON_SRCS
${TORCH_SRC_DIR}/csrc/itt.cpp
Expand Down
6 changes: 3 additions & 3 deletions torch/csrc/StorageMethods.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -175,9 +175,9 @@ static PyObject* THPStorage_fromBuffer(
c10::ScalarType scalar_type = at::kByte;
Py_buffer buffer = {};
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
constexpr char* kwlist[] = {
constexpr const char* kwlist[] = {
"buffer", "byte_order", "count", "offset", "dtype", nullptr};
constexpr char* argtypes = "O|snnO";
constexpr const char* argtypes = "O|snnO";

if (!PyArg_ParseTupleAndKeywords(
args,
Expand Down Expand Up @@ -337,7 +337,7 @@ static PyObject* THPStorage_fromFile(
Py_ssize_t nbytes = 0;
int shared = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
constexpr char* kwlist[] = {"filename", "shared", "nbytes", nullptr};
constexpr const char* kwlist[] = {"filename", "shared", "nbytes", nullptr};
if (!PyArg_ParseTupleAndKeywords(
args,
keywds,
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/Stream.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ static PyObject* THPStream_pynew(
int64_t device_index = 0;
int64_t device_type = 0;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
constexpr char* kwlist[] = {
constexpr const char* kwlist[] = {
"stream_id", "device_index", "device_type", nullptr};
if (!PyArg_ParseTupleAndKeywords(
args,
Expand Down
18 changes: 9 additions & 9 deletions torch/csrc/autograd/python_engine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -179,15 +179,15 @@ PyObject* THPEngine_run_backward(
unsigned char allow_unreachable = 0;
unsigned char accumulate_grad =
0; // Indicate whether to accumulate grad into leaf Tensors or capture
constexpr char* accepted_kwargs[] = {// NOLINT
"tensors",
"grad_tensors",
"keep_graph",
"create_graph",
"inputs",
"allow_unreachable",
"accumulate_grad",
nullptr};
constexpr const char* accepted_kwargs[] = {// NOLINT
"tensors",
"grad_tensors",
"keep_graph",
"create_graph",
"inputs",
"allow_unreachable",
"accumulate_grad",
nullptr};
if (!PyArg_ParseTupleAndKeywords(
args,
kwargs,
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/autograd/python_legacy_variable.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ static PyObject* THPVariable_pynew(
const char* name = nullptr;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
constexpr char* accepted_args[] = {
constexpr const char* accepted_args[] = {
"data", "requires_grad", "volatile", "_grad_fn", "name", nullptr};
if (!PyArg_ParseTupleAndKeywords(
args,
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/cuda/Event.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ static PyObject* THCPEvent_pynew(
unsigned char interprocess = 0;

// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
constexpr char* kwlist[] = {
constexpr const char* kwlist[] = {
"enable_timing", "blocking", "interprocess", nullptr};
if (!PyArg_ParseTupleAndKeywords(
args,
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/cuda/Module.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -230,7 +230,7 @@ PyObject* THCPModule_setStream_wrap(
int64_t device_type = 0;

// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
constexpr char* kwlist[] = {
constexpr const char* kwlist[] = {
"stream_id", "device_index", "device_type", nullptr};
if (!PyArg_ParseTupleAndKeywords(
args,
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/cuda/Stream.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ static PyObject* THCPStream_pynew(
uint64_t stream_ptr = 0;

// NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
constexpr char* kwlist[] = {
constexpr const char* kwlist[] = {
"priority",
"stream_id",
"device_index",
Expand Down
4 changes: 2 additions & 2 deletions torch/csrc/distributed/autograd/engine/dist_engine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ using torch::autograd::ReadyQueue;
using torch::autograd::validate_outputs;
using torch::autograd::variable_list;

static constexpr char* kNumBackwardPasses = "num_current_backward_passes";
static constexpr char* kNumAutogradContexts = "num_autograd_contexts";
static constexpr const char* kNumBackwardPasses = "num_current_backward_passes";
static constexpr const char* kNumAutogradContexts = "num_autograd_contexts";

// This hook does 3 things:
// 1. Call pre hooks of the original AccumulateGrad to modify the input grad.
Expand Down
2 changes: 1 addition & 1 deletion torch/csrc/jit/codegen/cuda/interface.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ class LoadingNvfuserLibrary {
try {
nvfuserLib_ = std::make_shared<at::DynamicLibrary>(library_name.c_str());
} catch (const c10::DynamicLibraryError& e) {
#if defined(BUILD_NVFUSER) || !defined(NODEBUG)
#if defined(BUILD_NVFUSER) || !defined(NDEBUG)
TORCH_WARN("Loading nvfuser library failed with: ", e.msg());
#endif
}
Expand Down
3 changes: 2 additions & 1 deletion torch/csrc/jit/tensorexpr/llvm_jit.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ namespace jit {
namespace tensorexpr {

inline std::string formatError(llvm::Error&& err, const char* msg) {
static constexpr char* defaultErrorMsg = "Unexpected failure in LLVM JIT";
static constexpr const char* defaultErrorMsg =
"Unexpected failure in LLVM JIT";
std::string errorMsg(msg ? msg : defaultErrorMsg);
llvm::raw_string_ostream ss(errorMsg);
ss << ": " << err;
Expand Down

0 comments on commit 6786a24

Please sign in to comment.