diff --git a/README.md b/README.md
index 1f97464b..e16f74e4 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,65 @@
+### Warning: This version has major internal changes.
+
+SdFat version 2.3.0 has major changes to implement RP2040/RP2350 SDIO.
+
+In addition there are a number of bug fixes.
+
+Begin by running the Rp2040SdioSetup example to try RP2040/RP2350 SDIO.
+
+This example requires an SDIO card socket with the following six lines.
+
+CLK - A clock signal sent to the card by the MCU.
+CMD - A bidirectional line for commands and responses.
+DAT[0:3] - Four bidirectional lines for data transfer.
+
+CLK and CMD can be connected to any GPIO pins. DAT[0:3] can be connected
+to any four consecutive GPIO pins in the order DAT0, DAT1, DAT2, DAT3.
+
+Here is an example of SDIO for the Pico using an Adafruit socket, PiCowbell
+Proto, and PiCowbell Proto Doubler.
+
+![Pico SDIO wiring with Adafruit socket and PiCowbell Proto](images/SdioSpi.jpg)
+
+This socket supports SDIO with:
+```
+#define RP_CLK_GPIO 10
+#define RP_CMD_GPIO 11
+#define RP_DAT0_GPIO 12 // DAT1: GPIO13, DAT2: GPIO14, DAT3: GPIO15.
+```
+It can also be used on SPI1 with:
+```
+const uint8_t SD_CS_PIN = 15;
+#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK, &SPI1)
+
+// In setup():
+SPI1.setSCK(10);
+SPI1.setTX(11);
+SPI1.setRX(12);
+```
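+
+Either configuration is then passed to begin(). Here is a minimal sketch
+fragment, assuming the SDIO defines above:
+```
+SdFs sd;
+
+void setup() {
+  Serial.begin(9600);
+  // SdioConfig(CLK, CMD, DAT0) selects RP2040/RP2350 PIO SDIO.
+  if (!sd.begin(SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO))) {
+    sd.initErrorHalt(&Serial);
+  }
+}
+```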
+
+This setup produces the following results in the bench example using SDIO:
+
+```
+FILE_SIZE_MB = 5
+BUF_SIZE = 512 bytes
+Starting write test, please wait.
+
+write speed and latency
+speed,max,min,avg
+KB/Sec,usec,usec,usec
+15014.05,1165,32,32
+15289.54,1249,32,32
+
+Starting read test, please wait.
+
+read speed and latency
+speed,max,min,avg
+KB/Sec,usec,usec,usec
+15624.00,58,32,32
+15624.00,51,32,32
+```
+
File copy constructors and file assignment operators have been made private by
default in 2.2.3 to prevent call by value and multiple copies of file instances.
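+
+So file objects should now be passed by reference or by pointer. A minimal
+sketch of the pattern (printSize is a hypothetical helper):
+```
+// Pass the file by reference - the copy constructor is private.
+void printSize(FsFile& file) {
+  Serial.println((uint32_t)file.size());
+}
+```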
diff --git a/doc/Doxyfile b/doc/Doxyfile
index 54e8ce86..78b5b418 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -1,4 +1,4 @@
-# Doxyfile 1.9.6
+# Doxyfile 1.10.0
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -63,6 +63,12 @@ PROJECT_BRIEF =
PROJECT_LOGO =
+# With the PROJECT_ICON tag one can specify an icon that is included in the tabs
+# when the HTML document is shown. Doxygen will copy the logo to the output
+# directory.
+
+PROJECT_ICON =
+
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
@@ -363,6 +369,17 @@ MARKDOWN_SUPPORT = YES
TOC_INCLUDE_HEADINGS = 0
+# The MARKDOWN_ID_STYLE tag can be used to specify the algorithm used to
+# generate identifiers for the Markdown headings. Note: Every identifier is
+# unique.
+# Possible values are: DOXYGEN use a fixed 'autotoc_md' string followed by a
+# sequence number starting at 0 and GITHUB use the lower case version of title
+# with any whitespace replaced by '-' and punctuation characters removed.
+# The default value is: DOXYGEN.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+MARKDOWN_ID_STYLE = DOXYGEN
+
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
@@ -487,6 +504,14 @@ LOOKUP_CACHE_SIZE = 0
NUM_PROC_THREADS = 1
+# If the TIMESTAMP tag is set different from NO then each generated page will
+# contain the date or date and time when the page was generated. Setting this to
+# NO can help when comparing the output of multiple runs.
+# Possible values are: YES, NO, DATETIME and DATE.
+# The default value is: NO.
+
+TIMESTAMP = NO
+
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
@@ -872,7 +897,14 @@ WARN_IF_UNDOC_ENUM_VAL = NO
# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
# at the end of the doxygen process doxygen will return with a non-zero status.
-# Possible values are: NO, YES and FAIL_ON_WARNINGS.
+# If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS_PRINT then doxygen behaves
+# like FAIL_ON_WARNINGS but in case no WARN_LOGFILE is defined doxygen will not
+# write the warning messages in between other messages but write them at the end
+# of a run, in case a WARN_LOGFILE is defined the warning messages will be
+# besides being in the defined file also be shown at the end of a run, unless
+# the WARN_LOGFILE is defined as - i.e. standard output (stdout) in that case
+# the behavior will remain as with the setting FAIL_ON_WARNINGS.
+# Possible values are: NO, YES, FAIL_ON_WARNINGS and FAIL_ON_WARNINGS_PRINT.
# The default value is: NO.
WARN_AS_ERROR = NO
@@ -926,7 +958,9 @@ INPUT = ../src \
../src/SpiDriver \
mainpage.h \
../src/FsLib \
- ../src/FsLib
+ ../src/FsLib \
+ ../src/SdCard/TeensySdio \
+ ../src/SdCard/Rp2040Sdio
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -959,12 +993,12 @@ INPUT_FILE_ENCODING =
# Note the list of default checked file patterns might differ from the list of
# default file extension mappings.
#
-# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
-# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
-# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml,
-# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C
-# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd,
-# *.vhdl, *.ucf, *.qsf and *.ice.
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cxxm,
+# *.cpp, *.cppm, *.ccm, *.c++, *.c++m, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl,
+# *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, *.h++, *.ixx, *.l, *.cs, *.d,
+# *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to
+# be provided as doxygen C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f18, *.f, *.for, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice.
FILE_PATTERNS = *.c \
*.cc \
@@ -1016,7 +1050,9 @@ EXCLUDE = ../src/common/FsStructs.h \
../src/common/PrintBasic.h \
../src/common/PrintBasic.cpp \
../src/SpiDriver/SdSpiBareUnoDriver.h \
- ../src/iostream/StreamBaseClass.cpp
+ ../src/iostream/StreamBaseClass.cpp \
+ ../src/SdCard/Rp2040Sdio/DbgLog.h \
+ ../src/common/FsStructs.h
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
@@ -1039,9 +1075,6 @@ EXCLUDE_PATTERNS =
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# ANamespace::AClass, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
@@ -1155,7 +1188,8 @@ FORTRAN_COMMENT_AFTER = 72
SOURCE_BROWSER = NO
# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
+# multi-line macros, enums or list initialized variables directly into the
+# documentation.
# The default value is: NO.
INLINE_SOURCES = NO
@@ -1424,15 +1458,6 @@ HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to YES can help to show when doxygen was last run and thus if the
-# documentation is up to date.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP = YES
-
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
@@ -1452,6 +1477,33 @@ HTML_DYNAMIC_MENUS = YES
HTML_DYNAMIC_SECTIONS = NO
+# If the HTML_CODE_FOLDING tag is set to YES then classes and functions can be
+# dynamically folded and expanded in the generated HTML source code.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_CODE_FOLDING = YES
+
+# If the HTML_COPY_CLIPBOARD tag is set to YES then doxygen will show an icon in
+# the top right corner of code and text fragments that allows the user to copy
+# its content to the clipboard. Note this only works if supported by the browser
+# and the web page is served via a secure context (see:
+# https://www.w3.org/TR/secure-contexts/), i.e. using the https: or file:
+# protocol.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COPY_CLIPBOARD = YES
+
+# Doxygen stores a couple of settings persistently in the browser (via e.g.
+# cookies). By default these settings apply to all HTML pages generated by
+# doxygen across all projects. The HTML_PROJECT_COOKIE tag can be used to store
+# the settings under a project specific key, such that the user preferences will
+# be stored separately.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_PROJECT_COOKIE =
+
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
# and collapse entries dynamically later on. Doxygen will expand the tree to
@@ -1582,6 +1634,16 @@ BINARY_TOC = NO
TOC_EXPAND = NO
+# The SITEMAP_URL tag is used to specify the full URL of the place where the
+# generated documentation will be placed on the server by the user during the
+# deployment of the documentation. The generated sitemap is called sitemap.xml
+# and placed on the directory specified by HTML_OUTPUT. In case no SITEMAP_URL
+# is specified no sitemap is generated. For information about the sitemap
+# protocol see https://www.sitemaps.org
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SITEMAP_URL =
+
# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
@@ -2070,9 +2132,16 @@ PDF_HYPERLINKS = YES
USE_PDFLATEX = YES
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help.
+# The LATEX_BATCHMODE tag signals the behavior of LaTeX in case of an error.
+# Possible values are: NO same as ERROR_STOP, YES same as BATCH, BATCH In batch
+# mode nothing is printed on the terminal, errors are scrolled as if <return> is
+# hit at every error; missing files that TeX tries to input or request from
+# keyboard input (\read on a not open input stream) cause the job to abort,
+# NON_STOP In nonstop mode the diagnostic message will appear on the terminal,
+# but there is no possibility of user interaction just like in batch mode,
+# SCROLL In scroll mode, TeX will stop only for missing files to input or if
+# keyboard input is necessary and ERROR_STOP In errorstop mode, TeX will stop at
+# each error, asking for user intervention.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -2093,14 +2162,6 @@ LATEX_HIDE_INDICES = NO
LATEX_BIB_STYLE = plain
-# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_TIMESTAMP = NO
-
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
@@ -2266,7 +2327,7 @@ DOCBOOK_OUTPUT = docbook
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
-# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
+# AutoGen Definitions (see https://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# The default value is: NO.
@@ -2277,6 +2338,28 @@ GENERATE_AUTOGEN_DEF = NO
# Configuration options related to Sqlite3 output
#---------------------------------------------------------------------------
+# If the GENERATE_SQLITE3 tag is set to YES doxygen will generate a Sqlite3
+# database with symbols found by doxygen stored in tables.
+# The default value is: NO.
+
+GENERATE_SQLITE3 = NO
+
+# The SQLITE3_OUTPUT tag is used to specify where the Sqlite3 database will be
+# put. If a relative path is entered the value of OUTPUT_DIRECTORY will be put
+# in front of it.
+# The default directory is: sqlite3.
+# This tag requires that the tag GENERATE_SQLITE3 is set to YES.
+
+SQLITE3_OUTPUT = sqlite3
+
+# If the SQLITE3_RECREATE_DB tag is set to YES, the existing doxygen_sqlite3.db
+# database file will be recreated with each doxygen run. If set to NO, doxygen
+# will warn if a database file is already found and not modify it.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_SQLITE3 is set to YES.
+
+SQLITE3_RECREATE_DB = YES
+
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
@@ -2423,15 +2506,15 @@ TAGFILES =
GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
-# the class index. If set to NO, only the inherited external classes will be
-# listed.
+# If the ALLEXTERNALS tag is set to YES, all external classes and namespaces
+# will be listed in the class and namespace index. If set to NO, only the
+# inherited external classes will be listed.
# The default value is: NO.
ALLEXTERNALS = NO
# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will be
+# in the topic index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
@@ -2445,16 +2528,9 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
+# Configuration options related to diagram generator tools
#---------------------------------------------------------------------------
-# You can include diagrams made with dia in doxygen documentation. Doxygen will
-# then run dia to produce the diagram and insert it in the documentation. The
-# DIA_PATH tag allows you to specify the directory where the dia binary resides.
-# If left empty dia is assumed to be found in the default search path.
-
-DIA_PATH =
-
# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
@@ -2463,7 +2539,7 @@ HIDE_UNDOC_RELATIONS = YES
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# https://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: NO.
@@ -2516,13 +2592,19 @@ DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
DOT_FONTPATH =
-# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a
-# graph for each documented class showing the direct and indirect inheritance
-# relations. In case HAVE_DOT is set as well dot will be used to draw the graph,
-# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set
-# to TEXT the direct and indirect inheritance relations will be shown as texts /
-# links.
-# Possible values are: NO, YES, TEXT and GRAPH.
+# If the CLASS_GRAPH tag is set to YES or GRAPH or BUILTIN then doxygen will
+# generate a graph for each documented class showing the direct and indirect
+# inheritance relations. In case the CLASS_GRAPH tag is set to YES or GRAPH and
+# HAVE_DOT is enabled as well, then dot will be used to draw the graph. In case
+# the CLASS_GRAPH tag is set to YES and HAVE_DOT is disabled or if the
+# CLASS_GRAPH tag is set to BUILTIN, then the built-in generator will be used.
+# If the CLASS_GRAPH tag is set to TEXT the direct and indirect inheritance
+# relations will be shown as texts / links. Explicit enabling an inheritance
+# graph or choosing a different representation for an inheritance graph of a
+# specific class, can be accomplished by means of the command \inheritancegraph.
+# Disabling an inheritance graph can be accomplished by means of the command
+# \hideinheritancegraph.
+# Possible values are: NO, YES, TEXT, GRAPH and BUILTIN.
# The default value is: YES.
CLASS_GRAPH = YES
@@ -2530,15 +2612,21 @@ CLASS_GRAPH = YES
# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
# graph for each documented class showing the direct and indirect implementation
# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
+# class with other documented classes. Explicit enabling a collaboration graph,
+# when COLLABORATION_GRAPH is set to NO, can be accomplished by means of the
+# command \collaborationgraph. Disabling a collaboration graph can be
+# accomplished by means of the command \hidecollaborationgraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
COLLABORATION_GRAPH = YES
# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies. See also the chapter Grouping
-# in the manual.
+# groups, showing the direct groups dependencies. Explicit enabling a group
+# dependency graph, when GROUP_GRAPHS is set to NO, can be accomplished by means
+# of the command \groupgraph. Disabling a directory graph can be accomplished by
+# means of the command \hidegroupgraph. See also the chapter Grouping in the
+# manual.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2580,8 +2668,8 @@ DOT_UML_DETAILS = NO
# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
# to display on a single line. If the actual line length exceeds this threshold
-# significantly it will wrapped across multiple lines. Some heuristics are apply
-# to avoid ugly line breaks.
+# significantly it will be wrapped across multiple lines. Some heuristics are
+# applied to avoid ugly line breaks.
# Minimum value: 0, maximum value: 1000, default value: 17.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2598,7 +2686,9 @@ TEMPLATE_RELATIONS = NO
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
# direct and indirect include dependencies of the file with other documented
-# files.
+# files. Explicit enabling an include graph, when INCLUDE_GRAPH is set to NO,
+# can be accomplished by means of the command \includegraph. Disabling an
+# include graph can be accomplished by means of the command \hideincludegraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2607,7 +2697,10 @@ INCLUDE_GRAPH = YES
# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
# set to YES then doxygen will generate a graph for each documented file showing
# the direct and indirect include dependencies of the file with other documented
-# files.
+# files. Explicit enabling an included by graph, when INCLUDED_BY_GRAPH is set
+# to NO, can be accomplished by means of the command \includedbygraph. Disabling
+# an included by graph can be accomplished by means of the command
+# \hideincludedbygraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2647,7 +2740,10 @@ GRAPHICAL_HIERARCHY = YES
# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
# dependencies a directory has on other directories in a graphical way. The
# dependency relations are determined by the #include relations between the
-# files in the directories.
+# files in the directories. Explicit enabling a directory graph, when
+# DIRECTORY_GRAPH is set to NO, can be accomplished by means of the command
+# \directorygraph. Disabling a directory graph can be accomplished by means of
+# the command \hidedirectorygraph.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2663,7 +2759,7 @@ DIR_GRAPH_MAX_DEPTH = 1
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
-# http://www.graphviz.org/)).
+# https://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
@@ -2700,11 +2796,12 @@ DOT_PATH =
DOTFILE_DIRS =
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
-MSCFILE_DIRS =
+DIA_PATH =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
@@ -2781,3 +2878,19 @@ GENERATE_LEGEND = YES
# The default value is: YES.
DOT_CLEANUP = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. If the MSCGEN_TOOL tag is left empty (the default), then doxygen will
+# use a built-in version of mscgen tool to produce the charts. Alternatively,
+# the MSCGEN_TOOL tag can also specify the name an external tool. For instance,
+# specifying prog as the value, doxygen will call the tool as prog -T
+# <outfile_format> -o <outfile> <infile>. The external tool should support
+# output file formats "png", "eps", "svg", and "ismap".
+
+MSCGEN_TOOL =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
diff --git a/doc/SdErrorCodes.txt b/doc/SdErrorCodes.txt
index 91802597..29cc64ee 100644
--- a/doc/SdErrorCodes.txt
+++ b/doc/SdErrorCodes.txt
@@ -1,4 +1,4 @@
-2022-07-01
+2025-01-01
Run the SdErrorCode example to produce an updated list.
@@ -12,7 +12,7 @@ Code,Symbol - failed operation
0X06,SD_CARD_ERROR_CMD8 - Send and check interface settings
0X07,SD_CARD_ERROR_CMD9 - Read CSD data
0X08,SD_CARD_ERROR_CMD10 - Read CID data
-0X09,SD_CARD_ERROR_CMD12 - Stop multiple block read
+0X09,SD_CARD_ERROR_CMD12 - Stop multiple block transmission
0X0A,SD_CARD_ERROR_CMD13 - Read card status
0X0B,SD_CARD_ERROR_CMD17 - Read single block
0X0C,SD_CARD_ERROR_CMD18 - Read multiple blocks
diff --git a/doc/html.zip b/doc/html.zip
index efbd3841..ee7cbc21 100644
Binary files a/doc/html.zip and b/doc/html.zip differ
diff --git a/examples/AvrAdcLogger/AvrAdcLogger.ino b/examples/AvrAdcLogger/AvrAdcLogger.ino
index 6c70af8b..03200658 100644
--- a/examples/AvrAdcLogger/AvrAdcLogger.ino
+++ b/examples/AvrAdcLogger/AvrAdcLogger.ino
@@ -20,11 +20,10 @@
*/
#ifdef __AVR__
#include <SPI.h>
-
+#include "SdFat.h"
#include "AvrAdcLogger.h"
#include "BufferedPrint.h"
#include "FreeStack.h"
-#include "SdFat.h"
// Save SRAM if 328.
#ifdef __AVR_ATmega328P__
diff --git a/examples/BufferedPrint/BufferedPrint.ino b/examples/BufferedPrint/BufferedPrint.ino
index 5a924eb6..a87ca799 100644
--- a/examples/BufferedPrint/BufferedPrint.ino
+++ b/examples/BufferedPrint/BufferedPrint.ino
@@ -1,9 +1,9 @@
// Test and benchmark of the fast bufferedPrint class.
//
// Mainly for AVR but may improve print performance with other CPUs.
-#include "BufferedPrint.h"
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
-
+#include "BufferedPrint.h"
// SD_FAT_TYPE = 0 for SdFat/File as defined in SdFatConfig.h,
// 1 for FAT16/FAT32, 2 for exFAT, 3 for FAT16/FAT32 and exFAT.
#define SD_FAT_TYPE 3
@@ -28,13 +28,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
diff --git a/examples/DirectoryFunctions/DirectoryFunctions.ino b/examples/DirectoryFunctions/DirectoryFunctions.ino
index 498ce513..9ab4db70 100644
--- a/examples/DirectoryFunctions/DirectoryFunctions.ino
+++ b/examples/DirectoryFunctions/DirectoryFunctions.ino
@@ -1,6 +1,7 @@
/*
* Example use of chdir(), ls(), mkdir(), and rmdir().
*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
#include "sdios.h"
@@ -28,13 +29,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
//------------------------------------------------------------------------------
#if SD_FAT_TYPE == 0
diff --git a/examples/ExFatLogger/ExFatLogger.ino b/examples/ExFatLogger/ExFatLogger.ino
index f3a6c55d..9062df60 100644
--- a/examples/ExFatLogger/ExFatLogger.ino
+++ b/examples/ExFatLogger/ExFatLogger.ino
@@ -3,9 +3,10 @@
//
// The maximum data rate will depend on the quality of your SD,
// the size of the FIFO, and using dedicated SPI.
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
+#include "SdFat.h"
#include "ExFatLogger.h"
#include "FreeStack.h"
-#include "SdFat.h"
//------------------------------------------------------------------------------
// This example was designed for exFAT but will support FAT16/FAT32.
// Note: Uno will not support SD_FAT_TYPE = 3.
@@ -69,13 +70,16 @@ const uint32_t PREALLOCATE_SIZE_MiB = 1024UL;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
// Save SRAM if 328.
#ifdef __AVR_ATmega328P__
@@ -92,7 +96,7 @@ void logRecord(data_t* data, uint16_t overrun) {
data->adc[0] = 0X8000 | overrun;
} else {
for (size_t i = 0; i < ADC_COUNT; i++) {
- data->adc[i] = analogRead(i);
+ data->adc[i] = analogRead(A0 + i);
}
}
}
diff --git a/examples/OpenNext/OpenNext.ino b/examples/OpenNext/OpenNext.ino
index 2e58110d..a2ff1242 100644
--- a/examples/OpenNext/OpenNext.ino
+++ b/examples/OpenNext/OpenNext.ino
@@ -1,6 +1,7 @@
/*
* Print size, modify date/time, and name for all files in root.
*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
// SD_FAT_TYPE = 0 for SdFat/File as defined in SdFatConfig.h,
@@ -27,13 +28,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
diff --git a/examples/ReadCsvFile/ReadCsvFile.ino b/examples/ReadCsvFile/ReadCsvFile.ino
index 7fb61cad..b6a85c1a 100644
--- a/examples/ReadCsvFile/ReadCsvFile.ino
+++ b/examples/ReadCsvFile/ReadCsvFile.ino
@@ -1,3 +1,4 @@
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
// SD_FAT_TYPE = 0 for SdFat/File as defined in SdFatConfig.h,
@@ -24,13 +25,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
@@ -126,7 +130,7 @@ void setup() {
if (!file.open("ReadCsvDemo.csv", FILE_WRITE)) {
error("open failed");
}
- // Write test data.
+ // Write test data. Test missing CRLF on last line.
file.print(
F("abc,123,456,7.89\r\n"
"def,-321,654,-9.87\r\n"
@@ -143,6 +147,10 @@ void setup() {
if (line[n - 1] != '\n' && n == (sizeof(line) - 1)) {
error("line too long");
}
+ if (line[n - 1] == '\n') {
+ // Remove new line.
+ line[n - 1] = 0;
+ }
if (!parseLine(line)) {
error("parseLine failed");
}
diff --git a/examples/Rp2040SdioSetup/Rp2040SdioSetup.ino b/examples/Rp2040SdioSetup/Rp2040SdioSetup.ino
new file mode 100644
index 00000000..468af71d
--- /dev/null
+++ b/examples/Rp2040SdioSetup/Rp2040SdioSetup.ino
@@ -0,0 +1,93 @@
+// RP2040 PIO SDIO setup and test.
+/*
+This example requires an SDIO card socket with the following six lines.
+
+CLK - A clock signal sent to the card by the MCU.
+CMD - A bidirectional line for commands and responses.
+DAT[0:3] - Four bidirectional lines for data transfer.
+
+CLK and CMD can be connected to any GPIO pins. DAT[0:3] can be connected
+to any four consecutive GPIO pins in the order DAT0, DAT1, DAT2, DAT3.
+
+For testing, I use several RP2040/RP2350 boards, including the Adafruit
+Metro RP2040, which has a built-in SDIO socket.
+
+https://learn.adafruit.com/adafruit-metro-rp2040
+
+I use this SD socket breakout board for other boards.
+
+https://learn.adafruit.com/adafruit-microsd-spi-sdio
+
+Wires should be short since signals can be as fast as 50 MHz.
+*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
+#include "SdFat.h"
+//------------------------------------------------------------------------------
+// Example GPIO definitions I use for debug. Edit for your setup.
+// Run this example as is to print the symbol for your variant.
+//
+#if defined(ARDUINO_ADAFRUIT_METRO_RP2040)
+#define RP_CLK_GPIO 18
+#define RP_CMD_GPIO 19
+#define RP_DAT0_GPIO 20 // DAT1: GPIO21, DAT2: GPIO22, DAT3: GPIO23.
+#elif defined(ARDUINO_RASPBERRY_PI_PICO) || defined(ARDUINO_RASPBERRY_PI_PICO_2)
+#define RP_CLK_GPIO 16
+#define RP_CMD_GPIO 17
+#define RP_DAT0_GPIO 18 // DAT1: GPIO19, DAT2: GPIO20, DAT3: GPIO21.
+#elif defined(ARDUINO_ADAFRUIT_FEATHER_RP2350_HSTX)
+#define RP_CLK_GPIO 11
+#define RP_CMD_GPIO 10
+#define RP_DAT0_GPIO 22 // DAT1: GPIO23, DAT2: GPIO24, DAT3: GPIO25.
+#endif // defined(ARDUINO_ADAFRUIT_METRO_RP2040)
+
+#if defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
+#else // defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+#warning "Undefined SD_CONFIG. Run this program for the Variant Symbol."
+#endif // defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+//------------------------------------------------------------------------------
+// Class File is not defined by SdFat since the RP2040 system defines it.
+// SD_FAT_TYPE = 1 for FAT16/FAT32, 2 for exFAT, 3 for FAT16/FAT32 and exFAT.
+#define SD_FAT_TYPE 3
+
+#if SD_FAT_TYPE == 1
+SdFat32 sd;
+File32 file;
+#elif SD_FAT_TYPE == 2
+SdExFat sd;
+ExFile file;
+#elif SD_FAT_TYPE == 3
+SdFs sd;
+FsFile file;
+#else // SD_FAT_TYPE
+#error Invalid SD_FAT_TYPE
+#endif // SD_FAT_TYPE
+
+void setup() {
+ Serial.begin(9600);
+ while (!Serial) {
+ yield();
+ }
+ Serial.println("Type any character to start\n");
+ while (!Serial.available()) {
+ yield();
+ }
+ Serial.print("Variant Symbol: ");
+ Serial.print("ARDUINO_");
+ Serial.println(BOARD_NAME);
+ Serial.println();
+#if defined(SD_CONFIG)
+ if (!sd.begin(SD_CONFIG)) {
+ sd.initErrorHalt(&Serial);
+ }
+ Serial.println("Card successfully initialized.");
+ Serial.println("\nls:");
+ sd.ls(LS_A | LS_DATE | LS_SIZE); // Add LS_R for recursive list.
+ Serial.println("\nDone! Try the bench example next.");
+#else // #if defined(SD_CONFIG)
+ Serial.println("Error: SD_CONFIG undefined for your board.");
+ Serial.println("Define RP_CLK_GPIO, RP_CMD_GPIO, and RP_DAT0_GPIO above.");
+#endif
+}
+
+void loop() {}
diff --git a/examples/RtcTimestampTest/RtcTimestampTest.ino b/examples/RtcTimestampTest/RtcTimestampTest.ino
index 3e5e5a74..c9796d2d 100644
--- a/examples/RtcTimestampTest/RtcTimestampTest.ino
+++ b/examples/RtcTimestampTest/RtcTimestampTest.ino
@@ -35,13 +35,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
diff --git a/examples/SdFormatter/SdFormatter.ino b/examples/SdFormatter/SdFormatter.ino
index 6cfd37be..1bc5a605 100644
--- a/examples/SdFormatter/SdFormatter.ino
+++ b/examples/SdFormatter/SdFormatter.ino
@@ -10,6 +10,7 @@
* For very small cards this program uses FAT16
* and the above SDFormatter uses FAT12.
*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
#include "sdios.h"
@@ -40,13 +41,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
//==============================================================================
// Serial output stream
ArduinoOutStream cout(Serial);
diff --git a/examples/SdInfo/SdInfo.ino b/examples/SdInfo/SdInfo.ino
index a2c31623..5d04570d 100644
--- a/examples/SdInfo/SdInfo.ino
+++ b/examples/SdInfo/SdInfo.ino
@@ -6,6 +6,7 @@
* https://gurumeditation.org/1342/sd-memory-card-register-decoder/
* https://archive.goughlui.com/static/multicid.htm
*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
#include "sdios.h"
/*
@@ -30,13 +31,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#endif // SDCARD_SS_PIN
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SD_SCK_MHZ(16))
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SD_SCK_MHZ(16))
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
//------------------------------------------------------------------------------
SdFs sd;
diff --git a/examples/TeensyDmaAdcLogger/TeensyDmaAdcLogger.ino b/examples/TeensyDmaAdcLogger/TeensyDmaAdcLogger.ino
index ad491f4a..7bda5309 100644
--- a/examples/TeensyDmaAdcLogger/TeensyDmaAdcLogger.ino
+++ b/examples/TeensyDmaAdcLogger/TeensyDmaAdcLogger.ino
@@ -6,9 +6,9 @@
//
#include "ADC.h"
#include "DMAChannel.h"
+#include "SdFat.h"
#include "FreeStack.h"
#include "RingBuf.h"
-#include "SdFat.h"
// Pin must be on first ADC.
#define ADC_PIN A0
diff --git a/examples/TeensyRtcTimestamp/TeensyRtcTimestamp.ino b/examples/TeensyRtcTimestamp/TeensyRtcTimestamp.ino
index 496c1d02..babbf1af 100644
--- a/examples/TeensyRtcTimestamp/TeensyRtcTimestamp.ino
+++ b/examples/TeensyRtcTimestamp/TeensyRtcTimestamp.ino
@@ -29,13 +29,13 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
diff --git a/examples/TeensySdioLogger/TeensySdioLogger.ino b/examples/TeensySdioLogger/TeensySdioLogger.ino
index 36a51b93..912ea01f 100644
--- a/examples/TeensySdioLogger/TeensySdioLogger.ino
+++ b/examples/TeensySdioLogger/TeensySdioLogger.ino
@@ -5,9 +5,8 @@
// puts the controller in write mode and takes about 11 usec on a
// Teensy 4.1. About 5 usec is required to write a sector when the
// controller is in write mode.
-
-#include "RingBuf.h"
#include "SdFat.h"
+#include "RingBuf.h"
// Use Teensy SDIO
#define SD_CONFIG SdioConfig(FIFO_SDIO)
diff --git a/examples/UnicodeFilenames/UnicodeFilenames.ino b/examples/UnicodeFilenames/UnicodeFilenames.ino
index a1524070..6f1ee90e 100644
--- a/examples/UnicodeFilenames/UnicodeFilenames.ino
+++ b/examples/UnicodeFilenames/UnicodeFilenames.ino
@@ -1,7 +1,16 @@
// Simple test of Unicode filename.
// Unicode is supported as UTF-8 encoded strings.
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
+// SD_FAT_TYPE = 0 for SdFat/File as defined in SdFatConfig.h,
+// 1 for FAT16/FAT32, 2 for exFAT, 3 for FAT16/FAT32 and exFAT.
+#if defined __has_include
+#if __has_include(<FS.h>)
+#define SD_FAT_TYPE 3 // Can't use SdFat/File
+#endif // __has_include(<FS.h>)
+#endif // defined __has_include
+
// USE_UTF8_LONG_NAMES must be non-zero in SdFat/src/SdFatCongfig.h
#if USE_UTF8_LONG_NAMES
@@ -11,10 +20,6 @@ const char* names[] = {u8"россиянин", u8"très élégant", u8"狗.txt",
// Remove files if non-zero.
#define REMOVE_UTF8_FILES 1
-// SD_FAT_TYPE = 0 for SdFat/File as defined in SdFatConfig.h,
-// 1 for FAT16/FAT32, 2 for exFAT, 3 for FAT16/FAT32 and exFAT.
-#define SD_FAT_TYPE 0
-
// SDCARD_SS_PIN is defined for the built-in SD on some boards.
#ifndef SDCARD_SS_PIN
const uint8_t SD_CS_PIN = SS;
@@ -24,13 +29,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#endif // SDCARD_SS_PIN
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SD_SCK_MHZ(16))
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SD_SCK_MHZ(16))
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
diff --git a/examples/bench/bench.ino b/examples/bench/bench.ino
index 36a577c5..1c646f67 100644
--- a/examples/bench/bench.ino
+++ b/examples/bench/bench.ino
@@ -1,13 +1,22 @@
/*
* This program is a simple binary write/read benchmark.
*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
#include "FreeStack.h"
#include "sdios.h"
// SD_FAT_TYPE = 0 for SdFat/File as defined in SdFatConfig.h,
// 1 for FAT16/FAT32, 2 for exFAT, 3 for FAT16/FAT32 and exFAT.
-#define SD_FAT_TYPE 3
+#if defined __has_include
+#if __has_include(<FS.h>)
+#define SD_FAT_TYPE 3 // Can't use SdFat/File
+#endif // __has_include(<FS.h>)
+#endif // defined __has_include
+
+#ifndef SD_FAT_TYPE
+#define SD_FAT_TYPE 0 // Use SdFat/File
+#endif // SD_FAT_TYPE
/*
Change the value of SD_CS_PIN if you are using SPI and
your hardware does not use the default value, SS.
@@ -27,14 +36,24 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
// Try max SPI clock for an SD. Reduce SPI_CLOCK if errors occur.
#define SPI_CLOCK SD_SCK_MHZ(50)
+// Example SDIO definition for RP2040/RP2350. See the Rp2040SdioSetup example.
+#if defined(ARDUINO_ADAFRUIT_METRO_RP2040) && !defined(RP_CLK_GPIO)
+#define RP_CLK_GPIO 18
+#define RP_CMD_GPIO 19
+#define RP_DAT0_GPIO 20 // DAT1: GPIO21, DAT2: GPIO22, DAT3: GPIO23.
+#endif // defined(ARDUINO_ADAFRUIT_METRO_RP2040)
+
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
// Set PRE_ALLOCATE true to pre-allocate file clusters.
const bool PRE_ALLOCATE = true;
@@ -128,6 +147,11 @@ void setup() {
"\nSet ENABLE_DEDICATED_SPI nonzero in\n"
"SdFatConfig.h for best SPI performance.\n");
}
+ if (!SD_HAS_CUSTOM_SPI && !USE_SPI_ARRAY_TRANSFER && isSpi(SD_CONFIG)) {
+ cout << F(
+ "\nSetting USE_SPI_ARRAY_TRANSFER nonzero in\n"
+ "SdFatConfig.h may improve SPI performance.\n");
+ }
// use uppercase in hex and use 0X base prefix
cout << uppercase << showbase << endl;
}
diff --git a/examples/rename/rename.ino b/examples/rename/rename.ino
index 3995fce5..aa9fedcc 100644
--- a/examples/rename/rename.ino
+++ b/examples/rename/rename.ino
@@ -1,6 +1,7 @@
/*
* This program demonstrates use of rename().
*/
+#define DISABLE_FS_H_WARNING // Disable warning for type File not defined.
#include "SdFat.h"
#include "sdios.h"
@@ -29,13 +30,16 @@ const uint8_t SD_CS_PIN = SDCARD_SS_PIN;
#define SPI_CLOCK SD_SCK_MHZ(50)
// Try to select the best SD card configuration.
-#if HAS_SDIO_CLASS
+#if defined(HAS_TEENSY_SDIO)
#define SD_CONFIG SdioConfig(FIFO_SDIO)
+#elif defined(RP_CLK_GPIO) && defined(RP_CMD_GPIO) && defined(RP_DAT0_GPIO)
+// See the Rp2040SdioSetup example for RP2040/RP2350 boards.
+#define SD_CONFIG SdioConfig(RP_CLK_GPIO, RP_CMD_GPIO, RP_DAT0_GPIO)
#elif ENABLE_DEDICATED_SPI
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, DEDICATED_SPI, SPI_CLOCK)
-#else // HAS_SDIO_CLASS
+#else // HAS_TEENSY_SDIO
#define SD_CONFIG SdSpiConfig(SD_CS_PIN, SHARED_SPI, SPI_CLOCK)
-#endif // HAS_SDIO_CLASS
+#endif // HAS_TEENSY_SDIO
#if SD_FAT_TYPE == 0
SdFat sd;
diff --git a/extras/cpplint.bat b/extras/cpplint.bat
index f874f45f..3246259e 100644
--- a/extras/cpplint.bat
+++ b/extras/cpplint.bat
@@ -1,2 +1,2 @@
-sh cpplint.sh
+bash cpplint.sh
pause
\ No newline at end of file
diff --git a/extras/cpplint.py b/extras/cpplint.py
index 704618f5..9d4ff5af 100644
--- a/extras/cpplint.py
+++ b/extras/cpplint.py
@@ -42,30 +42,43 @@
"""
import codecs
+import collections
import copy
import getopt
+import glob
+import itertools
import math # for log
import os
import re
-import sre_compile
import string
import sys
-import unicodedata
import sysconfig
+import unicodedata
+import xml.etree.ElementTree
-try:
- xrange # Python 2
-except NameError:
- xrange = range # Python 3
+# if empty, use defaults
+_valid_extensions = set([])
+__VERSION__ = '1.7'
_USAGE = """
-Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
+ [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
+ [--repository=path]
[--linelength=digits] [--headers=x,y,...]
+ [--recursive]
+ [--exclude=path]
+ [--extensions=hpp,cpp,...]
+ [--includeorder=default|standardcfirst]
+ [--config=filename]
[--quiet]
+ [--version]
[file] ...
+ Style checker for C/C++ source files.
+ This is a fork of the Google style checker with minor extensions.
+
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
@@ -73,22 +86,37 @@
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
- To suppress false-positive errors of a certain category, add a
- 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
- suppresses errors of all categories on that line.
+ To suppress false-positive errors of certain categories, add a
+ 'NOLINT(category[, category...])' comment to the line. NOLINT or NOLINT(*)
+ suppresses errors of all categories on that line. To suppress categories
+ on the next line use NOLINTNEXTLINE instead of NOLINT. To suppress errors in
+ a block of code 'NOLINTBEGIN(category[, category...])' comment to a line at
+ the start of the block and to end the block add a comment with 'NOLINTEND'.
+ NOLINT blocks are inclusive so any statements on the same line as a BEGIN
+ or END will have the error suppression applied.
The files passed in will be linted; at least one file must be provided.
- Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
- extensions with the --extensions flag.
+ Default linted extensions are %s.
+ Other file types will be ignored.
+ Change the extensions with the --extensions flag.
Flags:
- output=vs7
+ output=emacs|eclipse|vs7|junit|sed|gsed
By default, the output is formatted to ease emacs parsing. Visual Studio
- compatible output (vs7) may also be used. Other formats are unsupported.
+ compatible output (vs7) may also be used. Further support exists for
+ eclipse (eclipse), and JUnit (junit). XML parsers such as those used
+ in Jenkins and Bamboo may also be used.
+ The sed format outputs sed commands that should fix some of the errors.
+ Note that this requires gnu sed. If that is installed as gsed on your
+ system (common e.g. on macOS with homebrew) you can use the gsed output
+ format. Sed commands are written to stdout, not stderr, so you should be
+ able to pipe output straight to a shell to run the fixes.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
+ Errors with lower verbosity levels have lower confidence and are more
+ likely to be false positives.
quiet
Don't print anything if no errors are found.
@@ -98,16 +126,24 @@
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
- "-FOO" and "FOO" means "do not print categories that start with FOO".
+ "-FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
- --filter=whitespace,runtime/printf,+runtime/printf_format
+ --filter=-whitespace,-runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
+ Filters can directly be limited to files and also line numbers. The
+ syntax is category:file:line , where line is optional. The filter limitation
+ works for both + and - and can be combined with ordinary filters:
+
+ Examples: --filter=-whitespace:foo.h,+whitespace/braces:foo.h
+ --filter=-whitespace,-runtime/printf:foo.h:14,+runtime/printf_format:foo.h
+ --filter=-,+build/include_what_you_use:foo.h:321
+
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
@@ -115,17 +151,41 @@
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
+ repository=path
+ The top level directory of the repository, used to derive the header
+ guard CPP variable. By default, this is determined by searching for a
+ path that contains .git, .hg, or .svn. When this flag is specified, the
+ given path is used instead. This option allows the header guard CPP
+ variable to remain consistent even if members of a team have different
+ repository root directories (such as when checking out a subdirectory
+ with SVN). In addition, users of non-mainstream version control systems
+ can use this flag to ensure readable header guard CPP variables.
+
+ Examples:
+ Assuming that Alice checks out ProjectName and Bob checks out
+ ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
+ with no --repository flag, the header guard CPP variable will be:
+
+ Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
+ Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
+
+ If Alice uses the --repository=trunk flag and Bob omits the flag or
+ uses --repository=. then the header guard CPP variable will be:
+
+ Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
+ Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
+
root=subdir
The root directory used for deriving header guard CPP variable.
- By default, the header guard CPP variable is calculated as the relative
- path to the directory that contains .git, .hg, or .svn. When this flag
- is specified, the relative path is calculated from the specified
- directory. If the specified directory does not exist, this flag is
- ignored.
+ This directory is relative to the top level directory of the repository
+ which by default is determined by searching for a directory that contains
+ .git, .hg, or .svn but can also be controlled with the --repository flag.
+ If the specified directory does not exist, this flag is ignored.
Examples:
- Assuming that top/src/.git exists (and cwd=top/src), the header guard
- CPP variables for top/src/chrome/browser/ui/browser.h are:
+ Assuming that src is the top level directory of the repository (and
+ cwd=top/src), the header guard CPP variables for
+ src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
@@ -139,17 +199,48 @@
Examples:
--linelength=120
+ recursive
+ Search for files to lint recursively. Each directory given in the list
+ of files to be linted is replaced by all files that descend from that
+ directory. Files with extensions not in the valid extensions list are
+ excluded.
+
+ exclude=path
+ Exclude the given path from the list of files to be linted. Relative
+ paths are evaluated relative to the current directory and shell globbing
+ is performed. This flag can be provided multiple times to exclude
+ multiple files.
+
+ Examples:
+ --exclude=one.cc
+ --exclude=src/*.cc
+ --exclude=src/*.cc --exclude=test/*.cc
+
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
- --extensions=hpp,cpp
+ --extensions=%s
+
+ includeorder=default|standardcfirst
+ For the build/include_order rule, the default is to blindly assume angle
+ bracket includes with file extension are c-system-headers (default),
+ even knowing this will have false classifications.
+ The default is established at google.
+ standardcfirst means to instead use an allow-list of known c headers and
+ treat all others as separate group of "other system headers". The C headers
+ included are those of the C-standard lib and closely related ones.
+
+ config=filename
+ Search for config files with the specified name instead of CPPLINT.cfg
headers=x,y,...
The header extensions that cpplint will treat as .h in checks. Values are
automatically added to --extensions list.
+ (by default, only files with extensions %s will be assumed to be headers)
Examples:
+ --headers=%s
--headers=hpp,hxx
--headers=hpp
@@ -174,7 +265,7 @@
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
- through liner.
+ through the linter.
"linelength" allows to specify the allowed line length for the project.
@@ -189,7 +280,7 @@
Example file:
filter=-build/include_order,+build/include_alpha
- exclude_files=.*\.cc
+ exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
@@ -204,17 +295,19 @@
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
- 'build/c++14',
- 'build/c++tr1',
+ 'build/c++17',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
+ 'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
+ 'build/namespaces_headers',
+ 'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
@@ -242,7 +335,6 @@
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
- 'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
@@ -261,6 +353,7 @@
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
+ 'whitespace/indent_namespace',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
@@ -270,6 +363,13 @@
'whitespace/todo',
]
+# keywords to use with --outputs which generate stdout for machine processing
+_MACHINE_OUTPUTS = [
+ 'junit',
+ 'sed',
+ 'gsed'
+]
+
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
@@ -277,6 +377,36 @@
'readability/function',
]
+# These prefixes for categories should be ignored since they relate to other
+# tools which also use the NOLINT syntax, e.g. clang-tidy.
+_OTHER_NOLINT_CATEGORY_PREFIXES = [
+ 'clang-analyzer-',
+ 'abseil-',
+ 'altera-',
+ 'android-',
+ 'boost-',
+ 'bugprone-',
+ 'cert-',
+ 'concurrency-',
+ 'cppcoreguidelines-',
+ 'darwin-',
+ 'fuchsia-',
+ 'google-',
+ 'hicpp-',
+ 'linuxkernel-',
+ 'llvm-',
+ 'llvmlibc-',
+ 'misc-',
+ 'modernize-',
+ 'mpi-',
+ 'objc-',
+ 'openmp-',
+ 'performance-',
+ 'portability-',
+ 'readability-',
+ 'zircon-',
+ ]
+
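
How the prefix list is consulted is shown by the check added to ParseNolintSuppressions further below; here is a standalone sketch using a shortened copy of the list:

```
_OTHER_NOLINT_CATEGORY_PREFIXES = ['clang-analyzer-', 'modernize-', 'readability-']

def is_foreign_category(category):
    # Categories owned by other NOLINT-aware tools are silently ignored
    # rather than reported as unknown cpplint categories.
    return any(category.startswith(p) for p in _OTHER_NOLINT_CATEGORY_PREFIXES)

print(is_foreign_category('modernize-use-nullptr'))  # True  -> ignored
print(is_foreign_category('build/include_order'))    # False -> cpplint's own
```
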
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
@@ -305,7 +435,7 @@
'alloc.h',
'builtinbuf.h',
'bvector.h',
- 'complex.h',
+    # 'complex.h', collides with the C system header "complex.h" (C99 and later)
'defalloc.h',
'deque.h',
'editbuf.h',
@@ -351,7 +481,7 @@
'tree.h',
'type_traits.h',
'vector.h',
- # 17.6.1.2 C++ library headers
+ # C++ library headers
'algorithm',
'array',
'atomic',
@@ -405,7 +535,45 @@
'utility',
'valarray',
'vector',
- # 17.6.1.2 C++ headers for C library facilities
+ # C++14 headers
+ 'shared_mutex',
+ # C++17 headers
+ 'any',
+ 'charconv',
+ 'codecvt',
+ 'execution',
+ 'filesystem',
+ 'memory_resource',
+ 'optional',
+ 'string_view',
+ 'variant',
+ # C++20 headers
+ 'barrier',
+ 'bit',
+ 'compare',
+ 'concepts',
+ 'coroutine',
+ 'format',
+  'latch',
+ 'numbers',
+ 'ranges',
+ 'semaphore',
+ 'source_location',
+ 'span',
+ 'stop_token',
+ 'syncstream',
+ 'version',
+ # C++23 headers
+ 'expected',
+ 'flat_map',
+ 'flat_set',
+ 'generator',
+ 'mdspan',
+ 'print',
+ 'spanstream',
+ 'stacktrace',
+ 'stdfloat',
+ # C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
@@ -434,6 +602,189 @@
'cwctype',
])
+# C headers
+_C_HEADERS = frozenset([
+ # System C headers
+ 'assert.h',
+ 'complex.h',
+ 'ctype.h',
+ 'errno.h',
+ 'fenv.h',
+ 'float.h',
+ 'inttypes.h',
+ 'iso646.h',
+ 'limits.h',
+ 'locale.h',
+ 'math.h',
+ 'setjmp.h',
+ 'signal.h',
+ 'stdalign.h',
+ 'stdarg.h',
+ 'stdatomic.h',
+ 'stdbool.h',
+ 'stddef.h',
+ 'stdint.h',
+ 'stdio.h',
+ 'stdlib.h',
+ 'stdnoreturn.h',
+ 'string.h',
+ 'tgmath.h',
+ 'threads.h',
+ 'time.h',
+ 'uchar.h',
+ 'wchar.h',
+ 'wctype.h',
+ # C23 headers
+ 'stdbit.h',
+ 'stdckdint.h',
+ # additional POSIX C headers
+ 'aio.h',
+ 'arpa/inet.h',
+ 'cpio.h',
+ 'dirent.h',
+ 'dlfcn.h',
+ 'fcntl.h',
+ 'fmtmsg.h',
+ 'fnmatch.h',
+ 'ftw.h',
+ 'glob.h',
+ 'grp.h',
+ 'iconv.h',
+ 'langinfo.h',
+ 'libgen.h',
+ 'monetary.h',
+ 'mqueue.h',
+ 'ndbm.h',
+ 'net/if.h',
+ 'netdb.h',
+ 'netinet/in.h',
+ 'netinet/tcp.h',
+ 'nl_types.h',
+ 'poll.h',
+ 'pthread.h',
+ 'pwd.h',
+ 'regex.h',
+ 'sched.h',
+ 'search.h',
+ 'semaphore.h',
+ 'setjmp.h',
+ 'signal.h',
+ 'spawn.h',
+ 'strings.h',
+ 'stropts.h',
+ 'syslog.h',
+ 'tar.h',
+ 'termios.h',
+ 'trace.h',
+ 'ulimit.h',
+ 'unistd.h',
+ 'utime.h',
+ 'utmpx.h',
+ 'wordexp.h',
+ # additional GNUlib headers
+ 'a.out.h',
+ 'aliases.h',
+ 'alloca.h',
+ 'ar.h',
+ 'argp.h',
+ 'argz.h',
+ 'byteswap.h',
+ 'crypt.h',
+ 'endian.h',
+ 'envz.h',
+ 'err.h',
+ 'error.h',
+ 'execinfo.h',
+ 'fpu_control.h',
+ 'fstab.h',
+ 'fts.h',
+ 'getopt.h',
+ 'gshadow.h',
+ 'ieee754.h',
+ 'ifaddrs.h',
+ 'libintl.h',
+ 'mcheck.h',
+ 'mntent.h',
+ 'obstack.h',
+ 'paths.h',
+ 'printf.h',
+ 'pty.h',
+ 'resolv.h',
+ 'shadow.h',
+ 'sysexits.h',
+ 'ttyent.h',
+ # Additional linux glibc headers
+ 'dlfcn.h',
+ 'elf.h',
+ 'features.h',
+ 'gconv.h',
+ 'gnu-versions.h',
+ 'lastlog.h',
+ 'libio.h',
+ 'link.h',
+ 'malloc.h',
+ 'memory.h',
+ 'netash/ash.h',
+ 'netatalk/at.h',
+ 'netax25/ax25.h',
+ 'neteconet/ec.h',
+ 'netipx/ipx.h',
+ 'netiucv/iucv.h',
+ 'netpacket/packet.h',
+ 'netrom/netrom.h',
+ 'netrose/rose.h',
+ 'nfs/nfs.h',
+ 'nl_types.h',
+ 'nss.h',
+ 're_comp.h',
+ 'regexp.h',
+ 'sched.h',
+ 'sgtty.h',
+ 'stab.h',
+ 'stdc-predef.h',
+ 'stdio_ext.h',
+ 'syscall.h',
+ 'termio.h',
+ 'thread_db.h',
+ 'ucontext.h',
+ 'ustat.h',
+ 'utmp.h',
+ 'values.h',
+ 'wait.h',
+ 'xlocale.h',
+ # Hardware specific headers
+ 'arm_neon.h',
+ 'emmintrin.h',
+  'xmmintrin.h',
+ ])
+
+# Folders of C libraries that are so commonly used in C++
+# that they have parity with standard C libraries.
+C_STANDARD_HEADER_FOLDERS = frozenset([
+ # standard C library
+ "sys",
+ # glibc for linux
+ "arpa",
+ "asm-generic",
+ "bits",
+ "gnu",
+ "net",
+ "netinet",
+ "protocols",
+ "rpc",
+ "rpcsvc",
+ "scsi",
+ # linux kernel header
+ "drm",
+ "linux",
+ "misc",
+ "mtd",
+ "rdma",
+ "sound",
+ "video",
+ "xen",
+ ])
+
# Type names
_TYPES = re.compile(
r'^(?:'
@@ -457,7 +808,8 @@
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
-_TEST_FILE_SUFFIX = r'(_test|_unittest|_regtest)$'
+_test_suffixes = ['_test', '_regtest', '_unittest']
+_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
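
The suffix pattern is now built from a list, but it matches exactly as before; a quick standalone check:

```
import re

_test_suffixes = ['_test', '_regtest', '_unittest']
_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'

# The extension-free base name is matched against the suffix pattern.
print(bool(re.search(_TEST_FILE_SUFFIX, 'widget_unittest')))  # True
print(bool(re.search(_TEST_FILE_SUFFIX, 'widget')))           # False
```
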
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
@@ -471,21 +823,21 @@
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
-_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
- _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
- _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
- _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
- _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+ _CHECK_REPLACEMENT['DCHECK'][op] = f'DCHECK_{replacement}'
+ _CHECK_REPLACEMENT['CHECK'][op] = f'CHECK_{replacement}'
+ _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = f'EXPECT_{replacement}'
+ _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = f'ASSERT_{replacement}'
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
- _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
- _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = f'EXPECT_{inv_replacement}'
+ _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = f'ASSERT_{inv_replacement}'
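
The net effect of the two loops, shown as a reduced standalone rendering of the tables above (the *_FALSE macros get the inverted comparison, since `!(a < b)` is `a >= b`):

```
_CHECK_REPLACEMENT = {m: {} for m in ['CHECK', 'EXPECT_FALSE']}
for op, replacement in [('==', 'EQ'), ('<', 'LT')]:
    _CHECK_REPLACEMENT['CHECK'][op] = f'CHECK_{replacement}'
for op, inv_replacement in [('==', 'NE'), ('<', 'GE')]:
    _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = f'EXPECT_{inv_replacement}'

# CHECK(a == b) is suggested to become CHECK_EQ(a, b);
# EXPECT_FALSE(a < b) inverts to EXPECT_GE(a, b).
print(_CHECK_REPLACEMENT['CHECK']['=='])        # CHECK_EQ
print(_CHECK_REPLACEMENT['EXPECT_FALSE']['<'])  # EXPECT_GE
```
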
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
@@ -512,16 +864,17 @@
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
- r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
+ r'([ =()])(' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')([ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
-_LIKELY_MY_HEADER = 3
-_POSSIBLE_MY_HEADER = 4
-_OTHER_HEADER = 5
+_OTHER_SYS_HEADER = 3
+_LIKELY_MY_HEADER = 4
+_POSSIBLE_MY_HEADER = 5
+_OTHER_HEADER = 6
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
@@ -541,7 +894,21 @@
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
-_regexp_compile_cache = {}
+# Commands for sed to fix the problem
+_SED_FIXUPS = {
+ 'Remove spaces around =': r's/ = /=/',
+ 'Remove spaces around !=': r's/ != /!=/',
+ 'Remove space before ( in if (': r's/if (/if(/',
+ 'Remove space before ( in for (': r's/for (/for(/',
+ 'Remove space before ( in while (': r's/while (/while(/',
+ 'Remove space before ( in switch (': r's/switch (/switch(/',
+ 'Should have a space between // and comment': r's/\/\//\/\/ /',
+ 'Missing space before {': r's/\([^ ]\){/\1 {/',
+ 'Tab found, replace by spaces': r's/\t/ /g',
+ 'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
+ 'You don\'t need a ; after a }': r's/};/}/',
+ 'Missing space after ,': r's/,\([^ ]\)/, \1/g',
+}
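
Each entry pairs an error message with a sed substitution; the 'sed'/'gsed' output formats (see Error() further below) prepend the line number so the command targets only the offending line. A standalone rendering of one fixup, with hypothetical file and line values:

```
linenum, filename = 42, 'foo.cc'
message = 'Tab found, replace by spaces'
_SED_FIXUPS = {'Tab found, replace by spaces': r's/\t/  /g'}

# Mirrors the command line the 'sed' output format emits; piping
# cpplint's stdout through sh would apply the fix in place.
print(f"sed -i '{linenum}{_SED_FIXUPS[message]}' {filename}")
# -> sed -i '42s/\t/  /g' foo.cc
```
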
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
@@ -552,33 +919,142 @@
_root = None
_root_debug = False
+# The top level repository directory. If set, _root is calculated relative to
+# this directory instead of the directory containing version control artifacts.
+# This is set by the --repository flag.
+_repository = None
+
+# Files to exclude from linting. This is set by the --exclude flag.
+_excludes = None
+
+# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
+_quiet = False
+
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
-# The allowed extensions for file names
-# This is set by --extensions flag.
-_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
+# This allows using a different include order rule than the default
+_include_order = "default"
+
+# This allows different config files to be used
+_config_filename = "CPPLINT.cfg"
# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
# This is set by --headers flag.
-_hpp_headers = set(['h'])
+_hpp_headers = set([])
+
+class ErrorSuppressions:
+ """Class to track all error suppressions for cpplint"""
+
+ class LineRange:
+ """Class to represent a range of line numbers for which an error is suppressed"""
+ def __init__(self, begin, end):
+ self.begin = begin
+ self.end = end
+
+ def __str__(self):
+ return f'[{self.begin}-{self.end}]'
+
+ def __contains__(self, obj):
+ return self.begin <= obj <= self.end
+
+ def ContainsRange(self, other):
+ return self.begin <= other.begin and self.end >= other.end
-# {str, bool}: a map from error categories to booleans which indicate if the
-# category should be suppressed for every line.
-_global_error_suppressions = {}
+ def __init__(self):
+ self._suppressions = collections.defaultdict(list)
+ self._open_block_suppression = None
+
+ def _AddSuppression(self, category, line_range):
+ suppressed = self._suppressions[category]
+ if not (suppressed and suppressed[-1].ContainsRange(line_range)):
+ suppressed.append(line_range)
+
+ def GetOpenBlockStart(self):
+ """:return: The start of the current open block or `-1` if there is not an open block"""
+ return self._open_block_suppression.begin if self._open_block_suppression else -1
+
+ def AddGlobalSuppression(self, category):
+ """Add a suppression for `category` which is suppressed for the whole file"""
+ self._AddSuppression(category, self.LineRange(0, math.inf))
+
+ def AddLineSuppression(self, category, linenum):
+ """Add a suppression for `category` which is suppressed only on `linenum`"""
+ self._AddSuppression(category, self.LineRange(linenum, linenum))
+
+ def StartBlockSuppression(self, category, linenum):
+ """Start a suppression block for `category` on `linenum`. inclusive"""
+ if self._open_block_suppression is None:
+ self._open_block_suppression = self.LineRange(linenum, math.inf)
+ self._AddSuppression(category, self._open_block_suppression)
+
+ def EndBlockSuppression(self, linenum):
+ """End the current block suppression on `linenum`. inclusive"""
+ if self._open_block_suppression:
+ self._open_block_suppression.end = linenum
+ self._open_block_suppression = None
+
+ def IsSuppressed(self, category, linenum):
+ """:return: `True` if `category` is suppressed for `linenum`"""
+ suppressed = self._suppressions[category] + self._suppressions[None]
+ return any(linenum in lr for lr in suppressed)
+
+ def HasOpenBlock(self):
+ """:return: `True` if a block suppression was started but not ended"""
+ return self._open_block_suppression is not None
+
+ def Clear(self):
+ """Clear all current error suppressions"""
+ self._suppressions.clear()
+ self._open_block_suppression = None
+
+_error_suppressions = ErrorSuppressions()
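
Illustrative use of the tracker (assuming the class above and the module-level imports of `collections` and `math` are in scope; this snippet is not part of cpplint itself):

```
s = ErrorSuppressions()
s.AddLineSuppression('whitespace/tab', 10)     # // NOLINT(whitespace/tab)
s.StartBlockSuppression('build/include', 20)   # // NOLINTBEGIN(build/include)
s.EndBlockSuppression(30)                      # // NOLINTEND

print(s.IsSuppressed('whitespace/tab', 10))    # True
print(s.IsSuppressed('build/include', 25))     # True  (inside the block)
print(s.IsSuppressed('build/include', 31))     # False (after NOLINTEND)
```
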
def ProcessHppHeadersOption(val):
global _hpp_headers
try:
- _hpp_headers = set(val.split(','))
- # Automatically append to extensions list so it does not have to be set 2 times
- _valid_extensions.update(_hpp_headers)
+ _hpp_headers = {ext.strip() for ext in val.split(',')}
except ValueError:
PrintUsage('Header extensions must be comma separated list.')
+def ProcessIncludeOrderOption(val):
+ if val is None or val == "default":
+ pass
+ elif val == "standardcfirst":
+ global _include_order
+ _include_order = val
+ else:
+    PrintUsage(f'Invalid includeorder value {val}. Expected default|standardcfirst')
+
def IsHeaderExtension(file_extension):
- return file_extension in _hpp_headers
+ return file_extension in GetHeaderExtensions()
+
+def GetHeaderExtensions():
+ if _hpp_headers:
+ return _hpp_headers
+ if _valid_extensions:
+ return {h for h in _valid_extensions if 'h' in h}
+ return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
+
+# The allowed extensions for file names
+# This is set by --extensions flag
+def GetAllExtensions():
+ return GetHeaderExtensions().union(_valid_extensions or set(
+ ['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
+
+def ProcessExtensionsOption(val):
+ global _valid_extensions
+ try:
+ extensions = [ext.strip() for ext in val.split(',')]
+ _valid_extensions = set(extensions)
+ except ValueError:
+    PrintUsage('Extensions should be a comma-separated list of values;'
+               ' for example: extensions=hpp,cpp\n'
+               f'This could not be parsed: "{val}"')
+
+def GetNonHeaderExtensions():
+ return GetAllExtensions().difference(GetHeaderExtensions())
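
With neither --extensions nor --headers given (both module globals still unset), the helpers above resolve to these defaults; a quick check assuming that state:

```
# Defaults when _hpp_headers and _valid_extensions are both unset:
print(sorted(GetHeaderExtensions()))
# ['cuh', 'h', 'h++', 'hh', 'hpp', 'hxx']
print(sorted(GetNonHeaderExtensions()))
# ['c', 'c++', 'cc', 'cpp', 'cu', 'cxx']
```
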
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
@@ -593,26 +1069,50 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error):
linenum: int, the number of the current line.
error: function, an error handler.
"""
- matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
+ matched = re.search(r'\bNOLINT(NEXTLINE|BEGIN|END)?\b(\([^)]+\))?', raw_line)
if matched:
- if matched.group(1):
- suppressed_line = linenum + 1
- else:
- suppressed_line = linenum
- category = matched.group(2)
- if category in (None, '(*)'): # => "suppress all"
- _error_suppressions.setdefault(None, set()).add(suppressed_line)
+ no_lint_type = matched.group(1)
+ if no_lint_type == 'NEXTLINE':
+ def ProcessCategory(category):
+ _error_suppressions.AddLineSuppression(category, linenum + 1)
+ elif no_lint_type == 'BEGIN':
+ if _error_suppressions.HasOpenBlock():
+ error(filename, linenum, 'readability/nolint', 5,
+            f'NOLINT block already defined on line {_error_suppressions.GetOpenBlockStart()}')
+
+ def ProcessCategory(category):
+ _error_suppressions.StartBlockSuppression(category, linenum)
+ elif no_lint_type == 'END':
+ if not _error_suppressions.HasOpenBlock():
+ error(filename, linenum, 'readability/nolint', 5, 'Not in a NOLINT block')
+
+ def ProcessCategory(category):
+ if category is not None:
+ error(filename, linenum, 'readability/nolint', 5,
+ f'NOLINT categories not supported in block END: {category}')
+ _error_suppressions.EndBlockSuppression(linenum)
else:
- if category.startswith('(') and category.endswith(')'):
- category = category[1:-1]
+ def ProcessCategory(category):
+ _error_suppressions.AddLineSuppression(category, linenum)
+ categories = matched.group(2)
+ if categories in (None, '(*)'): # => "suppress all"
+ ProcessCategory(None)
+ elif categories.startswith('(') and categories.endswith(')'):
+ for category in set(map(lambda c: c.strip(), categories[1:-1].split(','))):
if category in _ERROR_CATEGORIES:
- _error_suppressions.setdefault(category, set()).add(suppressed_line)
+ ProcessCategory(category)
+ elif any(c for c in _OTHER_NOLINT_CATEGORY_PREFIXES if category.startswith(c)):
+ # Ignore any categories from other tools.
+ pass
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
- 'Unknown NOLINT error category: %s' % category)
-
+ f'Unknown NOLINT error category: {category}')
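
For reference, the comment forms the new matcher recognizes; a standalone demonstration of the regex used above:

```
import re

pattern = r'\bNOLINT(NEXTLINE|BEGIN|END)?\b(\([^)]+\))?'
for comment in ('// NOLINT',
                '// NOLINT(build/include_order)',
                '// NOLINTNEXTLINE(whitespace/tab)',
                '// NOLINTBEGIN(runtime/printf, runtime/int)',
                '// NOLINTEND'):
    m = re.search(pattern, comment)
    print(f'{comment:47} type={m.group(1)} categories={m.group(2)}')
```
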
def ProcessGlobalSuppresions(lines):
+ """Deprecated; use ProcessGlobalSuppressions."""
+ ProcessGlobalSuppressions(lines)
+
+def ProcessGlobalSuppressions(lines):
"""Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
@@ -624,74 +1124,36 @@ def ProcessGlobalSuppresions(lines):
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
- _global_error_suppressions[category] = True
+ _error_suppressions.AddGlobalSuppression(category)
if _SEARCH_KERNEL_FILE.search(line):
for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
- _global_error_suppressions[category] = True
+ _error_suppressions.AddGlobalSuppression(category)
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
- _error_suppressions.clear()
- _global_error_suppressions.clear()
+ _error_suppressions.Clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
- ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
+ ParseNolintSuppressions/ProcessGlobalSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
- bool, True iff the error should be suppressed due to a NOLINT comment or
- global suppression.
- """
- return (_global_error_suppressions.get(category, False) or
- linenum in _error_suppressions.get(category, set()) or
- linenum in _error_suppressions.get(None, set()))
-
-
-def Match(pattern, s):
- """Matches the string with the pattern, caching the compiled regexp."""
- # The regexp compilation caching is inlined in both Match and Search for
- # performance reasons; factoring it out into a separate function turns out
- # to be noticeably expensive.
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].match(s)
-
-
-def ReplaceAll(pattern, rep, s):
- """Replaces instances of pattern in a string with a replacement.
-
- The compiled regex is kept in a cache shared by Match and Search.
-
- Args:
- pattern: regex pattern
- rep: replacement text
- s: search string
-
- Returns:
- string with replacements made (or original string if no replacements)
+ bool, True iff the error should be suppressed due to a NOLINT comment,
+ block suppression or global suppression.
"""
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].sub(rep, s)
-
-
-def Search(pattern, s):
- """Searches the string for the pattern, caching the compiled regexp."""
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].search(s)
+ return _error_suppressions.IsSuppressed(category, linenum)
def _IsSourceExtension(s):
"""File extension (excluding dot) matches a source file extension."""
- return s in ('c', 'cc', 'cpp', 'cxx')
+ return s in GetNonHeaderExtensions()
class _IncludeState(object):
@@ -712,11 +1174,13 @@ class _IncludeState(object):
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
- _OTHER_H_SECTION = 4
+ _OTHER_SYS_SECTION = 4
+ _OTHER_H_SECTION = 5
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
+ _OTHER_SYS_HEADER: 'other system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
@@ -726,11 +1190,14 @@ class _IncludeState(object):
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
+ _OTHER_SYS_SECTION: 'other system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
+ self._section = None
+ self._last_header = None
self.ResetSection('')
def FindHeader(self, header):
@@ -801,7 +1268,7 @@ def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
- Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
+ re.match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
return False
return True
@@ -819,9 +1286,8 @@ def CheckNextIncludeOrder(self, header_type):
error message describing what's wrong.
"""
- error_message = ('Found %s after %s' %
- (self._TYPE_NAMES[header_type],
- self._SECTION_NAMES[self._section]))
+ error_message = (f'Found {self._TYPE_NAMES[header_type]}'
+ f' after {self._SECTION_NAMES[self._section]}')
last_section = self._section
@@ -837,6 +1303,12 @@ def CheckNextIncludeOrder(self, header_type):
else:
self._last_header = ''
return error_message
+ elif header_type == _OTHER_SYS_HEADER:
+ if self._section <= self._OTHER_SYS_SECTION:
+ self._section = self._OTHER_SYS_SECTION
+ else:
+ self._last_header = ''
+ return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
@@ -875,9 +1347,18 @@ def __init__(self):
# output format:
# "emacs" - format that emacs can parse (default)
+ # "eclipse" - format that eclipse can parse
# "vs7" - format that Microsoft Visual Studio 7 can parse
+ # "junit" - format that Jenkins, Bamboo, etc can parse
+ # "sed" - returns a gnu sed command to fix the problem
+ # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
self.output_format = 'emacs'
+ # For JUnit output, save errors and failures until the end so that they
+ # can be written into the XML
+ self._junit_errors = []
+ self._junit_failures = []
+
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
@@ -925,7 +1406,7 @@ def AddFilters(self, filters):
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
- ' (%s does not)' % filt)
+ f' ({filt} does not)')
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
@@ -952,10 +1433,70 @@ def IncrementErrorCount(self, category):
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
- for category, count in self.errors_by_category.iteritems():
- sys.stderr.write('Category \'%s\' errors found: %d\n' %
- (category, count))
- sys.stdout.write('Total errors found: %d\n' % self.error_count)
+    for category, count in sorted(self.errors_by_category.items()):
+ self.PrintInfo(f'Category \'{category}\' errors found: {count}\n')
+ if self.error_count > 0:
+ self.PrintInfo(f'Total errors found: {self.error_count}\n')
+
+ def PrintInfo(self, message):
+ # _quiet does not represent --quiet flag.
+ # Hide infos from stdout to keep stdout pure for machine consumption
+ if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
+ sys.stdout.write(message)
+
+ def PrintError(self, message):
+ if self.output_format == 'junit':
+ self._junit_errors.append(message)
+ else:
+ sys.stderr.write(message)
+
+ def AddJUnitFailure(self, filename, linenum, message, category, confidence):
+ self._junit_failures.append((filename, linenum, message, category,
+ confidence))
+
+ def FormatJUnitXML(self):
+ num_errors = len(self._junit_errors)
+ num_failures = len(self._junit_failures)
+
+ testsuite = xml.etree.ElementTree.Element('testsuite')
+ testsuite.attrib['errors'] = str(num_errors)
+ testsuite.attrib['failures'] = str(num_failures)
+ testsuite.attrib['name'] = 'cpplint'
+
+ if num_errors == 0 and num_failures == 0:
+ testsuite.attrib['tests'] = str(1)
+ xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
+
+ else:
+ testsuite.attrib['tests'] = str(num_errors + num_failures)
+ if num_errors > 0:
+ testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
+ testcase.attrib['name'] = 'errors'
+ error = xml.etree.ElementTree.SubElement(testcase, 'error')
+ error.text = '\n'.join(self._junit_errors)
+ if num_failures > 0:
+ # Group failures by file
+ failed_file_order = []
+ failures_by_file = {}
+ for failure in self._junit_failures:
+ failed_file = failure[0]
+ if failed_file not in failed_file_order:
+ failed_file_order.append(failed_file)
+ failures_by_file[failed_file] = []
+ failures_by_file[failed_file].append(failure)
+ # Create a testcase for each file
+ for failed_file in failed_file_order:
+ failures = failures_by_file[failed_file]
+ testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
+ testcase.attrib['name'] = failed_file
+ failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
+ template = '{0}: {1} [{2}] [{3}]'
+ texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
+ failure.text = '\n'.join(texts)
+
+    xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
+ return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
+
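
For one recorded failure, the serialized suite looks like the following; this is a standalone rendering that mirrors the element layout built above, with a hypothetical failure text:

```
import xml.etree.ElementTree

testsuite = xml.etree.ElementTree.Element('testsuite')
testsuite.attrib.update(errors='0', failures='1', name='cpplint', tests='1')
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = 'foo.cc'
failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
failure.text = '3: Tab found, replace by spaces [whitespace/tab] [1]'

print(xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8'))
# -> an XML declaration followed by the one-line document:
# <testsuite errors="0" failures="1" name="cpplint" tests="1">
#   <testcase name="foo.cc"><failure>...</failure></testcase></testsuite>
```
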
_cpplint_state = _CppLintState()
@@ -1067,7 +1608,7 @@ def Check(self, error, filename, linenum):
if not self.in_a_function:
return
- if Match(r'T(EST|est)', self.current_function):
+ if re.match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
@@ -1080,9 +1621,8 @@ def Check(self, error, filename, linenum):
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
- ' %s has %d non-comment lines'
- ' (error triggered by exceeding %d lines).' % (
- self.current_function, self.lines_in_function, trigger))
+ f' {self.current_function} has {self.lines_in_function} non-comment lines'
+ f' (error triggered by exceeding {trigger} lines).')
def End(self):
"""Stop analyzing function body."""
@@ -1109,12 +1649,12 @@ def FullName(self):
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
- """FullName after removing the local path to the repository.
+ r"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
- "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+ "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
@@ -1123,6 +1663,20 @@ def RepositoryName(self):
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
+ # If the user specified a repository path, it exists, and the file is
+ # contained in it, use the specified repository path
+ if _repository:
+ repo = FileInfo(_repository).FullName()
+ root_dir = project_dir
+ while os.path.exists(root_dir):
+ # allow case insensitive compare on Windows
+ if os.path.normcase(root_dir) == os.path.normcase(repo):
+ return os.path.relpath(fullname, root_dir).replace('\\', '/')
+ one_up_dir = os.path.dirname(root_dir)
+ if one_up_dir == root_dir:
+ break
+ root_dir = one_up_dir
+
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
@@ -1143,6 +1697,7 @@ def RepositoryName(self):
os.path.exists(os.path.join(current_dir, ".hg")) or
os.path.exists(os.path.join(current_dir, ".svn"))):
root_dir = current_dir
+ break
current_dir = os.path.dirname(current_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
@@ -1173,7 +1728,7 @@ def BaseName(self):
return self.Split()[1]
def Extension(self):
- """File extension - text following the final period."""
+ """File extension - text following the final period, includes that period."""
return self.Split()[2]
def NoExtension(self):
@@ -1185,7 +1740,7 @@ def IsSource(self):
return _IsSourceExtension(self.Extension()[1:])
-def _ShouldPrintError(category, confidence, linenum):
+def _ShouldPrintError(category, confidence, filename, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
@@ -1199,11 +1754,16 @@ def _ShouldPrintError(category, confidence, linenum):
is_filtered = False
for one_filter in _Filters():
+ filter_cat, filter_file, filter_line = _ParseFilterSelector(one_filter[1:])
+ category_match = category.startswith(filter_cat)
+ file_match = filter_file == "" or filter_file == filename
+ line_match = filter_line == linenum or filter_line == -1
+
if one_filter.startswith('-'):
- if category.startswith(one_filter[1:]):
+ if category_match and file_match and line_match:
is_filtered = True
elif one_filter.startswith('+'):
- if category.startswith(one_filter[1:]):
+ if category_match and file_match and line_match:
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
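
The selector tuple comes from `_ParseFilterSelector`, which is not shown in this hunk; the following is an illustrative stand-in with the behavior the matching logic above expects:

```
def parse_filter_selector(selector):
    """Illustrative stand-in for _ParseFilterSelector (hypothetical)."""
    # 'category[:filename[:line]]' -> (category, filename, line)
    parts = selector.split(':')
    category = parts[0]
    filename = parts[1] if len(parts) > 1 else ''
    linenum = int(parts[2]) if len(parts) > 2 else -1
    return category, filename, linenum

print(parse_filter_selector('whitespace/tab'))           # all files, all lines
print(parse_filter_selector('whitespace/tab:foo.h'))     # one file
print(parse_filter_selector('whitespace/tab:foo.h:12'))  # one file, one line
```
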
@@ -1220,9 +1780,9 @@ def Error(filename, linenum, category, confidence, message):
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
- False positives can be suppressed by the use of
- "cpplint(category)" comments on the offending line. These are
- parsed into _error_suppressions.
+ False positives can be suppressed by the use of "NOLINT(category)"
+ comments, NOLINTNEXTLINE or in blocks started by NOLINTBEGIN. These
+ are parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
@@ -1235,17 +1795,28 @@ def Error(filename, linenum, category, confidence, message):
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
- if _ShouldPrintError(category, confidence, linenum):
+ if _ShouldPrintError(category, confidence, filename, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
- sys.stderr.write('%s(%s): error cpplint: [%s] %s [%d]\n' % (
- filename, linenum, category, message, confidence))
+ _cpplint_state.PrintError(f'{filename}({linenum}): error cpplint:'
+ f' [{category}] {message} [{confidence}]\n')
elif _cpplint_state.output_format == 'eclipse':
- sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
- filename, linenum, message, category, confidence))
+ sys.stderr.write(f'{filename}:{linenum}: warning:'
+ f' {message} [{category}] [{confidence}]\n')
+ elif _cpplint_state.output_format == 'junit':
+ _cpplint_state.AddJUnitFailure(filename, linenum, message, category, confidence)
+ elif _cpplint_state.output_format in ['sed', 'gsed']:
+ if message in _SED_FIXUPS:
+ sys.stdout.write(f"{_cpplint_state.output_format} -i"
+ f" '{linenum}{_SED_FIXUPS[message]}' {filename}"
+ f" # {message} [{category}] [{confidence}]\n")
+ else:
+ sys.stderr.write(f'# {filename}:{linenum}: '
+ f' "{message}" [{category}] [{confidence}]\n')
else:
- sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
- filename, linenum, message, category, confidence))
+ final_message = (f'{filename}:{linenum}: '
+ f' {message} [{category}] [{confidence}]\n')
+ sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
@@ -1315,7 +1886,7 @@ def CleanseRawStrings(raw_lines):
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
- leading_space = Match(r'^(\s*)\S', line)
+ leading_space = re.match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
@@ -1336,9 +1907,9 @@ def CleanseRawStrings(raw_lines):
# before removing raw strings. This is because there are some
# cpplint checks that requires the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
- matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
+ matched = re.match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if (matched and
- not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
+ not re.match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
matched.group(1))):
delimiter = ')' + matched.group(2) + '"'
@@ -1421,6 +1992,28 @@ def CleanseComments(line):
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+def ReplaceAlternateTokens(line):
+ """Replace any alternate token by its original counterpart.
+
+  In order to comply with the Google rule stating that unary operators should
+ never be followed by a space, an exception is made for the 'not' and 'compl'
+ alternate tokens. For these, any trailing space is removed during the
+ conversion.
+
+ Args:
+ line: The line being processed.
+
+ Returns:
+ The line with alternate tokens replaced.
+ """
+ for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
+ token = _ALT_TOKEN_REPLACEMENT[match.group(2)]
+ tail = '' if match.group(2) in ['not', 'compl'] and match.group(3) == ' ' \
+ else r'\3'
+ line = re.sub(match.re, rf'\1{token}{tail}', line, count=1)
+ return line
+
+
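
Feeding a line through the routine, using a reduced token table (cpplint's real _ALT_TOKEN_REPLACEMENT also covers bitand, bitor, xor, compl, and the *_eq forms):

```
import re

_ALT_TOKEN_REPLACEMENT = {'and': '&&', 'or': '||', 'not': '!'}
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
    r'([ =()])(' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')([ (]|$)')

line = 'if (a and b) return not c;'
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    token = _ALT_TOKEN_REPLACEMENT[match.group(2)]
    # 'not'/'compl' are unary: drop the trailing space so '! c' becomes '!c'.
    tail = '' if match.group(2) in ['not', 'compl'] and match.group(3) == ' ' \
        else r'\3'
    line = re.sub(match.re, rf'\1{token}{tail}', line, count=1)
print(line)  # -> if (a && b) return !c;
```
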
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
@@ -1433,15 +2026,17 @@ class CleansedLines(object):
"""
def __init__(self, lines):
+ if '-readability/alt_tokens' in _cpplint_state.filters:
+ for i, line in enumerate(lines):
+ lines[i] = ReplaceAlternateTokens(line)
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
- for linenum in range(len(self.lines_without_raw_strings)):
- self.lines.append(CleanseComments(
- self.lines_without_raw_strings[linenum]))
- elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
+ for line in self.lines_without_raw_strings:
+ self.lines.append(CleanseComments(line))
+ elided = self._CollapseStrings(line)
self.elided.append(CleanseComments(elided))
def NumLines(self):
@@ -1474,7 +2069,7 @@ def _CollapseStrings(elided):
collapsed = ''
while True:
# Find the first quote character
- match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
+ match = re.match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
@@ -1499,8 +2094,8 @@ def _CollapseStrings(elided):
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
- if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
- match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
+ if re.search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
+ match_literal = re.match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
@@ -1529,7 +2124,7 @@ def FindEndOfExpressionInLine(line, startpos, stack):
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
- for i in xrange(startpos, len(line)):
+ for i in range(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
@@ -1542,7 +2137,7 @@ def FindEndOfExpressionInLine(line, startpos, stack):
stack.pop()
if not stack:
return (-1, None)
- elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
+ elif i > 0 and re.search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
@@ -1571,7 +2166,7 @@ def FindEndOfExpressionInLine(line, startpos, stack):
# Ignore "->" and operator functions
if (i > 0 and
- (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
+ (line[i - 1] == '-' or re.search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
@@ -1618,7 +2213,7 @@ def CloseExpression(clean_lines, linenum, pos):
"""
line = clean_lines.elided[linenum]
- if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
+ if (line[pos] not in '({[<') or re.match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
@@ -1666,8 +2261,8 @@ def FindStartOfExpressionInLine(line, endpos, stack):
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
- Match(r'\s>=\s', line[i - 1:]) or
- Search(r'\boperator\s*$', line[0:i]))):
+ re.match(r'\s>=\s', line[i - 1:]) or
+ re.search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
@@ -1758,7 +2353,7 @@ def CheckForCopyright(filename, lines, error):
# We'll say it should occur by line 10. Don't forget there's a
# placeholder line at the front.
- for line in xrange(1, min(len(lines), 11)):
+ for line in range(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
@@ -1775,7 +2370,7 @@ def GetIndentLevel(line):
Returns:
An integer count of leading spaces, possibly zero.
"""
- indent = Match(r'^( *)\S', line)
+ indent = re.match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
@@ -1793,10 +2388,10 @@ def PathSplitToList(path):
lst = []
while True:
(head, tail) = os.path.split(path)
- if head == path: # absolute paths end
+ if head == path: # absolute paths end
lst.append(head)
break
- if tail == path: # relative paths end
+ if tail == path: # relative paths end
lst.append(tail)
break
@@ -1830,8 +2425,8 @@ def GetHeaderGuardCPPVariable(filename):
def FixupPathFromRoot():
if _root_debug:
- sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
- %(_root, fileinfo.RepositoryName()))
+ sys.stderr.write(f"\n_root fixup, _root = '{_root}',"
+ f" repository name = '{fileinfo.RepositoryName()}'\n")
# Process the file path with the --root flag if it was set.
if not _root:
@@ -1853,27 +2448,28 @@ def StripListPrefix(lst, prefix):
if _root_debug:
sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
- " _root=%s)\n") %(maybe_path, file_path_from_root, _root))
+ " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
if maybe_path:
return os.path.join(*maybe_path)
# --root=.. , will prepend the outer directory to the header guard
full_path = fileinfo.FullName()
- root_abspath = os.path.abspath(_root)
+ # adapt slashes for windows
+ root_abspath = os.path.abspath(_root).replace('\\', '/')
maybe_path = StripListPrefix(PathSplitToList(full_path),
PathSplitToList(root_abspath))
if _root_debug:
sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
- "root_abspath=%s)\n") %(maybe_path, full_path, root_abspath))
+ "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
if maybe_path:
return os.path.join(*maybe_path)
if _root_debug:
- sys.stderr.write("_root ignore, returning %s\n" %(file_path_from_root))
+ sys.stderr.write(f"_root ignore, returning {file_path_from_root}\n")
# --root=FAKE_DIR is ignored
return file_path_from_root
@@ -1902,7 +2498,12 @@ def CheckForHeaderGuard(filename, clean_lines, error):
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
- if Search(r'//\s*NOLINT\(build/header_guard\)', i):
+ if re.search(r'//\s*NOLINT\(build/header_guard\)', i):
+ return
+
+ # Allow pragma once instead of header guards
+ for i in raw_lines:
+ if re.search(r'^\s*#pragma\s+once', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
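
Either escape hatch is matched against the raw source lines; a standalone demonstration of the two patterns used above:

```
import re

# NOLINT opt-out (must name build/header_guard explicitly):
print(bool(re.search(r'//\s*NOLINT\(build/header_guard\)',
                     '#include "a.h"  // NOLINT(build/header_guard)')))  # True

# New: #pragma once is accepted in place of an #ifndef guard.
print(bool(re.search(r'^\s*#pragma\s+once', '#pragma once')))    # True
print(bool(re.search(r'^\s*#pragma\s+once', '#ifndef FOO_H_')))  # False
```
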
@@ -1929,8 +2530,7 @@ def CheckForHeaderGuard(filename, clean_lines, error):
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
- 'No #ifndef header guard found, suggested CPP variable is: %s' %
- cppvar)
+ f'No #ifndef header guard found, suggested CPP variable is: {cppvar}')
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
@@ -1943,66 +2543,75 @@ def CheckForHeaderGuard(filename, clean_lines, error):
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
- '#ifndef header guard has wrong style, please use: %s' % cppvar)
+ f'#ifndef header guard has wrong style, please use: {cppvar}')
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
- match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
+ match = re.match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
- '#endif line should be "#endif // %s"' % cppvar)
+ f'#endif line should be "#endif // {cppvar}"')
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
- for i in xrange(1, len(raw_lines) - 1):
+ for i in range(1, len(raw_lines) - 1):
line = raw_lines[i]
- if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
+ if re.match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
- match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
+ match = re.match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
- '#endif line should be "#endif /* %s */"' % cppvar)
+ f'#endif line should be "#endif /* {cppvar} */"')
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
- '#endif line should be "#endif // %s"' % cppvar)
+ f'#endif line should be "#endif // {cppvar}"')
def CheckHeaderFileIncluded(filename, include_state, error):
- """Logs an error if a .cc file does not include its header."""
+ """Logs an error if a source file does not include its header."""
# Do not check test files
fileinfo = FileInfo(filename)
- if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
+ if re.search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
- headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
- if not os.path.exists(headerfile):
- return
- headername = FileInfo(headerfile).RepositoryName()
- first_include = 0
- for section_list in include_state.include_list:
- for f in section_list:
- if headername in f[0] or f[0] in headername:
- return
- if not first_include:
- first_include = f[1]
+ first_include = message = None
+ basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
+ for ext in GetHeaderExtensions():
+ headerfile = basefilename + '.' + ext
+ if not os.path.exists(headerfile):
+ continue
+ headername = FileInfo(headerfile).RepositoryName()
+ include_uses_unix_dir_aliases = False
+ for section_list in include_state.include_list:
+ for f in section_list:
+ include_text = f[0]
+ if "./" in include_text:
+ include_uses_unix_dir_aliases = True
+ if headername in include_text or include_text in headername:
+ return
+ if not first_include:
+ first_include = f[1]
+
+ message = f'{fileinfo.RepositoryName()} should include its header file {headername}'
+ if include_uses_unix_dir_aliases:
+ message += ". Relative paths like . and .. are not allowed."
- error(filename, first_include, 'build/include', 5,
- '%s should include its header file %s' % (fileinfo.RepositoryName(),
- headername))
+ if message:
+ error(filename, first_include, 'build/include', 5, message)
def CheckForBadCharacters(filename, lines, error):
@@ -2023,7 +2632,7 @@ def CheckForBadCharacters(filename, lines, error):
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
- if u'\ufffd' in line:
+ if '\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
@@ -2135,7 +2744,7 @@ def CheckPosixThreading(filename, clean_lines, linenum, error):
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
- if Search(pattern, line):
+ if re.search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
@@ -2155,7 +2764,7 @@ def CheckVlogArguments(filename, clean_lines, linenum, error):
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
- if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
+ if re.search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
@@ -2189,17 +2798,17 @@ def CheckInvalidIncrement(filename, clean_lines, linenum, error):
def IsMacroDefinition(clean_lines, linenum):
- if Search(r'^#define', clean_lines[linenum]):
+ if re.search(r'^#define', clean_lines[linenum]):
return True
- if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
+ if linenum > 0 and re.search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
- return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
+ return re.match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
@@ -2294,15 +2903,15 @@ def __init__(self, name, class_or_struct, clean_lines, linenum):
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
- if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
+ if re.search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
- for i in xrange(linenum - 1, self.starting_linenum, -1):
- match = Search(
+ for i in range(linenum - 1, self.starting_linenum, -1):
+ match = re.search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
@@ -2312,20 +2921,20 @@ def CheckEnd(self, filename, clean_lines, linenum, error):
match.group(1) + ' should be the last thing in the class')
break
- if not Match(r'^\s*$', clean_lines.elided[i]):
+ if not re.match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
- indent = Match(r'^( *)\}', clean_lines.elided[linenum])
+ indent = re.match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
- 'Closing brace should be aligned with beginning of %s' % parent)
+ f'Closing brace should be aligned with beginning of {parent}')
class _NamespaceInfo(_BlockInfo):
@@ -2352,7 +2961,7 @@ def CheckEnd(self, filename, clean_lines, linenum, error):
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
- and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
+ and not re.match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
@@ -2369,18 +2978,17 @@ def CheckEnd(self, filename, clean_lines, linenum, error):
# expected namespace.
if self.name:
# Named namespace
- if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
+ if not re.match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
re.escape(self.name) + r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
- 'Namespace should be terminated with "// namespace %s"' %
- self.name)
+ f'Namespace should be terminated with "// namespace {self.name}"')
else:
# Anonymous namespace
- if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
+ if not re.match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
- if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
+ if re.match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
@@ -2483,7 +3091,7 @@ def InTemplateArgumentList(self, clean_lines, linenum, pos):
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
- match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
+ match = re.match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
@@ -2543,11 +3151,11 @@ def UpdatePreprocessor(self, line):
Args:
line: current line to check.
"""
- if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
+ if re.match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
- elif Match(r'^\s*#\s*(else|elif)\b', line):
+ elif re.match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
@@ -2562,7 +3170,7 @@ def UpdatePreprocessor(self, line):
else:
# TODO(unknown): unexpected #else, issue warning?
pass
- elif Match(r'^\s*#\s*endif\b', line):
+ elif re.match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
@@ -2634,7 +3242,7 @@ def Update(self, filename, clean_lines, linenum, error):
# declarations even if it weren't followed by a whitespace, this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
- namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
+ namespace_decl_match = re.match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
@@ -2651,9 +3259,9 @@ def Update(self, filename, clean_lines, linenum, error):
# such as in:
# class LOCKABLE API Object {
# };
- class_decl_match = Match(
- r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
- r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
+ class_decl_match = re.match(
+ r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
+ r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
@@ -2681,7 +3289,7 @@ def Update(self, filename, clean_lines, linenum, error):
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
- access_match = Match(
+ access_match = re.match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
@@ -2692,7 +3300,7 @@ def Update(self, filename, clean_lines, linenum, error):
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
- Match(r'^\s*$', indent)):
+ re.match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
@@ -2701,13 +3309,13 @@ def Update(self, filename, clean_lines, linenum, error):
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
- '%s%s: should be indented +1 space inside %s' % (
- access_match.group(2), slots, parent))
+ f'{access_match.group(2)}{slots}:'
+ f' should be indented +1 space inside {parent}')
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
- matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ matched = re.match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
@@ -2718,7 +3326,7 @@ def Update(self, filename, clean_lines, linenum, error):
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
- elif Match(r'^extern\s*"[^"]*"\s*\{', line):
+ elif re.match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
@@ -2769,12 +3377,10 @@ def CheckCompletedBlocks(self, filename, error):
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
- 'Failed to find complete declaration of class %s' %
- obj.name)
+ f'Failed to find complete declaration of class {obj.name}')
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
- 'Failed to find complete declaration of namespace %s' %
- obj.name)
+ f'Failed to find complete declaration of namespace {obj.name}')
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
@@ -2809,25 +3415,25 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
- if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ if re.search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
- if Search(r'printf\s*\(.*".*%\d+\$', line):
+ if re.search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
- if Search(r'("|\').*\\(%|\[|\(|{)', line):
+ if re.search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
- if Search(r'\b(const|volatile|void|char|short|int|long'
+ if re.search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
@@ -2836,20 +3442,20 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
- if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+ if re.match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
- if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ if re.match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
- if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+ if re.search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and (max and min) operators are non-standard and deprecated.')
- if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
+ if re.search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
@@ -2874,12 +3480,10 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
- explicit_constructor_match = Match(
+ explicit_constructor_match = re.match(
r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
- r'(?:(?:inline|constexpr)\s+)*%s\s*'
- r'\(((?:[^()]|\([^()]*\))*)\)'
- % re.escape(base_classname),
- line)
+ rf'(?:(?:inline|constexpr)\s+)*{re.escape(base_classname)}\s*'
+ r'\(((?:[^()]|\([^()]*\))*)\)', line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
@@ -2901,6 +3505,7 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
constructor_args[i] = constructor_arg
i += 1
+ variadic_args = [arg for arg in constructor_args if '&&...' in arg]
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
@@ -2911,30 +3516,31 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum,
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
- len(defaulted_args) >= len(constructor_args) - 1))
+ len(defaulted_args) >= len(constructor_args) - 1) or
+ # variadic arguments with zero or one argument
+ (len(constructor_args) <= 2 and
+ len(variadic_args) >= 1))
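+ # A parameter pack like "Args&&... args" makes the constructor callable
+ # with a single argument, so it is treated as a one-argument constructor.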
initializer_list_constructor = bool(
onearg_constructor and
- Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
+ re.search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
- Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
- % re.escape(base_classname), constructor_args[0].strip()))
+ re.match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
+ rf'{re.escape(base_classname)}(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&',
+ constructor_args[0].strip())
+ )
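+ # Now also matches cv-qualified copy constructors such as
+ # "Foo(const volatile Foo&)".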
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
- if defaulted_args:
- error(filename, linenum, 'runtime/explicit', 5,
+ if defaulted_args or variadic_args:
+ error(filename, linenum, 'runtime/explicit', 4,
'Constructors callable with one argument '
'should be marked explicit.')
else:
- error(filename, linenum, 'runtime/explicit', 5,
+ error(filename, linenum, 'runtime/explicit', 4,
'Single-parameter constructors should be marked explicit.')
- elif is_marked_explicit and not onearg_constructor:
- if noarg_constructor:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Zero-parameter constructors should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
@@ -2957,7 +3563,7 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
- match = Search(pattern, line)
+ match = re.search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
@@ -2976,26 +3582,26 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
- not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
+ not re.search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
- not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+ not re.search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
- not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
- if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
+ not re.search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+ if re.search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
- elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
+ elif re.search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
- if (Search(r'\w\s+\(', fncall) and
- not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
- not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
- not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
- not Search(r'\bcase\s+\(', fncall)):
+ if (re.search(r'\w\s+\(', fncall) and
+ not re.search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
+ not re.search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
+ not re.search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
+ not re.search(r'\bcase\s+\(', fncall)):
# TODO(unknown): Space after an operator function seems to be a common
# error, silence those for now by restricting them to highest verbosity.
- if Search(r'\boperator_*\b', line):
+ if re.search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
@@ -3003,10 +3609,10 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
- if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+ if re.search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
- if Search(r'^\s+\)', fncall):
+ if re.search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
@@ -3032,10 +3638,10 @@ def IsBlankLine(line):
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
- len(nesting_state.stack) > 1 and
- nesting_state.stack[-1].check_namespace_indentation and
- isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
- nesting_state.previous_stack_top == nesting_state.stack[-2])
+ len(nesting_state.stack) >= 1 and
+ (isinstance(nesting_state.stack[-1], _NamespaceInfo) or
+ (isinstance(nesting_state.previous_stack_top, _NamespaceInfo)))
+ )
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
@@ -3072,28 +3678,28 @@ def CheckForFunctionLengths(filename, clean_lines, linenum,
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
- match_result = Match(regexp, line)
+ match_result = re.match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
- not Match(r'[A-Z_]+$', function_name)):
+ not re.match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
- for start_linenum in xrange(linenum, clean_lines.NumLines()):
+ for start_linenum in range(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
- if Search(r'(;|})', start_line): # Declarations and trivial functions
+ if re.search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
- elif Search(r'{', start_line):
+ if re.search(r'{', start_line):
body_found = True
- function = Search(r'((\w|:)*)\(', line).group(1)
- if Match(r'TEST', function): # Handle TEST... macros
- parameter_regexp = Search(r'(\(.*\))', joined_line)
+ function = re.search(r'((\w|:)*)\(', line).group(1)
+ if re.match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = re.search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
@@ -3104,10 +3710,10 @@ def CheckForFunctionLengths(filename, clean_lines, linenum,
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
- elif Match(r'^\}\s*$', line): # function end
+ elif re.match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
- elif not Match(r'^\s*$', line):
+ elif not re.match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
@@ -3129,7 +3735,7 @@ def CheckComment(line, filename, linenum, next_line_start, error):
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
- if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
+ if (not (re.match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
@@ -3154,7 +3760,8 @@ def CheckComment(line, filename, linenum, next_line_start, error):
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
- # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
+ # Comparisons made explicit for correctness
+ # -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
@@ -3162,8 +3769,8 @@ def CheckComment(line, filename, linenum, next_line_start, error):
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
- if (Match(r'//[^ ]*\w', comment) and
- not Match(r'(///|//\!)(\s+|$)', comment)):
+ if (re.match(r'//[^ ]*\w', comment) and
+ not re.match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
@@ -3226,12 +3833,12 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
- if Match(r' {6}\w', prev_line): # Initializer list?
+ if re.match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
- and Match(r' {6}\w', elided[search_position])):
+ and re.match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
@@ -3242,9 +3849,9 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
- exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ exception = (re.match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
- or Match(r' {4}:', prev_line))
+ or re.match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
@@ -3261,16 +3868,16 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
- and Match(r'\s*}', next_line)
+ and re.match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
- matched = Match(r'\s*(public|protected|private):', prev_line)
+ matched = re.match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
- 'Do not leave a blank line after "%s:"' % matched.group(1))
+ f'Do not leave a blank line after "{matched.group(1)}:"')
# Next, check comments
next_line_start = 0
@@ -3282,16 +3889,17 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
# get rid of comments and strings
line = clean_lines.elided[linenum]
- # You shouldn't have spaces before your brackets, except maybe after
- # 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
- if Search(r'\w\s+\[', line) and not Search(r'(?:auto&?|delete|return)\s+\[', line):
+ # You shouldn't have spaces before your brackets, except for C++11 attributes
+ # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
+ if (re.search(r'\w\s+\[(?!\[)', line) and
+ not re.search(r'(?:auto&?|delete|return)\s+\[', line)):
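+ # The (?!\[) lookahead skips C++11 attributes such as
+ # "int x [[maybe_unused]]".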
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
- if (Search(r'for *\(.*[^:]:[^: ]', line) or
- Search(r'for *\(.*[^: ]:[^:]', line)):
+ if (re.search(r'for *\(.*[^:]:[^: ]', line) or
+ re.search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
@@ -3314,7 +3922,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
- match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
+ match = re.match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
@@ -3324,12 +3932,12 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
- if ((Search(r'[\w.]=', line) or
- Search(r'=[\w.]', line))
- and not Search(r'\b(if|while|for) ', line)
+ if ((re.search(r'[\w.]=', line) or
+ re.search(r'=[\w.]', line))
+ and not re.search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
- and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
- and not Search(r'operator=', line)):
+ and not re.search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
+ and not re.search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
@@ -3348,16 +3956,17 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
#
# Note that && is not included here. This is because there are too
# many false positives due to RValue references.
- match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
+ match = re.search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
+ # TODO: support alternate operators
error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around %s' % match.group(1))
- elif not Match(r'#.*include', line):
+ f'Missing spaces around {match.group(1)}')
+ elif not re.match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
- match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
+ match = re.match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
@@ -3368,7 +3977,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
- match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
+ match = re.match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
@@ -3381,7 +3990,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
- match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
+ match = re.search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
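+ # The literal-suffix group lets shifts like "1LL<<20" reach the digit
+ # exemption below with group(1) still being "1".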
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
@@ -3399,16 +4008,16 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error):
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type> alpha
- match = Search(r'>>[a-zA-Z_]', line)
+ match = re.search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
- match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ match = re.search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
- 'Extra space for operator %s' % match.group(1))
+ f'Extra space for operator {match.group(1)}')
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
@@ -3423,30 +4032,29 @@ def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
- match = Search(r' (if\(|for\(|while\(|switch\()', line)
+ match = re.search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
- 'Missing space before ( in %s' % match.group(1))
+ f'Missing space before ( in {match.group(1)}')
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo   )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
- match = Search(r'\b(if|for|while|switch)\s*'
+ match = re.search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
- not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ not match.group(2) and re.search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
- 'Mismatching spaces inside () in %s' % match.group(1))
+ f'Mismatching spaces inside () in {match.group(1)}')
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
- 'Should have zero or one spaces inside ( and ) in %s' %
- match.group(1))
+ f'Should have zero or one spaces inside ( and ) in {match.group(1)}')
def CheckCommaSpacing(filename, clean_lines, linenum, error):
@@ -3471,8 +4079,9 @@ def CheckCommaSpacing(filename, clean_lines, linenum, error):
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
- if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
- Search(r',[^,\s]', raw[linenum])):
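+ # "__VA_OPT__(,)" and "operator,(" are rewritten away first so variadic
+ # macros and comma-operator overloads do not trigger the comma check.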
+ match = re.search(r',[^,\s]', re.sub(r'\b__VA_OPT__\s*\(,\)', '',
+ re.sub(r'\boperator\s*,\s*\(', 'F(', line)))
+ if (match and re.search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
@@ -3480,7 +4089,7 @@ def CheckCommaSpacing(filename, clean_lines, linenum, error):
# except for a few corner cases
# TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
- if Search(r';[^\s};\\)/]', line):
+ if re.search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
@@ -3497,7 +4106,7 @@ def _IsType(clean_lines, nesting_state, expr):
True, if token looks like a type.
"""
# Keep only the last token in the expression
- last_word = Match(r'^.*(\b\S+)$', expr)
+ last_word = re.match(r'^.*(\b\S+)$', expr)
if last_word:
token = last_word.group(1)
else:
@@ -3540,8 +4149,8 @@ def _IsType(clean_lines, nesting_state, expr):
continue
# Look for typename in the specified range
- for i in xrange(first_line, last_line + 1, 1):
- if Search(typename_pattern, clean_lines.elided[i]):
+ for i in range(first_line, last_line + 1, 1):
+ if re.search(typename_pattern, clean_lines.elided[i]):
return True
block_index -= 1
@@ -3567,7 +4176,7 @@ def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
- match = Match(r'^(.*[^ ({>]){', line)
+ match = re.match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
@@ -3604,34 +4213,34 @@ def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
- for offset in xrange(endlinenum + 1,
+ for offset in range(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
- if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
+ if (not re.match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
- if Search(r'}else', line):
+ if re.search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
- if Search(r':\s*;\s*$', line):
+ if re.search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
- elif Search(r'^\s*;\s*$', line):
+ elif re.search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
- elif (Search(r'\s+;\s*$', line) and
- not Search(r'\bfor\b', line)):
+ elif (re.search(r'\s+;\s*$', line) and
+ not re.search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
@@ -3650,11 +4259,10 @@ def IsDecltype(clean_lines, linenum, column):
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
- if Search(r'\bdecltype\s*$', text[0:start_col]):
+ if re.search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
-
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
@@ -3682,7 +4290,7 @@ def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
linenum <= class_info.starting_linenum):
return
- matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
+ matched = re.match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
@@ -3694,20 +4302,20 @@ def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
- not Search(r'\b(class|struct)\b', prev_line) and
- not Search(r'\\$', prev_line)):
+ not re.search(r'\b(class|struct)\b', prev_line) and
+ not re.search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
- if Search(r'\{\s*$', clean_lines.lines[i]):
+ if re.search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
- '"%s:" should be preceded by a blank line' % matched.group(1))
+ f'"{matched.group(1)}:" should be preceded by a blank line')
def GetPreviousNonBlankLine(clean_lines, linenum):
@@ -3745,7 +4353,7 @@ def CheckBraces(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum] # get rid of comments and strings
- if Match(r'\s*{\s*$', line):
+ if re.match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
@@ -3756,23 +4364,23 @@ def CheckBraces(filename, clean_lines, linenum, error):
# following line if it is part of an array initialization and would not fit
# within the 80 character limit of the preceding line.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if (not Search(r'[,;:}{(]\s*$', prevline) and
- not Match(r'\s*#', prevline) and
+ if (not re.search(r'[,;:}{(]\s*$', prevline) and
+ not re.match(r'\s*#', prevline) and
not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
- if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
+ if re.match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if Match(r'\s*}\s*$', prevline):
+ if re.match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
- if Search(r'else if\s*\(', line): # could be multi-line if
- brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
+ if re.search(r'else if\s*\(', line): # could be multi-line if
+ brace_on_left = bool(re.search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
@@ -3782,17 +4390,17 @@ def CheckBraces(filename, clean_lines, linenum, error):
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
- elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
+ elif re.search(r'}\s*else[^{]*$', line) or re.match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
- if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
+ if re.search(r'\belse [^\s{]', line) and not re.search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
- if Match(r'\s*do [^\s{]', line):
+ if re.match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
@@ -3803,21 +4411,21 @@ def CheckBraces(filename, clean_lines, linenum, error):
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
- if_else_match = Search(r'\b(if\s*\(|else\b)', line)
- if if_else_match and not Match(r'\s*#', line):
+ if_else_match = re.search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
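+ # C++17 "if constexpr (" is matched as well as plain "if (".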
+ if if_else_match and not re.match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
- if_match = Search(r'\bif\s*\(', line)
+ if_match = re.search(r'\bif\s*(|constexpr)\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
- if (not Match(r'\s*{', endline[endpos:])
- and not (Match(r'\s*$', endline[endpos:])
+ if (not re.match(r'\s*{', endline[endpos:])
+ and not (re.match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
- and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
+ and re.match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
@@ -3827,11 +4435,11 @@ def CheckBraces(filename, clean_lines, linenum, error):
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
- if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
+ if not re.match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
- if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
+ if not re.match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
@@ -3842,7 +4450,7 @@ def CheckBraces(filename, clean_lines, linenum, error):
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
- if (if_match and Match(r'\s*else\b', next_line)
+ if (if_match and re.match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
@@ -3908,7 +4516,7 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
- match = Match(r'^(.*\)\s*)\{', line)
+ match = re.match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
@@ -3941,27 +4549,27 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
- macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
- func = Match(r'^(.*\])\s*$', line_prefix)
+ macro = re.search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
+ func = re.match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
- (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
- Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
- Search(r'\bdecltype$', line_prefix) or
- Search(r'\s+=\s*$', line_prefix)):
+ (func and not re.search(r'\boperator\s*\[\s*\]', func.group(1))) or
+ re.search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
+ re.search(r'\bdecltype$', line_prefix) or
+ re.search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
- Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
+ re.search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
- match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
+ match = re.match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
@@ -3972,14 +4580,14 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if prevline and Search(r'[;{}]\s*$', prevline):
- match = Match(r'^(\s*)\{', line)
+ if prevline and re.search(r'[;{}]\s*$', prevline):
+ match = re.match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
- if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
+ if endpos > -1 and re.match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
@@ -4016,7 +4624,7 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
- matched = Match(r'\s*(for|while|if)\s*\(', line)
+ matched = re.match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
@@ -4025,7 +4633,7 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
- if end_pos >= 0 and Match(r';', end_line[end_pos:]):
+ if end_pos >= 0 and re.match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
@@ -4041,8 +4649,8 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
- while not Search(r'^\s*\{', opening_line_fragment):
- if Search(r'^(?!\s*$)', opening_line_fragment):
+ while not re.search(r'^\s*\{', opening_line_fragment):
+ if re.search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
@@ -4072,12 +4680,12 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
- body = list(opening_line[opening_pos+1:])
+ bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
- body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
+ bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
- body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
- body = '\n'.join(body)
+ bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
+ body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
@@ -4089,8 +4697,8 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
- while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
- if Search(r'^(?=\s*else)', current_line_fragment):
+ while re.search(r'^\s*$|^(?=\s*else)', current_line_fragment):
+ if re.search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
@@ -4119,7 +4727,7 @@ def FindCheckMacro(line):
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
- matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
+ matched = re.match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
@@ -4151,14 +4759,14 @@ def CheckCheck(filename, clean_lines, linenum, error):
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
- if not Match(r'\s*;', last_line[end_pos:]):
+ if not re.match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
- for i in xrange(linenum + 1, end_line):
+ for i in range(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
@@ -4169,7 +4777,7 @@ def CheckCheck(filename, clean_lines, linenum, error):
rhs = ''
operator = None
while expression:
- matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
+ matched = re.match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
@@ -4203,9 +4811,9 @@ def CheckCheck(filename, clean_lines, linenum, error):
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
- matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
+ matched = re.match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
- matched = Match(r'^(\s*\S)(.*)$', expression)
+ matched = re.match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
@@ -4229,7 +4837,7 @@ def CheckCheck(filename, clean_lines, linenum, error):
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
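+ # Integer literals (decimal or hex, with optional l/u suffixes), string
+ # literals, and character literals all count as constants here.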
- if Match(match_constant, lhs) or Match(match_constant, rhs):
+ if re.match(match_constant, lhs) or re.match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
@@ -4239,9 +4847,8 @@ def CheckCheck(filename, clean_lines, linenum, error):
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
- 'Consider using %s instead of %s(a %s b)' % (
- _CHECK_REPLACEMENT[check_macro][operator],
- check_macro, operator))
+ f'Consider using {_CHECK_REPLACEMENT[check_macro][operator]}'
+ f' instead of {check_macro}(a {operator} b)')
def CheckAltTokens(filename, clean_lines, linenum, error):
@@ -4256,7 +4863,7 @@ def CheckAltTokens(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
- if Match(r'^\s*#', line):
+ if re.match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
@@ -4272,8 +4879,8 @@ def CheckAltTokens(filename, clean_lines, linenum, error):
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
- 'Use operator %s instead of %s' % (
- _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
+ f'Use operator {_ALT_TOKEN_REPLACEMENT[match.group(2)]}'
+ f' instead of {match.group(2)}')
def GetLineWidth(line):
@@ -4286,7 +4893,7 @@ def GetLineWidth(line):
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
- if isinstance(line, unicode):
+ if isinstance(line, str):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
@@ -4301,7 +4908,7 @@ def GetLineWidth(line):
is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
if not is_wide_build and is_low_surrogate:
width -= 1
-
+
width += 1
return width
else:
@@ -4349,7 +4956,7 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
- scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
+ scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
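+ # Only access specifiers and Qt "signals:"/"slots:" labels are exempt now;
+ # arbitrary "label:" lines no longer bypass the indent check.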
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
@@ -4360,11 +4967,11 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
- if (not Search(r'[",=><] *$', prev) and
+ if (not re.search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
- not Match(scope_or_label_pattern, cleansed_line) and
+ not re.match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
- Match(r'^\s*""', line))):
+ re.match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
@@ -4377,9 +4984,9 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
is_header_guard = False
if IsHeaderExtension(file_extension):
cppvar = GetHeaderGuardCPPVariable(filename)
- if (line.startswith('#ifndef %s' % cppvar) or
- line.startswith('#define %s' % cppvar) or
- line.startswith('#endif // %s' % cppvar)):
+ if (line.startswith(f'#ifndef {cppvar}') or
+ line.startswith(f'#define {cppvar}') or
+ line.startswith(f'#endif // {cppvar}')):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
@@ -4389,16 +4996,23 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
+ #
+ # Doxygen documentation copying can get pretty long when using an overloaded
+ # function declaration.
if (not line.startswith('#include') and not is_header_guard and
- not Match(r'^\s*//.*http(s?)://\S*$', line) and
- not Match(r'^\s*//\s*[^\s]*$', line) and
- not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+ not re.match(r'^\s*//.*http(s?)://\S*$', line) and
+ not re.match(r'^\s*//\s*[^\s]*$', line) and
+ not re.match(r'^// \$Id:.*#[0-9]+ \$$', line) and
+ not re.match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
- 'Lines should be <= %i characters long' % _line_length)
+ f'Lines should be <= {_line_length} characters long')
if (cleansed_line.count(';') > 1 and
+ # allow simple single line lambdas
+ not re.match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
+ line) and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
@@ -4455,21 +5069,25 @@ def _DropCommonSuffixes(filename):
Returns:
The filename with the common suffix removed.
"""
- for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
- 'inl.h', 'impl.h', 'internal.h'):
+ for suffix in itertools.chain(
+ (f"{test_suffix.lstrip('_')}.{ext}"
+ for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
+ (f'{suffix}.{ext}'
+ for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
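+ # Builds suffixes like "test.cc" and "inl.h" for every configured source
+ # and header extension.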
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
-def _ClassifyInclude(fileinfo, include, is_system):
+def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
- is_system: True if the #include used <> rather than "".
+ used_angle_brackets: True if the #include used <> rather than "".
+ include_order: "default" or other value allowed in program arguments
Returns:
One of the _XXX_HEADER constants.
@@ -4479,6 +5097,8 @@ def _ClassifyInclude(fileinfo, include, is_system):
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
+ >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
+ _OTHER_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
@@ -4489,13 +5109,24 @@ def _ClassifyInclude(fileinfo, include, is_system):
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
- is_cpp_h = include in _CPP_HEADERS
+ is_cpp_header = include in _CPP_HEADERS
+
+ # Mark include as C header if in list or in a known folder for standard-ish C headers.
+ is_std_c_header = (include_order == "default") or (include in _C_HEADERS
+ # additional linux glibc header folders
+ or re.search(rf'(?:{"|".join(C_STANDARD_HEADER_FOLDERS)})\/.*\.h', include))
+
+ # Headers with C++ extensions shouldn't be considered C system headers
+ include_ext = os.path.splitext(include)[1]
+ is_system = used_angle_brackets and include_ext not in ['.hh', '.hpp', '.hxx', '.h++']
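+ # e.g. <foo.hpp> is not classified as a system header despite the angle brackets.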
if is_system:
- if is_cpp_h:
+ if is_cpp_header:
return _CPP_SYS_HEADER
- else:
+ if is_std_c_header:
return _C_SYS_HEADER
+ else:
+ return _OTHER_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
@@ -4503,9 +5134,11 @@ def _ClassifyInclude(fileinfo, include, is_system):
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+ target_dir_pub = os.path.normpath(target_dir + '/../public')
+ target_dir_pub = target_dir_pub.replace('\\', '/')
if target_base == include_base and (
include_dir == target_dir or
- include_dir == os.path.normpath(target_dir + '/../public')):
+ include_dir == target_dir_pub):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
@@ -4547,10 +5180,12 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
- match = Match(r'#include\s*"([^/]+\.h)"', line)
- if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
- error(filename, linenum, 'build/include', 4,
- 'Include the directory when naming .h files')
+ match = re.match(r'#include\s*"([^/]+\.(.*))"', line)
+ if match:
+ if (IsHeaderExtension(match.group(2)) and
+ not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1))):
+ error(filename, linenum, 'build/include_subdir', 4,
+ 'Include the directory when naming header files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
@@ -4558,17 +5193,33 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
- is_system = (match.group(1) == '<')
+ used_angle_brackets = match.group(1) == '<'
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
- '"%s" already included at %s:%s' %
- (include, filename, duplicate_line))
- elif (include.endswith('.cc') and
+ f'"{include}" already included at {filename}:{duplicate_line}')
+ return
+
+ for extension in GetNonHeaderExtensions():
+ if (include.endswith('.' + extension) and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
- error(filename, linenum, 'build/include', 4,
- 'Do not include .cc files from other packages')
- elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
+ error(filename, linenum, 'build/include', 4,
+ 'Do not include .' + extension + ' files from other packages')
+ return
+
+ # We DO want to include a 3rd party looking header if it matches the
+ # filename. Otherwise we get an erroneous "...should include its
+ # header" error later.
+ third_src_header = False
+ for ext in GetHeaderExtensions():
+ basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
+ headerfile = basefilename + '.' + ext
+ headername = FileInfo(headerfile).RepositoryName()
+ if headername in include or include in headername:
+ third_src_header = True
+ break
+
+ if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
@@ -4583,16 +5234,16 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
- _ClassifyInclude(fileinfo, include, is_system))
+ _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
if error_message:
error(filename, linenum, 'build/include_order', 4,
- '%s. Should be: %s.h, c system, c++ system, other.' %
- (error_message, fileinfo.BaseName()))
+ f'{error_message}. Should be: {fileinfo.BaseName()}.h, c system,'
+ ' c++ system, other.')
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
- 'Include "%s" not in alphabetical order' % include)
+ f'Include "{include}" not in alphabetical order')
include_state.SetLastHeader(canonical_include)
@@ -4622,7 +5273,7 @@ def _GetTextInside(text, start_pattern):
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
- closing_punctuation = set(matching_punctuation.itervalues())
+ closing_punctuation = set(matching_punctuation.values())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
@@ -4712,12 +5363,10 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
- match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
+ match = re.match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
- # Make Windows paths like Unix.
- fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
@@ -4734,15 +5383,15 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
- if Search(r'\bshort port\b', line):
- if not Search(r'\bunsigned short port\b', line):
+ if re.search(r'\bshort port\b', line):
+ if not re.search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
- match = Search(r'\b(short|long(?! +double)|long long)\b', line)
+ match = re.search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
- 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
+ f'Use int16/int64/etc, rather than the C type {match.group(1)}')
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
@@ -4750,13 +5399,13 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
- if Search(r'\boperator\s*&\s*\(\s*\)', line):
+ if re.search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
- if Search(r'\}\s*if\s*\(', line):
+ if re.search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
@@ -4769,28 +5418,32 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
- match = Match(r'([\w.\->()]+)$', printf_args)
+ match = re.match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
- 'Potential format string bug. Do %s("%%s", %s) instead.'
- % (function_name, match.group(1)))
+ 'Potential format string bug. Do'
+ f' {function_name}("%s", {match.group(1)}) instead.')
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
- match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
- if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
+ match = re.search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+ if match and not re.match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
- 'Did you mean "memset(%s, 0, %s)"?'
- % (match.group(1), match.group(2)))
+ f'Did you mean "memset({match.group(1)}, 0, {match.group(2)})"?')
- if Search(r'\busing namespace\b', line):
- error(filename, linenum, 'build/namespaces', 5,
- 'Do not use namespace using-directives. '
- 'Use using-declarations instead.')
+ if re.search(r'\busing namespace\b', line):
+ if re.search(r'\bliterals\b', line):
+ error(filename, linenum, 'build/namespaces_literals', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
+ else:
+ error(filename, linenum, 'build/namespaces', 5,
+ 'Do not use namespace using-directives. '
+ 'Use using-declarations instead.')
# Detect variable-length arrays.
- match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ match = re.match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
@@ -4804,17 +5457,17 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
skip_next = False
continue
- if Search(r'sizeof\(.+\)', tok): continue
- if Search(r'arraysize\(\w+\)', tok): continue
+ if re.search(r'sizeof\(.+\)', tok): continue
+ if re.search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
- if Match(r'\d+', tok): continue
- if Match(r'0[xX][0-9a-fA-F]+', tok): continue
- if Match(r'k[A-Z0-9]\w*', tok): continue
- if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
- if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
+ if re.match(r'\d+', tok): continue
+ if re.match(r'0[xX][0-9a-fA-F]+', tok): continue
+ if re.match(r'k[A-Z0-9]\w*', tok): continue
+ if re.match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
+ if re.match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
@@ -4832,9 +5485,9 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension,
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (IsHeaderExtension(file_extension)
- and Search(r'\bnamespace\s*{', line)
+ and re.search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
- error(filename, linenum, 'build/namespaces', 4,
+ error(filename, linenum, 'build/namespaces_headers', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
@@ -4852,7 +5505,7 @@ def CheckGlobalStatic(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
- if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
+ if linenum + 1 < clean_lines.NumLines() and not re.search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
@@ -4861,7 +5514,7 @@ def CheckGlobalStatic(filename, clean_lines, linenum, error):
# also because globals can be destroyed when some threads are still running.
# TODO(unknown): Generalize this to also find static unique_ptr instances.
# TODO(unknown): File bugs for clang-tidy to find these.
- match = Match(
+ match = re.match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
@@ -4883,20 +5536,19 @@ def CheckGlobalStatic(filename, clean_lines, linenum, error):
# matching identifiers.
# string Class::operator*()
if (match and
- not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
- not Search(r'\boperator\W', line) and
- not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
- if Search(r'\bconst\b', line):
+ not re.search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
+ not re.search(r'\boperator\W', line) and
+ not re.match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
+ if re.search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
- 'For a static/global string constant, use a C style string '
- 'instead: "%schar%s %s[]".' %
- (match.group(1), match.group(2) or '', match.group(3)))
+ 'For a static/global string constant, use a C style string instead:'
+ f' "{match.group(1)}char{match.group(2) or ""} {match.group(3)}[]".')
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
- if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
- Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
+ if (re.search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
+ re.search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
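
The `%`-interpolation is replaced by an f-string; both render the same message. A check under hypothetical match groups:
```
g1, g2, g3 = 'static const ', '', 'kGreeting'
old = ('For a static/global string constant, use a C style string '
       'instead: "%schar%s %s[]".' % (g1, g2 or '', g3))
new = ('For a static/global string constant, use a C style string instead:'
       f' "{g1}char{g2 or ""} {g3}[]".')
assert old == new
```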
@@ -4913,21 +5565,21 @@ def CheckPrintf(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
- match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ match = re.search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
- error(filename, linenum, 'runtime/printf', 3,
- 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
- 'to snprintf.' % (match.group(1), match.group(2)))
+ error(filename, linenum, 'runtime/printf', 3, 'If you can, use'
+ f' sizeof({match.group(1)}) instead of {match.group(2)}'
+ ' as the 2nd arg to snprintf.')
# Check if some verboten C functions are being used.
- if Search(r'\bsprintf\s*\(', line):
+ if re.search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
- match = Search(r'\b(strcpy|strcat)\s*\(', line)
+ match = re.search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
- 'Almost always, snprintf is better than %s' % match.group(1))
+ f'Almost always, snprintf is better than {match.group(1)}')
def IsDerivedFunction(clean_lines, linenum):
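
A sketch of the rewritten snprintf check on hypothetical source lines, showing which groups the regex captures:
```
import re

m = re.search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,',
              'snprintf(buf, 1024, "%d", n);')
assert m and (m.group(1), m.group(2)) == ('buf', '1024')

# A literal 0 as the second argument means "measure only" and is skipped:
m = re.search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,',
              'snprintf(nullptr, 0, "%d", n);')
assert m and m.group(2) == '0'
```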
@@ -4941,14 +5593,14 @@ def IsDerivedFunction(clean_lines, linenum):
virt-specifier.
"""
# Scan back a few lines for start of current function
- for i in xrange(linenum, max(-1, linenum - 10), -1):
- match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
+ for i in range(linenum, max(-1, linenum - 10), -1):
+ match = re.match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
- Search(r'\boverride\b', line[closing_paren:]))
+ re.search(r'\boverride\b', line[closing_paren:]))
return False
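
`xrange` is gone in Python 3, and `range` is lazy there, so the swap is behavior-preserving. A quick check that the backward scan still visits at most ten lines and never indexes below line 0:
```
linenum = 3
assert list(range(linenum, max(-1, linenum - 10), -1)) == [3, 2, 1, 0]
linenum = 25
assert list(range(linenum, max(-1, linenum - 10), -1)) == [
    25, 24, 23, 22, 21, 20, 19, 18, 17, 16]
```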
@@ -4962,9 +5614,9 @@ def IsOutOfLineMethodDefinition(clean_lines, linenum):
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
- for i in xrange(linenum, max(-1, linenum - 10), -1):
- if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
- return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
+ for i in range(linenum, max(-1, linenum - 10), -1):
+ if re.match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
+ return re.match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
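
Hypothetical declarations distinguished by the rewritten out-of-line test:
```
import re

assert re.match(r'^[^()]*\w+::\w+\(', 'void Foo::Bar(int x) {')  # out-of-line
assert not re.match(r'^[^()]*\w+::\w+\(', 'void Bar(int x) {')   # free function
```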
@@ -4978,24 +5630,24 @@ def IsInitializerList(clean_lines, linenum):
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
- for i in xrange(linenum, 1, -1):
+ for i in range(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
- remove_function_body = Match(r'^(.*)\{\s*$', line)
+ remove_function_body = re.match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
- if Search(r'\s:\s*\w+[({]', line):
+ if re.search(r'\s:\s*\w+[({]', line):
# A lone colon tends to indicate the start of a constructor
# initializer list. It could also be a ternary operator, which
# also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
- if Search(r'\}\s*,\s*$', line):
+ if re.search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
- if Search(r'[{};]\s*$', line):
+ if re.search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
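
Hypothetical lines hitting each of the initializer-list heuristics above, in order:
```
import re

assert re.search(r'\s:\s*\w+[({]', 'Foo::Foo() : bar_(0) {')  # list start
assert re.search(r'\}\s*,\s*$', '      baz_{1},')             # member end
assert re.search(r'[{};]\s*$', 'void Other() {}')             # prior function end
```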
@@ -5059,13 +5711,13 @@ def CheckForNonConstReference(filename, clean_lines, linenum,
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
- if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
+ if re.match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
- previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
+ previous = re.search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
- elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
+ elif re.match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
- previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
+ previous = re.search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
@@ -5079,7 +5731,7 @@ def CheckForNonConstReference(filename, clean_lines, linenum,
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
- for i in xrange(startline, linenum + 1):
+ for i in range(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
@@ -5103,15 +5755,15 @@ def CheckForNonConstReference(filename, clean_lines, linenum,
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
- for i in xrange(linenum - 1, max(0, linenum - 10), -1):
+ for i in range(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
- if not Search(r'[),]\s*$', previous_line):
+ if not re.search(r'[),]\s*$', previous_line):
break
- if Match(r'^\s*:\s+\S', previous_line):
+ if re.match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
- if Search(r'\\\s*$', line):
+ if re.search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
@@ -5128,25 +5780,25 @@ def CheckForNonConstReference(filename, clean_lines, linenum,
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
- if Search(allowed_functions, line):
+ if re.search(allowed_functions, line):
return
- elif not Search(r'\S+\([^)]*$', line):
+ elif not re.search(r'\S+\([^)]*$', line):
# No allowed function on this line; in fact, we didn't see any
# function name at all, so this is likely a multi-line parameter
# list. Try a bit harder to catch this case.
- for i in xrange(2):
+ for i in range(2):
if (linenum > i and
- Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
+ re.search(allowed_functions, clean_lines.elided[linenum - i - 1])):
return
- decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
+ decls = re.sub(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
- if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
- not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
+ if (not re.match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
+ not re.match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
- ReplaceAll(' *<', '<', parameter))
+ re.sub(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
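
`ReplaceAll(pattern, rep, s)` was likewise a cached wrapper over `re.sub(pattern, rep, s)`, so both rewrites above are drop-in replacements. A sketch on hypothetical inputs:
```
import re

decls = 'void F(string& s) { return; } int x;'
assert '{' not in re.sub(r'{[^}]*}', ' ', decls)  # function body stripped
assert re.sub(' *<', '<', 'const vector <int>& v') == 'const vector<int>& v'
```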
@@ -5164,7 +5816,7 @@ def CheckCasts(filename, clean_lines, linenum, error):
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
- match = Search(
+ match = re.search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
@@ -5188,7 +5840,7 @@ def CheckCasts(filename, clean_lines, linenum, error):
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
- if Match(r'\([^()]+\)\s*\[', match.group(3)):
+ if re.match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
@@ -5199,19 +5851,18 @@ def CheckCasts(filename, clean_lines, linenum, error):
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
- (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
+ (re.match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
- not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
- not Search(r'new\(\S+\)\s*' + matched_type, line)):
+ not re.match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
+ not re.search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
- 'Use static_cast<%s>(...) instead' %
- matched_type)
+ f'Use static_cast<{matched_type}>(...) instead')
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
- r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
+ r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
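
`size_t` joins the C-style cast pattern above, so a hypothetical line like the first one below is now reported, while the suggested replacement is not:
```
import re

pattern = r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)'
assert re.search(pattern, 'n = (size_t)count;')
assert not re.search(pattern, 'n = static_cast<size_t>(count);')
```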
@@ -5236,7 +5887,7 @@ def CheckCasts(filename, clean_lines, linenum, error):
#
# This is not a cast:
# reference_type&(int* function_param);
- match = Search(
+ match = re.search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
@@ -5244,7 +5895,7 @@ def CheckCasts(filename, clean_lines, linenum, error):
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
- match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
+ match = re.match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
@@ -5253,7 +5904,7 @@ def CheckCasts(filename, clean_lines, linenum, error):
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
- if Match(r'\s*(?:->|\[)', extended_line):
+ if re.match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
@@ -5285,38 +5936,38 @@ def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
False otherwise.
"""
line = clean_lines.elided[linenum]
- match = Search(pattern, line)
+ match = re.search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
- if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
+ if re.match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
# Try expanding current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
- for i in xrange(linenum - 1, max(0, linenum - 5), -1):
+ for i in range(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
- if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
+ if re.match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
- if context.endswith(' operator++') or context.endswith(' operator--'):
+ if (context.endswith(' operator++') or context.endswith(' operator--') or
+ context.endswith('::operator++') or context.endswith('::operator--')):
return False
# A single unnamed argument for a function tends to look like an old-style cast.
# If we see those, don't issue warnings for deprecated casts.
remainder = line[match.end(0):]
- if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
+ if re.match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
return False
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
- 'Using C-style cast. Use %s<%s>(...) instead' %
- (cast_type, match.group(1)))
+ f'Using C-style cast. Use {cast_type}<{match.group(1)}>(...) instead')
return True
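
The widened exemption also covers out-of-line postfix operator definitions, e.g. a hypothetical `Iter Iter::operator++(int)`:
```
context = 'Iter Iter::operator++'
assert (context.endswith(' operator++') or context.endswith(' operator--') or
        context.endswith('::operator++') or context.endswith('::operator--'))
```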
@@ -5333,13 +5984,13 @@ def ExpectingFunctionArgs(clean_lines, linenum):
of function types.
"""
line = clean_lines.elided[linenum]
- return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
+ return (re.match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
- (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
+ (re.match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
- Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
+ re.match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
- Search(r'\bstd::m?function\s*\<\s*$',
+ re.search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
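
A hypothetical gMock declaration matched by the first pattern above:
```
import re

assert re.match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(',
                '  MOCK_CONST_METHOD1(GetName, std::string(int id));')
```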
@@ -5364,11 +6015,11 @@ def ExpectingFunctionArgs(clean_lines, linenum):
)),
('', ('numeric_limits',)),
('', ('list',)),
- ('