cpplint.py
00001 #!/usr/bin/env python
00002 #
00003 # Copyright (c) 2009 Google Inc. All rights reserved.
00004 #
00005 # Redistribution and use in source and binary forms, with or without
00006 # modification, are permitted provided that the following conditions are
00007 # met:
00008 #
00009 #    * Redistributions of source code must retain the above copyright
00010 # notice, this list of conditions and the following disclaimer.
00011 #    * Redistributions in binary form must reproduce the above
00012 # copyright notice, this list of conditions and the following disclaimer
00013 # in the documentation and/or other materials provided with the
00014 # distribution.
00015 #    * Neither the name of Google Inc. nor the names of its
00016 # contributors may be used to endorse or promote products derived from
00017 # this software without specific prior written permission.
00018 #
00019 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00020 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00021 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
00022 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
00023 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00024 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
00025 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00026 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00027 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00028 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
00029 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00030 
00031 """Does google-lint on c++ files.
00032 
00033 The goal of this script is to identify places in the code that *may*
00034 be in non-compliance with google style.  It does not attempt to fix
00035 up these problems -- the point is to educate.  Nor does it
00036 attempt to find all problems, or to ensure that everything it does
00037 find is legitimately a problem.
00038 
00039 In particular, we can get very confused by /* and // inside strings!
00040 We do a small hack, which is to ignore //'s with "'s after them on the
00041 same line, but it is far from perfect (in either direction).
00042 """
00043 
00044 import codecs
00045 import copy
00046 import getopt
00047 import math  # for log
00048 import os
00049 import re
00050 import sre_compile
00051 import string
00052 import sys
00053 import unicodedata
00054 
00055 
00056 _USAGE = """
00057 Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
00058                    [--counting=total|toplevel|detailed] [--root=subdir]
00059                    [--linelength=digits]
00060         <file> [file] ...
00061 
00062   The style guidelines this tries to follow are those in
00063     http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
00064 
00065   Every problem is given a confidence score from 1-5, with 5 meaning we are
00066   certain of the problem, and 1 meaning it could be a legitimate construct.
00067   This will miss some errors, and is not a substitute for a code review.
00068 
00069   To suppress false-positive errors of a certain category, add a
00070   'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
00071   suppresses errors of all categories on that line.
00072 
00073   The files passed in will be linted; at least one file must be provided.
00074   Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the
00075   extensions with the --extensions flag.
00076 
00077   Flags:
00078 
00079     output=vs7
00080       By default, the output is formatted to ease emacs parsing.  Visual Studio
00081       compatible output (vs7) may also be used.  Other formats are unsupported.
00082 
00083     verbose=#
00084       Specify a number 0-5 to restrict errors to certain verbosity levels.
00085 
00086     filter=-x,+y,...
00087       Specify a comma-separated list of category-filters to apply: only
00088       error messages whose category names pass the filters will be printed.
00089       (Category names are printed with the message and look like
00090       "[whitespace/indent]".)  Filters are evaluated left to right.
00091       "-FOO" and "FOO" means "do not print categories that start with FOO".
00092       "+FOO" means "do print categories that start with FOO".
00093 
00094       Examples: --filter=-whitespace,+whitespace/braces
00095                 --filter=whitespace,runtime/printf,+runtime/printf_format
00096                 --filter=-,+build/include_what_you_use
00097 
00098       To see a list of all the categories used in cpplint, pass no arg:
00099          --filter=
00100 
00101     counting=total|toplevel|detailed
00102       The total number of errors found is always printed. If
00103       'toplevel' is provided, then the count of errors in each of
00104       the top-level categories like 'build' and 'whitespace' will
00105       also be printed. If 'detailed' is provided, then a count
00106       is provided for each category like 'build/class'.
00107 
00108     root=subdir
00109       The root directory used for deriving the header guard CPP variable.
00110       By default, the header guard CPP variable is calculated as the relative
00111       path to the directory that contains .git, .hg, or .svn.  When this flag
00112       is specified, the relative path is calculated from the specified
00113       directory. If the specified directory does not exist, this flag is
00114       ignored.
00115 
00116       Examples:
00117         Assuming that src/.git exists, the header guard CPP variables for
00118         src/chrome/browser/ui/browser.h are:
00119 
00120         No flag => CHROME_BROWSER_UI_BROWSER_H_
00121         --root=chrome => BROWSER_UI_BROWSER_H_
00122         --root=chrome/browser => UI_BROWSER_H_
00123 
00124     linelength=digits
00125       This is the allowed line length for the project. The default value is
00126       80 characters.
00127 
00128       Examples:
00129         --linelength=120
00130 
00131     extensions=extension,extension,...
00132       The allowed file extensions that cpplint will check
00133 
00134       Examples:
00135         --extensions=hpp,cpp
00136 
00137     cpplint.py supports per-directory configurations specified in CPPLINT.cfg
00138     files. A CPPLINT.cfg file can contain a number of key=value pairs.
00139     Currently the following options are supported:
00140 
00141       set noparent
00142       filter=+filter1,-filter2,...
00143       exclude_files=regex
00144       linelength=80
00145 
00146     "set noparent" option prevents cpplint from traversing directory tree
00147     upwards looking for more .cfg files in parent directories. This option
00148     is usually placed in the top-level project directory.
00149 
00150     The "filter" option is similar in function to --filter flag. It specifies
00151     message filters in addition to the |_DEFAULT_FILTERS| and those specified
00152     through --filter command-line flag.
00153 
00154     "exclude_files" allows to specify a regular expression to be matched against
00155     a file name. If the expression matches, the file is skipped and not run
00156     through liner.
00157 
00158     "linelength" allows to specify the allowed line length for the project.
00159 
00160     CPPLINT.cfg has an effect on files in the same directory and all
00161     sub-directories, unless overridden by a nested configuration file.
00162 
00163       Example file:
00164         filter=-build/include_order,+build/include_alpha
00165         exclude_files=.*\.cc
00166 
00167     The above example disables the build/include_order warning, enables
00168     build/include_alpha, and excludes all .cc files from being processed
00169     by the linter, in the current directory (where the .cfg file is
00170     located) and all sub-directories.
00171 """
00172 
00173 # We categorize each error message we print.  Here are the categories.
00174 # We want an explicit list so we can list them all in cpplint --filter=.
00175 # If you add a new error message with a new category, add it to the list
00176 # here!  cpplint_unittest.py should tell you if you forget to do this.
00177 _ERROR_CATEGORIES = [
00178     'build/class',
00179     'build/c++11',
00180     'build/deprecated',
00181     'build/endif_comment',
00182     'build/explicit_make_pair',
00183     'build/forward_decl',
00184     'build/header_guard',
00185     'build/include',
00186     'build/include_alpha',
00187     'build/include_order',
00188     'build/include_what_you_use',
00189     'build/namespaces',
00190     'build/printf_format',
00191     'build/storage_class',
00192     'legal/copyright',
00193     'readability/alt_tokens',
00194     'readability/braces',
00195     'readability/casting',
00196     'readability/check',
00197     'readability/constructors',
00198     'readability/fn_size',
00199     'readability/function',
00200     'readability/inheritance',
00201     'readability/multiline_comment',
00202     'readability/multiline_string',
00203     'readability/namespace',
00204     'readability/nolint',
00205     'readability/nul',
00206     'readability/strings',
00207     'readability/todo',
00208     'readability/utf8',
00209     'runtime/arrays',
00210     'runtime/casting',
00211     'runtime/explicit',
00212     'runtime/int',
00213     'runtime/init',
00214     'runtime/invalid_increment',
00215     'runtime/member_string_references',
00216     'runtime/memset',
00217     'runtime/indentation_namespace',
00218     'runtime/operator',
00219     'runtime/printf',
00220     'runtime/printf_format',
00221     'runtime/references',
00222     'runtime/string',
00223     'runtime/threadsafe_fn',
00224     'runtime/vlog',
00225     'whitespace/blank_line',
00226     'whitespace/braces',
00227     'whitespace/comma',
00228     'whitespace/comments',
00229     'whitespace/empty_conditional_body',
00230     'whitespace/empty_loop_body',
00231     'whitespace/end_of_line',
00232     'whitespace/ending_newline',
00233     'whitespace/forcolon',
00234     'whitespace/indent',
00235     'whitespace/line_length',
00236     'whitespace/newline',
00237     'whitespace/operators',
00238     'whitespace/parens',
00239     'whitespace/semicolon',
00240     'whitespace/tab',
00241     'whitespace/todo',
00242     ]
00243 
00244 # These error categories are no longer enforced by cpplint, but for backwards-
00245 # compatibility they may still appear in NOLINT comments.
00246 _LEGACY_ERROR_CATEGORIES = [
00247     'readability/streams',
00248     ]
00249 
00250 # The default state of the category filter. This is overridden by the --filter=
00251 # flag. By default all errors are on, so only add here categories that should be
00252 # off by default (i.e., categories that must be enabled by the --filter= flags).
00253 # All entries here should start with a '-' or '+', as in the --filter= flag.
00254 _DEFAULT_FILTERS = ['-build/include_alpha']
00255 
00256 # We used to check for high-bit characters, but after much discussion we
00257 # decided those were OK, as long as they were in UTF-8 and didn't represent
00258 # hard-coded international strings, which belong in a separate i18n file.
00259 
00260 # C++ headers
00261 _CPP_HEADERS = frozenset([
00262     # Legacy
00263     'algobase.h',
00264     'algo.h',
00265     'alloc.h',
00266     'builtinbuf.h',
00267     'bvector.h',
00268     'complex.h',
00269     'defalloc.h',
00270     'deque.h',
00271     'editbuf.h',
00272     'fstream.h',
00273     'function.h',
00274     'hash_map',
00275     'hash_map.h',
00276     'hash_set',
00277     'hash_set.h',
00278     'hashtable.h',
00279     'heap.h',
00280     'indstream.h',
00281     'iomanip.h',
00282     'iostream.h',
00283     'istream.h',
00284     'iterator.h',
00285     'list.h',
00286     'map.h',
00287     'multimap.h',
00288     'multiset.h',
00289     'ostream.h',
00290     'pair.h',
00291     'parsestream.h',
00292     'pfstream.h',
00293     'procbuf.h',
00294     'pthread_alloc',
00295     'pthread_alloc.h',
00296     'rope',
00297     'rope.h',
00298     'ropeimpl.h',
00299     'set.h',
00300     'slist',
00301     'slist.h',
00302     'stack.h',
00303     'stdiostream.h',
00304     'stl_alloc.h',
00305     'stl_relops.h',
00306     'streambuf.h',
00307     'stream.h',
00308     'strfile.h',
00309     'strstream.h',
00310     'tempbuf.h',
00311     'tree.h',
00312     'type_traits.h',
00313     'vector.h',
00314     # 17.6.1.2 C++ library headers
00315     'algorithm',
00316     'array',
00317     'atomic',
00318     'bitset',
00319     'chrono',
00320     'codecvt',
00321     'complex',
00322     'condition_variable',
00323     'deque',
00324     'exception',
00325     'forward_list',
00326     'fstream',
00327     'functional',
00328     'future',
00329     'initializer_list',
00330     'iomanip',
00331     'ios',
00332     'iosfwd',
00333     'iostream',
00334     'istream',
00335     'iterator',
00336     'limits',
00337     'list',
00338     'locale',
00339     'map',
00340     'memory',
00341     'mutex',
00342     'new',
00343     'numeric',
00344     'ostream',
00345     'queue',
00346     'random',
00347     'ratio',
00348     'regex',
00349     'set',
00350     'sstream',
00351     'stack',
00352     'stdexcept',
00353     'streambuf',
00354     'string',
00355     'strstream',
00356     'system_error',
00357     'thread',
00358     'tuple',
00359     'typeindex',
00360     'typeinfo',
00361     'type_traits',
00362     'unordered_map',
00363     'unordered_set',
00364     'utility',
00365     'valarray',
00366     'vector',
00367     # 17.6.1.2 C++ headers for C library facilities
00368     'cassert',
00369     'ccomplex',
00370     'cctype',
00371     'cerrno',
00372     'cfenv',
00373     'cfloat',
00374     'cinttypes',
00375     'ciso646',
00376     'climits',
00377     'clocale',
00378     'cmath',
00379     'csetjmp',
00380     'csignal',
00381     'cstdalign',
00382     'cstdarg',
00383     'cstdbool',
00384     'cstddef',
00385     'cstdint',
00386     'cstdio',
00387     'cstdlib',
00388     'cstring',
00389     'ctgmath',
00390     'ctime',
00391     'cuchar',
00392     'cwchar',
00393     'cwctype',
00394     ])
00395 
00396 
00397 # These headers are excluded from [build/include] and [build/include_order]
00398 # checks:
00399 # - Anything not following google file name conventions (containing an
00400 #   uppercase character, such as Python.h or nsStringAPI.h, for example).
00401 # - Lua headers.
00402 _THIRD_PARTY_HEADERS_PATTERN = re.compile(
00403     r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
00404 
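# For illustration (a sketch, with 'stdio.h' as a hypothetical include): the
# pattern above matches 'Python.h', 'nsStringAPI.h' and 'lua.h', so includes of
# those headers are skipped by the checks, while 'stdio.h' does not match and
# is still checked.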
00405 
00406 # Assertion macros.  These are defined in base/logging.h and
00407 # testing/base/gunit.h.  Note that the _M versions need to come first
00408 # for substring matching to work.
00409 _CHECK_MACROS = [
00410     'DCHECK', 'CHECK',
00411     'EXPECT_TRUE_M', 'EXPECT_TRUE',
00412     'ASSERT_TRUE_M', 'ASSERT_TRUE',
00413     'EXPECT_FALSE_M', 'EXPECT_FALSE',
00414     'ASSERT_FALSE_M', 'ASSERT_FALSE',
00415     ]
00416 
00417 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
00418 _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
00419 
00420 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
00421                         ('>=', 'GE'), ('>', 'GT'),
00422                         ('<=', 'LE'), ('<', 'LT')]:
00423   _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
00424   _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
00425   _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
00426   _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
00427   _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
00428   _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
00429 
00430 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
00431                             ('>=', 'LT'), ('>', 'LE'),
00432                             ('<=', 'GT'), ('<', 'GE')]:
00433   _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
00434   _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
00435   _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
00436   _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
00437 
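# For illustration, the two loops above fill in entries such as (a sketch of
# the resulting table, not additional configuration):
#
#   _CHECK_REPLACEMENT['CHECK']['==']        == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_TRUE']['<']   == 'EXPECT_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'   # inverted sense
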
00438 # Alternative tokens and their replacements.  For full list, see section 2.5
00439 # Alternative tokens [lex.digraph] in the C++ standard.
00440 #
00441 # Digraphs (such as '%:') are not included here since it's a mess to
00442 # match those on a word boundary.
00443 _ALT_TOKEN_REPLACEMENT = {
00444     'and': '&&',
00445     'bitor': '|',
00446     'or': '||',
00447     'xor': '^',
00448     'compl': '~',
00449     'bitand': '&',
00450     'and_eq': '&=',
00451     'or_eq': '|=',
00452     'xor_eq': '^=',
00453     'not': '!',
00454     'not_eq': '!='
00455     }
00456 
00457 # Compile regular expression that matches all the above keywords.  The "[ =()]"
00458 # bit is meant to avoid matching these keywords outside of boolean expressions.
00459 #
00460 # False positives include C-style multi-line comments and multi-line strings
00461 # but those have always been troublesome for cpplint.
00462 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
00463     r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
00464 
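# For illustration (a hypothetical input line): in "if (a and not b)" the
# pattern above matches " and" and " not", and _ALT_TOKEN_REPLACEMENT maps the
# captured keywords to "&&" and "!" when suggesting the primary operators.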
00465 
00466 # These constants define types of headers for use with
00467 # _IncludeState.CheckNextIncludeOrder().
00468 _C_SYS_HEADER = 1
00469 _CPP_SYS_HEADER = 2
00470 _LIKELY_MY_HEADER = 3
00471 _POSSIBLE_MY_HEADER = 4
00472 _OTHER_HEADER = 5
00473 
00474 # These constants define the current inline assembly state
00475 _NO_ASM = 0       # Outside of inline assembly block
00476 _INSIDE_ASM = 1   # Inside inline assembly block
00477 _END_ASM = 2      # Last line of inline assembly block
00478 _BLOCK_ASM = 3    # The whole block is an inline assembly block
00479 
00480 # Match start of assembly blocks
00481 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
00482                         r'(?:\s+(volatile|__volatile__))?'
00483                         r'\s*[{(]')
00484 
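# For illustration (hypothetical lines): _MATCH_ASM matches 'asm {' and
# '__asm__ __volatile__(' but not 'int asm_count = 0;', since the keyword must
# start the line and be followed (optionally after volatile) by '{' or '('.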
00485 
00486 _regexp_compile_cache = {}
00487 
00488 # {str, set(int)}: a map from error categories to sets of linenumbers
00489 # on which those errors are expected and should be suppressed.
00490 _error_suppressions = {}
00491 
00492 # The root directory used for deriving the header guard CPP variable.
00493 # This is set by --root flag.
00494 _root = None
00495 
00496 # The allowed line length of files.
00497 # This is set by --linelength flag.
00498 _line_length = 80
00499 
00500 # The allowed extensions for file names
00501 # This is set by --extensions flag.
00502 _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
00503 
00504 def ParseNolintSuppressions(filename, raw_line, linenum, error):
00505   """Updates the global list of error-suppressions.
00506 
00507   Parses any NOLINT comments on the current line, updating the global
00508   error_suppressions store.  Reports an error if the NOLINT comment
00509   was malformed.
00510 
00511   Args:
00512     filename: str, the name of the input file.
00513     raw_line: str, the line of input text, with comments.
00514     linenum: int, the number of the current line.
00515     error: function, an error handler.
00516   """
00517   matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
00518   if matched:
00519     if matched.group(1):
00520       suppressed_line = linenum + 1
00521     else:
00522       suppressed_line = linenum
00523     category = matched.group(2)
00524     if category in (None, '(*)'):  # => "suppress all"
00525       _error_suppressions.setdefault(None, set()).add(suppressed_line)
00526     else:
00527       if category.startswith('(') and category.endswith(')'):
00528         category = category[1:-1]
00529         if category in _ERROR_CATEGORIES:
00530           _error_suppressions.setdefault(category, set()).add(suppressed_line)
00531         elif category not in _LEGACY_ERROR_CATEGORIES:
00532           error(filename, linenum, 'readability/nolint', 5,
00533                 'Unknown NOLINT error category: %s' % category)
00534 
00535 
00536 def ResetNolintSuppressions():
00537   """Resets the set of NOLINT suppressions to empty."""
00538   _error_suppressions.clear()
00539 
00540 
00541 def IsErrorSuppressedByNolint(category, linenum):
00542   """Returns true if the specified error category is suppressed on this line.
00543 
00544   Consults the global error_suppressions map populated by
00545   ParseNolintSuppressions/ResetNolintSuppressions.
00546 
00547   Args:
00548     category: str, the category of the error.
00549     linenum: int, the current line number.
00550   Returns:
00551     bool, True iff the error should be suppressed due to a NOLINT comment.
00552   """
00553   return (linenum in _error_suppressions.get(category, set()) or
00554           linenum in _error_suppressions.get(None, set()))
00555 
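# For illustration (a hypothetical source line): given
#   'typedef long long int64;  // NOLINT(runtime/int)'
# ParseNolintSuppressions records that line number under 'runtime/int', and
# IsErrorSuppressedByNolint('runtime/int', linenum) then returns True.  A bare
# '// NOLINT' or '// NOLINT(*)' suppresses every category on the line.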
00556 
00557 def Match(pattern, s):
00558   """Matches the string with the pattern, caching the compiled regexp."""
00559   # The regexp compilation caching is inlined in both Match and Search for
00560   # performance reasons; factoring it out into a separate function turns out
00561   # to be noticeably expensive.
00562   if pattern not in _regexp_compile_cache:
00563     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00564   return _regexp_compile_cache[pattern].match(s)
00565 
00566 
00567 def ReplaceAll(pattern, rep, s):
00568   """Replaces instances of pattern in a string with a replacement.
00569 
00570   The compiled regex is kept in a cache shared by Match and Search.
00571 
00572   Args:
00573     pattern: regex pattern
00574     rep: replacement text
00575     s: search string
00576 
00577   Returns:
00578     string with replacements made (or original string if no replacements)
00579   """
00580   if pattern not in _regexp_compile_cache:
00581     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00582   return _regexp_compile_cache[pattern].sub(rep, s)
00583 
00584 
00585 def Search(pattern, s):
00586   """Searches the string for the pattern, caching the compiled regexp."""
00587   if pattern not in _regexp_compile_cache:
00588     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00589   return _regexp_compile_cache[pattern].search(s)
00590 
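# For illustration (hypothetical inputs), the cached-regexp helpers above
# behave like their re module counterparts:
#
#   Match(r'\s*#\s*include', '  #include <vector>')   # anchored at the start
#   Search(r'\bNOLINT\b', 'int x;  // NOLINT')        # anywhere in the line
#   ReplaceAll(r'\s+$', '', 'int x;   ')              # -> 'int x;'
#
# All three share _regexp_compile_cache, so each pattern is compiled only once.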
00591 
00592 class _IncludeState(object):
00593   """Tracks line numbers for includes, and the order in which includes appear.
00594 
00595   include_list contains a list of lists of (header, line number) pairs.
00596   It's a list of lists rather than just one flat list to make it
00597   easier to update across preprocessor boundaries.
00598 
00599   Call CheckNextIncludeOrder() once for each header in the file, passing
00600   in the type constants defined above. Calls in an illegal order will
00601   raise an _IncludeError with an appropriate error message.
00602 
00603   """
00604   # self._section will move monotonically through this set. If it ever
00605   # needs to move backwards, CheckNextIncludeOrder will raise an error.
00606   _INITIAL_SECTION = 0
00607   _MY_H_SECTION = 1
00608   _C_SECTION = 2
00609   _CPP_SECTION = 3
00610   _OTHER_H_SECTION = 4
00611 
00612   _TYPE_NAMES = {
00613       _C_SYS_HEADER: 'C system header',
00614       _CPP_SYS_HEADER: 'C++ system header',
00615       _LIKELY_MY_HEADER: 'header this file implements',
00616       _POSSIBLE_MY_HEADER: 'header this file may implement',
00617       _OTHER_HEADER: 'other header',
00618       }
00619   _SECTION_NAMES = {
00620       _INITIAL_SECTION: "... nothing. (This can't be an error.)",
00621       _MY_H_SECTION: 'a header this file implements',
00622       _C_SECTION: 'C system header',
00623       _CPP_SECTION: 'C++ system header',
00624       _OTHER_H_SECTION: 'other header',
00625       }
00626 
00627   def __init__(self):
00628     self.include_list = [[]]
00629     self.ResetSection('')
00630 
00631   def FindHeader(self, header):
00632     """Check if a header has already been included.
00633 
00634     Args:
00635       header: header to check.
00636     Returns:
00637       Line number of previous occurrence, or -1 if the header has not
00638       been seen before.
00639     """
00640     for section_list in self.include_list:
00641       for f in section_list:
00642         if f[0] == header:
00643           return f[1]
00644     return -1
00645 
00646   def ResetSection(self, directive):
00647     """Reset section checking for preprocessor directive.
00648 
00649     Args:
00650       directive: preprocessor directive (e.g. "if", "else").
00651     """
00652     # The name of the current section.
00653     self._section = self._INITIAL_SECTION
00654     # The path of last found header.
00655     self._last_header = ''
00656 
00657     # Update list of includes.  Note that we never pop from the
00658     # include list.
00659     if directive in ('if', 'ifdef', 'ifndef'):
00660       self.include_list.append([])
00661     elif directive in ('else', 'elif'):
00662       self.include_list[-1] = []
00663 
00664   def SetLastHeader(self, header_path):
00665     self._last_header = header_path
00666 
00667   def CanonicalizeAlphabeticalOrder(self, header_path):
00668     """Returns a path canonicalized for alphabetical comparison.
00669 
00670     - replaces "-" with "_" so they both compare the same.
00671     - removes '-inl' since we don't require it to come after the main header.
00672     - lowercases everything, just in case.
00673 
00674     Args:
00675       header_path: Path to be canonicalized.
00676 
00677     Returns:
00678       Canonicalized path.
00679     """
00680     return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
00681 
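  # For illustration (a hypothetical header path):
  #   CanonicalizeAlphabeticalOrder('base/string-util-inl.h') == 'base/string_util.h'
  # so "foo-inl.h" sorts next to "foo.h" and '-' compares the same as '_'.
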
00682   def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
00683     """Check if a header is in alphabetical order with the previous header.
00684 
00685     Args:
00686       clean_lines: A CleansedLines instance containing the file.
00687       linenum: The number of the line to check.
00688       header_path: Canonicalized header to be checked.
00689 
00690     Returns:
00691       Returns true if the header is in alphabetical order.
00692     """
00693     # If previous section is different from current section, _last_header will
00694     # be reset to empty string, so it's always less than current header.
00695     #
00696     # If previous line was a blank line, assume that the headers are
00697     # intentionally sorted the way they are.
00698     if (self._last_header > header_path and
00699         Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
00700       return False
00701     return True
00702 
00703   def CheckNextIncludeOrder(self, header_type):
00704     """Returns a non-empty error message if the next header is out of order.
00705 
00706     This function also updates the internal state to be ready to check
00707     the next include.
00708 
00709     Args:
00710       header_type: One of the _XXX_HEADER constants defined above.
00711 
00712     Returns:
00713       The empty string if the header is in the right order, or an
00714       error message describing what's wrong.
00715 
00716     """
00717     error_message = ('Found %s after %s' %
00718                      (self._TYPE_NAMES[header_type],
00719                       self._SECTION_NAMES[self._section]))
00720 
00721     last_section = self._section
00722 
00723     if header_type == _C_SYS_HEADER:
00724       if self._section <= self._C_SECTION:
00725         self._section = self._C_SECTION
00726       else:
00727         self._last_header = ''
00728         return error_message
00729     elif header_type == _CPP_SYS_HEADER:
00730       if self._section <= self._CPP_SECTION:
00731         self._section = self._CPP_SECTION
00732       else:
00733         self._last_header = ''
00734         return error_message
00735     elif header_type == _LIKELY_MY_HEADER:
00736       if self._section <= self._MY_H_SECTION:
00737         self._section = self._MY_H_SECTION
00738       else:
00739         self._section = self._OTHER_H_SECTION
00740     elif header_type == _POSSIBLE_MY_HEADER:
00741       if self._section <= self._MY_H_SECTION:
00742         self._section = self._MY_H_SECTION
00743       else:
00744         # This will always be the fallback because we're not sure
00745         # enough that the header is associated with this file.
00746         self._section = self._OTHER_H_SECTION
00747     else:
00748       assert header_type == _OTHER_HEADER
00749       self._section = self._OTHER_H_SECTION
00750 
00751     if last_section != self._section:
00752       self._last_header = ''
00753 
00754     return ''
00755 
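  # For illustration (a sketch): once an include has moved us into the "other
  # header" section, a later _CPP_SYS_HEADER such as <vector> returns
  # 'Found C++ system header after other header'; includes that only move
  # forward through the sections return the empty string.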
00756 
00757 class _CppLintState(object):
00758   """Maintains module-wide state.."""
00759 
00760   def __init__(self):
00761     self.verbose_level = 1  # global setting.
00762     self.error_count = 0    # global count of reported errors
00763     # filters to apply when emitting error messages
00764     self.filters = _DEFAULT_FILTERS[:]
00765     # backup of filter list. Used to restore the state after each file.
00766     self._filters_backup = self.filters[:]
00767     self.counting = 'total'  # In what way are we counting errors?
00768     self.errors_by_category = {}  # string to int dict storing error counts
00769 
00770     # output format:
00771     # "emacs" - format that emacs can parse (default)
00772     # "vs7" - format that Microsoft Visual Studio 7 can parse
00773     self.output_format = 'emacs'
00774 
00775   def SetOutputFormat(self, output_format):
00776     """Sets the output format for errors."""
00777     self.output_format = output_format
00778 
00779   def SetVerboseLevel(self, level):
00780     """Sets the module's verbosity, and returns the previous setting."""
00781     last_verbose_level = self.verbose_level
00782     self.verbose_level = level
00783     return last_verbose_level
00784 
00785   def SetCountingStyle(self, counting_style):
00786     """Sets the module's counting options."""
00787     self.counting = counting_style
00788 
00789   def SetFilters(self, filters):
00790     """Sets the error-message filters.
00791 
00792     These filters are applied when deciding whether to emit a given
00793     error message.
00794 
00795     Args:
00796       filters: A string of comma-separated filters (e.g. "+whitespace/indent").
00797                Each filter should start with + or -; else we die.
00798 
00799     Raises:
00800       ValueError: The comma-separated filters did not all start with '+' or '-'.
00801                   E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
00802     """
00803     # Default filters always have less priority than the flag ones.
00804     self.filters = _DEFAULT_FILTERS[:]
00805     self.AddFilters(filters)
00806 
00807   def AddFilters(self, filters):
00808     """ Adds more filters to the existing list of error-message filters. """
00809     for filt in filters.split(','):
00810       clean_filt = filt.strip()
00811       if clean_filt:
00812         self.filters.append(clean_filt)
00813     for filt in self.filters:
00814       if not (filt.startswith('+') or filt.startswith('-')):
00815         raise ValueError('Every filter in --filters must start with + or -'
00816                          ' (%s does not)' % filt)
00817 
00818   def BackupFilters(self):
00819     """ Saves the current filter list to backup storage."""
00820     self._filters_backup = self.filters[:]
00821 
00822   def RestoreFilters(self):
00823     """ Restores filters previously backed up."""
00824     self.filters = self._filters_backup[:]
00825 
00826   def ResetErrorCounts(self):
00827     """Sets the module's error statistic back to zero."""
00828     self.error_count = 0
00829     self.errors_by_category = {}
00830 
00831   def IncrementErrorCount(self, category):
00832     """Bumps the module's error statistic."""
00833     self.error_count += 1
00834     if self.counting in ('toplevel', 'detailed'):
00835       if self.counting != 'detailed':
00836         category = category.split('/')[0]
00837       if category not in self.errors_by_category:
00838         self.errors_by_category[category] = 0
00839       self.errors_by_category[category] += 1
00840 
00841   def PrintErrorCounts(self):
00842     """Print a summary of errors by category, and the total."""
00843     for category, count in self.errors_by_category.iteritems():
00844       sys.stderr.write('Category \'%s\' errors found: %d\n' %
00845                        (category, count))
00846     sys.stderr.write('Total errors found: %d\n' % self.error_count)
00847 
00848 _cpplint_state = _CppLintState()
00849 
00850 
00851 def _OutputFormat():
00852   """Gets the module's output format."""
00853   return _cpplint_state.output_format
00854 
00855 
00856 def _SetOutputFormat(output_format):
00857   """Sets the module's output format."""
00858   _cpplint_state.SetOutputFormat(output_format)
00859 
00860 
00861 def _VerboseLevel():
00862   """Returns the module's verbosity setting."""
00863   return _cpplint_state.verbose_level
00864 
00865 
00866 def _SetVerboseLevel(level):
00867   """Sets the module's verbosity, and returns the previous setting."""
00868   return _cpplint_state.SetVerboseLevel(level)
00869 
00870 
00871 def _SetCountingStyle(level):
00872   """Sets the module's counting options."""
00873   _cpplint_state.SetCountingStyle(level)
00874 
00875 
00876 def _Filters():
00877   """Returns the module's list of output filters, as a list."""
00878   return _cpplint_state.filters
00879 
00880 
00881 def _SetFilters(filters):
00882   """Sets the module's error-message filters.
00883 
00884   These filters are applied when deciding whether to emit a given
00885   error message.
00886 
00887   Args:
00888     filters: A string of comma-separated filters (e.g. "whitespace/indent").
00889              Each filter should start with + or -; else we die.
00890   """
00891   _cpplint_state.SetFilters(filters)
00892 
00893 def _AddFilters(filters):
00894   """Adds more filter overrides.
00895 
00896   Unlike _SetFilters, this function does not reset the current list of filters
00897   available.
00898 
00899   Args:
00900     filters: A string of comma-separated filters (e.g. "whitespace/indent").
00901              Each filter should start with + or -; else we die.
00902   """
00903   _cpplint_state.AddFilters(filters)
00904 
00905 def _BackupFilters():
00906   """ Saves the current filter list to backup storage."""
00907   _cpplint_state.BackupFilters()
00908 
00909 def _RestoreFilters():
00910   """ Restores filters previously backed up."""
00911   _cpplint_state.RestoreFilters()
00912 
00913 class _FunctionState(object):
00914   """Tracks current function name and the number of lines in its body."""
00915 
00916   _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
00917   _TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
00918 
00919   def __init__(self):
00920     self.in_a_function = False
00921     self.lines_in_function = 0
00922     self.current_function = ''
00923 
00924   def Begin(self, function_name):
00925     """Start analyzing function body.
00926 
00927     Args:
00928       function_name: The name of the function being tracked.
00929     """
00930     self.in_a_function = True
00931     self.lines_in_function = 0
00932     self.current_function = function_name
00933 
00934   def Count(self):
00935     """Count line in current function body."""
00936     if self.in_a_function:
00937       self.lines_in_function += 1
00938 
00939   def Check(self, error, filename, linenum):
00940     """Report if too many lines in function body.
00941 
00942     Args:
00943       error: The function to call with any errors found.
00944       filename: The name of the current file.
00945       linenum: The number of the line to check.
00946     """
00947     if Match(r'T(EST|est)', self.current_function):
00948       base_trigger = self._TEST_TRIGGER
00949     else:
00950       base_trigger = self._NORMAL_TRIGGER
00951     trigger = base_trigger * 2**_VerboseLevel()
00952 
00953     if self.lines_in_function > trigger:
00954       error_level = int(math.log(self.lines_in_function / base_trigger, 2))
00955       # base_trigger => 0, 2*base_trigger => 1, 4*base_trigger => 2, ...
00956       if error_level > 5:
00957         error_level = 5
00958       error(filename, linenum, 'readability/fn_size', error_level,
00959             'Small and focused functions are preferred:'
00960             ' %s has %d non-comment lines'
00961             ' (error triggered by exceeding %d lines).'  % (
00962                 self.current_function, self.lines_in_function, trigger))
00963 
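  # For illustration (hypothetical numbers): at --v=0 a non-test function body
  # of 600 lines exceeds trigger = 250 * 2**0, and int(math.log(600 / 250, 2))
  # is 1, so the message is reported at confidence level 1; at --v=1 the
  # trigger doubles to 500.
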
00964   def End(self):
00965     """Stop analyzing function body."""
00966     self.in_a_function = False
00967 
00968 
00969 class _IncludeError(Exception):
00970   """Indicates a problem with the include order in a file."""
00971   pass
00972 
00973 
00974 class FileInfo(object):
00975   """Provides utility functions for filenames.
00976 
00977   FileInfo provides easy access to the components of a file's path
00978   relative to the project root.
00979   """
00980 
00981   def __init__(self, filename):
00982     self._filename = filename
00983 
00984   def FullName(self):
00985     """Make Windows paths like Unix."""
00986     return os.path.abspath(self._filename).replace('\\', '/')
00987 
00988   def RepositoryName(self):
00989     """FullName after removing the local path to the repository.
00990 
00991     If we have a real absolute path name here we can try to do something smart:
00992     detect the root of the checkout and truncate /path/to/checkout from the
00993     name, so that header guards don't include things like
00994     "C:\Documents and Settings\..." or "/home/username/...".  That way, people
00995     on different computers who have checked the source out to different
00996     locations won't see bogus errors.
00997     """
00998     fullname = self.FullName()
00999 
01000     if os.path.exists(fullname):
01001       project_dir = os.path.dirname(fullname)
01002 
01003       if os.path.exists(os.path.join(project_dir, ".svn")):
01004         # If there's a .svn file in the current directory, we recursively look
01005         # up the directory tree for the top of the SVN checkout
01006         root_dir = project_dir
01007         one_up_dir = os.path.dirname(root_dir)
01008         while os.path.exists(os.path.join(one_up_dir, ".svn")):
01009           root_dir = os.path.dirname(root_dir)
01010           one_up_dir = os.path.dirname(one_up_dir)
01011 
01012         prefix = os.path.commonprefix([root_dir, project_dir])
01013         return fullname[len(prefix) + 1:]
01014 
01015       # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
01016       # searching up from the current path.
01017       root_dir = os.path.dirname(fullname)
01018       while (root_dir != os.path.dirname(root_dir) and
01019              not os.path.exists(os.path.join(root_dir, ".git")) and
01020              not os.path.exists(os.path.join(root_dir, ".hg")) and
01021              not os.path.exists(os.path.join(root_dir, ".svn"))):
01022         root_dir = os.path.dirname(root_dir)
01023 
01024       if (os.path.exists(os.path.join(root_dir, ".git")) or
01025           os.path.exists(os.path.join(root_dir, ".hg")) or
01026           os.path.exists(os.path.join(root_dir, ".svn"))):
01027         prefix = os.path.commonprefix([root_dir, project_dir])
01028         return fullname[len(prefix) + 1:]
01029 
01030     # Don't know what to do; header guard warnings may be wrong...
01031     return fullname
01032 
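  # For illustration (a hypothetical checkout): with /home/alice/src/.git
  # present, RepositoryName() for /home/alice/src/chrome/browser/ui/browser.h
  # returns 'chrome/browser/ui/browser.h', the path the header guard examples
  # in _USAGE are derived from.
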
01033   def Split(self):
01034     """Splits the file into the directory, basename, and extension.
01035 
01036     For 'chrome/browser/browser.cc', Split() would
01037     return ('chrome/browser', 'browser', '.cc')
01038 
01039     Returns:
01040       A tuple of (directory, basename, extension).
01041     """
01042 
01043     googlename = self.RepositoryName()
01044     project, rest = os.path.split(googlename)
01045     return (project,) + os.path.splitext(rest)
01046 
01047   def BaseName(self):
01048     """File base name - text after the final slash, before the final period."""
01049     return self.Split()[1]
01050 
01051   def Extension(self):
01052     """File extension - text following the final period."""
01053     return self.Split()[2]
01054 
01055   def NoExtension(self):
01056     """File has no source file extension."""
01057     return '/'.join(self.Split()[0:2])
01058 
01059   def IsSource(self):
01060     """File has a source file extension."""
01061     return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
01062 
01063 
01064 def _ShouldPrintError(category, confidence, linenum):
01065   """If confidence >= verbose, category passes filter and is not suppressed."""
01066 
01067   # There are three ways we might decide not to print an error message:
01068   # a "NOLINT(category)" comment appears in the source,
01069   # the verbosity level isn't high enough, or the filters filter it out.
01070   if IsErrorSuppressedByNolint(category, linenum):
01071     return False
01072 
01073   if confidence < _cpplint_state.verbose_level:
01074     return False
01075 
01076   is_filtered = False
01077   for one_filter in _Filters():
01078     if one_filter.startswith('-'):
01079       if category.startswith(one_filter[1:]):
01080         is_filtered = True
01081     elif one_filter.startswith('+'):
01082       if category.startswith(one_filter[1:]):
01083         is_filtered = False
01084     else:
01085       assert False  # should have been checked for in SetFilter.
01086   if is_filtered:
01087     return False
01088 
01089   return True
01090 
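# For illustration (a sketch matching the _USAGE example): with
# --filter=-whitespace,+whitespace/braces the loop above suppresses
# 'whitespace/indent' (it matches only '-whitespace') but still prints
# 'whitespace/braces', because the later '+whitespace/braces' entry resets
# is_filtered to False.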
01091 
01092 def Error(filename, linenum, category, confidence, message):
01093   """Logs the fact we've found a lint error.
01094 
01095   We log where the error was found, and also our confidence in the error,
01096   that is, how certain we are this is a legitimate style regression, and
01097   not a misidentification or a use that's sometimes justified.
01098 
01099   False positives can be suppressed by the use of
01100   "cpplint(category)"  comments on the offending line.  These are
01101   parsed into _error_suppressions.
01102 
01103   Args:
01104     filename: The name of the file containing the error.
01105     linenum: The number of the line containing the error.
01106     category: A string used to describe the "category" this bug
01107       falls under: "whitespace", say, or "runtime".  Categories
01108       may have a hierarchy separated by slashes: "whitespace/indent".
01109     confidence: A number from 1-5 representing a confidence score for
01110       the error, with 5 meaning that we are certain of the problem,
01111       and 1 meaning that it could be a legitimate construct.
01112     message: The error message.
01113   """
01114   if _ShouldPrintError(category, confidence, linenum):
01115     _cpplint_state.IncrementErrorCount(category)
01116     if _cpplint_state.output_format == 'vs7':
01117       sys.stderr.write('%s(%s):  %s  [%s] [%d]\n' % (
01118           filename, linenum, message, category, confidence))
01119     elif _cpplint_state.output_format == 'eclipse':
01120       sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
01121           filename, linenum, message, category, confidence))
01122     else:
01123       sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
01124           filename, linenum, message, category, confidence))
01125 
01126 
01127 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
01128 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
01129     r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
01130 # Match a single C style comment on the same line.
01131 _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
01132 # Matches multi-line C style comments.
01133 # This RE is a little bit more complicated than one might expect, because we
01134 # have to take care of the surrounding whitespace so we can handle comments
01135 # inside statements better.
01136 # The current rule is: we only clear spaces from both sides when we're at the
01137 # end of the line. Otherwise, we try to remove spaces from the right side;
01138 # if that doesn't work we try the left side, but only if there's a non-word
01139 # character on the right.
01140 _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
01141     r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
01142     _RE_PATTERN_C_COMMENTS + r'\s+|' +
01143     r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
01144     _RE_PATTERN_C_COMMENTS + r')')
01145 
01146 
01147 def IsCppString(line):
01148   """Does line terminate so, that the next symbol is in string constant.
01149 
01150   This function does not consider single-line nor multi-line comments.
01151 
01152   Args:
01153     line: a partial line of code (characters 0..n).
01154 
01155   Returns:
01156     True, if next character appended to 'line' is inside a
01157     string constant.
01158   """
01159 
01160   line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
01161   return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
01162 
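# For illustration (hypothetical lines): IsCppString('printf("hello') is True
# because the quote is still open, while IsCppString('printf("hello")') is
# False; escaped quotes such as \" inside a string do not count.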
01163 
01164 def CleanseRawStrings(raw_lines):
01165   """Removes C++11 raw strings from lines.
01166 
01167     Before:
01168       static const char kData[] = R"(
01169           multi-line string
01170           )";
01171 
01172     After:
01173       static const char kData[] = ""
01174           (replaced by blank line)
01175           "";
01176 
01177   Args:
01178     raw_lines: list of raw lines.
01179 
01180   Returns:
01181     list of lines with C++11 raw strings replaced by empty strings.
01182   """
01183 
01184   delimiter = None
01185   lines_without_raw_strings = []
01186   for line in raw_lines:
01187     if delimiter:
01188       # Inside a raw string, look for the end
01189       end = line.find(delimiter)
01190       if end >= 0:
01191         # Found the end of the string, match leading space for this
01192         # line and resume copying the original lines, and also insert
01193         # a "" on the last line.
01194         leading_space = Match(r'^(\s*)\S', line)
01195         line = leading_space.group(1) + '""' + line[end + len(delimiter):]
01196         delimiter = None
01197       else:
01198         # Haven't found the end yet, append a blank line.
01199         line = '""'
01200 
01201     # Look for beginning of a raw string, and replace them with
01202     # empty strings.  This is done in a loop to handle multiple raw
01203     # strings on the same line.
01204     while delimiter is None:
01205       # Look for beginning of a raw string.
01206       # See 2.14.15 [lex.string] for syntax.
01207       matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
01208       if matched:
01209         delimiter = ')' + matched.group(2) + '"'
01210 
01211         end = matched.group(3).find(delimiter)
01212         if end >= 0:
01213           # Raw string ended on same line
01214           line = (matched.group(1) + '""' +
01215                   matched.group(3)[end + len(delimiter):])
01216           delimiter = None
01217         else:
01218           # Start of a multi-line raw string
01219           line = matched.group(1) + '""'
01220       else:
01221         break
01222 
01223     lines_without_raw_strings.append(line)
01224 
01225   # TODO(unknown): if delimiter is not None here, we might want to
01226   # emit a warning for unterminated string.
01227   return lines_without_raw_strings
01228 
01229 
01230 def FindNextMultiLineCommentStart(lines, lineix):
01231   """Find the beginning marker for a multiline comment."""
01232   while lineix < len(lines):
01233     if lines[lineix].strip().startswith('/*'):
01234       # Only return this marker if the comment goes beyond this line
01235       if lines[lineix].strip().find('*/', 2) < 0:
01236         return lineix
01237     lineix += 1
01238   return len(lines)
01239 
01240 
01241 def FindNextMultiLineCommentEnd(lines, lineix):
01242   """We are inside a comment, find the end marker."""
01243   while lineix < len(lines):
01244     if lines[lineix].strip().endswith('*/'):
01245       return lineix
01246     lineix += 1
01247   return len(lines)
01248 
01249 
01250 def RemoveMultiLineCommentsFromRange(lines, begin, end):
01251   """Clears a range of lines for multi-line comments."""
01252   # Having /**/ dummy comments makes the lines non-empty, so we will not get
01253   # unnecessary blank line warnings later in the code.
01254   for i in range(begin, end):
01255     lines[i] = '/**/'
01256 
01257 
01258 def RemoveMultiLineComments(filename, lines, error):
01259   """Removes multiline (c-style) comments from lines."""
01260   lineix = 0
01261   while lineix < len(lines):
01262     lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
01263     if lineix_begin >= len(lines):
01264       return
01265     lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
01266     if lineix_end >= len(lines):
01267       error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
01268             'Could not find end of multi-line comment')
01269       return
01270     RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
01271     lineix = lineix_end + 1
01272 
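# For illustration (a hypothetical buffer): ['int a;', '/* start', 'end */']
# becomes ['int a;', '/**/', '/**/'] after RemoveMultiLineComments, so later
# checks see non-empty placeholder lines instead of comment text.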
01273 
01274 def CleanseComments(line):
01275   """Removes //-comments and single-line C-style /* */ comments.
01276 
01277   Args:
01278     line: A line of C++ source.
01279 
01280   Returns:
01281     The line with single-line comments removed.
01282   """
01283   commentpos = line.find('//')
01284   if commentpos != -1 and not IsCppString(line[:commentpos]):
01285     line = line[:commentpos].rstrip()
01286   # get rid of /* ... */
01287   return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
01288 
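# For illustration (hypothetical lines): CleanseComments('f(/* arg */ x);')
# returns 'f(x);' and CleanseComments('int a = 0;  // note') returns
# 'int a = 0;'; a '//' that sits inside an open string is left alone thanks to
# the IsCppString() check above.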
01289 
01290 class CleansedLines(object):
01291   """Holds 4 copies of all lines with different preprocessing applied to them.
01292 
01293   1) elided member contains lines without strings and comments.
01294   2) lines member contains lines without comments.
01295   3) raw_lines member contains all the lines without processing.
01296   4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
01297      strings removed.
01298   All these members are of <type 'list'>, and of the same length.
01299   """
01300 
01301   def __init__(self, lines):
01302     self.elided = []
01303     self.lines = []
01304     self.raw_lines = lines
01305     self.num_lines = len(lines)
01306     self.lines_without_raw_strings = CleanseRawStrings(lines)
01307     for linenum in range(len(self.lines_without_raw_strings)):
01308       self.lines.append(CleanseComments(
01309           self.lines_without_raw_strings[linenum]))
01310       elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
01311       self.elided.append(CleanseComments(elided))
01312 
01313   def NumLines(self):
01314     """Returns the number of lines represented."""
01315     return self.num_lines
01316 
01317   @staticmethod
01318   def _CollapseStrings(elided):
01319     """Collapses strings and chars on a line to simple "" or '' blocks.
01320 
01321     We nix strings first so we're not fooled by text like '"http://"'
01322 
01323     Args:
01324       elided: The line being processed.
01325 
01326     Returns:
01327       The line with collapsed strings.
01328     """
01329     if _RE_PATTERN_INCLUDE.match(elided):
01330       return elided
01331 
01332     # Remove escaped characters first to make quote/single quote collapsing
01333     # basic.  Things that look like escaped characters shouldn't occur
01334     # outside of strings and chars.
01335     elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
01336 
01337     # Replace quoted strings and digit separators.  Both single quotes
01338     # and double quotes are processed in the same loop, otherwise
01339     # nested quotes wouldn't work.
01340     collapsed = ''
01341     while True:
01342       # Find the first quote character
01343       match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
01344       if not match:
01345         collapsed += elided
01346         break
01347       head, quote, tail = match.groups()
01348 
01349       if quote == '"':
01350         # Collapse double quoted strings
01351         second_quote = tail.find('"')
01352         if second_quote >= 0:
01353           collapsed += head + '""'
01354           elided = tail[second_quote + 1:]
01355         else:
01356           # Unmatched double quote, don't bother processing the rest
01357           # of the line since this is probably a multiline string.
01358           collapsed += elided
01359           break
01360       else:
01361         # Found single quote, check nearby text to eliminate digit separators.
01362         #
01363         # There is no special handling for floating point here, because
01364         # the integer/fractional/exponent parts would all be parsed
01365         # correctly as long as there are digits on both sides of the
01366         # separator.  So we are fine as long as we don't see something
01367         # like "0.'3" (gcc 4.9.0 will not allow this literal).
01368         if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
01369           match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
01370           collapsed += head + match_literal.group(1).replace("'", '')
01371           elided = match_literal.group(2)
01372         else:
01373           second_quote = tail.find('\'')
01374           if second_quote >= 0:
01375             collapsed += head + "''"
01376             elided = tail[second_quote + 1:]
01377           else:
01378             # Unmatched single quote
01379             collapsed += elided
01380             break
01381 
01382     return collapsed
01383 
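  # For illustration (hypothetical lines):
  #   _CollapseStrings('printf("%d\'s", count);')  ->  'printf("", count);'
  #   _CollapseStrings("total = 1'000'000;")       ->  'total = 1000000;'
  # so later checks never trip over quotes or digit separators inside literals.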
01384 
01385 def FindEndOfExpressionInLine(line, startpos, stack):
01386   """Find the position just after the end of current parenthesized expression.
01387 
01388   Args:
01389     line: a CleansedLines line.
01390     startpos: start searching at this position.
01391     stack: nesting stack at startpos.
01392 
01393   Returns:
01394     On finding matching end: (index just after matching end, None)
01395     On finding an unclosed expression: (-1, None)
01396     Otherwise: (-1, new stack at end of this line)
01397   """
01398   for i in xrange(startpos, len(line)):
01399     char = line[i]
01400     if char in '([{':
01401       # Found start of parenthesized expression, push to expression stack
01402       stack.append(char)
01403     elif char == '<':
01404       # Found potential start of template argument list
01405       if i > 0 and line[i - 1] == '<':
01406         # Left shift operator
01407         if stack and stack[-1] == '<':
01408           stack.pop()
01409           if not stack:
01410             return (-1, None)
01411       elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
01412         # operator<, don't add to stack
01413         continue
01414       else:
01415         # Tentative start of template argument list
01416         stack.append('<')
01417     elif char in ')]}':
01418       # Found end of parenthesized expression.
01419       #
01420       # If we are currently expecting a matching '>', the pending '<'
01421       # must have been an operator.  Remove them from expression stack.
01422       while stack and stack[-1] == '<':
01423         stack.pop()
01424       if not stack:
01425         return (-1, None)
01426       if ((stack[-1] == '(' and char == ')') or
01427           (stack[-1] == '[' and char == ']') or
01428           (stack[-1] == '{' and char == '}')):
01429         stack.pop()
01430         if not stack:
01431           return (i + 1, None)
01432       else:
01433         # Mismatched parentheses
01434         return (-1, None)
01435     elif char == '>':
01436       # Found potential end of template argument list.
01437 
01438       # Ignore "->" and operator functions
01439       if (i > 0 and
01440           (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
01441         continue
01442 
01443       # Pop the stack if there is a matching '<'.  Otherwise, ignore
01444       # this '>' since it must be an operator.
01445       if stack:
01446         if stack[-1] == '<':
01447           stack.pop()
01448           if not stack:
01449             return (i + 1, None)
01450     elif char == ';':
01451       # Found something that looks like the end of a statement.  If we are
01452       # currently expecting a '>', the matching '<' must have been an operator,
01453       # since a template argument list should not contain statements.
01454       while stack and stack[-1] == '<':
01455         stack.pop()
01456       if not stack:
01457         return (-1, None)
01458 
01459   # Did not find end of expression or unbalanced parentheses on this line
01460   return (-1, stack)
01461 
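# Rough example of how the nesting stack travels across lines (hypothetical
# input):
#   line 1: "Foo(bar,"  -> returns (-1, ['('])   # still open at end of line
#   line 2: "    baz)"  -> called with ['('], returns (8, None), the index
#                          just past the matching ')'.
# CloseExpression below feeds the returned stack back in, line by line, until
# a match is found or the expression is reported as unbalanced.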
01462 
01463 def CloseExpression(clean_lines, linenum, pos):
01464   """If input points to ( or { or [ or <, finds the position that closes it.
01465 
01466   If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
01467   linenum/pos that correspond to the closing of the expression.
01468 
01469   TODO(unknown): cpplint spends a fair bit of time matching parentheses.
01470   Ideally we would want to index all opening and closing parentheses once
01471   and have CloseExpression be just a simple lookup, but due to preprocessor
01472   tricks, this is not so easy.
01473 
01474   Args:
01475     clean_lines: A CleansedLines instance containing the file.
01476     linenum: The number of the line to check.
01477     pos: A position on the line.
01478 
01479   Returns:
01480     A tuple (line, linenum, pos) pointer *past* the closing brace, or
01481     (line, len(lines), -1) if we never find a close.  Note we ignore
01482     strings and comments when matching; and the line we return is the
01483     'cleansed' line at linenum.
01484   """
01485 
01486   line = clean_lines.elided[linenum]
01487   if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
01488     return (line, clean_lines.NumLines(), -1)
01489 
01490   # Check first line
01491   (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
01492   if end_pos > -1:
01493     return (line, linenum, end_pos)
01494 
01495   # Continue scanning forward
01496   while stack and linenum < clean_lines.NumLines() - 1:
01497     linenum += 1
01498     line = clean_lines.elided[linenum]
01499     (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
01500     if end_pos > -1:
01501       return (line, linenum, end_pos)
01502 
01503   # Did not find end of expression before end of file, give up
01504   return (line, clean_lines.NumLines(), -1)
01505 
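# Usage sketch: given the elided line 'if (a < b) {' with pos pointing at the
# '(' (index 3), CloseExpression returns (line, linenum, 10), i.e. the
# position just past the matching ')'.  The '<' in between is treated as a
# comparison operator by the template heuristics above, not as an opening
# bracket.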
01506 
01507 def FindStartOfExpressionInLine(line, endpos, stack):
01508   """Find position at the matching start of current expression.
01509 
01510   This is almost the reverse of FindEndOfExpressionInLine, but note
01511   that the input position and returned position differ by 1.
01512 
01513   Args:
01514     line: a CleansedLines line.
01515     endpos: start searching at this position.
01516     stack: nesting stack at endpos.
01517 
01518   Returns:
01519     On finding matching start: (index at matching start, None)
01520     On finding an unclosed expression: (-1, None)
01521     Otherwise: (-1, new stack at beginning of this line)
01522   """
01523   i = endpos
01524   while i >= 0:
01525     char = line[i]
01526     if char in ')]}':
01527       # Found end of expression, push to expression stack
01528       stack.append(char)
01529     elif char == '>':
01530       # Found potential end of template argument list.
01531       #
01532       # Ignore it if it's a "->" or ">=" or "operator>"
01533       if (i > 0 and
01534           (line[i - 1] == '-' or
01535            Match(r'\s>=\s', line[i - 1:]) or
01536            Search(r'\boperator\s*$', line[0:i]))):
01537         i -= 1
01538       else:
01539         stack.append('>')
01540     elif char == '<':
01541       # Found potential start of template argument list
01542       if i > 0 and line[i - 1] == '<':
01543         # Left shift operator
01544         i -= 1
01545       else:
01546         # If there is a matching '>', we can pop the expression stack.
01547         # Otherwise, ignore this '<' since it must be an operator.
01548         if stack and stack[-1] == '>':
01549           stack.pop()
01550           if not stack:
01551             return (i, None)
01552     elif char in '([{':
01553       # Found start of expression.
01554       #
01555       # If there are any unmatched '>' on the stack, they must be
01556       # operators.  Remove those.
01557       while stack and stack[-1] == '>':
01558         stack.pop()
01559       if not stack:
01560         return (-1, None)
01561       if ((char == '(' and stack[-1] == ')') or
01562           (char == '[' and stack[-1] == ']') or
01563           (char == '{' and stack[-1] == '}')):
01564         stack.pop()
01565         if not stack:
01566           return (i, None)
01567       else:
01568         # Mismatched parentheses
01569         return (-1, None)
01570     elif char == ';':
01571       # Found something that looks like the end of a statement.  If we are
01572       # currently expecting a '<', the matching '>' must have been an operator,
01573       # since a template argument list should not contain statements.
01574       while stack and stack[-1] == '>':
01575         stack.pop()
01576       if not stack:
01577         return (-1, None)
01578 
01579     i -= 1
01580 
01581   return (-1, stack)
01582 
01583 
01584 def ReverseCloseExpression(clean_lines, linenum, pos):
01585   """If input points to ) or } or ] or >, finds the position that opens it.
01586 
01587   If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
01588   linenum/pos that correspond to the opening of the expression.
01589 
01590   Args:
01591     clean_lines: A CleansedLines instance containing the file.
01592     linenum: The number of the line to check.
01593     pos: A position on the line.
01594 
01595   Returns:
01596     A tuple (line, linenum, pos) pointer *at* the opening brace, or
01597     (line, 0, -1) if we never find the matching opening brace.  Note
01598     we ignore strings and comments when matching; and the line we
01599     return is the 'cleansed' line at linenum.
01600   """
01601   line = clean_lines.elided[linenum]
01602   if line[pos] not in ')}]>':
01603     return (line, 0, -1)
01604 
01605   # Check last line
01606   (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
01607   if start_pos > -1:
01608     return (line, linenum, start_pos)
01609 
01610   # Continue scanning backward
01611   while stack and linenum > 0:
01612     linenum -= 1
01613     line = clean_lines.elided[linenum]
01614     (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
01615     if start_pos > -1:
01616       return (line, linenum, start_pos)
01617 
01618   # Did not find start of expression before beginning of file, give up
01619   return (line, 0, -1)
01620 
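# Mirror-image sketch: with pos pointing at the last '>' of
#   'std::map<int, std::vector<int>> x;'
# ReverseCloseExpression walks backwards, skips the inner <...> pair, and
# returns the position of the '<' that follows 'std::map'.  Note the result
# points *at* the opening character, unlike CloseExpression, which returns
# the position just past the closing one.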
01621 
01622 def CheckForCopyright(filename, lines, error):
01623   """Logs an error if no Copyright message appears at the top of the file."""
01624 
01625   # We'll say it should occur by line 10. Don't forget there's a
01626   # dummy line at the front.
01627   for line in xrange(1, min(len(lines), 11)):
01628     if re.search(r'Copyright', lines[line], re.I): break
01629   else:                       # means no copyright line was found
01630     error(filename, 0, 'legal/copyright', 5,
01631           'No copyright message found.  '
01632           'You should have a line: "Copyright [year] <Copyright Owner>"')
01633 
01634 
01635 def GetIndentLevel(line):
01636   """Return the number of leading spaces in line.
01637 
01638   Args:
01639     line: A string to check.
01640 
01641   Returns:
01642     An integer count of leading spaces, possibly zero.
01643   """
01644   indent = Match(r'^( *)\S', line)
01645   if indent:
01646     return len(indent.group(1))
01647   else:
01648     return 0
01649 
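# For example, GetIndentLevel('    int x;') returns 4, GetIndentLevel('int')
# returns 0, and a line containing only spaces also returns 0 because the
# regexp above requires a non-space character after the indentation.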
01650 
01651 def GetHeaderGuardCPPVariable(filename):
01652   """Returns the CPP variable that should be used as a header guard.
01653 
01654   Args:
01655     filename: The name of a C++ header file.
01656 
01657   Returns:
01658     The CPP variable that should be used as a header guard in the
01659     named file.
01660 
01661   """
01662 
01663   # Restore the original filename in case cpplint is invoked from Emacs's
01664   # flymake.
01665   filename = re.sub(r'_flymake\.h$', '.h', filename)
01666   filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
01667   # Replace 'c++' with 'cpp'.
01668   filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
01669
01670   fileinfo = FileInfo(filename)
01671   file_path_from_root = fileinfo.RepositoryName()
01672   if _root:
01673     file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
01674   return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
01675 
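# Example (assuming --root is unset and 'chrome/browser/ui/browser.h' is the
# path relative to the repository root):
#   GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
#       -> 'CHROME_BROWSER_UI_BROWSER_H_'
# Every character that is not a letter or digit becomes '_', the result is
# uppercased, and a trailing underscore is appended.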
01676 
01677 def CheckForHeaderGuard(filename, clean_lines, error):
01678   """Checks that the file contains a header guard.
01679 
01680   Logs an error if no #ifndef header guard is present.  If a guard is
01681   present, checks that its name is based on the full pathname.
01682 
01683   Args:
01684     filename: The name of the C++ header file.
01685     clean_lines: A CleansedLines instance containing the file.
01686     error: The function to call with any errors found.
01687   """
01688 
01689   # Don't check for header guards if there are error suppression
01690   # comments somewhere in this file.
01691   #
01692   # Because this is silencing a warning for a nonexistent line, we
01693   # only support the very specific NOLINT(build/header_guard) syntax,
01694   # and not the general NOLINT or NOLINT(*) syntax.
01695   raw_lines = clean_lines.lines_without_raw_strings
01696   for i in raw_lines:
01697     if Search(r'//\s*NOLINT\(build/header_guard\)', i):
01698       return
01699 
01700   cppvar = GetHeaderGuardCPPVariable(filename)
01701 
01702   ifndef = ''
01703   ifndef_linenum = 0
01704   define = ''
01705   endif = ''
01706   endif_linenum = 0
01707   for linenum, line in enumerate(raw_lines):
01708     linesplit = line.split()
01709     if len(linesplit) >= 2:
01710       # find the first occurrence of #ifndef and #define, save arg
01711       if not ifndef and linesplit[0] == '#ifndef':
01712         # set ifndef to the header guard presented on the #ifndef line.
01713         ifndef = linesplit[1]
01714         ifndef_linenum = linenum
01715       if not define and linesplit[0] == '#define':
01716         define = linesplit[1]
01717     # find the last occurrence of #endif, save entire line
01718     if line.startswith('#endif'):
01719       endif = line
01720       endif_linenum = linenum
01721 
01722   if not ifndef or not define or ifndef != define:
01723     error(filename, 0, 'build/header_guard', 5,
01724           'No #ifndef header guard found, suggested CPP variable is: %s' %
01725           cppvar)
01726     return
01727 
01728   # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
01729   # for backward compatibility.
01730   if ifndef != cppvar:
01731     error_level = 0
01732     if ifndef != cppvar + '_':
01733       error_level = 5
01734 
01735     ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
01736                             error)
01737     error(filename, ifndef_linenum, 'build/header_guard', error_level,
01738           '#ifndef header guard has wrong style, please use: %s' % cppvar)
01739 
01740   # Check for "//" comments on endif line.
01741   ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
01742                           error)
01743   match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
01744   if match:
01745     if match.group(1) == '_':
01746       # Issue low severity warning for deprecated double trailing underscore
01747       error(filename, endif_linenum, 'build/header_guard', 0,
01748             '#endif line should be "#endif  // %s"' % cppvar)
01749     return
01750 
01751   # Didn't find the corresponding "//" comment.  If this file does not
01752   # contain any "//" comments at all, it could be that the compiler
01753   # only wants "/**/" comments; look for those instead.
01754   no_single_line_comments = True
01755   for i in xrange(1, len(raw_lines) - 1):
01756     line = raw_lines[i]
01757     if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
01758       no_single_line_comments = False
01759       break
01760 
01761   if no_single_line_comments:
01762     match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
01763     if match:
01764       if match.group(1) == '_':
01765         # Low severity warning for double trailing underscore
01766         error(filename, endif_linenum, 'build/header_guard', 0,
01767               '#endif line should be "#endif  /* %s */"' % cppvar)
01768       return
01769 
01770   # Didn't find anything
01771   error(filename, endif_linenum, 'build/header_guard', 5,
01772         '#endif line should be "#endif  // %s"' % cppvar)
01773 
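# A header that satisfies the checks above would look roughly like this
# (guard name taken from GetHeaderGuardCPPVariable):
#   #ifndef CHROME_BROWSER_UI_BROWSER_H_
#   #define CHROME_BROWSER_UI_BROWSER_H_
#   ...
#   #endif  // CHROME_BROWSER_UI_BROWSER_H_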
01774 
01775 def CheckHeaderFileIncluded(filename, include_state, error):
01776   """Logs an error if a .cc file does not include its header."""
01777 
01778   # Do not check test files
01779   if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'):
01780     return
01781 
01782   fileinfo = FileInfo(filename)
01783   headerfile = filename[0:len(filename) - 2] + 'h'
01784   if not os.path.exists(headerfile):
01785     return
01786   headername = FileInfo(headerfile).RepositoryName()
01787   first_include = 0
01788   for section_list in include_state.include_list:
01789     for f in section_list:
01790       if headername in f[0] or f[0] in headername:
01791         return
01792       if not first_include:
01793         first_include = f[1]
01794 
01795   error(filename, first_include, 'build/include', 5,
01796         '%s should include its header file %s' % (fileinfo.RepositoryName(),
01797                                                   headername))
01798 
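# Illustrative case for the check above: when linting 'foo/bar.cc' and
# 'foo/bar.h' exists on disk, some #include in the .cc file must mention
# 'foo/bar.h'; otherwise a build/include error is reported at the first
# include found in the file.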
01799 
01800 def CheckForBadCharacters(filename, lines, error):
01801   """Logs an error for each line containing bad characters.
01802 
01803   Two kinds of bad characters:
01804 
01805   1. Unicode replacement characters: These indicate that either the file
01806   contained invalid UTF-8 (likely) or Unicode replacement characters (which
01807   it shouldn't).  Note that it's possible for this to throw off line
01808   numbering if the invalid UTF-8 occurred adjacent to a newline.
01809 
01810   2. NUL bytes.  These are problematic for some tools.
01811 
01812   Args:
01813     filename: The name of the current file.
01814     lines: An array of strings, each representing a line of the file.
01815     error: The function to call with any errors found.
01816   """
01817   for linenum, line in enumerate(lines):
01818     if u'\ufffd' in line:
01819       error(filename, linenum, 'readability/utf8', 5,
01820             'Line contains invalid UTF-8 (or Unicode replacement character).')
01821     if '\0' in line:
01822       error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
01823 
01824 
01825 def CheckForNewlineAtEOF(filename, lines, error):
01826   """Logs an error if there is no newline char at the end of the file.
01827 
01828   Args:
01829     filename: The name of the current file.
01830     lines: An array of strings, each representing a line of the file.
01831     error: The function to call with any errors found.
01832   """
01833 
01834   # The array lines() was created by adding two newlines to the
01835   # original file (go figure), then splitting on \n.
01836   # To verify that the file ends in \n, we just have to make sure the
01837   # second-to-last element of lines() exists and is empty.
01838   if len(lines) < 3 or lines[-2]:
01839     error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
01840           'Could not find a newline character at the end of the file.')
01841 
01842 
01843 def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
01844   """Logs an error if we see /* ... */ or "..." that extend past one line.
01845 
01846   /* ... */ comments are legit inside macros, for one line.
01847   Otherwise, we prefer // comments, so it's ok to warn about the
01848   other.  Likewise, it's ok for strings to extend across multiple
01849   lines, as long as a line continuation character (backslash)
01850   terminates each line. Although not currently prohibited by the C++
01851   style guide, it's ugly and unnecessary. We don't do well with either
01852   in this lint program, so we warn about both.
01853 
01854   Args:
01855     filename: The name of the current file.
01856     clean_lines: A CleansedLines instance containing the file.
01857     linenum: The number of the line to check.
01858     error: The function to call with any errors found.
01859   """
01860   line = clean_lines.elided[linenum]
01861 
01862   # Remove all \\ (escaped backslashes) from the line. They are OK, and the
01863   # second (escaped) slash may trigger later \" detection erroneously.
01864   line = line.replace('\\\\', '')
01865 
01866   if line.count('/*') > line.count('*/'):
01867     error(filename, linenum, 'readability/multiline_comment', 5,
01868           'Complex multi-line /*...*/-style comment found. '
01869           'Lint may give bogus warnings.  '
01870           'Consider replacing these with //-style comments, '
01871           'with #if 0...#endif, '
01872           'or with more clearly structured multi-line comments.')
01873 
01874   if (line.count('"') - line.count('\\"')) % 2:
01875     error(filename, linenum, 'readability/multiline_string', 5,
01876           'Multi-line string ("...") found.  This lint script doesn\'t '
01877           'do well with such strings, and may give bogus warnings.  '
01878           'Use C++11 raw strings or concatenation instead.')
01879 
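# Example of a string the function above warns about (illustrative):
#   const char* kQuery = "SELECT *
#                         FROM t";   // multi-line string -> flagged
# The error message suggests C++11 raw strings or concatenation instead, e.g.
#   const char* kQuery = "SELECT * "
#                        "FROM t";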
01880 
01881 # (non-threadsafe name, thread-safe alternative, validation pattern)
01882 #
01883 # The validation pattern is used to eliminate false positives such as:
01884 #  _rand();               // false positive due to substring match.
01885 #  ->rand();              // some member function rand().
01886 #  ACMRandom rand(seed);  // some variable named rand.
01887 #  ISAACRandom rand();    // another variable named rand.
01888 #
01889 # Basically we require the return value of these functions to be used
01890 # in some expression context on the same line by matching on some
01891 # operator before the function name.  This eliminates constructors and
01892 # member function calls.
01893 _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
01894 _THREADING_LIST = (
01895     ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
01896     ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
01897     ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
01898     ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
01899     ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
01900     ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
01901     ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
01902     ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
01903     ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
01904     ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
01905     ('strtok(', 'strtok_r(',
01906      _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
01907     ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
01908     )
01909 
01910 
01911 def CheckPosixThreading(filename, clean_lines, linenum, error):
01912   """Checks for calls to thread-unsafe functions.
01913 
01914   Much code was originally written without multi-threading in mind, and
01915   many engineers learned POSIX before its thread-safe extensions were
01916   added, so they still reach for the older functions out of habit.  These
01917   checks guide them toward the thread-safe variants (when using POSIX
01918   directly).
01919 
01920   Args:
01921     filename: The name of the current file.
01922     clean_lines: A CleansedLines instance containing the file.
01923     linenum: The number of the line to check.
01924     error: The function to call with any errors found.
01925   """
01926   line = clean_lines.elided[linenum]
01927   for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
01928     # Additional pattern matching check to confirm that this is the
01929     # function we are looking for
01930     if Search(pattern, line):
01931       error(filename, linenum, 'runtime/threadsafe_fn', 2,
01932             'Consider using ' + multithread_safe_func +
01933             '...) instead of ' + single_thread_func +
01934             '...) for improved thread safety.')
01935 
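# Example of what the table above catches (sketch):
#   char* now = ctime(&t);    // flagged: suggest ctime_r(...) instead
#   ACMRandom rand(seed);     // not flagged: no operator before 'rand('
# The _UNSAFE_FUNC_PREFIX requirement is what keeps the second line quiet.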
01936 
01937 def CheckVlogArguments(filename, clean_lines, linenum, error):
01938   """Checks that VLOG() is only used for defining a logging level.
01939 
01940   For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
01941   VLOG(FATAL) are not.
01942 
01943   Args:
01944     filename: The name of the current file.
01945     clean_lines: A CleansedLines instance containing the file.
01946     linenum: The number of the line to check.
01947     error: The function to call with any errors found.
01948   """
01949   line = clean_lines.elided[linenum]
01950   if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
01951     error(filename, linenum, 'runtime/vlog', 5,
01952           'VLOG() should be used with numeric verbosity level.  '
01953           'Use LOG() if you want symbolic severity levels.')
01954 
01955 # Matches invalid increment: *count++, which moves the pointer instead of
01956 # incrementing the value it points to.
01957 _RE_PATTERN_INVALID_INCREMENT = re.compile(
01958     r'^\s*\*\w+(\+\+|--);')
01959 
01960 
01961 def CheckInvalidIncrement(filename, clean_lines, linenum, error):
01962   """Checks for invalid increment *count++.
01963 
01964   For example, the following function:
01965   void increment_counter(int* count) {
01966     *count++;
01967   }
01968   is invalid, because it effectively does count++, moving the pointer, and
01969   should be replaced with ++*count, (*count)++ or *count += 1.
01970 
01971   Args:
01972     filename: The name of the current file.
01973     clean_lines: A CleansedLines instance containing the file.
01974     linenum: The number of the line to check.
01975     error: The function to call with any errors found.
01976   """
01977   line = clean_lines.elided[linenum]
01978   if _RE_PATTERN_INVALID_INCREMENT.match(line):
01979     error(filename, linenum, 'runtime/invalid_increment', 5,
01980           'Changing pointer instead of value (or unused value of operator*).')
01981 
01982 
01983 def IsMacroDefinition(clean_lines, linenum):
01984   if Search(r'^#define', clean_lines[linenum]):
01985     return True
01986 
01987   if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
01988     return True
01989 
01990   return False
01991 
01992 
01993 def IsForwardClassDeclaration(clean_lines, linenum):
01994   return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
01995 
01996 
01997 class _BlockInfo(object):
01998   """Stores information about a generic block of code."""
01999 
02000   def __init__(self, seen_open_brace):
02001     self.seen_open_brace = seen_open_brace
02002     self.open_parentheses = 0
02003     self.inline_asm = _NO_ASM
02004     self.check_namespace_indentation = False
02005 
02006   def CheckBegin(self, filename, clean_lines, linenum, error):
02007     """Run checks that apply to the text up to the opening brace.
02008 
02009     This is mostly for checking the text after the class identifier
02010     and the "{", usually where the base class is specified.  For other
02011     blocks, there isn't much to check, so we always pass.
02012 
02013     Args:
02014       filename: The name of the current file.
02015       clean_lines: A CleansedLines instance containing the file.
02016       linenum: The number of the line to check.
02017       error: The function to call with any errors found.
02018     """
02019     pass
02020 
02021   def CheckEnd(self, filename, clean_lines, linenum, error):
02022     """Run checks that apply to the text after the closing brace.
02023 
02024     This is mostly used for checking end of namespace comments.
02025 
02026     Args:
02027       filename: The name of the current file.
02028       clean_lines: A CleansedLines instance containing the file.
02029       linenum: The number of the line to check.
02030       error: The function to call with any errors found.
02031     """
02032     pass
02033 
02034   def IsBlockInfo(self):
02035     """Returns true if this block is a _BlockInfo.
02036 
02037     This is convenient for verifying that an object is an instance of
02038     a _BlockInfo, but not an instance of any of the derived classes.
02039 
02040     Returns:
02041       True for this class, False for derived classes.
02042     """
02043     return self.__class__ == _BlockInfo
02044 
02045 
02046 class _ExternCInfo(_BlockInfo):
02047   """Stores information about an 'extern "C"' block."""
02048 
02049   def __init__(self):
02050     _BlockInfo.__init__(self, True)
02051 
02052 
02053 class _ClassInfo(_BlockInfo):
02054   """Stores information about a class."""
02055 
02056   def __init__(self, name, class_or_struct, clean_lines, linenum):
02057     _BlockInfo.__init__(self, False)
02058     self.name = name
02059     self.starting_linenum = linenum
02060     self.is_derived = False
02061     self.check_namespace_indentation = True
02062     if class_or_struct == 'struct':
02063       self.access = 'public'
02064       self.is_struct = True
02065     else:
02066       self.access = 'private'
02067       self.is_struct = False
02068 
02069     # Remember initial indentation level for this class.  Using raw_lines here
02070     # instead of elided to account for leading comments.
02071     self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
02072 
02073     # Try to find the end of the class.  This will be confused by things like:
02074     #   class A {
02075     #   } *x = { ...
02076     #
02077     # But it's still good enough for CheckSectionSpacing.
02078     self.last_line = 0
02079     depth = 0
02080     for i in range(linenum, clean_lines.NumLines()):
02081       line = clean_lines.elided[i]
02082       depth += line.count('{') - line.count('}')
02083       if not depth:
02084         self.last_line = i
02085         break
02086 
02087   def CheckBegin(self, filename, clean_lines, linenum, error):
02088     # Look for a bare ':'
02089     if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
02090       self.is_derived = True
02091 
02092   def CheckEnd(self, filename, clean_lines, linenum, error):
02093     # If there is a DISALLOW macro, it should appear near the end of
02094     # the class.
02095     seen_last_thing_in_class = False
02096     for i in xrange(linenum - 1, self.starting_linenum, -1):
02097       match = Search(
02098           r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
02099           self.name + r'\)',
02100           clean_lines.elided[i])
02101       if match:
02102         if seen_last_thing_in_class:
02103           error(filename, i, 'readability/constructors', 3,
02104                 match.group(1) + ' should be the last thing in the class')
02105         break
02106 
02107       if not Match(r'^\s*$', clean_lines.elided[i]):
02108         seen_last_thing_in_class = True
02109 
02110     # Check that closing brace is aligned with beginning of the class.
02111     # Only do this if the closing brace is indented by only whitespaces.
02112     # This means we will not check single-line class definitions.
02113     indent = Match(r'^( *)\}', clean_lines.elided[linenum])
02114     if indent and len(indent.group(1)) != self.class_indent:
02115       if self.is_struct:
02116         parent = 'struct ' + self.name
02117       else:
02118         parent = 'class ' + self.name
02119       error(filename, linenum, 'whitespace/indent', 3,
02120             'Closing brace should be aligned with beginning of %s' % parent)
02121 
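# Sketch of what _ClassInfo.CheckEnd enforces (hypothetical class):
#   class Foo {
#    public:
#     ...
#    private:
#     DISALLOW_COPY_AND_ASSIGN(Foo);   // should be the last thing in class
#   };                                 // brace aligned with 'class Foo'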
02122 
02123 class _NamespaceInfo(_BlockInfo):
02124   """Stores information about a namespace."""
02125 
02126   def __init__(self, name, linenum):
02127     _BlockInfo.__init__(self, False)
02128     self.name = name or ''
02129     self.starting_linenum = linenum
02130     self.check_namespace_indentation = True
02131 
02132   def CheckEnd(self, filename, clean_lines, linenum, error):
02133     """Check end of namespace comments."""
02134     line = clean_lines.raw_lines[linenum]
02135 
02136     # Check how many lines are enclosed in this namespace.  Don't issue
02137     # warning for missing namespace comments if there aren't enough
02138     # lines.  However, do apply checks if there is already an end of
02139     # namespace comment and it's incorrect.
02140     #
02141     # TODO(unknown): We always want to check end of namespace comments
02142     # if a namespace is large, but sometimes we also want to apply the
02143     # check if a short namespace contained nontrivial things (something
02144     # other than forward declarations).  There is currently no logic on
02145     # deciding what these nontrivial things are, so this check is
02146     # triggered by namespace size only, which works most of the time.
02147     if (linenum - self.starting_linenum < 10
02148         and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
02149       return
02150 
02151     # Look for matching comment at end of namespace.
02152     #
02153     # Note that we accept C style "/* */" comments for terminating
02154     # namespaces, so that code that terminates namespaces inside
02155     # preprocessor macros can be cpplint clean.
02156     #
02157     # We also accept stuff like "// end of namespace <name>." with the
02158     # period at the end.
02159     #
02160     # Besides these, we don't accept anything else, otherwise we might
02161     # get false negatives when existing comment is a substring of the
02162     # expected namespace.
02163     if self.name:
02164       # Named namespace
02165       if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
02166                     r'[\*/\.\\\s]*$'),
02167                    line):
02168         error(filename, linenum, 'readability/namespace', 5,
02169               'Namespace should be terminated with "// namespace %s"' %
02170               self.name)
02171     else:
02172       # Anonymous namespace
02173       if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
02174         # If "// namespace anonymous" or "// anonymous namespace (more text)",
02175         # mention "// anonymous namespace" as an acceptable form
02176         if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
02177           error(filename, linenum, 'readability/namespace', 5,
02178                 'Anonymous namespace should be terminated with "// namespace"'
02179                 ' or "// anonymous namespace"')
02180         else:
02181           error(filename, linenum, 'readability/namespace', 5,
02182                 'Anonymous namespace should be terminated with "// namespace"')
02183 
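# Examples of namespace-ending comments accepted by the checks above:
#   }  // namespace mynamespace       (named namespace)
#   }  /* namespace mynamespace */    (C-style, e.g. inside a macro)
#   }  // namespace                   (anonymous namespace)
#   }  // anonymous namespace         (also accepted for anonymous)
# Other forms trigger a readability/namespace warning; short namespaces
# without any such comment are exempt.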
02184 
02185 class _PreprocessorInfo(object):
02186   """Stores checkpoints of nesting stacks when #if/#else is seen."""
02187 
02188   def __init__(self, stack_before_if):
02189     # The entire nesting stack before #if
02190     self.stack_before_if = stack_before_if
02191 
02192     # The entire nesting stack up to #else
02193     self.stack_before_else = []
02194 
02195     # Whether we have already seen #else or #elif
02196     self.seen_else = False
02197 
02198 
02199 class NestingState(object):
02200   """Holds states related to parsing braces."""
02201 
02202   def __init__(self):
02203     # Stack for tracking all braces.  An object is pushed whenever we
02204     # see a "{", and popped when we see a "}".  Only 3 types of
02205     # objects are possible:
02206     # - _ClassInfo: a class or struct.
02207     # - _NamespaceInfo: a namespace.
02208     # - _BlockInfo: some other type of block.
02209     self.stack = []
02210 
02211     # Top of the previous stack before each Update().
02212     #
02213     # Because the nesting_stack is updated at the end of each line, we
02214     # had to do some convoluted checks to find out what the current
02215     # scope is at the beginning of the line.  This check is simplified by
02216     # saving the previous top of nesting stack.
02217     #
02218     # We could save the full stack, but we only need the top.  Copying
02219     # the full nesting stack would slow down cpplint by ~10%.
02220     self.previous_stack_top = []
02221 
02222     # Stack of _PreprocessorInfo objects.
02223     self.pp_stack = []
02224 
02225   def SeenOpenBrace(self):
02226     """Check if we have seen the opening brace for the innermost block.
02227 
02228     Returns:
02229       True if we have seen the opening brace, False if the innermost
02230       block is still expecting an opening brace.
02231     """
02232     return (not self.stack) or self.stack[-1].seen_open_brace
02233 
02234   def InNamespaceBody(self):
02235     """Check if we are currently one level inside a namespace body.
02236 
02237     Returns:
02238       True if top of the stack is a namespace block, False otherwise.
02239     """
02240     return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
02241 
02242   def InExternC(self):
02243     """Check if we are currently one level inside an 'extern "C"' block.
02244 
02245     Returns:
02246       True if top of the stack is an extern block, False otherwise.
02247     """
02248     return self.stack and isinstance(self.stack[-1], _ExternCInfo)
02249 
02250   def InClassDeclaration(self):
02251     """Check if we are currently one level inside a class or struct declaration.
02252 
02253     Returns:
02254       True if top of the stack is a class/struct, False otherwise.
02255     """
02256     return self.stack and isinstance(self.stack[-1], _ClassInfo)
02257 
02258   def InAsmBlock(self):
02259     """Check if we are currently one level inside an inline ASM block.
02260 
02261     Returns:
02262       True if the top of the stack is a block containing inline ASM.
02263     """
02264     return self.stack and self.stack[-1].inline_asm != _NO_ASM
02265 
02266   def InTemplateArgumentList(self, clean_lines, linenum, pos):
02267     """Check if current position is inside template argument list.
02268 
02269     Args:
02270       clean_lines: A CleansedLines instance containing the file.
02271       linenum: The number of the line to check.
02272       pos: position just after the suspected template argument.
02273     Returns:
02274       True if (linenum, pos) is inside template arguments.
02275     """
02276     while linenum < clean_lines.NumLines():
02277       # Find the earliest character that might indicate a template argument
02278       line = clean_lines.elided[linenum]
02279       match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
02280       if not match:
02281         linenum += 1
02282         pos = 0
02283         continue
02284       token = match.group(1)
02285       pos += len(match.group(0))
02286 
02287       # These things do not look like template argument list:
02288       #   class Suspect {
02289       #   class Suspect x; }
02290       if token in ('{', '}', ';'): return False
02291 
02292       # These things look like template argument list:
02293       #   template <class Suspect>
02294       #   template <class Suspect = default_value>
02295       #   template <class Suspect[]>
02296       #   template <class Suspect...>
02297       if token in ('>', '=', '[', ']', '.'): return True
02298 
02299       # Check if token is an unmatched '<'.
02300       # If not, move on to the next character.
02301       if token != '<':
02302         pos += 1
02303         if pos >= len(line):
02304           linenum += 1
02305           pos = 0
02306         continue
02307 
02308       # A lone '<' is ambiguous on its own, so we need to find the
02309       # matching '>' to decide.
02310       (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
02311       if end_pos < 0:
02312         # Not sure if template argument list or syntax error in file
02313         return False
02314       linenum = end_line
02315       pos = end_pos
02316     return False
02317 
02318   def UpdatePreprocessor(self, line):
02319     """Update preprocessor stack.
02320 
02321     We need to handle preprocessors due to classes like this:
02322       #ifdef SWIG
02323       struct ResultDetailsPageElementExtensionPoint {
02324       #else
02325       struct ResultDetailsPageElementExtensionPoint : public Extension {
02326       #endif
02327 
02328     We make the following assumptions (good enough for most files):
02329     - Preprocessor condition evaluates to true from #if up to first
02330       #else/#elif/#endif.
02331 
02332     - Preprocessor condition evaluates to false from #else/#elif up
02333       to #endif.  We still perform lint checks on these lines, but
02334       these do not affect nesting stack.
02335 
02336     Args:
02337       line: current line to check.
02338     """
02339     if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
02340       # Beginning of #if block, save the nesting stack here.  The saved
02341       # stack will allow us to restore the parsing state in the #else case.
02342       self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
02343     elif Match(r'^\s*#\s*(else|elif)\b', line):
02344       # Beginning of #else block
02345       if self.pp_stack:
02346         if not self.pp_stack[-1].seen_else:
02347           # This is the first #else or #elif block.  Remember the
02348           # whole nesting stack up to this point.  This is what we
02349           # keep after the #endif.
02350           self.pp_stack[-1].seen_else = True
02351           self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
02352 
02353         # Restore the stack to how it was before the #if
02354         self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
02355       else:
02356         # TODO(unknown): unexpected #else, issue warning?
02357         pass
02358     elif Match(r'^\s*#\s*endif\b', line):
02359       # End of #if or #else blocks.
02360       if self.pp_stack:
02361         # If we saw an #else, we will need to restore the nesting
02362         # stack to its former state before the #else, otherwise we
02363         # will just continue from where we left off.
02364         if self.pp_stack[-1].seen_else:
02365           # Here we can just use a shallow copy since we are the last
02366           # reference to it.
02367           self.stack = self.pp_stack[-1].stack_before_else
02368         # Drop the corresponding #if
02369         self.pp_stack.pop()
02370       else:
02371         # TODO(unknown): unexpected #endif, issue warning?
02372         pass
02373 
02374   # TODO(unknown): Update() is too long, but we will refactor later.
02375   def Update(self, filename, clean_lines, linenum, error):
02376     """Update nesting state with current line.
02377 
02378     Args:
02379       filename: The name of the current file.
02380       clean_lines: A CleansedLines instance containing the file.
02381       linenum: The number of the line to check.
02382       error: The function to call with any errors found.
02383     """
02384     line = clean_lines.elided[linenum]
02385 
02386     # Remember top of the previous nesting stack.
02387     #
02388     # The stack is always pushed/popped and not modified in place, so
02389     # we can just do a shallow copy instead of copy.deepcopy.  Using
02390     # deepcopy would slow down cpplint by ~28%.
02391     if self.stack:
02392       self.previous_stack_top = self.stack[-1]
02393     else:
02394       self.previous_stack_top = None
02395 
02396     # Update pp_stack
02397     self.UpdatePreprocessor(line)
02398 
02399     # Count parentheses.  This is to avoid adding struct arguments to
02400     # the nesting stack.
02401     if self.stack:
02402       inner_block = self.stack[-1]
02403       depth_change = line.count('(') - line.count(')')
02404       inner_block.open_parentheses += depth_change
02405 
02406       # Also check if we are starting or ending an inline assembly block.
02407       if inner_block.inline_asm in (_NO_ASM, _END_ASM):
02408         if (depth_change != 0 and
02409             inner_block.open_parentheses == 1 and
02410             _MATCH_ASM.match(line)):
02411           # Enter assembly block
02412           inner_block.inline_asm = _INSIDE_ASM
02413         else:
02414           # Not entering assembly block.  If previous line was _END_ASM,
02415           # we will now shift to _NO_ASM state.
02416           inner_block.inline_asm = _NO_ASM
02417       elif (inner_block.inline_asm == _INSIDE_ASM and
02418             inner_block.open_parentheses == 0):
02419         # Exit assembly block
02420         inner_block.inline_asm = _END_ASM
02421 
02422     # Consume namespace declaration at the beginning of the line.  Do
02423     # this in a loop so that we catch same line declarations like this:
02424     #   namespace proto2 { namespace bridge { class MessageSet; } }
02425     while True:
02426       # Match start of namespace.  The "\b\s*" below catches namespace
02427       # declarations even if they aren't followed by whitespace; this
02428       # is so that we don't confuse our namespace checker.  The
02429       # missing spaces will be flagged by CheckSpacing.
02430       namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
02431       if not namespace_decl_match:
02432         break
02433 
02434       new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
02435       self.stack.append(new_namespace)
02436 
02437       line = namespace_decl_match.group(2)
02438       if line.find('{') != -1:
02439         new_namespace.seen_open_brace = True
02440         line = line[line.find('{') + 1:]
02441 
02442     # Look for a class declaration in whatever is left of the line
02443     # after parsing namespaces.  The regexp accounts for decorated classes
02444     # such as in:
02445     #   class LOCKABLE API Object {
02446     #   };
02447     class_decl_match = Match(
02448         r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
02449         r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
02450         r'(.*)$', line)
02451     if (class_decl_match and
02452         (not self.stack or self.stack[-1].open_parentheses == 0)):
02453       # We do not want to accept classes that are actually template arguments:
02454       #   template <class Ignore1,
02455       #             class Ignore2 = Default<Args>,
02456       #             template <Args> class Ignore3>
02457       #   void Function() {};
02458       #
02459       # To avoid template argument cases, we scan forward and look for
02460       # an unmatched '>'.  If we see one, assume we are inside a
02461       # template argument list.
02462       end_declaration = len(class_decl_match.group(1))
02463       if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
02464         self.stack.append(_ClassInfo(
02465             class_decl_match.group(3), class_decl_match.group(2),
02466             clean_lines, linenum))
02467         line = class_decl_match.group(4)
02468 
02469     # If we have not yet seen the opening brace for the innermost block,
02470     # run checks here.
02471     if not self.SeenOpenBrace():
02472       self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
02473 
02474     # Update access control if we are inside a class/struct
02475     if self.stack and isinstance(self.stack[-1], _ClassInfo):
02476       classinfo = self.stack[-1]
02477       access_match = Match(
02478           r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
02479           r':(?:[^:]|$)',
02480           line)
02481       if access_match:
02482         classinfo.access = access_match.group(2)
02483 
02484         # Check that access keywords are indented +1 space.  Skip this
02485         # check if the keywords are not preceded by whitespaces.
02486         indent = access_match.group(1)
02487         if (len(indent) != classinfo.class_indent + 1 and
02488             Match(r'^\s*$', indent)):
02489           if classinfo.is_struct:
02490             parent = 'struct ' + classinfo.name
02491           else:
02492             parent = 'class ' + classinfo.name
02493           slots = ''
02494           if access_match.group(3):
02495             slots = access_match.group(3)
02496           error(filename, linenum, 'whitespace/indent', 3,
02497                 '%s%s: should be indented +1 space inside %s' % (
02498                     access_match.group(2), slots, parent))
02499 
02500     # Consume braces or semicolons from what's left of the line
02501     while True:
02502       # Match first brace, semicolon, or closing parenthesis.
02503       matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
02504       if not matched:
02505         break
02506 
02507       token = matched.group(1)
02508       if token == '{':
02509         # If namespace or class hasn't seen an opening brace yet, mark
02510         # namespace/class head as complete.  Push a new block onto the
02511         # stack otherwise.
02512         if not self.SeenOpenBrace():
02513           self.stack[-1].seen_open_brace = True
02514         elif Match(r'^extern\s*"[^"]*"\s*\{', line):
02515           self.stack.append(_ExternCInfo())
02516         else:
02517           self.stack.append(_BlockInfo(True))
02518           if _MATCH_ASM.match(line):
02519             self.stack[-1].inline_asm = _BLOCK_ASM
02520 
02521       elif token == ';' or token == ')':
02522         # If we haven't seen an opening brace yet, but we already saw
02523         # a semicolon, this is probably a forward declaration.  Pop
02524         # the stack for these.
02525         #
02526         # Similarly, if we haven't seen an opening brace yet, but we
02527         # already saw a closing parenthesis, then these are probably
02528         # function arguments with extra "class" or "struct" keywords.
02529         # Also pop the stack for these.
02530         if not self.SeenOpenBrace():
02531           self.stack.pop()
02532       else:  # token == '}'
02533         # Perform end of block checks and pop the stack.
02534         if self.stack:
02535           self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
02536           self.stack.pop()
02537       line = matched.group(2)
02538 
02539   def InnermostClass(self):
02540     """Get class info on the top of the stack.
02541 
02542     Returns:
02543       A _ClassInfo object if we are inside a class, or None otherwise.
02544     """
02545     for i in range(len(self.stack), 0, -1):
02546       classinfo = self.stack[i - 1]
02547       if isinstance(classinfo, _ClassInfo):
02548         return classinfo
02549     return None
02550 
02551   def CheckCompletedBlocks(self, filename, error):
02552     """Checks that all classes and namespaces have been completely parsed.
02553 
02554     Call this when all lines in a file have been processed.
02555     Args:
02556       filename: The name of the current file.
02557       error: The function to call with any errors found.
02558     """
02559     # Note: This test can result in false positives if #ifdef constructs
02560     # get in the way of brace matching. See the testBuildClass test in
02561     # cpplint_unittest.py for an example of this.
02562     for obj in self.stack:
02563       if isinstance(obj, _ClassInfo):
02564         error(filename, obj.starting_linenum, 'build/class', 5,
02565               'Failed to find complete declaration of class %s' %
02566               obj.name)
02567       elif isinstance(obj, _NamespaceInfo):
02568         error(filename, obj.starting_linenum, 'build/namespaces', 5,
02569               'Failed to find complete declaration of namespace %s' %
02570               obj.name)
02571 
02572 
02573 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
02574                                   nesting_state, error):
02575   r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
02576 
02577   Complain about several constructs which gcc-2 accepts, but which are
02578   not standard C++.  Warning about these in lint is one way to ease the
02579   transition to new compilers.
02580   - put storage class first (e.g. "static const" instead of "const static").
02581   - "%lld" instead of "%qd" in printf-type functions.
02582   - "%1$d" is non-standard in printf-type functions.
02583   - "\%" is an undefined character escape sequence.
02584   - text after #endif is not allowed.
02585   - invalid inner-style forward declaration.
02586   - >? and <? operators, and their >?= and <?= cousins.
02587 
02588   Additionally, check for constructor/destructor style violations and reference
02589   members, as it is very convenient to do so while checking for
02590   gcc-2 compliance.
02591 
02592   Args:
02593     filename: The name of the current file.
02594     clean_lines: A CleansedLines instance containing the file.
02595     linenum: The number of the line to check.
02596     nesting_state: A NestingState instance which maintains information about
02597                    the current stack of nested blocks being parsed.
02598     error: A callable to which errors are reported, which takes 4 arguments:
02599            filename, line number, error level, and message
02600   """
02601 
02602   # Remove comments from the line, but leave in strings for now.
02603   line = clean_lines.lines[linenum]
02604 
02605   if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
02606     error(filename, linenum, 'runtime/printf_format', 3,
02607           '%q in format strings is deprecated.  Use %ll instead.')
02608 
02609   if Search(r'printf\s*\(.*".*%\d+\$', line):
02610     error(filename, linenum, 'runtime/printf_format', 2,
02611           '%N$ formats are unconventional.  Try rewriting to avoid them.')
02612 
02613   # Remove escaped backslashes before looking for undefined escapes.
02614   line = line.replace('\\\\', '')
02615 
02616   if Search(r'("|\').*\\(%|\[|\(|{)', line):
02617     error(filename, linenum, 'build/printf_format', 3,
02618           '%, [, (, and { are undefined character escapes.  Unescape them.')
02619 
02620   # For the rest, work with both comments and strings removed.
02621   line = clean_lines.elided[linenum]
02622 
02623   if Search(r'\b(const|volatile|void|char|short|int|long'
02624             r'|float|double|signed|unsigned'
02625             r'|schar|u?int8|u?int16|u?int32|u?int64)'
02626             r'\s+(register|static|extern|typedef)\b',
02627             line):
02628     error(filename, linenum, 'build/storage_class', 5,
02629           'Storage class (static, extern, typedef, etc) should be first.')
02630 
02631   if Match(r'\s*#\s*endif\s*[^/\s]+', line):
02632     error(filename, linenum, 'build/endif_comment', 5,
02633           'Uncommented text after #endif is non-standard.  Use a comment.')
02634 
02635   if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
02636     error(filename, linenum, 'build/forward_decl', 5,
02637           'Inner-style forward declarations are invalid.  Remove this line.')
02638 
02639   if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
02640             line):
02641     error(filename, linenum, 'build/deprecated', 3,
02642           '>? and <? (max and min) operators are non-standard and deprecated.')
02643 
02644   if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
02645     # TODO(unknown): Could it be expanded safely to arbitrary references,
02646     # without triggering too many false positives? The first
02647     # attempt triggered 5 warnings for mostly benign code in the regtest, hence
02648     # the restriction.
02649     # Here's the original regexp, for the reference:
02650     # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
02651     # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
02652     error(filename, linenum, 'runtime/member_string_references', 2,
02653           'const string& members are dangerous. It is much better to use '
02654           'alternatives, such as pointers or simple constants.')
02655 
02656   # Everything else in this function operates on class declarations.
02657   # Return early if the top of the nesting stack is not a class, or if
02658   # the class head is not completed yet.
02659   classinfo = nesting_state.InnermostClass()
02660   if not classinfo or not classinfo.seen_open_brace:
02661     return
02662 
02663   # The class may have been declared with namespace or classname qualifiers.
02664   # The constructor and destructor will not have those qualifiers.
02665   base_classname = classinfo.name.split('::')[-1]
02666 
02667   # Look for single-argument constructors that aren't marked explicit.
02668   # Technically a valid construct, but against style.  Also look for
02669   # 'explicit' on constructors that cannot be called with exactly one
02670   # argument, which is technically valid but strongly suggests a mistake.
02671   explicit_constructor_match = Match(
02672       r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
02673       r'\(((?:[^()]|\([^()]*\))*)\)'
02674       % re.escape(base_classname),
02675       line)
02676 
02677   if explicit_constructor_match:
02678     is_marked_explicit = explicit_constructor_match.group(1)
02679 
02680     if not explicit_constructor_match.group(2):
02681       constructor_args = []
02682     else:
02683       constructor_args = explicit_constructor_match.group(2).split(',')
02684 
02685     # Collapse arguments so that commas in template parameter lists and
02686     # nested function-argument lists don't split a single argument in two.
02687     i = 0
02688     while i < len(constructor_args):
02689       constructor_arg = constructor_args[i]
02690       while (constructor_arg.count('<') > constructor_arg.count('>') or
02691              constructor_arg.count('(') > constructor_arg.count(')')):
02692         constructor_arg += ',' + constructor_args[i + 1]
02693         del constructor_args[i + 1]
02694       constructor_args[i] = constructor_arg
02695       i += 1
02696 
02697     defaulted_args = [arg for arg in constructor_args if '=' in arg]
02698     noarg_constructor = (not constructor_args or  # empty arg list
02699                          # 'void' arg specifier
02700                          (len(constructor_args) == 1 and
02701                           constructor_args[0].strip() == 'void'))
02702     onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
02703                            not noarg_constructor) or
02704                           # all but at most one arg defaulted
02705                           (len(constructor_args) >= 1 and
02706                            not noarg_constructor and
02707                            len(defaulted_args) >= len(constructor_args) - 1))
02708     initializer_list_constructor = bool(
02709         onearg_constructor and
02710         Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
02711     copy_constructor = bool(
02712         onearg_constructor and
02713         Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
02714               % re.escape(base_classname), constructor_args[0].strip()))
02715 
02716     if (not is_marked_explicit and
02717         onearg_constructor and
02718         not initializer_list_constructor and
02719         not copy_constructor):
02720       if defaulted_args:
02721         error(filename, linenum, 'runtime/explicit', 5,
02722               'Constructors callable with one argument '
02723               'should be marked explicit.')
02724       else:
02725         error(filename, linenum, 'runtime/explicit', 5,
02726               'Single-parameter constructors should be marked explicit.')
02727     elif is_marked_explicit and not onearg_constructor:
02728       if noarg_constructor:
02729         error(filename, linenum, 'runtime/explicit', 5,
02730               'Zero-parameter constructors should not be marked explicit.')
02731       else:
02732         error(filename, linenum, 'runtime/explicit', 0,
02733               'Constructors that require multiple arguments '
02734               'should not be marked explicit.')
02735 
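# Editor's sketch (not part of the original source): assuming a class named
# Foo, the check above is intended to treat member declarations roughly like:
#   Foo(int x);                // flagged: callable with one argument, not explicit
#   Foo(int x, int y = 0);     // flagged: defaulted args make it one-argument callable
#   explicit Foo(int x);       // accepted
#   Foo(const Foo& other);     // accepted: copy constructors are exempt
#   explicit Foo();            // flagged: zero-parameter constructor marked explicit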
02736 
02737 def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
02738   """Checks for the correctness of various spacing around function calls.
02739 
02740   Args:
02741     filename: The name of the current file.
02742     clean_lines: A CleansedLines instance containing the file.
02743     linenum: The number of the line to check.
02744     error: The function to call with any errors found.
02745   """
02746   line = clean_lines.elided[linenum]
02747 
02748   # Since function calls often occur inside if/for/while/switch
02749   # expressions - which have their own, more liberal conventions - we
02750   # first see if we should be looking inside such an expression for a
02751   # function call, to which we can apply more strict standards.
02752   fncall = line    # if there's no control flow construct, look at whole line
02753   for pattern in (r'\bif\s*\((.*)\)\s*{',
02754                   r'\bfor\s*\((.*)\)\s*{',
02755                   r'\bwhile\s*\((.*)\)\s*[{;]',
02756                   r'\bswitch\s*\((.*)\)\s*{'):
02757     match = Search(pattern, line)
02758     if match:
02759       fncall = match.group(1)    # look inside the parens for function calls
02760       break
02761 
02762   # Except in if/for/while/switch, there should never be space
02763   # immediately inside parens (eg "f( 3, 4 )").  We make an exception
02764   # for nested parens ( (a+b) + c ).  Likewise, there should never be
02765   # a space before a ( when it's a function argument.  I assume it's a
02766   # function argument when the char before the whitespace is legal in
02767   # a function name (alnum + _) and we're not starting a macro. Also ignore
02768   # pointers and references to arrays and functions because they're too tricky:
02769   # we use a very simple way to recognize these:
02770   # " (something)(maybe-something)" or
02771   # " (something)(maybe-something," or
02772   # " (something)[something]"
02773   # Note that we assume the contents of [] to be short enough that
02774   # they'll never need to wrap.
02775   if (  # Ignore control structures.
02776       not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
02777                  fncall) and
02778       # Ignore pointers/references to functions.
02779       not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
02780       # Ignore pointers/references to arrays.
02781       not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
02782     if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
02783       error(filename, linenum, 'whitespace/parens', 4,
02784             'Extra space after ( in function call')
02785     elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
02786       error(filename, linenum, 'whitespace/parens', 2,
02787             'Extra space after (')
02788     if (Search(r'\w\s+\(', fncall) and
02789         not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
02790         not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
02791         not Search(r'\bcase\s+\(', fncall)):
02792       # TODO(unknown): Space after an operator function seems to be a common
02793       # error; silence those for now by restricting them to highest verbosity.
02794       if Search(r'\boperator_*\b', line):
02795         error(filename, linenum, 'whitespace/parens', 0,
02796               'Extra space before ( in function call')
02797       else:
02798         error(filename, linenum, 'whitespace/parens', 4,
02799               'Extra space before ( in function call')
02800     # If the ) is followed only by a newline or a { + newline, assume it's
02801     # part of a control statement (if/while/etc), and don't complain
02802     if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
02803       # If the closing parenthesis is preceded by only whitespaces,
02804       # try to give a more descriptive error message.
02805       if Search(r'^\s+\)', fncall):
02806         error(filename, linenum, 'whitespace/parens', 2,
02807               'Closing ) should be moved to the previous line')
02808       else:
02809         error(filename, linenum, 'whitespace/parens', 2,
02810               'Extra space before )')
02811 
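# Editor's sketch of calls this check is meant to flag or accept (assumption,
# not original commentary):
#   DoSomething( 3, 4 );   // "Extra space after ( in function call" and "Extra space before )"
#   DoSomething (3, 4);    // "Extra space before ( in function call"
#   if (condition) {       // spacing after control-flow keywords is handled elsewhere;
#                          // here only the contents of the parens are examined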
02812 
02813 def IsBlankLine(line):
02814   """Returns true if the given line is blank.
02815 
02816   We consider a line to be blank if the line is empty or consists of
02817   only white spaces.
02818 
02819   Args:
02820     line: A line of the file, as a string.
02821 
02822   Returns:
02823     True, if the given line is blank.
02824   """
02825   return not line or line.isspace()
02826 
02827 
02828 def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
02829                                  error):
02830   is_namespace_indent_item = (
02831       len(nesting_state.stack) > 1 and
02832       nesting_state.stack[-1].check_namespace_indentation and
02833       isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
02834       nesting_state.previous_stack_top == nesting_state.stack[-2])
02835 
02836   if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
02837                                      clean_lines.elided, line):
02838     CheckItemIndentationInNamespace(filename, clean_lines.elided,
02839                                     line, error)
02840 
02841 
02842 def CheckForFunctionLengths(filename, clean_lines, linenum,
02843                             function_state, error):
02844   """Reports errors for long function bodies.
02845 
02846   For an overview why this is done, see:
02847   http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
02848 
02849   Uses a simplistic algorithm assuming other style guidelines
02850   (especially spacing) are followed.
02851   Only checks unindented functions, so class members are unchecked.
02852   Trivial bodies are unchecked, so constructors with huge initializer lists
02853   may be missed.
02854   Blank/comment lines are not counted so as to avoid encouraging the removal
02855   of vertical space and comments just to get through a lint check.
02856   NOLINT *on the last line of a function* disables this check.
02857 
02858   Args:
02859     filename: The name of the current file.
02860     clean_lines: A CleansedLines instance containing the file.
02861     linenum: The number of the line to check.
02862     function_state: Current function name and lines in body so far.
02863     error: The function to call with any errors found.
02864   """
02865   lines = clean_lines.lines
02866   line = lines[linenum]
02867   joined_line = ''
02868 
02869   starting_func = False
02870   regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
02871   match_result = Match(regexp, line)
02872   if match_result:
02873     # If the name is all caps and underscores, figure it's a macro and
02874     # ignore it, unless it's TEST or TEST_F.
02875     function_name = match_result.group(1).split()[-1]
02876     if function_name == 'TEST' or function_name == 'TEST_F' or (
02877         not Match(r'[A-Z_]+$', function_name)):
02878       starting_func = True
02879 
02880   if starting_func:
02881     body_found = False
02882     for start_linenum in xrange(linenum, clean_lines.NumLines()):
02883       start_line = lines[start_linenum]
02884       joined_line += ' ' + start_line.lstrip()
02885       if Search(r'(;|})', start_line):  # Declarations and trivial functions
02886         body_found = True
02887         break                              # ... ignore
02888       elif Search(r'{', start_line):
02889         body_found = True
02890         function = Search(r'((\w|:)*)\(', line).group(1)
02891         if Match(r'TEST', function):    # Handle TEST... macros
02892           parameter_regexp = Search(r'(\(.*\))', joined_line)
02893           if parameter_regexp:             # Ignore bad syntax
02894             function += parameter_regexp.group(1)
02895         else:
02896           function += '()'
02897         function_state.Begin(function)
02898         break
02899     if not body_found:
02900       # No body for the function (or evidence of a non-function) was found.
02901       error(filename, linenum, 'readability/fn_size', 5,
02902             'Lint failed to find start of function body.')
02903   elif Match(r'^\}\s*$', line):  # function end
02904     function_state.Check(error, filename, linenum)
02905     function_state.End()
02906   elif not Match(r'^\s*$', line):
02907     function_state.Count()  # Count non-blank/non-comment lines.
02908 
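# Editor's note (assumption based on the anchored regex above): only definitions
# that start at column zero are tracked, e.g.
#   void MyClass::Process() {    // counted towards readability/fn_size
# while indented member definitions inside a class body are skipped, as are
# all-caps macro-style names other than TEST and TEST_F.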
02909 
02910 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
02911 
02912 
02913 def CheckComment(line, filename, linenum, next_line_start, error):
02914   """Checks for common mistakes in comments.
02915 
02916   Args:
02917     line: The line in question.
02918     filename: The name of the current file.
02919     linenum: The number of the line to check.
02920     next_line_start: The first non-whitespace column of the next line.
02921     error: The function to call with any errors found.
02922   """
02923   commentpos = line.find('//')
02924   if commentpos != -1:
02925     # Check if the // may be in quotes.  If so, ignore it
02926     # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
02927     if (line.count('"', 0, commentpos) -
02928         line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
02929       # Allow one space for new scopes, two spaces otherwise:
02930       if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
02931           ((commentpos >= 1 and
02932             line[commentpos-1] not in string.whitespace) or
02933            (commentpos >= 2 and
02934             line[commentpos-2] not in string.whitespace))):
02935         error(filename, linenum, 'whitespace/comments', 2,
02936               'At least two spaces is best between code and comments')
02937 
02938       # Checks for common mistakes in TODO comments.
02939       comment = line[commentpos:]
02940       match = _RE_PATTERN_TODO.match(comment)
02941       if match:
02942         # One whitespace is correct; zero whitespace is handled elsewhere.
02943         leading_whitespace = match.group(1)
02944         if len(leading_whitespace) > 1:
02945           error(filename, linenum, 'whitespace/todo', 2,
02946                 'Too many spaces before TODO')
02947 
02948         username = match.group(2)
02949         if not username:
02950           error(filename, linenum, 'readability/todo', 2,
02951                 'Missing username in TODO; it should look like '
02952                 '"// TODO(my_username): Stuff."')
02953 
02954         middle_whitespace = match.group(3)
02955         # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
02956         if middle_whitespace != ' ' and middle_whitespace != '':
02957           error(filename, linenum, 'whitespace/todo', 2,
02958                 'TODO(my_username) should be followed by a space')
02959 
02960       # If the comment contains an alphanumeric character, there
02961       # should be a space somewhere between it and the // unless
02962       # it's a /// or //! Doxygen comment.
02963       if (Match(r'//[^ ]*\w', comment) and
02964           not Match(r'(///|//\!)(\s+|$)', comment)):
02965         error(filename, linenum, 'whitespace/comments', 4,
02966               'Should have a space between // and comment')
02967 
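# Editor's sketch of TODO comments the checks above are meant to flag
# (assumption, not part of the original file):
#   // TODO fix the overflow        -> "Missing username in TODO"
#   //   TODO(user): fix it         -> "Too many spaces before TODO"
#   // TODO(user):fix it            -> "TODO(my_username) should be followed by a space"
#   // TODO(user): fix it           -> accepted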
02968 
02969 def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
02970   """Checks for improper use of DISALLOW* macros.
02971 
02972   Args:
02973     filename: The name of the current file.
02974     clean_lines: A CleansedLines instance containing the file.
02975     linenum: The number of the line to check.
02976     nesting_state: A NestingState instance which maintains information about
02977                    the current stack of nested blocks being parsed.
02978     error: The function to call with any errors found.
02979   """
02980   line = clean_lines.elided[linenum]  # get rid of comments and strings
02981 
02982   matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
02983                    r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
02984   if not matched:
02985     return
02986   if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
02987     if nesting_state.stack[-1].access != 'private':
02988       error(filename, linenum, 'readability/constructors', 3,
02989             '%s must be in the private: section' % matched.group(1))
02990 
02991   else:
02992     # Found DISALLOW* macro outside a class declaration, or perhaps it
02993     # was used inside a function when it should have been part of the
02994     # class declaration.  We could issue a warning here, but it
02995     # probably resulted in a compiler error already.
02996     pass
02997 
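# Editor's illustration (assumption, not original commentary): a line such as
#   DISALLOW_COPY_AND_ASSIGN(Foo);
# appearing under "public:" or "protected:" in a class body is reported as
# "DISALLOW_COPY_AND_ASSIGN must be in the private: section".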
02998 
02999 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
03000   """Checks for the correctness of various spacing issues in the code.
03001 
03002   Things we check for: spaces around operators, spaces after
03003   if/for/while/switch, no spaces around parens in function calls, two
03004   spaces between code and comment, don't start a block with a blank
03005   line, don't end a function with a blank line, don't add a blank line
03006   after public/protected/private, don't have too many blank lines in a row.
03007 
03008   Args:
03009     filename: The name of the current file.
03010     clean_lines: A CleansedLines instance containing the file.
03011     linenum: The number of the line to check.
03012     nesting_state: A NestingState instance which maintains information about
03013                    the current stack of nested blocks being parsed.
03014     error: The function to call with any errors found.
03015   """
03016 
03017   # Don't use "elided" lines here, otherwise we can't check commented lines.
03018   # Don't want to use "raw" either, because we don't want to check inside C++11
03019   # raw strings.
03020   raw = clean_lines.lines_without_raw_strings
03021   line = raw[linenum]
03022 
03023   # Before nixing comments, check if the line is blank for no good
03024   # reason.  This includes the first line after a block is opened, and
03025   # blank lines at the end of a function (i.e., right before a line like '}').
03026   #
03027   # Skip all the blank line checks if we are immediately inside a
03028   # namespace body.  In other words, don't issue blank line warnings
03029   # for this block:
03030   #   namespace {
03031   #
03032   #   }
03033   #
03034   # A warning about missing end of namespace comments will be issued instead.
03035   #
03036   # Also skip blank line checks for 'extern "C"' blocks, which are formatted
03037   # like namespaces.
03038   if (IsBlankLine(line) and
03039       not nesting_state.InNamespaceBody() and
03040       not nesting_state.InExternC()):
03041     elided = clean_lines.elided
03042     prev_line = elided[linenum - 1]
03043     prevbrace = prev_line.rfind('{')
03044     # TODO(unknown): Don't complain if line before blank line, and line after,
03045     #                both start with alnums and are indented the same amount.
03046     #                This ignores whitespace at the start of a namespace block
03047     #                because those are not usually indented.
03048     if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
03049       # OK, we have a blank line at the start of a code block.  Before we
03050       # complain, we check if it is an exception to the rule: The previous
03051       # non-empty line has the parameters of a function header that are indented
03052       # 4 spaces (because they did not fit in an 80-column line when placed on
03053       # the same line as the function name).  We also check for the case where
03054       # the previous line is indented 6 spaces, which may happen when the
03055       # initializers of a constructor do not fit into an 80-column line.
03056       exception = False
03057       if Match(r' {6}\w', prev_line):  # Initializer list?
03058         # We are looking for the opening column of the initializer list, which
03059         # should be indented 4 spaces to cause 6-space indentation afterwards.
03060         search_position = linenum-2
03061         while (search_position >= 0
03062                and Match(r' {6}\w', elided[search_position])):
03063           search_position -= 1
03064         exception = (search_position >= 0
03065                      and elided[search_position][:5] == '    :')
03066       else:
03067         # Search for the function arguments or an initializer list.  We use a
03068         # simple heuristic here: if the line is indented 4 spaces and we have a
03069         # closing paren, without the opening paren, followed by an opening brace
03070         # or colon (for initializer lists), we assume that it is the last line of
03071         # a function header.  If we have a colon indented 4 spaces, it is an
03072         # initializer list.
03073         exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
03074                            prev_line)
03075                      or Match(r' {4}:', prev_line))
03076 
03077       if not exception:
03078         error(filename, linenum, 'whitespace/blank_line', 2,
03079               'Redundant blank line at the start of a code block '
03080               'should be deleted.')
03081     # Ignore blank lines at the end of a block in a long if-else
03082     # chain, like this:
03083     #   if (condition1) {
03084     #     // Something followed by a blank line
03085     #
03086     #   } else if (condition2) {
03087     #     // Something else
03088     #   }
03089     if linenum + 1 < clean_lines.NumLines():
03090       next_line = raw[linenum + 1]
03091       if (next_line
03092           and Match(r'\s*}', next_line)
03093           and next_line.find('} else ') == -1):
03094         error(filename, linenum, 'whitespace/blank_line', 3,
03095               'Redundant blank line at the end of a code block '
03096               'should be deleted.')
03097 
03098     matched = Match(r'\s*(public|protected|private):', prev_line)
03099     if matched:
03100       error(filename, linenum, 'whitespace/blank_line', 3,
03101             'Do not leave a blank line after "%s:"' % matched.group(1))
03102 
03103   # Next, check comments
03104   next_line_start = 0
03105   if linenum + 1 < clean_lines.NumLines():
03106     next_line = raw[linenum + 1]
03107     next_line_start = len(next_line) - len(next_line.lstrip())
03108   CheckComment(line, filename, linenum, next_line_start, error)
03109 
03110   # get rid of comments and strings
03111   line = clean_lines.elided[linenum]
03112 
03113   # You shouldn't have spaces before your brackets, except maybe after
03114   # 'delete []' or 'return []() {};'
03115   if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
03116     error(filename, linenum, 'whitespace/braces', 5,
03117           'Extra space before [')
03118 
03119   # In a range-based for loop, we want spaces before and after the colon, but
03120   # not around "::" tokens that might appear.
03121   if (Search(r'for *\(.*[^:]:[^: ]', line) or
03122       Search(r'for *\(.*[^: ]:[^:]', line)):
03123     error(filename, linenum, 'whitespace/forcolon', 2,
03124           'Missing space around colon in range-based for loop')
03125 
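# Editor's sketch of the range-based for check above (assumption):
#   for (auto x: values) {}     // flagged: missing space around the range colon
#   for (auto x : values) {}    // accepted
#   for (auto x : ns::values)   // accepted: "::" tokens are not mistaken for the range colon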
03126 
03127 def CheckOperatorSpacing(filename, clean_lines, linenum, error):
03128   """Checks for horizontal spacing around operators.
03129 
03130   Args:
03131     filename: The name of the current file.
03132     clean_lines: A CleansedLines instance containing the file.
03133     linenum: The number of the line to check.
03134     error: The function to call with any errors found.
03135   """
03136   line = clean_lines.elided[linenum]
03137 
03138   # Don't try to do spacing checks for operator methods.  Do this by
03139   # replacing the troublesome characters with something else,
03140   # preserving column position for all other characters.
03141   #
03142   # The replacement is done repeatedly to avoid false positives from
03143   # operators that call operators.
03144   while True:
03145     match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
03146     if match:
03147       line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
03148     else:
03149       break
03150 
03151   # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
03152   # Otherwise not.  Note we only check for non-spaces on *both* sides;
03153   # sometimes people put non-spaces on one side when aligning ='s among
03154   # many lines (not that this is behavior that I approve of...)
03155   if ((Search(r'[\w.]=', line) or
03156        Search(r'=[\w.]', line))
03157       and not Search(r'\b(if|while|for) ', line)
03158       # Operators taken from [lex.operators] in C++11 standard.
03159       and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
03160       and not Search(r'operator=', line)):
03161     error(filename, linenum, 'whitespace/operators', 4,
03162           'Missing spaces around =')
03163 
03164   # It's ok not to have spaces around binary operators like + - * /, but if
03165   # there's too little whitespace, we get concerned.  It's hard to tell,
03166   # though, so we punt on this one for now.  TODO.
03167 
03168   # You should always have whitespace around binary operators.
03169   #
03170   # Check <= and >= first to avoid false positives with < and >, then
03171   # check non-include lines for spacing around < and >.
03172   #
03173   # If the operator is followed by a comma, assume it's being used in a
03174   # macro context and don't do any checks.  This avoids false
03175   # positives.
03176   #
03177   # Note that && is not included here.  Those are checked separately
03178   # in CheckRValueReference
03179   match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
03180   if match:
03181     error(filename, linenum, 'whitespace/operators', 3,
03182           'Missing spaces around %s' % match.group(1))
03183   elif not Match(r'#.*include', line):
03184     # Look for < that is not surrounded by spaces.  This is only
03185     # triggered if both sides are missing spaces, even though
03186     # technically we should flag it if at least one side is missing a
03187     # space.  This is done to avoid some false positives with shifts.
03188     match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
03189     if match:
03190       (_, _, end_pos) = CloseExpression(
03191           clean_lines, linenum, len(match.group(1)))
03192       if end_pos <= -1:
03193         error(filename, linenum, 'whitespace/operators', 3,
03194               'Missing spaces around <')
03195 
03196     # Look for > that is not surrounded by spaces.  Similar to the
03197     # above, we only trigger if both sides are missing spaces to avoid
03198     # false positives with shifts.
03199     match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
03200     if match:
03201       (_, _, start_pos) = ReverseCloseExpression(
03202           clean_lines, linenum, len(match.group(1)))
03203       if start_pos <= -1:
03204         error(filename, linenum, 'whitespace/operators', 3,
03205               'Missing spaces around >')
03206 
03207   # We allow no-spaces around << when used like this: 10<<20, but
03208   # not otherwise (particularly, not when used as streams)
03209   #
03210   # We also allow operators following an opening parenthesis, since
03211   # those tend to be macros that deal with operators.
03212   match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
03213   if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
03214       not (match.group(1) == 'operator' and match.group(2) == ';')):
03215     error(filename, linenum, 'whitespace/operators', 3,
03216           'Missing spaces around <<')
03217 
03218   # We allow no-spaces around >> for almost anything.  This is because
03219   # C++11 allows ">>" to close nested templates, which accounts for
03220   # most cases when ">>" is not followed by a space.
03221   #
03222   # We still warn on ">>" followed by alpha character, because that is
03223   # likely due to ">>" being used for right shifts, e.g.:
03224   #   value >> alpha
03225   #
03226   # When ">>" is used to close templates, the alphanumeric letter that
03227   # follows would be part of an identifier, and there should still be
03228   # a space separating the template type and the identifier.
03229   #   type<type<type>> alpha
03230   match = Search(r'>>[a-zA-Z_]', line)
03231   if match:
03232     error(filename, linenum, 'whitespace/operators', 3,
03233           'Missing spaces around >>')
03234 
03235   # There shouldn't be space around unary operators
03236   match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
03237   if match:
03238     error(filename, linenum, 'whitespace/operators', 4,
03239           'Extra space for operator %s' % match.group(1))
03240 
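# Editor's sketch of operator-spacing cases (assumption, not original commentary):
#   x=y;                 // flagged: "Missing spaces around ="
#   if (a<b) return;     // flagged: "Missing spaces around <" (no matching > is found)
#   std::vector<int> v;  // accepted: the < is closed by a matching >
#   cout<<"hello";       // flagged: "Missing spaces around <<"; 10<<20 stays accepted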
03241 
03242 def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
03243   """Checks for horizontal spacing around parentheses.
03244 
03245   Args:
03246     filename: The name of the current file.
03247     clean_lines: A CleansedLines instance containing the file.
03248     linenum: The number of the line to check.
03249     error: The function to call with any errors found.
03250   """
03251   line = clean_lines.elided[linenum]
03252 
03253   # No spaces after an if, while, switch, or for
03254   match = Search(r' (if\(|for\(|while\(|switch\()', line)
03255   if match:
03256     error(filename, linenum, 'whitespace/parens', 5,
03257           'Missing space before ( in %s' % match.group(1))
03258 
03259   # For if/for/while/switch, the left and right parens should be
03260   # consistent about how many spaces are inside the parens, and
03261   # there should either be zero or one spaces inside the parens.
03262   # We don't want: "if ( foo)" or "if ( foo   )".
03263   # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
03264   match = Search(r'\b(if|for|while|switch)\s*'
03265                  r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
03266                  line)
03267   if match:
03268     if len(match.group(2)) != len(match.group(4)):
03269       if not (match.group(3) == ';' and
03270               len(match.group(2)) == 1 + len(match.group(4)) or
03271               not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
03272         error(filename, linenum, 'whitespace/parens', 5,
03273               'Mismatching spaces inside () in %s' % match.group(1))
03274     if len(match.group(2)) not in [0, 1]:
03275       error(filename, linenum, 'whitespace/parens', 5,
03276             'Should have zero or one spaces inside ( and ) in %s' %
03277             match.group(1))
03278 
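# Editor's sketch (assumption): conditions should use zero or one space
# symmetrically inside the parens, e.g.
#   if ( foo) {       // flagged: "Mismatching spaces inside () in if"
#   if (  foo  ) {    // flagged: "Should have zero or one spaces inside ( and ) in if"
#   if ( foo ) {      // accepted
#   while(x) {        // flagged when preceded by a space: "Missing space before ( in while("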
03279 
03280 def CheckCommaSpacing(filename, clean_lines, linenum, error):
03281   """Checks for horizontal spacing near commas and semicolons.
03282 
03283   Args:
03284     filename: The name of the current file.
03285     clean_lines: A CleansedLines instance containing the file.
03286     linenum: The number of the line to check.
03287     error: The function to call with any errors found.
03288   """
03289   raw = clean_lines.lines_without_raw_strings
03290   line = clean_lines.elided[linenum]
03291 
03292   # You should always have a space after a comma (either as fn arg or operator)
03293   #
03294   # This does not apply when the non-space character following the
03295   # comma is another comma, since the only time when that happens is
03296   # for empty macro arguments.
03297   #
03298   # We run this check in two passes: first pass on elided lines to
03299   # verify that lines contain missing whitespaces, second pass on raw
03300   # lines to confirm that those missing whitespaces are not due to
03301   # elided comments.
03302   if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
03303       Search(r',[^,\s]', raw[linenum])):
03304     error(filename, linenum, 'whitespace/comma', 3,
03305           'Missing space after ,')
03306 
03307   # You should always have a space after a semicolon
03308   # except for a few corner cases
03309   # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
03310   # space after ;
03311   if Search(r';[^\s};\\)/]', line):
03312     error(filename, linenum, 'whitespace/semicolon', 3,
03313           'Missing space after ;')
03314 
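# Editor's sketch (assumption, not original commentary):
#   Foo(a,b);                     // flagged: "Missing space after ,"
#   for (int i = 0;i < n; ++i)    // flagged: "Missing space after ;"
#   Foo(a, b);                    // accepted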
03315 
03316 def CheckBracesSpacing(filename, clean_lines, linenum, error):
03317   """Checks for horizontal spacing near braces and semicolons.
03318 
03319   Args:
03320     filename: The name of the current file.
03321     clean_lines: A CleansedLines instance containing the file.
03322     linenum: The number of the line to check.
03323     error: The function to call with any errors found.
03324   """
03325   line = clean_lines.elided[linenum]
03326 
03327   # Except after an opening paren, or after another opening brace (in case of
03328   # an initializer list, for instance), you should have spaces before your
03329   # braces. And since you should never have braces at the beginning of a line,
03330   # this is an easy test.
03331   match = Match(r'^(.*[^ ({>]){', line)
03332   if match:
03333     # Try a bit harder to check for brace initialization.  This
03334     # happens in one of the following forms:
03335     #   Constructor() : initializer_list_{} { ... }
03336     #   Constructor{}.MemberFunction()
03337     #   Type variable{};
03338     #   FunctionCall(type{}, ...);
03339     #   LastArgument(..., type{});
03340     #   LOG(INFO) << type{} << " ...";
03341     #   map_of_type[{...}] = ...;
03342     #   ternary = expr ? new type{} : nullptr;
03343     #   OuterTemplate<InnerTemplateConstructor<Type>{}>
03344     #
03345     # We check for the character following the closing brace, and
03346     # silence the warning if it's one of those listed above, i.e.
03347     # "{.;,)<>]:".
03348     #
03349     # To account for nested initializer list, we allow any number of
03350     # closing braces up to "{;,)<".  We can't simply silence the
03351     # warning on first sight of closing brace, because that would
03352     # cause false negatives for things that are not initializer lists.
03353     #   Silence this:         But not this:
03354     #     Outer{                if (...) {
03355     #       Inner{...}            if (...){  // Missing space before {
03356     #     };                    }
03357     #
03358     # There is a false negative with this approach if people inserted
03359     # spurious semicolons, e.g. "if (cond){};", but we will catch the
03360     # spurious semicolon with a separate check.
03361     (endline, endlinenum, endpos) = CloseExpression(
03362         clean_lines, linenum, len(match.group(1)))
03363     trailing_text = ''
03364     if endpos > -1:
03365       trailing_text = endline[endpos:]
03366     for offset in xrange(endlinenum + 1,
03367                          min(endlinenum + 3, clean_lines.NumLines() - 1)):
03368       trailing_text += clean_lines.elided[offset]
03369     if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
03370       error(filename, linenum, 'whitespace/braces', 5,
03371             'Missing space before {')
03372 
03373   # Make sure '} else {' has spaces.
03374   if Search(r'}else', line):
03375     error(filename, linenum, 'whitespace/braces', 5,
03376           'Missing space before else')
03377 
03378   # You shouldn't have a space before a semicolon at the end of the line.
03379   # There's a special case for "for" since the style guide allows space before
03380   # the semicolon there.
03381   if Search(r':\s*;\s*$', line):
03382     error(filename, linenum, 'whitespace/semicolon', 5,
03383           'Semicolon defining empty statement. Use {} instead.')
03384   elif Search(r'^\s*;\s*$', line):
03385     error(filename, linenum, 'whitespace/semicolon', 5,
03386           'Line contains only semicolon. If this should be an empty statement, '
03387           'use {} instead.')
03388   elif (Search(r'\s+;\s*$', line) and
03389         not Search(r'\bfor\b', line)):
03390     error(filename, linenum, 'whitespace/semicolon', 5,
03391           'Extra space before last semicolon. If this should be an empty '
03392           'statement, use {} instead.')
03393 
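# Editor's sketch of brace/semicolon spacing cases (assumption):
#   if (cond){              // flagged: "Missing space before {"
#   }else {                 // flagged: "Missing space before else"
#   std::vector<int> v{};   // accepted: brace initialization is recognized
#   ;                       // flagged: line contains only a semicolon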
03394 
03395 def IsDecltype(clean_lines, linenum, column):
03396   """Check if the token ending on (linenum, column) is decltype().
03397 
03398   Args:
03399     clean_lines: A CleansedLines instance containing the file.
03400     linenum: the number of the line to check.
03401     column: end column of the token to check.
03402   Returns:
03403     True if this token is a decltype() expression, False otherwise.
03404   """
03405   (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
03406   if start_col < 0:
03407     return False
03408   if Search(r'\bdecltype\s*$', text[0:start_col]):
03409     return True
03410   return False
03411 
03412 
03413 def IsTemplateParameterList(clean_lines, linenum, column):
03414   """Check if the token ending on (linenum, column) is the end of template<>.
03415 
03416   Args:
03417     clean_lines: A CleansedLines instance containing the file.
03418     linenum: the number of the line to check.
03419     column: end column of the token to check.
03420   Returns:
03421     True if this token is end of a template parameter list, False otherwise.
03422   """
03423   (_, startline, startpos) = ReverseCloseExpression(
03424       clean_lines, linenum, column)
03425   if (startpos > -1 and
03426       Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
03427     return True
03428   return False
03429 
03430 
03431 def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
03432   """Check if the token ending on (linenum, column) is a type.
03433 
03434   Assumes that text to the right of the column is "&&" or a function
03435   name.
03436 
03437   Args:
03438     typenames: set of type names from template-argument-list.
03439     clean_lines: A CleansedLines instance containing the file.
03440     nesting_state: A NestingState instance which maintains information about
03441                    the current stack of nested blocks being parsed.
03442     linenum: the number of the line to check.
03443     column: end column of the token to check.
03444   Returns:
03445     True if this token is a type, False if we are not sure.
03446   """
03447   prefix = clean_lines.elided[linenum][0:column]
03448 
03449   # Get one word to the left.  If we failed to do so, this is most
03450   # likely not a type, since it's unlikely that the type name and "&&"
03451   # would be split across multiple lines.
03452   match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
03453   if not match:
03454     return False
03455 
03456   # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
03457   # most likely an rvalue reference used inside a template.
03458   suffix = clean_lines.elided[linenum][column:]
03459   if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
03460     return True
03461 
03462   # Check for known types and end of templates:
03463   #   int&& variable
03464   #   vector<int>&& variable
03465   #
03466   # Because this function is called recursively, we also need to
03467   # recognize pointer and reference types:
03468   #   int* Function()
03469   #   int& Function()
03470   if (match.group(2) in typenames or
03471       match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
03472                          'short', 'int', 'long', 'signed', 'unsigned',
03473                          'float', 'double', 'void', 'auto', '>', '*', '&']):
03474     return True
03475 
03476   # If we see a close parenthesis, look for decltype on the other side.
03477   # decltype would unambiguously identify a type, anything else is
03478   # probably a parenthesized expression and not a type.
03479   if match.group(2) == ')':
03480     return IsDecltype(
03481         clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
03482 
03483   # Check for casts and cv-qualifiers.
03484   #   match.group(1)  remainder
03485   #   --------------  ---------
03486   #   const_cast<     type&&
03487   #   const           type&&
03488   #   type            const&&
03489   if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
03490             r'reinterpret_cast\s*<|\w+\s)\s*$',
03491             match.group(1)):
03492     return True
03493 
03494   # Look for a preceding symbol that might help differentiate the context.
03495   # These are the cases that would be ambiguous:
03496   #   match.group(1)  remainder
03497   #   --------------  ---------
03498   #   Call         (   expression &&
03499   #   Declaration  (   type&&
03500   #   sizeof       (   type&&
03501   #   if           (   expression &&
03502   #   while        (   expression &&
03503   #   for          (   type&&
03504   #   for(         ;   expression &&
03505   #   statement    ;   type&&
03506   #   block        {   type&&
03507   #   constructor  {   expression &&
03508   start = linenum
03509   line = match.group(1)
03510   match_symbol = None
03511   while start >= 0:
03512     # We want to skip over identifiers and commas to get to a symbol.
03513     # Commas are skipped so that we can find the opening parenthesis
03514     # for function parameter lists.
03515     match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
03516     if match_symbol:
03517       break
03518     start -= 1
03519     line = clean_lines.elided[start]
03520 
03521   if not match_symbol:
03522     # Probably the first statement in the file is an rvalue reference
03523     return True
03524 
03525   if match_symbol.group(2) == '}':
03526     # Found a closing brace, probably an indication of this:
03527     #   block{} type&&
03528     return True
03529 
03530   if match_symbol.group(2) == ';':
03531     # Found semicolon, probably one of these:
03532     #   for(; expression &&
03533     #   statement; type&&
03534 
03535     # Look for the previous 'for(' in the previous lines.
03536     before_text = match_symbol.group(1)
03537     for i in xrange(start - 1, max(start - 6, 0), -1):
03538       before_text = clean_lines.elided[i] + before_text
03539     if Search(r'for\s*\([^{};]*$', before_text):
03540       # This is the condition inside a for-loop
03541       return False
03542 
03543     # Did not find a for-init-statement before this semicolon, so this
03544     # is probably a new statement and not a condition.
03545     return True
03546 
03547   if match_symbol.group(2) == '{':
03548     # Found opening brace, probably one of these:
03549     #   block{ type&& = ... ; }
03550     #   constructor{ expression && expression }
03551 
03552     # Look for a closing brace or a semicolon.  If we see a semicolon
03553     # first, this is probably an rvalue reference.
03554     line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
03555     end = start
03556     depth = 1
03557     while True:
03558       for ch in line:
03559         if ch == ';':
03560           return True
03561         elif ch == '{':
03562           depth += 1
03563         elif ch == '}':
03564           depth -= 1
03565           if depth == 0:
03566             return False
03567       end += 1
03568       if end >= clean_lines.NumLines():
03569         break
03570       line = clean_lines.elided[end]
03571     # Incomplete program?
03572     return False
03573 
03574   if match_symbol.group(2) == '(':
03575     # Opening parenthesis.  Need to check what's to the left of the
03576     # parenthesis.  Look back one extra line for additional context.
03577     before_text = match_symbol.group(1)
03578     if linenum > 1:
03579       before_text = clean_lines.elided[linenum - 1] + before_text
03581 
03582     # Patterns that are likely to be types:
03583     #   [](type&&
03584     #   for (type&&
03585     #   sizeof(type&&
03586     #   operator=(type&&
03587     #
03588     if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
03589       return True
03590 
03591     # Patterns that are likely to be expressions:
03592     #   if (expression &&
03593     #   while (expression &&
03594     #   : initializer(expression &&
03595     #   , initializer(expression &&
03596     #   ( FunctionCall(expression &&
03597     #   + FunctionCall(expression &&
03598     #   + (expression &&
03599     #
03600     # The last '+' represents operators such as '+' and '-'.
03601     if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
03602       return False
03603 
03604     # Something else.  Check that tokens to the left look like
03605     #   return_type function_name
03606     match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
03607                        match_symbol.group(1))
03608     if match_func:
03609       # Check for constructors, which don't have return types.
03610       if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
03611         return True
03612       implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
03613       if (implicit_constructor and
03614           implicit_constructor.group(1) == implicit_constructor.group(2)):
03615         return True
03616       return IsRValueType(typenames, clean_lines, nesting_state, linenum,
03617                           len(match_func.group(1)))
03618 
03619     # Nothing before the function name.  If this is inside a block scope,
03620     # this is probably a function call.
03621     return not (nesting_state.previous_stack_top and
03622                 nesting_state.previous_stack_top.IsBlockInfo())
03623 
03624   if match_symbol.group(2) == '>':
03625     # Possibly a closing bracket, check that what's on the other side
03626     # looks like the start of a template.
03627     return IsTemplateParameterList(
03628         clean_lines, start, len(match_symbol.group(1)))
03629 
03630   # Some other symbol, usually something like "a=b&&c".  This is most
03631   # likely not a type.
03632   return False
03633 
03634 
03635 def IsDeletedOrDefault(clean_lines, linenum):
03636   """Check if current constructor or operator is deleted or default.
03637 
03638   Args:
03639     clean_lines: A CleansedLines instance containing the file.
03640     linenum: The number of the line to check.
03641   Returns:
03642     True if this is a deleted or defaulted constructor or operator.
03643   """
03644   open_paren = clean_lines.elided[linenum].find('(')
03645   if open_paren < 0:
03646     return False
03647   (close_line, _, close_paren) = CloseExpression(
03648       clean_lines, linenum, open_paren)
03649   if close_paren < 0:
03650     return False
03651   return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
03652 
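# Editor's illustration (assumption): for a line such as
#   Foo(Foo&& other) = delete;
# IsDeletedOrDefault matches the "= delete" after the closing paren, while for
#   Foo(Foo&& other) { Swap(&other); }
# it does not.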
03653 
03654 def IsRValueAllowed(clean_lines, linenum, typenames):
03655   """Check if RValue reference is allowed on a particular line.
03656 
03657   Args:
03658     clean_lines: A CleansedLines instance containing the file.
03659     linenum: The number of the line to check.
03660     typenames: set of type names from template-argument-list.
03661   Returns:
03662     True if line is within the region where RValue references are allowed.
03663   """
03664   # Allow region marked by PUSH/POP macros
03665   for i in xrange(linenum, 0, -1):
03666     line = clean_lines.elided[i]
03667     if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
03668       if not line.endswith('PUSH'):
03669         return False
03670       for j in xrange(linenum, clean_lines.NumLines(), 1):
03671         line = clean_lines.elided[j]
03672         if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
03673           return line.endswith('POP')
03674 
03675   # Allow operator=
03676   line = clean_lines.elided[linenum]
03677   if Search(r'\boperator\s*=\s*\(', line):
03678     return IsDeletedOrDefault(clean_lines, linenum)
03679 
03680   # Allow constructors
03681   match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
03682   if match and match.group(1) == match.group(2):
03683     return IsDeletedOrDefault(clean_lines, linenum)
03684   if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
03685     return IsDeletedOrDefault(clean_lines, linenum)
03686 
03687   if Match(r'\s*[\w<>]+\s*\(', line):
03688     previous_line = 'ReturnType'
03689     if linenum > 0:
03690       previous_line = clean_lines.elided[linenum - 1]
03691     if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
03692       return IsDeletedOrDefault(clean_lines, linenum)
03693 
03694   # Reject types not mentioned in template-argument-list
03695   while line:
03696     match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
03697     if not match:
03698       break
03699     if match.group(1) not in typenames:
03700       return False
03701     line = match.group(2)
03702 
03703   # All RValue types that were in template-argument-list should have
03704   # been removed by now.  Those were allowed, assuming that they will
03705   # be forwarded.
03706   #
03707   # If there are no remaining RValue types left (i.e. types that were
03708   # not found in template-argument-list), flag those as not allowed.
03709   return line.find('&&') < 0
03710 
03711 
03712 def GetTemplateArgs(clean_lines, linenum):
03713   """Find list of template arguments associated with this function declaration.
03714 
03715   Args:
03716     clean_lines: A CleansedLines instance containing the file.
03717     linenum: Line number containing the start of the function declaration,
03718              usually one line after the end of the template-argument-list.
03719   Returns:
03720     Set of type names, or empty set if this does not appear to have
03721     any template parameters.
03722   """
03723   # Find start of function
03724   func_line = linenum
03725   while func_line > 0:
03726     line = clean_lines.elided[func_line]
03727     if Match(r'^\s*$', line):
03728       return set()
03729     if line.find('(') >= 0:
03730       break
03731     func_line -= 1
03732   if func_line == 0:
03733     return set()
03734 
03735   # Collapse template-argument-list into a single string
03736   argument_list = ''
03737   match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line])
03738   if match:
03739     # template-argument-list on the same line as function name
03740     start_col = len(match.group(1))
03741     _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col)
03742     if end_col > -1 and end_line == func_line:
03743       start_col += 1  # Skip the opening bracket
03744       argument_list = clean_lines.elided[func_line][start_col:end_col]
03745 
03746   elif func_line > 1:
03747     # template-argument-list one line before function name
03748     match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1])
03749     if match:
03750       end_col = len(match.group(1))
03751       _, start_line, start_col = ReverseCloseExpression(
03752           clean_lines, func_line - 1, end_col)
03753       if start_col > -1:
03754         start_col += 1  # Skip the opening bracket
03755         while start_line < func_line - 1:
03756           argument_list += clean_lines.elided[start_line][start_col:]
03757           start_col = 0
03758           start_line += 1
03759         argument_list += clean_lines.elided[func_line - 1][start_col:end_col]
03760 
03761   if not argument_list:
03762     return set()
03763 
03764   # Extract type names
03765   typenames = set()
03766   while True:
03767     match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
03768                   argument_list)
03769     if not match:
03770       break
03771     typenames.add(match.group(1))
03772     argument_list = match.group(2)
03773   return typenames
03774 
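# Editor's illustration (assumption, not original commentary): for
#   template <typename T, class... Args>
#   void Forward(T&& t, Args&&... args);
# GetTemplateArgs should return {'T', 'Args'}, and IsRValueAllowed then treats
# those "&&" uses as forwarding references rather than flagging them.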
03775 
03776 def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
03777   """Check for rvalue references.
03778 
03779   Args:
03780     filename: The name of the current file.
03781     clean_lines: A CleansedLines instance containing the file.
03782     linenum: The number of the line to check.
03783     nesting_state: A NestingState instance which maintains information about
03784                    the current stack of nested blocks being parsed.
03785     error: The function to call with any errors found.
03786   """
03787   # Find lines missing spaces around &&.
03788   # TODO(unknown): currently we don't check for rvalue references
03789   # with spaces surrounding the && to avoid false positives with
03790   # boolean expressions.
03791   line = clean_lines.elided[linenum]
03792   match = Match(r'^(.*\S)&&', line)
03793   if not match:
03794     match = Match(r'(.*)&&\S', line)
03795   if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
03796     return
03797 
03798   # Either poorly formed && or an rvalue reference, check the context
03799   # to get a more accurate error message.  Mostly we want to determine
03800   # if what's to the left of "&&" is a type or not.
03801   typenames = GetTemplateArgs(clean_lines, linenum)
03802   and_pos = len(match.group(1))
03803   if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
03804     if not IsRValueAllowed(clean_lines, linenum, typenames):
03805       error(filename, linenum, 'build/c++11', 3,
03806             'RValue references are an unapproved C++ feature.')
03807   else:
03808     error(filename, linenum, 'whitespace/operators', 3,
03809           'Missing spaces around &&')
03810 
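# Editor's sketch (assumption): a declaration such as
#   void Process(Widget&& w);
# is reported as "RValue references are an unapproved C++ feature." unless it
# sits inside a GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH/POP region, is a defaulted
# or deleted special member, or Widget is a template parameter of the function.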
03811 
03812 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
03813   """Checks for additional blank line issues related to sections.
03814 
03815   Currently the only thing checked here is blank line before protected/private.
03816 
03817   Args:
03818     filename: The name of the current file.
03819     clean_lines: A CleansedLines instance containing the file.
03820     class_info: A _ClassInfo objects.
03821     linenum: The number of the line to check.
03822     error: The function to call with any errors found.
03823   """
03824   # Skip checks if the class is small, where small means 25 lines or less.
03825   # 25 lines seems like a good cutoff since that's the usual height of
03826   # terminals, and any class that can't fit in one screen can't really
03827   # be considered "small".
03828   #
03829   # Also skip checks if we are on the first line.  This accounts for
03830   # classes that look like
03831   #   class Foo { public: ... };
03832   #
03833   # If we didn't find the end of the class, last_line would be zero,
03834   # and the check will be skipped by the first condition.
03835   if (class_info.last_line - class_info.starting_linenum <= 24 or
03836       linenum <= class_info.starting_linenum):
03837     return
03838 
03839   matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
03840   if matched:
03841     # Issue warning if the line before public/protected/private was
03842     # not a blank line, but don't do this if the previous line contains
03843     # "class" or "struct".  This can happen two ways:
03844     #  - We are at the beginning of the class.
03845     #  - We are forward-declaring an inner class that is semantically
03846     #    private, but needed to be public for implementation reasons.
03847     # Also ignores cases where the previous line ends with a backslash, as is
03848     # common when defining classes in C macros.
03849     prev_line = clean_lines.lines[linenum - 1]
03850     if (not IsBlankLine(prev_line) and
03851         not Search(r'\b(class|struct)\b', prev_line) and
03852         not Search(r'\\$', prev_line)):
03853       # Try a bit harder to find the beginning of the class.  This is to
03854       # account for multi-line base-specifier lists, e.g.:
03855       #   class Derived
03856       #       : public Base {
03857       end_class_head = class_info.starting_linenum
03858       for i in range(class_info.starting_linenum, linenum):
03859         if Search(r'\{\s*$', clean_lines.lines[i]):
03860           end_class_head = i
03861           break
03862       if end_class_head < linenum - 1:
03863         error(filename, linenum, 'whitespace/blank_line', 3,
03864               '"%s:" should be preceded by a blank line' % matched.group(1))
03865 
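# Editor's illustration (assumption): in a class body spanning more than about
# 25 lines, an access label written directly after a member, e.g.
#   void Helper();
#   private:
# is flagged with '"private:" should be preceded by a blank line'; a label on
# the line right after "class Foo {" (or after a base-specifier list) is not.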
03866 
03867 def GetPreviousNonBlankLine(clean_lines, linenum):
03868   """Return the most recent non-blank line and its line number.
03869 
03870   Args:
03871     clean_lines: A CleansedLines instance containing the file contents.
03872     linenum: The number of the line to check.
03873 
03874   Returns:
03875     A tuple with two elements.  The first element is the contents of the last
03876     non-blank line before the current line, or the empty string if this is the
03877     first non-blank line.  The second is the line number of that line, or -1
03878     if this is the first non-blank line.
03879   """
03880 
03881   prevlinenum = linenum - 1
03882   while prevlinenum >= 0:
03883     prevline = clean_lines.elided[prevlinenum]
03884     if not IsBlankLine(prevline):     # if not a blank line...
03885       return (prevline, prevlinenum)
03886     prevlinenum -= 1
03887   return ('', -1)
03888 
03889 
03890 def CheckBraces(filename, clean_lines, linenum, error):
03891   """Looks for misplaced braces (e.g. at the end of line).
03892 
03893   Args:
03894     filename: The name of the current file.
03895     clean_lines: A CleansedLines instance containing the file.
03896     linenum: The number of the line to check.
03897     error: The function to call with any errors found.
03898   """
03899 
03900   line = clean_lines.elided[linenum]        # get rid of comments and strings
03901 
03902   if Match(r'\s*{\s*$', line):
03903     # We allow an open brace to start a line in the case where someone is using
03904     # braces in a block to explicitly create a new scope, which is commonly used
03905     # to control the lifetime of stack-allocated variables.  Braces are also
03906     # used for brace initializers inside function calls.  We don't detect this
03907     # perfectly: we just don't complain if the last non-whitespace character on
03908     # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
03909     # previous line starts a preprocessor block.
03910     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
03911     if (not Search(r'[,;:}{(]\s*$', prevline) and
03912         not Match(r'\s*#', prevline)):
03913       error(filename, linenum, 'whitespace/braces', 4,
03914             '{ should almost always be at the end of the previous line')
03915 
03916   # An else clause should be on the same line as the preceding closing brace.
03917   if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
03918     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
03919     if Match(r'\s*}\s*$', prevline):
03920       error(filename, linenum, 'whitespace/newline', 4,
03921             'An else should appear on the same line as the preceding }')
03922 
03923   # If braces come on one side of an else, they should be on both.
03924   # However, we have to worry about "else if" that spans multiple lines!
03925   if Search(r'else if\s*\(', line):       # could be multi-line if
03926     brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
03927     # find the ( after the if
03928     pos = line.find('else if')
03929     pos = line.find('(', pos)
03930     if pos > 0:
03931       (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
03932       brace_on_right = endline[endpos:].find('{') != -1
03933       if brace_on_left != brace_on_right:    # must be brace after if
03934         error(filename, linenum, 'readability/braces', 5,
03935               'If an else has a brace on one side, it should have it on both')
03936   elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
03937     error(filename, linenum, 'readability/braces', 5,
03938           'If an else has a brace on one side, it should have it on both')
03939 
03940   # Likewise, an else should never have its body on the same line as the else
03941   if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
03942     error(filename, linenum, 'whitespace/newline', 4,
03943           'Else clause should never be on same line as else (use 2 lines)')
03944 
03945   # In the same way, a do/while should never be on one line
03946   if Match(r'\s*do [^\s{]', line):
03947     error(filename, linenum, 'whitespace/newline', 4,
03948           'do/while clauses should not be on a single line')
03949 
03950   # Check single-line if/else bodies. The style guide says 'curly braces are not
03951   # required for single-line statements'. We additionally allow multi-line,
03952   # single statements, but we reject anything with more than one semicolon in
03953   # it. This means that the first semicolon after the if should be at the end of
03954   # its line, and the line after that should have an indent level equal to or
03955   # lower than the if. We also check for ambiguous if/else nesting without
03956   # braces.
03957   if_else_match = Search(r'\b(if\s*\(|else\b)', line)
03958   if if_else_match and not Match(r'\s*#', line):
03959     if_indent = GetIndentLevel(line)
03960     endline, endlinenum, endpos = line, linenum, if_else_match.end()
03961     if_match = Search(r'\bif\s*\(', line)
03962     if if_match:
03963       # This could be a multiline if condition, so find the end first.
03964       pos = if_match.end() - 1
03965       (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
03966     # Check for an opening brace, either directly after the if or on the next
03967     # line. If found, this isn't a single-statement conditional.
03968     if (not Match(r'\s*{', endline[endpos:])
03969         and not (Match(r'\s*$', endline[endpos:])
03970                  and endlinenum < (len(clean_lines.elided) - 1)
03971                  and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
03972       while (endlinenum < len(clean_lines.elided)
03973              and ';' not in clean_lines.elided[endlinenum][endpos:]):
03974         endlinenum += 1
03975         endpos = 0
03976       if endlinenum < len(clean_lines.elided):
03977         endline = clean_lines.elided[endlinenum]
03978         # We allow a mix of whitespace and closing braces (e.g. for one-liner
03979         # methods) and a single \ after the semicolon (for macros)
03980         endpos = endline.find(';')
03981         if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
03982           # Semicolon isn't the last character, there's something trailing.
03983           # Output a warning if the semicolon is not contained inside
03984           # a lambda expression.
03985           if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
03986                        endline):
03987             error(filename, linenum, 'readability/braces', 4,
03988                   'If/else bodies with multiple statements require braces')
03989         elif endlinenum < len(clean_lines.elided) - 1:
03990           # Make sure the next line is dedented
03991           next_line = clean_lines.elided[endlinenum + 1]
03992           next_indent = GetIndentLevel(next_line)
03993           # With ambiguous nested if statements, this will error out on the
03994           # if that *doesn't* match the else, regardless of whether it's the
03995           # inner one or outer one.
03996           if (if_match and Match(r'\s*else\b', next_line)
03997               and next_indent != if_indent):
03998             error(filename, linenum, 'readability/braces', 4,
03999                   'Else clause should be indented at the same level as if. '
04000                   'Ambiguous nested if/else chains require braces.')
04001           elif next_indent > if_indent:
04002             error(filename, linenum, 'readability/braces', 4,
04003                   'If/else bodies with multiple statements require braces')
04004 
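# Example of the brace placement rules enforced by CheckBraces (an
# illustrative sketch; the C++ fragments are shown as comments only):
#
#   if (cond)
#   {                      // whitespace/braces: '{' belongs at the end of the
#     DoSomething();       //   previous line
#   } else {               // accepted: 'else' sits between the braces
#     DoSomethingElse();
#   }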
04005 
04006 def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
04007   """Looks for redundant trailing semicolon.
04008 
04009   Args:
04010     filename: The name of the current file.
04011     clean_lines: A CleansedLines instance containing the file.
04012     linenum: The number of the line to check.
04013     error: The function to call with any errors found.
04014   """
04015 
04016   line = clean_lines.elided[linenum]
04017 
04018   # Block bodies should not be followed by a semicolon.  Due to C++11
04019   # brace initialization, there are more places where semicolons are
04020   # required than not, so we use a whitelist approach to check these
04021   # rather than a blacklist.  These are the places where "};" should
04022   # be replaced by just "}":
04023   # 1. Some flavor of block following closing parenthesis:
04024   #    for (;;) {};
04025   #    while (...) {};
04026   #    switch (...) {};
04027   #    Function(...) {};
04028   #    if (...) {};
04029   #    if (...) else if (...) {};
04030   #
04031   # 2. else block:
04032   #    if (...) else {};
04033   #
04034   # 3. const member function:
04035   #    Function(...) const {};
04036   #
04037   # 4. Block following some statement:
04038   #    x = 42;
04039   #    {};
04040   #
04041   # 5. Block at the beginning of a function:
04042   #    Function(...) {
04043   #      {};
04044   #    }
04045   #
04046   #    Note that naively checking for the preceding "{" will also match
04047   #    braces inside multi-dimensional arrays, but this is fine since
04048   #    that expression will not contain semicolons.
04049   #
04050   # 6. Block following another block:
04051   #    while (true) {}
04052   #    {};
04053   #
04054   # 7. End of namespaces:
04055   #    namespace {};
04056   #
04057   #    These semicolons seem far more common than other kinds of
04058   #    redundant semicolons, possibly due to people converting classes
04059   #    to namespaces.  For now we do not warn for this case.
04060   #
04061   # Try matching case 1 first.
04062   match = Match(r'^(.*\)\s*)\{', line)
04063   if match:
04064     # Matched closing parenthesis (case 1).  Check the token before the
04065     # matching opening parenthesis, and don't warn if it looks like a
04066     # macro.  This avoids these false positives:
04067     #  - macro that defines a base class
04068     #  - multi-line macro that defines a base class
04069     #  - macro that defines the whole class-head
04070     #
04071     # But we still issue warnings for macros that we know are safe to
04072     # warn, specifically:
04073     #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
04074     #  - TYPED_TEST
04075     #  - INTERFACE_DEF
04076     #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
04077     #
04078     # We implement a whitelist of safe macros instead of a blacklist of
04079     # unsafe macros, even though the latter appears less frequently in
04080     # google code and would have been easier to implement.  This is because
04081     # the downside of getting the whitelist wrong is a few extra
04082     # semicolons, while the downside of getting the blacklist wrong
04083     # would be compile errors.
04084     #
04085     # In addition to macros, we also don't want to warn on
04086     #  - Compound literals
04087     #  - Lambdas
04088     #  - alignas specifier with anonymous structs:
04089     closing_brace_pos = match.group(1).rfind(')')
04090     opening_parenthesis = ReverseCloseExpression(
04091         clean_lines, linenum, closing_brace_pos)
04092     if opening_parenthesis[2] > -1:
04093       line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
04094       macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
04095       func = Match(r'^(.*\])\s*$', line_prefix)
04096       if ((macro and
04097            macro.group(1) not in (
04098                'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
04099                'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
04100                'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
04101           (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
04102           Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
04103           Search(r'\s+=\s*$', line_prefix)):
04104         match = None
04105     if (match and
04106         opening_parenthesis[1] > 1 and
04107         Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
04108       # Multi-line lambda-expression
04109       match = None
04110 
04111   else:
04112     # Try matching cases 2-3.
04113     match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
04114     if not match:
04115       # Try matching cases 4-6.  These are always matched on separate lines.
04116       #
04117       # Note that we can't simply concatenate the previous line to the
04118       # current line and do a single match, otherwise we may output
04119       # duplicate warnings for the blank line case:
04120       #   if (cond) {
04121       #     // blank line
04122       #   }
04123       prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
04124       if prevline and Search(r'[;{}]\s*$', prevline):
04125         match = Match(r'^(\s*)\{', line)
04126 
04127   # Check matching closing brace
04128   if match:
04129     (endline, endlinenum, endpos) = CloseExpression(
04130         clean_lines, linenum, len(match.group(1)))
04131     if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
04132       # Current {} pair is eligible for semicolon check, and we have found
04133       # the redundant semicolon, output warning here.
04134       #
04135       # Note: because we are scanning forward for opening braces, and
04136       # outputting warnings for the matching closing brace, if there are
04137       # nested blocks with trailing semicolons, we will get the error
04138       # messages in reversed order.
04139       error(filename, endlinenum, 'readability/braces', 4,
04140             "You don't need a ; after a }")
04141 
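# Example inputs for CheckTrailingSemicolon (an illustrative sketch, as
# comments only):
#
#   while (true) {
#   };              // readability/braces: "You don't need a ; after a }"
#
#   class Foo {
#   };              // not flagged: only the whitelisted block forms above are
#                   // checked, and class/struct bodies do require the ';'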
04142 
04143 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
04144   """Look for empty loop/conditional body with only a single semicolon.
04145 
04146   Args:
04147     filename: The name of the current file.
04148     clean_lines: A CleansedLines instance containing the file.
04149     linenum: The number of the line to check.
04150     error: The function to call with any errors found.
04151   """
04152 
04153   # Search for loop keywords at the beginning of the line.  Because only
04154   # whitespace is allowed before the keywords, this will also ignore most
04155   # do-while-loops, since those lines should start with a closing brace.
04156   #
04157   # We also check "if" blocks here, since an empty conditional block
04158   # is likely an error.
04159   line = clean_lines.elided[linenum]
04160   matched = Match(r'\s*(for|while|if)\s*\(', line)
04161   if matched:
04162     # Find the end of the conditional expression
04163     (end_line, end_linenum, end_pos) = CloseExpression(
04164         clean_lines, linenum, line.find('('))
04165 
04166     # Output warning if what follows the condition expression is a semicolon.
04167     # No warning for all other cases, including whitespace or newline, since we
04168     # have a separate check for semicolons preceded by whitespace.
04169     if end_pos >= 0 and Match(r';', end_line[end_pos:]):
04170       if matched.group(1) == 'if':
04171         error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
04172               'Empty conditional bodies should use {}')
04173       else:
04174         error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
04175               'Empty loop bodies should use {} or continue')
04176 
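# Example inputs for CheckEmptyBlockBody (an illustrative sketch, as comments
# only):
#
#   while (Retry());                  // whitespace/empty_loop_body
#   if (ptr == NULL);                 // whitespace/empty_conditional_body
#   for (int i = 0; i < n; ++i) {}    // not flagged: empty body uses {}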
04177 
04178 def FindCheckMacro(line):
04179   """Find a replaceable CHECK-like macro.
04180 
04181   Args:
04182     line: line to search on.
04183   Returns:
04184     (macro name, start position), or (None, -1) if no replaceable
04185     macro is found.
04186   """
04187   for macro in _CHECK_MACROS:
04188     i = line.find(macro)
04189     if i >= 0:
04190       # Find opening parenthesis.  Do a regular expression match here
04191       # to make sure that we are matching the expected CHECK macro, as
04192       # opposed to some other macro that happens to contain the CHECK
04193       # substring.
04194       matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
04195       if not matched:
04196         continue
04197       return (macro, len(matched.group(1)))
04198   return (None, -1)
04199 
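# Example (an illustrative sketch; it assumes 'CHECK' is listed in
# _CHECK_MACROS, which is defined earlier in this file):
#
#   >>> FindCheckMacro('  CHECK(x == 42);')
#   ('CHECK', 7)
#   >>> FindCheckMacro('  RUNTIME_CHECK(x);')   # substring only, no \b match
#   (None, -1)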
04200 
04201 def CheckCheck(filename, clean_lines, linenum, error):
04202   """Checks the use of CHECK and EXPECT macros.
04203 
04204   Args:
04205     filename: The name of the current file.
04206     clean_lines: A CleansedLines instance containing the file.
04207     linenum: The number of the line to check.
04208     error: The function to call with any errors found.
04209   """
04210 
04211   # Decide the set of replacement macros that should be suggested
04212   lines = clean_lines.elided
04213   (check_macro, start_pos) = FindCheckMacro(lines[linenum])
04214   if not check_macro:
04215     return
04216 
04217   # Find end of the boolean expression by matching parentheses
04218   (last_line, end_line, end_pos) = CloseExpression(
04219       clean_lines, linenum, start_pos)
04220   if end_pos < 0:
04221     return
04222 
04223   # If the check macro is followed by something other than a
04224   # semicolon, assume users will log their own custom error messages
04225   # and don't suggest any replacements.
04226   if not Match(r'\s*;', last_line[end_pos:]):
04227     return
04228 
04229   if linenum == end_line:
04230     expression = lines[linenum][start_pos + 1:end_pos - 1]
04231   else:
04232     expression = lines[linenum][start_pos + 1:]
04233     for i in xrange(linenum + 1, end_line):
04234       expression += lines[i]
04235     expression += last_line[0:end_pos - 1]
04236 
04237   # Parse expression so that we can take parentheses into account.
04238   # This avoids false positives for inputs like "CHECK((a < 4) == b)",
04239   # which is not replaceable by CHECK_LE.
04240   lhs = ''
04241   rhs = ''
04242   operator = None
04243   while expression:
04244     matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
04245                     r'==|!=|>=|>|<=|<|\()(.*)$', expression)
04246     if matched:
04247       token = matched.group(1)
04248       if token == '(':
04249         # Parenthesized operand
04250         expression = matched.group(2)
04251         (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
04252         if end < 0:
04253           return  # Unmatched parenthesis
04254         lhs += '(' + expression[0:end]
04255         expression = expression[end:]
04256       elif token in ('&&', '||'):
04257         # Logical and/or operators.  This means the expression
04258         # contains more than one term, for example:
04259         #   CHECK(42 < a && a < b);
04260         #
04261         # These are not replaceable with CHECK_LE, so bail out early.
04262         return
04263       elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
04264         # Non-relational operator
04265         lhs += token
04266         expression = matched.group(2)
04267       else:
04268         # Relational operator
04269         operator = token
04270         rhs = matched.group(2)
04271         break
04272     else:
04273       # Unparenthesized operand.  Instead of appending to lhs one character
04274       # at a time, we do another regular expression match to consume several
04275       # characters at once if possible.  Trivial benchmark shows that this
04276       # is more efficient when the operands are longer than a single
04277       # character, which is generally the case.
04278       matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
04279       if not matched:
04280         matched = Match(r'^(\s*\S)(.*)$', expression)
04281         if not matched:
04282           break
04283       lhs += matched.group(1)
04284       expression = matched.group(2)
04285 
04286   # Only apply checks if we got all parts of the boolean expression
04287   if not (lhs and operator and rhs):
04288     return
04289 
04290   # Check that rhs does not contain logical operators.  We already know
04291   # that lhs is fine since the loop above parses out && and ||.
04292   if rhs.find('&&') > -1 or rhs.find('||') > -1:
04293     return
04294 
04295   # At least one of the operands must be a constant literal.  This is
04296   # to avoid suggesting replacements for unprintable things like
04297   # CHECK(variable != iterator)
04298   #
04299   # The following pattern matches decimal, hex integers, strings, and
04300   # characters (in that order).
04301   lhs = lhs.strip()
04302   rhs = rhs.strip()
04303   match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
04304   if Match(match_constant, lhs) or Match(match_constant, rhs):
04305     # Note: since we know both lhs and rhs, we can provide a more
04306     # descriptive error message like:
04307     #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
04308     # Instead of:
04309     #   Consider using CHECK_EQ instead of CHECK(a == b)
04310     #
04311     # We are still keeping the less descriptive message because if lhs
04312     # or rhs gets long, the error message might become unreadable.
04313     error(filename, linenum, 'readability/check', 2,
04314           'Consider using %s instead of %s(a %s b)' % (
04315               _CHECK_REPLACEMENT[check_macro][operator],
04316               check_macro, operator))
04317 
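# Example of the suggestions made by CheckCheck (an illustrative sketch, as
# comments only; it assumes the usual _CHECK_REPLACEMENT mapping defined
# earlier in this file):
#
#   CHECK(x == 42);           // readability/check: consider CHECK_EQ(a, b)
#   CHECK(x == y);            // not flagged: neither operand is a literal
#   CHECK(42 < a && a < b);   // not flagged: '&&' makes it non-replaceable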
04318 
04319 def CheckAltTokens(filename, clean_lines, linenum, error):
04320   """Check alternative keywords being used in boolean expressions.
04321 
04322   Args:
04323     filename: The name of the current file.
04324     clean_lines: A CleansedLines instance containing the file.
04325     linenum: The number of the line to check.
04326     error: The function to call with any errors found.
04327   """
04328   line = clean_lines.elided[linenum]
04329 
04330   # Avoid preprocessor lines
04331   if Match(r'^\s*#', line):
04332     return
04333 
04334   # Last ditch effort to avoid multi-line comments.  This will not help
04335   # if the comment started before the current line or ended after the
04336   # current line, but it catches most of the false positives.  At least,
04337   # it provides a way to work around this warning for people who use
04338   # multi-line comments in preprocessor macros.
04339   #
04340   # TODO(unknown): remove this once cpplint has better support for
04341   # multi-line comments.
04342   if line.find('/*') >= 0 or line.find('*/') >= 0:
04343     return
04344 
04345   for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
04346     error(filename, linenum, 'readability/alt_tokens', 2,
04347           'Use operator %s instead of %s' % (
04348               _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
04349 
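# Example inputs for CheckAltTokens (an illustrative sketch, as comments only;
# it assumes _ALT_TOKEN_REPLACEMENT maps 'and'/'or'/'not' to '&&'/'||'/'!',
# as defined earlier in this file):
#
#   if (a and b) { Run(); }              // readability/alt_tokens: use &&
#   #if defined(FOO) and defined(BAR)    // not flagged: preprocessor line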
04350 
04351 def GetLineWidth(line):
04352   """Determines the width of the line in column positions.
04353 
04354   Args:
04355     line: A string, which may be a Unicode string.
04356 
04357   Returns:
04358     The width of the line in column positions, accounting for Unicode
04359     combining characters and wide characters.
04360   """
04361   if isinstance(line, unicode):
04362     width = 0
04363     for uc in unicodedata.normalize('NFC', line):
04364       if unicodedata.east_asian_width(uc) in ('W', 'F'):
04365         width += 2
04366       elif not unicodedata.combining(uc):
04367         width += 1
04368     return width
04369   else:
04370     return len(line)
04371 
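# Example (an illustrative sketch; widths follow the east-asian-width rules
# applied above, so wide CJK characters count as two columns):
#
#   >>> GetLineWidth('int x;')              # plain str: just its length
#   6
#   >>> GetLineWidth(u'// \u4e2d\u6587')    # two wide characters
#   7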
04372 
04373 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
04374                error):
04375   """Checks rules from the 'C++ style rules' section of cppguide.html.
04376 
04377   Most of these rules are hard to test (naming, comment style), but we
04378   do what we can.  In particular we check for 2-space indents, line lengths,
04379   tab usage, spaces inside code, etc.
04380 
04381   Args:
04382     filename: The name of the current file.
04383     clean_lines: A CleansedLines instance containing the file.
04384     linenum: The number of the line to check.
04385     file_extension: The extension (without the dot) of the filename.
04386     nesting_state: A NestingState instance which maintains information about
04387                    the current stack of nested blocks being parsed.
04388     error: The function to call with any errors found.
04389   """
04390 
04391   # Don't use "elided" lines here, otherwise we can't check commented lines.
04392   # Don't want to use "raw" either, because we don't want to check inside C++11
04393   # raw strings.
04394   raw_lines = clean_lines.lines_without_raw_strings
04395   line = raw_lines[linenum]
04396 
04397   if line.find('\t') != -1:
04398     error(filename, linenum, 'whitespace/tab', 1,
04399           'Tab found; better to use spaces')
04400 
04401   # One or three blank spaces at the beginning of the line is weird; it's
04402   # hard to reconcile that with 2-space indents.
04403   # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
04404   # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
04405   # if(RLENGTH > 20) complain = 0;
04406   # if(match($0, " +(error|private|public|protected):")) complain = 0;
04407   # if(match(prev, "&& *$")) complain = 0;
04408   # if(match(prev, "\\|\\| *$")) complain = 0;
04409   # if(match(prev, "[\",=><] *$")) complain = 0;
04410   # if(match($0, " <<")) complain = 0;
04411   # if(match(prev, " +for \\(")) complain = 0;
04412   # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
04413   scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
04414   classinfo = nesting_state.InnermostClass()
04415   initial_spaces = 0
04416   cleansed_line = clean_lines.elided[linenum]
04417   while initial_spaces < len(line) and line[initial_spaces] == ' ':
04418     initial_spaces += 1
04419   if line and line[-1].isspace():
04420     error(filename, linenum, 'whitespace/end_of_line', 4,
04421           'Line ends in whitespace.  Consider deleting these extra spaces.')
04422   # There are certain situations where we allow one space, notably for
04423   # section labels, and also lines containing multi-line raw strings.
04424   elif ((initial_spaces == 1 or initial_spaces == 3) and
04425         not Match(scope_or_label_pattern, cleansed_line) and
04426         not (clean_lines.raw_lines[linenum] != line and
04427              Match(r'^\s*""', line))):
04428     error(filename, linenum, 'whitespace/indent', 3,
04429           'Weird number of spaces at line-start.  '
04430           'Are you using a 2-space indent?')
04431 
04432   # Check if the line is a header guard.
04433   is_header_guard = False
04434   if file_extension == 'h':
04435     cppvar = GetHeaderGuardCPPVariable(filename)
04436     if (line.startswith('#ifndef %s' % cppvar) or
04437         line.startswith('#define %s' % cppvar) or
04438         line.startswith('#endif  // %s' % cppvar)):
04439       is_header_guard = True
04440   # #include lines and header guards can be long, since there's no clean way to
04441   # split them.
04442   #
04443   # URLs can be long too.  It's possible to split these, but it makes them
04444   # harder to cut&paste.
04445   #
04446   # The "$Id:...$" comment may also get very long without it being the
04447   # developer's fault.
04448   if (not line.startswith('#include') and not is_header_guard and
04449       not Match(r'^\s*//.*http(s?)://\S*$', line) and
04450       not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
04451     line_width = GetLineWidth(line)
04452     extended_length = int((_line_length * 1.25))
04453     if line_width > extended_length:
04454       error(filename, linenum, 'whitespace/line_length', 4,
04455             'Lines should very rarely be longer than %i characters' %
04456             extended_length)
04457     elif line_width > _line_length:
04458       error(filename, linenum, 'whitespace/line_length', 2,
04459             'Lines should be <= %i characters long' % _line_length)
04460 
04461   if (cleansed_line.count(';') > 1 and
04462       # for loops are allowed two ;'s (and may run over two lines).
04463       cleansed_line.find('for') == -1 and
04464       (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
04465        GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
04466       # It's ok to have many commands in a switch case that fits in 1 line
04467       not ((cleansed_line.find('case ') != -1 or
04468             cleansed_line.find('default:') != -1) and
04469            cleansed_line.find('break;') != -1)):
04470     error(filename, linenum, 'whitespace/newline', 0,
04471           'More than one command on the same line')
04472 
04473   # Some more style checks
04474   CheckBraces(filename, clean_lines, linenum, error)
04475   CheckTrailingSemicolon(filename, clean_lines, linenum, error)
04476   CheckEmptyBlockBody(filename, clean_lines, linenum, error)
04477   CheckAccess(filename, clean_lines, linenum, nesting_state, error)
04478   CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
04479   CheckOperatorSpacing(filename, clean_lines, linenum, error)
04480   CheckParenthesisSpacing(filename, clean_lines, linenum, error)
04481   CheckCommaSpacing(filename, clean_lines, linenum, error)
04482   CheckBracesSpacing(filename, clean_lines, linenum, error)
04483   CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
04484   CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
04485   CheckCheck(filename, clean_lines, linenum, error)
04486   CheckAltTokens(filename, clean_lines, linenum, error)
04487   classinfo = nesting_state.InnermostClass()
04488   if classinfo:
04489     CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
04490 
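# Worked example of the line-length thresholds in CheckStyle above (an
# illustrative sketch; it assumes the default --linelength of 80, so the
# extended limit is int(80 * 1.25) == 100):
#
#   width <= 80          -> no warning
#   81 <= width <= 100   -> whitespace/line_length, confidence 2
#   width > 100          -> whitespace/line_length, confidence 4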
04491 
04492 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
04493 # Matches the first component of a filename delimited by -s and _s. That is:
04494 #  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
04495 #  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
04496 #  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
04497 #  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
04498 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
04499 
04500 
04501 def _DropCommonSuffixes(filename):
04502   """Drops common suffixes like _test.cc or -inl.h from filename.
04503 
04504   For example:
04505     >>> _DropCommonSuffixes('foo/foo-inl.h')
04506     'foo/foo'
04507     >>> _DropCommonSuffixes('foo/bar/foo.cc')
04508     'foo/bar/foo'
04509     >>> _DropCommonSuffixes('foo/foo_internal.h')
04510     'foo/foo'
04511     >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
04512     'foo/foo_unusualinternal'
04513 
04514   Args:
04515     filename: The input filename.
04516 
04517   Returns:
04518     The filename with the common suffix removed.
04519   """
04520   for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
04521                  'inl.h', 'impl.h', 'internal.h'):
04522     if (filename.endswith(suffix) and len(filename) > len(suffix) and
04523         filename[-len(suffix) - 1] in ('-', '_')):
04524       return filename[:-len(suffix) - 1]
04525   return os.path.splitext(filename)[0]
04526 
04527 
04528 def _IsTestFilename(filename):
04529   """Determines if the given filename has a suffix that identifies it as a test.
04530 
04531   Args:
04532     filename: The input filename.
04533 
04534   Returns:
04535     True if 'filename' looks like a test, False otherwise.
04536   """
04537   if (filename.endswith('_test.cc') or
04538       filename.endswith('_unittest.cc') or
04539       filename.endswith('_regtest.cc')):
04540     return True
04541   else:
04542     return False
04543 
04544 
04545 def _ClassifyInclude(fileinfo, include, is_system):
04546   """Figures out what kind of header 'include' is.
04547 
04548   Args:
04549     fileinfo: The current file cpplint is running over. A FileInfo instance.
04550     include: The path to a #included file.
04551     is_system: True if the #include used <> rather than "".
04552 
04553   Returns:
04554     One of the _XXX_HEADER constants.
04555 
04556   For example:
04557     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
04558     _C_SYS_HEADER
04559     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
04560     _CPP_SYS_HEADER
04561     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
04562     _LIKELY_MY_HEADER
04563     >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
04564     ...                  'bar/foo_other_ext.h', False)
04565     _POSSIBLE_MY_HEADER
04566     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
04567     _OTHER_HEADER
04568   """
04569   # This is a list of all standard c++ header files, except
04570   # those already checked for above.
04571   is_cpp_h = include in _CPP_HEADERS
04572 
04573   if is_system:
04574     if is_cpp_h:
04575       return _CPP_SYS_HEADER
04576     else:
04577       return _C_SYS_HEADER
04578 
04579   # If the target file and the include we're checking share a
04580   # basename when we drop common extensions, and the include
04581   # lives in '.', then it's likely to be owned by the target file.
04582   target_dir, target_base = (
04583       os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
04584   include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
04585   if target_base == include_base and (
04586       include_dir == target_dir or
04587       include_dir == os.path.normpath(target_dir + '/../public')):
04588     return _LIKELY_MY_HEADER
04589 
04590   # If the target and include share some initial basename
04591   # component, it's possible the target is implementing the
04592   # include, so it's allowed to be first, but we'll never
04593   # complain if it's not there.
04594   target_first_component = _RE_FIRST_COMPONENT.match(target_base)
04595   include_first_component = _RE_FIRST_COMPONENT.match(include_base)
04596   if (target_first_component and include_first_component and
04597       target_first_component.group(0) ==
04598       include_first_component.group(0)):
04599     return _POSSIBLE_MY_HEADER
04600 
04601   return _OTHER_HEADER
04602 
04603 
04604 
04605 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
04606   """Check rules that are applicable to #include lines.
04607 
04608   Strings on #include lines are NOT removed from the elided line, to make
04609   certain tasks easier. However, to prevent false positives, checks
04610   applicable to #include lines in CheckLanguage must be put here.
04611 
04612   Args:
04613     filename: The name of the current file.
04614     clean_lines: A CleansedLines instance containing the file.
04615     linenum: The number of the line to check.
04616     include_state: An _IncludeState instance in which the headers are inserted.
04617     error: The function to call with any errors found.
04618   """
04619   fileinfo = FileInfo(filename)
04620   line = clean_lines.lines[linenum]
04621 
04622   # "include" should use the new style "foo/bar.h" instead of just "bar.h"
04623   # Only do this check if the included header follows google naming
04624   # conventions.  If not, assume that it's a 3rd party API that
04625   # requires special include conventions.
04626   #
04627   # We also make an exception for Lua headers, which follow google
04628   # naming convention but not the include convention.
04629   match = Match(r'#include\s*"([^/]+\.h)"', line)
04630   if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
04631     error(filename, linenum, 'build/include', 4,
04632           'Include the directory when naming .h files')
04633 
04634   # we shouldn't include a file more than once. actually, there are a
04635   # handful of instances where doing so is okay, but in general it's
04636   # not.
04637   match = _RE_PATTERN_INCLUDE.search(line)
04638   if match:
04639     include = match.group(2)
04640     is_system = (match.group(1) == '<')
04641     duplicate_line = include_state.FindHeader(include)
04642     if duplicate_line >= 0:
04643       error(filename, linenum, 'build/include', 4,
04644             '"%s" already included at %s:%s' %
04645             (include, filename, duplicate_line))
04646     elif (include.endswith('.cc') and
04647           os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
04648       error(filename, linenum, 'build/include', 4,
04649             'Do not include .cc files from other packages')
04650     elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
04651       include_state.include_list[-1].append((include, linenum))
04652 
04653       # We want to ensure that headers appear in the right order:
04654       # 1) for foo.cc, foo.h  (preferred location)
04655       # 2) c system files
04656       # 3) cpp system files
04657       # 4) for foo.cc, foo.h  (deprecated location)
04658       # 5) other google headers
04659       #
04660       # We classify each include statement as one of those 5 types
04661       # using a number of techniques. The include_state object keeps
04662       # track of the highest type seen, and complains if we see a
04663       # lower type after that.
04664       error_message = include_state.CheckNextIncludeOrder(
04665           _ClassifyInclude(fileinfo, include, is_system))
04666       if error_message:
04667         error(filename, linenum, 'build/include_order', 4,
04668               '%s. Should be: %s.h, c system, c++ system, other.' %
04669               (error_message, fileinfo.BaseName()))
04670       canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
04671       if not include_state.IsInAlphabeticalOrder(
04672           clean_lines, linenum, canonical_include):
04673         error(filename, linenum, 'build/include_alpha', 4,
04674               'Include "%s" not in alphabetical order' % include)
04675       include_state.SetLastHeader(canonical_include)
04676 
04677 
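# Example include ordering accepted by CheckIncludeLine for foo/foo.cc (an
# illustrative sketch, as comments only):
#
#   #include "foo/foo.h"     // 1) related header, preferred location
#   #include <stdio.h>       // 2) C system header
#   #include <string>        // 3) C++ system header
#   #include "bar/util.h"    // 5) other google headers, in alphabetical order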
04678 
04679 def _GetTextInside(text, start_pattern):
04680   r"""Retrieves all the text between matching open and close parentheses.
04681 
04682   Given a string of lines and a regular expression string, retrieve all the text
04683   following the expression and between opening punctuation symbols like
04684   (, [, or {, and the matching close-punctuation symbol. This handles properly
04685   nested occurrences of the punctuation, so for text like
04686     printf(a(), b(c()));
04687   a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
04688   start_pattern must match a string ending with an opening punctuation symbol.
04689 
04690   Args:
04691     text: The lines to extract text from. Its comments and strings must be elided.
04692            It can be a single line or span multiple lines.
04693     start_pattern: The regexp string indicating where to start extracting
04694                    the text.
04695   Returns:
04696     The extracted text.
04697     None if either the opening string or ending punctuation could not be found.
04698   """
04699   # TODO(unknown): Audit cpplint.py to see what places could be profitably
04700   # rewritten to use _GetTextInside (they use inferior regexp matching today).
04701 
04702   # Map each opening punctuation symbol to its matching closing symbol.
04703   matching_punctuation = {'(': ')', '{': '}', '[': ']'}
04704   closing_punctuation = set(matching_punctuation.itervalues())
04705 
04706   # Find the position to start extracting text.
04707   match = re.search(start_pattern, text, re.M)
04708   if not match:  # start_pattern not found in text.
04709     return None
04710   start_position = match.end(0)
04711 
04712   assert start_position > 0, (
04713       'start_pattern must end with an opening punctuation.')
04714   assert text[start_position - 1] in matching_punctuation, (
04715       'start_pattern must end with an opening punctuation.')
04716   # Stack of closing punctuations we expect to have in text after position.
04717   punctuation_stack = [matching_punctuation[text[start_position - 1]]]
04718   position = start_position
04719   while punctuation_stack and position < len(text):
04720     if text[position] == punctuation_stack[-1]:
04721       punctuation_stack.pop()
04722     elif text[position] in closing_punctuation:
04723       # A closing punctuation without matching opening punctuations.
04724       return None
04725     elif text[position] in matching_punctuation:
04726       punctuation_stack.append(matching_punctuation[text[position]])
04727     position += 1
04728   if punctuation_stack:
04729     # Opening punctuation left without a matching closing symbol.
04730     return None
04731   # All punctuation matched.
04732   return text[start_position:position - 1]
04733 
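# Example of the nesting behaviour (an illustrative sketch):
#
#   >>> _GetTextInside('call{ a, {b, c} };', r'call\{')
#   ' a, {b, c} '
#   >>> _GetTextInside('printf(oops', r'printf\(')   # unbalanced -> None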
04734 
04735 # Patterns for matching call-by-reference parameters.
04736 #
04737 # Supports nested templates up to 2 levels deep using this messy pattern:
04738 #   < (?: < (?: < [^<>]*
04739 #               >
04740 #           |   [^<>] )*
04741 #         >
04742 #     |   [^<>] )*
04743 #   >
04744 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
04745 _RE_PATTERN_TYPE = (
04746     r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
04747     r'(?:\w|'
04748     r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
04749     r'::)+')
04750 # A call-by-reference parameter ends with '& identifier'.
04751 _RE_PATTERN_REF_PARAM = re.compile(
04752     r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
04753     r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
04754 # A call-by-const-reference parameter either ends with 'const& identifier'
04755 # or looks like 'const type& identifier' when 'type' is atomic.
04756 _RE_PATTERN_CONST_REF_PARAM = (
04757     r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
04758     r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
04759 
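# Example matches for the patterns above (an illustrative sketch, as comments
# only):
#
#   void Update(Foo& foo);              // 'Foo& foo' matches _RE_PATTERN_REF_PARAM
#   void Print(const string& message);  // also matches _RE_PATTERN_CONST_REF_PARAM,
#                                       // so CheckForNonConstReference accepts it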
04760 
04761 def CheckLanguage(filename, clean_lines, linenum, file_extension,
04762                   include_state, nesting_state, error):
04763   """Checks rules from the 'C++ language rules' section of cppguide.html.
04764 
04765   Some of these rules are hard to test (function overloading, using
04766   uint32 inappropriately), but we do the best we can.
04767 
04768   Args:
04769     filename: The name of the current file.
04770     clean_lines: A CleansedLines instance containing the file.
04771     linenum: The number of the line to check.
04772     file_extension: The extension (without the dot) of the filename.
04773     include_state: An _IncludeState instance in which the headers are inserted.
04774     nesting_state: A NestingState instance which maintains information about
04775                    the current stack of nested blocks being parsed.
04776     error: The function to call with any errors found.
04777   """
04778   # If the line is empty or consists of entirely a comment, no need to
04779   # check it.
04780   line = clean_lines.elided[linenum]
04781   if not line:
04782     return
04783 
04784   match = _RE_PATTERN_INCLUDE.search(line)
04785   if match:
04786     CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
04787     return
04788 
04789   # Reset include state across preprocessor directives.  This is meant
04790   # to silence warnings for conditional includes.
04791   match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
04792   if match:
04793     include_state.ResetSection(match.group(1))
04794 
04795   # Make Windows paths like Unix.
04796   fullname = os.path.abspath(filename).replace('\\', '/')
04797
04798   # Perform other checks now that we are sure that this is not an include line
04799   CheckCasts(filename, clean_lines, linenum, error)
04800   CheckGlobalStatic(filename, clean_lines, linenum, error)
04801   CheckPrintf(filename, clean_lines, linenum, error)
04802 
04803   if file_extension == 'h':
04804     # TODO(unknown): check that 1-arg constructors are explicit.
04805     #                How to tell it's a constructor?
04806     #                (handled in CheckForNonStandardConstructs for now)
04807     # TODO(unknown): check that classes declare or disable copy/assign
04808     #                (level 1 error)
04809     pass
04810 
04811   # Check if people are using the verboten C basic types.  The only exception
04812   # we regularly allow is "unsigned short port" for port.
04813   if Search(r'\bshort port\b', line):
04814     if not Search(r'\bunsigned short port\b', line):
04815       error(filename, linenum, 'runtime/int', 4,
04816             'Use "unsigned short" for ports, not "short"')
04817   else:
04818     match = Search(r'\b(short|long(?! +double)|long long)\b', line)
04819     if match:
04820       error(filename, linenum, 'runtime/int', 4,
04821             'Use int16/int64/etc, rather than the C type %s' % match.group(1))
04822 
04823   # Check if some verboten operator overloading is going on
04824   # TODO(unknown): catch out-of-line unary operator&:
04825   #   class X {};
04826   #   int operator&(const X& x) { return 42; }  // unary operator&
04827   # The trick is it's hard to tell apart from binary operator&:
04828   #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
04829   if Search(r'\boperator\s*&\s*\(\s*\)', line):
04830     error(filename, linenum, 'runtime/operator', 4,
04831           'Unary operator& is dangerous.  Do not use it.')
04832 
04833   # Check for suspicious usage of "if" like
04834   # } if (a == b) {
04835   if Search(r'\}\s*if\s*\(', line):
04836     error(filename, linenum, 'readability/braces', 4,
04837           'Did you mean "else if"? If not, start a new line for "if".')
04838 
04839   # Check for potential format string bugs like printf(foo).
04840   # We constrain the pattern not to pick things like DocidForPrintf(foo).
04841   # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
04842   # TODO(unknown): Catch the following case. Need to change the calling
04843   # convention of the whole function to process multiple line to handle it.
04844   #   printf(
04845   #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
04846   printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
04847   if printf_args:
04848     match = Match(r'([\w.\->()]+)$', printf_args)
04849     if match and match.group(1) != '__VA_ARGS__':
04850       function_name = re.search(r'\b((?:string)?printf)\s*\(',
04851                                 line, re.I).group(1)
04852       error(filename, linenum, 'runtime/printf', 4,
04853             'Potential format string bug. Do %s("%%s", %s) instead.'
04854             % (function_name, match.group(1)))
04855 
04856   # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
04857   match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
04858   if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
04859     error(filename, linenum, 'runtime/memset', 4,
04860           'Did you mean "memset(%s, 0, %s)"?'
04861           % (match.group(1), match.group(2)))
04862 
04863   if Search(r'\busing namespace\b', line):
04864     error(filename, linenum, 'build/namespaces', 5,
04865           'Do not use namespace using-directives.  '
04866           'Use using-declarations instead.')
04867 
04868   # Detect variable-length arrays.
04869   match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
04870   if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
04871       match.group(3).find(']') == -1):
04872     # Split the size using space and arithmetic operators as delimiters.
04873     # If any of the resulting tokens are not compile time constants then
04874     # report the error.
04875     tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
04876     is_const = True
04877     skip_next = False
04878     for tok in tokens:
04879       if skip_next:
04880         skip_next = False
04881         continue
04882 
04883       if Search(r'sizeof\(.+\)', tok): continue
04884       if Search(r'arraysize\(\w+\)', tok): continue
04885 
04886       tok = tok.lstrip('(')
04887       tok = tok.rstrip(')')
04888       if not tok: continue
04889       if Match(r'\d+', tok): continue
04890       if Match(r'0[xX][0-9a-fA-F]+', tok): continue
04891       if Match(r'k[A-Z0-9]\w*', tok): continue
04892       if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
04893       if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
04894       # A catch all for tricky sizeof cases, including 'sizeof expression',
04895       # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
04896       # requires skipping the next token because we split on ' ' and '*'.
04897       if tok.startswith('sizeof'):
04898         skip_next = True
04899         continue
04900       is_const = False
04901       break
04902     if not is_const:
04903       error(filename, linenum, 'runtime/arrays', 1,
04904             'Do not use variable-length arrays.  Use an appropriately named '
04905             "('k' followed by CamelCase) compile-time constant for the size.")
04906 
04907   # Check for use of unnamed namespaces in header files.  Registration
04908   # macros are typically OK, so we allow use of "namespace {" on lines
04909   # that end with backslashes.
04910   if (file_extension == 'h'
04911       and Search(r'\bnamespace\s*{', line)
04912       and line[-1] != '\\'):
04913     error(filename, linenum, 'build/namespaces', 4,
04914           'Do not use unnamed namespaces in header files.  See '
04915           'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
04916           ' for more information.')
04917 
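# Example inputs for the checks in CheckLanguage (an illustrative sketch, as
# comments only):
#
#   short count = 0;                // runtime/int: use int16 instead of short
#   unsigned short port = 8080;     // not flagged: the one allowed use of short
#   memset(buf, sizeof(buf), 0);    // runtime/memset: arguments are swapped
#   char path[len + 1];             // runtime/arrays: variable-length array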
04918 
04919 def CheckGlobalStatic(filename, clean_lines, linenum, error):
04920   """Check for unsafe global or static objects.
04921 
04922   Args:
04923     filename: The name of the current file.
04924     clean_lines: A CleansedLines instance containing the file.
04925     linenum: The number of the line to check.
04926     error: The function to call with any errors found.
04927   """
04928   line = clean_lines.elided[linenum]
04929 
04930   # Match two lines at a time to support multiline declarations
04931   if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
04932     line += clean_lines.elided[linenum + 1].strip()
04933 
04934   # Check for people declaring static/global STL strings at the top level.
04935   # This is dangerous because the C++ language does not guarantee that
04936   # globals with constructors are initialized before the first access.
04937   match = Match(
04938       r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
04939       line)
04940 
04941   # Remove false positives:
04942   # - String pointers (as opposed to values).
04943   #    string *pointer
04944   #    const string *pointer
04945   #    string const *pointer
04946   #    string *const pointer
04947   #
04948   # - Functions and template specializations.
04949   #    string Function<Type>(...
04950   #    string Class<Type>::Method(...
04951   #
04952   # - Operators.  These are matched separately because operator names
04953   #   cross non-word boundaries, and trying to match both operators
04954   #   and functions at the same time would decrease accuracy of
04955   #   matching identifiers.
04956   #    string Class::operator*()
04957   if (match and
04958       not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
04959       not Search(r'\boperator\W', line) and
04960       not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
04961     error(filename, linenum, 'runtime/string', 4,
04962           'For a static/global string constant, use a C style string instead: '
04963           '"%schar %s[]".' %
04964           (match.group(1), match.group(2)))
04965 
04966   if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
04967     error(filename, linenum, 'runtime/init', 4,
04968           'You seem to be initializing a member variable with itself.')
04969 
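# Example inputs for CheckGlobalStatic (an illustrative sketch, as comments
# only):
#
#   static string kPath = "/tmp";          // runtime/string: prefer
#                                          //   "static char kPath[]"
#   string BuildPath(const string& dir);   // not flagged: function declaration
#   Foo::Foo() : count_(count_) {}         // runtime/init: member initialized
#                                          //   with itself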
04970 
04971 def CheckPrintf(filename, clean_lines, linenum, error):
04972   """Check for printf related issues.
04973 
04974   Args:
04975     filename: The name of the current file.
04976     clean_lines: A CleansedLines instance containing the file.
04977     linenum: The number of the line to check.
04978     error: The function to call with any errors found.
04979   """
04980   line = clean_lines.elided[linenum]
04981 
04982   # When snprintf is used, the second argument shouldn't be a literal.
04983   match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
04984   if match and match.group(2) != '0':
04985     # If 2nd arg is zero, snprintf is used to calculate size.
04986     error(filename, linenum, 'runtime/printf', 3,
04987           'If you can, use sizeof(%s) instead of %s as the 2nd arg '
04988           'to snprintf.' % (match.group(1), match.group(2)))
04989 
04990   # Check if some verboten C functions are being used.
04991   if Search(r'\bsprintf\s*\(', line):
04992     error(filename, linenum, 'runtime/printf', 5,
04993           'Never use sprintf. Use snprintf instead.')
04994   match = Search(r'\b(strcpy|strcat)\s*\(', line)
04995   if match:
04996     error(filename, linenum, 'runtime/printf', 4,
04997           'Almost always, snprintf is better than %s' % match.group(1))
04998 
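# Example inputs for CheckPrintf (an illustrative sketch, as comments only):
#
#   snprintf(buf, 10, "%s", msg);   // runtime/printf: use sizeof(buf), not 10
#   snprintf(NULL, 0, "%d", n);     // not flagged: size 0 just computes length
#   sprintf(buf, "%s", msg);        // runtime/printf: never use sprintf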
04999 
05000 def IsDerivedFunction(clean_lines, linenum):
05001   """Check if current line contains an inherited function.
05002 
05003   Args:
05004     clean_lines: A CleansedLines instance containing the file.
05005     linenum: The number of the line to check.
05006   Returns:
05007     True if current line contains a function with "override"
05008     virt-specifier.
05009   """
05010   # Scan back a few lines for start of current function
05011   for i in xrange(linenum, max(-1, linenum - 10), -1):
05012     match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
05013     if match:
05014       # Look for "override" after the matching closing parenthesis
05015       line, _, closing_paren = CloseExpression(
05016           clean_lines, i, len(match.group(1)))
05017       return (closing_paren >= 0 and
05018               Search(r'\boverride\b', line[closing_paren:]))
05019   return False
05020 
05021 
05022 def IsOutOfLineMethodDefinition(clean_lines, linenum):
05023   """Check if current line contains an out-of-line method definition.
05024 
05025   Args:
05026     clean_lines: A CleansedLines instance containing the file.
05027     linenum: The number of the line to check.
05028   Returns:
05029     True if current line contains an out-of-line method definition.
05030   """
05031   # Scan back a few lines for start of current function
05032   for i in xrange(linenum, max(-1, linenum - 10), -1):
05033     if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
05034       return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
05035   return False
05036 
05037 
05038 def IsInitializerList(clean_lines, linenum):
05039   """Check if current line is inside constructor initializer list.
05040 
05041   Args:
05042     clean_lines: A CleansedLines instance containing the file.
05043     linenum: The number of the line to check.
05044   Returns:
05045     True if current line appears to be inside constructor initializer
05046     list, False otherwise.
05047   """
05048   for i in xrange(linenum, 1, -1):
05049     line = clean_lines.elided[i]
05050     if i == linenum:
05051       remove_function_body = Match(r'^(.*)\{\s*$', line)
05052       if remove_function_body:
05053         line = remove_function_body.group(1)
05054 
05055     if Search(r'\s:\s*\w+[({]', line):
05056       # A lone colon tends to indicate the start of a constructor
05057       # initializer list.  It could also be a ternary operator, which
05058       # also tends to appear in constructor initializer lists as
05059       # opposed to parameter lists.
05060       return True
05061     if Search(r'\}\s*,\s*$', line):
05062       # A closing brace followed by a comma is probably the end of a
05063       # brace-initialized member in constructor initializer list.
05064       return True
05065     if Search(r'[{};]\s*$', line):
05066       # Found one of the following:
05067       # - A closing brace or semicolon, probably the end of the previous
05068       #   function.
05069       # - An opening brace, probably the start of current class or namespace.
05070       #
05071       # Current line is probably not inside an initializer list since
05072       # we saw one of those things without seeing the starting colon.
05073       return False
05074 
05075   # Got to the beginning of the file without seeing the start of
05076   # constructor initializer list.
05077   return False
05078 
05079 
05080 def CheckForNonConstReference(filename, clean_lines, linenum,
05081                               nesting_state, error):
05082   """Check for non-const references.
05083 
05084   Separate from CheckLanguage since it scans backwards from current
05085   line, instead of scanning forward.
05086 
05087   Args:
05088     filename: The name of the current file.
05089     clean_lines: A CleansedLines instance containing the file.
05090     linenum: The number of the line to check.
05091     nesting_state: A NestingState instance which maintains information about
05092                    the current stack of nested blocks being parsed.
05093     error: The function to call with any errors found.
05094   """
05095   # Do nothing if there is no '&' on current line.
05096   line = clean_lines.elided[linenum]
05097   if '&' not in line:
05098     return
05099 
05100   # If a function is inherited, the current function doesn't have much of
05101   # a choice, so any non-const references should not be blamed on the
05102   # derived function.
05103   if IsDerivedFunction(clean_lines, linenum):
05104     return
05105 
05106   # Don't warn on out-of-line method definitions, as we would warn on the
05107   # in-line declaration, if it isn't marked with 'override'.
05108   if IsOutOfLineMethodDefinition(clean_lines, linenum):
05109     return
05110 
05111   # Long type names may be broken across multiple lines, usually in one
05112   # of these forms:
05113   #   LongType
05114   #       ::LongTypeContinued &identifier
05115   #   LongType::
05116   #       LongTypeContinued &identifier
05117   #   LongType<
05118   #       ...>::LongTypeContinued &identifier
05119   #
05120   # If we detected a type split across two lines, join the previous
05121   # line to current line so that we can match const references
05122   # accordingly.
05123   #
05124   # Note that this only scans back one line, since scanning back
05125   # arbitrary number of lines would be expensive.  If you have a type
05126   # that spans more than 2 lines, please use a typedef.
05127   if linenum > 1:
05128     previous = None
05129     if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
05130       # previous_line\n + ::current_line
05131       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
05132                         clean_lines.elided[linenum - 1])
05133     elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
05134       # previous_line::\n + current_line
05135       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
05136                         clean_lines.elided[linenum - 1])
05137     if previous:
05138       line = previous.group(1) + line.lstrip()
05139     else:
05140       # Check for templated parameter that is split across multiple lines
05141       endpos = line.rfind('>')
05142       if endpos > -1:
05143         (_, startline, startpos) = ReverseCloseExpression(
05144             clean_lines, linenum, endpos)
05145         if startpos > -1 and startline < linenum:
05146           # Found the matching < on an earlier line, collect all
05147           # pieces up to current line.
05148           line = ''
05149           for i in xrange(startline, linenum + 1):
05150             line += clean_lines.elided[i].strip()
05151 
05152   # Check for non-const references in function parameters.  A single '&' may
05153   # be found in the following places:
05154   #   inside expression: binary & for bitwise AND
05155   #   inside expression: unary & for taking the address of something
05156   #   inside declarators: reference parameter
05157   # We will exclude the first two cases by checking that we are not inside a
05158   # function body, including one that was just introduced by a trailing '{'.
05159   # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
05160   if (nesting_state.previous_stack_top and
05161       not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
05162            isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
05163     # Not at toplevel, not within a class, and not within a namespace
05164     return
05165 
05166   # Avoid initializer lists.  We only need to scan back from the
05167   # current line for something that starts with ':'.
05168   #
05169   # We don't need to check the current line, since the '&' would
05170   # appear inside the second set of parentheses on the current line as
05171   # opposed to the first set.
05172   if linenum > 0:
05173     for i in xrange(linenum - 1, max(0, linenum - 10), -1):
05174       previous_line = clean_lines.elided[i]
05175       if not Search(r'[),]\s*$', previous_line):
05176         break
05177       if Match(r'^\s*:\s+\S', previous_line):
05178         return
05179 
05180   # Avoid preprocessors
05181   if Search(r'\\\s*$', line):
05182     return
05183 
05184   # Avoid constructor initializer lists
05185   if IsInitializerList(clean_lines, linenum):
05186     return
05187 
05188   # We allow non-const references in a few standard places, like functions
05189   # called "swap()" or iostream operators like "<<" or ">>".  Do not check
05190   # those function parameters.
05191   #
05192   # We also accept & in static_assert, which looks like a function but
05193   # it's actually a declaration expression.
05194   whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
05195                            r'operator\s*[<>][<>]|'
05196                            r'static_assert|COMPILE_ASSERT'
05197                            r')\s*\(')
05198   if Search(whitelisted_functions, line):
05199     return
05200   elif not Search(r'\S+\([^)]*$', line):
05201     # Don't see a whitelisted function on this line.  Actually we
05202     # didn't see any function name on this line, so this is likely a
05203     # multi-line parameter list.  Try a bit harder to catch this case.
05204     for i in xrange(2):
05205       if (linenum > i and
05206           Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
05207         return
05208 
05209   decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
05210   for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
05211     if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
05212       error(filename, linenum, 'runtime/references', 2,
05213             'Is this a non-const reference? '
05214             'If so, make const or use a pointer: ' +
05215             ReplaceAll(' *<', '<', parameter))
05216 
05217 
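# Illustrative sketch (not part of cpplint): C++ lines that the non-const
# reference check above would and would not flag, assuming default filters.
# 'Config' is a hypothetical type used only for illustration.
#
#   void Update(Config& config);        // flagged: runtime/references
#   void Update(const Config& config);  // not flagged
#   void Update(Config* config);        // not flagged
#   void swap(Config& a, Config& b);    // not flagged (whitelisted swap)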
05218 def CheckCasts(filename, clean_lines, linenum, error):
05219   """Various cast related checks.
05220 
05221   Args:
05222     filename: The name of the current file.
05223     clean_lines: A CleansedLines instance containing the file.
05224     linenum: The number of the line to check.
05225     error: The function to call with any errors found.
05226   """
05227   line = clean_lines.elided[linenum]
05228 
05229   # Check to see if they're using a conversion function cast.
05230   # I just try to capture the most common basic types, though there are more.
05231   # Parameterless conversion functions, such as bool(), are allowed as they are
05232   # probably a member operator declaration or default constructor.
05233   match = Search(
05234       r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
05235       r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
05236       r'(\([^)].*)', line)
05237   expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
05238   if match and not expecting_function:
05239     matched_type = match.group(2)
05240 
05241     # matched_new_or_template is used to silence two false positives:
05242     # - New operators
05243     # - Template arguments with function types
05244     #
05245     # For template arguments, we match on types immediately following
05246     # an opening bracket without any spaces.  This is a fast way to
05247     # silence the common case where the function type is the first
05248     # template argument.  False negative with less-than comparison is
05249     # avoided because those operators are usually followed by a space.
05250     #
05251     #   function<double(double)>   // bracket + no space = false positive
05252     #   value < double(42)         // bracket + space = true positive
05253     matched_new_or_template = match.group(1)
05254 
05255     # Avoid arrays by looking for brackets that come after the closing
05256     # parenthesis.
05257     if Match(r'\([^()]+\)\s*\[', match.group(3)):
05258       return
05259 
05260     # Other things to ignore:
05261     # - Function pointers
05262     # - Casts to pointer types
05263     # - Placement new
05264     # - Alias declarations
05265     matched_funcptr = match.group(3)
05266     if (matched_new_or_template is None and
05267         not (matched_funcptr and
05268              (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
05269                     matched_funcptr) or
05270               matched_funcptr.startswith('(*)'))) and
05271         not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
05272         not Search(r'new\(\S+\)\s*' + matched_type, line)):
05273       error(filename, linenum, 'readability/casting', 4,
05274             'Using deprecated casting style.  '
05275             'Use static_cast<%s>(...) instead' %
05276             matched_type)
05277 
05278   if not expecting_function:
05279     CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
05280                     r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
05281 
05282   # This doesn't catch all cases. Consider (const char * const)"hello".
05283   #
05284   # (char *) "foo" should always be a const_cast (reinterpret_cast won't
05285   # compile).
05286   if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
05287                      r'\((char\s?\*+\s?)\)\s*"', error):
05288     pass
05289   else:
05290     # Check pointer casts for other than string constants
05291     CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
05292                     r'\((\w+\s?\*+\s?)\)', error)
05293 
05294   # In addition, we look for people taking the address of a cast.  This
05295   # is dangerous -- casts can assign to temporaries, so the pointer doesn't
05296   # point where you think.
05297   #
05298   # Some non-identifier character is required before the '&' for the
05299   # expression to be recognized as a cast.  These are casts:
05300   #   expression = &static_cast<int*>(temporary());
05301   #   function(&(int*)(temporary()));
05302   #
05303   # This is not a cast:
05304   #   reference_type&(int* function_param);
05305   match = Search(
05306       r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
05307       r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
05308   if match:
05309     # Try a better error message when the & is bound to something
05310     # dereferenced by the casted pointer, as opposed to the casted
05311     # pointer itself.
05312     parenthesis_error = False
05313     match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
05314     if match:
05315       _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
05316       if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
05317         _, y2, x2 = CloseExpression(clean_lines, y1, x1)
05318         if x2 >= 0:
05319           extended_line = clean_lines.elided[y2][x2:]
05320           if y2 < clean_lines.NumLines() - 1:
05321             extended_line += clean_lines.elided[y2 + 1]
05322           if Match(r'\s*(?:->|\[)', extended_line):
05323             parenthesis_error = True
05324 
05325     if parenthesis_error:
05326       error(filename, linenum, 'readability/casting', 4,
05327             ('Are you taking an address of something dereferenced '
05328              'from a cast?  Wrapping the dereferenced expression in '
05329              'parentheses will make the binding more obvious'))
05330     else:
05331       error(filename, linenum, 'runtime/casting', 4,
05332             ('Are you taking an address of a cast?  '
05333              'This is dangerous: could be a temp var.  '
05334              'Take the address before doing the cast, rather than after'))
05335 
05336 
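# Illustrative sketch (not part of cpplint): casts that CheckCasts above
# reports, and the replacements it suggests.  'Temp' is a hypothetical
# function used only for illustration.
#
#   int n = int(3.5);                     // readability/casting: use
#                                         //   static_cast<int>(...) instead
#   char* p = (char*)"hello";             // readability/casting: use
#                                         //   const_cast<char*>(...) instead
#   int* q = &static_cast<int*>(Temp());  // runtime/casting: taking the
#                                         //   address of a cast (temporary)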
05337 def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
05338   """Checks for a C-style cast by looking for the pattern.
05339 
05340   Args:
05341     filename: The name of the current file.
05342     clean_lines: A CleansedLines instance containing the file.
05343     linenum: The number of the line to check.
05344     cast_type: The string for the C++ cast to recommend.  This is either
05345       reinterpret_cast, static_cast, or const_cast, depending.
05346     pattern: The regular expression used to find C-style casts.
05347     error: The function to call with any errors found.
05348 
05349   Returns:
05350     True if an error was emitted.
05351     False otherwise.
05352   """
05353   line = clean_lines.elided[linenum]
05354   match = Search(pattern, line)
05355   if not match:
05356     return False
05357 
05358   # Exclude lines with keywords that tend to look like casts
05359   context = line[0:match.start(1) - 1]
05360   if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
05361     return False
05362 
05363   # Try expanding the current context to see if we are inside one level of
05364   # parentheses inside a macro.
05365   if linenum > 0:
05366     for i in xrange(linenum - 1, max(0, linenum - 5), -1):
05367       context = clean_lines.elided[i] + context
05368   if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
05369     return False
05370 
05371   # operator++(int) and operator--(int)
05372   if context.endswith(' operator++') or context.endswith(' operator--'):
05373     return False
05374 
05375   # A single unnamed argument for a function tends to look like old
05376   # style cast.  If we see those, don't issue warnings for deprecated
05377   # casts; instead, issue warnings for unnamed arguments where
05378   # appropriate.
05379   #
05380   # These are things that we want warnings for, since the style guide
05381   # explicitly requires all parameters to be named:
05382   #   Function(int);
05383   #   Function(int) {
05384   #   ConstMember(int) const;
05385   #   ConstMember(int) const {
05386   #   ExceptionMember(int) throw (...);
05387   #   ExceptionMember(int) throw (...) {
05388   #   PureVirtual(int) = 0;
05389   #   [](int) -> bool {
05390   #
05391   # These are functions of some sort, where the compiler would be fine
05392   # if they had named parameters, but people often omit those
05393   # identifiers to reduce clutter:
05394   #   (FunctionPointer)(int);
05395   #   (FunctionPointer)(int) = value;
05396   #   Function((function_pointer_arg)(int))
05397   #   Function((function_pointer_arg)(int), int param)
05398   #   <TemplateArgument(int)>;
05399   #   <(FunctionPointerTemplateArgument)(int)>;
05400   remainder = line[match.end(0):]
05401   if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
05402            remainder):
05403     # Looks like an unnamed parameter.
05404 
05405     # Don't warn on any kind of template arguments.
05406     if Match(r'^\s*>', remainder):
05407       return False
05408 
05409     # Don't warn on assignments to function pointers, but keep warnings for
05410     # unnamed parameters to pure virtual functions.  Note that this pattern
05411     # will also pass on assignments of "0" to function pointers, but the
05412     # preferred values for those would be "nullptr" or "NULL".
05413     matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
05414     if matched_zero and matched_zero.group(1) != '0':
05415       return False
05416 
05417     # Don't warn on function pointer declarations.  For this we need
05418     # to check what came before the "(type)" string.
05419     if Match(r'.*\)\s*$', line[0:match.start(0)]):
05420       return False
05421 
05422     # Don't warn if the parameter is named with block comments, e.g.:
05423     #  Function(int /*unused_param*/);
05424     raw_line = clean_lines.raw_lines[linenum]
05425     if '/*' in raw_line:
05426       return False
05427 
05428     # Passed all filters, issue warning here.
05429     error(filename, linenum, 'readability/function', 3,
05430           'All parameters should be named in a function')
05431     return True
05432 
05433   # At this point, all that should be left is actual casts.
05434   error(filename, linenum, 'readability/casting', 4,
05435         'Using C-style cast.  Use %s<%s>(...) instead' %
05436         (cast_type, match.group(1)))
05437 
05438   return True
05439 
05440 
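# Illustrative sketch (not part of cpplint): besides casts, CheckCStyleCast
# above also flags unnamed parameters, since "(int)" in a declaration looks
# like a cast.
#
#   void Function(int);              // readability/function: name the param
#   void Function(int id);           // not flagged
#   void Function(int /*unused*/);   // not flagged (named via block comment)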
05441 def ExpectingFunctionArgs(clean_lines, linenum):
05442   """Checks whether where function type arguments are expected.
05443 
05444   Args:
05445     clean_lines: A CleansedLines instance containing the file.
05446     linenum: The number of the line to check.
05447 
05448   Returns:
05449     True if the line at 'linenum' is inside something that expects arguments
05450     of function types.
05451   """
05452   line = clean_lines.elided[linenum]
05453   return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
05454           (linenum >= 2 and
05455            (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
05456                   clean_lines.elided[linenum - 1]) or
05457             Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
05458                   clean_lines.elided[linenum - 2]) or
05459             Search(r'\bstd::m?function\s*<\s*$',
05460                    clean_lines.elided[linenum - 1]))))
05461 
05462 
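# Illustrative sketch (not part of cpplint): contexts where the helper above
# returns True, so cast-style warnings are suppressed for function types.
#
#   MOCK_METHOD1(Compute, double(double));   // gmock declaration
#   std::function<
#       int(int)> callback;                  // std::function split over lines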
05463 _HEADERS_CONTAINING_TEMPLATES = (
05464     ('<deque>', ('deque',)),
05465     ('<functional>', ('unary_function', 'binary_function',
05466                       'plus', 'minus', 'multiplies', 'divides', 'modulus',
05467                       'negate',
05468                       'equal_to', 'not_equal_to', 'greater', 'less',
05469                       'greater_equal', 'less_equal',
05470                       'logical_and', 'logical_or', 'logical_not',
05471                       'unary_negate', 'not1', 'binary_negate', 'not2',
05472                       'bind1st', 'bind2nd',
05473                       'pointer_to_unary_function',
05474                       'pointer_to_binary_function',
05475                       'ptr_fun',
05476                       'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
05477                       'mem_fun_ref_t',
05478                       'const_mem_fun_t', 'const_mem_fun1_t',
05479                       'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
05480                       'mem_fun_ref',
05481                      )),
05482     ('<limits>', ('numeric_limits',)),
05483     ('<list>', ('list',)),
05484     ('<map>', ('map', 'multimap',)),
05485     ('<memory>', ('allocator',)),
05486     ('<queue>', ('queue', 'priority_queue',)),
05487     ('<set>', ('set', 'multiset',)),
05488     ('<stack>', ('stack',)),
05489     ('<string>', ('char_traits', 'basic_string',)),
05490     ('<tuple>', ('tuple',)),
05491     ('<utility>', ('pair',)),
05492     ('<vector>', ('vector',)),
05493 
05494     # gcc extensions.
05495     # Note: std::hash is their hash, ::hash is our hash
05496     ('<hash_map>', ('hash_map', 'hash_multimap',)),
05497     ('<hash_set>', ('hash_set', 'hash_multiset',)),
05498     ('<slist>', ('slist',)),
05499     )
05500 
05501 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
05502 
05503 _re_pattern_algorithm_header = []
05504 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
05505                   'transform'):
05506   # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
05507   # type::max().
05508   _re_pattern_algorithm_header.append(
05509       (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
05510        _template,
05511        '<algorithm>'))
05512 
05513 _re_pattern_templates = []
05514 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
05515   for _template in _templates:
05516     _re_pattern_templates.append(
05517         (re.compile(r'(<|\b)' + _template + r'\s*<'),
05518          _template + '<>',
05519          _header))
05520 
05521 
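# Illustrative sketch (not part of cpplint): how the tables above drive the
# include-what-you-use check further down.
#
#   "std::vector<int> v;"   matches the pattern for 'vector<>'  -> <vector>
#   "std::min(a, b)"        matches an algorithm pattern        -> <algorithm>
#   "foo.max(a, b)"         does not match (the [^>.] prefix excludes members)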
05522 def FilesBelongToSameModule(filename_cc, filename_h):
05523   """Check if these two filenames belong to the same module.
05524 
05525   The concept of a 'module' here is as follows:
05526   foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
05527   same 'module' if they are in the same directory.
05528   some/path/public/xyzzy and some/path/internal/xyzzy are also considered
05529   to belong to the same module here.
05530 
05531   If the filename_cc contains a longer path than the filename_h, for example,
05532   '/absolute/path/to/base/sysinfo.cc', and this file would include
05533   'base/sysinfo.h', this function also produces the prefix needed to open the
05534   header. This is used by the caller of this function to more robustly open the
05535   header file. We don't have access to the real include paths in this context,
05536   so we need this guesswork here.
05537 
05538   Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
05539   according to this implementation. Because of this, this function gives
05540   some false positives. This should be sufficiently rare in practice.
05541 
05542   Args:
05543     filename_cc: is the path for the .cc file
05544     filename_h: is the path for the header path
05545 
05546   Returns:
05547     Tuple with a bool and a string:
05548     bool: True if filename_cc and filename_h belong to the same module.
05549     string: the additional prefix needed to open the header file.
05550   """
05551 
05552   if not filename_cc.endswith('.cc'):
05553     return (False, '')
05554   filename_cc = filename_cc[:-len('.cc')]
05555   if filename_cc.endswith('_unittest'):
05556     filename_cc = filename_cc[:-len('_unittest')]
05557   elif filename_cc.endswith('_test'):
05558     filename_cc = filename_cc[:-len('_test')]
05559   filename_cc = filename_cc.replace('/public/', '/')
05560   filename_cc = filename_cc.replace('/internal/', '/')
05561 
05562   if not filename_h.endswith('.h'):
05563     return (False, '')
05564   filename_h = filename_h[:-len('.h')]
05565   if filename_h.endswith('-inl'):
05566     filename_h = filename_h[:-len('-inl')]
05567   filename_h = filename_h.replace('/public/', '/')
05568   filename_h = filename_h.replace('/internal/', '/')
05569 
05570   files_belong_to_same_module = filename_cc.endswith(filename_h)
05571   common_path = ''
05572   if files_belong_to_same_module:
05573     common_path = filename_cc[:-len(filename_h)]
05574   return files_belong_to_same_module, common_path
05575 
05576 
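# Illustrative sketch (not part of cpplint), using hypothetical paths:
#
#   FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc', 'base/sysinfo.h')
#     -> (True, '/abs/path/to/')
#   FilesBelongToSameModule('foo/bar_test.cc', 'foo/bar.h')
#     -> (True, '')
#   FilesBelongToSameModule('foo/bar.cc', 'baz/qux.h')
#     -> (False, '')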
05577 def UpdateIncludeState(filename, include_dict, io=codecs):
05578   """Fill up the include_dict with new includes found from the file.
05579 
05580   Args:
05581     filename: the name of the header to read.
05582     include_dict: a dictionary in which the headers are inserted.
05583     io: The io factory to use to read the file. Provided for testability.
05584 
05585   Returns:
05586     True if a header was successfully added. False otherwise.
05587   """
05588   headerfile = None
05589   try:
05590     headerfile = io.open(filename, 'r', 'utf8', 'replace')
05591   except IOError:
05592     return False
05593   linenum = 0
05594   for line in headerfile:
05595     linenum += 1
05596     clean_line = CleanseComments(line)
05597     match = _RE_PATTERN_INCLUDE.search(clean_line)
05598     if match:
05599       include = match.group(2)
05600       include_dict.setdefault(include, linenum)
05601   return True
05602 
05603 
05604 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
05605                               io=codecs):
05606   """Reports for missing stl includes.
05607 
05608   This function will output warnings to make sure you are including the headers
05609   necessary for the stl containers and functions that you use. We only give one
05610   reason to include a header. For example, if you use both equal_to<> and
05611   less<> in a .h file, only one (the latter in the file) of these will be
05612   reported as a reason to include the <functional>.
05613 
05614   Args:
05615     filename: The name of the current file.
05616     clean_lines: A CleansedLines instance containing the file.
05617     include_state: An _IncludeState instance.
05618     error: The function to call with any errors found.
05619     io: The IO factory to use to read the header file. Provided for unittest
05620         injection.
05621   """
05622   required = {}  # A map of header name to linenumber and the template entity.
05623                  # Example of required: { '<functional>': (1219, 'less<>') }
05624 
05625   for linenum in xrange(clean_lines.NumLines()):
05626     line = clean_lines.elided[linenum]
05627     if not line or line[0] == '#':
05628       continue
05629 
05630     # String is special -- it is a non-templatized type in STL.
05631     matched = _RE_PATTERN_STRING.search(line)
05632     if matched:
05633       # Don't warn about strings in non-STL namespaces:
05634       # (We check only the first match per line; good enough.)
05635       prefix = line[:matched.start()]
05636       if prefix.endswith('std::') or not prefix.endswith('::'):
05637         required['<string>'] = (linenum, 'string')
05638 
05639     for pattern, template, header in _re_pattern_algorithm_header:
05640       if pattern.search(line):
05641         required[header] = (linenum, template)
05642 
05643     # The following check is just a speedup; no semantics are changed.
05644     if '<' not in line:  # Reduces the CPU time usage by skipping lines.
05645       continue
05646 
05647     for pattern, template, header in _re_pattern_templates:
05648       if pattern.search(line):
05649         required[header] = (linenum, template)
05650 
05651   # The policy is that if you #include something in foo.h you don't need to
05652   # include it again in foo.cc. Here, we will look at possible includes.
05653   # Let's flatten the include_state include_list and copy it into a dictionary.
05654   include_dict = dict([item for sublist in include_state.include_list
05655                        for item in sublist])
05656 
05657   # Did we find the header for this file (if any) and successfully load it?
05658   header_found = False
05659 
05660   # Use the absolute path so that matching works properly.
05661   abs_filename = FileInfo(filename).FullName()
05662 
05663   # For Emacs's flymake.
05664   # If cpplint is invoked from Emacs's flymake, a temporary file is generated
05665   # by flymake and that file name might end with '_flymake.cc'. In that case,
05666   # restore the original file name here so that the corresponding header file
05667   # can be found.
05668   # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
05669   # instead of 'foo_flymake.h'
05670   abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
05671 
05672   # include_dict is modified during iteration, so we iterate over a copy of
05673   # the keys.
05674   header_keys = include_dict.keys()
05675   for header in header_keys:
05676     (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
05677     fullpath = common_path + header
05678     if same_module and UpdateIncludeState(fullpath, include_dict, io):
05679       header_found = True
05680 
05681   # If we can't find the header file for a .cc, assume it's because we don't
05682   # know where to look. In that case we'll give up as we're not sure they
05683   # didn't include it in the .h file.
05684   # TODO(unknown): Do a better job of finding .h files so we are confident that
05685   # not having the .h file means there isn't one.
05686   if filename.endswith('.cc') and not header_found:
05687     return
05688 
05689   # All the lines have been processed, report the errors found.
05690   for required_header_unstripped in required:
05691     template = required[required_header_unstripped][1]
05692     if required_header_unstripped.strip('<>"') not in include_dict:
05693       error(filename, required[required_header_unstripped][0],
05694             'build/include_what_you_use', 4,
05695             'Add #include ' + required_header_unstripped + ' for ' + template)
05696 
05697 
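# Illustrative sketch (not part of cpplint): a foo.cc that uses std::vector
# without including <vector> directly, and whose foo.h does not include it
# either, gets a warning like (line number hypothetical):
#
#   foo.cc:42:  Add #include <vector> for vector<>
#     [build/include_what_you_use] [4]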
05698 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
05699 
05700 
05701 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
05702   """Check that make_pair's template arguments are deduced.
05703 
05704   G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
05705   specified explicitly, and such use isn't intended in any case.
05706 
05707   Args:
05708     filename: The name of the current file.
05709     clean_lines: A CleansedLines instance containing the file.
05710     linenum: The number of the line to check.
05711     error: The function to call with any errors found.
05712   """
05713   line = clean_lines.elided[linenum]
05714   match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
05715   if match:
05716     error(filename, linenum, 'build/explicit_make_pair',
05717           4,  # 4 = high confidence
05718           'For C++11-compatibility, omit template arguments from make_pair'
05719           ' OR use pair directly OR if appropriate, construct a pair directly')
05720 
05721 
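# Illustrative sketch (not part of cpplint):
#
#   auto p = std::make_pair<int, int>(1, 2);  // flagged: build/explicit_make_pair
#   auto q = std::make_pair(1, 2);            // not flagged (deduced)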
05722 def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
05723   """Check that default lambda captures are not used.
05724 
05725   Args:
05726     filename: The name of the current file.
05727     clean_lines: A CleansedLines instance containing the file.
05728     linenum: The number of the line to check.
05729     error: The function to call with any errors found.
05730   """
05731   line = clean_lines.elided[linenum]
05732 
05733   # A lambda introducer specifies a default capture if it starts with "[="
05734   # or if it starts with "[&" _not_ followed by an identifier.
05735   match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
05736   if match:
05737     # Found a potential error, check what comes after the lambda-introducer.
05738     # If it's not open parenthesis (for lambda-declarator) or open brace
05739     # (for compound-statement), it's not a lambda.
05740     line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
05741     if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
05742       error(filename, linenum, 'build/c++11',
05743             4,  # 4 = high confidence
05744             'Default lambda captures are an unapproved C++ feature.')
05745 
05746 
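# Illustrative sketch (not part of cpplint):
#
#   auto f = [=](int x) { return x + total; };       // flagged: build/c++11
#   auto g = [&total](int x) { return x + total; };  // not flagged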
05747 def CheckRedundantVirtual(filename, clean_lines, linenum, error):
05748   """Check if line contains a redundant "virtual" function-specifier.
05749 
05750   Args:
05751     filename: The name of the current file.
05752     clean_lines: A CleansedLines instance containing the file.
05753     linenum: The number of the line to check.
05754     error: The function to call with any errors found.
05755   """
05756   # Look for "virtual" on current line.
05757   line = clean_lines.elided[linenum]
05758   virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
05759   if not virtual: return
05760 
05761   # Ignore "virtual" keywords that are near access-specifiers.  These
05762   # are only used in class base-specifier and do not apply to member
05763   # functions.
05764   if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
05765       Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
05766     return
05767 
05768   # Ignore the "virtual" keyword from virtual base classes.  Usually
05769   # there is a colon on the same line in these cases (virtual base
05770   # classes are rare in google3 because multiple inheritance is rare).
05771   if Match(r'^.*[^:]:[^:].*$', line): return
05772 
05773   # Look for the next opening parenthesis.  This is the start of the
05774   # parameter list (possibly on the next line shortly after virtual).
05775   # TODO(unknown): doesn't work if there are virtual functions with
05776   # decltype() or other things that use parentheses, but csearch suggests
05777   # that this is rare.
05778   end_col = -1
05779   end_line = -1
05780   start_col = len(virtual.group(2))
05781   for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
05782     line = clean_lines.elided[start_line][start_col:]
05783     parameter_list = Match(r'^([^(]*)\(', line)
05784     if parameter_list:
05785       # Match parentheses to find the end of the parameter list
05786       (_, end_line, end_col) = CloseExpression(
05787           clean_lines, start_line, start_col + len(parameter_list.group(1)))
05788       break
05789     start_col = 0
05790 
05791   if end_col < 0:
05792     return  # Couldn't find end of parameter list, give up
05793 
05794   # Look for "override" or "final" after the parameter list
05795   # (possibly on the next few lines).
05796   for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
05797     line = clean_lines.elided[i][end_col:]
05798     match = Search(r'\b(override|final)\b', line)
05799     if match:
05800       error(filename, linenum, 'readability/inheritance', 4,
05801             ('"virtual" is redundant since function is '
05802              'already declared as "%s"' % match.group(1)))
05803 
05804     # Set end_col to check whole lines after we are done with the
05805     # first line.
05806     end_col = 0
05807     if Search(r'[^\w]\s*$', line):
05808       break
05809 
05810 
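# Illustrative sketch (not part of cpplint):
#
#   virtual void Draw() override;   // flagged: readability/inheritance
#                                   //   ("virtual" redundant with "override")
#   void Draw() override;           // not flagged
#   virtual void Draw();            // not flagged (no override/final)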
05811 def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
05812   """Check if line contains a redundant "override" or "final" virt-specifier.
05813 
05814   Args:
05815     filename: The name of the current file.
05816     clean_lines: A CleansedLines instance containing the file.
05817     linenum: The number of the line to check.
05818     error: The function to call with any errors found.
05819   """
05820   # Look for closing parenthesis nearby.  We need one to confirm where
05821   # the declarator ends and where the virt-specifier starts to avoid
05822   # false positives.
05823   line = clean_lines.elided[linenum]
05824   declarator_end = line.rfind(')')
05825   if declarator_end >= 0:
05826     fragment = line[declarator_end:]
05827   else:
05828     if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
05829       fragment = line
05830     else:
05831       return
05832 
05833   # Check that at most one of "override" or "final" is present, not both
05834   if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
05835     error(filename, linenum, 'readability/inheritance', 4,
05836           ('"override" is redundant since function is '
05837            'already declared as "final"'))
05838 
05839 
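# Illustrative sketch (not part of cpplint):
#
#   void Draw() final override;   // flagged: readability/inheritance
#                                 //   ("override" redundant with "final")
#   void Draw() override;         // not flagged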
05840 
05841 
05842 # Returns true if we are at a new block, and it is directly
05843 # inside of a namespace.
05844 def IsBlockInNameSpace(nesting_state, is_forward_declaration):
05845   """Checks that the new block is directly in a namespace.
05846 
05847   Args:
05848     nesting_state: The _NestingState object that contains info about our state.
05849     is_forward_declaration: If the class is a forward declared class.
05850   Returns:
05851     Whether or not the new block is directly in a namespace.
05852   """
05853   if is_forward_declaration:
05854     if len(nesting_state.stack) >= 1 and (
05855         isinstance(nesting_state.stack[-1], _NamespaceInfo)):
05856       return True
05857     else:
05858       return False
05859 
05860   return (len(nesting_state.stack) > 1 and
05861           nesting_state.stack[-1].check_namespace_indentation and
05862           isinstance(nesting_state.stack[-2], _NamespaceInfo))
05863 
05864 
05865 def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
05866                                     raw_lines_no_comments, linenum):
05867   """This method determines if we should apply our namespace indentation check.
05868 
05869   Args:
05870     nesting_state: The current nesting state.
05871     is_namespace_indent_item: If we just put a new class on the stack, True.
05872       If the top of the stack is not a class, or we did not recently
05873       add the class, False.
05874     raw_lines_no_comments: The lines without the comments.
05875     linenum: The current line number we are processing.
05876 
05877   Returns:
05878     True if we should apply our namespace indentation check. Currently, it
05879     only works for classes and namespaces inside of a namespace.
05880   """
05881 
05882   is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
05883                                                      linenum)
05884 
05885   if not (is_namespace_indent_item or is_forward_declaration):
05886     return False
05887 
05888   # If we are in a macro, we do not want to check the namespace indentation.
05889   if IsMacroDefinition(raw_lines_no_comments, linenum):
05890     return False
05891 
05892   return IsBlockInNameSpace(nesting_state, is_forward_declaration)
05893 
05894 
05895 # Call this method if the line is directly inside of a namespace.
05896 # If the line above is blank (excluding comments) or the start of
05897 # an inner namespace, it cannot be indented.
05898 def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
05899                                     error):
05900   line = raw_lines_no_comments[linenum]
05901   if Match(r'^\s+', line):
05902     error(filename, linenum, 'runtime/indentation_namespace', 4,
05903           'Do not indent within a namespace')
05904 
05905 
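# Illustrative sketch (not part of cpplint): inside a namespace, declarations
# are expected to start at column zero.  'demo' is a hypothetical namespace.
#
#   namespace demo {
#   class Widget;       // not flagged
#     class Gadget;     // flagged: runtime/indentation_namespace
#   }  // namespace demo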
05906 def ProcessLine(filename, file_extension, clean_lines, line,
05907                 include_state, function_state, nesting_state, error,
05908                 extra_check_functions=[]):
05909   """Processes a single line in the file.
05910 
05911   Args:
05912     filename: Filename of the file that is being processed.
05913     file_extension: The extension (dot not included) of the file.
05914     clean_lines: An array of strings, each representing a line of the file,
05915                  with comments stripped.
05916     line: Number of line being processed.
05917     include_state: An _IncludeState instance in which the headers are inserted.
05918     function_state: A _FunctionState instance which counts function lines, etc.
05919     nesting_state: A NestingState instance which maintains information about
05920                    the current stack of nested blocks being parsed.
05921     error: A callable to which errors are reported, which takes 4 arguments:
05922            filename, line number, error level, and message
05923     extra_check_functions: An array of additional check functions that will be
05924                            run on each source line. Each function takes 4
05925                            arguments: filename, clean_lines, line, error
05926   """
05927   raw_lines = clean_lines.raw_lines
05928   ParseNolintSuppressions(filename, raw_lines[line], line, error)
05929   nesting_state.Update(filename, clean_lines, line, error)
05930   CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
05931                                error)
05932   if nesting_state.InAsmBlock(): return
05933   CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
05934   CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
05935   CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
05936   CheckLanguage(filename, clean_lines, line, file_extension, include_state,
05937                 nesting_state, error)
05938   CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
05939   CheckForNonStandardConstructs(filename, clean_lines, line,
05940                                 nesting_state, error)
05941   CheckVlogArguments(filename, clean_lines, line, error)
05942   CheckPosixThreading(filename, clean_lines, line, error)
05943   CheckInvalidIncrement(filename, clean_lines, line, error)
05944   CheckMakePairUsesDeduction(filename, clean_lines, line, error)
05945   CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
05946   CheckRedundantVirtual(filename, clean_lines, line, error)
05947   CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
05948   for check_fn in extra_check_functions:
05949     check_fn(filename, clean_lines, line, error)
05950 
05951 def FlagCxx11Features(filename, clean_lines, linenum, error):
05952   """Flag those c++11 features that we only allow in certain places.
05953 
05954   Args:
05955     filename: The name of the current file.
05956     clean_lines: A CleansedLines instance containing the file.
05957     linenum: The number of the line to check.
05958     error: The function to call with any errors found.
05959   """
05960   line = clean_lines.elided[linenum]
05961 
05962   # Flag unapproved C++11 headers.
05963   include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
05964   if include and include.group(1) in ('cfenv',
05965                                       'condition_variable',
05966                                       'fenv.h',
05967                                       'future',
05968                                       'mutex',
05969                                       'thread',
05970                                       'chrono',
05971                                       'ratio',
05972                                       'regex',
05973                                       'system_error',
05974                                      ):
05975     error(filename, linenum, 'build/c++11', 5,
05976           ('<%s> is an unapproved C++11 header.') % include.group(1))
05977 
05978   # The only place where we need to worry about C++11 keywords and library
05979   # features in preprocessor directives is in macro definitions.
05980   if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
05981 
05982   # These are classes and free functions.  The classes are always
05983   # mentioned as std::*, but we only catch the free functions if
05984   # they're not found by ADL.  They're alphabetical by header.
05985   for top_name in (
05986       # type_traits
05987       'alignment_of',
05988       'aligned_union',
05989       ):
05990     if Search(r'\bstd::%s\b' % top_name, line):
05991       error(filename, linenum, 'build/c++11', 5,
05992             ('std::%s is an unapproved C++11 class or function.  Send c-style '
05993              'an example of where it would make your code more readable, and '
05994              'they may let you use it.') % top_name)
05995 
05996 
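# Illustrative sketch (not part of cpplint):
#
#   #include <thread>              // flagged: build/c++11 (unapproved header)
#   #include <vector>              // not flagged
#   std::alignment_of<T>::value    // flagged: build/c++11 (unapproved feature)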
05997 def ProcessFileData(filename, file_extension, lines, error,
05998                     extra_check_functions=[]):
05999   """Performs lint checks and reports any errors to the given error function.
06000 
06001   Args:
06002     filename: Filename of the file that is being processed.
06003     file_extension: The extension (dot not included) of the file.
06004     lines: An array of strings, each representing a line of the file, with the
06005            last element being empty if the file is terminated with a newline.
06006     error: A callable to which errors are reported, which takes 4 arguments:
06007            filename, line number, error level, and message
06008     extra_check_functions: An array of additional check functions that will be
06009                            run on each source line. Each function takes 4
06010                            arguments: filename, clean_lines, line, error
06011   """
06012   lines = (['// marker so line numbers and indices both start at 1'] + lines +
06013            ['// marker so line numbers end in a known way'])
06014 
06015   include_state = _IncludeState()
06016   function_state = _FunctionState()
06017   nesting_state = NestingState()
06018 
06019   ResetNolintSuppressions()
06020 
06021   CheckForCopyright(filename, lines, error)
06022 
06023   RemoveMultiLineComments(filename, lines, error)
06024   clean_lines = CleansedLines(lines)
06025 
06026   if file_extension == 'h':
06027     CheckForHeaderGuard(filename, clean_lines, error)
06028 
06029   for line in xrange(clean_lines.NumLines()):
06030     ProcessLine(filename, file_extension, clean_lines, line,
06031                 include_state, function_state, nesting_state, error,
06032                 extra_check_functions)
06033     FlagCxx11Features(filename, clean_lines, line, error)
06034   nesting_state.CheckCompletedBlocks(filename, error)
06035 
06036   CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
06037   
06038   # Check that the .cc file has included its header if it exists.
06039   if file_extension == 'cc':
06040     CheckHeaderFileIncluded(filename, include_state, error)
06041 
06042   # We check here rather than inside ProcessLine so that we see raw
06043   # lines rather than "cleaned" lines.
06044   CheckForBadCharacters(filename, lines, error)
06045 
06046   CheckForNewlineAtEOF(filename, lines, error)
06047 
06048 def ProcessConfigOverrides(filename):
06049   """ Loads the configuration files and processes the config overrides.
06050 
06051   Args:
06052     filename: The name of the file being processed by the linter.
06053 
06054   Returns:
06055     False if the current |filename| should not be processed further.
06056   """
06057 
06058   abs_filename = os.path.abspath(filename)
06059   cfg_filters = []
06060   keep_looking = True
06061   while keep_looking:
06062     abs_path, base_name = os.path.split(abs_filename)
06063     if not base_name:
06064       break  # Reached the root directory.
06065 
06066     cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
06067     abs_filename = abs_path
06068     if not os.path.isfile(cfg_file):
06069       continue
06070 
06071     try:
06072       with open(cfg_file) as file_handle:
06073         for line in file_handle:
06074           line, _, _ = line.partition('#')  # Remove comments.
06075           if not line.strip():
06076             continue
06077 
06078           name, _, val = line.partition('=')
06079           name = name.strip()
06080           val = val.strip()
06081           if name == 'set noparent':
06082             keep_looking = False
06083           elif name == 'filter':
06084             cfg_filters.append(val)
06085           elif name == 'exclude_files':
06086             # When matching exclude_files pattern, use the base_name of
06087             # the current file name or the directory name we are processing.
06088             # For example, if we are checking for lint errors in /foo/bar/baz.cc
06089             # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
06090             # file's "exclude_files" filter is meant to be checked against "bar"
06091             # and not "baz" nor "bar/baz.cc".
06092             if base_name:
06093               pattern = re.compile(val)
06094               if pattern.match(base_name):
06095                 sys.stderr.write('Ignoring "%s": file excluded by "%s". '
06096                                  'File path component "%s" matches '
06097                                  'pattern "%s"\n' %
06098                                  (filename, cfg_file, base_name, val))
06099                 return False
06100           elif name == 'linelength':
06101             global _line_length
06102             try:
06103                 _line_length = int(val)
06104             except ValueError:
06105                 sys.stderr.write('Line length must be numeric.')
06106           else:
06107             sys.stderr.write(
06108                 'Invalid configuration option (%s) in file %s\n' %
06109                 (name, cfg_file))
06110 
06111     except IOError:
06112       sys.stderr.write(
06113           "Skipping config file '%s': Can't open for reading\n" % cfg_file)
06114       keep_looking = False
06115 
06116   # Apply all the accumulated filters in reverse order (top-level directory
06117   # config options having the least priority).
06118   for filter in reversed(cfg_filters):
06119     _AddFilters(filter)
06120 
06121   return True
06122 
06123 
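# Illustrative sketch (not part of cpplint): a CPPLINT.cfg placed in a source
# directory might look like this, using the options recognized by the loop
# above (file pattern and length are hypothetical).
#
#   set noparent
#   filter=-build/include_order,+whitespace/tab
#   exclude_files=.*_generated\.cc
#   linelength=100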
06124 def ProcessFile(filename, vlevel, extra_check_functions=[]):
06125   """Does google-lint on a single file.
06126 
06127   Args:
06128     filename: The name of the file to parse.
06129 
06130     vlevel: The level of errors to report.  Every error of confidence
06131     >= verbose_level will be reported.  0 is a good default.
06132 
06133     extra_check_functions: An array of additional check functions that will be
06134                            run on each source line. Each function takes 4
06135                            arguments: filename, clean_lines, line, error
06136   """
06137 
06138   _SetVerboseLevel(vlevel)
06139   _BackupFilters()
06140 
06141   if not ProcessConfigOverrides(filename):
06142     _RestoreFilters()
06143     return
06144 
06145   lf_lines = []
06146   crlf_lines = []
06147   try:
06148     # Support the UNIX convention of using "-" for stdin.  Note that
06149     # we are not opening the file with universal newline support
06150     # (which codecs doesn't support anyway), so the resulting lines do
06151     # contain trailing '\r' characters if we are reading a file that
06152     # has CRLF endings.
06153     # If after the split a trailing '\r' is present, it is removed
06154     # below.
06155     if filename == '-':
06156       lines = codecs.StreamReaderWriter(sys.stdin,
06157                                         codecs.getreader('utf8'),
06158                                         codecs.getwriter('utf8'),
06159                                         'replace').read().split('\n')
06160     else:
06161       lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
06162 
06163     # Remove trailing '\r'.
06164     # The -1 accounts for the extra trailing blank line we get from split()
06165     for linenum in range(len(lines) - 1):
06166       if lines[linenum].endswith('\r'):
06167         lines[linenum] = lines[linenum].rstrip('\r')
06168         crlf_lines.append(linenum + 1)
06169       else:
06170         lf_lines.append(linenum + 1)
06171 
06172   except IOError:
06173     sys.stderr.write(
06174         "Skipping input '%s': Can't open for reading\n" % filename)
06175     _RestoreFilters()
06176     return
06177 
06178   # Note, if no dot is found, this will give the entire filename as the ext.
06179   file_extension = filename[filename.rfind('.') + 1:]
06180 
06181   # When reading from stdin, the extension is unknown, so no cpplint tests
06182   # should rely on the extension.
06183   if filename != '-' and file_extension not in _valid_extensions:
06184     sys.stderr.write('Ignoring %s; not a valid file name '
06185                      '(%s)\n' % (filename, ', '.join(_valid_extensions)))
06186   else:
06187     ProcessFileData(filename, file_extension, lines, Error,
06188                     extra_check_functions)
06189 
06190     # If end-of-line sequences are a mix of LF and CR-LF, issue
06191     # warnings on the lines with CR.
06192     #
06193     # Don't issue any warnings if all lines are uniformly LF or CR-LF,
06194     # since critique can handle these just fine, and the style guide
06195     # doesn't dictate a particular end of line sequence.
06196     #
06197     # We can't depend on os.linesep to determine what the desired
06198     # end-of-line sequence should be, since that will return the
06199     # server-side end-of-line sequence.
06200     if lf_lines and crlf_lines:
06201       # Warn on every line with CR.  An alternative approach might be to
06202       # check whether the file is mostly CRLF or just LF, and warn on the
06203       # minority; we bias toward LF here since most tools prefer LF.
06204       for linenum in crlf_lines:
06205         Error(filename, linenum, 'whitespace/newline', 1,
06206               'Unexpected \\r (^M) found; better to use only \\n')
06207 
06208   sys.stderr.write('Done processing %s\n' % filename)
06209   _RestoreFilters()
06210 
06211 
06212 def PrintUsage(message):
06213   """Prints a brief usage string and exits, optionally with an error message.
06214 
06215   Args:
06216     message: The optional error message.
06217   """
06218   sys.stderr.write(_USAGE)
06219   if message:
06220     sys.exit('\nFATAL ERROR: ' + message)
06221   else:
06222     sys.exit(1)
06223 
06224 
06225 def PrintCategories():
06226   """Prints a list of all the error-categories used by error messages.
06227 
06228   These are the categories used to filter messages via --filter.
06229   """
06230   sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
06231   sys.exit(0)
06232 
06233 
06234 def ParseArguments(args):
06235   """Parses the command line arguments.
06236 
06237   This may set the output format and verbosity level as side-effects.
06238 
06239   Args:
06240     args: The command line arguments:
06241 
06242   Returns:
06243     The list of filenames to lint.
06244   """
06245   try:
06246     (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
06247                                                  'counting=',
06248                                                  'filter=',
06249                                                  'root=',
06250                                                  'linelength=',
06251                                                  'extensions='])
06252   except getopt.GetoptError:
06253     PrintUsage('Invalid arguments.')
06254 
06255   verbosity = _VerboseLevel()
06256   output_format = _OutputFormat()
06257   filters = ''
06258   counting_style = ''
06259 
06260   for (opt, val) in opts:
06261     if opt == '--help':
06262       PrintUsage(None)
06263     elif opt == '--output':
06264       if val not in ('emacs', 'vs7', 'eclipse'):
06265         PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
06266       output_format = val
06267     elif opt == '--verbose':
06268       verbosity = int(val)
06269     elif opt == '--filter':
06270       filters = val
06271       if not filters:
06272         PrintCategories()
06273     elif opt == '--counting':
06274       if val not in ('total', 'toplevel', 'detailed'):
06275         PrintUsage('Valid counting options are total, toplevel, and detailed')
06276       counting_style = val
06277     elif opt == '--root':
06278       global _root
06279       _root = val
06280     elif opt == '--linelength':
06281       global _line_length
06282       try:
06283           _line_length = int(val)
06284       except ValueError:
06285           PrintUsage('Line length must be digits.')
06286     elif opt == '--extensions':
06287       global _valid_extensions
06288       try:
06289           _valid_extensions = set(val.split(','))
06290       except ValueError:
06291           PrintUsage('Extensions must be a comma-separated list.')
06292 
06293   if not filenames:
06294     PrintUsage('No files were specified.')
06295 
06296   _SetOutputFormat(output_format)
06297   _SetVerboseLevel(verbosity)
06298   _SetFilters(filters)
06299   _SetCountingStyle(counting_style)
06300 
06301   return filenames
06302 
06303 
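# Illustrative sketch (not part of cpplint): typical invocations handled by
# ParseArguments above (file names are hypothetical).
#
#   cpplint.py foo.cc foo.h
#   cpplint.py --filter=-whitespace/tab,+build/include --linelength=100 foo.cc
#   cpplint.py --counting=detailed --output=vs7 src/bar.cc
#   cpplint.py --filter=        # prints the error categories and exits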
06304 def main():
06305   filenames = ParseArguments(sys.argv[1:])
06306 
06307   # Change stderr to write with replacement characters so we don't die
06308   # if we try to print something containing non-ASCII characters.
06309   sys.stderr = codecs.StreamReaderWriter(sys.stderr,
06310                                          codecs.getreader('utf8'),
06311                                          codecs.getwriter('utf8'),
06312                                          'replace')
06313 
06314   _cpplint_state.ResetErrorCounts()
06315   for filename in filenames:
06316     ProcessFile(filename, _cpplint_state.verbose_level)
06317   _cpplint_state.PrintErrorCounts()
06318 
06319   sys.exit(_cpplint_state.error_count > 0)
06320 
06321 
06322 if __name__ == '__main__':
06323   main()

