cpplint.py
00001 #!/usr/bin/env python
00002 #
00003 # Copyright (c) 2009 Google Inc. All rights reserved.
00004 #
00005 # Redistribution and use in source and binary forms, with or without
00006 # modification, are permitted provided that the following conditions are
00007 # met:
00008 #
00009 #    * Redistributions of source code must retain the above copyright
00010 # notice, this list of conditions and the following disclaimer.
00011 #    * Redistributions in binary form must reproduce the above
00012 # copyright notice, this list of conditions and the following disclaimer
00013 # in the documentation and/or other materials provided with the
00014 # distribution.
00015 #    * Neither the name of Google Inc. nor the names of its
00016 # contributors may be used to endorse or promote products derived from
00017 # this software without specific prior written permission.
00018 #
00019 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00020 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00021 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
00022 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
00023 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00024 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
00025 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00026 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00027 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00028 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
00029 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00030 
00031 """Does google-lint on c++ files.
00032 
00033 The goal of this script is to identify places in the code that *may*
00034 be in non-compliance with google style.  It does not attempt to fix
00035 up these problems -- the point is to educate.  It also does not
00036 attempt to find all problems, or to ensure that everything it does
00037 find is legitimately a problem.
00038 
00039 In particular, we can get very confused by /* and // inside strings!
00040 We do a small hack, which is to ignore //'s with "'s after them on the
00041 same line, but it is far from perfect (in either direction).
00042 """
00043 
00044 import codecs
00045 import copy
00046 import getopt
00047 import math  # for log
00048 import os
00049 import re
00050 import sre_compile
00051 import string
00052 import sys
00053 import unicodedata
00054 
00055 try:
00056   xrange          # Python 2
00057 except NameError:
00058   xrange = range  # Python 3
00059 
00060 
00061 _USAGE = """
00062 Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
00063                    [--counting=total|toplevel|detailed] [--root=subdir]
00064                    [--linelength=digits]
00065         <file> [file] ...
00066 
00067   The style guidelines this tries to follow are those in
00068     http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
00069 
00070   Every problem is given a confidence score from 1-5, with 5 meaning we are
00071   certain of the problem, and 1 meaning it could be a legitimate construct.
00072   This will miss some errors, and is not a substitute for a code review.
00073 
00074   To suppress false-positive errors of a certain category, add a
00075   'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
00076   suppresses errors of all categories on that line.
00077 
00078   The files passed in will be linted; at least one file must be provided.
00079   Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the
00080   extensions with the --extensions flag.
00081 
00082   Flags:
00083 
00084     output=vs7
00085       By default, the output is formatted to ease emacs parsing.  Visual Studio
00086       compatible output (vs7) may also be used.  Other formats are unsupported.
00087 
00088     verbose=#
00089       Specify a number 0-5 to restrict errors to certain verbosity levels.
00090 
00091     filter=-x,+y,...
00092       Specify a comma-separated list of category-filters to apply: only
00093       error messages whose category names pass the filters will be printed.
00094       (Category names are printed with the message and look like
00095       "[whitespace/indent]".)  Filters are evaluated left to right.
00096       "-FOO" and "FOO" mean "do not print categories that start with FOO".
00097       "+FOO" means "do print categories that start with FOO".
00098 
00099       Examples: --filter=-whitespace,+whitespace/braces
00100                 --filter=whitespace,runtime/printf,+runtime/printf_format
00101                 --filter=-,+build/include_what_you_use
00102 
00103       To see a list of all the categories used in cpplint, pass no arg:
00104          --filter=
00105 
00106     counting=total|toplevel|detailed
00107       The total number of errors found is always printed. If
00108       'toplevel' is provided, then the count of errors in each of
00109       the top-level categories like 'build' and 'whitespace' will
00110       also be printed. If 'detailed' is provided, then a count
00111       is provided for each category like 'build/class'.
00112 
00113     root=subdir
00114       The root directory used for deriving the header guard CPP variable.
00115       By default, the header guard CPP variable is calculated as the relative
00116       path to the directory that contains .git, .hg, or .svn.  When this flag
00117       is specified, the relative path is calculated from the specified
00118       directory. If the specified directory does not exist, this flag is
00119       ignored.
00120 
00121       Examples:
00122         Assuming that src/.git exists, the header guard CPP variables for
00123         src/chrome/browser/ui/browser.h are:
00124 
00125         No flag => CHROME_BROWSER_UI_BROWSER_H_
00126         --root=chrome => BROWSER_UI_BROWSER_H_
00127         --root=chrome/browser => UI_BROWSER_H_
00128 
00129     linelength=digits
00130       This is the allowed line length for the project. The default value is
00131       80 characters.
00132 
00133       Examples:
00134         --linelength=120
00135 
00136     extensions=extension,extension,...
00137       The allowed file extensions that cpplint will check.
00138 
00139       Examples:
00140         --extensions=hpp,cpp
00141 
00142     cpplint.py supports per-directory configurations specified in CPPLINT.cfg
00143     files. A CPPLINT.cfg file can contain a number of key=value pairs.
00144     Currently the following options are supported:
00145 
00146       set noparent
00147       filter=+filter1,-filter2,...
00148       exclude_files=regex
00149       linelength=80
00150 
00151     The "set noparent" option prevents cpplint from traversing the directory
00152     tree upwards looking for more .cfg files in parent directories. This option
00153     is usually placed in the top-level project directory.
00154 
00155     The "filter" option is similar in function to the --filter flag. It
00156     specifies message filters in addition to the |_DEFAULT_FILTERS| and those
00157     specified through the --filter command-line flag.
00158 
00159     "exclude_files" allows you to specify a regular expression to be matched
00160     against a file name. If the expression matches, the file is skipped and
00161     not run through the linter.
00162 
00163     "linelength" allows you to specify the allowed line length for the project.
00164 
00165     CPPLINT.cfg has an effect on files in the same directory and all
00166     sub-directories, unless overridden by a nested configuration file.
00167 
00168       Example file:
00169         filter=-build/include_order,+build/include_alpha
00170         exclude_files=.*\.cc
00171 
00172     The above example disables the build/include_order warning, enables
00173     build/include_alpha, and excludes all .cc files from being
00174     processed by the linter, in the current directory (where the .cfg
00175     file is located) and all sub-directories.
00176 """
00177 
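# Example invocations matching the usage text above (illustrative only; the
# file names are made up):
#
#   cpplint.py --filter=-whitespace/tab,+build/include --linelength=100 foo.cc
#   cpplint.py --counting=toplevel --extensions=h,cc,cpp widget.cc widget.h
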
00178 # We categorize each error message we print.  Here are the categories.
00179 # We want an explicit list so we can list them all in cpplint --filter=.
00180 # If you add a new error message with a new category, add it to the list
00181 # here!  cpplint_unittest.py should tell you if you forget to do this.
00182 _ERROR_CATEGORIES = [
00183     'build/class',
00184     'build/c++11',
00185     'build/deprecated',
00186     'build/endif_comment',
00187     'build/explicit_make_pair',
00188     'build/forward_decl',
00189     'build/header_guard',
00190     'build/include',
00191     'build/include_alpha',
00192     'build/include_order',
00193     'build/include_what_you_use',
00194     'build/namespaces',
00195     'build/printf_format',
00196     'build/storage_class',
00197     'legal/copyright',
00198     'readability/alt_tokens',
00199     'readability/braces',
00200     'readability/casting',
00201     'readability/check',
00202     'readability/constructors',
00203     'readability/fn_size',
00204     'readability/function',
00205     'readability/inheritance',
00206     'readability/multiline_comment',
00207     'readability/multiline_string',
00208     'readability/namespace',
00209     'readability/nolint',
00210     'readability/nul',
00211     'readability/strings',
00212     'readability/todo',
00213     'readability/utf8',
00214     'runtime/arrays',
00215     'runtime/casting',
00216     'runtime/explicit',
00217     'runtime/int',
00218     'runtime/init',
00219     'runtime/invalid_increment',
00220     'runtime/member_string_references',
00221     'runtime/memset',
00222     'runtime/indentation_namespace',
00223     'runtime/operator',
00224     'runtime/printf',
00225     'runtime/printf_format',
00226     'runtime/references',
00227     'runtime/string',
00228     'runtime/threadsafe_fn',
00229     'runtime/vlog',
00230     'whitespace/blank_line',
00231     'whitespace/braces',
00232     'whitespace/comma',
00233     'whitespace/comments',
00234     'whitespace/empty_conditional_body',
00235     'whitespace/empty_loop_body',
00236     'whitespace/end_of_line',
00237     'whitespace/ending_newline',
00238     'whitespace/forcolon',
00239     'whitespace/indent',
00240     'whitespace/line_length',
00241     'whitespace/newline',
00242     'whitespace/operators',
00243     'whitespace/parens',
00244     'whitespace/semicolon',
00245     'whitespace/tab',
00246     'whitespace/todo',
00247     ]
00248 
00249 # These error categories are no longer enforced by cpplint, but for backwards-
00250 # compatibility they may still appear in NOLINT comments.
00251 _LEGACY_ERROR_CATEGORIES = [
00252     'readability/streams',
00253     ]
00254 
00255 # The default state of the category filter. This is overridden by the --filter=
00256 # flag. By default all errors are on, so only add here categories that should be
00257 # off by default (i.e., categories that must be enabled by the --filter= flags).
00258 # All entries here should start with a '-' or '+', as in the --filter= flag.
00259 _DEFAULT_FILTERS = ['-build/include_alpha']
00260 
00261 # We used to check for high-bit characters, but after much discussion we
00262 # decided those were OK, as long as they were in UTF-8 and didn't represent
00263 # hard-coded international strings, which belong in a separate i18n file.
00264 
00265 # C++ headers
00266 _CPP_HEADERS = frozenset([
00267     # Legacy
00268     'algobase.h',
00269     'algo.h',
00270     'alloc.h',
00271     'builtinbuf.h',
00272     'bvector.h',
00273     'complex.h',
00274     'defalloc.h',
00275     'deque.h',
00276     'editbuf.h',
00277     'fstream.h',
00278     'function.h',
00279     'hash_map',
00280     'hash_map.h',
00281     'hash_set',
00282     'hash_set.h',
00283     'hashtable.h',
00284     'heap.h',
00285     'indstream.h',
00286     'iomanip.h',
00287     'iostream.h',
00288     'istream.h',
00289     'iterator.h',
00290     'list.h',
00291     'map.h',
00292     'multimap.h',
00293     'multiset.h',
00294     'ostream.h',
00295     'pair.h',
00296     'parsestream.h',
00297     'pfstream.h',
00298     'procbuf.h',
00299     'pthread_alloc',
00300     'pthread_alloc.h',
00301     'rope',
00302     'rope.h',
00303     'ropeimpl.h',
00304     'set.h',
00305     'slist',
00306     'slist.h',
00307     'stack.h',
00308     'stdiostream.h',
00309     'stl_alloc.h',
00310     'stl_relops.h',
00311     'streambuf.h',
00312     'stream.h',
00313     'strfile.h',
00314     'strstream.h',
00315     'tempbuf.h',
00316     'tree.h',
00317     'type_traits.h',
00318     'vector.h',
00319     # 17.6.1.2 C++ library headers
00320     'algorithm',
00321     'array',
00322     'atomic',
00323     'bitset',
00324     'chrono',
00325     'codecvt',
00326     'complex',
00327     'condition_variable',
00328     'deque',
00329     'exception',
00330     'forward_list',
00331     'fstream',
00332     'functional',
00333     'future',
00334     'initializer_list',
00335     'iomanip',
00336     'ios',
00337     'iosfwd',
00338     'iostream',
00339     'istream',
00340     'iterator',
00341     'limits',
00342     'list',
00343     'locale',
00344     'map',
00345     'memory',
00346     'mutex',
00347     'new',
00348     'numeric',
00349     'ostream',
00350     'queue',
00351     'random',
00352     'ratio',
00353     'regex',
00354     'set',
00355     'sstream',
00356     'stack',
00357     'stdexcept',
00358     'streambuf',
00359     'string',
00360     'strstream',
00361     'system_error',
00362     'thread',
00363     'tuple',
00364     'typeindex',
00365     'typeinfo',
00366     'type_traits',
00367     'unordered_map',
00368     'unordered_set',
00369     'utility',
00370     'valarray',
00371     'vector',
00372     # 17.6.1.2 C++ headers for C library facilities
00373     'cassert',
00374     'ccomplex',
00375     'cctype',
00376     'cerrno',
00377     'cfenv',
00378     'cfloat',
00379     'cinttypes',
00380     'ciso646',
00381     'climits',
00382     'clocale',
00383     'cmath',
00384     'csetjmp',
00385     'csignal',
00386     'cstdalign',
00387     'cstdarg',
00388     'cstdbool',
00389     'cstddef',
00390     'cstdint',
00391     'cstdio',
00392     'cstdlib',
00393     'cstring',
00394     'ctgmath',
00395     'ctime',
00396     'cuchar',
00397     'cwchar',
00398     'cwctype',
00399     ])
00400 
00401 
00402 # These headers are excluded from [build/include] and [build/include_order]
00403 # checks:
00404 # - Anything not following google file name conventions (containing an
00405 #   uppercase character, such as Python.h or nsStringAPI.h, for example).
00406 # - Lua headers.
00407 _THIRD_PARTY_HEADERS_PATTERN = re.compile(
00408     r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
00409 
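# A quick illustration (added for this listing, not part of cpplint) of the
# pattern above: headers containing an uppercase letter and the Lua headers
# are treated as third-party and skipped by the include checks.
def _demo_third_party_headers():
  assert _THIRD_PARTY_HEADERS_PATTERN.match('Python.h')
  assert _THIRD_PARTY_HEADERS_PATTERN.match('lua.h')
  assert not _THIRD_PARTY_HEADERS_PATTERN.match('vector')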
00410 
00411 # Assertion macros.  These are defined in base/logging.h and
00412 # testing/base/gunit.h.  Note that the _M versions need to come first
00413 # for substring matching to work.
00414 _CHECK_MACROS = [
00415     'DCHECK', 'CHECK',
00416     'EXPECT_TRUE_M', 'EXPECT_TRUE',
00417     'ASSERT_TRUE_M', 'ASSERT_TRUE',
00418     'EXPECT_FALSE_M', 'EXPECT_FALSE',
00419     'ASSERT_FALSE_M', 'ASSERT_FALSE',
00420     ]
00421 
00422 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
00423 _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
00424 
00425 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
00426                         ('>=', 'GE'), ('>', 'GT'),
00427                         ('<=', 'LE'), ('<', 'LT')]:
00428   _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
00429   _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
00430   _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
00431   _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
00432   _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
00433   _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
00434 
00435 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
00436                             ('>=', 'LT'), ('>', 'LE'),
00437                             ('<=', 'GT'), ('<', 'GE')]:
00438   _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
00439   _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
00440   _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
00441   _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
00442 
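# Illustrative sanity check (added for this listing, not part of cpplint):
# the tables built above map a comparison found inside CHECK()/EXPECT_TRUE()
# etc. to the dedicated macro that cpplint will suggest instead.
def _demo_check_replacement():
  # CHECK(a == b) is reported with the suggestion CHECK_EQ(a, b).
  assert _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
  # EXPECT_FALSE(a < b) maps to the inverted macro EXPECT_GE(a, b).
  assert _CHECK_REPLACEMENT['EXPECT_FALSE']['<'] == 'EXPECT_GE'
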
00443 # Alternative tokens and their replacements.  For full list, see section 2.5
00444 # Alternative tokens [lex.digraph] in the C++ standard.
00445 #
00446 # Digraphs (such as '%:') are not included here since it's a mess to
00447 # match those on a word boundary.
00448 _ALT_TOKEN_REPLACEMENT = {
00449     'and': '&&',
00450     'bitor': '|',
00451     'or': '||',
00452     'xor': '^',
00453     'compl': '~',
00454     'bitand': '&',
00455     'and_eq': '&=',
00456     'or_eq': '|=',
00457     'xor_eq': '^=',
00458     'not': '!',
00459     'not_eq': '!='
00460     }
00461 
00462 # Compile regular expression that matches all the above keywords.  The "[ =()]"
00463 # bit is meant to avoid matching these keywords outside of boolean expressions.
00464 #
00465 # False positives include C-style multi-line comments and multi-line strings
00466 # but those have always been troublesome for cpplint.
00467 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
00468     r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
00469 
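# Small illustration (added for this listing, not part of cpplint): the
# pattern above picks out an alternative token such as 'and' when it is used
# as a boolean operator.
def _demo_alt_token_pattern():
  match = _ALT_TOKEN_REPLACEMENT_PATTERN.search('if (a and not b) return;')
  assert match and match.group(1) == 'and'
  assert _ALT_TOKEN_REPLACEMENT[match.group(1)] == '&&'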
00470 
00471 # These constants define types of headers for use with
00472 # _IncludeState.CheckNextIncludeOrder().
00473 _C_SYS_HEADER = 1
00474 _CPP_SYS_HEADER = 2
00475 _LIKELY_MY_HEADER = 3
00476 _POSSIBLE_MY_HEADER = 4
00477 _OTHER_HEADER = 5
00478 
00479 # These constants define the current inline assembly state
00480 _NO_ASM = 0       # Outside of inline assembly block
00481 _INSIDE_ASM = 1   # Inside inline assembly block
00482 _END_ASM = 2      # Last line of inline assembly block
00483 _BLOCK_ASM = 3    # The whole block is an inline assembly block
00484 
00485 # Match start of assembly blocks
00486 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
00487                         r'(?:\s+(volatile|__volatile__))?'
00488                         r'\s*[{(]')
00489 
00490 
00491 _regexp_compile_cache = {}
00492 
00493 # {str, set(int)}: a map from error categories to sets of linenumbers
00494 # on which those errors are expected and should be suppressed.
00495 _error_suppressions = {}
00496 
00497 # The root directory used for deriving the header guard CPP variable.
00498 # This is set by the --root flag.
00499 _root = None
00500 
00501 # The allowed line length of files.
00502 # This is set by the --linelength flag.
00503 _line_length = 80
00504 
00505 # The allowed extensions for file names
00506 # This is set by the --extensions flag.
00507 _valid_extensions = set(['cc', 'h', 'hpp', 'cpp', 'cu', 'cuh'])
00508 
00509 def ParseNolintSuppressions(filename, raw_line, linenum, error):
00510   """Updates the global list of error-suppressions.
00511 
00512   Parses any NOLINT comments on the current line, updating the global
00513   error_suppressions store.  Reports an error if the NOLINT comment
00514   was malformed.
00515 
00516   Args:
00517     filename: str, the name of the input file.
00518     raw_line: str, the line of input text, with comments.
00519     linenum: int, the number of the current line.
00520     error: function, an error handler.
00521   """
00522   matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
00523   if matched:
00524     if matched.group(1):
00525       suppressed_line = linenum + 1
00526     else:
00527       suppressed_line = linenum
00528     category = matched.group(2)
00529     if category in (None, '(*)'):  # => "suppress all"
00530       _error_suppressions.setdefault(None, set()).add(suppressed_line)
00531     else:
00532       if category.startswith('(') and category.endswith(')'):
00533         category = category[1:-1]
00534         if category in _ERROR_CATEGORIES:
00535           _error_suppressions.setdefault(category, set()).add(suppressed_line)
00536         elif category not in _LEGACY_ERROR_CATEGORIES:
00537           error(filename, linenum, 'readability/nolint', 5,
00538                 'Unknown NOLINT error category: %s' % category)
00539 
00540 
00541 def ResetNolintSuppressions():
00542   """Resets the set of NOLINT suppressions to empty."""
00543   _error_suppressions.clear()
00544 
00545 
00546 def IsErrorSuppressedByNolint(category, linenum):
00547   """Returns true if the specified error category is suppressed on this line.
00548 
00549   Consults the global error_suppressions map populated by
00550   ParseNolintSuppressions/ResetNolintSuppressions.
00551 
00552   Args:
00553     category: str, the category of the error.
00554     linenum: int, the current line number.
00555   Returns:
00556     bool, True iff the error should be suppressed due to a NOLINT comment.
00557   """
00558   return (linenum in _error_suppressions.get(category, set()) or
00559           linenum in _error_suppressions.get(None, set()))
00560 
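# Minimal sketch (added for this listing, not part of cpplint) of how a
# NOLINT(category) comment feeds the suppression map that
# IsErrorSuppressedByNolint() consults.
def _demo_nolint_suppression():
  ResetNolintSuppressions()
  ParseNolintSuppressions('demo.cc', 'long x;  // NOLINT(runtime/int)', 12,
                          lambda *args: None)  # error handler is a no-op here
  assert IsErrorSuppressedByNolint('runtime/int', 12)
  assert not IsErrorSuppressedByNolint('runtime/int', 13)
  ResetNolintSuppressions()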
00561 
00562 def Match(pattern, s):
00563   """Matches the string with the pattern, caching the compiled regexp."""
00564   # The regexp compilation caching is inlined in both Match and Search for
00565   # performance reasons; factoring it out into a separate function turns out
00566   # to be noticeably expensive.
00567   if pattern not in _regexp_compile_cache:
00568     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00569   return _regexp_compile_cache[pattern].match(s)
00570 
00571 
00572 def ReplaceAll(pattern, rep, s):
00573   """Replaces instances of pattern in a string with a replacement.
00574 
00575   The compiled regex is kept in a cache shared by Match and Search.
00576 
00577   Args:
00578     pattern: regex pattern
00579     rep: replacement text
00580     s: search string
00581 
00582   Returns:
00583     string with replacements made (or original string if no replacements)
00584   """
00585   if pattern not in _regexp_compile_cache:
00586     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00587   return _regexp_compile_cache[pattern].sub(rep, s)
00588 
00589 
00590 def Search(pattern, s):
00591   """Searches the string for the pattern, caching the compiled regexp."""
00592   if pattern not in _regexp_compile_cache:
00593     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00594   return _regexp_compile_cache[pattern].search(s)
00595 
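# Quick illustration (added for this listing, not part of cpplint):
# Match/Search/ReplaceAll compile each pattern once and then reuse it from
# _regexp_compile_cache.
def _demo_cached_regex_helpers():
  assert Match(r'\d+', '42abc')              # anchored at the start
  assert Search(r'abc$', '42abc')            # anywhere in the string
  assert ReplaceAll(r'\s+', ' ', 'a   b') == 'a b'
  assert r'\d+' in _regexp_compile_cache     # compiled pattern is cached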
00596 
00597 class _IncludeState(object):
00598   """Tracks line numbers for includes, and the order in which includes appear.
00599 
00600   include_list contains a list of lists of (header, line number) pairs.
00601   It's a list of lists rather than just one flat list to make it
00602   easier to update across preprocessor boundaries.
00603 
00604   Call CheckNextIncludeOrder() once for each header in the file, passing
00605   in the type constants defined above. Calls in an illegal order will
00606   raise an _IncludeError with an appropriate error message.
00607 
00608   """
00609   # self._section will move monotonically through this set. If it ever
00610   # needs to move backwards, CheckNextIncludeOrder will raise an error.
00611   _INITIAL_SECTION = 0
00612   _MY_H_SECTION = 1
00613   _C_SECTION = 2
00614   _CPP_SECTION = 3
00615   _OTHER_H_SECTION = 4
00616 
00617   _TYPE_NAMES = {
00618       _C_SYS_HEADER: 'C system header',
00619       _CPP_SYS_HEADER: 'C++ system header',
00620       _LIKELY_MY_HEADER: 'header this file implements',
00621       _POSSIBLE_MY_HEADER: 'header this file may implement',
00622       _OTHER_HEADER: 'other header',
00623       }
00624   _SECTION_NAMES = {
00625       _INITIAL_SECTION: "... nothing. (This can't be an error.)",
00626       _MY_H_SECTION: 'a header this file implements',
00627       _C_SECTION: 'C system header',
00628       _CPP_SECTION: 'C++ system header',
00629       _OTHER_H_SECTION: 'other header',
00630       }
00631 
00632   def __init__(self):
00633     self.include_list = [[]]
00634     self.ResetSection('')
00635 
00636   def FindHeader(self, header):
00637     """Check if a header has already been included.
00638 
00639     Args:
00640       header: header to check.
00641     Returns:
00642       Line number of previous occurrence, or -1 if the header has not
00643       been seen before.
00644     """
00645     for section_list in self.include_list:
00646       for f in section_list:
00647         if f[0] == header:
00648           return f[1]
00649     return -1
00650 
00651   def ResetSection(self, directive):
00652     """Reset section checking for preprocessor directive.
00653 
00654     Args:
00655       directive: preprocessor directive (e.g. "if", "else").
00656     """
00657     # The name of the current section.
00658     self._section = self._INITIAL_SECTION
00659     # The path of last found header.
00660     self._last_header = ''
00661 
00662     # Update list of includes.  Note that we never pop from the
00663     # include list.
00664     if directive in ('if', 'ifdef', 'ifndef'):
00665       self.include_list.append([])
00666     elif directive in ('else', 'elif'):
00667       self.include_list[-1] = []
00668 
00669   def SetLastHeader(self, header_path):
00670     self._last_header = header_path
00671 
00672   def CanonicalizeAlphabeticalOrder(self, header_path):
00673     """Returns a path canonicalized for alphabetical comparison.
00674 
00675     - replaces "-" with "_" so they both compare the same.
00676     - removes '-inl' since we don't require them to be after the main header.
00677     - lowercase everything, just in case.
00678 
00679     Args:
00680       header_path: Path to be canonicalized.
00681 
00682     Returns:
00683       Canonicalized path.
00684     """
00685     return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
00686 
00687   def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
00688     """Check if a header is in alphabetical order with the previous header.
00689 
00690     Args:
00691       clean_lines: A CleansedLines instance containing the file.
00692       linenum: The number of the line to check.
00693       header_path: Canonicalized header to be checked.
00694 
00695     Returns:
00696       Returns true if the header is in alphabetical order.
00697     """
00698     # If previous section is different from current section, _last_header will
00699     # be reset to empty string, so it's always less than current header.
00700     #
00701     # If previous line was a blank line, assume that the headers are
00702     # intentionally sorted the way they are.
00703     if (self._last_header > header_path and
00704         Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
00705       return False
00706     return True
00707 
00708   def CheckNextIncludeOrder(self, header_type):
00709     """Returns a non-empty error message if the next header is out of order.
00710 
00711     This function also updates the internal state to be ready to check
00712     the next include.
00713 
00714     Args:
00715       header_type: One of the _XXX_HEADER constants defined above.
00716 
00717     Returns:
00718       The empty string if the header is in the right order, or an
00719       error message describing what's wrong.
00720 
00721     """
00722     error_message = ('Found %s after %s' %
00723                      (self._TYPE_NAMES[header_type],
00724                       self._SECTION_NAMES[self._section]))
00725 
00726     last_section = self._section
00727 
00728     if header_type == _C_SYS_HEADER:
00729       if self._section <= self._C_SECTION:
00730         self._section = self._C_SECTION
00731       else:
00732         self._last_header = ''
00733         return error_message
00734     elif header_type == _CPP_SYS_HEADER:
00735       if self._section <= self._CPP_SECTION:
00736         self._section = self._CPP_SECTION
00737       else:
00738         self._last_header = ''
00739         return error_message
00740     elif header_type == _LIKELY_MY_HEADER:
00741       if self._section <= self._MY_H_SECTION:
00742         self._section = self._MY_H_SECTION
00743       else:
00744         self._section = self._OTHER_H_SECTION
00745     elif header_type == _POSSIBLE_MY_HEADER:
00746       if self._section <= self._MY_H_SECTION:
00747         self._section = self._MY_H_SECTION
00748       else:
00749         # This will always be the fallback because we're not sure
00750         # enough that the header is associated with this file.
00751         self._section = self._OTHER_H_SECTION
00752     else:
00753       assert header_type == _OTHER_HEADER
00754       self._section = self._OTHER_H_SECTION
00755 
00756     if last_section != self._section:
00757       self._last_header = ''
00758 
00759     return ''
00760 
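# Sketch (added for this listing, not part of cpplint) of how
# CheckNextIncludeOrder() reports a C system header that appears after a
# C++ system header.
def _demo_include_order():
  state = _IncludeState()
  assert state.CheckNextIncludeOrder(_CPP_SYS_HEADER) == ''
  message = state.CheckNextIncludeOrder(_C_SYS_HEADER)
  assert message == 'Found C system header after C++ system header'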
00761 
00762 class _CppLintState(object):
00763   """Maintains module-wide state."""
00764 
00765   def __init__(self):
00766     self.verbose_level = 1  # global setting.
00767     self.error_count = 0    # global count of reported errors
00768     # filters to apply when emitting error messages
00769     self.filters = _DEFAULT_FILTERS[:]
00770     # backup of filter list. Used to restore the state after each file.
00771     self._filters_backup = self.filters[:]
00772     self.counting = 'total'  # In what way are we counting errors?
00773     self.errors_by_category = {}  # string to int dict storing error counts
00774 
00775     # output format:
00776     # "emacs" - format that emacs can parse (default)
00777     # "vs7" - format that Microsoft Visual Studio 7 can parse
00778     self.output_format = 'emacs'
00779 
00780   def SetOutputFormat(self, output_format):
00781     """Sets the output format for errors."""
00782     self.output_format = output_format
00783 
00784   def SetVerboseLevel(self, level):
00785     """Sets the module's verbosity, and returns the previous setting."""
00786     last_verbose_level = self.verbose_level
00787     self.verbose_level = level
00788     return last_verbose_level
00789 
00790   def SetCountingStyle(self, counting_style):
00791     """Sets the module's counting options."""
00792     self.counting = counting_style
00793 
00794   def SetFilters(self, filters):
00795     """Sets the error-message filters.
00796 
00797     These filters are applied when deciding whether to emit a given
00798     error message.
00799 
00800     Args:
00801       filters: A string of comma-separated filters (eg "+whitespace/indent").
00802                Each filter should start with + or -; else we die.
00803 
00804     Raises:
00805       ValueError: The comma-separated filters did not all start with '+' or '-'.
00806                   E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
00807     """
00808     # Default filters always have less priority than the flag ones.
00809     self.filters = _DEFAULT_FILTERS[:]
00810     self.AddFilters(filters)
00811 
00812   def AddFilters(self, filters):
00813     """ Adds more filters to the existing list of error-message filters. """
00814     for filt in filters.split(','):
00815       clean_filt = filt.strip()
00816       if clean_filt:
00817         self.filters.append(clean_filt)
00818     for filt in self.filters:
00819       if not (filt.startswith('+') or filt.startswith('-')):
00820         raise ValueError('Every filter in --filter must start with + or -'
00821                          ' (%s does not)' % filt)
00822 
00823   def BackupFilters(self):
00824     """ Saves the current filter list to backup storage."""
00825     self._filters_backup = self.filters[:]
00826 
00827   def RestoreFilters(self):
00828     """ Restores filters previously backed up."""
00829     self.filters = self._filters_backup[:]
00830 
00831   def ResetErrorCounts(self):
00832     """Sets the module's error statistic back to zero."""
00833     self.error_count = 0
00834     self.errors_by_category = {}
00835 
00836   def IncrementErrorCount(self, category):
00837     """Bumps the module's error statistic."""
00838     self.error_count += 1
00839     if self.counting in ('toplevel', 'detailed'):
00840       if self.counting != 'detailed':
00841         category = category.split('/')[0]
00842       if category not in self.errors_by_category:
00843         self.errors_by_category[category] = 0
00844       self.errors_by_category[category] += 1
00845 
00846   def PrintErrorCounts(self):
00847     """Print a summary of errors by category, and the total."""
00848     for category, count in self.errors_by_category.items():
00849       sys.stderr.write('Category \'%s\' errors found: %d\n' %
00850                        (category, count))
00851     sys.stderr.write('Total errors found: %d\n' % self.error_count)
00852 
00853 _cpplint_state = _CppLintState()
00854 
00855 
00856 def _OutputFormat():
00857   """Gets the module's output format."""
00858   return _cpplint_state.output_format
00859 
00860 
00861 def _SetOutputFormat(output_format):
00862   """Sets the module's output format."""
00863   _cpplint_state.SetOutputFormat(output_format)
00864 
00865 
00866 def _VerboseLevel():
00867   """Returns the module's verbosity setting."""
00868   return _cpplint_state.verbose_level
00869 
00870 
00871 def _SetVerboseLevel(level):
00872   """Sets the module's verbosity, and returns the previous setting."""
00873   return _cpplint_state.SetVerboseLevel(level)
00874 
00875 
00876 def _SetCountingStyle(level):
00877   """Sets the module's counting options."""
00878   _cpplint_state.SetCountingStyle(level)
00879 
00880 
00881 def _Filters():
00882   """Returns the module's list of output filters, as a list."""
00883   return _cpplint_state.filters
00884 
00885 
00886 def _SetFilters(filters):
00887   """Sets the module's error-message filters.
00888 
00889   These filters are applied when deciding whether to emit a given
00890   error message.
00891 
00892   Args:
00893     filters: A string of comma-separated filters (eg "whitespace/indent").
00894              Each filter should start with + or -; else we die.
00895   """
00896   _cpplint_state.SetFilters(filters)
00897 
00898 def _AddFilters(filters):
00899   """Adds more filter overrides.
00900 
00901   Unlike _SetFilters, this function does not reset the current list of filters
00902   available.
00903 
00904   Args:
00905     filters: A string of comma-separated filters (eg "whitespace/indent").
00906              Each filter should start with + or -; else we die.
00907   """
00908   _cpplint_state.AddFilters(filters)
00909 
00910 def _BackupFilters():
00911   """ Saves the current filter list to backup storage."""
00912   _cpplint_state.BackupFilters()
00913 
00914 def _RestoreFilters():
00915   """ Restores filters previously backed up."""
00916   _cpplint_state.RestoreFilters()
00917 
00918 class _FunctionState(object):
00919   """Tracks current function name and the number of lines in its body."""
00920 
00921   _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
00922   _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
00923 
00924   def __init__(self):
00925     self.in_a_function = False
00926     self.lines_in_function = 0
00927     self.current_function = ''
00928 
00929   def Begin(self, function_name):
00930     """Start analyzing function body.
00931 
00932     Args:
00933       function_name: The name of the function being tracked.
00934     """
00935     self.in_a_function = True
00936     self.lines_in_function = 0
00937     self.current_function = function_name
00938 
00939   def Count(self):
00940     """Count line in current function body."""
00941     if self.in_a_function:
00942       self.lines_in_function += 1
00943 
00944   def Check(self, error, filename, linenum):
00945     """Report if too many lines in function body.
00946 
00947     Args:
00948       error: The function to call with any errors found.
00949       filename: The name of the current file.
00950       linenum: The number of the line to check.
00951     """
00952     if Match(r'T(EST|est)', self.current_function):
00953       base_trigger = self._TEST_TRIGGER
00954     else:
00955       base_trigger = self._NORMAL_TRIGGER
00956     trigger = base_trigger * 2**_VerboseLevel()
00957 
00958     if self.lines_in_function > trigger:
00959       error_level = int(math.log(self.lines_in_function / base_trigger, 2))
00960       # base_trigger => 0, 2x => 1, 4x => 2, 8x => 3, 16x => 4, 32x => 5, ...
00961       if error_level > 5:
00962         error_level = 5
00963       error(filename, linenum, 'readability/fn_size', error_level,
00964             'Small and focused functions are preferred:'
00965             ' %s has %d non-comment lines'
00966             ' (error triggered by exceeding %d lines).'  % (
00967                 self.current_function, self.lines_in_function, trigger))
00968 
00969   def End(self):
00970     """Stop analyzing function body."""
00971     self.in_a_function = False
00972 
00973 
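# Illustrative helper (added for this listing, not part of cpplint): with the
# default verbosity of 1, a non-test function may have up to 500 non-comment
# lines (_NORMAL_TRIGGER doubled once) before readability/fn_size is reported.
def _demo_function_state_trigger():
  state = _FunctionState()
  state.Begin('MyFunction')
  for _ in range(600):
    state.Count()
  reported = []
  state.Check(lambda *args: reported.append(args), 'demo.cc', 42)
  assert reported and reported[0][2] == 'readability/fn_size'
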
00974 class _IncludeError(Exception):
00975   """Indicates a problem with the include order in a file."""
00976   pass
00977 
00978 
00979 class FileInfo(object):
00980   """Provides utility functions for filenames.
00981 
00982   FileInfo provides easy access to the components of a file's path
00983   relative to the project root.
00984   """
00985 
00986   def __init__(self, filename):
00987     self._filename = filename
00988 
00989   def FullName(self):
00990     """Make Windows paths like Unix."""
00991     return os.path.abspath(self._filename).replace('\\', '/')
00992 
00993   def RepositoryName(self):
00994     """FullName after removing the local path to the repository.
00995 
00996     If we have a real absolute path name here we can try to do something smart:
00997     detecting the root of the checkout and truncating /path/to/checkout from
00998     the name so that we get header guards that don't include things like
00999     "C:\Documents and Settings\..." or "/home/username/..." in them and thus
01000     people on different computers who have checked the source out to different
01001     locations won't see bogus errors.
01002     """
01003     fullname = self.FullName()
01004 
01005     if os.path.exists(fullname):
01006       project_dir = os.path.dirname(fullname)
01007 
01008       if os.path.exists(os.path.join(project_dir, ".svn")):
01009         # If there's a .svn file in the current directory, we recursively look
01010         # up the directory tree for the top of the SVN checkout
01011         root_dir = project_dir
01012         one_up_dir = os.path.dirname(root_dir)
01013         while os.path.exists(os.path.join(one_up_dir, ".svn")):
01014           root_dir = os.path.dirname(root_dir)
01015           one_up_dir = os.path.dirname(one_up_dir)
01016 
01017         prefix = os.path.commonprefix([root_dir, project_dir])
01018         return fullname[len(prefix) + 1:]
01019 
01020       # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
01021       # searching up from the current path.
01022       root_dir = os.path.dirname(fullname)
01023       while (root_dir != os.path.dirname(root_dir) and
01024              not os.path.exists(os.path.join(root_dir, ".git")) and
01025              not os.path.exists(os.path.join(root_dir, ".hg")) and
01026              not os.path.exists(os.path.join(root_dir, ".svn"))):
01027         root_dir = os.path.dirname(root_dir)
01028 
01029       if (os.path.exists(os.path.join(root_dir, ".git")) or
01030           os.path.exists(os.path.join(root_dir, ".hg")) or
01031           os.path.exists(os.path.join(root_dir, ".svn"))):
01032         prefix = os.path.commonprefix([root_dir, project_dir])
01033         return fullname[len(prefix) + 1:]
01034 
01035     # Don't know what to do; header guard warnings may be wrong...
01036     return fullname
01037 
01038   def Split(self):
01039     """Splits the file into the directory, basename, and extension.
01040 
01041     For 'chrome/browser/browser.cc', Split() would
01042     return ('chrome/browser', 'browser', '.cc')
01043 
01044     Returns:
01045       A tuple of (directory, basename, extension).
01046     """
01047 
01048     googlename = self.RepositoryName()
01049     project, rest = os.path.split(googlename)
01050     return (project,) + os.path.splitext(rest)
01051 
01052   def BaseName(self):
01053     """File base name - text after the final slash, before the final period."""
01054     return self.Split()[1]
01055 
01056   def Extension(self):
01057     """File extension - text following the final period."""
01058     return self.Split()[2]
01059 
01060   def NoExtension(self):
01061     """File has no source file extension."""
01062     return '/'.join(self.Split()[0:2])
01063 
01064   def IsSource(self):
01065     """File has a source file extension."""
01066     return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
01067 
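# Illustration (added for this listing, not part of cpplint) of
# FileInfo.Split(); the directory component depends on where the checkout
# root is found, so only the basename and extension are checked here.
def _demo_fileinfo_split():
  _, basename, extension = FileInfo('chrome/browser/browser.cc').Split()
  assert basename == 'browser'
  assert extension == '.cc'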
01068 
01069 def _ShouldPrintError(category, confidence, linenum):
01070   """If confidence >= verbose, category passes filter and is not suppressed."""
01071 
01072   # There are three ways we might decide not to print an error message:
01073   # a "NOLINT(category)" comment appears in the source,
01074   # the verbosity level isn't high enough, or the filters filter it out.
01075   if IsErrorSuppressedByNolint(category, linenum):
01076     return False
01077 
01078   if confidence < _cpplint_state.verbose_level:
01079     return False
01080 
01081   is_filtered = False
01082   for one_filter in _Filters():
01083     if one_filter.startswith('-'):
01084       if category.startswith(one_filter[1:]):
01085         is_filtered = True
01086     elif one_filter.startswith('+'):
01087       if category.startswith(one_filter[1:]):
01088         is_filtered = False
01089     else:
01090       assert False  # should have been checked for in SetFilter.
01091   if is_filtered:
01092     return False
01093 
01094   return True
01095 
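# Illustrative example (added for this listing, not part of cpplint) of the
# filter semantics used by _ShouldPrintError(): filters apply left to right,
# so '-whitespace' followed by '+whitespace/braces' silences all whitespace
# checks except braces.
def _demo_filters():
  _SetFilters('-whitespace,+whitespace/braces')
  assert not _ShouldPrintError('whitespace/indent', 5, 1)
  assert _ShouldPrintError('whitespace/braces', 5, 1)
  _SetFilters('')  # back to the default filters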
01096 
01097 def Error(filename, linenum, category, confidence, message):
01098   """Logs the fact we've found a lint error.
01099 
01100   We log where the error was found, and also our confidence in the error,
01101   that is, how certain we are this is a legitimate style regression, and
01102   not a misidentification or a use that's sometimes justified.
01103 
01104   False positives can be suppressed by the use of
01105   "NOLINT(category)"  comments on the offending line.  These are
01106   parsed into _error_suppressions.
01107 
01108   Args:
01109     filename: The name of the file containing the error.
01110     linenum: The number of the line containing the error.
01111     category: A string used to describe the "category" this bug
01112       falls under: "whitespace", say, or "runtime".  Categories
01113       may have a hierarchy separated by slashes: "whitespace/indent".
01114     confidence: A number from 1-5 representing a confidence score for
01115       the error, with 5 meaning that we are certain of the problem,
01116       and 1 meaning that it could be a legitimate construct.
01117     message: The error message.
01118   """
01119   if _ShouldPrintError(category, confidence, linenum):
01120     _cpplint_state.IncrementErrorCount(category)
01121     if _cpplint_state.output_format == 'vs7':
01122       sys.stderr.write('%s(%s):  %s  [%s] [%d]\n' % (
01123           filename, linenum, message, category, confidence))
01124     elif _cpplint_state.output_format == 'eclipse':
01125       sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
01126           filename, linenum, message, category, confidence))
01127     else:
01128       sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
01129           filename, linenum, message, category, confidence))
01130 
01131 
01132 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
01133 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
01134     r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
01135 # Match a single C style comment on the same line.
01136 _RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
01137 # Matches multi-line C style comments.
01138 # This RE is a little bit more complicated than one might expect, because we
01139 # have to take care of removing the spaces around comments so we can handle
01140 # comments inside statements better.
01141 # The current rule is: We only clear spaces from both sides when we're at the
01142 # end of the line. Otherwise, we try to remove spaces from the right side,
01143 # if this doesn't work we try the left side, but only if there's a non-word
01144 # character on the right.
01145 _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
01146     r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
01147     _RE_PATTERN_C_COMMENTS + r'\s+|' +
01148     r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
01149     _RE_PATTERN_C_COMMENTS + r')')
01150 
01151 
01152 def IsCppString(line):
01153   """Does the line terminate so that the next symbol is inside a string constant.
01154 
01155   This function does not consider single-line nor multi-line comments.
01156 
01157   Args:
01158     line: a partial line of code, from position 0 to n.
01159 
01160   Returns:
01161     True, if the next character appended to 'line' would be inside a
01162     string constant.
01163   """
01164 
01165   line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
01166   return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
01167 
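# Quick check (added for this listing, not part of cpplint): IsCppString()
# reports whether the next character appended to the line would fall inside
# a double-quoted string constant.
def _demo_is_cpp_string():
  assert IsCppString('printf("unterminated')
  assert not IsCppString('printf("closed")')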
01168 
01169 def CleanseRawStrings(raw_lines):
01170   """Removes C++11 raw strings from lines.
01171 
01172     Before:
01173       static const char kData[] = R"(
01174           multi-line string
01175           )";
01176 
01177     After:
01178       static const char kData[] = ""
01179           (replaced by blank line)
01180           "";
01181 
01182   Args:
01183     raw_lines: list of raw lines.
01184 
01185   Returns:
01186     list of lines with C++11 raw strings replaced by empty strings.
01187   """
01188 
01189   delimiter = None
01190   lines_without_raw_strings = []
01191   for line in raw_lines:
01192     if delimiter:
01193       # Inside a raw string, look for the end
01194       end = line.find(delimiter)
01195       if end >= 0:
01196         # Found the end of the string, match leading space for this
01197         # line and resume copying the original lines, and also insert
01198         # a "" on the last line.
01199         leading_space = Match(r'^(\s*)\S', line)
01200         line = leading_space.group(1) + '""' + line[end + len(delimiter):]
01201         delimiter = None
01202       else:
01203         # Haven't found the end yet, append a blank line.
01204         line = '""'
01205 
01206     # Look for beginning of a raw string, and replace them with
01207     # empty strings.  This is done in a loop to handle multiple raw
01208     # strings on the same line.
01209     while delimiter is None:
01210       # Look for beginning of a raw string.
01211       # See 2.14.15 [lex.string] for syntax.
01212       matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
01213       if matched:
01214         delimiter = ')' + matched.group(2) + '"'
01215 
01216         end = matched.group(3).find(delimiter)
01217         if end >= 0:
01218           # Raw string ended on same line
01219           line = (matched.group(1) + '""' +
01220                   matched.group(3)[end + len(delimiter):])
01221           delimiter = None
01222         else:
01223           # Start of a multi-line raw string
01224           line = matched.group(1) + '""'
01225       else:
01226         break
01227 
01228     lines_without_raw_strings.append(line)
01229 
01230   # TODO(unknown): if delimiter is not None here, we might want to
01231   # emit a warning for unterminated string.
01232   return lines_without_raw_strings
01233 
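# Sketch (added for this listing, not part of cpplint) of CleanseRawStrings()
# on a multi-line raw string: the contents are blanked out so that later
# checks are not confused by quotes or comment markers inside the literal.
def _demo_cleanse_raw_strings():
  cleansed = CleanseRawStrings([
      'const char kData[] = R"(',
      '  // not a comment',
      ')";',
  ])
  assert cleansed == ['const char kData[] = ""', '""', '"";']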
01234 
01235 def FindNextMultiLineCommentStart(lines, lineix):
01236   """Find the beginning marker for a multiline comment."""
01237   while lineix < len(lines):
01238     if lines[lineix].strip().startswith('/*'):
01239       # Only return this marker if the comment goes beyond this line
01240       if lines[lineix].strip().find('*/', 2) < 0:
01241         return lineix
01242     lineix += 1
01243   return len(lines)
01244 
01245 
01246 def FindNextMultiLineCommentEnd(lines, lineix):
01247   """We are inside a comment, find the end marker."""
01248   while lineix < len(lines):
01249     if lines[lineix].strip().endswith('*/'):
01250       return lineix
01251     lineix += 1
01252   return len(lines)
01253 
01254 
01255 def RemoveMultiLineCommentsFromRange(lines, begin, end):
01256   """Clears a range of lines for multi-line comments."""
01257   # Replacing the lines with '/**/' keeps them non-empty, so we will not get
01258   # unnecessary blank line warnings later in the code.
01259   for i in range(begin, end):
01260     lines[i] = '/**/'
01261 
01262 
01263 def RemoveMultiLineComments(filename, lines, error):
01264   """Removes multiline (c-style) comments from lines."""
01265   lineix = 0
01266   while lineix < len(lines):
01267     lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
01268     if lineix_begin >= len(lines):
01269       return
01270     lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
01271     if lineix_end >= len(lines):
01272       error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
01273             'Could not find end of multi-line comment')
01274       return
01275     RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
01276     lineix = lineix_end + 1
01277 
01278 
01279 def CleanseComments(line):
01280   """Removes //-comments and single-line C-style /* */ comments.
01281 
01282   Args:
01283     line: A line of C++ source.
01284 
01285   Returns:
01286     The line with single-line comments removed.
01287   """
01288   commentpos = line.find('//')
01289   if commentpos != -1 and not IsCppString(line[:commentpos]):
01290     line = line[:commentpos].rstrip()
01291   # get rid of /* ... */
01292   return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
01293 
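# Illustration (added for this listing, not part of cpplint):
# CleanseComments() strips //-comments and single-line /* */ comments.
def _demo_cleanse_comments():
  assert CleanseComments('int x = 0;  // count') == 'int x = 0;'
  assert CleanseComments('f(/* arg */ 1);') == 'f(1);'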
01294 
01295 class CleansedLines(object):
01296   """Holds 4 copies of all lines with different preprocessing applied to them.
01297 
01298   1) elided member contains lines without strings and comments.
01299   2) lines member contains lines without comments.
01300   3) raw_lines member contains all the lines without processing.
01301   4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
01302      strings removed.
01303   All these members are of <type 'list'>, and of the same length.
01304   """
01305 
01306   def __init__(self, lines):
01307     self.elided = []
01308     self.lines = []
01309     self.raw_lines = lines
01310     self.num_lines = len(lines)
01311     self.lines_without_raw_strings = CleanseRawStrings(lines)
01312     for linenum in range(len(self.lines_without_raw_strings)):
01313       self.lines.append(CleanseComments(
01314           self.lines_without_raw_strings[linenum]))
01315       elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
01316       self.elided.append(CleanseComments(elided))
01317 
01318   def NumLines(self):
01319     """Returns the number of lines represented."""
01320     return self.num_lines
01321 
01322   @staticmethod
01323   def _CollapseStrings(elided):
01324     """Collapses strings and chars on a line to simple "" or '' blocks.
01325 
01326     We nix strings first so we're not fooled by text like '"http://"'
01327 
01328     Args:
01329       elided: The line being processed.
01330 
01331     Returns:
01332       The line with collapsed strings.
01333     """
01334     if _RE_PATTERN_INCLUDE.match(elided):
01335       return elided
01336 
01337     # Remove escaped characters first to make quote/single quote collapsing
01338     # basic.  Things that look like escaped characters shouldn't occur
01339     # outside of strings and chars.
01340     elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
01341 
01342     # Replace quoted strings and digit separators.  Both single quotes
01343     # and double quotes are processed in the same loop, otherwise
01344     # nested quotes wouldn't work.
01345     collapsed = ''
01346     while True:
01347       # Find the first quote character
01348       match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
01349       if not match:
01350         collapsed += elided
01351         break
01352       head, quote, tail = match.groups()
01353 
01354       if quote == '"':
01355         # Collapse double quoted strings
01356         second_quote = tail.find('"')
01357         if second_quote >= 0:
01358           collapsed += head + '""'
01359           elided = tail[second_quote + 1:]
01360         else:
01361           # Unmatched double quote, don't bother processing the rest
01362           # of the line since this is probably a multiline string.
01363           collapsed += elided
01364           break
01365       else:
01366         # Found single quote, check nearby text to eliminate digit separators.
01367         #
01368         # There is no special handling for floating point here, because
01369         # the integer/fractional/exponent parts would all be parsed
01370         # correctly as long as there are digits on both sides of the
01371         # separator.  So we are fine as long as we don't see something
01372         # like "0.'3" (gcc 4.9.0 will not allow this literal).
01373         if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
01374           match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
01375           collapsed += head + match_literal.group(1).replace("'", '')
01376           elided = match_literal.group(2)
01377         else:
01378           second_quote = tail.find('\'')
01379           if second_quote >= 0:
01380             collapsed += head + "''"
01381             elided = tail[second_quote + 1:]
01382           else:
01383             # Unmatched single quote
01384             collapsed += elided
01385             break
01386 
01387     return collapsed
01388 
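# Sketch (added for this listing, not part of cpplint) of the different views
# kept by CleansedLines; note that the class relies on _RE_PATTERN_INCLUDE,
# which is defined further down in the full module.
def _demo_cleansed_lines():
  lines = CleansedLines(['const char* p = "/tmp";  // scratch dir'])
  assert lines.lines[0] == 'const char* p = "/tmp";'   # comment removed
  assert lines.elided[0] == 'const char* p = "";'      # string collapsed too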
01389 
01390 def FindEndOfExpressionInLine(line, startpos, stack):
01391   """Find the position just after the end of current parenthesized expression.
01392 
01393   Args:
01394     line: a CleansedLines line.
01395     startpos: start searching at this position.
01396     stack: nesting stack at startpos.
01397 
01398   Returns:
01399     On finding matching end: (index just after matching end, None)
01400     On finding an unclosed expression: (-1, None)
01401     Otherwise: (-1, new stack at end of this line)
01402   """
01403   for i in xrange(startpos, len(line)):
01404     char = line[i]
01405     if char in '([{':
01406       # Found start of parenthesized expression, push to expression stack
01407       stack.append(char)
01408     elif char == '<':
01409       # Found potential start of template argument list
01410       if i > 0 and line[i - 1] == '<':
01411         # Left shift operator
01412         if stack and stack[-1] == '<':
01413           stack.pop()
01414           if not stack:
01415             return (-1, None)
01416       elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
01417         # operator<, don't add to stack
01418         continue
01419       else:
01420         # Tentative start of template argument list
01421         stack.append('<')
01422     elif char in ')]}':
01423       # Found end of parenthesized expression.
01424       #
01425       # If we are currently expecting a matching '>', the pending '<'
01426       # must have been an operator.  Remove them from expression stack.
01427       while stack and stack[-1] == '<':
01428         stack.pop()
01429       if not stack:
01430         return (-1, None)
01431       if ((stack[-1] == '(' and char == ')') or
01432           (stack[-1] == '[' and char == ']') or
01433           (stack[-1] == '{' and char == '}')):
01434         stack.pop()
01435         if not stack:
01436           return (i + 1, None)
01437       else:
01438         # Mismatched parentheses
01439         return (-1, None)
01440     elif char == '>':
01441       # Found potential end of template argument list.
01442 
01443       # Ignore "->" and operator functions
01444       if (i > 0 and
01445           (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
01446         continue
01447 
01448       # Pop the stack if there is a matching '<'.  Otherwise, ignore
01449       # this '>' since it must be an operator.
01450       if stack:
01451         if stack[-1] == '<':
01452           stack.pop()
01453           if not stack:
01454             return (i + 1, None)
01455     elif char == ';':
01456       # Found something that looks like the end of a statement.  If we are
01457       # currently expecting a '>', the matching '<' must have been an operator,
01458       # since template argument lists should not contain statements.
01459       while stack and stack[-1] == '<':
01460         stack.pop()
01461       if not stack:
01462         return (-1, None)
01463 
01464   # Did not find end of expression or unbalanced parentheses on this line
01465   return (-1, stack)
01466 
01467 
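# Illustrative sketch (not part of the original cpplint code): how the return
# values of FindEndOfExpressionInLine differ when the closing token is or is
# not found on the same line.  The inputs below are made-up examples.
def _demo_find_end_of_expression_in_line():
  # Closing ')' found on the same line: (index just past it, None).
  end, stack = FindEndOfExpressionInLine('foo(a, b) + c', 3, [])
  assert (end, stack) == (9, None)
  # No closing token on this line: (-1, the still-open nesting stack).
  end, stack = FindEndOfExpressionInLine('foo(a, b', 3, [])
  assert end == -1 and stack == ['(']

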
01468 def CloseExpression(clean_lines, linenum, pos):
01469   """If input points to ( or { or [ or <, finds the position that closes it.
01470 
01471   If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
01472   linenum/pos that correspond to the closing of the expression.
01473 
01474   TODO(unknown): cpplint spends a fair bit of time matching parentheses.
01475   Ideally we would want to index all opening and closing parentheses once
01476   and have CloseExpression be just a simple lookup, but due to preprocessor
01477   tricks, this is not so easy.
01478 
01479   Args:
01480     clean_lines: A CleansedLines instance containing the file.
01481     linenum: The number of the line to check.
01482     pos: A position on the line.
01483 
01484   Returns:
01485     A tuple (line, linenum, pos) pointer *past* the closing brace, or
01486     (line, len(lines), -1) if we never find a close.  Note we ignore
01487     strings and comments when matching; and the line we return is the
01488     'cleansed' line at linenum.
01489   """
01490 
01491   line = clean_lines.elided[linenum]
01492   if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
01493     return (line, clean_lines.NumLines(), -1)
01494 
01495   # Check first line
01496   (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
01497   if end_pos > -1:
01498     return (line, linenum, end_pos)
01499 
01500   # Continue scanning forward
01501   while stack and linenum < clean_lines.NumLines() - 1:
01502     linenum += 1
01503     line = clean_lines.elided[linenum]
01504     (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
01505     if end_pos > -1:
01506       return (line, linenum, end_pos)
01507 
01508   # Did not find end of expression before end of file, give up
01509   return (line, clean_lines.NumLines(), -1)
01510 
01511 
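# Illustrative sketch (not part of the original cpplint code): CloseExpression
# scans forward, possibly across lines, and returns a (line, linenum, pos)
# triple pointing just past the matching closer.  CleansedLines is assumed
# here to accept a plain list of source lines, as it is used elsewhere in
# this file.
def _demo_close_expression():
  lines = CleansedLines(['int x = Foo(a,', '            b);'])
  line, linenum, pos = CloseExpression(lines, 0, lines.elided[0].index('('))
  assert linenum == 1 and line[pos - 1] == ')'

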
01512 def FindStartOfExpressionInLine(line, endpos, stack):
01513   """Find position at the matching start of current expression.
01514 
01515   This is almost the reverse of FindEndOfExpressionInLine, but note
01516   that the input position and returned position differ by 1.
01517 
01518   Args:
01519     line: a CleansedLines line.
01520     endpos: start searching at this position.
01521     stack: nesting stack at endpos.
01522 
01523   Returns:
01524     On finding matching start: (index at matching start, None)
01525     On finding an unclosed expression: (-1, None)
01526     Otherwise: (-1, new stack at beginning of this line)
01527   """
01528   i = endpos
01529   while i >= 0:
01530     char = line[i]
01531     if char in ')]}':
01532       # Found end of expression, push to expression stack
01533       stack.append(char)
01534     elif char == '>':
01535       # Found potential end of template argument list.
01536       #
01537       # Ignore it if it's a "->" or ">=" or "operator>"
01538       if (i > 0 and
01539           (line[i - 1] == '-' or
01540            Match(r'\s>=\s', line[i - 1:]) or
01541            Search(r'\boperator\s*$', line[0:i]))):
01542         i -= 1
01543       else:
01544         stack.append('>')
01545     elif char == '<':
01546       # Found potential start of template argument list
01547       if i > 0 and line[i - 1] == '<':
01548         # Left shift operator
01549         i -= 1
01550       else:
01551         # If there is a matching '>', we can pop the expression stack.
01552         # Otherwise, ignore this '<' since it must be an operator.
01553         if stack and stack[-1] == '>':
01554           stack.pop()
01555           if not stack:
01556             return (i, None)
01557     elif char in '([{':
01558       # Found start of expression.
01559       #
01560       # If there are any unmatched '>' on the stack, they must be
01561       # operators.  Remove those.
01562       while stack and stack[-1] == '>':
01563         stack.pop()
01564       if not stack:
01565         return (-1, None)
01566       if ((char == '(' and stack[-1] == ')') or
01567           (char == '[' and stack[-1] == ']') or
01568           (char == '{' and stack[-1] == '}')):
01569         stack.pop()
01570         if not stack:
01571           return (i, None)
01572       else:
01573         # Mismatched parentheses
01574         return (-1, None)
01575     elif char == ';':
01576       # Found something that looks like the end of a statement.  If we are
01577       # currently expecting a '<', the matching '>' must have been an operator,
01578       # since template argument lists should not contain statements.
01579       while stack and stack[-1] == '>':
01580         stack.pop()
01581       if not stack:
01582         return (-1, None)
01583 
01584     i -= 1
01585 
01586   return (-1, stack)
01587 
01588 
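# Illustrative sketch (not part of the original cpplint code): scanning
# backwards from a closing token.  Note the returned index points *at* the
# matching opener, unlike FindEndOfExpressionInLine, which points one past
# the closer.
def _demo_find_start_of_expression_in_line():
  start, stack = FindStartOfExpressionInLine('foo(a, b) + c', 8, [])
  assert (start, stack) == (3, None)

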
01589 def ReverseCloseExpression(clean_lines, linenum, pos):
01590   """If input points to ) or } or ] or >, finds the position that opens it.
01591 
01592   If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
01593   linenum/pos that correspond to the opening of the expression.
01594 
01595   Args:
01596     clean_lines: A CleansedLines instance containing the file.
01597     linenum: The number of the line to check.
01598     pos: A position on the line.
01599 
01600   Returns:
01601     A tuple (line, linenum, pos) pointer *at* the opening brace, or
01602     (line, 0, -1) if we never find the matching opening brace.  Note
01603     we ignore strings and comments when matching; and the line we
01604     return is the 'cleansed' line at linenum.
01605   """
01606   line = clean_lines.elided[linenum]
01607   if line[pos] not in ')}]>':
01608     return (line, 0, -1)
01609 
01610   # Check last line
01611   (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
01612   if start_pos > -1:
01613     return (line, linenum, start_pos)
01614 
01615   # Continue scanning backward
01616   while stack and linenum > 0:
01617     linenum -= 1
01618     line = clean_lines.elided[linenum]
01619     (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
01620     if start_pos > -1:
01621       return (line, linenum, start_pos)
01622 
01623   # Did not find start of expression before beginning of file, give up
01624   return (line, 0, -1)
01625 
01626 
01627 def CheckForCopyright(filename, lines, error):
01628   """Logs an error if no Copyright message appears at the top of the file."""
01629 
01630   # We'll say it should occur by line 10. Don't forget there's a
01631   # dummy line at the front.
01632   for line in xrange(1, min(len(lines), 11)):
01633     if re.search(r'Copyright', lines[line], re.I): break
01634   else:                       # means no copyright line was found
01635     error(filename, 0, 'legal/copyright', 5,
01636           'No copyright message found.  '
01637           'You should have a line: "Copyright [year] <Copyright Owner>"')
01638 
01639 
01640 def GetIndentLevel(line):
01641   """Return the number of leading spaces in line.
01642 
01643   Args:
01644     line: A string to check.
01645 
01646   Returns:
01647     An integer count of leading spaces, possibly zero.
01648   """
01649   indent = Match(r'^( *)\S', line)
01650   if indent:
01651     return len(indent.group(1))
01652   else:
01653     return 0
01654 
01655 
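# Illustrative sketch (not part of the original cpplint code): GetIndentLevel
# counts leading spaces only; tabs, empty lines, and whitespace-only lines
# all yield zero.
def _demo_get_indent_level():
  assert GetIndentLevel('    int x;') == 4
  assert GetIndentLevel('\tint x;') == 0  # tabs are not counted as indent
  assert GetIndentLevel('') == 0

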
01656 def GetHeaderGuardCPPVariable(filename):
01657   """Returns the CPP variable that should be used as a header guard.
01658 
01659   Args:
01660     filename: The name of a C++ header file.
01661 
01662   Returns:
01663     The CPP variable that should be used as a header guard in the
01664     named file.
01665 
01666   """
01667 
01668   # Restore the original filename in case cpplint is invoked from Emacs's
01669   # flymake.
01670   filename = re.sub(r'_flymake\.h$', '.h', filename)
01671   filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
01672   # Replace 'c++' with 'cpp'.
01673   filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
01674
01675   fileinfo = FileInfo(filename)
01676   file_path_from_root = fileinfo.RepositoryName()
01677   if _root:
01678     file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
01679   return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
01680 
01681 
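# Illustrative sketch (not part of the original cpplint code): the guard is
# derived from the repository-relative path, uppercased, with every
# non-alphanumeric character turned into '_'.  The exact prefix depends on
# FileInfo.RepositoryName() and the --root flag, so only the suffix is
# checked here; the path is a made-up example.
def _demo_header_guard_cpp_variable():
  guard = GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
  assert guard.endswith('BROWSER_H_')

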
01682 def CheckForHeaderGuard(filename, clean_lines, error):
01683   """Checks that the file contains a header guard.
01684 
01685   Logs an error if no #ifndef header guard is present.  For headers that
01686   do have a guard, checks that the full pathname is used.
01687 
01688   Args:
01689     filename: The name of the C++ header file.
01690     clean_lines: A CleansedLines instance containing the file.
01691     error: The function to call with any errors found.
01692   """
01693 
01694   # Don't check for header guards if there are error suppression
01695   # comments somewhere in this file.
01696   #
01697   # Because this is silencing a warning for a nonexistent line, we
01698   # only support the very specific NOLINT(build/header_guard) syntax,
01699   # and not the general NOLINT or NOLINT(*) syntax.
01700   raw_lines = clean_lines.lines_without_raw_strings
01701   for i in raw_lines:
01702     if Search(r'//\s*NOLINT\(build/header_guard\)', i):
01703       return
01704 
01705   cppvar = GetHeaderGuardCPPVariable(filename)
01706 
01707   ifndef = ''
01708   ifndef_linenum = 0
01709   define = ''
01710   endif = ''
01711   endif_linenum = 0
01712   for linenum, line in enumerate(raw_lines):
01713     linesplit = line.split()
01714     if len(linesplit) >= 2:
01715       # find the first occurrence of #ifndef and #define, save arg
01716       if not ifndef and linesplit[0] == '#ifndef':
01717         # set ifndef to the header guard presented on the #ifndef line.
01718         ifndef = linesplit[1]
01719         ifndef_linenum = linenum
01720       if not define and linesplit[0] == '#define':
01721         define = linesplit[1]
01722     # find the last occurrence of #endif, save entire line
01723     if line.startswith('#endif'):
01724       endif = line
01725       endif_linenum = linenum
01726 
01727   if not ifndef or not define or ifndef != define:
01728     error(filename, 0, 'build/header_guard', 5,
01729           'No #ifndef header guard found, suggested CPP variable is: %s' %
01730           cppvar)
01731     return
01732 
01733   # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
01734   # for backward compatibility.
01735   if ifndef != cppvar:
01736     error_level = 0
01737     if ifndef != cppvar + '_':
01738       error_level = 5
01739 
01740     ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
01741                             error)
01742     error(filename, ifndef_linenum, 'build/header_guard', error_level,
01743           '#ifndef header guard has wrong style, please use: %s' % cppvar)
01744 
01745   # Check for "//" comments on endif line.
01746   ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
01747                           error)
01748   match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
01749   if match:
01750     if match.group(1) == '_':
01751       # Issue low severity warning for deprecated double trailing underscore
01752       error(filename, endif_linenum, 'build/header_guard', 0,
01753             '#endif line should be "#endif  // %s"' % cppvar)
01754     return
01755 
01756   # Didn't find the corresponding "//" comment.  If this file does not
01757   # contain any "//" comments at all, it could be that the compiler
01758   # only wants "/**/" comments; look for those instead.
01759   no_single_line_comments = True
01760   for i in xrange(1, len(raw_lines) - 1):
01761     line = raw_lines[i]
01762     if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
01763       no_single_line_comments = False
01764       break
01765 
01766   if no_single_line_comments:
01767     match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
01768     if match:
01769       if match.group(1) == '_':
01770         # Low severity warning for double trailing underscore
01771         error(filename, endif_linenum, 'build/header_guard', 0,
01772               '#endif line should be "#endif  /* %s */"' % cppvar)
01773       return
01774 
01775   # Didn't find anything
01776   error(filename, endif_linenum, 'build/header_guard', 5,
01777         '#endif line should be "#endif  // %s"' % cppvar)
01778 
01779 
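# Illustrative sketch (not part of the original cpplint code): a header whose
# #ifndef/#define/#endif lines all use the computed guard variable produces
# no errors.  CleansedLines is assumed to accept a plain list of source
# lines, and the error collector is just a list-appending lambda.
def _demo_check_for_header_guard():
  cppvar = GetHeaderGuardCPPVariable('foo/bar.h')
  lines = ['#ifndef ' + cppvar,
           '#define ' + cppvar,
           '#endif  // ' + cppvar,
           '']
  errors = []
  CheckForHeaderGuard('foo/bar.h', CleansedLines(lines),
                      lambda *args: errors.append(args))
  assert not errors

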
01780 def CheckHeaderFileIncluded(filename, include_state, error):
01781   """Logs an error if a .cc file does not include its header."""
01782 
01783   # Do not check test files
01784   if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'):
01785     return
01786 
01787   fileinfo = FileInfo(filename)
01788   headerfile = filename[0:len(filename) - 2] + 'h'
01789   if not os.path.exists(headerfile):
01790     return
01791   headername = FileInfo(headerfile).RepositoryName()
01792   first_include = 0
01793   for section_list in include_state.include_list:
01794     for f in section_list:
01795       if headername in f[0] or f[0] in headername:
01796         return
01797       if not first_include:
01798         first_include = f[1]
01799 
01800   error(filename, first_include, 'build/include', 5,
01801         '%s should include its header file %s' % (fileinfo.RepositoryName(),
01802                                                   headername))
01803 
01804 
01805 def CheckForBadCharacters(filename, lines, error):
01806   """Logs an error for each line containing bad characters.
01807 
01808   Two kinds of bad characters:
01809 
01810   1. Unicode replacement characters: These indicate that either the file
01811   contained invalid UTF-8 (likely) or Unicode replacement characters (which
01812   it shouldn't).  Note that it's possible for this to throw off line
01813   numbering if the invalid UTF-8 occurred adjacent to a newline.
01814 
01815   2. NUL bytes.  These are problematic for some tools.
01816 
01817   Args:
01818     filename: The name of the current file.
01819     lines: An array of strings, each representing a line of the file.
01820     error: The function to call with any errors found.
01821   """
01822   for linenum, line in enumerate(lines):
01823     if u'\ufffd' in line:
01824       error(filename, linenum, 'readability/utf8', 5,
01825             'Line contains invalid UTF-8 (or Unicode replacement character).')
01826     if '\0' in line:
01827       error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
01828 
01829 
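# Illustrative sketch (not part of the original cpplint code): one error per
# offending line, for both the Unicode replacement character and NUL bytes.
# The lines below are made-up inputs.
def _demo_check_for_bad_characters():
  errors = []
  CheckForBadCharacters('demo.cc',
                        [u'int x;', u'bad \ufffd here', u'nul \0 byte'],
                        lambda *args: errors.append(args))
  assert len(errors) == 2

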
01830 def CheckForNewlineAtEOF(filename, lines, error):
01831   """Logs an error if there is no newline char at the end of the file.
01832 
01833   Args:
01834     filename: The name of the current file.
01835     lines: An array of strings, each representing a line of the file.
01836     error: The function to call with any errors found.
01837   """
01838 
01839   # The array lines() was created by adding two newlines to the
01840   # original file (go figure), then splitting on \n.
01841   # To verify that the file ends in \n, we just have to make sure the
01842   # second-to-last element of lines() exists and is empty.
01843   if len(lines) < 3 or lines[-2]:
01844     error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
01845           'Could not find a newline character at the end of the file.')
01846 
01847 
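# Illustrative sketch (not part of the original cpplint code): only the
# second-to-last element of the padded line array matters.  The inputs below
# are assumed shapes chosen for demonstration only.
def _demo_check_for_newline_at_eof():
  errors = []
  CheckForNewlineAtEOF('demo.cc', ['int x;', '', ''],
                       lambda *args: errors.append(args))
  assert not errors  # empty second-to-last element: file ended with \n
  CheckForNewlineAtEOF('demo.cc', ['int x;', 'int y;', ''],
                       lambda *args: errors.append(args))
  assert errors      # non-empty second-to-last element: missing newline

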
01848 def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
01849   """Logs an error if we see /* ... */ or "..." that extend past one line.
01850 
01851   /* ... */ comments are legit inside macros, for one line.
01852   Otherwise, we prefer // comments, so it's ok to warn about the
01853   other.  Likewise, it's ok for strings to extend across multiple
01854   lines, as long as a line continuation character (backslash)
01855   terminates each line. Although not currently prohibited by the C++
01856   style guide, it's ugly and unnecessary. We don't do well with either
01857   in this lint program, so we warn about both.
01858 
01859   Args:
01860     filename: The name of the current file.
01861     clean_lines: A CleansedLines instance containing the file.
01862     linenum: The number of the line to check.
01863     error: The function to call with any errors found.
01864   """
01865   line = clean_lines.elided[linenum]
01866 
01867   # Remove all \\ (escaped backslashes) from the line. They are OK, and the
01868   # second (escaped) backslash may trigger later \" detection erroneously.
01869   line = line.replace('\\\\', '')
01870 
01871   if line.count('/*') > line.count('*/'):
01872     error(filename, linenum, 'readability/multiline_comment', 5,
01873           'Complex multi-line /*...*/-style comment found. '
01874           'Lint may give bogus warnings.  '
01875           'Consider replacing these with //-style comments, '
01876           'with #if 0...#endif, '
01877           'or with more clearly structured multi-line comments.')
01878 
01879   if (line.count('"') - line.count('\\"')) % 2:
01880     error(filename, linenum, 'readability/multiline_string', 5,
01881           'Multi-line string ("...") found.  This lint script doesn\'t '
01882           'do well with such strings, and may give bogus warnings.  '
01883           'Use C++11 raw strings or concatenation instead.')
01884 
01885 
01886 # (non-threadsafe name, thread-safe alternative, validation pattern)
01887 #
01888 # The validation pattern is used to eliminate false positives such as:
01889 #  _rand();               // false positive due to substring match.
01890 #  ->rand();              // some member function rand().
01891 #  ACMRandom rand(seed);  // some variable named rand.
01892 #  ISAACRandom rand();    // another variable named rand.
01893 #
01894 # Basically we require the return value of these functions to be used
01895 # in some expression context on the same line by matching on some
01896 # operator before the function name.  This eliminates constructors and
01897 # member function calls.
01898 _UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
01899 _THREADING_LIST = (
01900     ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
01901     ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
01902     ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
01903     ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
01904     ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
01905     ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
01906     ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
01907     ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
01908     ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
01909     ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
01910     ('strtok(', 'strtok_r(',
01911      _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
01912     ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
01913     )
01914 
01915 
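# Illustrative sketch (not part of the original cpplint code): the prefix
# pattern requires the call to appear in an expression context, which is what
# filters out member functions and variables that merely share the name.
def _demo_unsafe_func_prefix():
  pattern = _UNSAFE_FUNC_PREFIX + r'rand\(\)'
  assert Search(pattern, 'int x = rand();')             # flagged
  assert not Search(pattern, 'ACMRandom rand(seed);')   # variable named rand
  assert not Search(pattern, 'obj->rand();')            # member function

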
01916 def CheckPosixThreading(filename, clean_lines, linenum, error):
01917   """Checks for calls to thread-unsafe functions.
01918 
01919   Much code was originally written without multi-threading in mind.
01920   Also, engineers often rely on old experience: they learned POSIX
01921   before the threading extensions were added.  These tests guide
01922   engineers toward the thread-safe variants (when using POSIX
01923   directly).
01924 
01925   Args:
01926     filename: The name of the current file.
01927     clean_lines: A CleansedLines instance containing the file.
01928     linenum: The number of the line to check.
01929     error: The function to call with any errors found.
01930   """
01931   line = clean_lines.elided[linenum]
01932   for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
01933     # Additional pattern matching check to confirm that this is the
01934     # function we are looking for
01935     if Search(pattern, line):
01936       error(filename, linenum, 'runtime/threadsafe_fn', 2,
01937             'Consider using ' + multithread_safe_func +
01938             '...) instead of ' + single_thread_func +
01939             '...) for improved thread safety.')
01940 
01941 
01942 def CheckVlogArguments(filename, clean_lines, linenum, error):
01943   """Checks that VLOG() is only used for defining a logging level.
01944 
01945   For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
01946   VLOG(FATAL) are not.
01947 
01948   Args:
01949     filename: The name of the current file.
01950     clean_lines: A CleansedLines instance containing the file.
01951     linenum: The number of the line to check.
01952     error: The function to call with any errors found.
01953   """
01954   line = clean_lines.elided[linenum]
01955   if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
01956     error(filename, linenum, 'runtime/vlog', 5,
01957           'VLOG() should be used with numeric verbosity level.  '
01958           'Use LOG() if you want symbolic severity levels.')
01959 
01960 # Matches invalid increment: *count++, which moves pointer instead of
01961 # incrementing a value.
01962 _RE_PATTERN_INVALID_INCREMENT = re.compile(
01963     r'^\s*\*\w+(\+\+|--);')
01964 
01965 
01966 def CheckInvalidIncrement(filename, clean_lines, linenum, error):
01967   """Checks for invalid increment *count++.
01968 
01969   For example, the following function:
01970   void increment_counter(int* count) {
01971     *count++;
01972   }
01973   is invalid, because it effectively does count++, moving the pointer, and
01974   should be replaced with ++*count, (*count)++ or *count += 1.
01975 
01976   Args:
01977     filename: The name of the current file.
01978     clean_lines: A CleansedLines instance containing the file.
01979     linenum: The number of the line to check.
01980     error: The function to call with any errors found.
01981   """
01982   line = clean_lines.elided[linenum]
01983   if _RE_PATTERN_INVALID_INCREMENT.match(line):
01984     error(filename, linenum, 'runtime/invalid_increment', 5,
01985           'Changing pointer instead of value (or unused value of operator*).')
01986 
01987 
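# Illustrative sketch (not part of the original cpplint code): the pattern
# only fires when the whole statement is a bare dereference-and-increment,
# so expressions that actually use the value are not flagged.
def _demo_invalid_increment_pattern():
  assert _RE_PATTERN_INVALID_INCREMENT.match('  *count++;')
  assert not _RE_PATTERN_INVALID_INCREMENT.match('  (*count)++;')
  assert not _RE_PATTERN_INVALID_INCREMENT.match('  value = *count++;')

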
01988 def IsMacroDefinition(clean_lines, linenum):
01989   if Search(r'^#define', clean_lines[linenum]):
01990     return True
01991 
01992   if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
01993     return True
01994 
01995   return False
01996 
01997 
01998 def IsForwardClassDeclaration(clean_lines, linenum):
01999   return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
02000 
02001 
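# Illustrative sketch (not part of the original cpplint code): a plain list
# of lines stands in for the cleansed-lines sequence these helpers index.
def _demo_macro_and_forward_declaration_helpers():
  assert IsForwardClassDeclaration(['class Foo;'], 0)
  assert IsForwardClassDeclaration(['template <typename T> class Foo;'], 0)
  assert not IsForwardClassDeclaration(['class Foo {'], 0)
  assert IsMacroDefinition(['#define FOO(x) \\', '    (x + 1)'], 1)

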
02002 class _BlockInfo(object):
02003   """Stores information about a generic block of code."""
02004 
02005   def __init__(self, seen_open_brace):
02006     self.seen_open_brace = seen_open_brace
02007     self.open_parentheses = 0
02008     self.inline_asm = _NO_ASM
02009     self.check_namespace_indentation = False
02010 
02011   def CheckBegin(self, filename, clean_lines, linenum, error):
02012     """Run checks that applies to text up to the opening brace.
02013 
02014     This is mostly for checking the text after the class identifier
02015     and the "{", usually where the base class is specified.  For other
02016     blocks, there isn't much to check, so we always pass.
02017 
02018     Args:
02019       filename: The name of the current file.
02020       clean_lines: A CleansedLines instance containing the file.
02021       linenum: The number of the line to check.
02022       error: The function to call with any errors found.
02023     """
02024     pass
02025 
02026   def CheckEnd(self, filename, clean_lines, linenum, error):
02027     """Run checks that applies to text after the closing brace.
02028 
02029     This is mostly used for checking end of namespace comments.
02030 
02031     Args:
02032       filename: The name of the current file.
02033       clean_lines: A CleansedLines instance containing the file.
02034       linenum: The number of the line to check.
02035       error: The function to call with any errors found.
02036     """
02037     pass
02038 
02039   def IsBlockInfo(self):
02040     """Returns true if this block is a _BlockInfo.
02041 
02042     This is convenient for verifying that an object is an instance of
02043     a _BlockInfo, but not an instance of any of the derived classes.
02044 
02045     Returns:
02046       True for this class, False for derived classes.
02047     """
02048     return self.__class__ == _BlockInfo
02049 
02050 
02051 class _ExternCInfo(_BlockInfo):
02052   """Stores information about an 'extern "C"' block."""
02053 
02054   def __init__(self):
02055     _BlockInfo.__init__(self, True)
02056 
02057 
02058 class _ClassInfo(_BlockInfo):
02059   """Stores information about a class."""
02060 
02061   def __init__(self, name, class_or_struct, clean_lines, linenum):
02062     _BlockInfo.__init__(self, False)
02063     self.name = name
02064     self.starting_linenum = linenum
02065     self.is_derived = False
02066     self.check_namespace_indentation = True
02067     if class_or_struct == 'struct':
02068       self.access = 'public'
02069       self.is_struct = True
02070     else:
02071       self.access = 'private'
02072       self.is_struct = False
02073 
02074     # Remember initial indentation level for this class.  Using raw_lines here
02075     # instead of elided to account for leading comments.
02076     self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
02077 
02078     # Try to find the end of the class.  This will be confused by things like:
02079     #   class A {
02080     #   } *x = { ...
02081     #
02082     # But it's still good enough for CheckSectionSpacing.
02083     self.last_line = 0
02084     depth = 0
02085     for i in range(linenum, clean_lines.NumLines()):
02086       line = clean_lines.elided[i]
02087       depth += line.count('{') - line.count('}')
02088       if not depth:
02089         self.last_line = i
02090         break
02091 
02092   def CheckBegin(self, filename, clean_lines, linenum, error):
02093     # Look for a bare ':'
02094     if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
02095       self.is_derived = True
02096 
02097   def CheckEnd(self, filename, clean_lines, linenum, error):
02098     # If there is a DISALLOW macro, it should appear near the end of
02099     # the class.
02100     seen_last_thing_in_class = False
02101     for i in xrange(linenum - 1, self.starting_linenum, -1):
02102       match = Search(
02103           r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
02104           self.name + r'\)',
02105           clean_lines.elided[i])
02106       if match:
02107         if seen_last_thing_in_class:
02108           error(filename, i, 'readability/constructors', 3,
02109                 match.group(1) + ' should be the last thing in the class')
02110         break
02111 
02112       if not Match(r'^\s*$', clean_lines.elided[i]):
02113         seen_last_thing_in_class = True
02114 
02115     # Check that closing brace is aligned with beginning of the class.
02116     # Only do this if the closing brace is indented by only whitespaces.
02117     # This means we will not check single-line class definitions.
02118     indent = Match(r'^( *)\}', clean_lines.elided[linenum])
02119     if indent and len(indent.group(1)) != self.class_indent:
02120       if self.is_struct:
02121         parent = 'struct ' + self.name
02122       else:
02123         parent = 'class ' + self.name
02124       error(filename, linenum, 'whitespace/indent', 3,
02125             'Closing brace should be aligned with beginning of %s' % parent)
02126 
02127 
02128 class _NamespaceInfo(_BlockInfo):
02129   """Stores information about a namespace."""
02130 
02131   def __init__(self, name, linenum):
02132     _BlockInfo.__init__(self, False)
02133     self.name = name or ''
02134     self.starting_linenum = linenum
02135     self.check_namespace_indentation = True
02136 
02137   def CheckEnd(self, filename, clean_lines, linenum, error):
02138     """Check end of namespace comments."""
02139     line = clean_lines.raw_lines[linenum]
02140 
02141     # Check how many lines are enclosed in this namespace.  Don't issue
02142     # a warning for missing namespace comments if there aren't enough
02143     # lines.  However, do apply checks if there is already an end of
02144     # namespace comment and it's incorrect.
02145     #
02146     # TODO(unknown): We always want to check end of namespace comments
02147     # if a namespace is large, but sometimes we also want to apply the
02148     # check if a short namespace contained nontrivial things (something
02149     # other than forward declarations).  There is currently no logic on
02150     # deciding what these nontrivial things are, so this check is
02151     # triggered by namespace size only, which works most of the time.
02152     if (linenum - self.starting_linenum < 10
02153         and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
02154       return
02155 
02156     # Look for matching comment at end of namespace.
02157     #
02158     # Note that we accept C style "/* */" comments for terminating
02159     # namespaces, so that code that terminates namespaces inside
02160     # preprocessor macros can be cpplint clean.
02161     #
02162     # We also accept stuff like "// end of namespace <name>." with the
02163     # period at the end.
02164     #
02165     # Besides these, we don't accept anything else, otherwise we might
02166     # get false negatives when the existing comment is a substring of the
02167     # expected namespace.
02168     if self.name:
02169       # Named namespace
02170       if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
02171                     r'[\*/\.\\\s]*$'),
02172                    line):
02173         error(filename, linenum, 'readability/namespace', 5,
02174               'Namespace should be terminated with "// namespace %s"' %
02175               self.name)
02176     else:
02177       # Anonymous namespace
02178       if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
02179         # If "// namespace anonymous" or "// anonymous namespace (more text)",
02180         # mention "// anonymous namespace" as an acceptable form
02181         if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
02182           error(filename, linenum, 'readability/namespace', 5,
02183                 'Anonymous namespace should be terminated with "// namespace"'
02184                 ' or "// anonymous namespace"')
02185         else:
02186           error(filename, linenum, 'readability/namespace', 5,
02187                 'Anonymous namespace should be terminated with "// namespace"')
02188 
02189 
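# Illustrative sketch (not part of the original cpplint code): comment forms
# accepted by the named-namespace check above, reusing its regular expression
# with 'myns' as an assumed namespace name.
def _demo_namespace_end_comment_forms():
  accepted = (r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape('myns') +
              r'[\*/\.\\\s]*$')
  assert Match(accepted, '}  // namespace myns')
  assert Match(accepted, '}  /* namespace myns */')
  assert not Match(accepted, '}  // myns')

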
02190 class _PreprocessorInfo(object):
02191   """Stores checkpoints of nesting stacks when #if/#else is seen."""
02192 
02193   def __init__(self, stack_before_if):
02194     # The entire nesting stack before #if
02195     self.stack_before_if = stack_before_if
02196 
02197     # The entire nesting stack up to #else
02198     self.stack_before_else = []
02199 
02200     # Whether we have already seen #else or #elif
02201     self.seen_else = False
02202 
02203 
02204 class NestingState(object):
02205   """Holds states related to parsing braces."""
02206 
02207   def __init__(self):
02208     # Stack for tracking all braces.  An object is pushed whenever we
02209     # see a "{", and popped when we see a "}".  Only 3 types of
02210     # objects are possible:
02211     # - _ClassInfo: a class or struct.
02212     # - _NamespaceInfo: a namespace.
02213     # - _BlockInfo: some other type of block.
02214     self.stack = []
02215 
02216     # Top of the previous stack before each Update().
02217     #
02218     # Because the nesting_stack is updated at the end of each line, we
02219     # had to do some convoluted checks to find out what the current
02220     # scope is at the beginning of the line.  This check is simplified by
02221     # saving the previous top of nesting stack.
02222     #
02223     # We could save the full stack, but we only need the top.  Copying
02224     # the full nesting stack would slow down cpplint by ~10%.
02225     self.previous_stack_top = []
02226 
02227     # Stack of _PreprocessorInfo objects.
02228     self.pp_stack = []
02229 
02230   def SeenOpenBrace(self):
02231     """Check if we have seen the opening brace for the innermost block.
02232 
02233     Returns:
02234       True if we have seen the opening brace, False if the innermost
02235       block is still expecting an opening brace.
02236     """
02237     return (not self.stack) or self.stack[-1].seen_open_brace
02238 
02239   def InNamespaceBody(self):
02240     """Check if we are currently one level inside a namespace body.
02241 
02242     Returns:
02243       True if top of the stack is a namespace block, False otherwise.
02244     """
02245     return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
02246 
02247   def InExternC(self):
02248     """Check if we are currently one level inside an 'extern "C"' block.
02249 
02250     Returns:
02251       True if top of the stack is an extern block, False otherwise.
02252     """
02253     return self.stack and isinstance(self.stack[-1], _ExternCInfo)
02254 
02255   def InClassDeclaration(self):
02256     """Check if we are currently one level inside a class or struct declaration.
02257 
02258     Returns:
02259       True if top of the stack is a class/struct, False otherwise.
02260     """
02261     return self.stack and isinstance(self.stack[-1], _ClassInfo)
02262 
02263   def InAsmBlock(self):
02264     """Check if we are currently one level inside an inline ASM block.
02265 
02266     Returns:
02267       True if the top of the stack is a block containing inline ASM.
02268     """
02269     return self.stack and self.stack[-1].inline_asm != _NO_ASM
02270 
02271   def InTemplateArgumentList(self, clean_lines, linenum, pos):
02272     """Check if current position is inside template argument list.
02273 
02274     Args:
02275       clean_lines: A CleansedLines instance containing the file.
02276       linenum: The number of the line to check.
02277       pos: position just after the suspected template argument.
02278     Returns:
02279       True if (linenum, pos) is inside template arguments.
02280     """
02281     while linenum < clean_lines.NumLines():
02282       # Find the earliest character that might indicate a template argument
02283       line = clean_lines.elided[linenum]
02284       match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
02285       if not match:
02286         linenum += 1
02287         pos = 0
02288         continue
02289       token = match.group(1)
02290       pos += len(match.group(0))
02291 
02292       # These things do not look like template argument list:
02293       #   class Suspect {
02294       #   class Suspect x; }
02295       if token in ('{', '}', ';'): return False
02296 
02297       # These things look like template argument list:
02298       #   template <class Suspect>
02299       #   template <class Suspect = default_value>
02300       #   template <class Suspect[]>
02301       #   template <class Suspect...>
02302       if token in ('>', '=', '[', ']', '.'): return True
02303 
02304       # Check if token is an unmatched '<'.
02305       # If not, move on to the next character.
02306       if token != '<':
02307         pos += 1
02308         if pos >= len(line):
02309           linenum += 1
02310           pos = 0
02311         continue
02312 
02313       # If we just found a single '<', we can't be sure whether it starts a
02314       # template argument list; we need to find the matching '>'.
02315       (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
02316       if end_pos < 0:
02317         # Not sure if template argument list or syntax error in file
02318         return False
02319       linenum = end_line
02320       pos = end_pos
02321     return False
02322 
02323   def UpdatePreprocessor(self, line):
02324     """Update preprocessor stack.
02325 
02326     We need to handle preprocessors due to classes like this:
02327       #ifdef SWIG
02328       struct ResultDetailsPageElementExtensionPoint {
02329       #else
02330       struct ResultDetailsPageElementExtensionPoint : public Extension {
02331       #endif
02332 
02333     We make the following assumptions (good enough for most files):
02334     - Preprocessor condition evaluates to true from #if up to first
02335       #else/#elif/#endif.
02336 
02337     - Preprocessor condition evaluates to false from #else/#elif up
02338       to #endif.  We still perform lint checks on these lines, but
02339       these do not affect nesting stack.
02340 
02341     Args:
02342       line: current line to check.
02343     """
02344     if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
02345       # Beginning of #if block, save the nesting stack here.  The saved
02346       # stack will allow us to restore the parsing state in the #else case.
02347       self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
02348     elif Match(r'^\s*#\s*(else|elif)\b', line):
02349       # Beginning of #else block
02350       if self.pp_stack:
02351         if not self.pp_stack[-1].seen_else:
02352           # This is the first #else or #elif block.  Remember the
02353           # whole nesting stack up to this point.  This is what we
02354           # keep after the #endif.
02355           self.pp_stack[-1].seen_else = True
02356           self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
02357 
02358         # Restore the stack to how it was before the #if
02359         self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
02360       else:
02361         # TODO(unknown): unexpected #else, issue warning?
02362         pass
02363     elif Match(r'^\s*#\s*endif\b', line):
02364       # End of #if or #else blocks.
02365       if self.pp_stack:
02366         # If we saw an #else, we will need to restore the nesting
02367         # stack to its former state before the #else, otherwise we
02368         # will just continue from where we left off.
02369         if self.pp_stack[-1].seen_else:
02370           # Here we can just use a shallow copy since we are the last
02371           # reference to it.
02372           self.stack = self.pp_stack[-1].stack_before_else
02373         # Drop the corresponding #if
02374         self.pp_stack.pop()
02375       else:
02376         # TODO(unknown): unexpected #endif, issue warning?
02377         pass
02378 
02379   # TODO(unknown): Update() is too long, but we will refactor later.
02380   def Update(self, filename, clean_lines, linenum, error):
02381     """Update nesting state with current line.
02382 
02383     Args:
02384       filename: The name of the current file.
02385       clean_lines: A CleansedLines instance containing the file.
02386       linenum: The number of the line to check.
02387       error: The function to call with any errors found.
02388     """
02389     line = clean_lines.elided[linenum]
02390 
02391     # Remember top of the previous nesting stack.
02392     #
02393     # The stack is always pushed/popped and not modified in place, so
02394     # we can just do a shallow copy instead of copy.deepcopy.  Using
02395     # deepcopy would slow down cpplint by ~28%.
02396     if self.stack:
02397       self.previous_stack_top = self.stack[-1]
02398     else:
02399       self.previous_stack_top = None
02400 
02401     # Update pp_stack
02402     self.UpdatePreprocessor(line)
02403 
02404     # Count parentheses.  This is to avoid adding struct arguments to
02405     # the nesting stack.
02406     if self.stack:
02407       inner_block = self.stack[-1]
02408       depth_change = line.count('(') - line.count(')')
02409       inner_block.open_parentheses += depth_change
02410 
02411       # Also check if we are starting or ending an inline assembly block.
02412       if inner_block.inline_asm in (_NO_ASM, _END_ASM):
02413         if (depth_change != 0 and
02414             inner_block.open_parentheses == 1 and
02415             _MATCH_ASM.match(line)):
02416           # Enter assembly block
02417           inner_block.inline_asm = _INSIDE_ASM
02418         else:
02419           # Not entering assembly block.  If previous line was _END_ASM,
02420           # we will now shift to _NO_ASM state.
02421           inner_block.inline_asm = _NO_ASM
02422       elif (inner_block.inline_asm == _INSIDE_ASM and
02423             inner_block.open_parentheses == 0):
02424         # Exit assembly block
02425         inner_block.inline_asm = _END_ASM
02426 
02427     # Consume namespace declaration at the beginning of the line.  Do
02428     # this in a loop so that we catch same line declarations like this:
02429     #   namespace proto2 { namespace bridge { class MessageSet; } }
02430     while True:
02431       # Match start of namespace.  The "\b\s*" below catches namespace
02432       # declarations even if it weren't followed by a whitespace, this
02433       # is so that we don't confuse our namespace checker.  The
02434       # missing spaces will be flagged by CheckSpacing.
02435       namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
02436       if not namespace_decl_match:
02437         break
02438 
02439       new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
02440       self.stack.append(new_namespace)
02441 
02442       line = namespace_decl_match.group(2)
02443       if line.find('{') != -1:
02444         new_namespace.seen_open_brace = True
02445         line = line[line.find('{') + 1:]
02446 
02447     # Look for a class declaration in whatever is left of the line
02448     # after parsing namespaces.  The regexp accounts for decorated classes
02449     # such as in:
02450     #   class LOCKABLE API Object {
02451     #   };
02452     class_decl_match = Match(
02453         r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
02454         r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
02455         r'(.*)$', line)
02456     if (class_decl_match and
02457         (not self.stack or self.stack[-1].open_parentheses == 0)):
02458       # We do not want to accept classes that are actually template arguments:
02459       #   template <class Ignore1,
02460       #             class Ignore2 = Default<Args>,
02461       #             template <Args> class Ignore3>
02462       #   void Function() {};
02463       #
02464       # To avoid template argument cases, we scan forward and look for
02465       # an unmatched '>'.  If we see one, assume we are inside a
02466       # template argument list.
02467       end_declaration = len(class_decl_match.group(1))
02468       if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
02469         self.stack.append(_ClassInfo(
02470             class_decl_match.group(3), class_decl_match.group(2),
02471             clean_lines, linenum))
02472         line = class_decl_match.group(4)
02473 
02474     # If we have not yet seen the opening brace for the innermost block,
02475     # run checks here.
02476     if not self.SeenOpenBrace():
02477       self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
02478 
02479     # Update access control if we are inside a class/struct
02480     if self.stack and isinstance(self.stack[-1], _ClassInfo):
02481       classinfo = self.stack[-1]
02482       access_match = Match(
02483           r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
02484           r':(?:[^:]|$)',
02485           line)
02486       if access_match:
02487         classinfo.access = access_match.group(2)
02488 
02489         # Check that access keywords are indented +1 space.  Skip this
02490         # check if the keywords are not preceded by whitespaces.
02491         indent = access_match.group(1)
02492         if (len(indent) != classinfo.class_indent + 1 and
02493             Match(r'^\s*$', indent)):
02494           if classinfo.is_struct:
02495             parent = 'struct ' + classinfo.name
02496           else:
02497             parent = 'class ' + classinfo.name
02498           slots = ''
02499           if access_match.group(3):
02500             slots = access_match.group(3)
02501           error(filename, linenum, 'whitespace/indent', 3,
02502                 '%s%s: should be indented +1 space inside %s' % (
02503                     access_match.group(2), slots, parent))
02504 
02505     # Consume braces or semicolons from what's left of the line
02506     while True:
02507       # Match first brace, semicolon, or closed parenthesis.
02508       matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
02509       if not matched:
02510         break
02511 
02512       token = matched.group(1)
02513       if token == '{':
02514         # If namespace or class hasn't seen an opening brace yet, mark
02515         # namespace/class head as complete.  Push a new block onto the
02516         # stack otherwise.
02517         if not self.SeenOpenBrace():
02518           self.stack[-1].seen_open_brace = True
02519         elif Match(r'^extern\s*"[^"]*"\s*\{', line):
02520           self.stack.append(_ExternCInfo())
02521         else:
02522           self.stack.append(_BlockInfo(True))
02523           if _MATCH_ASM.match(line):
02524             self.stack[-1].inline_asm = _BLOCK_ASM
02525 
02526       elif token == ';' or token == ')':
02527         # If we haven't seen an opening brace yet, but we already saw
02528         # a semicolon, this is probably a forward declaration.  Pop
02529         # the stack for these.
02530         #
02531         # Similarly, if we haven't seen an opening brace yet, but we
02532         # already saw a closing parenthesis, then these are probably
02533         # function arguments with extra "class" or "struct" keywords.
02534         # Also pop the stack for these.
02535         if not self.SeenOpenBrace():
02536           self.stack.pop()
02537       else:  # token == '}'
02538         # Perform end of block checks and pop the stack.
02539         if self.stack:
02540           self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
02541           self.stack.pop()
02542       line = matched.group(2)
02543 
02544   def InnermostClass(self):
02545     """Get class info on the top of the stack.
02546 
02547     Returns:
02548       A _ClassInfo object if we are inside a class, or None otherwise.
02549     """
02550     for i in range(len(self.stack), 0, -1):
02551       classinfo = self.stack[i - 1]
02552       if isinstance(classinfo, _ClassInfo):
02553         return classinfo
02554     return None
02555 
02556   def CheckCompletedBlocks(self, filename, error):
02557     """Checks that all classes and namespaces have been completely parsed.
02558 
02559     Call this when all lines in a file have been processed.
02560     Args:
02561       filename: The name of the current file.
02562       error: The function to call with any errors found.
02563     """
02564     # Note: This test can result in false positives if #ifdef constructs
02565     # get in the way of brace matching. See the testBuildClass test in
02566     # cpplint_unittest.py for an example of this.
02567     for obj in self.stack:
02568       if isinstance(obj, _ClassInfo):
02569         error(filename, obj.starting_linenum, 'build/class', 5,
02570               'Failed to find complete declaration of class %s' %
02571               obj.name)
02572       elif isinstance(obj, _NamespaceInfo):
02573         error(filename, obj.starting_linenum, 'build/namespaces', 5,
02574               'Failed to find complete declaration of namespace %s' %
02575               obj.name)
02576 
02577 
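# Illustrative sketch (not part of the original cpplint code): feeding lines
# through Update() tracks namespace/class nesting and pops everything once
# the closing braces are seen.  CleansedLines is assumed to accept a plain
# list of source lines; the error callback is a no-op here.
def _demo_nesting_state():
  lines = CleansedLines(['namespace foo {',
                         'class Bar {',
                         ' public:',
                         '};',
                         '}  // namespace foo'])
  state = NestingState()
  for linenum in range(lines.NumLines()):
    state.Update('demo.cc', lines, linenum, lambda *args: None)
  assert not state.stack  # every block was closed

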
02578 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
02579                                   nesting_state, error):
02580   r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
02581 
02582   Complain about several constructs which gcc-2 accepts, but which are
02583   not standard C++.  Warning about these in lint is one way to ease the
02584   transition to new compilers.
02585   - put storage class first (e.g. "static const" instead of "const static").
02586   - "%lld" instead of %qd" in printf-type functions.
02587   - "%1$d" is non-standard in printf-type functions.
02588   - "\%" is an undefined character escape sequence.
02589   - text after #endif is not allowed.
02590   - invalid inner-style forward declaration.
02591   - >? and <? operators, and their >?= and <?= cousins.
02592 
02593   Additionally, check for constructor/destructor style violations and reference
02594   members, as it is very convenient to do so while checking for
02595   gcc-2 compliance.
02596 
02597   Args:
02598     filename: The name of the current file.
02599     clean_lines: A CleansedLines instance containing the file.
02600     linenum: The number of the line to check.
02601     nesting_state: A NestingState instance which maintains information about
02602                    the current stack of nested blocks being parsed.
02603     error: A callable to which errors are reported, which takes five arguments:
02604            filename, line number, error category, confidence level, and message.
02605   """
02606 
02607   # Remove comments from the line, but leave in strings for now.
02608   line = clean_lines.lines[linenum]
02609 
02610   if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
02611     error(filename, linenum, 'runtime/printf_format', 3,
02612           '%q in format strings is deprecated.  Use %ll instead.')
02613 
02614   if Search(r'printf\s*\(.*".*%\d+\$', line):
02615     error(filename, linenum, 'runtime/printf_format', 2,
02616           '%N$ formats are unconventional.  Try rewriting to avoid them.')
02617 
02618   # Remove escaped backslashes before looking for undefined escapes.
02619   line = line.replace('\\\\', '')
02620 
02621   if Search(r'("|\').*\\(%|\[|\(|{)', line):
02622     error(filename, linenum, 'build/printf_format', 3,
02623           '%, [, (, and { are undefined character escapes.  Unescape them.')
02624 
02625   # For the rest, work with both comments and strings removed.
02626   line = clean_lines.elided[linenum]
02627 
02628   if Search(r'\b(const|volatile|void|char|short|int|long'
02629             r'|float|double|signed|unsigned'
02630             r'|schar|u?int8|u?int16|u?int32|u?int64)'
02631             r'\s+(register|static|extern|typedef)\b',
02632             line):
02633     error(filename, linenum, 'build/storage_class', 5,
02634           'Storage class (static, extern, typedef, etc) should be first.')
02635 
02636   if Match(r'\s*#\s*endif\s*[^/\s]+', line):
02637     error(filename, linenum, 'build/endif_comment', 5,
02638           'Uncommented text after #endif is non-standard.  Use a comment.')
02639 
02640   if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
02641     error(filename, linenum, 'build/forward_decl', 5,
02642           'Inner-style forward declarations are invalid.  Remove this line.')
02643 
02644   if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
02645             line):
02646     error(filename, linenum, 'build/deprecated', 3,
02647           '>? and <? (max and min) operators are non-standard and deprecated.')
02648 
02649   if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
02650     # TODO(unknown): Could it be expanded safely to arbitrary references,
02651     # without triggering too many false positives? The first
02652     # attempt triggered 5 warnings for mostly benign code in the regtest, hence
02653     # the restriction.
02654     # Here's the original regexp, for the reference:
02655     # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
02656     # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
02657     error(filename, linenum, 'runtime/member_string_references', 2,
02658           'const string& members are dangerous. It is much better to use '
02659           'alternatives, such as pointers or simple constants.')
02660 
02661   # Everything else in this function operates on class declarations.
02662   # Return early if the top of the nesting stack is not a class, or if
02663   # the class head is not completed yet.
02664   classinfo = nesting_state.InnermostClass()
02665   if not classinfo or not classinfo.seen_open_brace:
02666     return
02667 
02668   # The class may have been declared with namespace or classname qualifiers.
02669   # The constructor and destructor will not have those qualifiers.
02670   base_classname = classinfo.name.split('::')[-1]
02671 
02672   # Look for single-argument constructors that aren't marked explicit.
02673   # Technically a valid construct, but against style. Also look for
02674   # non-single-argument constructors which are also technically valid, but
02675   # strongly suggest something is wrong.
02676   explicit_constructor_match = Match(
02677       r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
02678       r'\(((?:[^()]|\([^()]*\))*)\)'
02679       % re.escape(base_classname),
02680       line)
02681 
02682   if explicit_constructor_match:
02683     is_marked_explicit = explicit_constructor_match.group(1)
02684 
02685     if not explicit_constructor_match.group(2):
02686       constructor_args = []
02687     else:
02688       constructor_args = explicit_constructor_match.group(2).split(',')
02689 
02690     # collapse arguments so that commas in template parameter lists and function
02691     # argument parameter lists don't split arguments in two
02692     i = 0
02693     while i < len(constructor_args):
02694       constructor_arg = constructor_args[i]
02695       while (constructor_arg.count('<') > constructor_arg.count('>') or
02696              constructor_arg.count('(') > constructor_arg.count(')')):
02697         constructor_arg += ',' + constructor_args[i + 1]
02698         del constructor_args[i + 1]
02699       constructor_args[i] = constructor_arg
02700       i += 1
02701 
02702     defaulted_args = [arg for arg in constructor_args if '=' in arg]
02703     noarg_constructor = (not constructor_args or  # empty arg list
02704                          # 'void' arg specifier
02705                          (len(constructor_args) == 1 and
02706                           constructor_args[0].strip() == 'void'))
02707     onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
02708                            not noarg_constructor) or
02709                           # all but at most one arg defaulted
02710                           (len(constructor_args) >= 1 and
02711                            not noarg_constructor and
02712                            len(defaulted_args) >= len(constructor_args) - 1))
02713     initializer_list_constructor = bool(
02714         onearg_constructor and
02715         Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
02716     copy_constructor = bool(
02717         onearg_constructor and
02718         Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
02719               % re.escape(base_classname), constructor_args[0].strip()))
02720 
02721     if (not is_marked_explicit and
02722         onearg_constructor and
02723         not initializer_list_constructor and
02724         not copy_constructor):
02725       if defaulted_args:
02726         error(filename, linenum, 'runtime/explicit', 5,
02727               'Constructors callable with one argument '
02728               'should be marked explicit.')
02729       else:
02730         error(filename, linenum, 'runtime/explicit', 5,
02731               'Single-parameter constructors should be marked explicit.')
02732     elif is_marked_explicit and not onearg_constructor:
02733       if noarg_constructor:
02734         error(filename, linenum, 'runtime/explicit', 5,
02735               'Zero-parameter constructors should not be marked explicit.')
02736       else:
02737         error(filename, linenum, 'runtime/explicit', 0,
02738               'Constructors that require multiple arguments '
02739               'should not be marked explicit.')
02740 
02741 
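# Illustrative sketch (not part of cpplint.py): the argument-collapsing loop
# above, run in isolation.  _example_collapse_constructor_args is a
# hypothetical helper name; it re-joins pieces whose '<' / '(' counts exceed
# their '>' / ')' counts, so a comma inside "map<int, int>" no longer splits
# one constructor parameter into two.
def _example_collapse_constructor_args():
  args = 'const std::map<int, int>& m, int count'.split(',')
  i = 0
  while i < len(args):
    arg = args[i]
    while (arg.count('<') > arg.count('>') or
           arg.count('(') > arg.count(')')):
      # Re-attach the next piece; the comma belonged inside a template or
      # nested parenthesis, not between two parameters.
      arg += ',' + args[i + 1]
      del args[i + 1]
    args[i] = arg
    i += 1
  # args is now ['const std::map<int, int>& m', ' int count'].
  return args

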
02742 def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
02743   """Checks for the correctness of various spacing around function calls.
02744 
02745   Args:
02746     filename: The name of the current file.
02747     clean_lines: A CleansedLines instance containing the file.
02748     linenum: The number of the line to check.
02749     error: The function to call with any errors found.
02750   """
02751   line = clean_lines.elided[linenum]
02752 
02753   # Since function calls often occur inside if/for/while/switch
02754   # expressions - which have their own, more liberal conventions - we
02755   # first see if we should be looking inside such an expression for a
02756   # function call, to which we can apply more strict standards.
02757   fncall = line    # if there's no control flow construct, look at whole line
02758   for pattern in (r'\bif\s*\((.*)\)\s*{',
02759                   r'\bfor\s*\((.*)\)\s*{',
02760                   r'\bwhile\s*\((.*)\)\s*[{;]',
02761                   r'\bswitch\s*\((.*)\)\s*{'):
02762     match = Search(pattern, line)
02763     if match:
02764       fncall = match.group(1)    # look inside the parens for function calls
02765       break
02766 
02767   # Except in if/for/while/switch, there should never be space
02768   # immediately inside parens (e.g. "f( 3, 4 )").  We make an exception
02769   # for nested parens ( (a+b) + c ).  Likewise, there should never be
02770   # a space before a ( when it's a function argument.  I assume it's a
02771   # function argument when the char before the whitespace is legal in
02772   # a function name (alnum + _) and we're not starting a macro. Also ignore
02773   # pointers and references to arrays and functions because they're too tricky:
02774   # we use a very simple way to recognize these:
02775   # " (something)(maybe-something)" or
02776   # " (something)(maybe-something," or
02777   # " (something)[something]"
02778   # Note that we assume the contents of [] to be short enough that
02779   # they'll never need to wrap.
02780   if (  # Ignore control structures.
02781       not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
02782                  fncall) and
02783       # Ignore pointers/references to functions.
02784       not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
02785       # Ignore pointers/references to arrays.
02786       not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
02787     if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
02788       error(filename, linenum, 'whitespace/parens', 4,
02789             'Extra space after ( in function call')
02790     elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
02791       error(filename, linenum, 'whitespace/parens', 2,
02792             'Extra space after (')
02793     if (Search(r'\w\s+\(', fncall) and
02794         not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
02795         not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
02796         not Search(r'\bcase\s+\(', fncall)):
02797       # TODO(unknown): Space after an operator function seems to be a common
02798       # error; silence those for now by restricting them to highest verbosity.
02799       if Search(r'\boperator_*\b', line):
02800         error(filename, linenum, 'whitespace/parens', 0,
02801               'Extra space before ( in function call')
02802       else:
02803         error(filename, linenum, 'whitespace/parens', 4,
02804               'Extra space before ( in function call')
02805     # If the ) is followed only by a newline or a { + newline, assume it's
02806     # part of a control statement (if/while/etc), and don't complain
02807     if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
02808       # If the closing parenthesis is preceded only by whitespace,
02809       # try to give a more descriptive error message.
02810       if Search(r'^\s+\)', fncall):
02811         error(filename, linenum, 'whitespace/parens', 2,
02812               'Closing ) should be moved to the previous line')
02813       else:
02814         error(filename, linenum, 'whitespace/parens', 2,
02815               'Extra space before )')
02816 
02817 
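# Illustrative sketch (not part of cpplint.py): the two "space after ("
# expressions used above, tried directly with the re module.
# _example_space_after_paren is a hypothetical demo name, not cpplint API.
def _example_space_after_paren():
  import re
  fn_call = r'\w\s*\(\s(?!\s*\\$)'   # a ( used for a function call
  generic = r'\(\s+(?!(\s*\\)|\()'   # any other ( followed by whitespace
  assert re.search(fn_call, 'Foo( 3, 4)')         # flagged: space after ( in call
  assert not re.search(fn_call, 'Foo(3, 4)')      # clean
  assert re.search(generic, '= ( a + b) * c')     # flagged: extra space after (
  assert not re.search(generic, '( (a+b) + c )')  # nested-paren exception, silenced

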
02818 def IsBlankLine(line):
02819   """Returns true if the given line is blank.
02820 
02821   We consider a line to be blank if the line is empty or consists of
02822   only whitespace.
02823 
02824   Args:
02825     line: A single line of the file, as a string.
02826 
02827   Returns:
02828     True, if the given line is blank.
02829   """
02830   return not line or line.isspace()
02831 
02832 
02833 def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
02834                                  error):
02835   is_namespace_indent_item = (
02836       len(nesting_state.stack) > 1 and
02837       nesting_state.stack[-1].check_namespace_indentation and
02838       isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
02839       nesting_state.previous_stack_top == nesting_state.stack[-2])
02840 
02841   if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
02842                                      clean_lines.elided, line):
02843     CheckItemIndentationInNamespace(filename, clean_lines.elided,
02844                                     line, error)
02845 
02846 
02847 def CheckForFunctionLengths(filename, clean_lines, linenum,
02848                             function_state, error):
02849   """Reports errors for long function bodies.
02850 
02851   For an overview why this is done, see:
02852   http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
02853 
02854   Uses a simplistic algorithm assuming other style guidelines
02855   (especially spacing) are followed.
02856   Only checks unindented functions, so class members are unchecked.
02857   Trivial bodies are unchecked, so constructors with huge initializer lists
02858   may be missed.
02859   Blank/comment lines are not counted so as to avoid encouraging the removal
02860   of vertical space and comments just to get through a lint check.
02861   NOLINT *on the last line of a function* disables this check.
02862 
02863   Args:
02864     filename: The name of the current file.
02865     clean_lines: A CleansedLines instance containing the file.
02866     linenum: The number of the line to check.
02867     function_state: Current function name and lines in body so far.
02868     error: The function to call with any errors found.
02869   """
02870   lines = clean_lines.lines
02871   line = lines[linenum]
02872   joined_line = ''
02873 
02874   starting_func = False
02875   regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
02876   match_result = Match(regexp, line)
02877   if match_result:
02878     # If the name is all caps and underscores, figure it's a macro and
02879     # ignore it, unless it's TEST or TEST_F.
02880     function_name = match_result.group(1).split()[-1]
02881     if function_name == 'TEST' or function_name == 'TEST_F' or (
02882         not Match(r'[A-Z_]+$', function_name)):
02883       starting_func = True
02884 
02885   if starting_func:
02886     body_found = False
02887     for start_linenum in xrange(linenum, clean_lines.NumLines()):
02888       start_line = lines[start_linenum]
02889       joined_line += ' ' + start_line.lstrip()
02890       if Search(r'(;|})', start_line):  # Declarations and trivial functions
02891         body_found = True
02892         break                              # ... ignore
02893       elif Search(r'{', start_line):
02894         body_found = True
02895         function = Search(r'((\w|:)*)\(', line).group(1)
02896         if Match(r'TEST', function):    # Handle TEST... macros
02897           parameter_regexp = Search(r'(\(.*\))', joined_line)
02898           if parameter_regexp:             # Ignore bad syntax
02899             function += parameter_regexp.group(1)
02900         else:
02901           function += '()'
02902         function_state.Begin(function)
02903         break
02904     if not body_found:
02905       # No body for the function (or evidence of a non-function) was found.
02906       error(filename, linenum, 'readability/fn_size', 5,
02907             'Lint failed to find start of function body.')
02908   elif Match(r'^\}\s*$', line):  # function end
02909     function_state.Check(error, filename, linenum)
02910     function_state.End()
02911   elif not Match(r'^\s*$', line):
02912     function_state.Count()  # Count non-blank/non-comment lines.
02913 
02914 
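# Illustrative sketch (not part of cpplint.py): the declaration pattern used
# at the top of CheckForFunctionLengths.  The last whitespace-separated word
# of group(1) becomes the candidate function name, which is later used to
# skip ALL_CAPS macros other than TEST/TEST_F.  _example_function_name is a
# hypothetical demo name, not cpplint API.
def _example_function_name(line):
  import re
  match = re.match(r'(\w(\w|::|\*|\&|\s)*)\(', line)  # decls * & space::name(
  if not match:
    return None
  return match.group(1).split()[-1]

# _example_function_name('void MyClass::DoWork(int x) {')  -> 'MyClass::DoWork'
# _example_function_name('MY_MACRO(arg)')                  -> 'MY_MACRO'

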
02915 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
02916 
02917 
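# Illustrative sketch (not part of cpplint.py): what the TODO pattern above
# captures.  group(1) is the whitespace between // and TODO, group(2) the
# (username), group(3) the whitespace after the optional colon; CheckComment
# below bases its three TODO diagnostics on these groups.
# _example_todo_groups is a hypothetical demo name, not cpplint API.
def _example_todo_groups():
  import re
  pattern = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')  # same as above
  good = pattern.match('// TODO(jane): refactor this')
  assert good.group(1) == ' '       # exactly one space before TODO: OK
  assert good.group(2) == '(jane)'  # username present: OK
  assert good.group(3) == ' '       # single space after the colon: OK
  bad = pattern.match('//   TODO: fix')
  assert len(bad.group(1)) > 1      # too many spaces before TODO
  assert bad.group(2) is None       # missing username

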
02918 def CheckComment(line, filename, linenum, next_line_start, error):
02919   """Checks for common mistakes in comments.
02920 
02921   Args:
02922     line: The line in question.
02923     filename: The name of the current file.
02924     linenum: The number of the line to check.
02925     next_line_start: The first non-whitespace column of the next line.
02926     error: The function to call with any errors found.
02927   """
02928   commentpos = line.find('//')
02929   if commentpos != -1:
02930     # Check if the // may be in quotes.  If so, ignore it
02931     # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
02932     if (line.count('"', 0, commentpos) -
02933         line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
02934       # Allow one space for new scopes, two spaces otherwise:
02935       if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
02936           ((commentpos >= 1 and
02937             line[commentpos-1] not in string.whitespace) or
02938            (commentpos >= 2 and
02939             line[commentpos-2] not in string.whitespace))):
02940         error(filename, linenum, 'whitespace/comments', 2,
02941               'At least two spaces is best between code and comments')
02942 
02943       # Checks for common mistakes in TODO comments.
02944       comment = line[commentpos:]
02945       match = _RE_PATTERN_TODO.match(comment)
02946       if match:
02947         # One space is correct; zero spaces is handled elsewhere.
02948         leading_whitespace = match.group(1)
02949         if len(leading_whitespace) > 1:
02950           error(filename, linenum, 'whitespace/todo', 2,
02951                 'Too many spaces before TODO')
02952 
02953         username = match.group(2)
02954         if not username:
02955           error(filename, linenum, 'readability/todo', 2,
02956                 'Missing username in TODO; it should look like '
02957                 '"// TODO(my_username): Stuff."')
02958 
02959         middle_whitespace = match.group(3)
02960         # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
02961         if middle_whitespace != ' ' and middle_whitespace != '':
02962           error(filename, linenum, 'whitespace/todo', 2,
02963                 'TODO(my_username) should be followed by a space')
02964 
02965       # If the comment contains an alphanumeric character, there
02966       # should be a space somewhere between it and the // unless
02967       # it's a /// or //! Doxygen comment.
02968       if (Match(r'//[^ ]*\w', comment) and
02969           not Match(r'(///|//\!)(\s+|$)', comment)):
02970         error(filename, linenum, 'whitespace/comments', 4,
02971               'Should have a space between // and comment')
02972 
02973 
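# Illustrative sketch (not part of cpplint.py): the quote-counting test at
# the top of CheckComment.  An even number of unescaped '"' characters before
# the '//' means the '//' starts a real comment; an odd count means it sits
# inside a string literal and is ignored.  _example_comment_inside_string is
# a hypothetical demo name, not cpplint API.
def _example_comment_inside_string(line):
  commentpos = line.find('//')
  if commentpos == -1:
    return False
  quotes = (line.count('"', 0, commentpos) -
            line.count('\\"', 0, commentpos))
  return quotes % 2 == 1  # True if the // is inside a string

# _example_comment_inside_string('int x;  // comment')    -> False (real comment)
# _example_comment_inside_string('Print("http://a.b");')  -> True  (// in a string)

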
02974 def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
02975   """Checks for improper use of DISALLOW* macros.
02976 
02977   Args:
02978     filename: The name of the current file.
02979     clean_lines: A CleansedLines instance containing the file.
02980     linenum: The number of the line to check.
02981     nesting_state: A NestingState instance which maintains information about
02982                    the current stack of nested blocks being parsed.
02983     error: The function to call with any errors found.
02984   """
02985   line = clean_lines.elided[linenum]  # get rid of comments and strings
02986 
02987   matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
02988                    r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
02989   if not matched:
02990     return
02991   if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
02992     if nesting_state.stack[-1].access != 'private':
02993       error(filename, linenum, 'readability/constructors', 3,
02994             '%s must be in the private: section' % matched.group(1))
02995 
02996   else:
02997     # Found DISALLOW* macro outside a class declaration, or perhaps it
02998     # was used inside a function when it should have been part of the
02999     # class declaration.  We could issue a warning here, but it
03000     # probably resulted in a compiler error already.
03001     pass
03002 
03003 
03004 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
03005   """Checks for the correctness of various spacing issues in the code.
03006 
03007   Things we check for: spaces around operators, spaces after
03008   if/for/while/switch, no spaces around parens in function calls, two
03009   spaces between code and comment, don't start a block with a blank
03010   line, don't end a function with a blank line, don't add a blank line
03011   after public/protected/private, don't have too many blank lines in a row.
03012 
03013   Args:
03014     filename: The name of the current file.
03015     clean_lines: A CleansedLines instance containing the file.
03016     linenum: The number of the line to check.
03017     nesting_state: A NestingState instance which maintains information about
03018                    the current stack of nested blocks being parsed.
03019     error: The function to call with any errors found.
03020   """
03021 
03022   # Don't use "elided" lines here, otherwise we can't check commented lines.
03023   # Don't want to use "raw" either, because we don't want to check inside C++11
03024   # raw strings.
03025   raw = clean_lines.lines_without_raw_strings
03026   line = raw[linenum]
03027 
03028   # Before nixing comments, check if the line is blank for no good
03029   # reason.  This includes the first line after a block is opened, and
03030   # blank lines at the end of a function (i.e., right before a line like '}').
03031   #
03032   # Skip all the blank line checks if we are immediately inside a
03033   # namespace body.  In other words, don't issue blank line warnings
03034   # for this block:
03035   #   namespace {
03036   #
03037   #   }
03038   #
03039   # A warning about missing end of namespace comments will be issued instead.
03040   #
03041   # Also skip blank line checks for 'extern "C"' blocks, which are formatted
03042   # like namespaces.
03043   if (IsBlankLine(line) and
03044       not nesting_state.InNamespaceBody() and
03045       not nesting_state.InExternC()):
03046     elided = clean_lines.elided
03047     prev_line = elided[linenum - 1]
03048     prevbrace = prev_line.rfind('{')
03049     # TODO(unknown): Don't complain if line before blank line, and line after,
03050     #                both start with alnums and are indented the same amount.
03051     #                This ignores whitespace at the start of a namespace block
03052     #                because those are not usually indented.
03053     if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
03054       # OK, we have a blank line at the start of a code block.  Before we
03055       # complain, we check if it is an exception to the rule: The previous
03056       # non-empty line has the parameters of a function header that are indented
03057       # 4 spaces (because they did not fit in an 80-column line when placed on
03058       # the same line as the function name).  We also check for the case where
03059       # the previous line is indented 6 spaces, which may happen when the
03060       # initializers of a constructor do not fit into an 80-column line.
03061       exception = False
03062       if Match(r' {6}\w', prev_line):  # Initializer list?
03063         # We are looking for the opening column of the initializer list, which
03064         # should be indented 4 spaces to cause 6-space indentation afterwards.
03065         search_position = linenum-2
03066         while (search_position >= 0
03067                and Match(r' {6}\w', elided[search_position])):
03068           search_position -= 1
03069         exception = (search_position >= 0
03070                      and elided[search_position][:5] == '    :')
03071       else:
03072         # Search for the function arguments or an initializer list.  We use a
03073         # simple heuristic here: if the line is indented 4 spaces and we have a
03074         # closing paren without the opening paren, followed by an opening brace
03075         # or colon (for initializer lists), we assume that it is the last line of
03076         # a function header.  If we have a colon indented 4 spaces, it is an
03077         # initializer list.
03078         exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
03079                            prev_line)
03080                      or Match(r' {4}:', prev_line))
03081 
03082       if not exception:
03083         error(filename, linenum, 'whitespace/blank_line', 2,
03084               'Redundant blank line at the start of a code block '
03085               'should be deleted.')
03086     # Ignore blank lines at the end of a block in a long if-else
03087     # chain, like this:
03088     #   if (condition1) {
03089     #     // Something followed by a blank line
03090     #
03091     #   } else if (condition2) {
03092     #     // Something else
03093     #   }
03094     if linenum + 1 < clean_lines.NumLines():
03095       next_line = raw[linenum + 1]
03096       if (next_line
03097           and Match(r'\s*}', next_line)
03098           and next_line.find('} else ') == -1):
03099         error(filename, linenum, 'whitespace/blank_line', 3,
03100               'Redundant blank line at the end of a code block '
03101               'should be deleted.')
03102 
03103     matched = Match(r'\s*(public|protected|private):', prev_line)
03104     if matched:
03105       error(filename, linenum, 'whitespace/blank_line', 3,
03106             'Do not leave a blank line after "%s:"' % matched.group(1))
03107 
03108   # Next, check comments
03109   next_line_start = 0
03110   if linenum + 1 < clean_lines.NumLines():
03111     next_line = raw[linenum + 1]
03112     next_line_start = len(next_line) - len(next_line.lstrip())
03113   CheckComment(line, filename, linenum, next_line_start, error)
03114 
03115   # get rid of comments and strings
03116   line = clean_lines.elided[linenum]
03117 
03118   # You shouldn't have spaces before your brackets, except maybe after
03119   # 'delete []' or 'return []() {};'
03120   if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
03121     error(filename, linenum, 'whitespace/braces', 5,
03122           'Extra space before [')
03123 
03124   # In a range-based for loop, we want spaces before and after the colon, but
03125   # not around "::" tokens that might appear.
03126   if (Search(r'for *\(.*[^:]:[^: ]', line) or
03127       Search(r'for *\(.*[^: ]:[^:]', line)):
03128     error(filename, linenum, 'whitespace/forcolon', 2,
03129           'Missing space around colon in range-based for loop')
03130 
03131 
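# Illustrative sketch (not part of cpplint.py): the two range-based-for
# patterns used at the end of CheckSpacing.  Each requires a ':' that is not
# part of a '::' token and is missing a space on at least one side.
# _example_range_for_colon is a hypothetical demo name, not cpplint API.
def _example_range_for_colon(line):
  import re
  return bool(re.search(r'for *\(.*[^:]:[^: ]', line) or
              re.search(r'for *\(.*[^: ]:[^:]', line))

# _example_range_for_colon('for (int x: values) {')       -> True  (missing space)
# _example_range_for_colon('for (int x : values) {')      -> False
# _example_range_for_colon('for (auto x : ns::values) {') -> False ('::' ignored)

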
03132 def CheckOperatorSpacing(filename, clean_lines, linenum, error):
03133   """Checks for horizontal spacing around operators.
03134 
03135   Args:
03136     filename: The name of the current file.
03137     clean_lines: A CleansedLines instance containing the file.
03138     linenum: The number of the line to check.
03139     error: The function to call with any errors found.
03140   """
03141   line = clean_lines.elided[linenum]
03142 
03143   # Don't try to do spacing checks for operator methods.  Do this by
03144   # replacing the troublesome characters with something else,
03145   # preserving column position for all other characters.
03146   #
03147   # The replacement is done repeatedly to avoid false positives from
03148   # operators that call operators.
03149   while True:
03150     match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
03151     if match:
03152       line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
03153     else:
03154       break
03155 
03156   # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
03157   # Otherwise not.  Note we only check for non-spaces on *both* sides;
03158   # sometimes people put non-spaces on one side when aligning ='s among
03159   # many lines (not that this is behavior that I approve of...)
03160   if ((Search(r'[\w.]=', line) or
03161        Search(r'=[\w.]', line))
03162       and not Search(r'\b(if|while|for) ', line)
03163       # Operators taken from [lex.operators] in C++11 standard.
03164       and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
03165       and not Search(r'operator=', line)):
03166     error(filename, linenum, 'whitespace/operators', 4,
03167           'Missing spaces around =')
03168 
03169   # It's ok not to have spaces around binary operators like + - * /, but if
03170   # there's too little whitespace, we get concerned.  It's hard to tell,
03171   # though, so we punt on this one for now.  TODO.
03172 
03173   # You should always have whitespace around binary operators.
03174   #
03175   # Check <= and >= first to avoid false positives with < and >, then
03176   # check non-include lines for spacing around < and >.
03177   #
03178   # If the operator is followed by a comma, assume it's being used in a
03179   # macro context and don't do any checks.  This avoids false
03180   # positives.
03181   #
03182   # Note that && is not included here.  Those are checked separately
03183   # in CheckRValueReference
03184   match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
03185   if match:
03186     error(filename, linenum, 'whitespace/operators', 3,
03187           'Missing spaces around %s' % match.group(1))
03188   elif not Match(r'#.*include', line):
03189     # Look for < that is not surrounded by spaces.  This is only
03190     # triggered if both sides are missing spaces, even though
03191     # technically we should flag it if at least one side is missing a
03192     # space.  This is done to avoid some false positives with shifts.
03193     match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
03194     if match:
03195       (_, _, end_pos) = CloseExpression(
03196           clean_lines, linenum, len(match.group(1)))
03197       if end_pos <= -1:
03198         error(filename, linenum, 'whitespace/operators', 3,
03199               'Missing spaces around <')
03200 
03201     # Look for > that is not surrounded by spaces.  Similar to the
03202     # above, we only trigger if both sides are missing spaces to avoid
03203     # false positives with shifts.
03204     match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
03205     if match:
03206       (_, _, start_pos) = ReverseCloseExpression(
03207           clean_lines, linenum, len(match.group(1)))
03208       if start_pos <= -1:
03209         error(filename, linenum, 'whitespace/operators', 3,
03210               'Missing spaces around >')
03211 
03212   # We allow no-spaces around << when used like this: 10<<20, but
03213   # not otherwise (particularly, not when used as streams)
03214   #
03215   # We also allow operators following an opening parenthesis, since
03216   # those tend to be macros that deal with operators.
03217   match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
03218   if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
03219       not (match.group(1) == 'operator' and match.group(2) == ';')):
03220     error(filename, linenum, 'whitespace/operators', 3,
03221           'Missing spaces around <<')
03222 
03223   # We allow no-spaces around >> for almost anything.  This is because
03224   # C++11 allows ">>" to close nested templates, which accounts for
03225   # most cases when ">>" is not followed by a space.
03226   #
03227   # We still warn on ">>" followed by an alphabetic character, because that is
03228   # likely due to ">>" being used for right shifts, e.g.:
03229   #   value >> alpha
03230   #
03231   # When ">>" is used to close templates, the alphanumeric letter that
03232   # follows would be part of an identifier, and there should still be
03233   # a space separating the template type and the identifier.
03234   #   type<type<type>> alpha
03235   match = Search(r'>>[a-zA-Z_]', line)
03236   if match:
03237     error(filename, linenum, 'whitespace/operators', 3,
03238           'Missing spaces around >>')
03239 
03240   # There shouldn't be space around unary operators
03241   match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
03242   if match:
03243     error(filename, linenum, 'whitespace/operators', 4,
03244           'Extra space for operator %s' % match.group(1))
03245 
03246 
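# Illustrative sketch (not part of cpplint.py): the masking loop at the top
# of CheckOperatorSpacing.  Replacing the operator's symbol with the same
# number of underscores keeps every other character in its original column,
# so the spacing checks that follow still line up.  _example_mask_operators
# is a hypothetical demo name, not cpplint API.
def _example_mask_operators(line):
  import re
  while True:
    match = re.match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if not match:
      break
    line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
  return line

# _example_mask_operators('bool operator==(const Foo& a, const Foo& b);')
#   -> 'bool operator__(const Foo& a, const Foo& b);'

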
03247 def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
03248   """Checks for horizontal spacing around parentheses.
03249 
03250   Args:
03251     filename: The name of the current file.
03252     clean_lines: A CleansedLines instance containing the file.
03253     linenum: The number of the line to check.
03254     error: The function to call with any errors found.
03255   """
03256   line = clean_lines.elided[linenum]
03257 
03258   # Flag a missing space after if, while, switch, or for (e.g. "if(")
03259   match = Search(r' (if\(|for\(|while\(|switch\()', line)
03260   if match:
03261     error(filename, linenum, 'whitespace/parens', 5,
03262           'Missing space before ( in %s' % match.group(1))
03263 
03264   # For if/for/while/switch, the left and right parens should be
03265   # consistent about how many spaces are inside the parens, and
03266   # there should either be zero or one spaces inside the parens.
03267   # We don't want: "if ( foo)" or "if ( foo   )".
03268   # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
03269   match = Search(r'\b(if|for|while|switch)\s*'
03270                  r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
03271                  line)
03272   if match:
03273     if len(match.group(2)) != len(match.group(4)):
03274       if not (match.group(3) == ';' and
03275               len(match.group(2)) == 1 + len(match.group(4)) or
03276               not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
03277         error(filename, linenum, 'whitespace/parens', 5,
03278               'Mismatching spaces inside () in %s' % match.group(1))
03279     if len(match.group(2)) not in [0, 1]:
03280       error(filename, linenum, 'whitespace/parens', 5,
03281             'Should have zero or one spaces inside ( and ) in %s' %
03282             match.group(1))
03283 
03284 
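# Illustrative sketch (not part of cpplint.py): the if/for/while/switch
# pattern used in CheckParenthesisSpacing.  group(2) and group(4) capture the
# spaces just inside the parentheses, so comparing their lengths detects
# "if ( foo)" style mismatches.  _example_paren_padding is a hypothetical
# demo name, not cpplint API.
def _example_paren_padding(line):
  import re
  match = re.search(r'\b(if|for|while|switch)\s*'
                    r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                    line)
  if not match:
    return None
  return (len(match.group(2)), len(match.group(4)))

# _example_paren_padding('if ( foo) {')   -> (1, 0)  mismatch: flagged
# _example_paren_padding('if ( foo ) {')  -> (1, 1)  consistent: OK
# _example_paren_padding('if (foo) {')    -> (0, 0)  consistent: OK

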
03285 def CheckCommaSpacing(filename, clean_lines, linenum, error):
03286   """Checks for horizontal spacing near commas and semicolons.
03287 
03288   Args:
03289     filename: The name of the current file.
03290     clean_lines: A CleansedLines instance containing the file.
03291     linenum: The number of the line to check.
03292     error: The function to call with any errors found.
03293   """
03294   raw = clean_lines.lines_without_raw_strings
03295   line = clean_lines.elided[linenum]
03296 
03297   # You should always have a space after a comma (either as fn arg or operator)
03298   #
03299   # This does not apply when the non-space character following the
03300   # comma is another comma, since the only time when that happens is
03301   # for empty macro arguments.
03302   #
03303   # We run this check in two passes: first pass on elided lines to
03304   # verify that lines contain missing whitespaces, second pass on raw
03305   # lines to confirm that those missing whitespaces are not due to
03306   # elided comments.
03307   if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
03308       Search(r',[^,\s]', raw[linenum])):
03309     error(filename, linenum, 'whitespace/comma', 3,
03310           'Missing space after ,')
03311 
03312   # You should always have a space after a semicolon
03313   # except for a few corner cases
03314   # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
03315   # space after ;
03316   if Search(r';[^\s};\\)/]', line):
03317     error(filename, linenum, 'whitespace/semicolon', 3,
03318           'Missing space after ;')
03319 
03320 
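# Illustrative sketch (not part of cpplint.py): the comma test above, with
# cpplint's ReplaceAll helper approximated by re.sub.  "operator,(" is masked
# first so that declaring a comma operator is not reported; the real check
# additionally re-tests the raw line to rule out elided comments.
# _example_missing_space_after_comma is a hypothetical demo name.
def _example_missing_space_after_comma(line):
  import re
  masked = re.sub(r'\boperator\s*,\s*\(', 'F(', line)
  return bool(re.search(r',[^,\s]', masked))

# _example_missing_space_after_comma('Foo(a,b);')              -> True
# _example_missing_space_after_comma('Foo(a, b);')             -> False
# _example_missing_space_after_comma('bool operator,(Foo a);') -> False

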
03321 def CheckBracesSpacing(filename, clean_lines, linenum, error):
03322   """Checks for horizontal spacing near braces and semicolons.
03323 
03324   Args:
03325     filename: The name of the current file.
03326     clean_lines: A CleansedLines instance containing the file.
03327     linenum: The number of the line to check.
03328     error: The function to call with any errors found.
03329   """
03330   line = clean_lines.elided[linenum]
03331 
03332   # Except after an opening paren, or after another opening brace (in case of
03333   # an initializer list, for instance), you should have spaces before your
03334   # braces. And since you should never have braces at the beginning of a line,
03335   # this is an easy test.
03336   match = Match(r'^(.*[^ ({>]){', line)
03337   if match:
03338     # Try a bit harder to check for brace initialization.  This
03339     # happens in one of the following forms:
03340     #   Constructor() : initializer_list_{} { ... }
03341     #   Constructor{}.MemberFunction()
03342     #   Type variable{};
03343     #   FunctionCall(type{}, ...);
03344     #   LastArgument(..., type{});
03345     #   LOG(INFO) << type{} << " ...";
03346     #   map_of_type[{...}] = ...;
03347     #   ternary = expr ? new type{} : nullptr;
03348     #   OuterTemplate<InnerTemplateConstructor<Type>{}>
03349     #
03350     # We check for the character following the closing brace, and
03351     # silence the warning if it's one of those listed above, i.e.
03352     # "{.;,)<>]:".
03353     #
03354     # To account for nested initializer lists, we allow any number of
03355     # closing braces up to "{;,)<".  We can't simply silence the
03356     # warning on first sight of closing brace, because that would
03357     # cause false negatives for things that are not initializer lists.
03358     #   Silence this:         But not this:
03359     #     Outer{                if (...) {
03360     #       Inner{...}            if (...){  // Missing space before {
03361     #     };                    }
03362     #
03363     # There is a false negative with this approach if people inserted
03364     # spurious semicolons, e.g. "if (cond){};", but we will catch the
03365     # spurious semicolon with a separate check.
03366     (endline, endlinenum, endpos) = CloseExpression(
03367         clean_lines, linenum, len(match.group(1)))
03368     trailing_text = ''
03369     if endpos > -1:
03370       trailing_text = endline[endpos:]
03371     for offset in xrange(endlinenum + 1,
03372                          min(endlinenum + 3, clean_lines.NumLines() - 1)):
03373       trailing_text += clean_lines.elided[offset]
03374     if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
03375       error(filename, linenum, 'whitespace/braces', 5,
03376             'Missing space before {')
03377 
03378   # Make sure '} else {' has spaces.
03379   if Search(r'}else', line):
03380     error(filename, linenum, 'whitespace/braces', 5,
03381           'Missing space before else')
03382 
03383   # You shouldn't have a space before a semicolon at the end of the line.
03384   # There's a special case for "for" since the style guide allows space before
03385   # the semicolon there.
03386   if Search(r':\s*;\s*$', line):
03387     error(filename, linenum, 'whitespace/semicolon', 5,
03388           'Semicolon defining empty statement. Use {} instead.')
03389   elif Search(r'^\s*;\s*$', line):
03390     error(filename, linenum, 'whitespace/semicolon', 5,
03391           'Line contains only semicolon. If this should be an empty statement, '
03392           'use {} instead.')
03393   elif (Search(r'\s+;\s*$', line) and
03394         not Search(r'\bfor\b', line)):
03395     error(filename, linenum, 'whitespace/semicolon', 5,
03396           'Extra space before last semicolon. If this should be an empty '
03397           'statement, use {} instead.')
03398 
03399 
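# Illustrative sketch (not part of cpplint.py): the trailing-text test in
# CheckBracesSpacing.  Once CloseExpression has found the matching '}', the
# first interesting character after it decides whether "Type{...}" was brace
# initialization (silenced) or ordinary code missing a space before '{'
# (reported).  _example_brace_init_trailing is a hypothetical demo name.
def _example_brace_init_trailing(trailing_text):
  import re
  return bool(re.match(r'^[\s}]*[{.;,)<>\]:]', trailing_text))

# _example_brace_init_trailing(';')         -> True  ("Type variable{};": silenced)
# _example_brace_init_trailing('  foo();')  -> False (report missing space before {)

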
03400 def IsDecltype(clean_lines, linenum, column):
03401   """Check if the token ending on (linenum, column) is decltype().
03402 
03403   Args:
03404     clean_lines: A CleansedLines instance containing the file.
03405     linenum: the number of the line to check.
03406     column: end column of the token to check.
03407   Returns:
03408     True if this token is a decltype() expression, False otherwise.
03409   """
03410   (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
03411   if start_col < 0:
03412     return False
03413   if Search(r'\bdecltype\s*$', text[0:start_col]):
03414     return True
03415   return False
03416 
03417 
03418 def IsTemplateParameterList(clean_lines, linenum, column):
03419   """Check if the token ending on (linenum, column) is the end of template<>.
03420 
03421   Args:
03422     clean_lines: A CleansedLines instance containing the file.
03423     linenum: the number of the line to check.
03424     column: end column of the token to check.
03425   Returns:
03426     True if this token is end of a template parameter list, False otherwise.
03427   """
03428   (_, startline, startpos) = ReverseCloseExpression(
03429       clean_lines, linenum, column)
03430   if (startpos > -1 and
03431       Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
03432     return True
03433   return False
03434 
03435 
03436 def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
03437   """Check if the token ending on (linenum, column) is a type.
03438 
03439   Assumes that text to the right of the column is "&&" or a function
03440   name.
03441 
03442   Args:
03443     typenames: set of type names from template-argument-list.
03444     clean_lines: A CleansedLines instance containing the file.
03445     nesting_state: A NestingState instance which maintains information about
03446                    the current stack of nested blocks being parsed.
03447     linenum: the number of the line to check.
03448     column: end column of the token to check.
03449   Returns:
03450     True if this token is a type, False if we are not sure.
03451   """
03452   prefix = clean_lines.elided[linenum][0:column]
03453 
03454   # Get one word to the left.  If we failed to do so, this is most
03455   # likely not a type, since it's unlikely that the type name and "&&"
03456   # would be split across multiple lines.
03457   match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
03458   if not match:
03459     return False
03460 
03461   # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
03462   # most likely an rvalue reference used inside a template.
03463   suffix = clean_lines.elided[linenum][column:]
03464   if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
03465     return True
03466 
03467   # Check for known types and end of templates:
03468   #   int&& variable
03469   #   vector<int>&& variable
03470   #
03471   # Because this function is called recursively, we also need to
03472   # recognize pointer and reference types:
03473   #   int* Function()
03474   #   int& Function()
03475   if (match.group(2) in typenames or
03476       match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
03477                          'short', 'int', 'long', 'signed', 'unsigned',
03478                          'float', 'double', 'void', 'auto', '>', '*', '&']):
03479     return True
03480 
03481   # If we see a close parenthesis, look for decltype on the other side.
03482   # decltype would unambiguously identify a type; anything else is
03483   # probably a parenthesized expression and not a type.
03484   if match.group(2) == ')':
03485     return IsDecltype(
03486         clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
03487 
03488   # Check for casts and cv-qualifiers.
03489   #   match.group(1)  remainder
03490   #   --------------  ---------
03491   #   const_cast<     type&&
03492   #   const           type&&
03493   #   type            const&&
03494   if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
03495             r'reinterpret_cast\s*<|\w+\s)\s*$',
03496             match.group(1)):
03497     return True
03498 
03499   # Look for a preceding symbol that might help differentiate the context.
03500   # These are the cases that would be ambiguous:
03501   #   match.group(1)  remainder
03502   #   --------------  ---------
03503   #   Call         (   expression &&
03504   #   Declaration  (   type&&
03505   #   sizeof       (   type&&
03506   #   if           (   expression &&
03507   #   while        (   expression &&
03508   #   for          (   type&&
03509   #   for(         ;   expression &&
03510   #   statement    ;   type&&
03511   #   block        {   type&&
03512   #   constructor  {   expression &&
03513   start = linenum
03514   line = match.group(1)
03515   match_symbol = None
03516   while start >= 0:
03517     # We want to skip over identifiers and commas to get to a symbol.
03518     # Commas are skipped so that we can find the opening parenthesis
03519     # for function parameter lists.
03520     match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
03521     if match_symbol:
03522       break
03523     start -= 1
03524     line = clean_lines.elided[start]
03525 
03526   if not match_symbol:
03527     # Probably the first statement in the file is an rvalue reference
03528     return True
03529 
03530   if match_symbol.group(2) == '}':
03531     # Found closing brace, probably an indication of this:
03532     #   block{} type&&
03533     return True
03534 
03535   if match_symbol.group(2) == ';':
03536     # Found semicolon, probably one of these:
03537     #   for(; expression &&
03538     #   statement; type&&
03539 
03540     # Look for the previous 'for(' in the previous lines.
03541     before_text = match_symbol.group(1)
03542     for i in xrange(start - 1, max(start - 6, 0), -1):
03543       before_text = clean_lines.elided[i] + before_text
03544     if Search(r'for\s*\([^{};]*$', before_text):
03545       # This is the condition inside a for-loop
03546       return False
03547 
03548     # Did not find a for-init-statement before this semicolon, so this
03549     # is probably a new statement and not a condition.
03550     return True
03551 
03552   if match_symbol.group(2) == '{':
03553     # Found opening brace, probably one of these:
03554     #   block{ type&& = ... ; }
03555     #   constructor{ expression && expression }
03556 
03557     # Look for a closing brace or a semicolon.  If we see a semicolon
03558     # first, this is probably an rvalue reference.
03559     line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
03560     end = start
03561     depth = 1
03562     while True:
03563       for ch in line:
03564         if ch == ';':
03565           return True
03566         elif ch == '{':
03567           depth += 1
03568         elif ch == '}':
03569           depth -= 1
03570           if depth == 0:
03571             return False
03572       end += 1
03573       if end >= clean_lines.NumLines():
03574         break
03575       line = clean_lines.elided[end]
03576     # Incomplete program?
03577     return False
03578 
03579   if match_symbol.group(2) == '(':
03580     # Opening parenthesis.  Need to check what's to the left of the
03581     # parenthesis.  Look back one extra line for additional context.
03582     before_text = match_symbol.group(1)
03583     if linenum > 1:
03584       before_text = clean_lines.elided[linenum - 1] + before_text
03586 
03587     # Patterns that are likely to be types:
03588     #   [](type&&
03589     #   for (type&&
03590     #   sizeof(type&&
03591     #   operator=(type&&
03592     #
03593     if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
03594       return True
03595 
03596     # Patterns that are likely to be expressions:
03597     #   if (expression &&
03598     #   while (expression &&
03599     #   : initializer(expression &&
03600     #   , initializer(expression &&
03601     #   ( FunctionCall(expression &&
03602     #   + FunctionCall(expression &&
03603     #   + (expression &&
03604     #
03605     # The last '+' represents operators such as '+' and '-'.
03606     if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
03607       return False
03608 
03609     # Something else.  Check that tokens to the left look like
03610     #   return_type function_name
03611     match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
03612                        match_symbol.group(1))
03613     if match_func:
03614       # Check for constructors, which don't have return types.
03615       if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
03616         return True
03617       implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
03618       if (implicit_constructor and
03619           implicit_constructor.group(1) == implicit_constructor.group(2)):
03620         return True
03621       return IsRValueType(typenames, clean_lines, nesting_state, linenum,
03622                           len(match_func.group(1)))
03623 
03624     # Nothing before the function name.  If this is inside a block scope,
03625     # this is probably a function call.
03626     return not (nesting_state.previous_stack_top and
03627                 nesting_state.previous_stack_top.IsBlockInfo())
03628 
03629   if match_symbol.group(2) == '>':
03630     # Possibly a closing bracket, check that what's on the other side
03631     # looks like the start of a template.
03632     return IsTemplateParameterList(
03633         clean_lines, start, len(match_symbol.group(1)))
03634 
03635   # Some other symbol, usually something like "a=b&&c".  This is most
03636   # likely not a type.
03637   return False
03638 
03639 
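# Illustrative sketch (not part of cpplint.py): the first step of IsRValueType
# above: grab the token immediately to the left of "&&" so it can be compared
# against the list of known type names.  _example_token_before_ampamp is a
# hypothetical demo name, not cpplint API.
def _example_token_before_ampamp(prefix):
  import re
  match = re.match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  return match.group(2) if match else None

# _example_token_before_ampamp('void Foo(int')  -> 'int' (a known type: rvalue ref)
# _example_token_before_ampamp('if (a ')        -> 'a'   (unknown: needs more context)

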
03640 def IsDeletedOrDefault(clean_lines, linenum):
03641   """Check if current constructor or operator is deleted or default.
03642 
03643   Args:
03644     clean_lines: A CleansedLines instance containing the file.
03645     linenum: The number of the line to check.
03646   Returns:
03647     True if this constructor or operator is deleted or defaulted.
03648   """
03649   open_paren = clean_lines.elided[linenum].find('(')
03650   if open_paren < 0:
03651     return False
03652   (close_line, _, close_paren) = CloseExpression(
03653       clean_lines, linenum, open_paren)
03654   if close_paren < 0:
03655     return False
03656   return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
03657 
03658 
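# Illustrative sketch (not part of cpplint.py): the tail pattern used by
# IsDeletedOrDefault once CloseExpression has located the closing ')'.
# _example_deleted_or_default_tail is a hypothetical demo name, not cpplint
# API; it is given only the text that follows the ')'.
def _example_deleted_or_default_tail(text_after_close_paren):
  import re
  return bool(re.match(r'\s*=\s*(?:delete|default)\b', text_after_close_paren))

# _example_deleted_or_default_tail(' = delete;')         -> True
# _example_deleted_or_default_tail(' { return *this; }') -> False

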
03659 def IsRValueAllowed(clean_lines, linenum, typenames):
03660   """Check if RValue reference is allowed on a particular line.
03661 
03662   Args:
03663     clean_lines: A CleansedLines instance containing the file.
03664     linenum: The number of the line to check.
03665     typenames: set of type names from template-argument-list.
03666   Returns:
03667     True if line is within the region where RValue references are allowed.
03668   """
03669   # Allow region marked by PUSH/POP macros
03670   for i in xrange(linenum, 0, -1):
03671     line = clean_lines.elided[i]
03672     if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
03673       if not line.endswith('PUSH'):
03674         return False
03675       for j in xrange(linenum, clean_lines.NumLines(), 1):
03676         line = clean_lines.elided[j]
03677         if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
03678           return line.endswith('POP')
03679 
03680   # Allow operator=
03681   line = clean_lines.elided[linenum]
03682   if Search(r'\boperator\s*=\s*\(', line):
03683     return IsDeletedOrDefault(clean_lines, linenum)
03684 
03685   # Allow constructors
03686   match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
03687   if match and match.group(1) == match.group(2):
03688     return IsDeletedOrDefault(clean_lines, linenum)
03689   if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
03690     return IsDeletedOrDefault(clean_lines, linenum)
03691 
03692   if Match(r'\s*[\w<>]+\s*\(', line):
03693     previous_line = 'ReturnType'
03694     if linenum > 0:
03695       previous_line = clean_lines.elided[linenum - 1]
03696     if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
03697       return IsDeletedOrDefault(clean_lines, linenum)
03698 
03699   # Reject types not mentioned in template-argument-list
03700   while line:
03701     match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
03702     if not match:
03703       break
03704     if match.group(1) not in typenames:
03705       return False
03706     line = match.group(2)
03707 
03708   # All RValue types that were in template-argument-list should have
03709   # been removed by now.  Those were allowed, assuming that they will
03710   # be forwarded.
03711   #
03712   # If there are no remaining RValue types left (i.e. types that were
03713   # not found in template-argument-list), flag those as not allowed.
03714   return line.find('&&') < 0
03715 
03716 
03717 def GetTemplateArgs(clean_lines, linenum):
03718   """Find list of template arguments associated with this function declaration.
03719 
03720   Args:
03721     clean_lines: A CleansedLines instance containing the file.
03722     linenum: Line number containing the start of the function declaration,
03723              usually one line after the end of the template-argument-list.
03724   Returns:
03725     Set of type names, or empty set if this does not appear to have
03726     any template parameters.
03727   """
03728   # Find start of function
03729   func_line = linenum
03730   while func_line > 0:
03731     line = clean_lines.elided[func_line]
03732     if Match(r'^\s*$', line):
03733       return set()
03734     if line.find('(') >= 0:
03735       break
03736     func_line -= 1
03737   if func_line == 0:
03738     return set()
03739 
03740   # Collapse template-argument-list into a single string
03741   argument_list = ''
03742   match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line])
03743   if match:
03744     # template-argument-list on the same line as function name
03745     start_col = len(match.group(1))
03746     _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col)
03747     if end_col > -1 and end_line == func_line:
03748       start_col += 1  # Skip the opening bracket
03749       argument_list = clean_lines.elided[func_line][start_col:end_col]
03750 
03751   elif func_line > 1:
03752     # template-argument-list one line before function name
03753     match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1])
03754     if match:
03755       end_col = len(match.group(1))
03756       _, start_line, start_col = ReverseCloseExpression(
03757           clean_lines, func_line - 1, end_col)
03758       if start_col > -1:
03759         start_col += 1  # Skip the opening bracket
03760         while start_line < func_line - 1:
03761           argument_list += clean_lines.elided[start_line][start_col:]
03762           start_col = 0
03763           start_line += 1
03764         argument_list += clean_lines.elided[func_line - 1][start_col:end_col]
03765 
03766   if not argument_list:
03767     return set()
03768 
03769   # Extract type names
03770   typenames = set()
03771   while True:
03772     match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
03773                   argument_list)
03774     if not match:
03775       break
03776     typenames.add(match.group(1))
03777     argument_list = match.group(2)
03778   return typenames
03779 
03780 
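# Illustrative sketch (not part of cpplint.py): the typename-extraction loop
# at the end of GetTemplateArgs, run on an already-collapsed
# template-argument-list.  _example_extract_typenames is a hypothetical demo
# name, not cpplint API.
def _example_extract_typenames(argument_list):
  import re
  typenames = set()
  while True:
    match = re.match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
                     argument_list)
    if not match:
      break
    typenames.add(match.group(1))
    argument_list = match.group(2)
  return typenames

# _example_extract_typenames('typename T, class U, int N')
#   -> {'T', 'U'}  (the non-type parameter N is intentionally skipped)

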
03781 def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
03782   """Check for rvalue references.
03783 
03784   Args:
03785     filename: The name of the current file.
03786     clean_lines: A CleansedLines instance containing the file.
03787     linenum: The number of the line to check.
03788     nesting_state: A NestingState instance which maintains information about
03789                    the current stack of nested blocks being parsed.
03790     error: The function to call with any errors found.
03791   """
03792   # Find lines missing spaces around &&.
03793   # TODO(unknown): currently we don't check for rvalue references
03794   # with spaces surrounding the && to avoid false positives with
03795   # boolean expressions.
03796   line = clean_lines.elided[linenum]
03797   match = Match(r'^(.*\S)&&', line)
03798   if not match:
03799     match = Match(r'(.*)&&\S', line)
03800   if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
03801     return
03802 
03803   # Either poorly formed && or an rvalue reference, check the context
03804   # to get a more accurate error message.  Mostly we want to determine
03805   # if what's to the left of "&&" is a type or not.
03806   typenames = GetTemplateArgs(clean_lines, linenum)
03807   and_pos = len(match.group(1))
03808   if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
03809     if not IsRValueAllowed(clean_lines, linenum, typenames):
03810       error(filename, linenum, 'build/c++11', 3,
03811             'RValue references are an unapproved C++ feature.')
03812   else:
03813     error(filename, linenum, 'whitespace/operators', 3,
03814           'Missing spaces around &&')
03815 
03816 
03817 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
03818   """Checks for additional blank line issues related to sections.
03819 
03820   Currently the only thing checked here is a blank line before protected/private.
03821 
03822   Args:
03823     filename: The name of the current file.
03824     clean_lines: A CleansedLines instance containing the file.
03825     class_info: A _ClassInfo objects.
03826     linenum: The number of the line to check.
03827     error: The function to call with any errors found.
03828   """
03829   # Skip checks if the class is small, where small means 25 lines or less.
03830   # 25 lines seems like a good cutoff since that's the usual height of
03831   # terminals, and any class that can't fit in one screen can't really
03832   # be considered "small".
03833   #
03834   # Also skip checks if we are on the first line.  This accounts for
03835   # classes that look like
03836   #   class Foo { public: ... };
03837   #
03838   # If we didn't find the end of the class, last_line would be zero,
03839   # and the check will be skipped by the first condition.
03840   if (class_info.last_line - class_info.starting_linenum <= 24 or
03841       linenum <= class_info.starting_linenum):
03842     return
03843 
03844   matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
03845   if matched:
03846     # Issue warning if the line before public/protected/private was
03847     # not a blank line, but don't do this if the previous line contains
03848     # "class" or "struct".  This can happen two ways:
03849     #  - We are at the beginning of the class.
03850     #  - We are forward-declaring an inner class that is semantically
03851     #    private, but needed to be public for implementation reasons.
03852     # Also ignores cases where the previous line ends with a backslash as can be
03853     # common when defining classes in C macros.
03854     prev_line = clean_lines.lines[linenum - 1]
03855     if (not IsBlankLine(prev_line) and
03856         not Search(r'\b(class|struct)\b', prev_line) and
03857         not Search(r'\\$', prev_line)):
03858       # Try a bit harder to find the beginning of the class.  This is to
03859       # account for multi-line base-specifier lists, e.g.:
03860       #   class Derived
03861       #       : public Base {
03862       end_class_head = class_info.starting_linenum
03863       for i in range(class_info.starting_linenum, linenum):
03864         if Search(r'\{\s*$', clean_lines.lines[i]):
03865           end_class_head = i
03866           break
03867       if end_class_head < linenum - 1:
03868         error(filename, linenum, 'whitespace/blank_line', 3,
03869               '"%s:" should be preceded by a blank line' % matched.group(1))
03870 
03871 
03872 def GetPreviousNonBlankLine(clean_lines, linenum):
03873   """Return the most recent non-blank line and its line number.
03874 
03875   Args:
03876     clean_lines: A CleansedLines instance containing the file contents.
03877     linenum: The number of the line to check.
03878 
03879   Returns:
03880     A tuple with two elements.  The first element is the contents of the last
03881     non-blank line before the current line, or the empty string if this is the
03882     first non-blank line.  The second is the line number of that line, or -1
03883     if this is the first non-blank line.
03884   """
03885 
03886   prevlinenum = linenum - 1
03887   while prevlinenum >= 0:
03888     prevline = clean_lines.elided[prevlinenum]
03889     if not IsBlankLine(prevline):     # if not a blank line...
03890       return (prevline, prevlinenum)
03891     prevlinenum -= 1
03892   return ('', -1)
03893 
03894 
03895 def CheckBraces(filename, clean_lines, linenum, error):
03896   """Looks for misplaced braces (e.g. at the end of a line).
03897 
03898   Args:
03899     filename: The name of the current file.
03900     clean_lines: A CleansedLines instance containing the file.
03901     linenum: The number of the line to check.
03902     error: The function to call with any errors found.
03903   """
03904 
03905   line = clean_lines.elided[linenum]        # get rid of comments and strings
03906 
03907   if Match(r'\s*{\s*$', line):
03908     # We allow an open brace to start a line in the case where someone is using
03909     # braces in a block to explicitly create a new scope, which is commonly used
03910     # to control the lifetime of stack-allocated variables.  Braces are also
03911     # used for brace initializers inside function calls.  We don't detect this
03912     # perfectly: we just don't complain if the last non-whitespace character on
03913     # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
03914     # previous line starts a preprocessor block.
03915     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
03916     if (not Search(r'[,;:}{(]\s*$', prevline) and
03917         not Match(r'\s*#', prevline)):
03918       error(filename, linenum, 'whitespace/braces', 4,
03919             '{ should almost always be at the end of the previous line')
03920 
03921   # An else clause should be on the same line as the preceding closing brace.
03922   if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
03923     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
03924     if Match(r'\s*}\s*$', prevline):
03925       error(filename, linenum, 'whitespace/newline', 4,
03926             'An else should appear on the same line as the preceding }')
03927 
03928   # If braces come on one side of an else, they should be on both.
03929   # However, we have to worry about "else if" that spans multiple lines!
03930   if Search(r'else if\s*\(', line):       # could be multi-line if
03931     brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
03932     # find the ( after the if
03933     pos = line.find('else if')
03934     pos = line.find('(', pos)
03935     if pos > 0:
03936       (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
03937       brace_on_right = endline[endpos:].find('{') != -1
03938       if brace_on_left != brace_on_right:    # must be brace after if
03939         error(filename, linenum, 'readability/braces', 5,
03940               'If an else has a brace on one side, it should have it on both')
03941   elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
03942     error(filename, linenum, 'readability/braces', 5,
03943           'If an else has a brace on one side, it should have it on both')
03944 
03945   # Likewise, the body of an else should never be on the same line as the else
03946   if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
03947     error(filename, linenum, 'whitespace/newline', 4,
03948           'Else clause should never be on same line as else (use 2 lines)')
03949 
03950   # In the same way, a do/while should never be on one line
03951   if Match(r'\s*do [^\s{]', line):
03952     error(filename, linenum, 'whitespace/newline', 4,
03953           'do/while clauses should not be on a single line')
03954 
03955   # Check single-line if/else bodies. The style guide says 'curly braces are not
03956   # required for single-line statements'. We additionally allow multi-line
03957   # single statements, but we reject anything with more than one semicolon in
03958   # it. This means that the first semicolon after the if should be at the end of
03959   # its line, and the line after that should have an indent level equal to or
03960   # lower than the if. We also check for ambiguous if/else nesting without
03961   # braces.
03962   if_else_match = Search(r'\b(if\s*\(|else\b)', line)
03963   if if_else_match and not Match(r'\s*#', line):
03964     if_indent = GetIndentLevel(line)
03965     endline, endlinenum, endpos = line, linenum, if_else_match.end()
03966     if_match = Search(r'\bif\s*\(', line)
03967     if if_match:
03968       # This could be a multiline if condition, so find the end first.
03969       pos = if_match.end() - 1
03970       (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
03971     # Check for an opening brace, either directly after the if or on the next
03972     # line. If found, this isn't a single-statement conditional.
03973     if (not Match(r'\s*{', endline[endpos:])
03974         and not (Match(r'\s*$', endline[endpos:])
03975                  and endlinenum < (len(clean_lines.elided) - 1)
03976                  and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
03977       while (endlinenum < len(clean_lines.elided)
03978              and ';' not in clean_lines.elided[endlinenum][endpos:]):
03979         endlinenum += 1
03980         endpos = 0
03981       if endlinenum < len(clean_lines.elided):
03982         endline = clean_lines.elided[endlinenum]
03983         # We allow a mix of whitespace and closing braces (e.g. for one-liner
03984         # methods) and a single \ after the semicolon (for macros)
03985         endpos = endline.find(';')
03986         if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
03987           # Semicolon isn't the last character, there's something trailing.
03988           # Output a warning if the semicolon is not contained inside
03989           # a lambda expression.
03990           if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
03991                        endline):
03992             error(filename, linenum, 'readability/braces', 4,
03993                   'If/else bodies with multiple statements require braces')
03994         elif endlinenum < len(clean_lines.elided) - 1:
03995           # Make sure the next line is dedented
03996           next_line = clean_lines.elided[endlinenum + 1]
03997           next_indent = GetIndentLevel(next_line)
03998           # With ambiguous nested if statements, this will error out on the
03999           # if that *doesn't* match the else, regardless of whether it's the
04000           # inner one or outer one.
04001           if (if_match and Match(r'\s*else\b', next_line)
04002               and next_indent != if_indent):
04003             error(filename, linenum, 'readability/braces', 4,
04004                   'Else clause should be indented at the same level as if. '
04005                   'Ambiguous nested if/else chains require braces.')
04006           elif next_indent > if_indent:
04007             error(filename, linenum, 'readability/braces', 4,
04008                   'If/else bodies with multiple statements require braces')
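# Illustrative inputs for CheckBraces, a rough sketch rather than an
# exhaustive description of what the regexps above accept or reject:
#
#   if (condition)
#   {                      // whitespace/braces: '{' belongs on the previous line
#
#   }
#   else {                 // whitespace/newline: else belongs on the line of the '}'
#
#   if (a) {
#     Foo();
#   } else                 // readability/braces: brace on only one side of the else
#     Bar();
#
#   else DoSomething();    // whitespace/newline: else body on the same line as else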
04009 
04010 
04011 def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
04012   """Looks for redundant trailing semicolon.
04013 
04014   Args:
04015     filename: The name of the current file.
04016     clean_lines: A CleansedLines instance containing the file.
04017     linenum: The number of the line to check.
04018     error: The function to call with any errors found.
04019   """
04020 
04021   line = clean_lines.elided[linenum]
04022 
04023   # Block bodies should not be followed by a semicolon.  Due to C++11
04024   # brace initialization, there are more places where semicolons are
04025   # required than not, so we use a whitelist approach to check these
04026   # rather than a blacklist.  These are the places where "};" should
04027   # be replaced by just "}":
04028   # 1. Some flavor of block following closing parenthesis:
04029   #    for (;;) {};
04030   #    while (...) {};
04031   #    switch (...) {};
04032   #    Function(...) {};
04033   #    if (...) {};
04034   #    if (...) else if (...) {};
04035   #
04036   # 2. else block:
04037   #    if (...) else {};
04038   #
04039   # 3. const member function:
04040   #    Function(...) const {};
04041   #
04042   # 4. Block following some statement:
04043   #    x = 42;
04044   #    {};
04045   #
04046   # 5. Block at the beginning of a function:
04047   #    Function(...) {
04048   #      {};
04049   #    }
04050   #
04051   #    Note that naively checking for the preceding "{" will also match
04052   #    braces inside multi-dimensional arrays, but this is fine since
04053   #    that expression will not contain semicolons.
04054   #
04055   # 6. Block following another block:
04056   #    while (true) {}
04057   #    {};
04058   #
04059   # 7. End of namespaces:
04060   #    namespace {};
04061   #
04062     #    These semicolons seem far more common than other kinds of
04063   #    redundant semicolons, possibly due to people converting classes
04064   #    to namespaces.  For now we do not warn for this case.
04065   #
04066   # Try matching case 1 first.
04067   match = Match(r'^(.*\)\s*)\{', line)
04068   if match:
04069     # Matched closing parenthesis (case 1).  Check the token before the
04070     # matching opening parenthesis, and don't warn if it looks like a
04071     # macro.  This avoids these false positives:
04072     #  - macro that defines a base class
04073     #  - multi-line macro that defines a base class
04074     #  - macro that defines the whole class-head
04075     #
04076     # But we still issue warnings for macros that we know are safe to
04077     # warn, specifically:
04078     #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
04079     #  - TYPED_TEST
04080     #  - INTERFACE_DEF
04081     #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
04082     #
04083     # We implement a whitelist of safe macros instead of a blacklist of
04084     # unsafe macros, even though the latter appears less frequently in
04085     # google code and would have been easier to implement.  This is because
04086     # the downside of getting the whitelist wrong is just some extra
04087     # semicolons, while the downside of getting the blacklist wrong
04088     # would be compile errors.
04089     #
04090     # In addition to macros, we also don't want to warn on
04091     #  - Compound literals
04092     #  - Lambdas
04093     #  - alignas specifier with anonymous structs:
04094     closing_brace_pos = match.group(1).rfind(')')
04095     opening_parenthesis = ReverseCloseExpression(
04096         clean_lines, linenum, closing_brace_pos)
04097     if opening_parenthesis[2] > -1:
04098       line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
04099       macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
04100       func = Match(r'^(.*\])\s*$', line_prefix)
04101       if ((macro and
04102            macro.group(1) not in (
04103                'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
04104                'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
04105                'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
04106           (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
04107           Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
04108           Search(r'\s+=\s*$', line_prefix)):
04109         match = None
04110     if (match and
04111         opening_parenthesis[1] > 1 and
04112         Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
04113       # Multi-line lambda-expression
04114       match = None
04115 
04116   else:
04117     # Try matching cases 2-3.
04118     match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
04119     if not match:
04120       # Try matching cases 4-6.  These are always matched on separate lines.
04121       #
04122       # Note that we can't simply concatenate the previous line to the
04123       # current line and do a single match, otherwise we may output
04124       # duplicate warnings for the blank line case:
04125       #   if (cond) {
04126       #     // blank line
04127       #   }
04128       prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
04129       if prevline and Search(r'[;{}]\s*$', prevline):
04130         match = Match(r'^(\s*)\{', line)
04131 
04132   # Check matching closing brace
04133   if match:
04134     (endline, endlinenum, endpos) = CloseExpression(
04135         clean_lines, linenum, len(match.group(1)))
04136     if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
04137       # Current {} pair is eligible for semicolon check, and we have found
04138       # the redundant semicolon, output warning here.
04139       #
04140       # Note: because we are scanning forward for opening braces, and
04141       # outputting warnings for the matching closing brace, if there are
04142       # nested blocks with trailing semicolons, we will get the error
04143       # messages in reversed order.
04144       error(filename, endlinenum, 'readability/braces', 4,
04145             "You don't need a ; after a }")
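# For example, the trailing semicolon in
#   for (;;) {
#     Work();
#   };
# is reported at the closing line ("You don't need a ; after a }"), while
# 'struct Foo {};' and brace-initialized declarations fall under the
# whitelist logic above and are left alone.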
04146 
04147 
04148 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
04149   """Look for empty loop/conditional body with only a single semicolon.
04150 
04151   Args:
04152     filename: The name of the current file.
04153     clean_lines: A CleansedLines instance containing the file.
04154     linenum: The number of the line to check.
04155     error: The function to call with any errors found.
04156   """
04157 
04158   # Search for loop keywords at the beginning of the line.  Because only
04159   # whitespace is allowed before the keywords, this will also ignore most
04160   # do-while loops, since those lines should start with a closing brace.
04161   #
04162   # We also check "if" blocks here, since an empty conditional block
04163   # is likely an error.
04164   line = clean_lines.elided[linenum]
04165   matched = Match(r'\s*(for|while|if)\s*\(', line)
04166   if matched:
04167     # Find the end of the conditional expression
04168     (end_line, end_linenum, end_pos) = CloseExpression(
04169         clean_lines, linenum, line.find('('))
04170 
04171     # Output a warning if what follows the condition expression is a semicolon.
04172     # No warning is issued in any other case, including whitespace or newline,
04173     # since we have a separate check for semicolons preceded by whitespace.
04174     if end_pos >= 0 and Match(r';', end_line[end_pos:]):
04175       if matched.group(1) == 'if':
04176         error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
04177               'Empty conditional bodies should use {}')
04178       else:
04179         error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
04180               'Empty loop bodies should use {} or continue')
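# For illustration:
#   while (Poll());       -> whitespace/empty_loop_body
#   if (ptr == NULL);     -> whitespace/empty_conditional_body
#   while (Poll()) {}     -> fine, an explicit empty block is accepted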
04181 
04182 
04183 def FindCheckMacro(line):
04184   """Find a replaceable CHECK-like macro.
04185 
04186   Args:
04187     line: line to search on.
04188   Returns:
04189     (macro name, start position), or (None, -1) if no replaceable
04190     macro is found.
04191   """
04192   for macro in _CHECK_MACROS:
04193     i = line.find(macro)
04194     if i >= 0:
04195       # Find opening parenthesis.  Do a regular expression match here
04196       # to make sure that we are matching the expected CHECK macro, as
04197       # opposed to some other macro that happens to contain the CHECK
04198       # substring.
04199       matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
04200       if not matched:
04201         continue
04202       return (macro, len(matched.group(1)))
04203   return (None, -1)
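# For illustration (assuming 'CHECK' is listed in _CHECK_MACROS, which is
# defined earlier in this file):
#   FindCheckMacro('  CHECK(x);')    -> ('CHECK', 7), the offset of the '('
#   FindCheckMacro('  CHECKSUM(x);') -> (None, -1), no word-boundary match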
04204 
04205 
04206 def CheckCheck(filename, clean_lines, linenum, error):
04207   """Checks the use of CHECK and EXPECT macros.
04208 
04209   Args:
04210     filename: The name of the current file.
04211     clean_lines: A CleansedLines instance containing the file.
04212     linenum: The number of the line to check.
04213     error: The function to call with any errors found.
04214   """
04215 
04216   # Decide the set of replacement macros that should be suggested
04217   lines = clean_lines.elided
04218   (check_macro, start_pos) = FindCheckMacro(lines[linenum])
04219   if not check_macro:
04220     return
04221 
04222   # Find end of the boolean expression by matching parentheses
04223   (last_line, end_line, end_pos) = CloseExpression(
04224       clean_lines, linenum, start_pos)
04225   if end_pos < 0:
04226     return
04227 
04228   # If the check macro is followed by something other than a
04229   # semicolon, assume users will log their own custom error messages
04230   # and don't suggest any replacements.
04231   if not Match(r'\s*;', last_line[end_pos:]):
04232     return
04233 
04234   if linenum == end_line:
04235     expression = lines[linenum][start_pos + 1:end_pos - 1]
04236   else:
04237     expression = lines[linenum][start_pos + 1:]
04238     for i in xrange(linenum + 1, end_line):
04239       expression += lines[i]
04240     expression += last_line[0:end_pos - 1]
04241 
04242   # Parse expression so that we can take parentheses into account.
04243   # This avoids false positives for inputs like "CHECK((a < 4) == b)",
04244   # which is not replaceable by CHECK_LE.
04245   lhs = ''
04246   rhs = ''
04247   operator = None
04248   while expression:
04249     matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
04250                     r'==|!=|>=|>|<=|<|\()(.*)$', expression)
04251     if matched:
04252       token = matched.group(1)
04253       if token == '(':
04254         # Parenthesized operand
04255         expression = matched.group(2)
04256         (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
04257         if end < 0:
04258           return  # Unmatched parenthesis
04259         lhs += '(' + expression[0:end]
04260         expression = expression[end:]
04261       elif token in ('&&', '||'):
04262         # Logical and/or operators.  This means the expression
04263         # contains more than one term, for example:
04264         #   CHECK(42 < a && a < b);
04265         #
04266         # These are not replaceable with CHECK_LE, so bail out early.
04267         return
04268       elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
04269         # Non-relational operator
04270         lhs += token
04271         expression = matched.group(2)
04272       else:
04273         # Relational operator
04274         operator = token
04275         rhs = matched.group(2)
04276         break
04277     else:
04278       # Unparenthesized operand.  Instead of appending to lhs one character
04279       # at a time, we do another regular expression match to consume several
04280       # characters at once if possible.  A trivial benchmark shows that this
04281       # is more efficient when the operands are longer than a single
04282       # character, which is generally the case.
04283       matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
04284       if not matched:
04285         matched = Match(r'^(\s*\S)(.*)$', expression)
04286         if not matched:
04287           break
04288       lhs += matched.group(1)
04289       expression = matched.group(2)
04290 
04291   # Only apply checks if we got all parts of the boolean expression
04292   if not (lhs and operator and rhs):
04293     return
04294 
04295   # Check that rhs does not contain logical operators.  We already know
04296   # that lhs is fine since the loop above parses out && and ||.
04297   if rhs.find('&&') > -1 or rhs.find('||') > -1:
04298     return
04299 
04300   # At least one of the operands must be a constant literal.  This is
04301   # to avoid suggesting replacements for unprintable things like
04302   # CHECK(variable != iterator)
04303   #
04304   # The following pattern matches decimal, hex integers, strings, and
04305   # characters (in that order).
04306   lhs = lhs.strip()
04307   rhs = rhs.strip()
04308   match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
04309   if Match(match_constant, lhs) or Match(match_constant, rhs):
04310     # Note: since we know both lhs and rhs, we can provide a more
04311     # descriptive error message like:
04312     #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
04313     # Instead of:
04314     #   Consider using CHECK_EQ instead of CHECK(a == b)
04315     #
04316     # We are still keeping the less descriptive message because if lhs
04317     # or rhs gets long, the error message might become unreadable.
04318     error(filename, linenum, 'readability/check', 2,
04319           'Consider using %s instead of %s(a %s b)' % (
04320               _CHECK_REPLACEMENT[check_macro][operator],
04321               check_macro, operator))
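# Rough examples of the behavior above (the suggested names come from
# _CHECK_REPLACEMENT, defined earlier in this file):
#   CHECK(x == 42);         -> 'Consider using CHECK_EQ instead of CHECK(a == b)'
#   CHECK(x == y);          -> no warning, neither operand is a constant literal
#   CHECK(42 < a && a < b); -> no warning, '&&' makes it non-replaceable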
04322 
04323 
04324 def CheckAltTokens(filename, clean_lines, linenum, error):
04325   """Check alternative keywords being used in boolean expressions.
04326 
04327   Args:
04328     filename: The name of the current file.
04329     clean_lines: A CleansedLines instance containing the file.
04330     linenum: The number of the line to check.
04331     error: The function to call with any errors found.
04332   """
04333   line = clean_lines.elided[linenum]
04334 
04335   # Avoid preprocessor lines
04336   if Match(r'^\s*#', line):
04337     return
04338 
04339   # Last ditch effort to avoid multi-line comments.  This will not help
04340   # if the comment started before the current line or ended after the
04341   # current line, but it catches most of the false positives.  At least,
04342   # it provides a way to work around this warning for people who use
04343   # multi-line comments in preprocessor macros.
04344   #
04345   # TODO(unknown): remove this once cpplint has better support for
04346   # multi-line comments.
04347   if line.find('/*') >= 0 or line.find('*/') >= 0:
04348     return
04349 
04350   for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
04351     error(filename, linenum, 'readability/alt_tokens', 2,
04352           'Use operator %s instead of %s' % (
04353               _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
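# For example, assuming _ALT_TOKEN_REPLACEMENT (defined earlier in this file)
# maps the C++ alternative tokens to their operator spellings, a line like
#   if (p and not q) { ... }
# would draw 'Use operator && instead of and' and 'Use operator ! instead of not'.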
04354 
04355 
04356 def GetLineWidth(line):
04357   """Determines the width of the line in column positions.
04358 
04359   Args:
04360     line: A string, which may be a Unicode string.
04361 
04362   Returns:
04363     The width of the line in column positions, accounting for Unicode
04364     combining characters and wide characters.
04365   """
04366   if isinstance(line, unicode):
04367     width = 0
04368     for uc in unicodedata.normalize('NFC', line):
04369       if unicodedata.east_asian_width(uc) in ('W', 'F'):
04370         width += 2
04371       elif not unicodedata.combining(uc):
04372         width += 1
04373     return width
04374   else:
04375     return len(line)
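# For illustration:
#   GetLineWidth(u'int x;') == 6
#   GetLineWidth(u'\u4f60\u597d') == 4   # two fullwidth CJK characters, 2 columns each
# Plain byte strings simply fall back to len().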
04376 
04377 
04378 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
04379                error):
04380   """Checks rules from the 'C++ style rules' section of cppguide.html.
04381 
04382   Most of these rules are hard to test (naming, comment style), but we
04383   do what we can.  In particular we check for 2-space indents, line lengths,
04384   tab usage, spaces inside code, etc.
04385 
04386   Args:
04387     filename: The name of the current file.
04388     clean_lines: A CleansedLines instance containing the file.
04389     linenum: The number of the line to check.
04390     file_extension: The extension (without the dot) of the filename.
04391     nesting_state: A NestingState instance which maintains information about
04392                    the current stack of nested blocks being parsed.
04393     error: The function to call with any errors found.
04394   """
04395 
04396   # Don't use "elided" lines here, otherwise we can't check commented lines.
04397   # Don't want to use "raw" either, because we don't want to check inside C++11
04398   # raw strings.
04399   raw_lines = clean_lines.lines_without_raw_strings
04400   line = raw_lines[linenum]
04401 
04402   if line.find('\t') != -1:
04403     error(filename, linenum, 'whitespace/tab', 1,
04404           'Tab found; better to use spaces')
04405 
04406   # One or three blank spaces at the beginning of the line is weird; it's
04407   # hard to reconcile that with 2-space indents.
04408   # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
04409   # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
04410   # if(RLENGTH > 20) complain = 0;
04411   # if(match($0, " +(error|private|public|protected):")) complain = 0;
04412   # if(match(prev, "&& *$")) complain = 0;
04413   # if(match(prev, "\\|\\| *$")) complain = 0;
04414   # if(match(prev, "[\",=><] *$")) complain = 0;
04415   # if(match($0, " <<")) complain = 0;
04416   # if(match(prev, " +for \\(")) complain = 0;
04417   # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
04418   scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
04419   classinfo = nesting_state.InnermostClass()
04420   initial_spaces = 0
04421   cleansed_line = clean_lines.elided[linenum]
04422   while initial_spaces < len(line) and line[initial_spaces] == ' ':
04423     initial_spaces += 1
04424   if line and line[-1].isspace():
04425     error(filename, linenum, 'whitespace/end_of_line', 4,
04426           'Line ends in whitespace.  Consider deleting these extra spaces.')
04427   # There are certain situations where we allow one space, notably for
04428   # section labels, and also for lines containing multi-line raw strings.
04429   elif ((initial_spaces == 1 or initial_spaces == 3) and
04430         not Match(scope_or_label_pattern, cleansed_line) and
04431         not (clean_lines.raw_lines[linenum] != line and
04432              Match(r'^\s*""', line))):
04433     error(filename, linenum, 'whitespace/indent', 3,
04434           'Weird number of spaces at line-start.  '
04435           'Are you using a 2-space indent?')
04436 
04437   # Check if the line is a header guard.
04438   is_header_guard = False
04439   if file_extension == 'h':
04440     cppvar = GetHeaderGuardCPPVariable(filename)
04441     if (line.startswith('#ifndef %s' % cppvar) or
04442         line.startswith('#define %s' % cppvar) or
04443         line.startswith('#endif  // %s' % cppvar)):
04444       is_header_guard = True
04445   # #include lines and header guards can be long, since there's no clean way to
04446   # split them.
04447   #
04448   # URLs can be long too.  It's possible to split these, but it makes them
04449   # harder to cut&paste.
04450   #
04451   # The "$Id:...$" comment may also get very long without it being the
04452   # developer's fault.
04453   if (not line.startswith('#include') and not is_header_guard and
04454       not Match(r'^\s*//.*http(s?)://\S*$', line) and
04455       not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
04456     line_width = GetLineWidth(line)
04457     extended_length = int((_line_length * 1.25))
04458     if line_width > extended_length:
04459       error(filename, linenum, 'whitespace/line_length', 4,
04460             'Lines should very rarely be longer than %i characters' %
04461             extended_length)
04462     elif line_width > _line_length:
04463       error(filename, linenum, 'whitespace/line_length', 2,
04464             'Lines should be <= %i characters long' % _line_length)
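  # Worked example of the two thresholds above, assuming the default
  # _line_length of 80 (overridable with --linelength): extended_length is
  # int(80 * 1.25) == 100, so an 85-column line draws the level-2 warning
  # while a 105-column line draws the stronger level-4 warning instead.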
04465 
04466   if (cleansed_line.count(';') > 1 and
04467       # for loops are allowed two ;'s (and may run over two lines).
04468       cleansed_line.find('for') == -1 and
04469       (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
04470        GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
04471       # It's ok to have many commands in a switch case that fits in 1 line
04472       not ((cleansed_line.find('case ') != -1 or
04473             cleansed_line.find('default:') != -1) and
04474            cleansed_line.find('break;') != -1)):
04475     error(filename, linenum, 'whitespace/newline', 0,
04476           'More than one command on the same line')
04477 
04478   # Some more style checks
04479   CheckBraces(filename, clean_lines, linenum, error)
04480   CheckTrailingSemicolon(filename, clean_lines, linenum, error)
04481   CheckEmptyBlockBody(filename, clean_lines, linenum, error)
04482   CheckAccess(filename, clean_lines, linenum, nesting_state, error)
04483   CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
04484   CheckOperatorSpacing(filename, clean_lines, linenum, error)
04485   CheckParenthesisSpacing(filename, clean_lines, linenum, error)
04486   CheckCommaSpacing(filename, clean_lines, linenum, error)
04487   CheckBracesSpacing(filename, clean_lines, linenum, error)
04488   CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
04489   CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
04490   CheckCheck(filename, clean_lines, linenum, error)
04491   CheckAltTokens(filename, clean_lines, linenum, error)
04492   classinfo = nesting_state.InnermostClass()
04493   if classinfo:
04494     CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
04495 
04496 
04497 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
04498 # Matches the first component of a filename delimited by -s and _s. That is:
04499 #  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
04500 #  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
04501 #  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
04502 #  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
04503 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
04504 
04505 
04506 def _DropCommonSuffixes(filename):
04507   """Drops common suffixes like _test.cc or -inl.h from filename.
04508 
04509   For example:
04510     >>> _DropCommonSuffixes('foo/foo-inl.h')
04511     'foo/foo'
04512     >>> _DropCommonSuffixes('foo/bar/foo.cc')
04513     'foo/bar/foo'
04514     >>> _DropCommonSuffixes('foo/foo_internal.h')
04515     'foo/foo'
04516     >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
04517     'foo/foo_unusualinternal'
04518 
04519   Args:
04520     filename: The input filename.
04521 
04522   Returns:
04523     The filename with the common suffix removed.
04524   """
04525   for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
04526                  'inl.h', 'impl.h', 'internal.h'):
04527     if (filename.endswith(suffix) and len(filename) > len(suffix) and
04528         filename[-len(suffix) - 1] in ('-', '_')):
04529       return filename[:-len(suffix) - 1]
04530   return os.path.splitext(filename)[0]
04531 
04532 
04533 def _IsTestFilename(filename):
04534   """Determines if the given filename has a suffix that identifies it as a test.
04535 
04536   Args:
04537     filename: The input filename.
04538 
04539   Returns:
04540     True if 'filename' looks like a test, False otherwise.
04541   """
04542   if (filename.endswith('_test.cc') or
04543       filename.endswith('_unittest.cc') or
04544       filename.endswith('_regtest.cc')):
04545     return True
04546   else:
04547     return False
04548 
04549 
04550 def _ClassifyInclude(fileinfo, include, is_system):
04551   """Figures out what kind of header 'include' is.
04552 
04553   Args:
04554     fileinfo: The current file cpplint is running over. A FileInfo instance.
04555     include: The path to a #included file.
04556     is_system: True if the #include used <> rather than "".
04557 
04558   Returns:
04559     One of the _XXX_HEADER constants.
04560 
04561   For example:
04562     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
04563     _C_SYS_HEADER
04564     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
04565     _CPP_SYS_HEADER
04566     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
04567     _LIKELY_MY_HEADER
04568     >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
04569     ...                  'bar/foo_other_ext.h', False)
04570     _POSSIBLE_MY_HEADER
04571     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
04572     _OTHER_HEADER
04573   """
04574   # This is a list of all standard c++ header files, except
04575   # those already checked for above.
04576   is_cpp_h = include in _CPP_HEADERS
04577 
04578   if is_system:
04579     if is_cpp_h:
04580       return _CPP_SYS_HEADER
04581     else:
04582       return _C_SYS_HEADER
04583 
04584   # If the target file and the include we're checking share a
04585   # basename when we drop common extensions, and the include
04586   # lives in . , then it's likely to be owned by the target file.
04587   target_dir, target_base = (
04588       os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
04589   include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
04590   if target_base == include_base and (
04591       include_dir == target_dir or
04592       include_dir == os.path.normpath(target_dir + '/../public')):
04593     return _LIKELY_MY_HEADER
04594 
04595   # If the target and include share some initial basename
04596   # component, it's possible the target is implementing the
04597   # include, so it's allowed to be first, but we'll never
04598   # complain if it's not there.
04599   target_first_component = _RE_FIRST_COMPONENT.match(target_base)
04600   include_first_component = _RE_FIRST_COMPONENT.match(include_base)
04601   if (target_first_component and include_first_component and
04602       target_first_component.group(0) ==
04603       include_first_component.group(0)):
04604     return _POSSIBLE_MY_HEADER
04605 
04606   return _OTHER_HEADER
04607 
04608 
04609 
04610 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
04611   """Check rules that are applicable to #include lines.
04612 
04613   Strings on #include lines are NOT removed from the elided line, to make
04614   certain tasks easier. However, to prevent false positives, checks
04615   applicable to #include lines in CheckLanguage must be put here.
04616 
04617   Args:
04618     filename: The name of the current file.
04619     clean_lines: A CleansedLines instance containing the file.
04620     linenum: The number of the line to check.
04621     include_state: An _IncludeState instance in which the headers are inserted.
04622     error: The function to call with any errors found.
04623   """
04624   fileinfo = FileInfo(filename)
04625   line = clean_lines.lines[linenum]
04626 
04627   # "include" should use the new style "foo/bar.h" instead of just "bar.h"
04628   # Only do this check if the included header follows google naming
04629   # conventions.  If not, assume that it's a 3rd party API that
04630   # requires special include conventions.
04631   #
04632   # We also make an exception for Lua headers, which follow google
04633   # naming convention but not the include convention.
04634   match = Match(r'#include\s*"([^/]+\.h)"', line)
04635   if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
04636     error(filename, linenum, 'build/include', 4,
04637           'Include the directory when naming .h files')
04638 
04639   # We shouldn't include a file more than once. Actually, there are a
04640   # handful of instances where doing so is okay, but in general it's
04641   # not.
04642   match = _RE_PATTERN_INCLUDE.search(line)
04643   if match:
04644     include = match.group(2)
04645     is_system = (match.group(1) == '<')
04646     duplicate_line = include_state.FindHeader(include)
04647     if duplicate_line >= 0:
04648       error(filename, linenum, 'build/include', 4,
04649             '"%s" already included at %s:%s' %
04650             (include, filename, duplicate_line))
04651     elif (include.endswith('.cc') and
04652           os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
04653       error(filename, linenum, 'build/include', 4,
04654             'Do not include .cc files from other packages')
04655     elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
04656       include_state.include_list[-1].append((include, linenum))
04657 
04658       # We want to ensure that headers appear in the right order:
04659       # 1) for foo.cc, foo.h  (preferred location)
04660       # 2) c system files
04661       # 3) cpp system files
04662       # 4) for foo.cc, foo.h  (deprecated location)
04663       # 5) other google headers
04664       #
04665       # We classify each include statement as one of those 5 types
04666       # using a number of techniques. The include_state object keeps
04667       # track of the highest type seen, and complains if we see a
04668       # lower type after that.
04669       error_message = include_state.CheckNextIncludeOrder(
04670           _ClassifyInclude(fileinfo, include, is_system))
04671       if error_message:
04672         error(filename, linenum, 'build/include_order', 4,
04673               '%s. Should be: %s.h, c system, c++ system, other.' %
04674               (error_message, fileinfo.BaseName()))
04675       canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
04676       if not include_state.IsInAlphabeticalOrder(
04677           clean_lines, linenum, canonical_include):
04678         error(filename, linenum, 'build/include_alpha', 4,
04679               'Include "%s" not in alphabetical order' % include)
04680       include_state.SetLastHeader(canonical_include)
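# A sketch of the preferred order for a file foo/foo.cc (how each header is
# classified depends on _ClassifyInclude above and on _CPP_HEADERS, defined
# earlier in this file):
#   #include "foo/foo.h"       // 1) the header this file implements
#   #include <sys/types.h>     // 2) C system headers
#   #include <vector>          // 3) C++ system headers
#   #include "base/logging.h"  // 5) other google headers
# Listing a header from an earlier group after a later one (e.g. a C system
# header after <vector>) draws build/include_order, and headers out of
# alphabetical order draw build/include_alpha.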
04681 
04682 
04683 
04684 def _GetTextInside(text, start_pattern):
04685   r"""Retrieves all the text between matching open and close parentheses.
04686 
04687   Given a string of lines and a regular expression string, retrieve all the text
04688   following the expression and between opening punctuation symbols like
04689   (, [, or {, and the matching close-punctuation symbol. This properly handles
04690   nested occurrences of the punctuation, so for text like
04691     printf(a(), b(c()));
04692   a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
04693   start_pattern must match a string that ends with an open punctuation symbol.
04694 
04695   Args:
04696     text: The lines to extract text from. Its comments and strings must be
04697           elided. It can be a single line or span multiple lines.
04698     start_pattern: The regexp string indicating where to start extracting
04699                    the text.
04700   Returns:
04701     The extracted text.
04702     None if either the opening string or ending punctuation could not be found.
04703   """
04704   # TODO(unknown): Audit cpplint.py to see what places could be profitably
04705   # rewritten to use _GetTextInside (and use inferior regexp matching today).
04706 
04707   # Map each opening punctuation to its matching closing punctuation.
04708   matching_punctuation = {'(': ')', '{': '}', '[': ']'}
04709   closing_punctuation = set(matching_punctuation.values())
04710 
04711   # Find the position to start extracting text.
04712   match = re.search(start_pattern, text, re.M)
04713   if not match:  # start_pattern not found in text.
04714     return None
04715   start_position = match.end(0)
04716 
04717   assert start_position > 0, (
04718       'start_pattern must end with an opening punctuation.')
04719   assert text[start_position - 1] in matching_punctuation, (
04720       'start_pattern must end with an opening punctuation.')
04721   # Stack of closing punctuations we expect to have in text after position.
04722   punctuation_stack = [matching_punctuation[text[start_position - 1]]]
04723   position = start_position
04724   while punctuation_stack and position < len(text):
04725     if text[position] == punctuation_stack[-1]:
04726       punctuation_stack.pop()
04727     elif text[position] in closing_punctuation:
04728       # A closing punctuation without matching opening punctuations.
04729       return None
04730     elif text[position] in matching_punctuation:
04731       punctuation_stack.append(matching_punctuation[text[position]])
04732     position += 1
04733   if punctuation_stack:
04734     # Opening punctuation left without a matching closing punctuation.
04735     return None
04736   # All punctuation matched.
04737   return text[start_position:position - 1]
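# Two more illustrations of the contract above:
#   _GetTextInside('printf(a(, b);', r'printf\(')  -> None  (unbalanced '(')
#   _GetTextInside('int x;', r'printf\(')          -> None  (pattern not found)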
04738 
04739 
04740 # Patterns for matching call-by-reference parameters.
04741 #
04742 # Supports nested templates up to 2 levels deep using this messy pattern:
04743 #   < (?: < (?: < [^<>]*
04744 #               >
04745 #           |   [^<>] )*
04746 #         >
04747 #     |   [^<>] )*
04748 #   >
04749 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
04750 _RE_PATTERN_TYPE = (
04751     r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
04752     r'(?:\w|'
04753     r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
04754     r'::)+')
04755 # A call-by-reference parameter ends with '& identifier'.
04756 _RE_PATTERN_REF_PARAM = re.compile(
04757     r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
04758     r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
04759 # A call-by-const-reference parameter either ends with 'const& identifier'
04760 # or looks like 'const type& identifier' when 'type' is atomic.
04761 _RE_PATTERN_CONST_REF_PARAM = (
04762     r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
04763     r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
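# For illustration, in a declaration like 'void F(string &s, const Foo &f)':
#   'string &s'    is captured by _RE_PATTERN_REF_PARAM but does not match
#                  _RE_PATTERN_CONST_REF_PARAM, so CheckForNonConstReference
#                  (below) will flag it.
#   'const Foo &f' matches both patterns and is left alone.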
04764 
04765 
04766 def CheckLanguage(filename, clean_lines, linenum, file_extension,
04767                   include_state, nesting_state, error):
04768   """Checks rules from the 'C++ language rules' section of cppguide.html.
04769 
04770   Some of these rules are hard to test (function overloading, using
04771   uint32 inappropriately), but we do the best we can.
04772 
04773   Args:
04774     filename: The name of the current file.
04775     clean_lines: A CleansedLines instance containing the file.
04776     linenum: The number of the line to check.
04777     file_extension: The extension (without the dot) of the filename.
04778     include_state: An _IncludeState instance in which the headers are inserted.
04779     nesting_state: A NestingState instance which maintains information about
04780                    the current stack of nested blocks being parsed.
04781     error: The function to call with any errors found.
04782   """
04783   # If the line is empty or consists of entirely a comment, no need to
04784   # check it.
04785   line = clean_lines.elided[linenum]
04786   if not line:
04787     return
04788 
04789   match = _RE_PATTERN_INCLUDE.search(line)
04790   if match:
04791     CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
04792     return
04793 
04794   # Reset include state across preprocessor directives.  This is meant
04795   # to silence warnings for conditional includes.
04796   match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
04797   if match:
04798     include_state.ResetSection(match.group(1))
04799 
04800   # Make Windows paths like Unix.
04801   fullname = os.path.abspath(filename).replace('\\', '/')
04802
04803   # Perform other checks now that we are sure that this is not an include line
04804   CheckCasts(filename, clean_lines, linenum, error)
04805   CheckGlobalStatic(filename, clean_lines, linenum, error)
04806   CheckPrintf(filename, clean_lines, linenum, error)
04807 
04808   if file_extension == 'h':
04809     # TODO(unknown): check that 1-arg constructors are explicit.
04810     #                How to tell it's a constructor?
04811     #                (handled in CheckForNonStandardConstructs for now)
04812     # TODO(unknown): check that classes declare or disable copy/assign
04813     #                (level 1 error)
04814     pass
04815 
04816   # Check if people are using the verboten C basic types.  The only exception
04817   # we regularly allow is "unsigned short port" for port.
04818   if Search(r'\bshort port\b', line):
04819     if not Search(r'\bunsigned short port\b', line):
04820       error(filename, linenum, 'runtime/int', 4,
04821             'Use "unsigned short" for ports, not "short"')
04822   else:
04823     match = Search(r'\b(short|long(?! +double)|long long)\b', line)
04824     if match:
04825       error(filename, linenum, 'runtime/int', 4,
04826             'Use int16/int64/etc, rather than the C type %s' % match.group(1))
04827 
04828   # Check if some verboten operator overloading is going on
04829   # TODO(unknown): catch out-of-line unary operator&:
04830   #   class X {};
04831   #   int operator&(const X& x) { return 42; }  // unary operator&
04832   # The trick is it's hard to tell apart from binary operator&:
04833   #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
04834   if Search(r'\boperator\s*&\s*\(\s*\)', line):
04835     error(filename, linenum, 'runtime/operator', 4,
04836           'Unary operator& is dangerous.  Do not use it.')
04837 
04838   # Check for suspicious usage of "if" like
04839   # } if (a == b) {
04840   if Search(r'\}\s*if\s*\(', line):
04841     error(filename, linenum, 'readability/braces', 4,
04842           'Did you mean "else if"? If not, start a new line for "if".')
04843 
04844   # Check for potential format string bugs like printf(foo).
04845   # We constrain the pattern not to pick things like DocidForPrintf(foo).
04846   # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
04847   # TODO(unknown): Catch the following case. Need to change the calling
04848   # convention of the whole function to process multiple line to handle it.
04849   #   printf(
04850   #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
04851   printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
04852   if printf_args:
04853     match = Match(r'([\w.\->()]+)$', printf_args)
04854     if match and match.group(1) != '__VA_ARGS__':
04855       function_name = re.search(r'\b((?:string)?printf)\s*\(',
04856                                 line, re.I).group(1)
04857       error(filename, linenum, 'runtime/printf', 4,
04858             'Potential format string bug. Do %s("%%s", %s) instead.'
04859             % (function_name, match.group(1)))
04860 
04861   # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
04862   match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
04863   if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
04864     error(filename, linenum, 'runtime/memset', 4,
04865           'Did you mean "memset(%s, 0, %s)"?'
04866           % (match.group(1), match.group(2)))
04867 
04868   if Search(r'\busing namespace\b', line):
04869     error(filename, linenum, 'build/namespaces', 5,
04870           'Do not use namespace using-directives.  '
04871           'Use using-declarations instead.')
04872 
04873   # Detect variable-length arrays.
04874   match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
04875   if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
04876       match.group(3).find(']') == -1):
04877     # Split the size using space and arithmetic operators as delimiters.
04878     # If any of the resulting tokens are not compile time constants then
04879     # report the error.
04880     tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
04881     is_const = True
04882     skip_next = False
04883     for tok in tokens:
04884       if skip_next:
04885         skip_next = False
04886         continue
04887 
04888       if Search(r'sizeof\(.+\)', tok): continue
04889       if Search(r'arraysize\(\w+\)', tok): continue
04890 
04891       tok = tok.lstrip('(')
04892       tok = tok.rstrip(')')
04893       if not tok: continue
04894       if Match(r'\d+', tok): continue
04895       if Match(r'0[xX][0-9a-fA-F]+', tok): continue
04896       if Match(r'k[A-Z0-9]\w*', tok): continue
04897       if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
04898       if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
04899       # A catch all for tricky sizeof cases, including 'sizeof expression',
04900       # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
04901       # requires skipping the next token because we split on ' ' and '*'.
04902       if tok.startswith('sizeof'):
04903         skip_next = True
04904         continue
04905       is_const = False
04906       break
04907     if not is_const:
04908       error(filename, linenum, 'runtime/arrays', 1,
04909             'Do not use variable-length arrays.  Use an appropriately named '
04910             "('k' followed by CamelCase) compile-time constant for the size.")
04911 
04912   # Check for use of unnamed namespaces in header files.  Registration
04913   # macros are typically OK, so we allow use of "namespace {" on lines
04914   # that end with backslashes.
04915   if (file_extension == 'h'
04916       and Search(r'\bnamespace\s*{', line)
04917       and line[-1] != '\\'):
04918     error(filename, linenum, 'build/namespaces', 4,
04919           'Do not use unnamed namespaces in header files.  See '
04920           'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
04921           ' for more information.')
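# A few lines that the checks above would flag, for illustration:
#   short count;                  -> runtime/int (use int16, not the C type)
#   char buf[bytes_needed];       -> runtime/arrays (variable-length array)
#   memset(buf, sizeof(buf), 0);  -> runtime/memset (arguments swapped)
#   using namespace std;          -> build/namespaces
# whereas 'unsigned short port;' and 'char buf[kMaxBytes];' are accepted.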
04922 
04923 
04924 def CheckGlobalStatic(filename, clean_lines, linenum, error):
04925   """Check for unsafe global or static objects.
04926 
04927   Args:
04928     filename: The name of the current file.
04929     clean_lines: A CleansedLines instance containing the file.
04930     linenum: The number of the line to check.
04931     error: The function to call with any errors found.
04932   """
04933   line = clean_lines.elided[linenum]
04934 
04935   # Match two lines at a time to support multiline declarations
04936   if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
04937     line += clean_lines.elided[linenum + 1].strip()
04938 
04939   # Check for people declaring static/global STL strings at the top level.
04940   # This is dangerous because the C++ language does not guarantee that
04941   # globals with constructors are initialized before the first access.
04942   match = Match(
04943       r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
04944       line)
04945 
04946   # Remove false positives:
04947   # - String pointers (as opposed to values).
04948   #    string *pointer
04949   #    const string *pointer
04950   #    string const *pointer
04951   #    string *const pointer
04952   #
04953   # - Functions and template specializations.
04954   #    string Function<Type>(...
04955   #    string Class<Type>::Method(...
04956   #
04957   # - Operators.  These are matched separately because operator names
04958   #   cross non-word boundaries, and trying to match both operators
04959   #   and functions at the same time would decrease accuracy of
04960   #   matching identifiers.
04961   #    string Class::operator*()
04962   if (match and
04963       not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
04964       not Search(r'\boperator\W', line) and
04965       not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
04966     error(filename, linenum, 'runtime/string', 4,
04967           'For a static/global string constant, use a C style string instead: '
04968           '"%schar %s[]".' %
04969           (match.group(1), match.group(2)))
04970 
04971   if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
04972     error(filename, linenum, 'runtime/init', 4,
04973           'You seem to be initializing a member variable with itself.')
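# For example, a file-scope declaration such as
#   static const string kGreeting = "hello";
# draws runtime/string with the suggestion 'static const char kGreeting[]',
# while a string pointer like 'static const string* name;' is not flagged.
# A member initializer of the form 'count_(count_)' draws runtime/init.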
04974 
04975 
04976 def CheckPrintf(filename, clean_lines, linenum, error):
04977   """Check for printf related issues.
04978 
04979   Args:
04980     filename: The name of the current file.
04981     clean_lines: A CleansedLines instance containing the file.
04982     linenum: The number of the line to check.
04983     error: The function to call with any errors found.
04984   """
04985   line = clean_lines.elided[linenum]
04986 
04987   # When snprintf is used, the second argument shouldn't be a literal.
04988   match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
04989   if match and match.group(2) != '0':
04990     # If 2nd arg is zero, snprintf is used to calculate size.
04991     error(filename, linenum, 'runtime/printf', 3,
04992           'If you can, use sizeof(%s) instead of %s as the 2nd arg '
04993           'to snprintf.' % (match.group(1), match.group(2)))
04994 
04995   # Check if some verboten C functions are being used.
04996   if Search(r'\bsprintf\s*\(', line):
04997     error(filename, linenum, 'runtime/printf', 5,
04998           'Never use sprintf. Use snprintf instead.')
04999   match = Search(r'\b(strcpy|strcat)\s*\(', line)
05000   if match:
05001     error(filename, linenum, 'runtime/printf', 4,
05002           'Almost always, snprintf is better than %s' % match.group(1))
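# For illustration:
#   snprintf(buf, 10, "%s", s);  -> suggests sizeof(buf) instead of the literal 10
#   sprintf(buf, "%s", s);       -> 'Never use sprintf. Use snprintf instead.'
#   strcpy(dest, src);           -> 'Almost always, snprintf is better than strcpy'
# A size argument of 0 (snprintf used only to compute a length) is not flagged.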
05003 
05004 
05005 def IsDerivedFunction(clean_lines, linenum):
05006   """Check if current line contains an inherited function.
05007 
05008   Args:
05009     clean_lines: A CleansedLines instance containing the file.
05010     linenum: The number of the line to check.
05011   Returns:
05012     True if current line contains a function with "override"
05013     virt-specifier.
05014   """
05015   # Scan back a few lines for start of current function
05016   for i in xrange(linenum, max(-1, linenum - 10), -1):
05017     match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
05018     if match:
05019       # Look for "override" after the matching closing parenthesis
05020       line, _, closing_paren = CloseExpression(
05021           clean_lines, i, len(match.group(1)))
05022       return (closing_paren >= 0 and
05023               Search(r'\boverride\b', line[closing_paren:]))
05024   return False
05025 
05026 
05027 def IsOutOfLineMethodDefinition(clean_lines, linenum):
05028   """Check if current line contains an out-of-line method definition.
05029 
05030   Args:
05031     clean_lines: A CleansedLines instance containing the file.
05032     linenum: The number of the line to check.
05033   Returns:
05034     True if current line contains an out-of-line method definition.
05035   """
05036   # Scan back a few lines for start of current function
05037   for i in xrange(linenum, max(-1, linenum - 10), -1):
05038     if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
05039       return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
05040   return False
05041 
05042 
05043 def IsInitializerList(clean_lines, linenum):
05044   """Check if current line is inside constructor initializer list.
05045 
05046   Args:
05047     clean_lines: A CleansedLines instance containing the file.
05048     linenum: The number of the line to check.
05049   Returns:
05050     True if current line appears to be inside constructor initializer
05051     list, False otherwise.
05052   """
05053   for i in xrange(linenum, 1, -1):
05054     line = clean_lines.elided[i]
05055     if i == linenum:
05056       remove_function_body = Match(r'^(.*)\{\s*$', line)
05057       if remove_function_body:
05058         line = remove_function_body.group(1)
05059 
05060     if Search(r'\s:\s*\w+[({]', line):
05061       # A lone colon tends to indicate the start of a constructor
05062       # initializer list.  It could also be a ternary operator, which
05063       # also tends to appear in constructor initializer lists as
05064       # opposed to parameter lists.
05065       return True
05066     if Search(r'\}\s*,\s*$', line):
05067       # A closing brace followed by a comma is probably the end of a
05068       # brace-initialized member in constructor initializer list.
05069       return True
05070     if Search(r'[{};]\s*$', line):
05071       # Found one of the following:
05072       # - A closing brace or semicolon, probably the end of the previous
05073       #   function.
05074       # - An opening brace, probably the start of current class or namespace.
05075       #
05076       # Current line is probably not inside an initializer list since
05077       # we saw one of those things without seeing the starting colon.
05078       return False
05079 
05080   # Got to the beginning of the file without seeing the start of
05081   # constructor initializer list.
05082   return False
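# For illustration, scanning backwards from a line inside
#   MyClass::MyClass()
#       : member_(0),
#         other_(NULL) {
# hits the ' : member_(' pattern and returns True, whereas hitting a line
# that ends with '{', '}' or ';' first returns False.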
05083 
05084 
05085 def CheckForNonConstReference(filename, clean_lines, linenum,
05086                               nesting_state, error):
05087   """Check for non-const references.
05088 
05089   Separate from CheckLanguage since it scans backwards from current
05090   line, instead of scanning forward.
05091 
05092   Args:
05093     filename: The name of the current file.
05094     clean_lines: A CleansedLines instance containing the file.
05095     linenum: The number of the line to check.
05096     nesting_state: A NestingState instance which maintains information about
05097                    the current stack of nested blocks being parsed.
05098     error: The function to call with any errors found.
05099   """
05100   # Do nothing if there is no '&' on current line.
05101   line = clean_lines.elided[linenum]
05102   if '&' not in line:
05103     return
05104 
05105   # If a function is inherited, the current function doesn't have much of
05106   # a choice, so any non-const references should not be blamed on the
05107   # derived function.
05108   if IsDerivedFunction(clean_lines, linenum):
05109     return
05110 
05111   # Don't warn on out-of-line method definitions, as we would warn on the
05112   # in-line declaration, if it isn't marked with 'override'.
05113   if IsOutOfLineMethodDefinition(clean_lines, linenum):
05114     return
05115 
05116   # Long type names may be broken across multiple lines, usually in one
05117   # of these forms:
05118   #   LongType
05119   #       ::LongTypeContinued &identifier
05120   #   LongType::
05121   #       LongTypeContinued &identifier
05122   #   LongType<
05123   #       ...>::LongTypeContinued &identifier
05124   #
05125   # If we detected a type split across two lines, join the previous
05126   # line to current line so that we can match const references
05127   # accordingly.
05128   #
05129   # Note that this only scans back one line, since scanning back
05130   # an arbitrary number of lines would be expensive.  If you have a type
05131   # that spans more than 2 lines, please use a typedef.
05132   if linenum > 1:
05133     previous = None
05134     if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
05135       # previous_line\n + ::current_line
05136       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
05137                         clean_lines.elided[linenum - 1])
05138     elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
05139       # previous_line::\n + current_line
05140       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
05141                         clean_lines.elided[linenum - 1])
05142     if previous:
05143       line = previous.group(1) + line.lstrip()
05144     else:
05145       # Check for templated parameter that is split across multiple lines
05146       endpos = line.rfind('>')
05147       if endpos > -1:
05148         (_, startline, startpos) = ReverseCloseExpression(
05149             clean_lines, linenum, endpos)
05150         if startpos > -1 and startline < linenum:
05151           # Found the matching < on an earlier line, collect all
05152           # pieces up to current line.
05153           line = ''
05154           for i in xrange(startline, linenum + 1):
05155             line += clean_lines.elided[i].strip()
05156 
05157   # Check for non-const references in function parameters.  A single '&' may
05158   # be found in the following places:
05159   #   inside expression: binary & for bitwise AND
05160   #   inside expression: unary & for taking the address of something
05161   #   inside declarators: reference parameter
05162   # We will exclude the first two cases by checking that we are not inside a
05163   # function body, including one that was just introduced by a trailing '{'.
05164   # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
05165   if (nesting_state.previous_stack_top and
05166       not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
05167            isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
05168     # Not at toplevel, not within a class, and not within a namespace
05169     return
05170 
05171   # Avoid initializer lists.  We only need to scan back from the
05172   # current line for something that starts with ':'.
05173   #
05174   # We don't need to check the current line, since the '&' would
05175   # appear inside the second set of parentheses on the current line as
05176   # opposed to the first set.
05177   if linenum > 0:
05178     for i in xrange(linenum - 1, max(0, linenum - 10), -1):
05179       previous_line = clean_lines.elided[i]
05180       if not Search(r'[),]\s*$', previous_line):
05181         break
05182       if Match(r'^\s*:\s+\S', previous_line):
05183         return
05184 
05185   # Avoid preprocessors
05186   if Search(r'\\\s*$', line):
05187     return
05188 
05189   # Avoid constructor initializer lists
05190   if IsInitializerList(clean_lines, linenum):
05191     return
05192 
05193   # We allow non-const references in a few standard places, like functions
05194   # called "swap()" or iostream operators like "<<" or ">>".  Do not check
05195   # those function parameters.
05196   #
05197   # We also accept & in static_assert, which looks like a function but
05198   # it's actually a declaration expression.
05199   whitelisted_functions = (r'(?:[sS]wap(?:<[\w:]+>)?|'
05200                            r'operator\s*[<>][<>]|'
05201                            r'static_assert|COMPILE_ASSERT'
05202                            r')\s*\(')
05203   if Search(whitelisted_functions, line):
05204     return
05205   elif not Search(r'\S+\([^)]*$', line):
05206     # Don't see a whitelisted function on this line.  Actually we
05207     # didn't see any function name on this line, so this is likely a
05208     # multi-line parameter list.  Try a bit harder to catch this case.
05209     for i in xrange(2):
05210       if (linenum > i and
05211           Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
05212         return
05213 
05214   decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
05215   for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
05216     if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
05217       error(filename, linenum, 'runtime/references', 2,
05218             'Is this a non-const reference? '
05219             'If so, make const or use a pointer: ' +
05220             ReplaceAll(' *<', '<', parameter))
05221 
05222 
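# --- Editor's note (illustrative, not part of upstream cpplint) ---
# A sketch of the kind of declaration CheckForNonConstReference above flags;
# the function and variable names are invented:
#
#   void Frobnicate(string &out);        // flagged: runtime/references
#   void Frobnicate(const string &in);   // OK: const reference
#   void Frobnicate(string *out);        // OK: pointer makes mutation explicit
#
# Signatures matched by the whitelist regex, such as "swap(Foo &a, Foo &b)" or
# "operator<<(ostream &os, const Foo &f)", are skipped.
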
05223 def CheckCasts(filename, clean_lines, linenum, error):
05224   """Various cast related checks.
05225 
05226   Args:
05227     filename: The name of the current file.
05228     clean_lines: A CleansedLines instance containing the file.
05229     linenum: The number of the line to check.
05230     error: The function to call with any errors found.
05231   """
05232   line = clean_lines.elided[linenum]
05233 
05234   # Check to see if they're using a conversion function cast.
05235   # I just try to capture the most common basic types, though there are more.
05236   # Parameterless conversion functions, such as bool(), are allowed as they are
05237   # probably a member operator declaration or default constructor.
05238   match = Search(
05239       r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
05240       r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
05241       r'(\([^)].*)', line)
05242   expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
05243   if match and not expecting_function:
05244     matched_type = match.group(2)
05245 
05246     # matched_new_or_template is used to silence two false positives:
05247     # - New operators
05248     # - Template arguments with function types
05249     #
05250     # For template arguments, we match on types immediately following
05251     # an opening bracket without any spaces.  This is a fast way to
05252     # silence the common case where the function type is the first
05253     # template argument.  False negative with less-than comparison is
05254     # avoided because those operators are usually followed by a space.
05255     #
05256     #   function<double(double)>   // bracket + no space = false positive
05257     #   value < double(42)         // bracket + space = true positive
05258     matched_new_or_template = match.group(1)
05259 
05260     # Avoid arrays by looking for brackets that come after the closing
05261     # parenthesis.
05262     if Match(r'\([^()]+\)\s*\[', match.group(3)):
05263       return
05264 
05265     # Other things to ignore:
05266     # - Function pointers
05267     # - Casts to pointer types
05268     # - Placement new
05269     # - Alias declarations
05270     matched_funcptr = match.group(3)
05271     if (matched_new_or_template is None and
05272         not (matched_funcptr and
05273              (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
05274                     matched_funcptr) or
05275               matched_funcptr.startswith('(*)'))) and
05276         not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
05277         not Search(r'new\(\S+\)\s*' + matched_type, line)):
05278       error(filename, linenum, 'readability/casting', 4,
05279             'Using deprecated casting style.  '
05280             'Use static_cast<%s>(...) instead' %
05281             matched_type)
05282 
05283   if not expecting_function:
05284     CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
05285                     r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
05286 
05287   # This doesn't catch all cases. Consider (const char * const)"hello".
05288   #
05289   # (char *) "foo" should always be a const_cast (reinterpret_cast won't
05290   # compile).
05291   if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
05292                      r'\((char\s?\*+\s?)\)\s*"', error):
05293     pass
05294   else:
05295     # Check pointer casts for other than string constants
05296     CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
05297                     r'\((\w+\s?\*+\s?)\)', error)
05298 
05299   # In addition, we look for people taking the address of a cast.  This
05300   # is dangerous -- casts can assign to temporaries, so the pointer doesn't
05301   # point where you think.
05302   #
05303   # Some non-identifier character is required before the '&' for the
05304   # expression to be recognized as a cast.  These are casts:
05305   #   expression = &static_cast<int*>(temporary());
05306   #   function(&(int*)(temporary()));
05307   #
05308   # This is not a cast:
05309   #   reference_type&(int* function_param);
05310   match = Search(
05311       r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
05312       r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
05313   if match:
05314     # Try a better error message when the & is bound to something
05315     # dereferenced by the casted pointer, as opposed to the casted
05316     # pointer itself.
05317     parenthesis_error = False
05318     match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
05319     if match:
05320       _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
05321       if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
05322         _, y2, x2 = CloseExpression(clean_lines, y1, x1)
05323         if x2 >= 0:
05324           extended_line = clean_lines.elided[y2][x2:]
05325           if y2 < clean_lines.NumLines() - 1:
05326             extended_line += clean_lines.elided[y2 + 1]
05327           if Match(r'\s*(?:->|\[)', extended_line):
05328             parenthesis_error = True
05329 
05330     if parenthesis_error:
05331       error(filename, linenum, 'readability/casting', 4,
05332             ('Are you taking an address of something dereferenced '
05333              'from a cast?  Wrapping the dereferenced expression in '
05334              'parentheses will make the binding more obvious'))
05335     else:
05336       error(filename, linenum, 'runtime/casting', 4,
05337             ('Are you taking an address of a cast?  '
05338              'This is dangerous: could be a temp var.  '
05339              'Take the address before doing the cast, rather than after'))
05340 
05341 
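# --- Editor's note (illustrative, not part of upstream cpplint) ---
# Lines of the kind CheckCasts above reacts to; the identifiers are invented:
#
#   int n = int(f);                       // flagged: use static_cast<int>(f)
#   char* p = (char*)"hello";             // flagged: suggests const_cast
#   int* q = &static_cast<int*>(Temp());  // flagged: address of a cast
#   std::function<double(double)> g;      // OK: function type in a template,
#                                         // silenced by matched_new_or_template
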
05342 def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
05343   """Checks for a C-style cast by looking for the pattern.
05344 
05345   Args:
05346     filename: The name of the current file.
05347     clean_lines: A CleansedLines instance containing the file.
05348     linenum: The number of the line to check.
05349     cast_type: The string for the C++ cast to recommend.  This is either
05350       reinterpret_cast, static_cast, or const_cast, depending.
05351     pattern: The regular expression used to find C-style casts.
05352     error: The function to call with any errors found.
05353 
05354   Returns:
05355     True if an error was emitted.
05356     False otherwise.
05357   """
05358   line = clean_lines.elided[linenum]
05359   match = Search(pattern, line)
05360   if not match:
05361     return False
05362 
05363   # Exclude lines with keywords that tend to look like casts
05364   context = line[0:match.start(1) - 1]
05365   if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
05366     return False
05367 
05368   # Try expanding the current context to see if we are one level of
05369   # parentheses inside a macro.
05370   if linenum > 0:
05371     for i in xrange(linenum - 1, max(0, linenum - 5), -1):
05372       context = clean_lines.elided[i] + context
05373   if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
05374     return False
05375 
05376   # operator++(int) and operator--(int)
05377   if context.endswith(' operator++') or context.endswith(' operator--'):
05378     return False
05379 
05380   # A single unnamed argument for a function tends to look like old
05381   # style cast.  If we see those, don't issue warnings for deprecated
05382   # casts; instead, issue warnings for unnamed arguments where
05383   # appropriate.
05384   #
05385   # These are things that we want warnings for, since the style guide
05386   # explicitly requires all parameters to be named:
05387   #   Function(int);
05388   #   Function(int) {
05389   #   ConstMember(int) const;
05390   #   ConstMember(int) const {
05391   #   ExceptionMember(int) throw (...);
05392   #   ExceptionMember(int) throw (...) {
05393   #   PureVirtual(int) = 0;
05394   #   [](int) -> bool {
05395   #
05396   # These are functions of some sort, where the compiler would be fine
05397   # if they had named parameters, but people often omit those
05398   # identifiers to reduce clutter:
05399   #   (FunctionPointer)(int);
05400   #   (FunctionPointer)(int) = value;
05401   #   Function((function_pointer_arg)(int))
05402   #   Function((function_pointer_arg)(int), int param)
05403   #   <TemplateArgument(int)>;
05404   #   <(FunctionPointerTemplateArgument)(int)>;
05405   remainder = line[match.end(0):]
05406   if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
05407            remainder):
05408     # Looks like an unnamed parameter.
05409 
05410     # Don't warn on any kind of template arguments.
05411     if Match(r'^\s*>', remainder):
05412       return False
05413 
05414     # Don't warn on assignments to function pointers, but keep warnings for
05415     # unnamed parameters to pure virtual functions.  Note that this pattern
05416     # will also pass on assignments of "0" to function pointers, but the
05417     # preferred values for those would be "nullptr" or "NULL".
05418     matched_zero = Match(r'^\s*=\s*(\S+)\s*;', remainder)
05419     if matched_zero and matched_zero.group(1) != '0':
05420       return False
05421 
05422     # Don't warn on function pointer declarations.  For this we need
05423     # to check what came before the "(type)" string.
05424     if Match(r'.*\)\s*$', line[0:match.start(0)]):
05425       return False
05426 
05427     # Don't warn if the parameter is named with block comments, e.g.:
05428     #  Function(int /*unused_param*/);
05429     raw_line = clean_lines.raw_lines[linenum]
05430     if '/*' in raw_line:
05431       return False
05432 
05433     # Passed all filters, issue warning here.
05434     error(filename, linenum, 'readability/function', 3,
05435           'All parameters should be named in a function')
05436     return True
05437 
05438   # At this point, all that should be left is actual casts.
05439   error(filename, linenum, 'readability/casting', 4,
05440         'Using C-style cast.  Use %s<%s>(...) instead' %
05441         (cast_type, match.group(1)))
05442 
05443   return True
05444 
05445 
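# --- Editor's sketch (not part of upstream cpplint) ---
# A never-called helper showing how CheckCStyleCast above could be exercised
# in isolation.  It assumes CleansedLines (defined earlier in this file)
# accepts a plain list of source lines; the file name and C++ snippet are
# made up.
def _DemoCheckCStyleCast():
  """Illustrative only: collects the message produced for one C-style cast."""
  messages = []
  def _Collect(unused_filename, unused_linenum, category, unused_level, msg):
    messages.append((category, msg))
  clean = CleansedLines(['int y = (int)x;'])
  CheckCStyleCast('demo.cc', clean, 0, 'static_cast',
                  r'\((int|float|double|bool|char|u?int(16|32|64))\)',
                  _Collect)
  # Expect a single 'readability/casting' entry recommending
  # static_cast<int>(...).
  return messages
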
05446 def ExpectingFunctionArgs(clean_lines, linenum):
05447   """Checks whether where function type arguments are expected.
05448 
05449   Args:
05450     clean_lines: A CleansedLines instance containing the file.
05451     linenum: The number of the line to check.
05452 
05453   Returns:
05454     True if the line at 'linenum' is inside something that expects arguments
05455     of function types.
05456   """
05457   line = clean_lines.elided[linenum]
05458   return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
05459           (linenum >= 2 and
05460            (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
05461                   clean_lines.elided[linenum - 1]) or
05462             Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
05463                   clean_lines.elided[linenum - 2]) or
05464             Search(r'\bstd::m?function\s*<\s*$',
05465                    clean_lines.elided[linenum - 1]))))
05466 
05467 
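# --- Editor's note (illustrative, not part of upstream cpplint) ---
# Contexts recognized above, where "int(int)" is a function type rather than
# a cast, so the cast checks stay quiet (names invented):
#
#   MOCK_METHOD1(GetValue, int(int));   // gmock declaration on one line
#   std::function<
#       int(int)> callback;             // std::function split across lines
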
05468 _HEADERS_CONTAINING_TEMPLATES = (
05469     ('<deque>', ('deque',)),
05470     ('<functional>', ('unary_function', 'binary_function',
05471                       'plus', 'minus', 'multiplies', 'divides', 'modulus',
05472                       'negate',
05473                       'equal_to', 'not_equal_to', 'greater', 'less',
05474                       'greater_equal', 'less_equal',
05475                       'logical_and', 'logical_or', 'logical_not',
05476                       'unary_negate', 'not1', 'binary_negate', 'not2',
05477                       'bind1st', 'bind2nd',
05478                       'pointer_to_unary_function',
05479                       'pointer_to_binary_function',
05480                       'ptr_fun',
05481                       'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
05482                       'mem_fun_ref_t',
05483                       'const_mem_fun_t', 'const_mem_fun1_t',
05484                       'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
05485                       'mem_fun_ref',
05486                      )),
05487     ('<limits>', ('numeric_limits',)),
05488     ('<list>', ('list',)),
05489     ('<map>', ('map', 'multimap',)),
05490     ('<memory>', ('allocator',)),
05491     ('<queue>', ('queue', 'priority_queue',)),
05492     ('<set>', ('set', 'multiset',)),
05493     ('<stack>', ('stack',)),
05494     ('<string>', ('char_traits', 'basic_string',)),
05495     ('<tuple>', ('tuple',)),
05496     ('<utility>', ('pair',)),
05497     ('<vector>', ('vector',)),
05498 
05499     # gcc extensions.
05500     # Note: std::hash is their hash, ::hash is our hash
05501     ('<hash_map>', ('hash_map', 'hash_multimap',)),
05502     ('<hash_set>', ('hash_set', 'hash_multiset',)),
05503     ('<slist>', ('slist',)),
05504     )
05505 
05506 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
05507 
05508 _re_pattern_algorithm_header = []
05509 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
05510                   'transform'):
05511   # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
05512   # type::max().
05513   _re_pattern_algorithm_header.append(
05514       (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
05515        _template,
05516        '<algorithm>'))
05517 
05518 _re_pattern_templates = []
05519 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
05520   for _template in _templates:
05521     _re_pattern_templates.append(
05522         (re.compile(r'(<|\b)' + _template + r'\s*<'),
05523          _template + '<>',
05524          _header))
05525 
05526 
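# --- Editor's sketch (not part of upstream cpplint) ---
# A never-called helper showing what the patterns built above report for one
# made-up line of C++.
def _DemoTemplatePatterns():
  """Illustrative only: headers the template patterns would ask for."""
  hits = []
  for pattern, template, header in _re_pattern_templates:
    if pattern.search('std::map<int, string> counts;'):
      hits.append((template, header))
  # Expect [('map<>', '<map>')]: using std::map requires including <map>.
  return hits
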
05527 def FilesBelongToSameModule(filename_cc, filename_h):
05528   """Check if these two filenames belong to the same module.
05529 
05530   The concept of a 'module' here is as follows:
05531   foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
05532   same 'module' if they are in the same directory.
05533   some/path/public/xyzzy and some/path/internal/xyzzy are also considered
05534   to belong to the same module here.
05535 
05536   If the filename_cc contains a longer path than the filename_h, for example,
05537   '/absolute/path/to/base/sysinfo.cc', and this file would include
05538   'base/sysinfo.h', this function also produces the prefix needed to open the
05539   header. This is used by the caller of this function to more robustly open the
05540   header file. We don't have access to the real include paths in this context,
05541   so we need this guesswork here.
05542 
05543   Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
05544   according to this implementation. Because of this, this function gives
05545   some false positives. This should be sufficiently rare in practice.
05546 
05547   Args:
05548     filename_cc: The path of the .cc file.
05549     filename_h: The path of the header file.
05550 
05551   Returns:
05552     Tuple with a bool and a string:
05553     bool: True if filename_cc and filename_h belong to the same module.
05554     string: the additional prefix needed to open the header file.
05555   """
05556 
05557   if not filename_cc.endswith('.cc'):
05558     return (False, '')
05559   filename_cc = filename_cc[:-len('.cc')]
05560   if filename_cc.endswith('_unittest'):
05561     filename_cc = filename_cc[:-len('_unittest')]
05562   elif filename_cc.endswith('_test'):
05563     filename_cc = filename_cc[:-len('_test')]
05564   filename_cc = filename_cc.replace('/public/', '/')
05565   filename_cc = filename_cc.replace('/internal/', '/')
05566 
05567   if not filename_h.endswith('.h'):
05568     return (False, '')
05569   filename_h = filename_h[:-len('.h')]
05570   if filename_h.endswith('-inl'):
05571     filename_h = filename_h[:-len('-inl')]
05572   filename_h = filename_h.replace('/public/', '/')
05573   filename_h = filename_h.replace('/internal/', '/')
05574 
05575   files_belong_to_same_module = filename_cc.endswith(filename_h)
05576   common_path = ''
05577   if files_belong_to_same_module:
05578     common_path = filename_cc[:-len(filename_h)]
05579   return files_belong_to_same_module, common_path
05580 
05581 
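# --- Editor's sketch (not part of upstream cpplint) ---
# A never-called helper recording the expected pairings returned by
# FilesBelongToSameModule above; the paths are made up.
def _DemoFilesBelongToSameModule():
  """Illustrative only: module pairings on invented paths."""
  assert FilesBelongToSameModule(
      '/abs/path/base/sysinfo.cc', 'base/sysinfo.h') == (True, '/abs/path/')
  assert FilesBelongToSameModule('foo/bar_test.cc', 'foo/bar.h') == (True, '')
  assert FilesBelongToSameModule('foo/bar.cc', 'baz/qux.h') == (False, '')
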
05582 def UpdateIncludeState(filename, include_dict, io=codecs):
05583   """Fill up the include_dict with new includes found from the file.
05584 
05585   Args:
05586     filename: the name of the header to read.
05587     include_dict: a dictionary in which the headers are inserted.
05588     io: The io factory to use to read the file. Provided for testability.
05589 
05590   Returns:
05591     True if a header was successfully added. False otherwise.
05592   """
05593   headerfile = None
05594   try:
05595     headerfile = io.open(filename, 'r', 'utf8', 'replace')
05596   except IOError:
05597     return False
05598   linenum = 0
05599   for line in headerfile:
05600     linenum += 1
05601     clean_line = CleanseComments(line)
05602     match = _RE_PATTERN_INCLUDE.search(clean_line)
05603     if match:
05604       include = match.group(2)
05605       include_dict.setdefault(include, linenum)
05606   return True
05607 
05608 
05609 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
05610                               io=codecs):
05611   """Reports for missing stl includes.
05612 
05613   This function will output warnings to make sure you are including the headers
05614   necessary for the stl containers and functions that you use. We only give one
05615   reason to include a header. For example, if you use both equal_to<> and
05616   less<> in a .h file, only one (the latter in the file) of these will be
05617   reported as a reason to include the <functional>.
05618 
05619   Args:
05620     filename: The name of the current file.
05621     clean_lines: A CleansedLines instance containing the file.
05622     include_state: An _IncludeState instance.
05623     error: The function to call with any errors found.
05624     io: The IO factory to use to read the header file. Provided for unittest
05625         injection.
05626   """
05627   required = {}  # A map of header name to linenumber and the template entity.
05628                  # Example of required: { '<functional>': (1219, 'less<>') }
05629 
05630   for linenum in xrange(clean_lines.NumLines()):
05631     line = clean_lines.elided[linenum]
05632     if not line or line[0] == '#':
05633       continue
05634 
05635     # String is special -- it is a non-templatized type in STL.
05636     matched = _RE_PATTERN_STRING.search(line)
05637     if matched:
05638       # Don't warn about strings in non-STL namespaces:
05639       # (We check only the first match per line; good enough.)
05640       prefix = line[:matched.start()]
05641       if prefix.endswith('std::') or not prefix.endswith('::'):
05642         required['<string>'] = (linenum, 'string')
05643 
05644     for pattern, template, header in _re_pattern_algorithm_header:
05645       if pattern.search(line):
05646         required[header] = (linenum, template)
05647 
05648     # The following check is just a speed-up; no semantics are changed.
05649     if '<' not in line:  # Reduces CPU time by skipping lines with no templates.
05650       continue
05651 
05652     for pattern, template, header in _re_pattern_templates:
05653       if pattern.search(line):
05654         required[header] = (linenum, template)
05655 
05656   # The policy is that if you #include something in foo.h you don't need to
05657   # include it again in foo.cc. Here, we will look at possible includes.
05658   # Let's flatten the include_state include_list and copy it into a dictionary.
05659   include_dict = dict([item for sublist in include_state.include_list
05660                        for item in sublist])
05661 
05662   # Did we find the header for this file (if any) and successfully load it?
05663   header_found = False
05664 
05665   # Use the absolute path so that matching works properly.
05666   abs_filename = FileInfo(filename).FullName()
05667 
05668   # For Emacs's flymake.
05669   # If cpplint is invoked from Emacs's flymake, a temporary file is generated
05670   # by flymake and that file name might end with '_flymake.cc'. In that case,
05671   # restore original file name here so that the corresponding header file can be
05672   # found.
05673   # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
05674   # instead of 'foo_flymake.h'
05675   abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
05676 
05677   # include_dict is modified during iteration, so we iterate over a copy of
05678   # the keys.
05679   header_keys = list(include_dict.keys())
05680   for header in header_keys:
05681     (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
05682     fullpath = common_path + header
05683     if same_module and UpdateIncludeState(fullpath, include_dict, io):
05684       header_found = True
05685 
05686   # If we can't find the header file for a .cc, assume it's because we don't
05687   # know where to look. In that case we'll give up as we're not sure they
05688   # didn't include it in the .h file.
05689   # TODO(unknown): Do a better job of finding .h files so we are confident that
05690   # not having the .h file means there isn't one.
05691   if filename.endswith('.cc') and not header_found:
05692     return
05693 
05694   # All the lines have been processed, report the errors found.
05695   for required_header_unstripped in required:
05696     template = required[required_header_unstripped][1]
05697     if required_header_unstripped.strip('<>"') not in include_dict:
05698       error(filename, required[required_header_unstripped][0],
05699             'build/include_what_you_use', 4,
05700             'Add #include ' + required_header_unstripped + ' for ' + template)
05701 
05702 
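# --- Editor's note (illustrative, not part of upstream cpplint) ---
# Example of the warning emitted above, for made-up files: if foo.cc declares
#   std::set<int> ids;
# and neither foo.cc nor its foo.h includes <set>, the report is
#   build/include_what_you_use: Add #include <set> for set<>
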
05703 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
05704 
05705 
05706 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
05707   """Check that make_pair's template arguments are deduced.
05708 
05709   G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
05710   specified explicitly, and such use isn't intended in any case.
05711 
05712   Args:
05713     filename: The name of the current file.
05714     clean_lines: A CleansedLines instance containing the file.
05715     linenum: The number of the line to check.
05716     error: The function to call with any errors found.
05717   """
05718   line = clean_lines.elided[linenum]
05719   match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
05720   if match:
05721     error(filename, linenum, 'build/explicit_make_pair',
05722           4,  # 4 = high confidence
05723           'For C++11-compatibility, omit template arguments from make_pair'
05724           ' OR use pair directly OR if appropriate, construct a pair directly')
05725 
05726 
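# --- Editor's sketch (not part of upstream cpplint) ---
# A never-called helper showing which calls the explicit-make_pair pattern
# above matches; the snippets are invented.
def _DemoExplicitMakePair():
  """Illustrative only: explicit template arguments trigger the pattern."""
  assert _RE_PATTERN_EXPLICIT_MAKEPAIR.search(
      'return make_pair<int, int>(1, 2);')
  assert not _RE_PATTERN_EXPLICIT_MAKEPAIR.search('return make_pair(1, 2);')
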
05727 def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
05728   """Check that default lambda captures are not used.
05729 
05730   Args:
05731     filename: The name of the current file.
05732     clean_lines: A CleansedLines instance containing the file.
05733     linenum: The number of the line to check.
05734     error: The function to call with any errors found.
05735   """
05736   line = clean_lines.elided[linenum]
05737 
05738   # A lambda introducer specifies a default capture if it starts with "[="
05739   # or if it starts with "[&" _not_ followed by an identifier.
05740   match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
05741   if match:
05742     # Found a potential error, check what comes after the lambda-introducer.
05743     # If it's not open parenthesis (for lambda-declarator) or open brace
05744     # (for compound-statement), it's not a lambda.
05745     line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
05746     if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
05747       error(filename, linenum, 'build/c++11',
05748             4,  # 4 = high confidence
05749             'Default lambda captures are an unapproved C++ feature.')
05750 
05751 
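# --- Editor's note (illustrative, not part of upstream cpplint) ---
# Lambda introducers the check above distinguishes (identifiers invented):
#
#   auto f = [=](int x) { return x + 1; };     // flagged: default capture
#   auto g = [&](int x) { total += x; };       // flagged: default capture
#   auto h = [&total](int x) { total += x; };  // OK: explicit capture
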
05752 def CheckRedundantVirtual(filename, clean_lines, linenum, error):
05753   """Check if line contains a redundant "virtual" function-specifier.
05754 
05755   Args:
05756     filename: The name of the current file.
05757     clean_lines: A CleansedLines instance containing the file.
05758     linenum: The number of the line to check.
05759     error: The function to call with any errors found.
05760   """
05761   # Look for "virtual" on current line.
05762   line = clean_lines.elided[linenum]
05763   virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
05764   if not virtual: return
05765 
05766   # Ignore "virtual" keywords that are near access-specifiers.  These
05767   # are only used in class base-specifier and do not apply to member
05768   # functions.
05769   if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
05770       Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
05771     return
05772 
05773   # Ignore the "virtual" keyword from virtual base classes.  Usually
05774   # there is a column on the same line in these cases (virtual base
05775   # classes are rare in google3 because multiple inheritance is rare).
05776   if Match(r'^.*[^:]:[^:].*$', line): return
05777 
05778   # Look for the next opening parenthesis.  This is the start of the
05779   # parameter list (possibly on the next line shortly after virtual).
05780   # TODO(unknown): doesn't work if there are virtual functions with
05781   # decltype() or other things that use parentheses, but csearch suggests
05782   # that this is rare.
05783   end_col = -1
05784   end_line = -1
05785   start_col = len(virtual.group(2))
05786   for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
05787     line = clean_lines.elided[start_line][start_col:]
05788     parameter_list = Match(r'^([^(]*)\(', line)
05789     if parameter_list:
05790       # Match parentheses to find the end of the parameter list
05791       (_, end_line, end_col) = CloseExpression(
05792           clean_lines, start_line, start_col + len(parameter_list.group(1)))
05793       break
05794     start_col = 0
05795 
05796   if end_col < 0:
05797     return  # Couldn't find end of parameter list, give up
05798 
05799   # Look for "override" or "final" after the parameter list
05800   # (possibly on the next few lines).
05801   for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
05802     line = clean_lines.elided[i][end_col:]
05803     match = Search(r'\b(override|final)\b', line)
05804     if match:
05805       error(filename, linenum, 'readability/inheritance', 4,
05806             ('"virtual" is redundant since function is '
05807              'already declared as "%s"' % match.group(1)))
05808 
05809     # Set end_col to check whole lines after we are done with the
05810     # first line.
05811     end_col = 0
05812     if Search(r'[^\w]\s*$', line):
05813       break
05814 
05815 
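# --- Editor's note (illustrative, not part of upstream cpplint) ---
# Declarations the check above targets (method name invented):
#
#   virtual void Draw() override;   // flagged: "virtual" is redundant since
#                                   // the function is already "override"
#   virtual void Draw();            // OK: plain virtual declaration
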
05816 def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
05817   """Check if line contains a redundant "override" or "final" virt-specifier.
05818 
05819   Args:
05820     filename: The name of the current file.
05821     clean_lines: A CleansedLines instance containing the file.
05822     linenum: The number of the line to check.
05823     error: The function to call with any errors found.
05824   """
05825   # Look for closing parenthesis nearby.  We need one to confirm where
05826   # the declarator ends and where the virt-specifier starts to avoid
05827   # false positives.
05828   line = clean_lines.elided[linenum]
05829   declarator_end = line.rfind(')')
05830   if declarator_end >= 0:
05831     fragment = line[declarator_end:]
05832   else:
05833     if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
05834       fragment = line
05835     else:
05836       return
05837 
05838   # Check that at most one of "override" or "final" is present, not both
05839   if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
05840     error(filename, linenum, 'readability/inheritance', 4,
05841           ('"override" is redundant since function is '
05842            'already declared as "final"'))
05843 
05844 
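# --- Editor's note (illustrative, not part of upstream cpplint) ---
#   void Draw() override final;     // flagged: "override" is redundant since
#                                   // the function is already "final"
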
05845 
05846 
05847 # Returns true if we are at a new block, and it is directly
05848 # inside of a namespace.
05849 def IsBlockInNameSpace(nesting_state, is_forward_declaration):
05850   """Checks that the new block is directly in a namespace.
05851 
05852   Args:
05853     nesting_state: The _NestingState object that contains info about our state.
05854     is_forward_declaration: If the class is a forward declared class.
05855   Returns:
05856     Whether or not the new block is directly in a namespace.
05857   """
05858   if is_forward_declaration:
05859     if len(nesting_state.stack) >= 1 and (
05860         isinstance(nesting_state.stack[-1], _NamespaceInfo)):
05861       return True
05862     else:
05863       return False
05864 
05865   return (len(nesting_state.stack) > 1 and
05866           nesting_state.stack[-1].check_namespace_indentation and
05867           isinstance(nesting_state.stack[-2], _NamespaceInfo))
05868 
05869 
05870 def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
05871                                     raw_lines_no_comments, linenum):
05872   """This method determines if we should apply our namespace indentation check.
05873 
05874   Args:
05875     nesting_state: The current nesting state.
05876     is_namespace_indent_item: If we just put a new class on the stack, True.
05877       If the top of the stack is not a class, or we did not recently
05878       add the class, False.
05879     raw_lines_no_comments: The lines without the comments.
05880     linenum: The current line number we are processing.
05881 
05882   Returns:
05883     True if we should apply our namespace indentation check. Currently, it
05884     only works for classes and namespaces inside of a namespace.
05885   """
05886 
05887   is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
05888                                                      linenum)
05889 
05890   if not (is_namespace_indent_item or is_forward_declaration):
05891     return False
05892 
05893   # If we are in a macro, we do not want to check the namespace indentation.
05894   if IsMacroDefinition(raw_lines_no_comments, linenum):
05895     return False
05896 
05897   return IsBlockInNameSpace(nesting_state, is_forward_declaration)
05898 
05899 
05900 # Call this method if the line is directly inside of a namespace.
05901 # If the line above is blank (excluding comments) or the start of
05902 # an inner namespace, it cannot be indented.
05903 def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
05904                                     error):
05905   line = raw_lines_no_comments[linenum]
05906   if Match(r'^\s+', line):
05907     error(filename, linenum, 'runtime/indentation_namespace', 4,
05908           'Do not indent within a namespace')
05909 
05910 
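# --- Editor's note (illustrative, not part of upstream cpplint) ---
# What the indentation check above reports, assuming the surrounding
# namespace-indentation gating has selected these lines:
#
#   namespace foo {
#     class Bar;   // flagged: "Do not indent within a namespace"
#   class Baz;     // OK
#   }  // namespace foo
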
05911 def ProcessLine(filename, file_extension, clean_lines, line,
05912                 include_state, function_state, nesting_state, error,
05913                 extra_check_functions=[]):
05914   """Processes a single line in the file.
05915 
05916   Args:
05917     filename: Filename of the file that is being processed.
05918     file_extension: The extension (dot not included) of the file.
05919     clean_lines: An array of strings, each representing a line of the file,
05920                  with comments stripped.
05921     line: Number of line being processed.
05922     include_state: An _IncludeState instance in which the headers are inserted.
05923     function_state: A _FunctionState instance which counts function lines, etc.
05924     nesting_state: A NestingState instance which maintains information about
05925                    the current stack of nested blocks being parsed.
05926     error: A callable to which errors are reported, which takes 4 arguments:
05927            filename, line number, error level, and message
05928     extra_check_functions: An array of additional check functions that will be
05929                            run on each source line. Each function takes 4
05930                            arguments: filename, clean_lines, line, error
05931   """
05932   raw_lines = clean_lines.raw_lines
05933   ParseNolintSuppressions(filename, raw_lines[line], line, error)
05934   nesting_state.Update(filename, clean_lines, line, error)
05935   CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
05936                                error)
05937   if nesting_state.InAsmBlock(): return
05938   CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
05939   CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
05940   CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
05941   CheckLanguage(filename, clean_lines, line, file_extension, include_state,
05942                 nesting_state, error)
05943   CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
05944   CheckForNonStandardConstructs(filename, clean_lines, line,
05945                                 nesting_state, error)
05946   CheckVlogArguments(filename, clean_lines, line, error)
05947   CheckPosixThreading(filename, clean_lines, line, error)
05948   CheckInvalidIncrement(filename, clean_lines, line, error)
05949   CheckMakePairUsesDeduction(filename, clean_lines, line, error)
05950   CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
05951   CheckRedundantVirtual(filename, clean_lines, line, error)
05952   CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
05953   for check_fn in extra_check_functions:
05954     check_fn(filename, clean_lines, line, error)
05955 
05956 def FlagCxx11Features(filename, clean_lines, linenum, error):
05957   """Flag those c++11 features that we only allow in certain places.
05958 
05959   Args:
05960     filename: The name of the current file.
05961     clean_lines: A CleansedLines instance containing the file.
05962     linenum: The number of the line to check.
05963     error: The function to call with any errors found.
05964   """
05965   line = clean_lines.elided[linenum]
05966 
05967   # Flag unapproved C++11 headers.
05968   include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
05969   if include and include.group(1) in ('cfenv',
05970                                       'condition_variable',
05971                                       'fenv.h',
05972                                       'future',
05973                                       'mutex',
05974                                       'thread',
05975                                       'chrono',
05976                                       'ratio',
05977                                       'regex',
05978                                       'system_error',
05979                                      ):
05980     error(filename, linenum, 'build/c++11', 5,
05981           ('<%s> is an unapproved C++11 header.') % include.group(1))
05982 
05983   # The only place where we need to worry about C++11 keywords and library
05984   # features in preprocessor directives is in macro definitions.
05985   if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
05986 
05987   # These are classes and free functions.  The classes are always
05988   # mentioned as std::*, but we only catch the free functions if
05989   # they're not found by ADL.  They're alphabetical by header.
05990   for top_name in (
05991       # type_traits
05992       'alignment_of',
05993       'aligned_union',
05994       ):
05995     if Search(r'\bstd::%s\b' % top_name, line):
05996       error(filename, linenum, 'build/c++11', 5,
05997             ('std::%s is an unapproved C++11 class or function.  Send c-style '
05998              'an example of where it would make your code more readable, and '
05999              'they may let you use it.') % top_name)
06000 
06001 
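# --- Editor's note (illustrative, not part of upstream cpplint) ---
# Constructs flagged above (file contents invented):
#
#   #include <thread>                        // unapproved C++11 header
#   size_t a = std::alignment_of<T>::value;  // unapproved C++11 class/function
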
06002 def ProcessFileData(filename, file_extension, lines, error,
06003                     extra_check_functions=[]):
06004   """Performs lint checks and reports any errors to the given error function.
06005 
06006   Args:
06007     filename: Filename of the file that is being processed.
06008     file_extension: The extension (dot not included) of the file.
06009     lines: An array of strings, each representing a line of the file, with the
06010            last element being empty if the file is terminated with a newline.
06011     error: A callable to which errors are reported, which takes 4 arguments:
06012            filename, line number, error level, and message
06013     extra_check_functions: An array of additional check functions that will be
06014                            run on each source line. Each function takes 4
06015                            arguments: filename, clean_lines, line, error
06016   """
06017   lines = (['// marker so line numbers and indices both start at 1'] + lines +
06018            ['// marker so line numbers end in a known way'])
06019 
06020   include_state = _IncludeState()
06021   function_state = _FunctionState()
06022   nesting_state = NestingState()
06023 
06024   ResetNolintSuppressions()
06025 
06026   CheckForCopyright(filename, lines, error)
06027 
06028   RemoveMultiLineComments(filename, lines, error)
06029   clean_lines = CleansedLines(lines)
06030 
06031   if file_extension == 'h':
06032     CheckForHeaderGuard(filename, clean_lines, error)
06033 
06034   for line in xrange(clean_lines.NumLines()):
06035     ProcessLine(filename, file_extension, clean_lines, line,
06036                 include_state, function_state, nesting_state, error,
06037                 extra_check_functions)
06038     FlagCxx11Features(filename, clean_lines, line, error)
06039   nesting_state.CheckCompletedBlocks(filename, error)
06040 
06041   CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
06042 
06043   # Check that the .cc file has included its header if it exists.
06044   if file_extension == 'cc':
06045     CheckHeaderFileIncluded(filename, include_state, error)
06046 
06047   # We check here rather than inside ProcessLine so that we see raw
06048   # lines rather than "cleaned" lines.
06049   CheckForBadCharacters(filename, lines, error)
06050 
06051   CheckForNewlineAtEOF(filename, lines, error)
06052 
06053 def ProcessConfigOverrides(filename):
06054   """ Loads the configuration files and processes the config overrides.
06055 
06056   Args:
06057     filename: The name of the file being processed by the linter.
06058 
06059   Returns:
06060     False if the current |filename| should not be processed further.
06061   """
06062 
06063   abs_filename = os.path.abspath(filename)
06064   cfg_filters = []
06065   keep_looking = True
06066   while keep_looking:
06067     abs_path, base_name = os.path.split(abs_filename)
06068     if not base_name:
06069       break  # Reached the root directory.
06070 
06071     cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
06072     abs_filename = abs_path
06073     if not os.path.isfile(cfg_file):
06074       continue
06075 
06076     try:
06077       with open(cfg_file) as file_handle:
06078         for line in file_handle:
06079           line, _, _ = line.partition('#')  # Remove comments.
06080           if not line.strip():
06081             continue
06082 
06083           name, _, val = line.partition('=')
06084           name = name.strip()
06085           val = val.strip()
06086           if name == 'set noparent':
06087             keep_looking = False
06088           elif name == 'filter':
06089             cfg_filters.append(val)
06090           elif name == 'exclude_files':
06091             # When matching exclude_files pattern, use the base_name of
06092             # the current file name or the directory name we are processing.
06093             # For example, if we are checking for lint errors in /foo/bar/baz.cc
06094             # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
06095             # file's "exclude_files" filter is meant to be checked against "bar"
06096             # and not "baz" nor "bar/baz.cc".
06097             if base_name:
06098               pattern = re.compile(val)
06099               if pattern.match(base_name):
06100                 sys.stderr.write('Ignoring "%s": file excluded by "%s". '
06101                                  'File path component "%s" matches '
06102                                  'pattern "%s"\n' %
06103                                  (filename, cfg_file, base_name, val))
06104                 return False
06105           elif name == 'linelength':
06106             global _line_length
06107             try:
06108               _line_length = int(val)
06109             except ValueError:
06110               sys.stderr.write('Line length must be numeric.\n')
06111           else:
06112             sys.stderr.write(
06113                 'Invalid configuration option (%s) in file %s\n' %
06114                 (name, cfg_file))
06115 
06116     except IOError:
06117       sys.stderr.write(
06118           "Skipping config file '%s': Can't open for reading\n" % cfg_file)
06119       keep_looking = False
06120 
06121   # Apply all the accumulated filters in reverse order (top-level directory
06122   # config options having the least priority).
06123   for cfg_filter in reversed(cfg_filters):
06124     _AddFilters(cfg_filter)
06125 
06126   return True
06127 
06128 
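# --- Editor's note (illustrative, not part of upstream cpplint) ---
# A CPPLINT.cfg that the parser above accepts; every value is made up:
#
#   set noparent
#   filter=-build/include_order,+whitespace/braces
#   exclude_files=generated_.*
#   linelength=100
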
06129 def ProcessFile(filename, vlevel, extra_check_functions=[]):
06130   """Does google-lint on a single file.
06131 
06132   Args:
06133     filename: The name of the file to parse.
06134 
06135     vlevel: The level of errors to report.  Every error of confidence
06136     >= verbose_level will be reported.  0 is a good default.
06137 
06138     extra_check_functions: An array of additional check functions that will be
06139                            run on each source line. Each function takes 4
06140                            arguments: filename, clean_lines, line, error
06141   """
06142 
06143   _SetVerboseLevel(vlevel)
06144   _BackupFilters()
06145 
06146   if not ProcessConfigOverrides(filename):
06147     _RestoreFilters()
06148     return
06149 
06150   lf_lines = []
06151   crlf_lines = []
06152   try:
06153     # Support the UNIX convention of using "-" for stdin.  Note that
06154     # we are not opening the file with universal newline support
06155     # (which codecs doesn't support anyway), so the resulting lines do
06156     # contain trailing '\r' characters if we are reading a file that
06157     # has CRLF endings.
06158     # If after the split a trailing '\r' is present, it is removed
06159     # below.
06160     if filename == '-':
06161       lines = codecs.StreamReaderWriter(sys.stdin,
06162                                         codecs.getreader('utf8'),
06163                                         codecs.getwriter('utf8'),
06164                                         'replace').read().split('\n')
06165     else:
06166       lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
06167 
06168     # Remove trailing '\r'.
06169     # The -1 accounts for the extra trailing blank line we get from split()
06170     for linenum in range(len(lines) - 1):
06171       if lines[linenum].endswith('\r'):
06172         lines[linenum] = lines[linenum].rstrip('\r')
06173         crlf_lines.append(linenum + 1)
06174       else:
06175         lf_lines.append(linenum + 1)
06176 
06177   except IOError:
06178     sys.stderr.write(
06179         "Skipping input '%s': Can't open for reading\n" % filename)
06180     _RestoreFilters()
06181     return
06182 
06183   # Note, if no dot is found, this will give the entire filename as the ext.
06184   file_extension = filename[filename.rfind('.') + 1:]
06185 
06186   # When reading from stdin, the extension is unknown, so no cpplint tests
06187   # should rely on the extension.
06188   if filename != '-' and file_extension not in _valid_extensions:
06189     sys.stderr.write('Ignoring %s; not a valid file name '
06190                      '(%s)\n' % (filename, ', '.join(_valid_extensions)))
06191   else:
06192     ProcessFileData(filename, file_extension, lines, Error,
06193                     extra_check_functions)
06194 
06195     # If end-of-line sequences are a mix of LF and CR-LF, issue
06196     # warnings on the lines with CR.
06197     #
06198     # Don't issue any warnings if all lines are uniformly LF or CR-LF,
06199     # since critique can handle these just fine, and the style guide
06200     # doesn't dictate a particular end of line sequence.
06201     #
06202     # We can't depend on os.linesep to determine what the desired
06203     # end-of-line sequence should be, since that will return the
06204     # server-side end-of-line sequence.
06205     if lf_lines and crlf_lines:
06206       # Warn on every line with CR.  An alternative approach might be to
06207       # check whether the file is mostly CRLF or just LF, and warn on the
06208       # minority; we bias toward LF here since most tools prefer LF.
06209       for linenum in crlf_lines:
06210         Error(filename, linenum, 'whitespace/newline', 1,
06211               'Unexpected \\r (^M) found; better to use only \\n')
06212 
06213   sys.stderr.write('Done processing %s\n' % filename)
06214   _RestoreFilters()
06215 
06216 
06217 def PrintUsage(message):
06218   """Prints a brief usage string and exits, optionally with an error message.
06219 
06220   Args:
06221     message: The optional error message.
06222   """
06223   sys.stderr.write(_USAGE)
06224   if message:
06225     sys.exit('\nFATAL ERROR: ' + message)
06226   else:
06227     sys.exit(1)
06228 
06229 
06230 def PrintCategories():
06231   """Prints a list of all the error-categories used by error messages.
06232 
06233   These are the categories used to filter messages via --filter.
06234   """
06235   sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
06236   sys.exit(0)
06237 
06238 
06239 def ParseArguments(args):
06240   """Parses the command line arguments.
06241 
06242   This may set the output format and verbosity level as side-effects.
06243 
06244   Args:
06245     args: The command line arguments:
06246 
06247   Returns:
06248     The list of filenames to lint.
06249   """
06250   try:
06251     (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
06252                                                  'counting=',
06253                                                  'filter=',
06254                                                  'root=',
06255                                                  'linelength=',
06256                                                  'extensions='])
06257   except getopt.GetoptError:
06258     PrintUsage('Invalid arguments.')
06259 
06260   verbosity = _VerboseLevel()
06261   output_format = _OutputFormat()
06262   filters = ''
06263   counting_style = ''
06264 
06265   for (opt, val) in opts:
06266     if opt == '--help':
06267       PrintUsage(None)
06268     elif opt == '--output':
06269       if val not in ('emacs', 'vs7', 'eclipse'):
06270         PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
06271       output_format = val
06272     elif opt == '--verbose':
06273       verbosity = int(val)
06274     elif opt == '--filter':
06275       filters = val
06276       if not filters:
06277         PrintCategories()
06278     elif opt == '--counting':
06279       if val not in ('total', 'toplevel', 'detailed'):
06280         PrintUsage('Valid counting options are total, toplevel, and detailed')
06281       counting_style = val
06282     elif opt == '--root':
06283       global _root
06284       _root = val
06285     elif opt == '--linelength':
06286       global _line_length
06287       try:
06288         _line_length = int(val)
06289       except ValueError:
06290         PrintUsage('Line length must be numeric.')
06291     elif opt == '--extensions':
06292       global _valid_extensions
06293       try:
06294         _valid_extensions = set(val.split(','))
06295       except ValueError:
06296         PrintUsage('Extensions must be a comma-separated list.')
06297 
06298   if not filenames:
06299     PrintUsage('No files were specified.')
06300 
06301   _SetOutputFormat(output_format)
06302   _SetVerboseLevel(verbosity)
06303   _SetFilters(filters)
06304   _SetCountingStyle(counting_style)
06305 
06306   return filenames
06307 
06308 
06309 def main():
06310   filenames = ParseArguments(sys.argv[1:])
06311 
06312   # Change stderr to write with replacement characters so we don't die
06313   # if we try to print something containing non-ASCII characters.
06314   sys.stderr = codecs.StreamReaderWriter(sys.stderr,
06315                                          codecs.getreader('utf8'),
06316                                          codecs.getwriter('utf8'),
06317                                          'replace')
06318 
06319   _cpplint_state.ResetErrorCounts()
06320   for filename in filenames:
06321     ProcessFile(filename, _cpplint_state.verbose_level)
06322   _cpplint_state.PrintErrorCounts()
06323 
06324   sys.exit(_cpplint_state.error_count > 0)
06325 
06326 
06327 if __name__ == '__main__':
06328   main()

