cpplint.py
00001 #!/usr/bin/python
00002 #
00003 # Copyright (c) 2009 Google Inc. All rights reserved.
00004 #
00005 # Redistribution and use in source and binary forms, with or without
00006 # modification, are permitted provided that the following conditions are
00007 # met:
00008 #
00009 #    * Redistributions of source code must retain the above copyright
00010 # notice, this list of conditions and the following disclaimer.
00011 #    * Redistributions in binary form must reproduce the above
00012 # copyright notice, this list of conditions and the following disclaimer
00013 # in the documentation and/or other materials provided with the
00014 # distribution.
00015 #    * Neither the name of Google Inc. nor the names of its
00016 # contributors may be used to endorse or promote products derived from
00017 # this software without specific prior written permission.
00018 #
00019 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00020 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00021 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
00022 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
00023 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00024 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
00025 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00026 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00027 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00028 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
00029 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00030 
00031 """Does google-lint on c++ files.
00032 
00033 The goal of this script is to identify places in the code that *may*
00034 be in non-compliance with Google style.  It does not attempt to fix
00035 up these problems -- the point is to educate.  It also does not
00036 attempt to find all problems, or to ensure that everything it does
00037 find is legitimately a problem.
00038 
00039 In particular, we can get very confused by /* and // inside strings!
00040 We do a small hack, which is to ignore //'s with "'s after them on the
00041 same line, but it is far from perfect (in either direction).
00042 """
00043 
00044 import codecs
00045 import copy
00046 import getopt
00047 import math  # for log
00048 import os
00049 import re
00050 import sre_compile
00051 import string
00052 import sys
00053 import unicodedata
00054 
00055 
00056 _USAGE = """
00057 Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
00058                    [--counting=total|toplevel|detailed] [--root=subdir]
00059                    [--linelength=digits]
00060         <file> [file] ...
00061 
00062   The style guidelines this tries to follow are those in
00063     http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
00064 
00065   Every problem is given a confidence score from 1-5, with 5 meaning we are
00066   certain of the problem, and 1 meaning it could be a legitimate construct.
00067   This will miss some errors, and is not a substitute for a code review.
00068 
00069   To suppress false-positive errors of a certain category, add a
00070   'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
00071   suppresses errors of all categories on that line.
00072 
00073   The files passed in will be linted; at least one file must be provided.
00074   Linted extensions are .cc, .cpp, and .h.  Other file types will be ignored.
00075 
00076   Flags:
00077 
00078     output=vs7
00079       By default, the output is formatted to ease emacs parsing.  Visual Studio
00080       compatible output (vs7) may also be used.  Other formats are unsupported.
00081 
00082     verbose=#
00083       Specify a number 0-5 to restrict errors to certain verbosity levels.
00084 
00085     filter=-x,+y,...
00086       Specify a comma-separated list of category-filters to apply: only
00087       error messages whose category names pass the filters will be printed.
00088       (Category names are printed with the message and look like
00089       "[whitespace/indent]".)  Filters are evaluated left to right.
00090       "-FOO" and "FOO" means "do not print categories that start with FOO".
00091       "+FOO" means "do print categories that start with FOO".
00092 
00093       Examples: --filter=-whitespace,+whitespace/braces
00094                 --filter=whitespace,runtime/printf,+runtime/printf_format
00095                 --filter=-,+build/include_what_you_use
00096 
00097       To see a list of all the categories used in cpplint, pass no arg:
00098          --filter=
00099 
00100     counting=total|toplevel|detailed
00101       The total number of errors found is always printed. If
00102       'toplevel' is provided, then the count of errors in each of
00103       the top-level categories like 'build' and 'whitespace' will
00104       also be printed. If 'detailed' is provided, then a count
00105       is provided for each category like 'build/class'.
00106 
00107     root=subdir
00108       The root directory used for deriving the header guard CPP variable.
00109       By default, the header guard CPP variable is calculated as the relative
00110       path to the directory that contains .git, .hg, or .svn.  When this flag
00111       is specified, the relative path is calculated from the specified
00112       directory. If the specified directory does not exist, this flag is
00113       ignored.
00114 
00115       Examples:
00116         Assuming that src/.git exists, the header guard CPP variables for
00117         src/chrome/browser/ui/browser.h are:
00118 
00119         No flag => CHROME_BROWSER_UI_BROWSER_H_
00120         --root=chrome => BROWSER_UI_BROWSER_H_
00121         --root=chrome/browser => UI_BROWSER_H_
00122 
00123     linelength=digits
00124       This is the allowed line length for the project. The default value is
00125       80 characters.
00126 
00127       Examples:
00128         --linelength=120
00129 """
00130 
00131 # We categorize each error message we print.  Here are the categories.
00132 # We want an explicit list so we can list them all in cpplint --filter=.
00133 # If you add a new error message with a new category, add it to the list
00134 # here!  cpplint_unittest.py should tell you if you forget to do this.
00135 _ERROR_CATEGORIES = [
00136   'build/class',
00137   'build/deprecated',
00138   'build/endif_comment',
00139   'build/explicit_make_pair',
00140   'build/forward_decl',
00141   'build/header_guard',
00142   'build/include',
00143   'build/include_alpha',
00144   'build/include_order',
00145   'build/include_what_you_use',
00146   'build/namespaces',
00147   'build/printf_format',
00148   'build/storage_class',
00149   'legal/copyright',
00150   'readability/alt_tokens',
00151   'readability/braces',
00152   'readability/casting',
00153   'readability/check',
00154   'readability/constructors',
00155   'readability/fn_size',
00156   'readability/function',
00157   'readability/multiline_comment',
00158   'readability/multiline_string',
00159   'readability/namespace',
00160   'readability/nolint',
00161   'readability/nul',
00162   'readability/streams',
00163   'readability/todo',
00164   'readability/utf8',
00165   'runtime/arrays',
00166   'runtime/casting',
00167   'runtime/explicit',
00168   'runtime/int',
00169   'runtime/init',
00170   'runtime/invalid_increment',
00171   'runtime/member_string_references',
00172   'runtime/memset',
00173   'runtime/operator',
00174   'runtime/printf',
00175   'runtime/printf_format',
00176   'runtime/references',
00177   'runtime/string',
00178   'runtime/threadsafe_fn',
00179   'runtime/vlog',
00180   'whitespace/blank_line',
00181   'whitespace/braces',
00182   'whitespace/comma',
00183   'whitespace/comments',
00184   'whitespace/empty_conditional_body',
00185   'whitespace/empty_loop_body',
00186   'whitespace/end_of_line',
00187   'whitespace/ending_newline',
00188   'whitespace/forcolon',
00189   'whitespace/indent',
00190   'whitespace/line_length',
00191   'whitespace/newline',
00192   'whitespace/operators',
00193   'whitespace/parens',
00194   'whitespace/semicolon',
00195   'whitespace/tab',
00196   'whitespace/todo'
00197   ]
00198 
00199 # The default state of the category filter. This is overridden by the --filter=
00200 # flag. By default all errors are on, so only add here categories that should be
00201 # off by default (i.e., categories that must be enabled by the --filter= flags).
00202 # All entries here should start with a '-' or '+', as in the --filter= flag.
00203 _DEFAULT_FILTERS = ['-build/include_alpha']
00204 
00205 # We used to check for high-bit characters, but after much discussion we
00206 # decided those were OK, as long as they were in UTF-8 and didn't represent
00207 # hard-coded international strings, which belong in a separate i18n file.
00208 
00209 
00210 # C++ headers
00211 _CPP_HEADERS = frozenset([
00212     # Legacy
00213     'algobase.h',
00214     'algo.h',
00215     'alloc.h',
00216     'builtinbuf.h',
00217     'bvector.h',
00218     'complex.h',
00219     'defalloc.h',
00220     'deque.h',
00221     'editbuf.h',
00222     'fstream.h',
00223     'function.h',
00224     'hash_map',
00225     'hash_map.h',
00226     'hash_set',
00227     'hash_set.h',
00228     'hashtable.h',
00229     'heap.h',
00230     'indstream.h',
00231     'iomanip.h',
00232     'iostream.h',
00233     'istream.h',
00234     'iterator.h',
00235     'list.h',
00236     'map.h',
00237     'multimap.h',
00238     'multiset.h',
00239     'ostream.h',
00240     'pair.h',
00241     'parsestream.h',
00242     'pfstream.h',
00243     'procbuf.h',
00244     'pthread_alloc',
00245     'pthread_alloc.h',
00246     'rope',
00247     'rope.h',
00248     'ropeimpl.h',
00249     'set.h',
00250     'slist',
00251     'slist.h',
00252     'stack.h',
00253     'stdiostream.h',
00254     'stl_alloc.h',
00255     'stl_relops.h',
00256     'streambuf.h',
00257     'stream.h',
00258     'strfile.h',
00259     'strstream.h',
00260     'tempbuf.h',
00261     'tree.h',
00262     'type_traits.h',
00263     'vector.h',
00264     # 17.6.1.2 C++ library headers
00265     'algorithm',
00266     'array',
00267     'atomic',
00268     'bitset',
00269     'chrono',
00270     'codecvt',
00271     'complex',
00272     'condition_variable',
00273     'deque',
00274     'exception',
00275     'forward_list',
00276     'fstream',
00277     'functional',
00278     'future',
00279     'initializer_list',
00280     'iomanip',
00281     'ios',
00282     'iosfwd',
00283     'iostream',
00284     'istream',
00285     'iterator',
00286     'limits',
00287     'list',
00288     'locale',
00289     'map',
00290     'memory',
00291     'mutex',
00292     'new',
00293     'numeric',
00294     'ostream',
00295     'queue',
00296     'random',
00297     'ratio',
00298     'regex',
00299     'set',
00300     'sstream',
00301     'stack',
00302     'stdexcept',
00303     'streambuf',
00304     'string',
00305     'strstream',
00306     'system_error',
00307     'thread',
00308     'tuple',
00309     'typeindex',
00310     'typeinfo',
00311     'type_traits',
00312     'unordered_map',
00313     'unordered_set',
00314     'utility',
00315     'valarray',
00316     'vector',
00317     # 17.6.1.2 C++ headers for C library facilities
00318     'cassert',
00319     'ccomplex',
00320     'cctype',
00321     'cerrno',
00322     'cfenv',
00323     'cfloat',
00324     'cinttypes',
00325     'ciso646',
00326     'climits',
00327     'clocale',
00328     'cmath',
00329     'csetjmp',
00330     'csignal',
00331     'cstdalign',
00332     'cstdarg',
00333     'cstdbool',
00334     'cstddef',
00335     'cstdint',
00336     'cstdio',
00337     'cstdlib',
00338     'cstring',
00339     'ctgmath',
00340     'ctime',
00341     'cuchar',
00342     'cwchar',
00343     'cwctype',
00344     ])
00345 
00346 # Assertion macros.  These are defined in base/logging.h and
00347 # testing/base/gunit.h.  Note that the _M versions need to come first
00348 # for substring matching to work.
00349 _CHECK_MACROS = [
00350     'DCHECK', 'CHECK',
00351     'EXPECT_TRUE_M', 'EXPECT_TRUE',
00352     'ASSERT_TRUE_M', 'ASSERT_TRUE',
00353     'EXPECT_FALSE_M', 'EXPECT_FALSE',
00354     'ASSERT_FALSE_M', 'ASSERT_FALSE',
00355     ]
00356 
00357 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
00358 _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
00359 
00360 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
00361                         ('>=', 'GE'), ('>', 'GT'),
00362                         ('<=', 'LE'), ('<', 'LT')]:
00363   _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
00364   _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
00365   _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
00366   _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
00367   _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
00368   _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
00369 
00370 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
00371                             ('>=', 'LT'), ('>', 'LE'),
00372                             ('<=', 'GT'), ('<', 'GE')]:
00373   _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
00374   _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
00375   _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
00376   _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
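# Illustrative sketch (not part of the original file): the two loops above
# build a nested mapping from bare check macro and comparison operator to the
# operator-specific macro that cpplint suggests instead, for example:
#
#   _CHECK_REPLACEMENT['CHECK']['==']        -> 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_TRUE']['<']   -> 'EXPECT_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] -> 'EXPECT_NE'   # inverted sense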
00377 
00378 # Alternative tokens and their replacements.  For full list, see section 2.5
00379 # Alternative tokens [lex.digraph] in the C++ standard.
00380 #
00381 # Digraphs (such as '%:') are not included here since it's a mess to
00382 # match those on a word boundary.
00383 _ALT_TOKEN_REPLACEMENT = {
00384     'and': '&&',
00385     'bitor': '|',
00386     'or': '||',
00387     'xor': '^',
00388     'compl': '~',
00389     'bitand': '&',
00390     'and_eq': '&=',
00391     'or_eq': '|=',
00392     'xor_eq': '^=',
00393     'not': '!',
00394     'not_eq': '!='
00395     }
00396 
00397 # Compile regular expression that matches all the above keywords.  The "[ =()]"
00398 # bit is meant to avoid matching these keywords outside of boolean expressions.
00399 #
00400 # False positives include C-style multi-line comments and multi-line strings
00401 # but those have always been troublesome for cpplint.
00402 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
00403     r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
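# Illustrative sketch (not part of the original file): on a line such as
# 'if (foo and bar)', the pattern matches ' and' (preceded by a space and
# followed by a space), so group(1) is 'and' and the suggested replacement is
# _ALT_TOKEN_REPLACEMENT['and'] == '&&'.  An identifier like 'operand' is not
# matched, because the character before 'and' is not one of ' =()'.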
00404 
00405 
00406 # These constants define types of headers for use with
00407 # _IncludeState.CheckNextIncludeOrder().
00408 _C_SYS_HEADER = 1
00409 _CPP_SYS_HEADER = 2
00410 _LIKELY_MY_HEADER = 3
00411 _POSSIBLE_MY_HEADER = 4
00412 _OTHER_HEADER = 5
00413 
00414 # These constants define the current inline assembly state
00415 _NO_ASM = 0       # Outside of inline assembly block
00416 _INSIDE_ASM = 1   # Inside inline assembly block
00417 _END_ASM = 2      # Last line of inline assembly block
00418 _BLOCK_ASM = 3    # The whole block is an inline assembly block
00419 
00420 # Match start of assembly blocks
00421 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
00422                         r'(?:\s+(volatile|__volatile__))?'
00423                         r'\s*[{(]')
00424 
00425 
00426 _regexp_compile_cache = {}
00427 
00428 # Finds occurrences of NOLINT or NOLINT(...).
00429 _RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
00430 
00431 # {str, set(int)}: a map from error categories to sets of linenumbers
00432 # on which those errors are expected and should be suppressed.
00433 _error_suppressions = {}
00434 
00435 # The root directory used for deriving the header guard CPP variable.
00436 # This is set by --root flag.
00437 _root = None
00438 
00439 # The allowed line length of files.
00440 # This is set by --linelength flag.
00441 _line_length = 80
00442 
00443 def ParseNolintSuppressions(filename, raw_line, linenum, error):
00444   """Updates the global list of error-suppressions.
00445 
00446   Parses any NOLINT comments on the current line, updating the global
00447   error_suppressions store.  Reports an error if the NOLINT comment
00448   was malformed.
00449 
00450   Args:
00451     filename: str, the name of the input file.
00452     raw_line: str, the line of input text, with comments.
00453     linenum: int, the number of the current line.
00454     error: function, an error handler.
00455   """
00456   # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
00457   matched = _RE_SUPPRESSION.search(raw_line)
00458   if matched:
00459     category = matched.group(1)
00460     if category in (None, '(*)'):  # => "suppress all"
00461       _error_suppressions.setdefault(None, set()).add(linenum)
00462     else:
00463       if category.startswith('(') and category.endswith(')'):
00464         category = category[1:-1]
00465         if category in _ERROR_CATEGORIES:
00466           _error_suppressions.setdefault(category, set()).add(linenum)
00467         else:
00468           error(filename, linenum, 'readability/nolint', 5,
00469                 'Unknown NOLINT error category: %s' % category)
00470 
00471 
00472 def ResetNolintSuppressions():
00473   "Resets the set of NOLINT suppressions to empty."
00474   _error_suppressions.clear()
00475 
00476 
00477 def IsErrorSuppressedByNolint(category, linenum):
00478   """Returns true if the specified error category is suppressed on this line.
00479 
00480   Consults the global error_suppressions map populated by
00481   ParseNolintSuppressions/ResetNolintSuppressions.
00482 
00483   Args:
00484     category: str, the category of the error.
00485     linenum: int, the current line number.
00486   Returns:
00487     bool, True iff the error should be suppressed due to a NOLINT comment.
00488   """
00489   return (linenum in _error_suppressions.get(category, set()) or
00490           linenum in _error_suppressions.get(None, set()))
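# Illustrative sketch (not part of the original file): for a raw source line
# such as 'int i = 0;  // NOLINT(runtime/int)', ParseNolintSuppressions adds
# the line number to _error_suppressions['runtime/int'], and a later call to
# IsErrorSuppressedByNolint('runtime/int', linenum) returns True.  A bare
# '// NOLINT' (or '// NOLINT(*)') records the line under the None key, which
# suppresses every category on that line.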
00491 
00492 def Match(pattern, s):
00493   """Matches the string with the pattern, caching the compiled regexp."""
00494   # The regexp compilation caching is inlined in both Match and Search for
00495   # performance reasons; factoring it out into a separate function turns out
00496   # to be noticeably expensive.
00497   if pattern not in _regexp_compile_cache:
00498     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00499   return _regexp_compile_cache[pattern].match(s)
00500 
00501 
00502 def ReplaceAll(pattern, rep, s):
00503   """Replaces instances of pattern in a string with a replacement.
00504 
00505   The compiled regex is kept in a cache shared by Match and Search.
00506 
00507   Args:
00508     pattern: regex pattern
00509     rep: replacement text
00510     s: search string
00511 
00512   Returns:
00513     string with replacements made (or original string if no replacements)
00514   """
00515   if pattern not in _regexp_compile_cache:
00516     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00517   return _regexp_compile_cache[pattern].sub(rep, s)
00518 
00519 
00520 def Search(pattern, s):
00521   """Searches the string for the pattern, caching the compiled regexp."""
00522   if pattern not in _regexp_compile_cache:
00523     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00524   return _regexp_compile_cache[pattern].search(s)
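# Illustrative sketch (not part of the original file): Match anchors at the
# start of the string while Search scans the whole string, mirroring
# re.match/re.search:
#
#   Match(r'#include', '  #include <map>')   -> None (leading spaces)
#   Search(r'#include', '  #include <map>')  -> a match object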
00525 
00526 
00527 class _IncludeState(dict):
00528   """Tracks line numbers for includes, and the order in which includes appear.
00529 
00530   As a dict, an _IncludeState object serves as a mapping between include
00531   filename and line number on which that file was included.
00532 
00533   Call CheckNextIncludeOrder() once for each header in the file, passing
00534   in the type constants defined above. Calls in an illegal order will
00535   raise an _IncludeError with an appropriate error message.
00536 
00537   """
00538   # self._section will move monotonically through this set. If it ever
00539   # needs to move backwards, CheckNextIncludeOrder will raise an error.
00540   _INITIAL_SECTION = 0
00541   _MY_H_SECTION = 1
00542   _C_SECTION = 2
00543   _CPP_SECTION = 3
00544   _OTHER_H_SECTION = 4
00545 
00546   _TYPE_NAMES = {
00547       _C_SYS_HEADER: 'C system header',
00548       _CPP_SYS_HEADER: 'C++ system header',
00549       _LIKELY_MY_HEADER: 'header this file implements',
00550       _POSSIBLE_MY_HEADER: 'header this file may implement',
00551       _OTHER_HEADER: 'other header',
00552       }
00553   _SECTION_NAMES = {
00554       _INITIAL_SECTION: "... nothing. (This can't be an error.)",
00555       _MY_H_SECTION: 'a header this file implements',
00556       _C_SECTION: 'C system header',
00557       _CPP_SECTION: 'C++ system header',
00558       _OTHER_H_SECTION: 'other header',
00559       }
00560 
00561   def __init__(self):
00562     dict.__init__(self)
00563     self.ResetSection()
00564 
00565   def ResetSection(self):
00566     # The name of the current section.
00567     self._section = self._INITIAL_SECTION
00568     # The path of last found header.
00569     self._last_header = ''
00570 
00571   def SetLastHeader(self, header_path):
00572     self._last_header = header_path
00573 
00574   def CanonicalizeAlphabeticalOrder(self, header_path):
00575     """Returns a path canonicalized for alphabetical comparison.
00576 
00577     - replaces "-" with "_" so they both compare the same.
00578     - removes '-inl' since we don't require them to be after the main header.
00579     - lowercases everything, just in case.
00580 
00581     Args:
00582       header_path: Path to be canonicalized.
00583 
00584     Returns:
00585       Canonicalized path.
00586     """
00587     return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
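  # Illustrative sketch (not part of the original file): a header named
  # 'foo-bar-inl.h' canonicalizes to 'foo_bar.h', so it sorts identically to
  # 'foo_bar.h' when the build/include_alpha ordering is checked.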
00588 
00589   def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
00590     """Check if a header is in alphabetical order with the previous header.
00591 
00592     Args:
00593       clean_lines: A CleansedLines instance containing the file.
00594       linenum: The number of the line to check.
00595       header_path: Canonicalized header to be checked.
00596 
00597     Returns:
00598       Returns true if the header is in alphabetical order.
00599     """
00600     # If previous section is different from current section, _last_header will
00601     # be reset to empty string, so it's always less than current header.
00602     #
00603     # If previous line was a blank line, assume that the headers are
00604     # intentionally sorted the way they are.
00605     if (self._last_header > header_path and
00606         not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
00607       return False
00608     return True
00609 
00610   def CheckNextIncludeOrder(self, header_type):
00611     """Returns a non-empty error message if the next header is out of order.
00612 
00613     This function also updates the internal state to be ready to check
00614     the next include.
00615 
00616     Args:
00617       header_type: One of the _XXX_HEADER constants defined above.
00618 
00619     Returns:
00620       The empty string if the header is in the right order, or an
00621       error message describing what's wrong.
00622 
00623     """
00624     error_message = ('Found %s after %s' %
00625                      (self._TYPE_NAMES[header_type],
00626                       self._SECTION_NAMES[self._section]))
00627 
00628     last_section = self._section
00629 
00630     if header_type == _C_SYS_HEADER:
00631       if self._section <= self._C_SECTION:
00632         self._section = self._C_SECTION
00633       else:
00634         self._last_header = ''
00635         return error_message
00636     elif header_type == _CPP_SYS_HEADER:
00637       if self._section <= self._CPP_SECTION:
00638         self._section = self._CPP_SECTION
00639       else:
00640         self._last_header = ''
00641         return error_message
00642     elif header_type == _LIKELY_MY_HEADER:
00643       if self._section <= self._MY_H_SECTION:
00644         self._section = self._MY_H_SECTION
00645       else:
00646         self._section = self._OTHER_H_SECTION
00647     elif header_type == _POSSIBLE_MY_HEADER:
00648       if self._section <= self._MY_H_SECTION:
00649         self._section = self._MY_H_SECTION
00650       else:
00651         # This will always be the fallback because we're not sure
00652         # enough that the header is associated with this file.
00653         self._section = self._OTHER_H_SECTION
00654     else:
00655       assert header_type == _OTHER_HEADER
00656       self._section = self._OTHER_H_SECTION
00657 
00658     if last_section != self._section:
00659       self._last_header = ''
00660 
00661     return ''
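  # Illustrative sketch (not part of the original file): sections are expected
  # to advance _MY_H_SECTION -> _C_SECTION -> _CPP_SECTION -> _OTHER_H_SECTION.
  # If, say, a C system header appears while the state is already in
  # _CPP_SECTION, this method returns
  # 'Found C system header after C++ system header' and resets _last_header.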
00662 
00663 
00664 class _CppLintState(object):
00665   """Maintains module-wide state.."""
00666 
00667   def __init__(self):
00668     self.verbose_level = 1  # global setting.
00669     self.error_count = 0    # global count of reported errors
00670     # filters to apply when emitting error messages
00671     self.filters = _DEFAULT_FILTERS[:]
00672     self.counting = 'total'  # In what way are we counting errors?
00673     self.errors_by_category = {}  # string to int dict storing error counts
00674 
00675     # output format:
00676     # "emacs" - format that emacs can parse (default)
00677     # "vs7" - format that Microsoft Visual Studio 7 can parse
00678     self.output_format = 'emacs'
00679 
00680   def SetOutputFormat(self, output_format):
00681     """Sets the output format for errors."""
00682     self.output_format = output_format
00683 
00684   def SetVerboseLevel(self, level):
00685     """Sets the module's verbosity, and returns the previous setting."""
00686     last_verbose_level = self.verbose_level
00687     self.verbose_level = level
00688     return last_verbose_level
00689 
00690   def SetCountingStyle(self, counting_style):
00691     """Sets the module's counting options."""
00692     self.counting = counting_style
00693 
00694   def SetFilters(self, filters):
00695     """Sets the error-message filters.
00696 
00697     These filters are applied when deciding whether to emit a given
00698     error message.
00699 
00700     Args:
00701       filters: A string of comma-separated filters (eg "+whitespace/indent").
00702                Each filter should start with + or -; else we die.
00703 
00704     Raises:
00705       ValueError: The comma-separated filters did not all start with '+' or '-'.
00706                   E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
00707     """
00708     # Default filters always have lower priority than the flag ones.
00709     self.filters = _DEFAULT_FILTERS[:]
00710     for filt in filters.split(','):
00711       clean_filt = filt.strip()
00712       if clean_filt:
00713         self.filters.append(clean_filt)
00714     for filt in self.filters:
00715       if not (filt.startswith('+') or filt.startswith('-')):
00716         raise ValueError('Every filter in --filter must start with + or -'
00717                          ' (%s does not)' % filt)
00718 
00719   def ResetErrorCounts(self):
00720     """Sets the module's error statistic back to zero."""
00721     self.error_count = 0
00722     self.errors_by_category = {}
00723 
00724   def IncrementErrorCount(self, category):
00725     """Bumps the module's error statistic."""
00726     self.error_count += 1
00727     if self.counting in ('toplevel', 'detailed'):
00728       if self.counting != 'detailed':
00729         category = category.split('/')[0]
00730       if category not in self.errors_by_category:
00731         self.errors_by_category[category] = 0
00732       self.errors_by_category[category] += 1
00733 
00734   def PrintErrorCounts(self):
00735     """Print a summary of errors by category, and the total."""
00736     for category, count in self.errors_by_category.iteritems():
00737       sys.stderr.write('Category \'%s\' errors found: %d\n' %
00738                        (category, count))
00739     sys.stderr.write('Total errors found: %d\n' % self.error_count)
00740 
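# Illustrative sketch (not part of the original file): with
# SetCountingStyle('toplevel'), an error in category 'whitespace/indent' is
# tallied under 'whitespace'; with 'detailed' it is tallied under the full
# 'whitespace/indent' name.  PrintErrorCounts then writes lines like
# "Category 'whitespace' errors found: 3" followed by the total.
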
00741 _cpplint_state = _CppLintState()
00742 
00743 
00744 def _OutputFormat():
00745   """Gets the module's output format."""
00746   return _cpplint_state.output_format
00747 
00748 
00749 def _SetOutputFormat(output_format):
00750   """Sets the module's output format."""
00751   _cpplint_state.SetOutputFormat(output_format)
00752 
00753 
00754 def _VerboseLevel():
00755   """Returns the module's verbosity setting."""
00756   return _cpplint_state.verbose_level
00757 
00758 
00759 def _SetVerboseLevel(level):
00760   """Sets the module's verbosity, and returns the previous setting."""
00761   return _cpplint_state.SetVerboseLevel(level)
00762 
00763 
00764 def _SetCountingStyle(level):
00765   """Sets the module's counting options."""
00766   _cpplint_state.SetCountingStyle(level)
00767 
00768 
00769 def _Filters():
00770   """Returns the module's list of output filters, as a list."""
00771   return _cpplint_state.filters
00772 
00773 
00774 def _SetFilters(filters):
00775   """Sets the module's error-message filters.
00776 
00777   These filters are applied when deciding whether to emit a given
00778   error message.
00779 
00780   Args:
00781     filters: A string of comma-separated filters (eg "whitespace/indent").
00782              Each filter should start with + or -; else we die.
00783   """
00784   _cpplint_state.SetFilters(filters)
00785 
00786 
00787 class _FunctionState(object):
00788   """Tracks current function name and the number of lines in its body."""
00789 
00790   _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
00791   _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
00792 
00793   def __init__(self):
00794     self.in_a_function = False
00795     self.lines_in_function = 0
00796     self.current_function = ''
00797 
00798   def Begin(self, function_name):
00799     """Start analyzing function body.
00800 
00801     Args:
00802       function_name: The name of the function being tracked.
00803     """
00804     self.in_a_function = True
00805     self.lines_in_function = 0
00806     self.current_function = function_name
00807 
00808   def Count(self):
00809     """Count line in current function body."""
00810     if self.in_a_function:
00811       self.lines_in_function += 1
00812 
00813   def Check(self, error, filename, linenum):
00814     """Report if too many lines in function body.
00815 
00816     Args:
00817       error: The function to call with any errors found.
00818       filename: The name of the current file.
00819       linenum: The number of the line to check.
00820     """
00821     if Match(r'T(EST|est)', self.current_function):
00822       base_trigger = self._TEST_TRIGGER
00823     else:
00824       base_trigger = self._NORMAL_TRIGGER
00825     trigger = base_trigger * 2**_VerboseLevel()
00826 
00827     if self.lines_in_function > trigger:
00828       error_level = int(math.log(self.lines_in_function / base_trigger, 2))
00829       # error_level grows roughly as log2(lines / base_trigger):
00830       # 2x => 1, 4x => 2, 8x => 3, ..., and is capped at 5 below.
00830       if error_level > 5:
00831         error_level = 5
00832       error(filename, linenum, 'readability/fn_size', error_level,
00833             'Small and focused functions are preferred:'
00834             ' %s has %d non-comment lines'
00835             ' (error triggered by exceeding %d lines).'  % (
00836                 self.current_function, self.lines_in_function, trigger))
00837 
00838   def End(self):
00839     """Stop analyzing function body."""
00840     self.in_a_function = False
00841 
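# Illustrative sketch (not part of the original file): at verbosity 0, a
# non-test function body longer than _NORMAL_TRIGGER (250) non-comment lines
# triggers a readability/fn_size warning; functions whose names match
# TEST/Test use _TEST_TRIGGER (400) instead.  Each extra verbosity level
# doubles the threshold (trigger = base_trigger * 2**verbose_level).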
00842 
00843 class _IncludeError(Exception):
00844   """Indicates a problem with the include order in a file."""
00845   pass
00846 
00847 
00848 class FileInfo:
00849   """Provides utility functions for filenames.
00850 
00851   FileInfo provides easy access to the components of a file's path
00852   relative to the project root.
00853   """
00854 
00855   def __init__(self, filename):
00856     self._filename = filename
00857 
00858   def FullName(self):
00859     """Make Windows paths like Unix."""
00860     return os.path.abspath(self._filename).replace('\\', '/')
00861 
00862   def RepositoryName(self):
00863     """FullName after removing the local path to the repository.
00864 
00865     If we have a real absolute path name here we can try to do something smart:
00866     detecting the root of the checkout and truncating /path/to/checkout from
00867     the name so that we get header guards that don't include things like
00868     "C:\Documents and Settings\..." or "/home/username/..." in them and thus
00869     people on different computers who have checked the source out to different
00870     locations won't see bogus errors.
00871     """
00872     fullname = self.FullName()
00873 
00874     if os.path.exists(fullname):
00875       project_dir = os.path.dirname(fullname)
00876 
00877       if os.path.exists(os.path.join(project_dir, ".svn")):
00878         # If there's a .svn file in the current directory, we recursively look
00879         # up the directory tree for the top of the SVN checkout
00880         root_dir = project_dir
00881         one_up_dir = os.path.dirname(root_dir)
00882         while os.path.exists(os.path.join(one_up_dir, ".svn")):
00883           root_dir = os.path.dirname(root_dir)
00884           one_up_dir = os.path.dirname(one_up_dir)
00885 
00886         prefix = os.path.commonprefix([root_dir, project_dir])
00887         return fullname[len(prefix) + 1:]
00888 
00889       # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
00890       # searching up from the current path.
00891       root_dir = os.path.dirname(fullname)
00892       while (root_dir != os.path.dirname(root_dir) and
00893              not os.path.exists(os.path.join(root_dir, ".git")) and
00894              not os.path.exists(os.path.join(root_dir, ".hg")) and
00895              not os.path.exists(os.path.join(root_dir, ".svn"))):
00896         root_dir = os.path.dirname(root_dir)
00897 
00898       if (os.path.exists(os.path.join(root_dir, ".git")) or
00899           os.path.exists(os.path.join(root_dir, ".hg")) or
00900           os.path.exists(os.path.join(root_dir, ".svn"))):
00901         prefix = os.path.commonprefix([root_dir, project_dir])
00902         return fullname[len(prefix) + 1:]
00903 
00904     # Don't know what to do; header guard warnings may be wrong...
00905     return fullname
00906 
00907   def Split(self):
00908     """Splits the file into the directory, basename, and extension.
00909 
00910     For 'chrome/browser/browser.cc', Split() would
00911     return ('chrome/browser', 'browser', '.cc')
00912 
00913     Returns:
00914       A tuple of (directory, basename, extension).
00915     """
00916 
00917     googlename = self.RepositoryName()
00918     project, rest = os.path.split(googlename)
00919     return (project,) + os.path.splitext(rest)
00920 
00921   def BaseName(self):
00922     """File base name - text after the final slash, before the final period."""
00923     return self.Split()[1]
00924 
00925   def Extension(self):
00926     """File extension - text following the final period."""
00927     return self.Split()[2]
00928 
00929   def NoExtension(self):
00930     """File has no source file extension."""
00931     return '/'.join(self.Split()[0:2])
00932 
00933   def IsSource(self):
00934     """File has a source file extension."""
00935     return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
00936 
00937 
00938 def _ShouldPrintError(category, confidence, linenum):
00939   """If confidence >= verbose, category passes filter and is not suppressed."""
00940 
00941   # There are three ways we might decide not to print an error message:
00942   # a "NOLINT(category)" comment appears in the source,
00943   # the verbosity level isn't high enough, or the filters filter it out.
00944   if IsErrorSuppressedByNolint(category, linenum):
00945     return False
00946   if confidence < _cpplint_state.verbose_level:
00947     return False
00948 
00949   is_filtered = False
00950   for one_filter in _Filters():
00951     if one_filter.startswith('-'):
00952       if category.startswith(one_filter[1:]):
00953         is_filtered = True
00954     elif one_filter.startswith('+'):
00955       if category.startswith(one_filter[1:]):
00956         is_filtered = False
00957     else:
00958       assert False  # should have been checked for in SetFilter.
00959   if is_filtered:
00960     return False
00961 
00962   return True
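# Illustrative sketch (not part of the original file): filters are applied
# left to right using prefix matches on the category name.  With
# --filter=-whitespace,+whitespace/braces, an error in 'whitespace/indent' is
# suppressed (only '-whitespace' matches), while one in 'whitespace/braces'
# is first suppressed and then re-enabled by the '+whitespace/braces' entry.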
00963 
00964 
00965 def Error(filename, linenum, category, confidence, message):
00966   """Logs the fact we've found a lint error.
00967 
00968   We log where the error was found, and also our confidence in the error,
00969   that is, how certain we are this is a legitimate style regression, and
00970   not a misidentification or a use that's sometimes justified.
00971 
00972   False positives can be suppressed by the use of
00973   "cpplint(category)"  comments on the offending line.  These are
00974   parsed into _error_suppressions.
00975 
00976   Args:
00977     filename: The name of the file containing the error.
00978     linenum: The number of the line containing the error.
00979     category: A string used to describe the "category" this bug
00980       falls under: "whitespace", say, or "runtime".  Categories
00981       may have a hierarchy separated by slashes: "whitespace/indent".
00982     confidence: A number from 1-5 representing a confidence score for
00983       the error, with 5 meaning that we are certain of the problem,
00984       and 1 meaning that it could be a legitimate construct.
00985     message: The error message.
00986   """
00987   if _ShouldPrintError(category, confidence, linenum):
00988     _cpplint_state.IncrementErrorCount(category)
00989     if _cpplint_state.output_format == 'vs7':
00990       sys.stderr.write('%s(%s):  %s  [%s] [%d]\n' % (
00991           filename, linenum, message, category, confidence))
00992     elif _cpplint_state.output_format == 'eclipse':
00993       sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
00994           filename, linenum, message, category, confidence))
00995     else:
00996       sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
00997           filename, linenum, message, category, confidence))
00998 
00999 
01000 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
01001 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
01002     r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
01003 # Matches strings.  Escape codes should already be removed by ESCAPES.
01004 _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
01005 # Matches characters.  Escape codes should already be removed by ESCAPES.
01006 _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
01007 # Matches multi-line C++ comments.
01008 # This RE is a little bit more complicated than one might expect, because we
01009 # have to take care of space removal so we can handle comments inside
01010 # statements better.
01011 # The current rule is: We only clear spaces from both sides when we're at the
01012 # end of the line. Otherwise, we try to remove spaces from the right side,
01013 # if that doesn't work we try the left side, but only if there's a non-word
01014 # character on the right.
01015 _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
01016     r"""(\s*/\*.*\*/\s*$|
01017             /\*.*\*/\s+|
01018          \s+/\*.*\*/(?=\W)|
01019             /\*.*\*/)""", re.VERBOSE)
01020 
01021 
01022 def IsCppString(line):
01023   """Does line terminate so, that the next symbol is in string constant.
01024 
01025   This function does not consider single-line nor multi-line comments.
01026 
01027   Args:
01028     line: a partial line of code, from character 0 up to some position n.
01029 
01030   Returns:
01031     True, if next character appended to 'line' is inside a
01032     string constant.
01033   """
01034 
01035   line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
01036   return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
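# Illustrative sketch (not part of the original file): the parity of the
# unescaped double quotes decides the answer, e.g.
#
#   IsCppString('x = "abc')   -> True   (an open string continues past here)
#   IsCppString('x = "abc"')  -> False  (the string is closed)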
01037 
01038 
01039 def CleanseRawStrings(raw_lines):
01040   """Removes C++11 raw strings from lines.
01041 
01042     Before:
01043       static const char kData[] = R"(
01044           multi-line string
01045           )";
01046 
01047     After:
01048       static const char kData[] = ""
01049           (replaced by blank line)
01050           "";
01051 
01052   Args:
01053     raw_lines: list of raw lines.
01054 
01055   Returns:
01056     list of lines with C++11 raw strings replaced by empty strings.
01057   """
01058 
01059   delimiter = None
01060   lines_without_raw_strings = []
01061   for line in raw_lines:
01062     if delimiter:
01063       # Inside a raw string, look for the end
01064       end = line.find(delimiter)
01065       if end >= 0:
01066         # Found the end of the string, match leading space for this
01067         # line and resume copying the original lines, and also insert
01068         # a "" on the last line.
01069         leading_space = Match(r'^(\s*)\S', line)
01070         line = leading_space.group(1) + '""' + line[end + len(delimiter):]
01071         delimiter = None
01072       else:
01073         # Haven't found the end yet, append a blank line.
01074         line = ''
01075 
01076     else:
01077       # Look for beginning of a raw string.
01078       # See 2.14.15 [lex.string] for syntax.
01079       matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
01080       if matched:
01081         delimiter = ')' + matched.group(2) + '"'
01082 
01083         end = matched.group(3).find(delimiter)
01084         if end >= 0:
01085           # Raw string ended on same line
01086           line = (matched.group(1) + '""' +
01087                   matched.group(3)[end + len(delimiter):])
01088           delimiter = None
01089         else:
01090           # Start of a multi-line raw string
01091           line = matched.group(1) + '""'
01092 
01093     lines_without_raw_strings.append(line)
01094 
01095   # TODO(unknown): if delimiter is not None here, we might want to
01096   # emit a warning for unterminated string.
01097   return lines_without_raw_strings
01098 
01099 
01100 def FindNextMultiLineCommentStart(lines, lineix):
01101   """Find the beginning marker for a multiline comment."""
01102   while lineix < len(lines):
01103     if lines[lineix].strip().startswith('/*'):
01104       # Only return this marker if the comment goes beyond this line
01105       if lines[lineix].strip().find('*/', 2) < 0:
01106         return lineix
01107     lineix += 1
01108   return len(lines)
01109 
01110 
01111 def FindNextMultiLineCommentEnd(lines, lineix):
01112   """We are inside a comment, find the end marker."""
01113   while lineix < len(lines):
01114     if lines[lineix].strip().endswith('*/'):
01115       return lineix
01116     lineix += 1
01117   return len(lines)
01118 
01119 
01120 def RemoveMultiLineCommentsFromRange(lines, begin, end):
01121   """Clears a range of lines for multi-line comments."""
01122   # Having // dummy comments makes the lines non-empty, so we will not get
01123   # unnecessary blank line warnings later in the code.
01124   for i in range(begin, end):
01125     lines[i] = '// dummy'
01126 
01127 
01128 def RemoveMultiLineComments(filename, lines, error):
01129   """Removes multiline (c-style) comments from lines."""
01130   lineix = 0
01131   while lineix < len(lines):
01132     lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
01133     if lineix_begin >= len(lines):
01134       return
01135     lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
01136     if lineix_end >= len(lines):
01137       error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
01138             'Could not find end of multi-line comment')
01139       return
01140     RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
01141     lineix = lineix_end + 1
01142 
01143 
01144 def CleanseComments(line):
01145   """Removes //-comments and single-line C-style /* */ comments.
01146 
01147   Args:
01148     line: A line of C++ source.
01149 
01150   Returns:
01151     The line with single-line comments removed.
01152   """
01153   commentpos = line.find('//')
01154   if commentpos != -1 and not IsCppString(line[:commentpos]):
01155     line = line[:commentpos].rstrip()
01156   # get rid of /* ... */
01157   return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
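# Illustrative sketch (not part of the original file):
#
#   CleanseComments('int x = 0;  // trailing')      -> 'int x = 0;'
#   CleanseComments('f(); /* inline */ int y;')     -> 'f(); int y;'
#   CleanseComments('url = "http://example.com";')  -> unchanged, because
#       IsCppString() shows the '//' sits inside a string literal.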
01158 
01159 
01160 class CleansedLines(object):
01161   """Holds 3 copies of all lines with different preprocessing applied to them.
01162 
01163   1) elided member contains lines without strings and comments,
01164   2) lines member contains lines without comments, and
01165   3) raw_lines member contains all the lines without processing.
01166   All these three members are of <type 'list'>, and of the same length.
01167   """
01168 
01169   def __init__(self, lines):
01170     self.elided = []
01171     self.lines = []
01172     self.raw_lines = lines
01173     self.num_lines = len(lines)
01174     self.lines_without_raw_strings = CleanseRawStrings(lines)
01175     for linenum in range(len(self.lines_without_raw_strings)):
01176       self.lines.append(CleanseComments(
01177           self.lines_without_raw_strings[linenum]))
01178       elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
01179       self.elided.append(CleanseComments(elided))
01180 
01181   def NumLines(self):
01182     """Returns the number of lines represented."""
01183     return self.num_lines
01184 
01185   @staticmethod
01186   def _CollapseStrings(elided):
01187     """Collapses strings and chars on a line to simple "" or '' blocks.
01188 
01189     We nix strings first so we're not fooled by text like '"http://"'
01190 
01191     Args:
01192       elided: The line being processed.
01193 
01194     Returns:
01195       The line with collapsed strings.
01196     """
01197     if not _RE_PATTERN_INCLUDE.match(elided):
01198       # Remove escaped characters first to make quote/single quote collapsing
01199       # basic.  Things that look like escaped characters shouldn't occur
01200       # outside of strings and chars.
01201       elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
01202       elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
01203       elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
01204     return elided
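  # Illustrative sketch (not part of the original file): for the raw line
  #   printf("%d // not a comment", i);
  # the elided copy becomes
  #   printf("", i);
  # so the '//' inside the string can no longer be mistaken for a comment,
  # which is exactly the confusion described in the module docstring.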
01205 
01206 
01207 def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
01208   """Find the position just after the matching endchar.
01209 
01210   Args:
01211     line: a CleansedLines line.
01212     startpos: start searching at this position.
01213     depth: nesting level at startpos.
01214     startchar: expression opening character.
01215     endchar: expression closing character.
01216 
01217   Returns:
01218     On finding matching endchar: (index just after matching endchar, 0)
01219     Otherwise: (-1, new depth at end of this line)
01220   """
01221   for i in xrange(startpos, len(line)):
01222     if line[i] == startchar:
01223       depth += 1
01224     elif line[i] == endchar:
01225       depth -= 1
01226       if depth == 0:
01227         return (i + 1, 0)
01228   return (-1, depth)
01229 
01230 
01231 def CloseExpression(clean_lines, linenum, pos):
01232   """If input points to ( or { or [ or <, finds the position that closes it.
01233 
01234   If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
01235   linenum/pos that correspond to the closing of the expression.
01236 
01237   Args:
01238     clean_lines: A CleansedLines instance containing the file.
01239     linenum: The number of the line to check.
01240     pos: A position on the line.
01241 
01242   Returns:
01243     A tuple (line, linenum, pos) pointer *past* the closing brace, or
01244     (line, len(lines), -1) if we never find a close.  Note we ignore
01245     strings and comments when matching; and the line we return is the
01246     'cleansed' line at linenum.
01247   """
01248 
01249   line = clean_lines.elided[linenum]
01250   startchar = line[pos]
01251   if startchar not in '({[<':
01252     return (line, clean_lines.NumLines(), -1)
01253   if startchar == '(': endchar = ')'
01254   if startchar == '[': endchar = ']'
01255   if startchar == '{': endchar = '}'
01256   if startchar == '<': endchar = '>'
01257 
01258   # Check first line
01259   (end_pos, num_open) = FindEndOfExpressionInLine(
01260       line, pos, 0, startchar, endchar)
01261   if end_pos > -1:
01262     return (line, linenum, end_pos)
01263 
01264   # Continue scanning forward
01265   while linenum < clean_lines.NumLines() - 1:
01266     linenum += 1
01267     line = clean_lines.elided[linenum]
01268     (end_pos, num_open) = FindEndOfExpressionInLine(
01269         line, 0, num_open, startchar, endchar)
01270     if end_pos > -1:
01271       return (line, linenum, end_pos)
01272 
01273   # Did not find endchar before end of file, give up
01274   return (line, clean_lines.NumLines(), -1)
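# Illustrative sketch (not part of the original file): if the elided line is
# 'while (a && (b || c)) {' and pos points at the first '(', CloseExpression
# returns that same line, the same linenum, and the index one past the
# matching ')' (here, the position of the space before '{').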
01275 
01276 
01277 def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
01278   """Find position at the matching startchar.
01279 
01280   This is almost the reverse of FindEndOfExpressionInLine, but note
01281   that the input position and returned position differs by 1.
01282 
01283   Args:
01284     line: a CleansedLines line.
01285     endpos: start searching at this position.
01286     depth: nesting level at endpos.
01287     startchar: expression opening character.
01288     endchar: expression closing character.
01289 
01290   Returns:
01291     On finding matching startchar: (index at matching startchar, 0)
01292     Otherwise: (-1, new depth at beginning of this line)
01293   """
01294   for i in xrange(endpos, -1, -1):
01295     if line[i] == endchar:
01296       depth += 1
01297     elif line[i] == startchar:
01298       depth -= 1
01299       if depth == 0:
01300         return (i, 0)
01301   return (-1, depth)
01302 
01303 
01304 def ReverseCloseExpression(clean_lines, linenum, pos):
01305   """If input points to ) or } or ] or >, finds the position that opens it.
01306 
01307   If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
01308   linenum/pos that correspond to the opening of the expression.
01309 
01310   Args:
01311     clean_lines: A CleansedLines instance containing the file.
01312     linenum: The number of the line to check.
01313     pos: A position on the line.
01314 
01315   Returns:
01316     A tuple (line, linenum, pos) pointer *at* the opening brace, or
01317     (line, 0, -1) if we never find the matching opening brace.  Note
01318     we ignore strings and comments when matching; and the line we
01319     return is the 'cleansed' line at linenum.
01320   """
01321   line = clean_lines.elided[linenum]
01322   endchar = line[pos]
01323   if endchar not in ')}]>':
01324     return (line, 0, -1)
01325   if endchar == ')': startchar = '('
01326   if endchar == ']': startchar = '['
01327   if endchar == '}': startchar = '{'
01328   if endchar == '>': startchar = '<'
01329 
01330   # Check last line
01331   (start_pos, num_open) = FindStartOfExpressionInLine(
01332       line, pos, 0, startchar, endchar)
01333   if start_pos > -1:
01334     return (line, linenum, start_pos)
01335 
01336   # Continue scanning backward
01337   while linenum > 0:
01338     linenum -= 1
01339     line = clean_lines.elided[linenum]
01340     (start_pos, num_open) = FindStartOfExpressionInLine(
01341         line, len(line) - 1, num_open, startchar, endchar)
01342     if start_pos > -1:
01343       return (line, linenum, start_pos)
01344 
01345   # Did not find startchar before beginning of file, give up
01346   return (line, 0, -1)
01347 
01348 
01349 def CheckForCopyright(filename, lines, error):
01350   """Logs an error if no Copyright message appears at the top of the file."""
01351 
01352   # We'll say it should occur by line 10. Don't forget there's a
01353   # dummy line at the front.
01354   for line in xrange(1, min(len(lines), 11)):
01355     if re.search(r'Copyright', lines[line], re.I): break
01356   else:                       # means no copyright line was found
01357     error(filename, 0, 'legal/copyright', 5,
01358           'No copyright message found.  '
01359           'You should have a line: "Copyright [year] <Copyright Owner>"')
01360 
01361 
01362 def GetHeaderGuardCPPVariable(filename):
01363   """Returns the CPP variable that should be used as a header guard.
01364 
01365   Args:
01366     filename: The name of a C++ header file.
01367 
01368   Returns:
01369     The CPP variable that should be used as a header guard in the
01370     named file.
01371 
01372   """
01373 
01374   # Restores original filename in case that cpplint is invoked from Emacs's
01375   # flymake.
01376   filename = re.sub(r'_flymake\.h$', '.h', filename)
01377   filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
01378 
01379   fileinfo = FileInfo(filename)
01380   file_path_from_root = fileinfo.RepositoryName()
01381   if _root:
01382     file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
01383   return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
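# Illustrative sketch (not part of the original file): for a file whose
# RepositoryName() is 'chrome/browser/ui/browser.h', the guard variable is
# 'CHROME_BROWSER_UI_BROWSER_H_'; invoking cpplint with --root=chrome strips
# that prefix first, yielding 'BROWSER_UI_BROWSER_H_'.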
01384 
01385 
01386 def CheckForHeaderGuard(filename, lines, error):
01387   """Checks that the file contains a header guard.
01388 
01389   Logs an error if no #ifndef header guard is present.  For other
01390   headers, checks that the full pathname is used.
01391 
01392   Args:
01393     filename: The name of the C++ header file.
01394     lines: An array of strings, each representing a line of the file.
01395     error: The function to call with any errors found.
01396   """
01397 
01398   cppvar = GetHeaderGuardCPPVariable(filename)
01399 
01400   ifndef = None
01401   ifndef_linenum = 0
01402   define = None
01403   endif = None
01404   endif_linenum = 0
01405   for linenum, line in enumerate(lines):
01406     linesplit = line.split()
01407     if len(linesplit) >= 2:
01408       # find the first occurrence of #ifndef and #define, save arg
01409       if not ifndef and linesplit[0] == '#ifndef':
01410         # set ifndef to the header guard presented on the #ifndef line.
01411         ifndef = linesplit[1]
01412         ifndef_linenum = linenum
01413       if not define and linesplit[0] == '#define':
01414         define = linesplit[1]
01415     # find the last occurrence of #endif, save entire line
01416     if line.startswith('#endif'):
01417       endif = line
01418       endif_linenum = linenum
01419 
01420   if not ifndef:
01421     error(filename, 0, 'build/header_guard', 5,
01422           'No #ifndef header guard found, suggested CPP variable is: %s' %
01423           cppvar)
01424     return
01425 
01426   if not define:
01427     error(filename, 0, 'build/header_guard', 5,
01428           'No #define header guard found, suggested CPP variable is: %s' %
01429           cppvar)
01430     return
01431 
01432   # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
01433   # for backward compatibility.
01434   if ifndef != cppvar:
01435     error_level = 0
01436     if ifndef != cppvar + '_':
01437       error_level = 5
01438 
01439     ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
01440                             error)
01441     error(filename, ifndef_linenum, 'build/header_guard', error_level,
01442           '#ifndef header guard has wrong style, please use: %s' % cppvar)
01443 
01444   if define != ifndef:
01445     error(filename, 0, 'build/header_guard', 5,
01446           '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
01447           cppvar)
01448     return
01449 
01450   if endif != ('#endif  // %s' % cppvar):
01451     error_level = 0
01452     if endif != ('#endif  // %s' % (cppvar + '_')):
01453       error_level = 5
01454 
01455     ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
01456                             error)
01457     error(filename, endif_linenum, 'build/header_guard', error_level,
01458           '#endif line should be "#endif  // %s"' % cppvar)
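# Illustrative sketch (not part of the original file): for a header whose
# guard variable is FOO_BAR_H_, the layout this check accepts without
# complaint is:
#
#   #ifndef FOO_BAR_H_
#   #define FOO_BAR_H_
#   ...
#   #endif  // FOO_BAR_H_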
01459 
01460 
01461 def CheckForBadCharacters(filename, lines, error):
01462   """Logs an error for each line containing bad characters.
01463 
01464   Two kinds of bad characters:
01465 
01466   1. Unicode replacement characters: These indicate that either the file
01467   contained invalid UTF-8 (likely) or Unicode replacement characters (which
01468   it shouldn't).  Note that it's possible for this to throw off line
01469   numbering if the invalid UTF-8 occurred adjacent to a newline.
01470 
01471   2. NUL bytes.  These are problematic for some tools.
01472 
01473   Args:
01474     filename: The name of the current file.
01475     lines: An array of strings, each representing a line of the file.
01476     error: The function to call with any errors found.
01477   """
01478   for linenum, line in enumerate(lines):
01479     if u'\ufffd' in line:
01480       error(filename, linenum, 'readability/utf8', 5,
01481             'Line contains invalid UTF-8 (or Unicode replacement character).')
01482     if '\0' in line:
01483       error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
01484 
01485 
01486 def CheckForNewlineAtEOF(filename, lines, error):
01487   """Logs an error if there is no newline char at the end of the file.
01488 
01489   Args:
01490     filename: The name of the current file.
01491     lines: An array of strings, each representing a line of the file.
01492     error: The function to call with any errors found.
01493   """
01494 
01495   # The lines array was created by adding two newlines to the
01496   # original file (go figure), then splitting on \n.
01497   # To verify that the file ends in \n, we just have to make sure the
01498   # second-to-last element of lines (lines[-2]) exists and is empty.
01499   if len(lines) < 3 or lines[-2]:
01500     error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
01501           'Could not find a newline character at the end of the file.')
01502 
01503 
01504 def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
01505   """Logs an error if we see /* ... */ or "..." that extend past one line.
01506 
01507   /* ... */ comments are legit inside macros, for one line.
01508   Otherwise, we prefer // comments, so it's ok to warn about
01509   multi-line /* ... */ comments.  Strings may legally extend across
01510   multiple lines when a line continuation character (backslash)
01511   terminates each line, but although that is not currently prohibited
01512   by the C++ style guide, it's ugly and unnecessary.  We don't handle
01513   either construct well in this lint program, so we warn about both.
01514 
01515   Args:
01516     filename: The name of the current file.
01517     clean_lines: A CleansedLines instance containing the file.
01518     linenum: The number of the line to check.
01519     error: The function to call with any errors found.
01520   """
01521   line = clean_lines.elided[linenum]
01522 
01523   # Remove all \\ (escaped backslashes) from the line. They are OK, and the
01524   # second (escaped) slash may trigger later \" detection erroneously.
01525   line = line.replace('\\\\', '')
01526 
01527   if line.count('/*') > line.count('*/'):
01528     error(filename, linenum, 'readability/multiline_comment', 5,
01529           'Complex multi-line /*...*/-style comment found. '
01530           'Lint may give bogus warnings.  '
01531           'Consider replacing these with //-style comments, '
01532           'with #if 0...#endif, '
01533           'or with more clearly structured multi-line comments.')
01534 
01535   if (line.count('"') - line.count('\\"')) % 2:
01536     error(filename, linenum, 'readability/multiline_string', 5,
01537           'Multi-line string ("...") found.  This lint script doesn\'t '
01538           'do well with such strings, and may give bogus warnings.  '
01539           'Use C++11 raw strings or concatenation instead.')
01540 
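# Illustration (hypothetical C++ lines) of what the check above flags:
#
#   /* a comment opened here                      <- readability/multiline_comment
#      and closed on a later line */
#   const char* kText = "a string that \          <- readability/multiline_string
#                        continues on this line";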
01541 
01542 threading_list = (
01543     ('asctime(', 'asctime_r('),
01544     ('ctime(', 'ctime_r('),
01545     ('getgrgid(', 'getgrgid_r('),
01546     ('getgrnam(', 'getgrnam_r('),
01547     ('getlogin(', 'getlogin_r('),
01548     ('getpwnam(', 'getpwnam_r('),
01549     ('getpwuid(', 'getpwuid_r('),
01550     ('gmtime(', 'gmtime_r('),
01551     ('localtime(', 'localtime_r('),
01552     ('rand(', 'rand_r('),
01553     ('strtok(', 'strtok_r('),
01554     ('ttyname(', 'ttyname_r('),
01555     )
01556 
01557 
01558 def CheckPosixThreading(filename, clean_lines, linenum, error):
01559   """Checks for calls to thread-unsafe functions.
01560 
01561   Much code was originally written without multi-threading in mind.
01562   In addition, many engineers learned POSIX before its threading
01563   extensions were added, and rely on that older experience.  These
01564   checks steer engineers toward the thread-safe variants (when using
01565   POSIX directly).
01566 
01567   Args:
01568     filename: The name of the current file.
01569     clean_lines: A CleansedLines instance containing the file.
01570     linenum: The number of the line to check.
01571     error: The function to call with any errors found.
01572   """
01573   line = clean_lines.elided[linenum]
01574   for single_thread_function, multithread_safe_function in threading_list:
01575     ix = line.find(single_thread_function)
01576     # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
01577     if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
01578                                 line[ix - 1] not in ('_', '.', '>'))):
01579       error(filename, linenum, 'runtime/threadsafe_fn', 2,
01580             'Consider using ' + multithread_safe_function +
01581             '...) instead of ' + single_thread_function +
01582             '...) for improved thread safety.')
01583 
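# Illustration (hypothetical C++): a line such as
#   struct tm* t = localtime(&now);
# draws a runtime/threadsafe_fn warning suggesting localtime_r(...), while a
# call like my_localtime(&now) is left alone because the character before the
# match is part of an identifier.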
01584 
01585 def CheckVlogArguments(filename, clean_lines, linenum, error):
01586   """Checks that VLOG() is only used for defining a logging level.
01587 
01588   For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
01589   VLOG(FATAL) are not.
01590 
01591   Args:
01592     filename: The name of the current file.
01593     clean_lines: A CleansedLines instance containing the file.
01594     linenum: The number of the line to check.
01595     error: The function to call with any errors found.
01596   """
01597   line = clean_lines.elided[linenum]
01598   if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
01599     error(filename, linenum, 'runtime/vlog', 5,
01600           'VLOG() should be used with numeric verbosity level.  '
01601           'Use LOG() if you want symbolic severity levels.')
01602 
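# Illustration: VLOG(2) passes the check above, while VLOG(ERROR) is flagged
# with a suggestion to use LOG() for symbolic severity levels.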
01603 
01604 # Matches invalid increment: *count++, which moves pointer instead of
01605 # incrementing a value.
01606 _RE_PATTERN_INVALID_INCREMENT = re.compile(
01607     r'^\s*\*\w+(\+\+|--);')
01608 
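# A quick doctest-style sanity check of the pattern (illustrative only):
#
#   >>> bool(_RE_PATTERN_INVALID_INCREMENT.match('  *count++;'))
#   True
#   >>> bool(_RE_PATTERN_INVALID_INCREMENT.match('  (*count)++;'))
#   False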
01609 
01610 def CheckInvalidIncrement(filename, clean_lines, linenum, error):
01611   """Checks for invalid increment *count++.
01612 
01613   For example, the following function:
01614   void increment_counter(int* count) {
01615     *count++;
01616   }
01617   is invalid, because it effectively does count++ (moving the pointer)
01618   and should be replaced with ++*count, (*count)++ or *count += 1.
01619 
01620   Args:
01621     filename: The name of the current file.
01622     clean_lines: A CleansedLines instance containing the file.
01623     linenum: The number of the line to check.
01624     error: The function to call with any errors found.
01625   """
01626   line = clean_lines.elided[linenum]
01627   if _RE_PATTERN_INVALID_INCREMENT.match(line):
01628     error(filename, linenum, 'runtime/invalid_increment', 5,
01629           'Changing pointer instead of value (or unused value of operator*).')
01630 
01631 
01632 class _BlockInfo(object):
01633   """Stores information about a generic block of code."""
01634 
01635   def __init__(self, seen_open_brace):
01636     self.seen_open_brace = seen_open_brace
01637     self.open_parentheses = 0
01638     self.inline_asm = _NO_ASM
01639 
01640   def CheckBegin(self, filename, clean_lines, linenum, error):
01641     """Run checks that applies to text up to the opening brace.
01642 
01643     This is mostly for checking the text after the class identifier
01644     and the "{", usually where the base class is specified.  For other
01645     blocks, there isn't much to check, so we always pass.
01646 
01647     Args:
01648       filename: The name of the current file.
01649       clean_lines: A CleansedLines instance containing the file.
01650       linenum: The number of the line to check.
01651       error: The function to call with any errors found.
01652     """
01653     pass
01654 
01655   def CheckEnd(self, filename, clean_lines, linenum, error):
01656     """Run checks that applies to text after the closing brace.
01657 
01658     This is mostly used for checking end of namespace comments.
01659 
01660     Args:
01661       filename: The name of the current file.
01662       clean_lines: A CleansedLines instance containing the file.
01663       linenum: The number of the line to check.
01664       error: The function to call with any errors found.
01665     """
01666     pass
01667 
01668 
01669 class _ClassInfo(_BlockInfo):
01670   """Stores information about a class."""
01671 
01672   def __init__(self, name, class_or_struct, clean_lines, linenum):
01673     _BlockInfo.__init__(self, False)
01674     self.name = name
01675     self.starting_linenum = linenum
01676     self.is_derived = False
01677     if class_or_struct == 'struct':
01678       self.access = 'public'
01679       self.is_struct = True
01680     else:
01681       self.access = 'private'
01682       self.is_struct = False
01683 
01684     # Remember initial indentation level for this class.  Using raw_lines here
01685     # instead of elided to account for leading comments.
01686     initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
01687     if initial_indent:
01688       self.class_indent = len(initial_indent.group(1))
01689     else:
01690       self.class_indent = 0
01691 
01692     # Try to find the end of the class.  This will be confused by things like:
01693     #   class A {
01694     #   } *x = { ...
01695     #
01696     # But it's still good enough for CheckSectionSpacing.
01697     self.last_line = 0
01698     depth = 0
01699     for i in range(linenum, clean_lines.NumLines()):
01700       line = clean_lines.elided[i]
01701       depth += line.count('{') - line.count('}')
01702       if not depth:
01703         self.last_line = i
01704         break
01705 
01706   def CheckBegin(self, filename, clean_lines, linenum, error):
01707     # Look for a bare ':'
01708     if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
01709       self.is_derived = True
01710 
01711   def CheckEnd(self, filename, clean_lines, linenum, error):
01712     # Check that closing brace is aligned with beginning of the class.
01713     # Only do this if the closing brace is indented by only whitespaces.
01714     # This means we will not check single-line class definitions.
01715     indent = Match(r'^( *)\}', clean_lines.elided[linenum])
01716     if indent and len(indent.group(1)) != self.class_indent:
01717       if self.is_struct:
01718         parent = 'struct ' + self.name
01719       else:
01720         parent = 'class ' + self.name
01721       error(filename, linenum, 'whitespace/indent', 3,
01722             'Closing brace should be aligned with beginning of %s' % parent)
01723 
01724 
01725 class _NamespaceInfo(_BlockInfo):
01726   """Stores information about a namespace."""
01727 
01728   def __init__(self, name, linenum):
01729     _BlockInfo.__init__(self, False)
01730     self.name = name or ''
01731     self.starting_linenum = linenum
01732 
01733   def CheckEnd(self, filename, clean_lines, linenum, error):
01734     """Check end of namespace comments."""
01735     line = clean_lines.raw_lines[linenum]
01736 
01737     # Check how many lines are enclosed in this namespace.  Don't issue
01738     # warning for missing namespace comments if there aren't enough
01739     # lines.  However, do apply checks if there is already an end of
01740     # namespace comment and it's incorrect.
01741     #
01742     # TODO(unknown): We always want to check end of namespace comments
01743     # if a namespace is large, but sometimes we also want to apply the
01744     # check if a short namespace contained nontrivial things (something
01745     # other than forward declarations).  There is currently no logic on
01746     # deciding what these nontrivial things are, so this check is
01747     # triggered by namespace size only, which works most of the time.
01748     if (linenum - self.starting_linenum < 10
01749         and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
01750       return
01751 
01752     # Look for matching comment at end of namespace.
01753     #
01754     # Note that we accept C style "/* */" comments for terminating
01755     # namespaces, so that code that terminates namespaces inside
01756     # preprocessor macros can be cpplint clean.
01757     #
01758     # We also accept stuff like "// end of namespace <name>." with the
01759     # period at the end.
01760     #
01761     # Besides these, we don't accept anything else, otherwise we might
01762     # get false negatives when the existing comment is a substring of the
01763     # expected namespace.
01764     if self.name:
01765       # Named namespace
01766       if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
01767                     r'[\*/\.\\\s]*$'),
01768                    line):
01769         error(filename, linenum, 'readability/namespace', 5,
01770               'Namespace should be terminated with "// namespace %s"' %
01771               self.name)
01772     else:
01773       # Anonymous namespace
01774       if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
01775         error(filename, linenum, 'readability/namespace', 5,
01776               'Namespace should be terminated with "// namespace"')
01777 
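# Illustration (hypothetical namespace name): CheckEnd above accepts endings
# such as
#   }  // namespace mynamespace
#   }  /* namespace mynamespace */
#   }  // namespace                  (for an anonymous namespace)
# and flags a bare closing brace once the namespace spans 10 or more lines.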
01778 
01779 class _PreprocessorInfo(object):
01780   """Stores checkpoints of nesting stacks when #if/#else is seen."""
01781 
01782   def __init__(self, stack_before_if):
01783     # The entire nesting stack before #if
01784     self.stack_before_if = stack_before_if
01785 
01786     # The entire nesting stack up to #else
01787     self.stack_before_else = []
01788 
01789     # Whether we have already seen #else or #elif
01790     self.seen_else = False
01791 
01792 
01793 class _NestingState(object):
01794   """Holds states related to parsing braces."""
01795 
01796   def __init__(self):
01797     # Stack for tracking all braces.  An object is pushed whenever we
01798     # see a "{", and popped when we see a "}".  Only 3 types of
01799     # objects are possible:
01800     # - _ClassInfo: a class or struct.
01801     # - _NamespaceInfo: a namespace.
01802     # - _BlockInfo: some other type of block.
01803     self.stack = []
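    # Illustration (hypothetical C++): after parsing
    #   namespace a { class B { void Method() {
    # this stack would hold, from bottom to top,
    #   [_NamespaceInfo('a'), _ClassInfo('B'), _BlockInfo]
    # where the _BlockInfo corresponds to the method body.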
01804 
01805     # Stack of _PreprocessorInfo objects.
01806     self.pp_stack = []
01807 
01808   def SeenOpenBrace(self):
01809     """Check if we have seen the opening brace for the innermost block.
01810 
01811     Returns:
01812       True if we have seen the opening brace, False if the innermost
01813       block is still expecting an opening brace.
01814     """
01815     return (not self.stack) or self.stack[-1].seen_open_brace
01816 
01817   def InNamespaceBody(self):
01818     """Check if we are currently one level inside a namespace body.
01819 
01820     Returns:
01821       True if top of the stack is a namespace block, False otherwise.
01822     """
01823     return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
01824 
01825   def UpdatePreprocessor(self, line):
01826     """Update preprocessor stack.
01827 
01828     We need to handle preprocessor directives due to classes like this:
01829       #ifdef SWIG
01830       struct ResultDetailsPageElementExtensionPoint {
01831       #else
01832       struct ResultDetailsPageElementExtensionPoint : public Extension {
01833       #endif
01834 
01835     We make the following assumptions (good enough for most files):
01836     - Preprocessor condition evaluates to true from #if up to first
01837       #else/#elif/#endif.
01838 
01839     - Preprocessor condition evaluates to false from #else/#elif up
01840       to #endif.  We still perform lint checks on these lines, but
01841       these do not affect nesting stack.
01842 
01843     Args:
01844       line: current line to check.
01845     """
01846     if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
01847       # Beginning of #if block, save the nesting stack here.  The saved
01848       # stack will allow us to restore the parsing state in the #else case.
01849       self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
01850     elif Match(r'^\s*#\s*(else|elif)\b', line):
01851       # Beginning of #else block
01852       if self.pp_stack:
01853         if not self.pp_stack[-1].seen_else:
01854           # This is the first #else or #elif block.  Remember the
01855           # whole nesting stack up to this point.  This is what we
01856           # keep after the #endif.
01857           self.pp_stack[-1].seen_else = True
01858           self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
01859 
01860         # Restore the stack to how it was before the #if
01861         self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
01862       else:
01863         # TODO(unknown): unexpected #else, issue warning?
01864         pass
01865     elif Match(r'^\s*#\s*endif\b', line):
01866       # End of #if or #else blocks.
01867       if self.pp_stack:
01868         # If we saw an #else, we will need to restore the nesting
01869         # stack to its former state before the #else, otherwise we
01870         # will just continue from where we left off.
01871         if self.pp_stack[-1].seen_else:
01872           # Here we can just use a shallow copy since we are the last
01873           # reference to it.
01874           self.stack = self.pp_stack[-1].stack_before_else
01875         # Drop the corresponding #if
01876         self.pp_stack.pop()
01877       else:
01878         # TODO(unknown): unexpected #endif, issue warning?
01879         pass
01880 
01881   def Update(self, filename, clean_lines, linenum, error):
01882     """Update nesting state with current line.
01883 
01884     Args:
01885       filename: The name of the current file.
01886       clean_lines: A CleansedLines instance containing the file.
01887       linenum: The number of the line to check.
01888       error: The function to call with any errors found.
01889     """
01890     line = clean_lines.elided[linenum]
01891 
01892     # Update pp_stack first
01893     self.UpdatePreprocessor(line)
01894 
01895     # Count parentheses.  This is to avoid adding struct arguments to
01896     # the nesting stack.
01897     if self.stack:
01898       inner_block = self.stack[-1]
01899       depth_change = line.count('(') - line.count(')')
01900       inner_block.open_parentheses += depth_change
01901 
01902       # Also check if we are starting or ending an inline assembly block.
01903       if inner_block.inline_asm in (_NO_ASM, _END_ASM):
01904         if (depth_change != 0 and
01905             inner_block.open_parentheses == 1 and
01906             _MATCH_ASM.match(line)):
01907           # Enter assembly block
01908           inner_block.inline_asm = _INSIDE_ASM
01909         else:
01910           # Not entering assembly block.  If previous line was _END_ASM,
01911           # we will now shift to _NO_ASM state.
01912           inner_block.inline_asm = _NO_ASM
01913       elif (inner_block.inline_asm == _INSIDE_ASM and
01914             inner_block.open_parentheses == 0):
01915         # Exit assembly block
01916         inner_block.inline_asm = _END_ASM
01917 
01918     # Consume namespace declaration at the beginning of the line.  Do
01919     # this in a loop so that we catch same line declarations like this:
01920     #   namespace proto2 { namespace bridge { class MessageSet; } }
01921     while True:
01922       # Match start of namespace.  The "\b\s*" below catches namespace
01923       # declarations even if they aren't followed by whitespace; this
01924       # is so that we don't confuse our namespace checker.  The
01925       # missing spaces will be flagged by CheckSpacing.
01926       namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
01927       if not namespace_decl_match:
01928         break
01929 
01930       new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
01931       self.stack.append(new_namespace)
01932 
01933       line = namespace_decl_match.group(2)
01934       if line.find('{') != -1:
01935         new_namespace.seen_open_brace = True
01936         line = line[line.find('{') + 1:]
01937 
01938     # Look for a class declaration in whatever is left of the line
01939     # after parsing namespaces.  The regexp accounts for decorated classes
01940     # such as in:
01941     #   class LOCKABLE API Object {
01942     #   };
01943     #
01944     # Templates with class arguments may confuse the parser, for example:
01945     #   template <class T
01946     #             class Comparator = less<T>,
01947     #             class Vector = vector<T> >
01948     #   class HeapQueue {
01949     #
01950     # Because this parser has no nesting state about templates, by the
01951     # time it saw "class Comparator", it may think that it's a new class.
01952     # Nested templates have a similar problem:
01953     #   template <
01954     #       typename ExportedType,
01955     #       typename TupleType,
01956     #       template <typename, typename> class ImplTemplate>
01957     #
01958     # To avoid these cases, we ignore classes that are followed by '=' or '>'
01959     class_decl_match = Match(
01960         r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
01961         r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
01962         r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
01963     if (class_decl_match and
01964         (not self.stack or self.stack[-1].open_parentheses == 0)):
01965       self.stack.append(_ClassInfo(
01966           class_decl_match.group(4), class_decl_match.group(2),
01967           clean_lines, linenum))
01968       line = class_decl_match.group(5)
01969 
01970     # If we have not yet seen the opening brace for the innermost block,
01971     # run checks here.
01972     if not self.SeenOpenBrace():
01973       self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
01974 
01975     # Update access control if we are inside a class/struct
01976     if self.stack and isinstance(self.stack[-1], _ClassInfo):
01977       classinfo = self.stack[-1]
01978       access_match = Match(
01979           r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
01980           r':(?:[^:]|$)',
01981           line)
01982       if access_match:
01983         classinfo.access = access_match.group(2)
01984 
01985         # Check that access keywords are indented +1 space.  Skip this
01986         # check if the keywords are not preceded by whitespaces.
01987         indent = access_match.group(1)
01988         if (len(indent) != classinfo.class_indent + 1 and
01989             Match(r'^\s*$', indent)):
01990           if classinfo.is_struct:
01991             parent = 'struct ' + classinfo.name
01992           else:
01993             parent = 'class ' + classinfo.name
01994           slots = ''
01995           if access_match.group(3):
01996             slots = access_match.group(3)
01997           error(filename, linenum, 'whitespace/indent', 3,
01998                 '%s%s: should be indented +1 space inside %s' % (
01999                     access_match.group(2), slots, parent))
02000 
02001     # Consume braces or semicolons from what's left of the line
02002     while True:
02003       # Match first brace, semicolon, or closed parenthesis.
02004       matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
02005       if not matched:
02006         break
02007 
02008       token = matched.group(1)
02009       if token == '{':
02010         # If namespace or class hasn't seen an opening brace yet, mark
02011         # namespace/class head as complete.  Push a new block onto the
02012         # stack otherwise.
02013         if not self.SeenOpenBrace():
02014           self.stack[-1].seen_open_brace = True
02015         else:
02016           self.stack.append(_BlockInfo(True))
02017           if _MATCH_ASM.match(line):
02018             self.stack[-1].inline_asm = _BLOCK_ASM
02019       elif token == ';' or token == ')':
02020         # If we haven't seen an opening brace yet, but we already saw
02021         # a semicolon, this is probably a forward declaration.  Pop
02022         # the stack for these.
02023         #
02024         # Similarly, if we haven't seen an opening brace yet, but we
02025         # already saw a closing parenthesis, then these are probably
02026         # function arguments with extra "class" or "struct" keywords.
02027         # Also pop the stack for these.
02028         if not self.SeenOpenBrace():
02029           self.stack.pop()
02030       else:  # token == '}'
02031         # Perform end of block checks and pop the stack.
02032         if self.stack:
02033           self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
02034           self.stack.pop()
02035       line = matched.group(2)
02036 
02037   def InnermostClass(self):
02038     """Get class info on the top of the stack.
02039 
02040     Returns:
02041       A _ClassInfo object if we are inside a class, or None otherwise.
02042     """
02043     for i in range(len(self.stack), 0, -1):
02044       classinfo = self.stack[i - 1]
02045       if isinstance(classinfo, _ClassInfo):
02046         return classinfo
02047     return None
02048 
02049   def CheckCompletedBlocks(self, filename, error):
02050     """Checks that all classes and namespaces have been completely parsed.
02051 
02052     Call this when all lines in a file have been processed.
02053     Args:
02054       filename: The name of the current file.
02055       error: The function to call with any errors found.
02056     """
02057     # Note: This test can result in false positives if #ifdef constructs
02058     # get in the way of brace matching. See the testBuildClass test in
02059     # cpplint_unittest.py for an example of this.
02060     for obj in self.stack:
02061       if isinstance(obj, _ClassInfo):
02062         error(filename, obj.starting_linenum, 'build/class', 5,
02063               'Failed to find complete declaration of class %s' %
02064               obj.name)
02065       elif isinstance(obj, _NamespaceInfo):
02066         error(filename, obj.starting_linenum, 'build/namespaces', 5,
02067               'Failed to find complete declaration of namespace %s' %
02068               obj.name)
02069 
02070 
02071 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
02072                                   nesting_state, error):
02073   r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
02074 
02075   Complain about several constructs which gcc-2 accepts, but which are
02076   not standard C++.  Warning about these in lint is one way to ease the
02077   transition to new compilers.
02078   - put storage class first (e.g. "static const" instead of "const static").
02079   - "%lld" instead of %qd" in printf-type functions.
02080   - "%1$d" is non-standard in printf-type functions.
02081   - "\%" is an undefined character escape sequence.
02082   - text after #endif is not allowed.
02083   - invalid inner-style forward declaration.
02084   - >? and <? operators, and their >?= and <?= cousins.
02085 
02086   Additionally, check for constructor/destructor style violations and reference
02087   members, as it is very convenient to do so while checking for
02088   gcc-2 compliance.
02089 
02090   Args:
02091     filename: The name of the current file.
02092     clean_lines: A CleansedLines instance containing the file.
02093     linenum: The number of the line to check.
02094     nesting_state: A _NestingState instance which maintains information about
02095                    the current stack of nested blocks being parsed.
02096     error: A callable to which errors are reported; it takes filename,
02097            line number, error category, confidence level, and message
02098   """
02099 
02100   # Remove comments from the line, but leave in strings for now.
02101   line = clean_lines.lines[linenum]
02102 
02103   if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
02104     error(filename, linenum, 'runtime/printf_format', 3,
02105           '%q in format strings is deprecated.  Use %ll instead.')
02106 
02107   if Search(r'printf\s*\(.*".*%\d+\$', line):
02108     error(filename, linenum, 'runtime/printf_format', 2,
02109           '%N$ formats are unconventional.  Try rewriting to avoid them.')
02110 
02111   # Remove escaped backslashes before looking for undefined escapes.
02112   line = line.replace('\\\\', '')
02113 
02114   if Search(r'("|\').*\\(%|\[|\(|{)', line):
02115     error(filename, linenum, 'build/printf_format', 3,
02116           '%, [, (, and { are undefined character escapes.  Unescape them.')
02117 
02118   # For the rest, work with both comments and strings removed.
02119   line = clean_lines.elided[linenum]
02120 
02121   if Search(r'\b(const|volatile|void|char|short|int|long'
02122             r'|float|double|signed|unsigned'
02123             r'|schar|u?int8|u?int16|u?int32|u?int64)'
02124             r'\s+(register|static|extern|typedef)\b',
02125             line):
02126     error(filename, linenum, 'build/storage_class', 5,
02127           'Storage class (static, extern, typedef, etc) should be first.')
02128 
02129   if Match(r'\s*#\s*endif\s*[^/\s]+', line):
02130     error(filename, linenum, 'build/endif_comment', 5,
02131           'Uncommented text after #endif is non-standard.  Use a comment.')
02132 
02133   if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
02134     error(filename, linenum, 'build/forward_decl', 5,
02135           'Inner-style forward declarations are invalid.  Remove this line.')
02136 
02137   if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
02138             line):
02139     error(filename, linenum, 'build/deprecated', 3,
02140           '>? and <? (max and min) operators are non-standard and deprecated.')
02141 
02142   if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
02143     # TODO(unknown): Could it be expanded safely to arbitrary references,
02144     # without triggering too many false positives? The first
02145     # attempt triggered 5 warnings for mostly benign code in the regtest, hence
02146     # the restriction.
02147     # Here's the original regexp, for the reference:
02148     # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
02149     # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
02150     error(filename, linenum, 'runtime/member_string_references', 2,
02151           'const string& members are dangerous. It is much better to use '
02152           'alternatives, such as pointers or simple constants.')
02153 
02154   # Everything else in this function operates on class declarations.
02155   # Return early if the top of the nesting stack is not a class, or if
02156   # the class head is not completed yet.
02157   classinfo = nesting_state.InnermostClass()
02158   if not classinfo or not classinfo.seen_open_brace:
02159     return
02160 
02161   # The class may have been declared with namespace or classname qualifiers.
02162   # The constructor and destructor will not have those qualifiers.
02163   base_classname = classinfo.name.split('::')[-1]
02164 
02165   # Look for single-argument constructors that aren't marked explicit.
02166   # Technically a valid construct, but against style.
02167   args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
02168                % re.escape(base_classname),
02169                line)
02170   if (args and
02171       args.group(1) != 'void' and
02172       not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
02173                 % re.escape(base_classname), args.group(1).strip())):
02174     error(filename, linenum, 'runtime/explicit', 5,
02175           'Single-argument constructors should be marked explicit.')
02176 
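# Illustration (hypothetical C++ lines) of constructs flagged above:
#
#   const static int kLimit = 5;   <- build/storage_class ("static" should come first)
#   printf("%qd\n", value);        <- runtime/printf_format (use %lld instead of %q)
#   class Foo::Bar;                <- build/forward_decl (inner-style forward declaration)
#   Foo(int x);                    <- runtime/explicit, when it is a single-argument
#                                     constructor inside class Foo without "explicit"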
02177 
02178 def CheckSpacingForFunctionCall(filename, line, linenum, error):
02179   """Checks for the correctness of various spacing around function calls.
02180 
02181   Args:
02182     filename: The name of the current file.
02183     line: The text of the line to check.
02184     linenum: The number of the line to check.
02185     error: The function to call with any errors found.
02186   """
02187 
02188   # Since function calls often occur inside if/for/while/switch
02189   # expressions - which have their own, more liberal conventions - we
02190   # first see if we should be looking inside such an expression for a
02191   # function call, to which we can apply more strict standards.
02192   fncall = line    # if there's no control flow construct, look at whole line
02193   for pattern in (r'\bif\s*\((.*)\)\s*{',
02194                   r'\bfor\s*\((.*)\)\s*{',
02195                   r'\bwhile\s*\((.*)\)\s*[{;]',
02196                   r'\bswitch\s*\((.*)\)\s*{'):
02197     match = Search(pattern, line)
02198     if match:
02199       fncall = match.group(1)    # look inside the parens for function calls
02200       break
02201 
02202   # Except in if/for/while/switch, there should never be space
02203   # immediately inside parens (eg "f( 3, 4 )").  We make an exception
02204   # for nested parens ( (a+b) + c ).  Likewise, there should never be
02205   # a space before a ( when it's a function argument.  I assume it's a
02206   # function argument when the char before the whitespace is legal in
02207   # a function name (alnum + _) and we're not starting a macro. Also ignore
02208   # pointers and references to arrays and functions coz they're too tricky:
02209   # we use a very simple way to recognize these:
02210   # " (something)(maybe-something)" or
02211   # " (something)(maybe-something," or
02212   # " (something)[something]"
02213   # Note that we assume the contents of [] to be short enough that
02214   # they'll never need to wrap.
02215   if (  # Ignore control structures.
02216       not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
02217                  fncall) and
02218       # Ignore pointers/references to functions.
02219       not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
02220       # Ignore pointers/references to arrays.
02221       not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
02222     if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
02223       error(filename, linenum, 'whitespace/parens', 4,
02224             'Extra space after ( in function call')
02225     elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
02226       error(filename, linenum, 'whitespace/parens', 2,
02227             'Extra space after (')
02228     if (Search(r'\w\s+\(', fncall) and
02229         not Search(r'#\s*define|typedef', fncall) and
02230         not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
02231       error(filename, linenum, 'whitespace/parens', 4,
02232             'Extra space before ( in function call')
02233     # If the ) is followed only by a newline or a { + newline, assume it's
02234     # part of a control statement (if/while/etc), and don't complain
02235     if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
02236       # If the closing parenthesis is preceded by only whitespaces,
02237       # try to give a more descriptive error message.
02238       if Search(r'^\s+\)', fncall):
02239         error(filename, linenum, 'whitespace/parens', 2,
02240               'Closing ) should be moved to the previous line')
02241       else:
02242         error(filename, linenum, 'whitespace/parens', 2,
02243               'Extra space before )')
02244 
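# Illustration (hypothetical C++ calls) of the spacing complaints above:
#
#   DoWork( x, y);    <- "Extra space after ( in function call"
#   DoWork (x, y);    <- "Extra space before ( in function call"
#   DoWork(x, y );    <- "Extra space before )"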
02245 
02246 def IsBlankLine(line):
02247   """Returns true if the given line is blank.
02248 
02249   We consider a line to be blank if the line is empty or consists
02250   only of whitespace.
02251 
02252   Args:
02253     line: A line of a string.
02254 
02255   Returns:
02256     True, if the given line is blank.
02257   """
02258   return not line or line.isspace()
02259 
02260 
02261 def CheckForFunctionLengths(filename, clean_lines, linenum,
02262                             function_state, error):
02263   """Reports for long function bodies.
02264 
02265   For an overview why this is done, see:
02266   http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
02267 
02268   Uses a simplistic algorithm assuming other style guidelines
02269   (especially spacing) are followed.
02270   Only checks unindented functions, so class members are unchecked.
02271   Trivial bodies are unchecked, so constructors with huge initializer lists
02272   may be missed.
02273   Blank/comment lines are not counted so as to avoid encouraging the removal
02274   of vertical space and comments just to get through a lint check.
02275   NOLINT *on the last line of a function* disables this check.
02276 
02277   Args:
02278     filename: The name of the current file.
02279     clean_lines: A CleansedLines instance containing the file.
02280     linenum: The number of the line to check.
02281     function_state: Current function name and lines in body so far.
02282     error: The function to call with any errors found.
02283   """
02284   lines = clean_lines.lines
02285   line = lines[linenum]
02286   raw = clean_lines.raw_lines
02287   raw_line = raw[linenum]
02288   joined_line = ''
02289 
02290   starting_func = False
02291   regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
02292   match_result = Match(regexp, line)
02293   if match_result:
02294     # If the name is all caps and underscores, figure it's a macro and
02295     # ignore it, unless it's TEST or TEST_F.
02296     function_name = match_result.group(1).split()[-1]
02297     if function_name == 'TEST' or function_name == 'TEST_F' or (
02298         not Match(r'[A-Z_]+$', function_name)):
02299       starting_func = True
02300 
02301   if starting_func:
02302     body_found = False
02303     for start_linenum in xrange(linenum, clean_lines.NumLines()):
02304       start_line = lines[start_linenum]
02305       joined_line += ' ' + start_line.lstrip()
02306       if Search(r'(;|})', start_line):  # Declarations and trivial functions
02307         body_found = True
02308         break                              # ... ignore
02309       elif Search(r'{', start_line):
02310         body_found = True
02311         function = Search(r'((\w|:)*)\(', line).group(1)
02312         if Match(r'TEST', function):    # Handle TEST... macros
02313           parameter_regexp = Search(r'(\(.*\))', joined_line)
02314           if parameter_regexp:             # Ignore bad syntax
02315             function += parameter_regexp.group(1)
02316         else:
02317           function += '()'
02318         function_state.Begin(function)
02319         break
02320     if not body_found:
02321       # No body for the function (or evidence of a non-function) was found.
02322       error(filename, linenum, 'readability/fn_size', 5,
02323             'Lint failed to find start of function body.')
02324   elif Match(r'^\}\s*$', line):  # function end
02325     function_state.Check(error, filename, linenum)
02326     function_state.End()
02327   elif not Match(r'^\s*$', line):
02328     function_state.Count()  # Count non-blank/non-comment lines.
02329 
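# Illustration: with the declaration regexp above, a hypothetical line such as
#   void MyClass::DoWork(int n) {
# starts function-length counting, while an all-caps name like MY_MACRO(x) is
# treated as a macro and skipped -- unless it is TEST or TEST_F.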
02330 
02331 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
02332 
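# A quick doctest-style sanity check of the pattern (illustrative only):
#
#   >>> _RE_PATTERN_TODO.match('// TODO(alice): Refactor this.').group(2)
#   '(alice)'
#   >>> _RE_PATTERN_TODO.match('// TODO: no username').group(2) is None
#   True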
02333 
02334 def CheckComment(comment, filename, linenum, error):
02335   """Checks for common mistakes in TODO comments.
02336 
02337   Args:
02338     comment: The text of the comment from the line in question.
02339     filename: The name of the current file.
02340     linenum: The number of the line to check.
02341     error: The function to call with any errors found.
02342   """
02343   match = _RE_PATTERN_TODO.match(comment)
02344   if match:
02345     # One whitespace is correct; zero whitespace is handled elsewhere.
02346     leading_whitespace = match.group(1)
02347     if len(leading_whitespace) > 1:
02348       error(filename, linenum, 'whitespace/todo', 2,
02349             'Too many spaces before TODO')
02350 
02351     username = match.group(2)
02352     if not username:
02353       error(filename, linenum, 'readability/todo', 2,
02354             'Missing username in TODO; it should look like '
02355             '"// TODO(my_username): Stuff."')
02356 
02357     middle_whitespace = match.group(3)
02358     # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
02359     if middle_whitespace != ' ' and middle_whitespace != '':
02360       error(filename, linenum, 'whitespace/todo', 2,
02361             'TODO(my_username) should be followed by a space')
02362 
02363 def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
02364   """Checks for improper use of DISALLOW* macros.
02365 
02366   Args:
02367     filename: The name of the current file.
02368     clean_lines: A CleansedLines instance containing the file.
02369     linenum: The number of the line to check.
02370     nesting_state: A _NestingState instance which maintains information about
02371                    the current stack of nested blocks being parsed.
02372     error: The function to call with any errors found.
02373   """
02374   line = clean_lines.elided[linenum]  # get rid of comments and strings
02375 
02376   matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
02377                    r'DISALLOW_EVIL_CONSTRUCTORS|'
02378                    r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
02379   if not matched:
02380     return
02381   if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
02382     if nesting_state.stack[-1].access != 'private':
02383       error(filename, linenum, 'readability/constructors', 3,
02384             '%s must be in the private: section' % matched.group(1))
02385 
02386   else:
02387     # Found DISALLOW* macro outside a class declaration, or perhaps it
02388     # was used inside a function when it should have been part of the
02389     # class declaration.  We could issue a warning here, but it
02390     # probably resulted in a compiler error already.
02391     pass
02392 
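# Illustration (hypothetical class): the DISALLOW* macro must appear in the
# private section, e.g.
#
#   class Widget {
#    public:
#     Widget();
#    private:
#     DISALLOW_COPY_AND_ASSIGN(Widget);   // OK here; flagged if under public:
#   };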
02393 
02394 def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
02395   """Find the corresponding > to close a template.
02396 
02397   Args:
02398     clean_lines: A CleansedLines instance containing the file.
02399     linenum: Current line number.
02400     init_suffix: Remainder of the current line after the initial <.
02401 
02402   Returns:
02403     True if a matching bracket exists.
02404   """
02405   line = init_suffix
02406   nesting_stack = ['<']
02407   while True:
02408     # Find the next operator that can tell us whether < is used as an
02409     # opening bracket or as a less-than operator.  We only want to
02410     # warn on the latter case.
02411     #
02412     # We could also check all other operators and terminate the search
02413     # early, e.g. if we got something like this "a<b+c", the "<" is
02414     # most likely a less-than operator, but then we will get false
02415     # positives for default arguments and other template expressions.
02416     match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
02417     if match:
02418       # Found an operator, update nesting stack
02419       operator = match.group(1)
02420       line = match.group(2)
02421 
02422       if nesting_stack[-1] == '<':
02423         # Expecting closing angle bracket
02424         if operator in ('<', '(', '['):
02425           nesting_stack.append(operator)
02426         elif operator == '>':
02427           nesting_stack.pop()
02428           if not nesting_stack:
02429             # Found matching angle bracket
02430             return True
02431         elif operator == ',':
02432           # Got a comma after a bracket, this is most likely a template
02433           # argument.  We have not seen a closing angle bracket yet, but
02434           # it's probably a few lines later if we look for it, so just
02435           # return early here.
02436           return True
02437         else:
02438           # Got some other operator.
02439           return False
02440 
02441       else:
02442         # Expecting closing parenthesis or closing bracket
02443         if operator in ('<', '(', '['):
02444           nesting_stack.append(operator)
02445         elif operator in (')', ']'):
02446           # We don't bother checking for matching () or [].  If we got
02447           # something like (] or [), it would have been a syntax error.
02448           nesting_stack.pop()
02449 
02450     else:
02451       # Scan the next line
02452       linenum += 1
02453       if linenum >= len(clean_lines.elided):
02454         break
02455       line = clean_lines.elided[linenum]
02456 
02457   # Exhausted all remaining lines and still no matching angle bracket.
02458   # Most likely the input was incomplete, otherwise we should have
02459   # seen a semicolon and returned early.
02460   return True
02461 
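# Illustration (hypothetical expressions): for "map<string, int> counts;" the
# text after the '<' makes this helper report a matching bracket early (the
# ',' inside the template argument list returns True), so CheckSpacing stays
# quiet; for "if (a<b+c) {" the first operator found after the '<' is ')', the
# helper returns False, and CheckSpacing reports missing spaces around '<'.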
02462 
02463 def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
02464   """Find the corresponding < that started a template.
02465 
02466   Args:
02467     clean_lines: A CleansedLines instance containing the file.
02468     linenum: Current line number.
02469     init_prefix: Part of the current line before the initial >.
02470 
02471   Returns:
02472     True if a matching bracket exists.
02473   """
02474   line = init_prefix
02475   nesting_stack = ['>']
02476   while True:
02477     # Find the previous operator
02478     match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
02479     if match:
02480       # Found an operator, update nesting stack
02481       operator = match.group(2)
02482       line = match.group(1)
02483 
02484       if nesting_stack[-1] == '>':
02485         # Expecting opening angle bracket
02486         if operator in ('>', ')', ']'):
02487           nesting_stack.append(operator)
02488         elif operator == '<':
02489           nesting_stack.pop()
02490           if not nesting_stack:
02491             # Found matching angle bracket
02492             return True
02493         elif operator == ',':
02494           # Got a comma before a bracket, this is most likely a
02495           # template argument.  The opening angle bracket is probably
02496           # there if we look for it, so just return early here.
02497           return True
02498         else:
02499           # Got some other operator.
02500           return False
02501 
02502       else:
02503         # Expecting opening parenthesis or opening bracket
02504         if operator in ('>', ')', ']'):
02505           nesting_stack.append(operator)
02506         elif operator in ('(', '['):
02507           nesting_stack.pop()
02508 
02509     else:
02510       # Scan the previous line
02511       linenum -= 1
02512       if linenum < 0:
02513         break
02514       line = clean_lines.elided[linenum]
02515 
02516   # Exhausted all earlier lines and still no matching angle bracket.
02517   return False
02518 
02519 
02520 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
02521   """Checks for the correctness of various spacing issues in the code.
02522 
02523   Things we check for: spaces around operators, spaces after
02524   if/for/while/switch, no spaces around parens in function calls, two
02525   spaces between code and comment, don't start a block with a blank
02526   line, don't end a function with a blank line, don't add a blank line
02527   after public/protected/private, don't have too many blank lines in a row.
02528 
02529   Args:
02530     filename: The name of the current file.
02531     clean_lines: A CleansedLines instance containing the file.
02532     linenum: The number of the line to check.
02533     nesting_state: A _NestingState instance which maintains information about
02534                    the current stack of nested blocks being parsed.
02535     error: The function to call with any errors found.
02536   """
02537 
02538   # Don't use "elided" lines here, otherwise we can't check commented lines.
02539   # Don't want to use "raw" either, because we don't want to check inside C++11
02540   # raw strings.
02541   raw = clean_lines.lines_without_raw_strings
02542   line = raw[linenum]
02543 
02544   # Before nixing comments, check if the line is blank for no good
02545   # reason.  This includes the first line after a block is opened, and
02546   # blank lines at the end of a function (i.e., right before a line like '}').
02547   #
02548   # Skip all the blank line checks if we are immediately inside a
02549   # namespace body.  In other words, don't issue blank line warnings
02550   # for this block:
02551   #   namespace {
02552   #
02553   #   }
02554   #
02555   # A warning about missing end of namespace comments will be issued instead.
02556   if IsBlankLine(line) and not nesting_state.InNamespaceBody():
02557     elided = clean_lines.elided
02558     prev_line = elided[linenum - 1]
02559     prevbrace = prev_line.rfind('{')
02560     # TODO(unknown): Don't complain if line before blank line, and line after,
02561     #                both start with alnums and are indented the same amount.
02562     #                This ignores whitespace at the start of a namespace block
02563     #                because those are not usually indented.
02564     if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
02565       # OK, we have a blank line at the start of a code block.  Before we
02566       # complain, we check if it is an exception to the rule: The previous
02567       # non-empty line has the parameters of a function header that are indented
02568       # 4 spaces (because they did not fit in an 80 column line when placed on
02569       # the same line as the function name).  We also check for the case where
02570       # the previous line is indented 6 spaces, which may happen when the
02571       # initializers of a constructor do not fit into an 80 column line.
02572       exception = False
02573       if Match(r' {6}\w', prev_line):  # Initializer list?
02574         # We are looking for the opening column of initializer list, which
02575         # should be indented 4 spaces to cause 6 space indentation afterwards.
02576         search_position = linenum-2
02577         while (search_position >= 0
02578                and Match(r' {6}\w', elided[search_position])):
02579           search_position -= 1
02580         exception = (search_position >= 0
02581                      and elided[search_position][:5] == '    :')
02582       else:
02583         # Search for the function arguments or an initializer list.  We use a
02584         # simple heuristic here: if the line is indented 4 spaces and we have a
02585         # closing paren, without the opening paren, followed by an opening brace
02586         # or colon (for initializer lists), we assume that it is the last line of
02587         # a function header.  If we have a colon indented 4 spaces, it is an
02588         # initializer list.
02589         exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
02590                            prev_line)
02591                      or Match(r' {4}:', prev_line))
02592 
02593       if not exception:
02594         error(filename, linenum, 'whitespace/blank_line', 2,
02595               'Redundant blank line at the start of a code block '
02596               'should be deleted.')
02597     # Ignore blank lines at the end of a block in a long if-else
02598     # chain, like this:
02599     #   if (condition1) {
02600     #     // Something followed by a blank line
02601     #
02602     #   } else if (condition2) {
02603     #     // Something else
02604     #   }
02605     if linenum + 1 < clean_lines.NumLines():
02606       next_line = raw[linenum + 1]
02607       if (next_line
02608           and Match(r'\s*}', next_line)
02609           and next_line.find('} else ') == -1):
02610         error(filename, linenum, 'whitespace/blank_line', 3,
02611               'Redundant blank line at the end of a code block '
02612               'should be deleted.')
02613 
02614     matched = Match(r'\s*(public|protected|private):', prev_line)
02615     if matched:
02616       error(filename, linenum, 'whitespace/blank_line', 3,
02617             'Do not leave a blank line after "%s:"' % matched.group(1))
02618 
02619   # Next, we complain if there's a comment too near the text
02620   commentpos = line.find('//')
02621   if commentpos != -1:
02622     # Check if the // may be in quotes.  If so, ignore it
02623     # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
02624     if (line.count('"', 0, commentpos) -
02625         line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
02626       # Allow one space for new scopes, two spaces otherwise:
02627       if (not Match(r'^\s*{ //', line) and
02628           ((commentpos >= 1 and
02629             line[commentpos-1] not in string.whitespace) or
02630            (commentpos >= 2 and
02631             line[commentpos-2] not in string.whitespace))):
02632         error(filename, linenum, 'whitespace/comments', 2,
02633               'At least two spaces is best between code and comments')
02634       # There should always be a space between the // and the comment
02635       commentend = commentpos + 2
02636       if commentend < len(line) and not line[commentend] == ' ':
02637         # but some lines are exceptions -- e.g. if they're big
02638         # comment delimiters like:
02639         # //----------------------------------------------------------
02640         # or are an empty C++ style Doxygen comment, like:
02641         # ///
02642         # or C++ style Doxygen comments placed after the variable:
02643         # ///<  Header comment
02644         # //!<  Header comment
02645         # or they begin with multiple slashes followed by a space:
02646         # //////// Header comment
02647         match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
02648                  Search(r'^/$', line[commentend:]) or
02649                  Search(r'^!< ', line[commentend:]) or
02650                  Search(r'^/< ', line[commentend:]) or
02651                  Search(r'^/+ ', line[commentend:]))
02652         if not match:
02653           error(filename, linenum, 'whitespace/comments', 4,
02654                 'Should have a space between // and comment')
02655       CheckComment(line[commentpos:], filename, linenum, error)
02656 
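  # Illustration (hypothetical C++): "int x = 0;  // counter" satisfies the
  # comment-spacing checks above, while "int x = 0; //counter" draws both the
  # two-spaces-before-comment and the space-after-slashes complaints.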
02657   line = clean_lines.elided[linenum]  # get rid of comments and strings
02658 
02659   # Don't try to do spacing checks for operator methods
02660   line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
02661 
02662   # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
02663   # Otherwise not.  Note we only check for non-spaces on *both* sides;
02664   # sometimes people put non-spaces on one side when aligning ='s among
02665   # many lines (not that this is behavior that I approve of...)
02666   if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
02667     error(filename, linenum, 'whitespace/operators', 4,
02668           'Missing spaces around =')
02669 
02670   # It's ok not to have spaces around binary operators like + - * /, but if
02671   # there's too little whitespace, we get concerned.  It's hard to tell,
02672   # though, so we punt on this one for now.  TODO.
02673 
02674   # You should always have whitespace around binary operators.
02675   #
02676   # Check <= and >= first to avoid false positives with < and >, then
02677   # check non-include lines for spacing around < and >.
02678   match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
02679   if match:
02680     error(filename, linenum, 'whitespace/operators', 3,
02681           'Missing spaces around %s' % match.group(1))
02682   # We allow no-spaces around << when used like this: 10<<20, but
02683   # not otherwise (particularly, not when used as streams)
02684   # Also ignore using ns::operator<<;
02685   match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
02686   if (match and
02687       not (match.group(1).isdigit() and match.group(2).isdigit()) and
02688       not (match.group(1) == 'operator' and match.group(2) == ';')):
02689     error(filename, linenum, 'whitespace/operators', 3,
02690           'Missing spaces around <<')
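  # For illustration, a line like 'std::cout<<"hi";' would typically be
  # flagged by the check above, while "1<<20" would not.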
02691   elif not Match(r'#.*include', line):
02692     # Avoid false positives on ->
02693     reduced_line = line.replace('->', '')
02694 
02695     # Look for < that is not surrounded by spaces.  This is only
02696     # triggered if both sides are missing spaces, even though
02697     # technically we should flag if at least one side is missing a
02698     # space.  This is done to avoid some false positives with shifts.
02699     match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
02700     if (match and
02701         not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
02702       error(filename, linenum, 'whitespace/operators', 3,
02703             'Missing spaces around <')
02704 
02705     # Look for > that is not surrounded by spaces.  Similar to the
02706     # above, we only trigger if both sides are missing spaces to avoid
02707     # false positives with shifts.
02708     match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
02709     if (match and
02710         not FindPreviousMatchingAngleBracket(clean_lines, linenum,
02711                                              match.group(1))):
02712       error(filename, linenum, 'whitespace/operators', 3,
02713             'Missing spaces around >')
02714 
02715   # We allow no-spaces around >> for almost anything.  This is because
02716   # C++11 allows ">>" to close nested templates, which accounts for
02717   # most cases when ">>" is not followed by a space.
02718   #
02719   # We still warn on ">>" followed by alpha character, because that is
02720   # likely due to ">>" being used for right shifts, e.g.:
02721   #   value >> alpha
02722   #
02723   # When ">>" is used to close templates, the alphanumeric letter that
02724   # follows would be part of an identifier, and there should still be
02725   # a space separating the template type and the identifier.
02726   #   type<type<type>> alpha
02727   match = Search(r'>>[a-zA-Z_]', line)
02728   if match:
02729     error(filename, linenum, 'whitespace/operators', 3,
02730           'Missing spaces around >>')
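  # For illustration, "x = a>>b;" would typically be flagged by the check
  # above, while "std::set<std::set<int>> s;" would not (">>" is followed by
  # a space there).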
02731 
02732   # There shouldn't be space around unary operators
02733   match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
02734   if match:
02735     error(filename, linenum, 'whitespace/operators', 4,
02736           'Extra space for operator %s' % match.group(1))
02737 
02738   # A pet peeve of mine: no spaces after an if, while, switch, or for
02739   match = Search(r' (if\(|for\(|while\(|switch\()', line)
02740   if match:
02741     error(filename, linenum, 'whitespace/parens', 5,
02742           'Missing space before ( in %s' % match.group(1))
02743 
02744   # For if/for/while/switch, the left and right parens should be
02745   # consistent about how many spaces are inside the parens, and
02746   # there should either be zero or one spaces inside the parens.
02747   # We don't want: "if ( foo)" or "if ( foo   )".
02748   # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
02749   match = Search(r'\b(if|for|while|switch)\s*'
02750                  r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
02751                  line)
02752   if match:
02753     if len(match.group(2)) != len(match.group(4)):
02754       if not (match.group(3) == ';' and
02755               len(match.group(2)) == 1 + len(match.group(4)) or
02756               not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
02757         error(filename, linenum, 'whitespace/parens', 5,
02758               'Mismatching spaces inside () in %s' % match.group(1))
02759     if len(match.group(2)) not in [0, 1]:
02760       error(filename, linenum, 'whitespace/parens', 5,
02761             'Should have zero or one spaces inside ( and ) in %s' %
02762             match.group(1))
02763 
02764   # You should always have a space after a comma (either as fn arg or operator)
02765   #
02766   # This does not apply when the non-space character following the
02767   # comma is another comma, since the only time when that happens is
02768   # for empty macro arguments.
02769   #
02770   # We run this check in two passes: first pass on elided lines to
02771   # verify that lines contain missing whitespaces, second pass on raw
02772   # lines to confirm that those missing whitespaces are not due to
02773   # elided comments.
02774   if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
02775     error(filename, linenum, 'whitespace/comma', 3,
02776           'Missing space after ,')
02777 
02778   # You should always have a space after a semicolon
02779   # except for a few corner cases
02780   # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
02781   # space after ;
02782   if Search(r';[^\s};\\)/]', line):
02783     error(filename, linenum, 'whitespace/semicolon', 3,
02784           'Missing space after ;')
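  # For illustration, "for (int i = 0;i < n; ++i)" would typically be flagged
  # by the check above, while ";}" and ";;" are tolerated.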
02785 
02786   # Next we will look for issues with function calls.
02787   CheckSpacingForFunctionCall(filename, line, linenum, error)
02788 
02789   # Except after an opening paren, or after another opening brace (in case of
02790   # an initializer list, for instance), you should have spaces before your
02791   # braces. And since you should never have braces at the beginning of a line,
02792   # this is an easy test.
02793   match = Match(r'^(.*[^ ({]){', line)
02794   if match:
02795     # Try a bit harder to check for brace initialization.  This
02796     # happens in one of the following forms:
02797     #   Constructor() : initializer_list_{} { ... }
02798     #   Constructor{}.MemberFunction()
02799     #   Type variable{};
02800     #   FunctionCall(type{}, ...);
02801     #   LastArgument(..., type{});
02802     #   LOG(INFO) << type{} << " ...";
02803     #   map_of_type[{...}] = ...;
02804     #
02805     # We check for the character following the closing brace, and
02806     # silence the warning if it's one of those listed above, i.e.
02807     # "{.;,)<]".
02808     #
02809     # To account for nested initializer list, we allow any number of
02810     # closing braces up to "{;,)<".  We can't simply silence the
02811     # warning on first sight of closing brace, because that would
02812     # cause false negatives for things that are not initializer lists.
02813     #   Silence this:         But not this:
02814     #     Outer{                if (...) {
02815     #       Inner{...}            if (...){  // Missing space before {
02816     #     };                    }
02817     #
02818     # There is a false negative with this approach if people inserted
02819     # spurious semicolons, e.g. "if (cond){};", but we will catch the
02820     # spurious semicolon with a separate check.
02821     (endline, endlinenum, endpos) = CloseExpression(
02822         clean_lines, linenum, len(match.group(1)))
02823     trailing_text = ''
02824     if endpos > -1:
02825       trailing_text = endline[endpos:]
02826     for offset in xrange(endlinenum + 1,
02827                          min(endlinenum + 3, clean_lines.NumLines() - 1)):
02828       trailing_text += clean_lines.elided[offset]
02829     if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
02830       error(filename, linenum, 'whitespace/braces', 5,
02831             'Missing space before {')
02832 
02833   # Make sure '} else {' has spaces.
02834   if Search(r'}else', line):
02835     error(filename, linenum, 'whitespace/braces', 5,
02836           'Missing space before else')
02837 
02838   # You shouldn't have spaces before your brackets, except maybe after
02839   # 'delete []' or 'new char * []'.
02840   if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
02841     error(filename, linenum, 'whitespace/braces', 5,
02842           'Extra space before [')
02843 
02844   # You shouldn't have a space before a semicolon at the end of the line.
02845   # There's a special case for "for" since the style guide allows space before
02846   # the semicolon there.
02847   if Search(r':\s*;\s*$', line):
02848     error(filename, linenum, 'whitespace/semicolon', 5,
02849           'Semicolon defining empty statement. Use {} instead.')
02850   elif Search(r'^\s*;\s*$', line):
02851     error(filename, linenum, 'whitespace/semicolon', 5,
02852           'Line contains only semicolon. If this should be an empty statement, '
02853           'use {} instead.')
02854   elif (Search(r'\s+;\s*$', line) and
02855         not Search(r'\bfor\b', line)):
02856     error(filename, linenum, 'whitespace/semicolon', 5,
02857           'Extra space before last semicolon. If this should be an empty '
02858           'statement, use {} instead.')
02859 
02860   # In range-based for, we want spaces before and after the colon, but
02861   # not around "::" tokens that might appear.
02862   if (Search('for *\(.*[^:]:[^: ]', line) or
02863       Search('for *\(.*[^: ]:[^:]', line)):
02864     error(filename, linenum, 'whitespace/forcolon', 2,
02865           'Missing space around colon in range-based for loop')
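  # For illustration, "for (auto x:values) {" would typically be flagged by
  # the check above, while "for (auto x : ns::values) {" would not.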
02866 
02867 
02868 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
02869   """Checks for additional blank line issues related to sections.
02870 
02871   Currently the only thing checked here is blank line before protected/private.
02872 
02873   Args:
02874     filename: The name of the current file.
02875     clean_lines: A CleansedLines instance containing the file.
02876     class_info: A _ClassInfo object.
02877     linenum: The number of the line to check.
02878     error: The function to call with any errors found.
02879   """
02880   # Skip checks if the class is small, where small means 25 lines or less.
02881   # 25 lines seems like a good cutoff since that's the usual height of
02882   # terminals, and any class that can't fit in one screen can't really
02883   # be considered "small".
02884   #
02885   # Also skip checks if we are on the first line.  This accounts for
02886   # classes that look like
02887   #   class Foo { public: ... };
02888   #
02889   # If we didn't find the end of the class, last_line would be zero,
02890   # and the check will be skipped by the first condition.
02891   if (class_info.last_line - class_info.starting_linenum <= 24 or
02892       linenum <= class_info.starting_linenum):
02893     return
02894 
02895   matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
02896   if matched:
02897     # Issue warning if the line before public/protected/private was
02898     # not a blank line, but don't do this if the previous line contains
02899     # "class" or "struct".  This can happen two ways:
02900     #  - We are at the beginning of the class.
02901     #  - We are forward-declaring an inner class that is semantically
02902     #    private, but needed to be public for implementation reasons.
02903     # Also ignores cases where the previous line ends with a backslash as can be
02904     # common when defining classes in C macros.
02905     prev_line = clean_lines.lines[linenum - 1]
02906     if (not IsBlankLine(prev_line) and
02907         not Search(r'\b(class|struct)\b', prev_line) and
02908         not Search(r'\\$', prev_line)):
02909       # Try a bit harder to find the beginning of the class.  This is to
02910       # account for multi-line base-specifier lists, e.g.:
02911       #   class Derived
02912       #       : public Base {
02913       end_class_head = class_info.starting_linenum
02914       for i in range(class_info.starting_linenum, linenum):
02915         if Search(r'\{\s*$', clean_lines.lines[i]):
02916           end_class_head = i
02917           break
02918       if end_class_head < linenum - 1:
02919         error(filename, linenum, 'whitespace/blank_line', 3,
02920               '"%s:" should be preceded by a blank line' % matched.group(1))
02921 
02922 
02923 def GetPreviousNonBlankLine(clean_lines, linenum):
02924   """Return the most recent non-blank line and its line number.
02925 
02926   Args:
02927     clean_lines: A CleansedLines instance containing the file contents.
02928     linenum: The number of the line to check.
02929 
02930   Returns:
02931     A tuple with two elements.  The first element is the contents of the last
02932     non-blank line before the current line, or the empty string if this is the
02933     first non-blank line.  The second is the line number of that line, or -1
02934     if this is the first non-blank line.
02935   """
02936 
02937   prevlinenum = linenum - 1
02938   while prevlinenum >= 0:
02939     prevline = clean_lines.elided[prevlinenum]
02940     if not IsBlankLine(prevline):     # if not a blank line...
02941       return (prevline, prevlinenum)
02942     prevlinenum -= 1
02943   return ('', -1)
02944 
02945 
02946 def CheckBraces(filename, clean_lines, linenum, error):
02947   """Looks for misplaced braces (e.g. at the end of line).
02948 
02949   Args:
02950     filename: The name of the current file.
02951     clean_lines: A CleansedLines instance containing the file.
02952     linenum: The number of the line to check.
02953     error: The function to call with any errors found.
02954   """
02955 
02956   line = clean_lines.elided[linenum]        # get rid of comments and strings
02957 
02958   if Match(r'\s*{\s*$', line):
02959     # We allow an open brace to start a line in the case where someone is using
02960     # braces in a block to explicitly create a new scope, which is commonly used
02961     # to control the lifetime of stack-allocated variables.  Braces are also
02962     # used for brace initializers inside function calls.  We don't detect this
02963     # perfectly: we just don't complain if the last non-whitespace character on
02964     # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
02965     # previous line starts a preprocessor block.
02966     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
02967     if (not Search(r'[,;:}{(]\s*$', prevline) and
02968         not Match(r'\s*#', prevline)):
02969       error(filename, linenum, 'whitespace/braces', 4,
02970             '{ should almost always be at the end of the previous line')
02971 
02972   # An else clause should be on the same line as the preceding closing brace.
02973   if Match(r'\s*else\s*', line):
02974     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
02975     if Match(r'\s*}\s*$', prevline):
02976       error(filename, linenum, 'whitespace/newline', 4,
02977             'An else should appear on the same line as the preceding }')
02978 
02979   # If braces come on one side of an else, they should be on both.
02980   # However, we have to worry about "else if" that spans multiple lines!
02981   if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
02982     if Search(r'}\s*else if([^{]*)$', line):       # could be multi-line if
02983       # find the ( after the if
02984       pos = line.find('else if')
02985       pos = line.find('(', pos)
02986       if pos > 0:
02987         (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
02988         if endline[endpos:].find('{') == -1:    # must be brace after if
02989           error(filename, linenum, 'readability/braces', 5,
02990                 'If an else has a brace on one side, it should have it on both')
02991     else:            # common case: else not followed by a multi-line if
02992       error(filename, linenum, 'readability/braces', 5,
02993             'If an else has a brace on one side, it should have it on both')
02994 
02995   # Likewise, an else clause should never have its body on the same line
02996   if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
02997     error(filename, linenum, 'whitespace/newline', 4,
02998           'Else clause should never be on same line as else (use 2 lines)')
02999 
03000   # In the same way, a do/while should never be on one line
03001   if Match(r'\s*do [^\s{]', line):
03002     error(filename, linenum, 'whitespace/newline', 4,
03003           'do/while clauses should not be on a single line')
03004 
03005   # Block bodies should not be followed by a semicolon.  Due to C++11
03006   # brace initialization, there are more places where semicolons are
03007   # required than not, so we use a whitelist approach to check these
03008   # rather than a blacklist.  These are the places where "};" should
03009   # be replaced by just "}":
03010   # 1. Some flavor of block following closing parenthesis:
03011   #    for (;;) {};
03012   #    while (...) {};
03013   #    switch (...) {};
03014   #    Function(...) {};
03015   #    if (...) {};
03016   #    if (...) else if (...) {};
03017   #
03018   # 2. else block:
03019   #    if (...) else {};
03020   #
03021   # 3. const member function:
03022   #    Function(...) const {};
03023   #
03024   # 4. Block following some statement:
03025   #    x = 42;
03026   #    {};
03027   #
03028   # 5. Block at the beginning of a function:
03029   #    Function(...) {
03030   #      {};
03031   #    }
03032   #
03033   #    Note that naively checking for the preceding "{" will also match
03034   #    braces inside multi-dimensional arrays, but this is fine since
03035   #    that expression will not contain semicolons.
03036   #
03037   # 6. Block following another block:
03038   #    while (true) {}
03039   #    {};
03040   #
03041   # 7. End of namespaces:
03042   #    namespace {};
03043   #
03044   #    These semicolons seem far more common than other kinds of
03045   #    redundant semicolons, possibly due to people converting classes
03046   #    to namespaces.  For now we do not warn for this case.
03047   #
03048   # Try matching case 1 first.
03049   match = Match(r'^(.*\)\s*)\{', line)
03050   if match:
03051     # Matched closing parenthesis (case 1).  Check the token before the
03052     # matching opening parenthesis, and don't warn if it looks like a
03053     # macro.  This avoids these false positives:
03054     #  - macro that defines a base class
03055     #  - multi-line macro that defines a base class
03056     #  - macro that defines the whole class-head
03057     #
03058     # But we still issue warnings for macros that we know are safe to
03059     # warn, specifically:
03060     #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
03061     #  - TYPED_TEST
03062     #  - INTERFACE_DEF
03063     #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
03064     #
03065     # We implement a whitelist of safe macros instead of a blacklist of
03066     # unsafe macros, even though the latter appears less frequently in
03067     # google code and would have been easier to implement.  This is because
03068     # the downside of getting the whitelist wrong is only some extra
03069     # semicolons, while getting the blacklist wrong would result in
03070     # compile errors.
03071     #
03072     # In addition to macros, we also don't want to warn on compound
03073     # literals.
03074     closing_brace_pos = match.group(1).rfind(')')
03075     opening_parenthesis = ReverseCloseExpression(
03076         clean_lines, linenum, closing_brace_pos)
03077     if opening_parenthesis[2] > -1:
03078       line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
03079       macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
03080       if ((macro and
03081            macro.group(1) not in (
03082                'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
03083                'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
03084                'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
03085           Search(r'\s+=\s*$', line_prefix)):
03086         match = None
03087 
03088   else:
03089     # Try matching cases 2-3.
03090     match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
03091     if not match:
03092       # Try matching cases 4-6.  These are always matched on separate lines.
03093       #
03094       # Note that we can't simply concatenate the previous line to the
03095       # current line and do a single match, otherwise we may output
03096       # duplicate warnings for the blank line case:
03097       #   if (cond) {
03098       #     // blank line
03099       #   }
03100       prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
03101       if prevline and Search(r'[;{}]\s*$', prevline):
03102         match = Match(r'^(\s*)\{', line)
03103 
03104   # Check matching closing brace
03105   if match:
03106     (endline, endlinenum, endpos) = CloseExpression(
03107         clean_lines, linenum, len(match.group(1)))
03108     if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
03109       # Current {} pair is eligible for semicolon check, and we have found
03110       # the redundant semicolon, output warning here.
03111       #
03112       # Note: because we are scanning forward for opening braces, and
03113       # outputting warnings for the matching closing brace, if there are
03114       # nested blocks with trailing semicolons, we will get the error
03115       # messages in reversed order.
03116       error(filename, endlinenum, 'readability/braces', 4,
03117             "You don't need a ; after a }")
03118 
03119 
03120 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
03121   """Look for empty loop/conditional body with only a single semicolon.
03122 
03123   Args:
03124     filename: The name of the current file.
03125     clean_lines: A CleansedLines instance containing the file.
03126     linenum: The number of the line to check.
03127     error: The function to call with any errors found.
03128   """
03129 
03130   # Search for loop keywords at the beginning of the line.  Because only
03131   # whitespace is allowed before the keywords, this will also ignore most
03132   # do-while-loops, since those lines should start with a closing brace.
03133   #
03134   # We also check "if" blocks here, since an empty conditional block
03135   # is likely an error.
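  # For illustration, "while (Poll());" or "if (done);" would typically be
  # flagged below, while "while (Poll()) {}" would not.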
03136   line = clean_lines.elided[linenum]
03137   matched = Match(r'\s*(for|while|if)\s*\(', line)
03138   if matched:
03139     # Find the end of the conditional expression
03140     (end_line, end_linenum, end_pos) = CloseExpression(
03141         clean_lines, linenum, line.find('('))
03142 
03143     # Output warning if what follows the condition expression is a semicolon.
03144     # No warning for all other cases, including whitespace or newline, since we
03145     # have a separate check for semicolons preceded by whitespace.
03146     if end_pos >= 0 and Match(r';', end_line[end_pos:]):
03147       if matched.group(1) == 'if':
03148         error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
03149               'Empty conditional bodies should use {}')
03150       else:
03151         error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
03152               'Empty loop bodies should use {} or continue')
03153 
03154 
03155 def CheckCheck(filename, clean_lines, linenum, error):
03156   """Checks the use of CHECK and EXPECT macros.
03157 
03158   Args:
03159     filename: The name of the current file.
03160     clean_lines: A CleansedLines instance containing the file.
03161     linenum: The number of the line to check.
03162     error: The function to call with any errors found.
03163   """
03164 
03165   # Decide the set of replacement macros that should be suggested
03166   lines = clean_lines.elided
03167   check_macro = None
03168   start_pos = -1
03169   for macro in _CHECK_MACROS:
03170     i = lines[linenum].find(macro)
03171     if i >= 0:
03172       check_macro = macro
03173 
03174       # Find opening parenthesis.  Do a regular expression match here
03175       # to make sure that we are matching the expected CHECK macro, as
03176       # opposed to some other macro that happens to contain the CHECK
03177       # substring.
03178       matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
03179       if not matched:
03180         continue
03181       start_pos = len(matched.group(1))
03182       break
03183   if not check_macro or start_pos < 0:
03184     # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
03185     return
03186 
03187   # Find end of the boolean expression by matching parentheses
03188   (last_line, end_line, end_pos) = CloseExpression(
03189       clean_lines, linenum, start_pos)
03190   if end_pos < 0:
03191     return
03192   if linenum == end_line:
03193     expression = lines[linenum][start_pos + 1:end_pos - 1]
03194   else:
03195     expression = lines[linenum][start_pos + 1:]
03196     for i in xrange(linenum + 1, end_line):
03197       expression += lines[i]
03198     expression += last_line[0:end_pos - 1]
03199 
03200   # Parse expression so that we can take parentheses into account.
03201   # This avoids false positives for inputs like "CHECK((a < 4) == b)",
03202   # which is not replaceable by CHECK_LE.
03203   lhs = ''
03204   rhs = ''
03205   operator = None
03206   while expression:
03207     matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
03208                     r'==|!=|>=|>|<=|<|\()(.*)$', expression)
03209     if matched:
03210       token = matched.group(1)
03211       if token == '(':
03212         # Parenthesized operand
03213         expression = matched.group(2)
03214         (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
03215         if end < 0:
03216           return  # Unmatched parenthesis
03217         lhs += '(' + expression[0:end]
03218         expression = expression[end:]
03219       elif token in ('&&', '||'):
03220         # Logical and/or operators.  This means the expression
03221         # contains more than one term, for example:
03222         #   CHECK(42 < a && a < b);
03223         #
03224         # These are not replaceable with CHECK_LE, so bail out early.
03225         return
03226       elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
03227         # Non-relational operator
03228         lhs += token
03229         expression = matched.group(2)
03230       else:
03231         # Relational operator
03232         operator = token
03233         rhs = matched.group(2)
03234         break
03235     else:
03236       # Unparenthesized operand.  Instead of appending to lhs one character
03237       # at a time, we do another regular expression match to consume several
03238       # characters at once if possible.  Trivial benchmark shows that this
03239       # is more efficient when the operands are longer than a single
03240       # character, which is generally the case.
03241       matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
03242       if not matched:
03243         matched = Match(r'^(\s*\S)(.*)$', expression)
03244         if not matched:
03245           break
03246       lhs += matched.group(1)
03247       expression = matched.group(2)
03248 
03249   # Only apply checks if we got all parts of the boolean expression
03250   if not (lhs and operator and rhs):
03251     return
03252 
03253   # Check that rhs does not contain logical operators.  We already know
03254   # that lhs is fine since the loop above parses out && and ||.
03255   if rhs.find('&&') > -1 or rhs.find('||') > -1:
03256     return
03257 
03258   # At least one of the operands must be a constant literal.  This is
03259   # to avoid suggesting replacements for unprintable things like
03260   # CHECK(variable != iterator)
03261   #
03262   # The following pattern matches decimal and hex integers, strings, and
03263   # characters (in that order).
03264   lhs = lhs.strip()
03265   rhs = rhs.strip()
03266   match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
03267   if Match(match_constant, lhs) or Match(match_constant, rhs):
03268     # Note: since we know both lhs and rhs, we can provide a more
03269     # descriptive error message like:
03270     #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
03271     # Instead of:
03272     #   Consider using CHECK_EQ instead of CHECK(a == b)
03273     #
03274     # We are still keeping the less descriptive message because if lhs
03275     # or rhs gets long, the error message might become unreadable.
03276     error(filename, linenum, 'readability/check', 2,
03277           'Consider using %s instead of %s(a %s b)' % (
03278               _CHECK_REPLACEMENT[check_macro][operator],
03279               check_macro, operator))
03280 
03281 
03282 def CheckAltTokens(filename, clean_lines, linenum, error):
03283   """Check alternative keywords being used in boolean expressions.
03284 
03285   Args:
03286     filename: The name of the current file.
03287     clean_lines: A CleansedLines instance containing the file.
03288     linenum: The number of the line to check.
03289     error: The function to call with any errors found.
03290   """
03291   line = clean_lines.elided[linenum]
03292 
03293   # Avoid preprocessor lines
03294   if Match(r'^\s*#', line):
03295     return
03296 
03297   # Last ditch effort to avoid multi-line comments.  This will not help
03298   # if the comment started before the current line or ended after the
03299   # current line, but it catches most of the false positives.  At least,
03300   # it provides a way to work around this warning for people who use
03301   # multi-line comments in preprocessor macros.
03302   #
03303   # TODO(unknown): remove this once cpplint has better support for
03304   # multi-line comments.
03305   if line.find('/*') >= 0 or line.find('*/') >= 0:
03306     return
03307 
03308   for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
03309     error(filename, linenum, 'readability/alt_tokens', 2,
03310           'Use operator %s instead of %s' % (
03311               _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
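  # For illustration, "if (a and b) {" would typically produce a suggestion
  # to use "&&" instead of "and", assuming 'and' is listed in
  # _ALT_TOKEN_REPLACEMENT.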
03312 
03313 
03314 def GetLineWidth(line):
03315   """Determines the width of the line in column positions.
03316 
03317   Args:
03318     line: A string, which may be a Unicode string.
03319 
03320   Returns:
03321     The width of the line in column positions, accounting for Unicode
03322     combining characters and wide characters.
03323   """
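  # For illustration, a full-width CJK character such as u'\u4e2d' would
  # typically count as 2 columns here, while a combining accent such as
  # u'\u0301' adds 0.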
03324   if isinstance(line, unicode):
03325     width = 0
03326     for uc in unicodedata.normalize('NFC', line):
03327       if unicodedata.east_asian_width(uc) in ('W', 'F'):
03328         width += 2
03329       elif not unicodedata.combining(uc):
03330         width += 1
03331     return width
03332   else:
03333     return len(line)
03334 
03335 
03336 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
03337                error):
03338   """Checks rules from the 'C++ style rules' section of cppguide.html.
03339 
03340   Most of these rules are hard to test (naming, comment style), but we
03341   do what we can.  In particular we check for 2-space indents, line lengths,
03342   tab usage, spaces inside code, etc.
03343 
03344   Args:
03345     filename: The name of the current file.
03346     clean_lines: A CleansedLines instance containing the file.
03347     linenum: The number of the line to check.
03348     file_extension: The extension (without the dot) of the filename.
03349     nesting_state: A _NestingState instance which maintains information about
03350                    the current stack of nested blocks being parsed.
03351     error: The function to call with any errors found.
03352   """
03353 
03354   # Don't use "elided" lines here, otherwise we can't check commented lines.
03355   # Don't want to use "raw" either, because we don't want to check inside C++11
03356   # raw strings.
03357   raw_lines = clean_lines.lines_without_raw_strings
03358   line = raw_lines[linenum]
03359 
03360   if line.find('\t') != -1:
03361     error(filename, linenum, 'whitespace/tab', 1,
03362           'Tab found; better to use spaces')
03363 
03364   # One or three blank spaces at the beginning of the line is weird; it's
03365   # hard to reconcile that with 2-space indents.
03366   # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
03367   # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
03368   # if(RLENGTH > 20) complain = 0;
03369   # if(match($0, " +(error|private|public|protected):")) complain = 0;
03370   # if(match(prev, "&& *$")) complain = 0;
03371   # if(match(prev, "\\|\\| *$")) complain = 0;
03372   # if(match(prev, "[\",=><] *$")) complain = 0;
03373   # if(match($0, " <<")) complain = 0;
03374   # if(match(prev, " +for \\(")) complain = 0;
03375   # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
03376   initial_spaces = 0
03377   cleansed_line = clean_lines.elided[linenum]
03378   while initial_spaces < len(line) and line[initial_spaces] == ' ':
03379     initial_spaces += 1
03380   if line and line[-1].isspace():
03381     error(filename, linenum, 'whitespace/end_of_line', 4,
03382           'Line ends in whitespace.  Consider deleting these extra spaces.')
03383   # There are certain situations where we allow one space, notably for section labels
03384   elif ((initial_spaces == 1 or initial_spaces == 3) and
03385         not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
03386     error(filename, linenum, 'whitespace/indent', 3,
03387           'Weird number of spaces at line-start.  '
03388           'Are you using a 2-space indent?')
03389 
03390   # Check if the line is a header guard.
03391   is_header_guard = False
03392   if file_extension == 'h':
03393     cppvar = GetHeaderGuardCPPVariable(filename)
03394     if (line.startswith('#ifndef %s' % cppvar) or
03395         line.startswith('#define %s' % cppvar) or
03396         line.startswith('#endif  // %s' % cppvar)):
03397       is_header_guard = True
03398   # #include lines and header guards can be long, since there's no clean way to
03399   # split them.
03400   #
03401   # URLs can be long too.  It's possible to split these, but it makes them
03402   # harder to cut&paste.
03403   #
03404   # The "$Id:...$" comment may also get very long without it being the
03405   # developer's fault.
03406   if (not line.startswith('#include') and not is_header_guard and
03407       not Match(r'^\s*//.*http(s?)://\S*$', line) and
03408       not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
03409     line_width = GetLineWidth(line)
03410     extended_length = int((_line_length * 1.25))
03411     if line_width > extended_length:
03412       error(filename, linenum, 'whitespace/line_length', 4,
03413             'Lines should very rarely be longer than %i characters' %
03414             extended_length)
03415     elif line_width > _line_length:
03416       error(filename, linenum, 'whitespace/line_length', 2,
03417             'Lines should be <= %i characters long' % _line_length)
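    # For illustration, assuming the default line length of 80, a 90-column
    # line would get the level-2 warning above and a 110-column line the
    # level-4 warning (the extended limit being int(80 * 1.25) == 100).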
03418 
03419   if (cleansed_line.count(';') > 1 and
03420       # for loops are allowed two ;'s (and may run over two lines).
03421       cleansed_line.find('for') == -1 and
03422       (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
03423        GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
03424       # It's ok to have many commands in a switch case that fits in 1 line
03425       not ((cleansed_line.find('case ') != -1 or
03426             cleansed_line.find('default:') != -1) and
03427            cleansed_line.find('break;') != -1)):
03428     error(filename, linenum, 'whitespace/newline', 0,
03429           'More than one command on the same line')
03430 
03431   # Some more style checks
03432   CheckBraces(filename, clean_lines, linenum, error)
03433   CheckEmptyBlockBody(filename, clean_lines, linenum, error)
03434   CheckAccess(filename, clean_lines, linenum, nesting_state, error)
03435   CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
03436   CheckCheck(filename, clean_lines, linenum, error)
03437   CheckAltTokens(filename, clean_lines, linenum, error)
03438   classinfo = nesting_state.InnermostClass()
03439   if classinfo:
03440     CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
03441 
03442 
03443 _RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
03444 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
03445 # Matches the first component of a filename delimited by -s and _s. That is:
03446 #  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
03447 #  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
03448 #  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
03449 #  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
03450 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
03451 
03452 
03453 def _DropCommonSuffixes(filename):
03454   """Drops common suffixes like _test.cc or -inl.h from filename.
03455 
03456   For example:
03457     >>> _DropCommonSuffixes('foo/foo-inl.h')
03458     'foo/foo'
03459     >>> _DropCommonSuffixes('foo/bar/foo.cc')
03460     'foo/bar/foo'
03461     >>> _DropCommonSuffixes('foo/foo_internal.h')
03462     'foo/foo'
03463     >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
03464     'foo/foo_unusualinternal'
03465 
03466   Args:
03467     filename: The input filename.
03468 
03469   Returns:
03470     The filename with the common suffix removed.
03471   """
03472   for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
03473                  'inl.h', 'impl.h', 'internal.h'):
03474     if (filename.endswith(suffix) and len(filename) > len(suffix) and
03475         filename[-len(suffix) - 1] in ('-', '_')):
03476       return filename[:-len(suffix) - 1]
03477   return os.path.splitext(filename)[0]
03478 
03479 
03480 def _IsTestFilename(filename):
03481   """Determines if the given filename has a suffix that identifies it as a test.
03482 
03483   Args:
03484     filename: The input filename.
03485 
03486   Returns:
03487     True if 'filename' looks like a test, False otherwise.
03488   """
03489   if (filename.endswith('_test.cc') or
03490       filename.endswith('_unittest.cc') or
03491       filename.endswith('_regtest.cc')):
03492     return True
03493   else:
03494     return False
03495 
03496 
03497 def _ClassifyInclude(fileinfo, include, is_system):
03498   """Figures out what kind of header 'include' is.
03499 
03500   Args:
03501     fileinfo: The current file cpplint is running over. A FileInfo instance.
03502     include: The path to a #included file.
03503     is_system: True if the #include used <> rather than "".
03504 
03505   Returns:
03506     One of the _XXX_HEADER constants.
03507 
03508   For example:
03509     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
03510     _C_SYS_HEADER
03511     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
03512     _CPP_SYS_HEADER
03513     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
03514     _LIKELY_MY_HEADER
03515     >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
03516     ...                  'bar/foo_other_ext.h', False)
03517     _POSSIBLE_MY_HEADER
03518     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
03519     _OTHER_HEADER
03520   """
03521   # This is a list of all standard c++ header files, except
03522   # those already checked for above.
03523   is_cpp_h = include in _CPP_HEADERS
03524 
03525   if is_system:
03526     if is_cpp_h:
03527       return _CPP_SYS_HEADER
03528     else:
03529       return _C_SYS_HEADER
03530 
03531   # If the target file and the include we're checking share a
03532   # basename when we drop common extensions, and the include
03533   # lives in . , then it's likely to be owned by the target file.
03534   target_dir, target_base = (
03535       os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
03536   include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
03537   if target_base == include_base and (
03538       include_dir == target_dir or
03539       include_dir == os.path.normpath(target_dir + '/../public')):
03540     return _LIKELY_MY_HEADER
03541 
03542   # If the target and include share some initial basename
03543   # component, it's possible the target is implementing the
03544   # include, so it's allowed to be first, but we'll never
03545   # complain if it's not there.
03546   target_first_component = _RE_FIRST_COMPONENT.match(target_base)
03547   include_first_component = _RE_FIRST_COMPONENT.match(include_base)
03548   if (target_first_component and include_first_component and
03549       target_first_component.group(0) ==
03550       include_first_component.group(0)):
03551     return _POSSIBLE_MY_HEADER
03552 
03553   return _OTHER_HEADER
03554 
03555 
03556 
03557 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
03558   """Check rules that are applicable to #include lines.
03559 
03560   Strings on #include lines are NOT removed from elided line, to make
03561   certain tasks easier. However, to prevent false positives, checks
03562   applicable to #include lines in CheckLanguage must be put here.
03563 
03564   Args:
03565     filename: The name of the current file.
03566     clean_lines: A CleansedLines instance containing the file.
03567     linenum: The number of the line to check.
03568     include_state: An _IncludeState instance in which the headers are inserted.
03569     error: The function to call with any errors found.
03570   """
03571   fileinfo = FileInfo(filename)
03572 
03573   line = clean_lines.lines[linenum]
03574 
03575   # "include" should use the new style "foo/bar.h" instead of just "bar.h"
03576   if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
03577     error(filename, linenum, 'build/include', 4,
03578           'Include the directory when naming .h files')
03579 
03580   # we shouldn't include a file more than once. actually, there are a
03581   # handful of instances where doing so is okay, but in general it's
03582   # not.
03583   match = _RE_PATTERN_INCLUDE.search(line)
03584   if match:
03585     include = match.group(2)
03586     is_system = (match.group(1) == '<')
03587     if include in include_state:
03588       error(filename, linenum, 'build/include', 4,
03589             '"%s" already included at %s:%s' %
03590             (include, filename, include_state[include]))
03591     else:
03592       include_state[include] = linenum
03593 
03594       # We want to ensure that headers appear in the right order:
03595       # 1) for foo.cc, foo.h  (preferred location)
03596       # 2) c system files
03597       # 3) cpp system files
03598       # 4) for foo.cc, foo.h  (deprecated location)
03599       # 5) other google headers
03600       #
03601       # We classify each include statement as one of those 5 types
03602       # using a number of techniques. The include_state object keeps
03603       # track of the highest type seen, and complains if we see a
03604       # lower type after that.
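      #
      # For illustration, a conforming order in foo.cc would look roughly
      # like:
      #   #include "foo/foo.h"
      #   #include <stdio.h>
      #   #include <string>
      #   #include "bar/other_google_header.h"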
03605       error_message = include_state.CheckNextIncludeOrder(
03606           _ClassifyInclude(fileinfo, include, is_system))
03607       if error_message:
03608         error(filename, linenum, 'build/include_order', 4,
03609               '%s. Should be: %s.h, c system, c++ system, other.' %
03610               (error_message, fileinfo.BaseName()))
03611       canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
03612       if not include_state.IsInAlphabeticalOrder(
03613           clean_lines, linenum, canonical_include):
03614         error(filename, linenum, 'build/include_alpha', 4,
03615               'Include "%s" not in alphabetical order' % include)
03616       include_state.SetLastHeader(canonical_include)
03617 
03618   # Look for any of the stream classes that are part of standard C++.
03619   match = _RE_PATTERN_INCLUDE.match(line)
03620   if match:
03621     include = match.group(2)
03622     if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
03623       # Many unit tests use cout, so we exempt them.
03624       if not _IsTestFilename(filename):
03625         error(filename, linenum, 'readability/streams', 3,
03626               'Streams are highly discouraged.')
03627 
03628 
03629 def _GetTextInside(text, start_pattern):
03630   r"""Retrieves all the text between matching open and close parentheses.
03631 
03632   Given a string of lines and a regular expression string, retrieve all the text
03633   following the expression and between opening punctuation symbols like
03634   (, [, or {, and the matching close-punctuation symbol.  This properly
03635   handles nested occurrences of the punctuation, so for text like
03636     printf(a(), b(c()));
03637   a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
03638   start_pattern must match a string ending with an opening punctuation symbol.
03639 
03640   Args:
03641     text: The text to extract from. Its comments and strings must be elided.
03642            It can be a single line or span multiple lines.
03643     start_pattern: The regexp string indicating where to start extracting
03644                    the text.
03645   Returns:
03646     The extracted text.
03647     None if either the opening string or ending punctuation could not be found.
03648   """
03649   # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
03650   # rewritten to use _GetTextInside (and use inferior regexp matching today).
03651 
03652   # Map each opening punctuation symbol to its matching closing symbol.
03653   matching_punctuation = {'(': ')', '{': '}', '[': ']'}
03654   closing_punctuation = set(matching_punctuation.itervalues())
03655 
03656   # Find the position to start extracting text.
03657   match = re.search(start_pattern, text, re.M)
03658   if not match:  # start_pattern not found in text.
03659     return None
03660   start_position = match.end(0)
03661 
03662   assert start_position > 0, (
03663       'start_pattern must end with an opening punctuation.')
03664   assert text[start_position - 1] in matching_punctuation, (
03665       'start_pattern must end with an opening punctuation.')
03666   # Stack of closing punctuations we expect to have in text after position.
03667   punctuation_stack = [matching_punctuation[text[start_position - 1]]]
03668   position = start_position
03669   while punctuation_stack and position < len(text):
03670     if text[position] == punctuation_stack[-1]:
03671       punctuation_stack.pop()
03672     elif text[position] in closing_punctuation:
03673       # A closing punctuation without matching opening punctuations.
03674       return None
03675     elif text[position] in matching_punctuation:
03676       punctuation_stack.append(matching_punctuation[text[position]])
03677     position += 1
03678   if punctuation_stack:
03679     # Opening punctuations left without matching close-punctuations.
03680     return None
03681   # punctuations match.
03682   return text[start_position:position - 1]
03683 
03684 
03685 # Patterns for matching call-by-reference parameters.
03686 #
03687 # Supports nested templates up to 2 levels deep using this messy pattern:
03688 #   < (?: < (?: < [^<>]*
03689 #               >
03690 #           |   [^<>] )*
03691 #         >
03692 #     |   [^<>] )*
03693 #   >
03694 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
03695 _RE_PATTERN_TYPE = (
03696     r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
03697     r'(?:\w|'
03698     r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
03699     r'::)+')
03700 # A call-by-reference parameter ends with '& identifier'.
03701 _RE_PATTERN_REF_PARAM = re.compile(
03702     r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
03703     r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
03704 # A call-by-const-reference parameter either ends with 'const& identifier'
03705 # or looks like 'const type& identifier' when 'type' is atomic.
03706 _RE_PATTERN_CONST_REF_PARAM = (
03707     r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
03708     r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
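# For illustration, in a parameter list a fragment like "const Foo& bar)"
# would typically match both patterns above, while "Foo& bar)" matches only
# _RE_PATTERN_REF_PARAM.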
03709 
03710 
03711 def CheckLanguage(filename, clean_lines, linenum, file_extension,
03712                   include_state, nesting_state, error):
03713   """Checks rules from the 'C++ language rules' section of cppguide.html.
03714 
03715   Some of these rules are hard to test (function overloading, using
03716   uint32 inappropriately), but we do the best we can.
03717 
03718   Args:
03719     filename: The name of the current file.
03720     clean_lines: A CleansedLines instance containing the file.
03721     linenum: The number of the line to check.
03722     file_extension: The extension (without the dot) of the filename.
03723     include_state: An _IncludeState instance in which the headers are inserted.
03724     nesting_state: A _NestingState instance which maintains information about
03725                    the current stack of nested blocks being parsed.
03726     error: The function to call with any errors found.
03727   """
03728   # If the line is empty or consists entirely of a comment, no need to
03729   # check it.
03730   line = clean_lines.elided[linenum]
03731   if not line:
03732     return
03733 
03734   match = _RE_PATTERN_INCLUDE.search(line)
03735   if match:
03736     CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
03737     return
03738 
03739   # Reset include state across preprocessor directives.  This is meant
03740   # to silence warnings for conditional includes.
03741   if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
03742     include_state.ResetSection()
03743 
03744   # Make Windows paths like Unix.
03745   fullname = os.path.abspath(filename).replace('\\', '/')
03746 
03747   # TODO(unknown): figure out if they're using default arguments in fn proto.
03748 
03749   # Check to see if they're using a conversion function cast.
03750   # I just try to capture the most common basic types, though there are more.
03751   # Parameterless conversion functions, such as bool(), are allowed as they are
03752   # probably a member operator declaration or default constructor.
03753   match = Search(
03754       r'(\bnew\s+)?\b'  # Grab 'new' operator, if it's there
03755       r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
03756       r'(\([^)].*)', line)
03757   if match:
03758     matched_new = match.group(1)
03759     matched_type = match.group(2)
03760     matched_funcptr = match.group(3)
03761 
03762     # gMock methods are defined using some variant of MOCK_METHODx(name, type)
03763     # where type may be float(), int(string), etc.  Without context they are
03764     # virtually indistinguishable from int(x) casts. Likewise, gMock's
03765     # MockCallback takes a template parameter of the form return_type(arg_type),
03766     # which looks much like the cast we're trying to detect.
03767     #
03768     # std::function<> wrapper has a similar problem.
03769     #
03770     # Return types for function pointers also look like casts if they
03771     # don't have an extra space.
03772     if (matched_new is None and  # If new operator, then this isn't a cast
03773         not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
03774              Search(r'\bMockCallback<.*>', line) or
03775              Search(r'\bstd::function<.*>', line)) and
03776         not (matched_funcptr and
03777              Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
03778                    matched_funcptr))):
03779       # Try a bit harder to catch gmock lines: the only place where
03780       # something looks like an old-style cast is where we declare the
03781       # return type of the mocked method, and the only time when we
03782       # are missing context is if MOCK_METHOD was split across
03783       # multiple lines.  The missing MOCK_METHOD is usually one or two
03784       # lines back, so scan back one or two lines.
03785       #
03786       # It's not possible for gmock macros to appear in the first 2
03787       # lines, since the class head + section name takes up 2 lines.
03788       if (linenum < 2 or
03789           not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
03790                      clean_lines.elided[linenum - 1]) or
03791                Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
03792                      clean_lines.elided[linenum - 2]))):
03793         error(filename, linenum, 'readability/casting', 4,
03794               'Using deprecated casting style.  '
03795               'Use static_cast<%s>(...) instead' %
03796               matched_type)
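      # For illustration, "int x = int(2.5);" would typically be flagged
      # above, while "static_cast<int>(2.5)" and lines matching the gMock /
      # std::function exemptions would not.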
03797 
03798   CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
03799                   'static_cast',
03800                   r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
03801 
03802   # This doesn't catch all cases. Consider (const char * const)"hello".
03803   #
03804   # (char *) "foo" should always be a const_cast (reinterpret_cast won't
03805   # compile).
03806   if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
03807                      'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
03808     pass
03809   else:
03810     # Check pointer casts for other than string constants
03811     CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
03812                     'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
03813 
03814   # In addition, we look for people taking the address of a cast.  This
03815   # is dangerous -- casts can assign to temporaries, so the pointer doesn't
03816   # point where you think.
03817   match = Search(
03818       r'(?:&\(([^)]+)\)[\w(])|'
03819       r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
03820   if match and match.group(1) != '*':
03821     error(filename, linenum, 'runtime/casting', 4,
03822           ('Are you taking an address of a cast?  '
03823            'This is dangerous: could be a temp var.  '
03824            'Take the address before doing the cast, rather than after'))
03825 
03826   # Create an extended_line, which is the concatenation of the current and
03827   # next lines, for more effective checking of code that may span more than one
03828   # line.
03829   if linenum + 1 < clean_lines.NumLines():
03830     extended_line = line + clean_lines.elided[linenum + 1]
03831   else:
03832     extended_line = line
03833 
03834   # Check for people declaring static/global STL strings at the top level.
03835   # This is dangerous because the C++ language does not guarantee that
03836   # globals with constructors are initialized before the first access.
03837   match = Match(
03838       r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
03839       line)
03840   # Make sure it's not a function.
03841   # Function template specialization looks like: "string foo<Type>(...".
03842   # Class template definitions look like: "string Foo<Type>::Method(...".
03843   #
03844   # Also ignore things that look like operators.  These are matched separately
03845   # because operator names cross non-word boundaries.  If we change the pattern
03846   # above, we would decrease the accuracy of matching identifiers.
03847   if (match and
03848       not Search(r'\boperator\W', line) and
03849       not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
03850     error(filename, linenum, 'runtime/string', 4,
03851           'For a static/global string constant, use a C style string instead: '
03852           '"%schar %s[]".' %
03853           (match.group(1), match.group(2)))
03854 
03855   if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
03856     error(filename, linenum, 'runtime/init', 4,
03857           'You seem to be initializing a member variable with itself.')
03858 
03859   if file_extension == 'h':
03860     # TODO(unknown): check that 1-arg constructors are explicit.
03861     #                How to tell it's a constructor?
03862     #                (handled in CheckForNonStandardConstructs for now)
03863     # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
03864     #                (level 1 error)
03865     pass
03866 
03867   # Check if people are using the verboten C basic types.  The only exception
03868   # we regularly allow is "unsigned short port" for port.
03869   if Search(r'\bshort port\b', line):
03870     if not Search(r'\bunsigned short port\b', line):
03871       error(filename, linenum, 'runtime/int', 4,
03872             'Use "unsigned short" for ports, not "short"')
03873   else:
03874     match = Search(r'\b(short|long(?! +double)|long long)\b', line)
03875     if match:
03876       error(filename, linenum, 'runtime/int', 4,
03877             'Use int16/int64/etc, rather than the C type %s' % match.group(1))
03878 
03879   # When snprintf is used, the second argument shouldn't be a literal.
03880   match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
03881   if match and match.group(2) != '0':
03882     # If 2nd arg is zero, snprintf is used to calculate size.
03883     error(filename, linenum, 'runtime/printf', 3,
03884           'If you can, use sizeof(%s) instead of %s as the 2nd arg '
03885           'to snprintf.' % (match.group(1), match.group(2)))
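  # For example, 'snprintf(buf, 10, "%s", src)' would be flagged here with the
  # suggestion to use sizeof(buf) as the second argument, while
  # 'snprintf(NULL, 0, "%s", src)' (used only to compute the required size)
  # would not.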
03886 
03887   # Check if some verboten C functions are being used.
03888   if Search(r'\bsprintf\b', line):
03889     error(filename, linenum, 'runtime/printf', 5,
03890           'Never use sprintf.  Use snprintf instead.')
03891   match = Search(r'\b(strcpy|strcat)\b', line)
03892   if match:
03893     error(filename, linenum, 'runtime/printf', 4,
03894           'Almost always, snprintf is better than %s' % match.group(1))
03895 
03896   # Check if some verboten operator overloading is going on
03897   # TODO(unknown): catch out-of-line unary operator&:
03898   #   class X {};
03899   #   int operator&(const X& x) { return 42; }  // unary operator&
03900   # The trick is it's hard to tell apart from binary operator&:
03901   #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
03902   if Search(r'\boperator\s*&\s*\(\s*\)', line):
03903     error(filename, linenum, 'runtime/operator', 4,
03904           'Unary operator& is dangerous.  Do not use it.')
03905 
03906   # Check for suspicious usage of "if" like
03907   # } if (a == b) {
03908   if Search(r'\}\s*if\s*\(', line):
03909     error(filename, linenum, 'readability/braces', 4,
03910           'Did you mean "else if"? If not, start a new line for "if".')
03911 
03912   # Check for potential format string bugs like printf(foo).
03913   # We constrain the pattern not to pick things like DocidForPrintf(foo).
03914   # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
03915   # TODO(sugawarayu): Catch the following case. Need to change the calling
03916   # convention of the whole function to process multiple line to handle it.
03917   #   printf(
03918   #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
03919   printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
03920   if printf_args:
03921     match = Match(r'([\w.\->()]+)$', printf_args)
03922     if match and match.group(1) != '__VA_ARGS__':
03923       function_name = re.search(r'\b((?:string)?printf)\s*\(',
03924                                 line, re.I).group(1)
03925       error(filename, linenum, 'runtime/printf', 4,
03926             'Potential format string bug. Do %s("%%s", %s) instead.'
03927             % (function_name, match.group(1)))
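  # For example, 'printf(msg)' and 'printf(msg.c_str())' would be flagged here
  # with the suggestion to write printf("%s", ...) instead, while
  # 'printf("%d\n", count)' would not.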
03928 
03929   # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
03930   match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
03931   if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
03932     error(filename, linenum, 'runtime/memset', 4,
03933           'Did you mean "memset(%s, 0, %s)"?'
03934           % (match.group(1), match.group(2)))
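  # For example, 'memset(buf, sizeof(buf), 0)' would be flagged here with the
  # suggestion 'memset(buf, 0, sizeof(buf))', while 'memset(buf, -1, 0)' would
  # not, since an integer literal as the second argument suggests the value
  # and size arguments were not accidentally swapped.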
03935 
03936   if Search(r'\busing namespace\b', line):
03937     error(filename, linenum, 'build/namespaces', 5,
03938           'Do not use namespace using-directives.  '
03939           'Use using-declarations instead.')
03940 
03941   # Detect variable-length arrays.
03942   match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
03943   if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
03944       match.group(3).find(']') == -1):
03945     # Split the size using space and arithmetic operators as delimiters.
03946     # If any of the resulting tokens are not compile time constants then
03947     # report the error.
03948     tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
03949     is_const = True
03950     skip_next = False
03951     for tok in tokens:
03952       if skip_next:
03953         skip_next = False
03954         continue
03955 
03956       if Search(r'sizeof\(.+\)', tok): continue
03957       if Search(r'arraysize\(\w+\)', tok): continue
03958 
03959       tok = tok.lstrip('(')
03960       tok = tok.rstrip(')')
03961       if not tok: continue
03962       if Match(r'\d+', tok): continue
03963       if Match(r'0[xX][0-9a-fA-F]+', tok): continue
03964       if Match(r'k[A-Z0-9]\w*', tok): continue
03965       if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
03966       if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
03967       # A catch all for tricky sizeof cases, including 'sizeof expression',
03968       # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
03969       # requires skipping the next token because we split on ' ' and '*'.
03970       if tok.startswith('sizeof'):
03971         skip_next = True
03972         continue
03973       is_const = False
03974       break
03975     if not is_const:
03976       error(filename, linenum, 'runtime/arrays', 1,
03977             'Do not use variable-length arrays.  Use an appropriately named '
03978             "('k' followed by CamelCase) compile-time constant for the size.")
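  # For example, 'int buffer[max_count * 2];' would be flagged here when
  # max_count is not recognizably a compile-time constant, while
  # 'int buffer[kMaxCount * 2];' and 'char raw[sizeof(LogEntry)];' would not.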
03979 
03980   # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
03981   # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
03982   # in the class declaration.
03983   match = Match(
03984       (r'\s*'
03985        r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
03986        r'\(.*\);$'),
03987       line)
03988   if match and linenum + 1 < clean_lines.NumLines():
03989     next_line = clean_lines.elided[linenum + 1]
03990     # We allow some, but not all, declarations of variables to be present
03991     # in the statement that defines the class.  The [\w\*,\s]* fragment of
03992     # the regular expression below allows users to declare instances of
03993     # the class or pointers to instances, but not less common types such
03994     # as function pointers or arrays.  It's a tradeoff between allowing
03995     # reasonable code and avoiding trying to parse more C++ using regexps.
03996     if not Search(r'^\s*}[\w\*,\s]*;', next_line):
03997       error(filename, linenum, 'readability/constructors', 3,
03998             match.group(1) + ' should be the last thing in the class')
03999 
04000   # Check for use of unnamed namespaces in header files.  Registration
04001   # macros are typically OK, so we allow use of "namespace {" on lines
04002   # that end with backslashes.
04003   if (file_extension == 'h'
04004       and Search(r'\bnamespace\s*{', line)
04005       and line[-1] != '\\'):
04006     error(filename, linenum, 'build/namespaces', 4,
04007           'Do not use unnamed namespaces in header files.  See '
04008           'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
04009           ' for more information.')
04010 
04011 def CheckForNonConstReference(filename, clean_lines, linenum,
04012                               nesting_state, error):
04013   """Check for non-const references.
04014 
04015   Separate from CheckLanguage since it scans backwards from current
04016   line, instead of scanning forward.
04017 
04018   Args:
04019     filename: The name of the current file.
04020     clean_lines: A CleansedLines instance containing the file.
04021     linenum: The number of the line to check.
04022     nesting_state: A _NestingState instance which maintains information about
04023                    the current stack of nested blocks being parsed.
04024     error: The function to call with any errors found.
04025   """
04026   # Do nothing if there is no '&' on current line.
04027   line = clean_lines.elided[linenum]
04028   if '&' not in line:
04029     return
04030 
04031   # Long type names may be broken across multiple lines, usually in one
04032   # of these forms:
04033   #   LongType
04034   #       ::LongTypeContinued &identifier
04035   #   LongType::
04036   #       LongTypeContinued &identifier
04037   #   LongType<
04038   #       ...>::LongTypeContinued &identifier
04039   #
04040   # If we detected a type split across two lines, join the previous
04041   # line to current line so that we can match const references
04042   # accordingly.
04043   #
04044   # Note that this only scans back one line, since scanning back an
04045   # arbitrary number of lines would be expensive.  If you have a type
04046   # that spans more than 2 lines, please use a typedef.
04047   if linenum > 1:
04048     previous = None
04049     if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
04050       # previous_line\n + ::current_line
04051       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
04052                         clean_lines.elided[linenum - 1])
04053     elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
04054       # previous_line::\n + current_line
04055       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
04056                         clean_lines.elided[linenum - 1])
04057     if previous:
04058       line = previous.group(1) + line.lstrip()
04059     else:
04060       # Check for templated parameter that is split across multiple lines
04061       endpos = line.rfind('>')
04062       if endpos > -1:
04063         (_, startline, startpos) = ReverseCloseExpression(
04064             clean_lines, linenum, endpos)
04065         if startpos > -1 and startline < linenum:
04066           # Found the matching < on an earlier line, collect all
04067           # pieces up to current line.
04068           line = ''
04069           for i in xrange(startline, linenum + 1):
04070             line += clean_lines.elided[i].strip()
04071 
04072   # Check for non-const references in function parameters.  A single '&' may
04073   # be found in the following places:
04074   #   inside expression: binary & for bitwise AND
04075   #   inside expression: unary & for taking the address of something
04076   #   inside declarators: reference parameter
04077   # We will exclude the first two cases by checking that we are not inside a
04078   # function body, including one that was just introduced by a trailing '{'.
04079   # TODO(unknown): Doesn't account for preprocessor directives.
04080   # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
04081   check_params = False
04082   if not nesting_state.stack:
04083     check_params = True  # top level
04084   elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
04085         isinstance(nesting_state.stack[-1], _NamespaceInfo)):
04086     check_params = True  # within class or namespace
04087   elif Match(r'.*{\s*$', line):
04088     if (len(nesting_state.stack) == 1 or
04089         isinstance(nesting_state.stack[-2], _ClassInfo) or
04090         isinstance(nesting_state.stack[-2], _NamespaceInfo)):
04091       check_params = True  # just opened global/class/namespace block
04092   # We allow non-const references in a few standard places, like functions
04093   # called "swap()" or iostream operators like "<<" or ">>".  Do not check
04094   # those function parameters.
04095   #
04096   # We also accept & in static_assert, which looks like a function but
04097   # it's actually a declaration expression.
04098   whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
04099                            r'operator\s*[<>][<>]|'
04100                            r'static_assert|COMPILE_ASSERT'
04101                            r')\s*\(')
04102   if Search(whitelisted_functions, line):
04103     check_params = False
04104   elif not Search(r'\S+\([^)]*$', line):
04105     # Don't see a whitelisted function on this line.  Actually we
04106     # didn't see any function name on this line, so this is likely a
04107     # multi-line parameter list.  Try a bit harder to catch this case.
04108     for i in xrange(2):
04109       if (linenum > i and
04110           Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
04111         check_params = False
04112         break
04113 
04114   if check_params:
04115     decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
04116     for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
04117       if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
04118         error(filename, linenum, 'runtime/references', 2,
04119               'Is this a non-const reference? '
04120               'If so, make const or use a pointer: ' +
04121               ReplaceAll(' *<', '<', parameter))
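  # For example, a file-scope declaration such as 'void Update(Record& r);'
  # would typically be flagged here, while 'void Print(const Record& r);' and
  # whitelisted signatures such as 'void swap(Foo& a, Foo& b);' would not.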
04122 
04123 
04124 def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
04125                     error):
04126   """Checks for a C-style cast by looking for the pattern.
04127 
04128   Args:
04129     filename: The name of the current file.
04130     linenum: The number of the line to check.
04131     line: The line of code to check.
04132     raw_line: The raw line of code to check, with comments.
04133     cast_type: The string for the C++ cast to recommend.  This is either
04134       reinterpret_cast, static_cast, or const_cast, depending on the pattern.
04135     pattern: The regular expression used to find C-style casts.
04136     error: The function to call with any errors found.
04137 
04138   Returns:
04139     True if an error was emitted.
04140     False otherwise.
04141   """
04142   match = Search(pattern, line)
04143   if not match:
04144     return False
04145 
04146   # Exclude lines with sizeof, since sizeof looks like a cast.
04147   sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
04148   if sizeof_match:
04149     return False
04150 
04151   # operator++(int) and operator--(int)
04152   if (line[0:match.start(1) - 1].endswith(' operator++') or
04153       line[0:match.start(1) - 1].endswith(' operator--')):
04154     return False
04155 
04156   # A single unnamed argument for a function tends to look like an old
04157   # style cast.  If we see one, don't issue warnings for deprecated
04158   # casts; instead, issue warnings for unnamed arguments where
04159   # appropriate.
04160   #
04161   # These are things that we want warnings for, since the style guide
04162   # explicitly requires all parameters to be named:
04163   #   Function(int);
04164   #   Function(int) {
04165   #   ConstMember(int) const;
04166   #   ConstMember(int) const {
04167   #   ExceptionMember(int) throw (...);
04168   #   ExceptionMember(int) throw (...) {
04169   #   PureVirtual(int) = 0;
04170   #
04171   # These are functions of some sort, where the compiler would be fine
04172   # if they had named parameters, but people often omit those
04173   # identifiers to reduce clutter:
04174   #   (FunctionPointer)(int);
04175   #   (FunctionPointer)(int) = value;
04176   #   Function((function_pointer_arg)(int))
04177   #   <TemplateArgument(int)>;
04178   #   <(FunctionPointerTemplateArgument)(int)>;
04179   remainder = line[match.end(0):]
04180   if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
04181     # Looks like an unnamed parameter.
04182 
04183     # Don't warn on any kind of template arguments.
04184     if Match(r'^\s*>', remainder):
04185       return False
04186 
04187     # Don't warn on assignments to function pointers, but keep warnings for
04188     # unnamed parameters to pure virtual functions.  Note that this pattern
04189     # will also pass on assignments of "0" to function pointers, but the
04190     # preferred values for those would be "nullptr" or "NULL".
04191     matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
04192     if matched_zero and matched_zero.group(1) != '0':
04193       return False
04194 
04195     # Don't warn on function pointer declarations.  For this we need
04196     # to check what came before the "(type)" string.
04197     if Match(r'.*\)\s*$', line[0:match.start(0)]):
04198       return False
04199 
04200     # Don't warn if the parameter is named with block comments, e.g.:
04201     #  Function(int /*unused_param*/);
04202     if '/*' in raw_line:
04203       return False
04204 
04205     # Passed all filters, issue warning here.
04206     error(filename, linenum, 'readability/function', 3,
04207           'All parameters should be named in a function')
04208     return True
04209 
04210   # At this point, all that should be left is actual casts.
04211   error(filename, linenum, 'readability/casting', 4,
04212         'Using C-style cast.  Use %s<%s>(...) instead' %
04213         (cast_type, match.group(1)))
04214 
04215   return True
04216 
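# Illustrative sketch (not used anywhere in cpplint): feeds CheckCStyleCast a
# sample line together with the pointer-cast pattern that CheckLanguage passes
# in above, collecting the reported category with a throwaway error callback.
def _ExampleCheckCStyleCast():
  caught = []
  def _CollectError(filename, linenum, category, confidence, message):
    caught.append(category)
  pointer_cast = r'\((\w+\s?\*+\s?)\)'  # same pattern as the call above
  CheckCStyleCast('demo.cc', 1,
                  'int* p = (int*)malloc(4);', 'int* p = (int*)malloc(4);',
                  'reinterpret_cast', pointer_cast, _CollectError)
  return caught  # expected: ['readability/casting']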
04217 
04218 _HEADERS_CONTAINING_TEMPLATES = (
04219     ('<deque>', ('deque',)),
04220     ('<functional>', ('unary_function', 'binary_function',
04221                       'plus', 'minus', 'multiplies', 'divides', 'modulus',
04222                       'negate',
04223                       'equal_to', 'not_equal_to', 'greater', 'less',
04224                       'greater_equal', 'less_equal',
04225                       'logical_and', 'logical_or', 'logical_not',
04226                       'unary_negate', 'not1', 'binary_negate', 'not2',
04227                       'bind1st', 'bind2nd',
04228                       'pointer_to_unary_function',
04229                       'pointer_to_binary_function',
04230                       'ptr_fun',
04231                       'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
04232                       'mem_fun_ref_t',
04233                       'const_mem_fun_t', 'const_mem_fun1_t',
04234                       'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
04235                       'mem_fun_ref',
04236                      )),
04237     ('<limits>', ('numeric_limits',)),
04238     ('<list>', ('list',)),
04239     ('<map>', ('map', 'multimap',)),
04240     ('<memory>', ('allocator',)),
04241     ('<queue>', ('queue', 'priority_queue',)),
04242     ('<set>', ('set', 'multiset',)),
04243     ('<stack>', ('stack',)),
04244     ('<string>', ('char_traits', 'basic_string',)),
04245     ('<utility>', ('pair',)),
04246     ('<vector>', ('vector',)),
04247 
04248     # gcc extensions.
04249     # Note: std::hash is their hash, ::hash is our hash
04250     ('<hash_map>', ('hash_map', 'hash_multimap',)),
04251     ('<hash_set>', ('hash_set', 'hash_multiset',)),
04252     ('<slist>', ('slist',)),
04253     )
04254 
04255 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
04256 
04257 _re_pattern_algorithm_header = []
04258 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
04259                   'transform'):
04260   # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
04261   # type::max().
04262   _re_pattern_algorithm_header.append(
04263       (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
04264        _template,
04265        '<algorithm>'))
04266 
04267 _re_pattern_templates = []
04268 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
04269   for _template in _templates:
04270     _re_pattern_templates.append(
04271         (re.compile(r'(<|\b)' + _template + r'\s*<'),
04272          _template + '<>',
04273          _header))
04274 
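# Illustrative sketch (not used anywhere in cpplint): given a source line,
# lists the (template, header) pairs that the patterns built above would
# report as required.
def _ExampleRequiredHeaders(line):
  hits = []
  for pattern, template, header in (_re_pattern_algorithm_header +
                                    _re_pattern_templates):
    if pattern.search(line):
      hits.append((template, header))
  return hits
# e.g. _ExampleRequiredHeaders('int x = std::max(a, b);') should give
# [('max', '<algorithm>')], and _ExampleRequiredHeaders('std::map<int, int> m;')
# should give [('map<>', '<map>')].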
04275 
04276 def FilesBelongToSameModule(filename_cc, filename_h):
04277   """Check if these two filenames belong to the same module.
04278 
04279   The concept of a 'module' here is as follows:
04280   foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
04281   same 'module' if they are in the same directory.
04282   some/path/public/xyzzy and some/path/internal/xyzzy are also considered
04283   to belong to the same module here.
04284 
04285   If filename_cc has a longer path than filename_h, for example
04286   '/absolute/path/to/base/sysinfo.cc' including 'base/sysinfo.h', this
04287   function also produces the prefix needed to open the
04288   header. This is used by the caller of this function to more robustly open the
04289   header file. We don't have access to the real include paths in this context,
04290   so we need this guesswork here.
04291 
04292   Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
04293   according to this implementation. Because of this, this function gives
04294   some false positives. This should be sufficiently rare in practice.
04295 
04296   Args:
04297     filename_cc: The path of the .cc file.
04298     filename_h: The path of the header file.
04299 
04300   Returns:
04301     Tuple with a bool and a string:
04302     bool: True if filename_cc and filename_h belong to the same module.
04303     string: the additional prefix needed to open the header file.
04304   """
04305 
04306   if not filename_cc.endswith('.cc'):
04307     return (False, '')
04308   filename_cc = filename_cc[:-len('.cc')]
04309   if filename_cc.endswith('_unittest'):
04310     filename_cc = filename_cc[:-len('_unittest')]
04311   elif filename_cc.endswith('_test'):
04312     filename_cc = filename_cc[:-len('_test')]
04313   filename_cc = filename_cc.replace('/public/', '/')
04314   filename_cc = filename_cc.replace('/internal/', '/')
04315 
04316   if not filename_h.endswith('.h'):
04317     return (False, '')
04318   filename_h = filename_h[:-len('.h')]
04319   if filename_h.endswith('-inl'):
04320     filename_h = filename_h[:-len('-inl')]
04321   filename_h = filename_h.replace('/public/', '/')
04322   filename_h = filename_h.replace('/internal/', '/')
04323 
04324   files_belong_to_same_module = filename_cc.endswith(filename_h)
04325   common_path = ''
04326   if files_belong_to_same_module:
04327     common_path = filename_cc[:-len(filename_h)]
04328   return files_belong_to_same_module, common_path
04329 
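# Illustrative sketch (not used anywhere in cpplint): expected results of
# FilesBelongToSameModule for a few representative paths, following the
# suffix/prefix logic above.
def _ExampleFilesBelongToSameModule():
  assert FilesBelongToSameModule(
      '/absolute/path/to/base/sysinfo.cc',
      'base/sysinfo.h') == (True, '/absolute/path/to/')
  assert FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo.h') == (True, '')
  assert FilesBelongToSameModule('a/b/foo.cc', 'c/d/bar.h') == (False, '')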
04330 
04331 def UpdateIncludeState(filename, include_state, io=codecs):
04332   """Fill up the include_state with new includes found from the file.
04333 
04334   Args:
04335     filename: the name of the header to read.
04336     include_state: an _IncludeState instance in which the headers are inserted.
04337     io: The io factory to use to read the file. Provided for testability.
04338 
04339   Returns:
04340     True if a header was successfully added. False otherwise.
04341   """
04342   headerfile = None
04343   try:
04344     headerfile = io.open(filename, 'r', 'utf8', 'replace')
04345   except IOError:
04346     return False
04347   linenum = 0
04348   for line in headerfile:
04349     linenum += 1
04350     clean_line = CleanseComments(line)
04351     match = _RE_PATTERN_INCLUDE.search(clean_line)
04352     if match:
04353       include = match.group(2)
04354       # The value formatting is cute, but not really used right now.
04355       # What matters here is that the key is in include_state.
04356       include_state.setdefault(include, '%s:%d' % (filename, linenum))
04357   return True
04358 
04359 
04360 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
04361                               io=codecs):
04362   """Reports missing STL includes.
04363 
04364   This function outputs warnings to make sure you are including the headers
04365   necessary for the STL containers and functions that you use.  We only give
04366   one reason to include a header.  For example, if you use both equal_to<> and
04367   less<> in a .h file, only one of them (whichever appears later in the file)
04368   will be reported as a reason to include <functional>.
04369 
04370   Args:
04371     filename: The name of the current file.
04372     clean_lines: A CleansedLines instance containing the file.
04373     include_state: An _IncludeState instance.
04374     error: The function to call with any errors found.
04375     io: The IO factory to use to read the header file. Provided for unittest
04376         injection.
04377   """
04378   required = {}  # A map of header name to line number and the template entity.
04379                  # Example of required: { '<functional>': (1219, 'less<>') }
04380 
04381   for linenum in xrange(clean_lines.NumLines()):
04382     line = clean_lines.elided[linenum]
04383     if not line or line[0] == '#':
04384       continue
04385 
04386     # String is special -- it is a non-templatized type in STL.
04387     matched = _RE_PATTERN_STRING.search(line)
04388     if matched:
04389       # Don't warn about strings in non-STL namespaces:
04390       # (We check only the first match per line; good enough.)
04391       prefix = line[:matched.start()]
04392       if prefix.endswith('std::') or not prefix.endswith('::'):
04393         required['<string>'] = (linenum, 'string')
04394 
04395     for pattern, template, header in _re_pattern_algorithm_header:
04396       if pattern.search(line):
04397         required[header] = (linenum, template)
04398 
04399     # The following check is just a speedup; no semantics are changed.
04400     if '<' not in line:  # Reduces the cpu time usage by skipping lines.
04401       continue
04402 
04403     for pattern, template, header in _re_pattern_templates:
04404       if pattern.search(line):
04405         required[header] = (linenum, template)
04406 
04407   # The policy is that if you #include something in foo.h you don't need to
04408   # include it again in foo.cc. Here, we will look at possible includes.
04409   # Let's copy the include_state so it is only messed up within this function.
04410   include_state = include_state.copy()
04411 
04412   # Did we find the header for this file (if any) and successfully load it?
04413   header_found = False
04414 
04415   # Use the absolute path so that matching works properly.
04416   abs_filename = FileInfo(filename).FullName()
04417 
04418   # For Emacs's flymake.
04419   # If cpplint is invoked from Emacs's flymake, a temporary file is generated
04420   # by flymake and that file name might end with '_flymake.cc'. In that case,
04421   # restore the original file name here so that the corresponding header file
04422   # can be found.
04423   # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
04424   # instead of 'foo_flymake.h'
04425   abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
04426 
04427   # include_state is modified during iteration, so we iterate over a copy of
04428   # the keys.
04429   header_keys = include_state.keys()
04430   for header in header_keys:
04431     (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
04432     fullpath = common_path + header
04433     if same_module and UpdateIncludeState(fullpath, include_state, io):
04434       header_found = True
04435 
04436   # If we can't find the header file for a .cc, assume it's because we don't
04437   # know where to look. In that case we'll give up as we're not sure they
04438   # didn't include it in the .h file.
04439   # TODO(unknown): Do a better job of finding .h files so we are confident that
04440   # not having the .h file means there isn't one.
04441   if filename.endswith('.cc') and not header_found:
04442     return
04443 
04444   # All the lines have been processed, report the errors found.
04445   for required_header_unstripped in required:
04446     template = required[required_header_unstripped][1]
04447     if required_header_unstripped.strip('<>"') not in include_state:
04448       error(filename, required[required_header_unstripped][0],
04449             'build/include_what_you_use', 4,
04450             'Add #include ' + required_header_unstripped + ' for ' + template)
04451 
04452 
04453 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
04454 
04455 
04456 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
04457   """Check that make_pair's template arguments are deduced.
04458 
04459   G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
04460   specified explicitly, and such use isn't intended in any case.
04461 
04462   Args:
04463     filename: The name of the current file.
04464     clean_lines: A CleansedLines instance containing the file.
04465     linenum: The number of the line to check.
04466     error: The function to call with any errors found.
04467   """
04468   line = clean_lines.elided[linenum]
04469   match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
04470   if match:
04471     error(filename, linenum, 'build/explicit_make_pair',
04472           4,  # 4 = high confidence
04473           'For C++11-compatibility, omit template arguments from make_pair'
04474           ' OR use pair directly OR if appropriate, construct a pair directly')
04475 
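# Illustrative sketch (not used anywhere in cpplint): the pattern above fires
# only when make_pair's template arguments are spelled out rather than deduced.
def _ExampleExplicitMakePair():
  assert _RE_PATTERN_EXPLICIT_MAKEPAIR.search('x = make_pair<int, int>(a, b);')
  assert not _RE_PATTERN_EXPLICIT_MAKEPAIR.search('x = make_pair(a, b);')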
04476 
04477 def ProcessLine(filename, file_extension, clean_lines, line,
04478                 include_state, function_state, nesting_state, error,
04479                 extra_check_functions=[]):
04480   """Processes a single line in the file.
04481 
04482   Args:
04483     filename: Filename of the file that is being processed.
04484     file_extension: The extension (dot not included) of the file.
04485     clean_lines: A CleansedLines instance containing the file, with comments
04486                  stripped.
04487     line: Number of the line being processed.
04488     include_state: An _IncludeState instance in which the headers are inserted.
04489     function_state: A _FunctionState instance which counts function lines, etc.
04490     nesting_state: A _NestingState instance which maintains information about
04491                    the current stack of nested blocks being parsed.
04492     error: A callable to which errors are reported, which takes 5 arguments:
04493            filename, line number, error category, confidence, and message
04494     extra_check_functions: An array of additional check functions that will be
04495                            run on each source line. Each function takes 4
04496                            arguments: filename, clean_lines, line, error
04497   """
04498   raw_lines = clean_lines.raw_lines
04499   ParseNolintSuppressions(filename, raw_lines[line], line, error)
04500   nesting_state.Update(filename, clean_lines, line, error)
04501   if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
04502     return
04503   CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
04504   CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
04505   CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
04506   CheckLanguage(filename, clean_lines, line, file_extension, include_state,
04507                 nesting_state, error)
04508   CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
04509   CheckForNonStandardConstructs(filename, clean_lines, line,
04510                                 nesting_state, error)
04511   CheckVlogArguments(filename, clean_lines, line, error)
04512   CheckPosixThreading(filename, clean_lines, line, error)
04513   CheckInvalidIncrement(filename, clean_lines, line, error)
04514   CheckMakePairUsesDeduction(filename, clean_lines, line, error)
04515   for check_fn in extra_check_functions:
04516     check_fn(filename, clean_lines, line, error)
04517 
04518 def ProcessFileData(filename, file_extension, lines, error,
04519                     extra_check_functions=[]):
04520   """Performs lint checks and reports any errors to the given error function.
04521 
04522   Args:
04523     filename: Filename of the file that is being processed.
04524     file_extension: The extension (dot not included) of the file.
04525     lines: An array of strings, each representing a line of the file, with the
04526            last element being empty if the file is terminated with a newline.
04527     error: A callable to which errors are reported, which takes 5 arguments:
04528            filename, line number, error category, confidence, and message
04529     extra_check_functions: An array of additional check functions that will be
04530                            run on each source line. Each function takes 4
04531                            arguments: filename, clean_lines, line, error
04532   """
04533   lines = (['// marker so line numbers and indices both start at 1'] + lines +
04534            ['// marker so line numbers end in a known way'])
04535 
04536   include_state = _IncludeState()
04537   function_state = _FunctionState()
04538   nesting_state = _NestingState()
04539 
04540   ResetNolintSuppressions()
04541 
04542   CheckForCopyright(filename, lines, error)
04543 
04544   if file_extension == 'h':
04545     CheckForHeaderGuard(filename, lines, error)
04546 
04547   RemoveMultiLineComments(filename, lines, error)
04548   clean_lines = CleansedLines(lines)
04549   for line in xrange(clean_lines.NumLines()):
04550     ProcessLine(filename, file_extension, clean_lines, line,
04551                 include_state, function_state, nesting_state, error,
04552                 extra_check_functions)
04553   nesting_state.CheckCompletedBlocks(filename, error)
04554 
04555   CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
04556 
04557   # We check here rather than inside ProcessLine so that we see raw
04558   # lines rather than "cleaned" lines.
04559   CheckForBadCharacters(filename, lines, error)
04560 
04561   CheckForNewlineAtEOF(filename, lines, error)
04562 
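# Illustrative sketch (not used anywhere in cpplint): the checks can be run on
# an in-memory buffer by handing ProcessFileData a custom error callback.
def _ExampleLintBuffer():
  findings = []
  def _Collect(filename, linenum, category, confidence, message):
    findings.append((linenum, category, confidence, message))
  ProcessFileData('demo.cc', 'cc',
                  ['// Copyright 2014 Example Authors.',
                   'using namespace std;',
                   ''],
                  _Collect)
  # findings should include a (2, 'build/namespaces', 5, ...) entry.
  return findings
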
04563 def ProcessFile(filename, vlevel, extra_check_functions=[]):
04564   """Does google-lint on a single file.
04565 
04566   Args:
04567     filename: The name of the file to parse.
04568 
04569     vlevel: The level of errors to report.  Every error of confidence
04570     >= verbose_level will be reported.  0 is a good default.
04571 
04572     extra_check_functions: An array of additional check functions that will be
04573                            run on each source line. Each function takes 4
04574                            arguments: filename, clean_lines, line, error
04575   """
04576 
04577   _SetVerboseLevel(vlevel)
04578 
04579   try:
04580     # Support the UNIX convention of using "-" for stdin.  Note that
04581     # we are not opening the file with universal newline support
04582     # (which codecs doesn't support anyway), so the resulting lines do
04583     # contain trailing '\r' characters if we are reading a file that
04584     # has CRLF endings.
04585     # If after the split a trailing '\r' is present, it is removed
04586     # below. If it is not expected to be present (i.e. os.linesep !=
04587     # '\r\n' as in Windows), a warning is issued below if this file
04588     # is processed.
04589 
04590     if filename == '-':
04591       lines = codecs.StreamReaderWriter(sys.stdin,
04592                                         codecs.getreader('utf8'),
04593                                         codecs.getwriter('utf8'),
04594                                         'replace').read().split('\n')
04595     else:
04596       lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
04597 
04598     carriage_return_found = False
04599     # Remove trailing '\r'.
04600     for linenum in range(len(lines)):
04601       if lines[linenum].endswith('\r'):
04602         lines[linenum] = lines[linenum].rstrip('\r')
04603         carriage_return_found = True
04604 
04605   except IOError:
04606     sys.stderr.write(
04607         "Skipping input '%s': Can't open for reading\n" % filename)
04608     return
04609 
04610   # Note, if no dot is found, this will give the entire filename as the ext.
04611   file_extension = filename[filename.rfind('.') + 1:]
04612 
04613   # When reading from stdin, the extension is unknown, so no cpplint tests
04614   # should rely on the extension.
04615   valid_extensions = ['cc', 'h', 'cpp', 'cu', 'cuh']
04616   if filename != '-' and file_extension not in valid_extensions:
04617     sys.stderr.write('Ignoring %s; not a valid file name '
04618                      '(.cc, .h, .cpp, .cu, .cuh)\n' % filename)
04619   else:
04620     ProcessFileData(filename, file_extension, lines, Error,
04621                     extra_check_functions)
04622     if carriage_return_found and os.linesep != '\r\n':
04623       # Use 0 for linenum since outputting only one error for potentially
04624       # several lines.
04625       Error(filename, 0, 'whitespace/newline', 1,
04626             'One or more unexpected \\r (^M) found; '
04627             'better to use only a \\n')
04628 
04629   sys.stderr.write('Done processing %s\n' % filename)
04630 
04631 
04632 def PrintUsage(message):
04633   """Prints a brief usage string and exits, optionally with an error message.
04634 
04635   Args:
04636     message: The optional error message.
04637   """
04638   sys.stderr.write(_USAGE)
04639   if message:
04640     sys.exit('\nFATAL ERROR: ' + message)
04641   else:
04642     sys.exit(1)
04643 
04644 
04645 def PrintCategories():
04646   """Prints a list of all the error-categories used by error messages.
04647 
04648   These are the categories used to filter messages via --filter.
04649   """
04650   sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
04651   sys.exit(0)
04652 
04653 
04654 def ParseArguments(args):
04655   """Parses the command line arguments.
04656 
04657   This may set the output format and verbosity level as side-effects.
04658 
04659   Args:
04660     args: The command line arguments:
04661 
04662   Returns:
04663     The list of filenames to lint.
04664   """
04665   try:
04666     (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
04667                                                  'counting=',
04668                                                  'filter=',
04669                                                  'root=',
04670                                                  'linelength='])
04671   except getopt.GetoptError:
04672     PrintUsage('Invalid arguments.')
04673 
04674   verbosity = _VerboseLevel()
04675   output_format = _OutputFormat()
04676   filters = ''
04677   counting_style = ''
04678 
04679   for (opt, val) in opts:
04680     if opt == '--help':
04681       PrintUsage(None)
04682     elif opt == '--output':
04683       if val not in ('emacs', 'vs7', 'eclipse'):
04684         PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
04685       output_format = val
04686     elif opt == '--verbose':
04687       verbosity = int(val)
04688     elif opt == '--filter':
04689       filters = val
04690       if not filters:
04691         PrintCategories()
04692     elif opt == '--counting':
04693       if val not in ('total', 'toplevel', 'detailed'):
04694         PrintUsage('Valid counting options are total, toplevel, and detailed')
04695       counting_style = val
04696     elif opt == '--root':
04697       global _root
04698       _root = val
04699     elif opt == '--linelength':
04700       global _line_length
04701       try:
04702         _line_length = int(val)
04703       except ValueError:
04704         PrintUsage('Line length must be digits.')
04705 
04706   if not filenames:
04707     PrintUsage('No files were specified.')
04708 
04709   _SetOutputFormat(output_format)
04710   _SetVerboseLevel(verbosity)
04711   _SetFilters(filters)
04712   _SetCountingStyle(counting_style)
04713 
04714   return filenames
04715 
04716 
04717 def main():
04718   filenames = ParseArguments(sys.argv[1:])
04719 
04720   # Change stderr to write with replacement characters so we don't die
04721   # if we try to print something containing non-ASCII characters.
04722   sys.stderr = codecs.StreamReaderWriter(sys.stderr,
04723                                          codecs.getreader('utf8'),
04724                                          codecs.getwriter('utf8'),
04725                                          'replace')
04726 
04727   _cpplint_state.ResetErrorCounts()
04728   for filename in filenames:
04729     ProcessFile(filename, _cpplint_state.verbose_level)
04730   _cpplint_state.PrintErrorCounts()
04731 
04732   sys.exit(_cpplint_state.error_count > 0)
04733 
04734 
04735 if __name__ == '__main__':
04736   main()


roslint
Author(s): Mike Purvis, Jack O'Quin
autogenerated on Mon Oct 6 2014 07:06:19