cpplint.py
00001 #!/usr/bin/python
00002 #
00003 # Copyright (c) 2009 Google Inc. All rights reserved.
00004 #
00005 # Redistribution and use in source and binary forms, with or without
00006 # modification, are permitted provided that the following conditions are
00007 # met:
00008 #
00009 #    * Redistributions of source code must retain the above copyright
00010 # notice, this list of conditions and the following disclaimer.
00011 #    * Redistributions in binary form must reproduce the above
00012 # copyright notice, this list of conditions and the following disclaimer
00013 # in the documentation and/or other materials provided with the
00014 # distribution.
00015 #    * Neither the name of Google Inc. nor the names of its
00016 # contributors may be used to endorse or promote products derived from
00017 # this software without specific prior written permission.
00018 #
00019 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00020 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00021 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
00022 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
00023 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00024 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
00025 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00026 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00027 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00028 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
00029 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00030 
00031 
00032 # April 2014, Greg Horn:
00033 #   Edited the original file to suppress "Done processing xxx"
00034 
00035 """Does google-lint on c++ files.
00036 
00037 The goal of this script is to identify places in the code that *may*
00038 be in non-compliance with google style.  It does not attempt to fix
00039 up these problems -- the point is to educate.  It also does not
00040 attempt to find all problems, or to ensure that everything it does
00041 find is legitimately a problem.
00042 
00043 In particular, we can get very confused by /* and // inside strings!
00044 We do a small hack, which is to ignore //'s with "'s after them on the
00045 same line, but it is far from perfect (in either direction).
00046 """
00047 
00048 import codecs
00049 import copy
00050 import getopt
00051 import math  # for log
00052 import os
00053 import re
00054 import sre_compile
00055 import string
00056 import sys
00057 import unicodedata
00058 
00059 
00060 _USAGE = """
00061 Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
00062                    [--counting=total|toplevel|detailed] [--root=subdir]
00063                    [--linelength=digits]
00064         <file> [file] ...
00065 
00066   The style guidelines this tries to follow are those in
00067     http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
00068 
00069   Every problem is given a confidence score from 1-5, with 5 meaning we are
00070   certain of the problem, and 1 meaning it could be a legitimate construct.
00071   This will miss some errors, and is not a substitute for a code review.
00072 
00073   To suppress false-positive errors of a certain category, add a
00074   'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
00075   suppresses errors of all categories on that line.
00076 
00077   The files passed in will be linted; at least one file must be provided.
00078   Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the
00079   extensions with the --extensions flag.
00080 
00081   Flags:
00082 
00083     output=vs7
00084       By default, the output is formatted to ease emacs parsing.  Visual Studio
00085       compatible output (vs7) may also be used.  Other formats are unsupported.
00086 
00087     verbose=#
00088       Specify a number 0-5 to restrict errors to certain verbosity levels.
00089 
00090     filter=-x,+y,...
00091       Specify a comma-separated list of category-filters to apply: only
00092       error messages whose category names pass the filters will be printed.
00093       (Category names are printed with the message and look like
00094       "[whitespace/indent]".)  Filters are evaluated left to right.
00095       "-FOO" and "FOO" mean "do not print categories that start with FOO".
00096       "+FOO" means "do print categories that start with FOO".
00097 
00098       Examples: --filter=-whitespace,+whitespace/braces
00099                 --filter=whitespace,runtime/printf,+runtime/printf_format
00100                 --filter=-,+build/include_what_you_use
00101 
00102       To see a list of all the categories used in cpplint, pass no arg:
00103          --filter=
00104 
00105     counting=total|toplevel|detailed
00106       The total number of errors found is always printed. If
00107       'toplevel' is provided, then the count of errors in each of
00108       the top-level categories like 'build' and 'whitespace' will
00109       also be printed. If 'detailed' is provided, then a count
00110       is provided for each category like 'build/class'.
00111 
00112     root=subdir
00113       The root directory used for deriving the header guard CPP variable.
00114       By default, the header guard CPP variable is calculated as the relative
00115       path to the directory that contains .git, .hg, or .svn.  When this flag
00116       is specified, the relative path is calculated from the specified
00117       directory. If the specified directory does not exist, this flag is
00118       ignored.
00119 
00120       Examples:
00121         Assuming that src/.git exists, the header guard CPP variables for
00122         src/chrome/browser/ui/browser.h are:
00123 
00124         No flag => CHROME_BROWSER_UI_BROWSER_H_
00125         --root=chrome => BROWSER_UI_BROWSER_H_
00126         --root=chrome/browser => UI_BROWSER_H_
00127 
00128     linelength=digits
00129       This is the allowed line length for the project. The default value is
00130       80 characters.
00131 
00132       Examples:
00133         --linelength=120
00134 
00135     extensions=extension,extension,...
00136       The allowed file extensions that cpplint will check.
00137 
00138       Examples:
00139         --extensions=hpp,cpp
00140 """
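
# Illustrative invocations of the usage described above (not part of the
# original script; the file names are hypothetical):
#   python cpplint.py --filter=-whitespace/tab,+build/include foo.cc
#   python cpplint.py --linelength=100 --extensions=hpp,cpp bar.hpp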
00141 
00142 # We categorize each error message we print.  Here are the categories.
00143 # We want an explicit list so we can list them all in cpplint --filter=.
00144 # If you add a new error message with a new category, add it to the list
00145 # here!  cpplint_unittest.py should tell you if you forget to do this.
00146 _ERROR_CATEGORIES = [
00147   'build/class',
00148   'build/deprecated',
00149   'build/endif_comment',
00150   'build/explicit_make_pair',
00151   'build/forward_decl',
00152   'build/header_guard',
00153   'build/include',
00154   'build/include_alpha',
00155   'build/include_order',
00156   'build/include_what_you_use',
00157   'build/namespaces',
00158   'build/printf_format',
00159   'build/storage_class',
00160   'legal/copyright',
00161   'readability/alt_tokens',
00162   'readability/braces',
00163   'readability/casting',
00164   'readability/check',
00165   'readability/constructors',
00166   'readability/fn_size',
00167   'readability/function',
00168   'readability/multiline_comment',
00169   'readability/multiline_string',
00170   'readability/namespace',
00171   'readability/nolint',
00172   'readability/nul',
00173   'readability/streams',
00174   'readability/todo',
00175   'readability/utf8',
00176   'runtime/arrays',
00177   'runtime/casting',
00178   'runtime/explicit',
00179   'runtime/int',
00180   'runtime/init',
00181   'runtime/invalid_increment',
00182   'runtime/member_string_references',
00183   'runtime/memset',
00184   'runtime/operator',
00185   'runtime/printf',
00186   'runtime/printf_format',
00187   'runtime/references',
00188   'runtime/string',
00189   'runtime/threadsafe_fn',
00190   'runtime/vlog',
00191   'whitespace/blank_line',
00192   'whitespace/braces',
00193   'whitespace/comma',
00194   'whitespace/comments',
00195   'whitespace/empty_conditional_body',
00196   'whitespace/empty_loop_body',
00197   'whitespace/end_of_line',
00198   'whitespace/ending_newline',
00199   'whitespace/forcolon',
00200   'whitespace/indent',
00201   'whitespace/line_length',
00202   'whitespace/newline',
00203   'whitespace/operators',
00204   'whitespace/parens',
00205   'whitespace/semicolon',
00206   'whitespace/tab',
00207   'whitespace/todo'
00208   ]
00209 
00210 # The default state of the category filter. This is overridden by the --filter=
00211 # flag. By default all errors are on, so only add categories here that should be
00212 # off by default (i.e., categories that must be enabled by the --filter= flags).
00213 # All entries here should start with a '-' or '+', as in the --filter= flag.
00214 _DEFAULT_FILTERS = ['-build/include_alpha']
00215 
00216 # We used to check for high-bit characters, but after much discussion we
00217 # decided those were OK, as long as they were in UTF-8 and didn't represent
00218 # hard-coded international strings, which belong in a separate i18n file.
00219 
00220 
00221 # C++ headers
00222 _CPP_HEADERS = frozenset([
00223     # Legacy
00224     'algobase.h',
00225     'algo.h',
00226     'alloc.h',
00227     'builtinbuf.h',
00228     'bvector.h',
00229     'complex.h',
00230     'defalloc.h',
00231     'deque.h',
00232     'editbuf.h',
00233     'fstream.h',
00234     'function.h',
00235     'hash_map',
00236     'hash_map.h',
00237     'hash_set',
00238     'hash_set.h',
00239     'hashtable.h',
00240     'heap.h',
00241     'indstream.h',
00242     'iomanip.h',
00243     'iostream.h',
00244     'istream.h',
00245     'iterator.h',
00246     'list.h',
00247     'map.h',
00248     'multimap.h',
00249     'multiset.h',
00250     'ostream.h',
00251     'pair.h',
00252     'parsestream.h',
00253     'pfstream.h',
00254     'procbuf.h',
00255     'pthread_alloc',
00256     'pthread_alloc.h',
00257     'rope',
00258     'rope.h',
00259     'ropeimpl.h',
00260     'set.h',
00261     'slist',
00262     'slist.h',
00263     'stack.h',
00264     'stdiostream.h',
00265     'stl_alloc.h',
00266     'stl_relops.h',
00267     'streambuf.h',
00268     'stream.h',
00269     'strfile.h',
00270     'strstream.h',
00271     'tempbuf.h',
00272     'tree.h',
00273     'type_traits.h',
00274     'vector.h',
00275     # 17.6.1.2 C++ library headers
00276     'algorithm',
00277     'array',
00278     'atomic',
00279     'bitset',
00280     'chrono',
00281     'codecvt',
00282     'complex',
00283     'condition_variable',
00284     'deque',
00285     'exception',
00286     'forward_list',
00287     'fstream',
00288     'functional',
00289     'future',
00290     'initializer_list',
00291     'iomanip',
00292     'ios',
00293     'iosfwd',
00294     'iostream',
00295     'istream',
00296     'iterator',
00297     'limits',
00298     'list',
00299     'locale',
00300     'map',
00301     'memory',
00302     'mutex',
00303     'new',
00304     'numeric',
00305     'ostream',
00306     'queue',
00307     'random',
00308     'ratio',
00309     'regex',
00310     'set',
00311     'sstream',
00312     'stack',
00313     'stdexcept',
00314     'streambuf',
00315     'string',
00316     'strstream',
00317     'system_error',
00318     'thread',
00319     'tuple',
00320     'typeindex',
00321     'typeinfo',
00322     'type_traits',
00323     'unordered_map',
00324     'unordered_set',
00325     'utility',
00326     'valarray',
00327     'vector',
00328     # 17.6.1.2 C++ headers for C library facilities
00329     'cassert',
00330     'ccomplex',
00331     'cctype',
00332     'cerrno',
00333     'cfenv',
00334     'cfloat',
00335     'cinttypes',
00336     'ciso646',
00337     'climits',
00338     'clocale',
00339     'cmath',
00340     'csetjmp',
00341     'csignal',
00342     'cstdalign',
00343     'cstdarg',
00344     'cstdbool',
00345     'cstddef',
00346     'cstdint',
00347     'cstdio',
00348     'cstdlib',
00349     'cstring',
00350     'ctgmath',
00351     'ctime',
00352     'cuchar',
00353     'cwchar',
00354     'cwctype',
00355     ])
00356 
00357 # Assertion macros.  These are defined in base/logging.h and
00358 # testing/base/gunit.h.  Note that the _M versions need to come first
00359 # for substring matching to work.
00360 _CHECK_MACROS = [
00361     'DCHECK', 'CHECK',
00362     'EXPECT_TRUE_M', 'EXPECT_TRUE',
00363     'ASSERT_TRUE_M', 'ASSERT_TRUE',
00364     'EXPECT_FALSE_M', 'EXPECT_FALSE',
00365     'ASSERT_FALSE_M', 'ASSERT_FALSE',
00366     ]
00367 
00368 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
00369 _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
00370 
00371 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
00372                         ('>=', 'GE'), ('>', 'GT'),
00373                         ('<=', 'LE'), ('<', 'LT')]:
00374   _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
00375   _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
00376   _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
00377   _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
00378   _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
00379   _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
00380 
00381 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
00382                             ('>=', 'LT'), ('>', 'LE'),
00383                             ('<=', 'GT'), ('<', 'GE')]:
00384   _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
00385   _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
00386   _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
00387   _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
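
# Illustrative result of the two loops above (not part of the original file):
#   _CHECK_REPLACEMENT['CHECK']['==']        -> 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<']  -> 'EXPECT_GE'
# i.e. the FALSE variants map each operator to its logical inverse.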
00388 
00389 # Alternative tokens and their replacements.  For full list, see section 2.5
00390 # Alternative tokens [lex.digraph] in the C++ standard.
00391 #
00392 # Digraphs (such as '%:') are not included here since it's a mess to
00393 # match those on a word boundary.
00394 _ALT_TOKEN_REPLACEMENT = {
00395     'and': '&&',
00396     'bitor': '|',
00397     'or': '||',
00398     'xor': '^',
00399     'compl': '~',
00400     'bitand': '&',
00401     'and_eq': '&=',
00402     'or_eq': '|=',
00403     'xor_eq': '^=',
00404     'not': '!',
00405     'not_eq': '!='
00406     }
00407 
00408 # Compile regular expression that matches all the above keywords.  The "[ =()]"
00409 # bit is meant to avoid matching these keywords outside of boolean expressions.
00410 #
00411 # False positives include C-style multi-line comments and multi-line strings
00412 # but those have always been troublesome for cpplint.
00413 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
00414     r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
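
# Illustrative match (not part of the original file): in the cleansed line
# "if (a and b)" the pattern above captures "and" (it is preceded and
# followed by a space), suggesting the replacement "&&"; identifiers that
# merely contain one of the keywords are not matched because of the
# surrounding delimiter requirements.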
00415 
00416 
00417 # These constants define types of headers for use with
00418 # _IncludeState.CheckNextIncludeOrder().
00419 _C_SYS_HEADER = 1
00420 _CPP_SYS_HEADER = 2
00421 _LIKELY_MY_HEADER = 3
00422 _POSSIBLE_MY_HEADER = 4
00423 _OTHER_HEADER = 5
00424 
00425 # These constants define the current inline assembly state
00426 _NO_ASM = 0       # Outside of inline assembly block
00427 _INSIDE_ASM = 1   # Inside inline assembly block
00428 _END_ASM = 2      # Last line of inline assembly block
00429 _BLOCK_ASM = 3    # The whole block is an inline assembly block
00430 
00431 # Match start of assembly blocks
00432 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
00433                         r'(?:\s+(volatile|__volatile__))?'
00434                         r'\s*[{(]')
00435 
00436 
00437 _regexp_compile_cache = {}
00438 
00439 # Finds occurrences of NOLINT or NOLINT(...).
00440 _RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
00441 
00442 # {str, set(int)}: a map from error categories to sets of line numbers
00443 # on which those errors are expected and should be suppressed.
00444 _error_suppressions = {}
00445 
00446 # The root directory used for deriving the header guard CPP variable.
00447 # This is set by --root flag.
00448 _root = None
00449 
00450 # The allowed line length of files.
00451 # This is set by --linelength flag.
00452 _line_length = 80
00453 
00454 # The allowed extensions for file names
00455 # This is set by --extensions flag.
00456 _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
00457 
00458 def ParseNolintSuppressions(filename, raw_line, linenum, error):
00459   """Updates the global list of error-suppressions.
00460 
00461   Parses any NOLINT comments on the current line, updating the global
00462   error_suppressions store.  Reports an error if the NOLINT comment
00463   was malformed.
00464 
00465   Args:
00466     filename: str, the name of the input file.
00467     raw_line: str, the line of input text, with comments.
00468     linenum: int, the number of the current line.
00469     error: function, an error handler.
00470   """
00471   # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
00472   matched = _RE_SUPPRESSION.search(raw_line)
00473   if matched:
00474     category = matched.group(1)
00475     if category in (None, '(*)'):  # => "suppress all"
00476       _error_suppressions.setdefault(None, set()).add(linenum)
00477     else:
00478       if category.startswith('(') and category.endswith(')'):
00479         category = category[1:-1]
00480         if category in _ERROR_CATEGORIES:
00481           _error_suppressions.setdefault(category, set()).add(linenum)
00482         else:
00483           error(filename, linenum, 'readability/nolint', 5,
00484                 'Unknown NOLINT error category: %s' % category)
00485 
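# Illustrative behaviour (not part of the original file): given the raw line
#   'char* p = 0;  // NOLINT(readability/casting)'
# ParseNolintSuppressions records this line number under the
# 'readability/casting' category, while a bare '// NOLINT' (or 'NOLINT(*)')
# suppresses every category on the line via the None key in
# _error_suppressions.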
00486 
00487 def ResetNolintSuppressions():
00488   "Resets the set of NOLINT suppressions to empty."
00489   _error_suppressions.clear()
00490 
00491 
00492 def IsErrorSuppressedByNolint(category, linenum):
00493   """Returns true if the specified error category is suppressed on this line.
00494 
00495   Consults the global error_suppressions map populated by
00496   ParseNolintSuppressions/ResetNolintSuppressions.
00497 
00498   Args:
00499     category: str, the category of the error.
00500     linenum: int, the current line number.
00501   Returns:
00502     bool, True iff the error should be suppressed due to a NOLINT comment.
00503   """
00504   return (linenum in _error_suppressions.get(category, set()) or
00505           linenum in _error_suppressions.get(None, set()))
00506 
00507 def Match(pattern, s):
00508   """Matches the string with the pattern, caching the compiled regexp."""
00509   # The regexp compilation caching is inlined in both Match and Search for
00510   # performance reasons; factoring it out into a separate function turns out
00511   # to be noticeably expensive.
00512   if pattern not in _regexp_compile_cache:
00513     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00514   return _regexp_compile_cache[pattern].match(s)
00515 
00516 
00517 def ReplaceAll(pattern, rep, s):
00518   """Replaces instances of pattern in a string with a replacement.
00519 
00520   The compiled regex is kept in a cache shared by Match and Search.
00521 
00522   Args:
00523     pattern: regex pattern
00524     rep: replacement text
00525     s: search string
00526 
00527   Returns:
00528     string with replacements made (or original string if no replacements)
00529   """
00530   if pattern not in _regexp_compile_cache:
00531     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00532   return _regexp_compile_cache[pattern].sub(rep, s)
00533 
00534 
00535 def Search(pattern, s):
00536   """Searches the string for the pattern, caching the compiled regexp."""
00537   if pattern not in _regexp_compile_cache:
00538     _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
00539   return _regexp_compile_cache[pattern].search(s)
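
# Illustrative use of the cached helpers above (not part of the original
# file); they mirror re.match, re.sub and re.search:
#   Match(r'(\w+)\.h$', 'foo.h').group(1)   -> 'foo'
#   ReplaceAll(r'\s+', ' ', 'a   b')        -> 'a b'
#   Search(r'NOLINT', 'x;  // NOLINT')      -> a match object (truthy)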
00540 
00541 
00542 class _IncludeState(dict):
00543   """Tracks line numbers for includes, and the order in which includes appear.
00544 
00545   As a dict, an _IncludeState object serves as a mapping between include
00546   filename and line number on which that file was included.
00547 
00548   Call CheckNextIncludeOrder() once for each header in the file, passing
00549   in the type constants defined above. Calls in an illegal order will
00550   raise an _IncludeError with an appropriate error message.
00551 
00552   """
00553   # self._section will move monotonically through this set. If it ever
00554   # needs to move backwards, CheckNextIncludeOrder will raise an error.
00555   _INITIAL_SECTION = 0
00556   _MY_H_SECTION = 1
00557   _C_SECTION = 2
00558   _CPP_SECTION = 3
00559   _OTHER_H_SECTION = 4
00560 
00561   _TYPE_NAMES = {
00562       _C_SYS_HEADER: 'C system header',
00563       _CPP_SYS_HEADER: 'C++ system header',
00564       _LIKELY_MY_HEADER: 'header this file implements',
00565       _POSSIBLE_MY_HEADER: 'header this file may implement',
00566       _OTHER_HEADER: 'other header',
00567       }
00568   _SECTION_NAMES = {
00569       _INITIAL_SECTION: "... nothing. (This can't be an error.)",
00570       _MY_H_SECTION: 'a header this file implements',
00571       _C_SECTION: 'C system header',
00572       _CPP_SECTION: 'C++ system header',
00573       _OTHER_H_SECTION: 'other header',
00574       }
00575 
00576   def __init__(self):
00577     dict.__init__(self)
00578     self.ResetSection()
00579 
00580   def ResetSection(self):
00581     # The name of the current section.
00582     self._section = self._INITIAL_SECTION
00583     # The path of last found header.
00584     self._last_header = ''
00585 
00586   def SetLastHeader(self, header_path):
00587     self._last_header = header_path
00588 
00589   def CanonicalizeAlphabeticalOrder(self, header_path):
00590     """Returns a path canonicalized for alphabetical comparison.
00591 
00592     - replaces "-" with "_" so they both compare the same.
00593     - removes '-inl' since we don't require them to be after the main header.
00594     - lowercases everything, just in case.
00595 
00596     Args:
00597       header_path: Path to be canonicalized.
00598 
00599     Returns:
00600       Canonicalized path.
00601     """
00602     return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
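
  # Illustrative example (not part of the original file):
  #   CanonicalizeAlphabeticalOrder('Foo/Bar-inl.h')  -> 'foo/bar.h'
  # so "-inl" headers sort next to the headers they accompany.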
00603 
00604   def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
00605     """Check if a header is in alphabetical order with the previous header.
00606 
00607     Args:
00608       clean_lines: A CleansedLines instance containing the file.
00609       linenum: The number of the line to check.
00610       header_path: Canonicalized header to be checked.
00611 
00612     Returns:
00613       Returns true if the header is in alphabetical order.
00614     """
00615     # If previous section is different from current section, _last_header will
00616     # be reset to empty string, so it's always less than current header.
00617     #
00618     # If previous line was a blank line, assume that the headers are
00619     # intentionally sorted the way they are.
00620     if (self._last_header > header_path and
00621         not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
00622       return False
00623     return True
00624 
00625   def CheckNextIncludeOrder(self, header_type):
00626     """Returns a non-empty error message if the next header is out of order.
00627 
00628     This function also updates the internal state to be ready to check
00629     the next include.
00630 
00631     Args:
00632       header_type: One of the _XXX_HEADER constants defined above.
00633 
00634     Returns:
00635       The empty string if the header is in the right order, or an
00636       error message describing what's wrong.
00637 
00638     """
00639     error_message = ('Found %s after %s' %
00640                      (self._TYPE_NAMES[header_type],
00641                       self._SECTION_NAMES[self._section]))
00642 
00643     last_section = self._section
00644 
00645     if header_type == _C_SYS_HEADER:
00646       if self._section <= self._C_SECTION:
00647         self._section = self._C_SECTION
00648       else:
00649         self._last_header = ''
00650         return error_message
00651     elif header_type == _CPP_SYS_HEADER:
00652       if self._section <= self._CPP_SECTION:
00653         self._section = self._CPP_SECTION
00654       else:
00655         self._last_header = ''
00656         return error_message
00657     elif header_type == _LIKELY_MY_HEADER:
00658       if self._section <= self._MY_H_SECTION:
00659         self._section = self._MY_H_SECTION
00660       else:
00661         self._section = self._OTHER_H_SECTION
00662     elif header_type == _POSSIBLE_MY_HEADER:
00663       if self._section <= self._MY_H_SECTION:
00664         self._section = self._MY_H_SECTION
00665       else:
00666         # This will always be the fallback because we're not sure
00667         # enough that the header is associated with this file.
00668         self._section = self._OTHER_H_SECTION
00669     else:
00670       assert header_type == _OTHER_HEADER
00671       self._section = self._OTHER_H_SECTION
00672 
00673     if last_section != self._section:
00674       self._last_header = ''
00675 
00676     return ''
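
  # Illustrative sequence (not part of the original file): starting from a
  # fresh section, CheckNextIncludeOrder(_LIKELY_MY_HEADER), then
  # CheckNextIncludeOrder(_C_SYS_HEADER), then
  # CheckNextIncludeOrder(_CPP_SYS_HEADER) each return ''; a further
  # CheckNextIncludeOrder(_C_SYS_HEADER) would return
  # "Found C system header after C++ system header".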
00677 
00678 
00679 class _CppLintState(object):
00680   """Maintains module-wide state."""
00681 
00682   def __init__(self):
00683     self.verbose_level = 1  # global setting.
00684     self.error_count = 0    # global count of reported errors
00685     # filters to apply when emitting error messages
00686     self.filters = _DEFAULT_FILTERS[:]
00687     self.counting = 'total'  # In what way are we counting errors?
00688     self.errors_by_category = {}  # string to int dict storing error counts
00689 
00690     # output format:
00691     # "emacs" - format that emacs can parse (default)
00692     # "vs7" - format that Microsoft Visual Studio 7 can parse
00693     self.output_format = 'emacs'
00694 
00695   def SetOutputFormat(self, output_format):
00696     """Sets the output format for errors."""
00697     self.output_format = output_format
00698 
00699   def SetVerboseLevel(self, level):
00700     """Sets the module's verbosity, and returns the previous setting."""
00701     last_verbose_level = self.verbose_level
00702     self.verbose_level = level
00703     return last_verbose_level
00704 
00705   def SetCountingStyle(self, counting_style):
00706     """Sets the module's counting options."""
00707     self.counting = counting_style
00708 
00709   def SetFilters(self, filters):
00710     """Sets the error-message filters.
00711 
00712     These filters are applied when deciding whether to emit a given
00713     error message.
00714 
00715     Args:
00716       filters: A string of comma-separated filters (e.g. "+whitespace/indent").
00717                Each filter should start with + or -; else we die.
00718 
00719     Raises:
00720       ValueError: The comma-separated filters did not all start with '+' or '-'.
00721                   E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
00722     """
00723     # Default filters always have less priority than the flag ones.
00724     self.filters = _DEFAULT_FILTERS[:]
00725     for filt in filters.split(','):
00726       clean_filt = filt.strip()
00727       if clean_filt:
00728         self.filters.append(clean_filt)
00729     for filt in self.filters:
00730       if not (filt.startswith('+') or filt.startswith('-')):
00731         raise ValueError('Every filter in --filter must start with + or -'
00732                          ' (%s does not)' % filt)
00733 
00734   def ResetErrorCounts(self):
00735     """Sets the module's error statistic back to zero."""
00736     self.error_count = 0
00737     self.errors_by_category = {}
00738 
00739   def IncrementErrorCount(self, category):
00740     """Bumps the module's error statistic."""
00741     self.error_count += 1
00742     if self.counting in ('toplevel', 'detailed'):
00743       if self.counting != 'detailed':
00744         category = category.split('/')[0]
00745       if category not in self.errors_by_category:
00746         self.errors_by_category[category] = 0
00747       self.errors_by_category[category] += 1
00748 
00749   def PrintErrorCounts(self):
00750     """Print a summary of errors by category, and the total."""
00751     for category, count in self.errors_by_category.iteritems():
00752       sys.stderr.write('Category \'%s\' errors found: %d\n' %
00753                        (category, count))
00754     sys.stderr.write('Total errors found: %d\n' % self.error_count)
00755 
00756 _cpplint_state = _CppLintState()
00757 
00758 
00759 def _OutputFormat():
00760   """Gets the module's output format."""
00761   return _cpplint_state.output_format
00762 
00763 
00764 def _SetOutputFormat(output_format):
00765   """Sets the module's output format."""
00766   _cpplint_state.SetOutputFormat(output_format)
00767 
00768 
00769 def _VerboseLevel():
00770   """Returns the module's verbosity setting."""
00771   return _cpplint_state.verbose_level
00772 
00773 
00774 def _SetVerboseLevel(level):
00775   """Sets the module's verbosity, and returns the previous setting."""
00776   return _cpplint_state.SetVerboseLevel(level)
00777 
00778 
00779 def _SetCountingStyle(level):
00780   """Sets the module's counting options."""
00781   _cpplint_state.SetCountingStyle(level)
00782 
00783 
00784 def _Filters():
00785   """Returns the module's list of output filters, as a list."""
00786   return _cpplint_state.filters
00787 
00788 
00789 def _SetFilters(filters):
00790   """Sets the module's error-message filters.
00791 
00792   These filters are applied when deciding whether to emit a given
00793   error message.
00794 
00795   Args:
00796     filters: A string of comma-separated filters (e.g. "whitespace/indent").
00797              Each filter should start with + or -; else we die.
00798   """
00799   _cpplint_state.SetFilters(filters)
00800 
00801 
00802 class _FunctionState(object):
00803   """Tracks current function name and the number of lines in its body."""
00804 
00805   _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
00806   _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
00807 
00808   def __init__(self):
00809     self.in_a_function = False
00810     self.lines_in_function = 0
00811     self.current_function = ''
00812 
00813   def Begin(self, function_name):
00814     """Start analyzing function body.
00815 
00816     Args:
00817       function_name: The name of the function being tracked.
00818     """
00819     self.in_a_function = True
00820     self.lines_in_function = 0
00821     self.current_function = function_name
00822 
00823   def Count(self):
00824     """Count line in current function body."""
00825     if self.in_a_function:
00826       self.lines_in_function += 1
00827 
00828   def Check(self, error, filename, linenum):
00829     """Report if too many lines in function body.
00830 
00831     Args:
00832       error: The function to call with any errors found.
00833       filename: The name of the current file.
00834       linenum: The number of the line to check.
00835     """
00836     if Match(r'T(EST|est)', self.current_function):
00837       base_trigger = self._TEST_TRIGGER
00838     else:
00839       base_trigger = self._NORMAL_TRIGGER
00840     trigger = base_trigger * 2**_VerboseLevel()
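    # Worked example (illustrative, not in the original file): at --v=0 a
    # non-test function triggers at 250 lines; each +1 in verbosity doubles
    # the trigger.  A 1200-line non-test function at --v=0 gets
    # error_level = int(log2(1200 / 250)) = 2 below.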
00841 
00842     if self.lines_in_function > trigger:
00843       error_level = int(math.log(self.lines_in_function / base_trigger, 2))
00844       # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
00845       if error_level > 5:
00846         error_level = 5
00847       error(filename, linenum, 'readability/fn_size', error_level,
00848             'Small and focused functions are preferred:'
00849             ' %s has %d non-comment lines'
00850             ' (error triggered by exceeding %d lines).'  % (
00851                 self.current_function, self.lines_in_function, trigger))
00852 
00853   def End(self):
00854     """Stop analyzing function body."""
00855     self.in_a_function = False
00856 
00857 
00858 class _IncludeError(Exception):
00859   """Indicates a problem with the include order in a file."""
00860   pass
00861 
00862 
00863 class FileInfo:
00864   """Provides utility functions for filenames.
00865 
00866   FileInfo provides easy access to the components of a file's path
00867   relative to the project root.
00868   """
00869 
00870   def __init__(self, filename):
00871     self._filename = filename
00872 
00873   def FullName(self):
00874     """Makes Windows paths like Unix: absolute path with forward slashes."""
00875     return os.path.abspath(self._filename).replace('\\', '/')
00876 
00877   def RepositoryName(self):
00878     """FullName after removing the local path to the repository.
00879 
00880     If we have a real absolute path name here we can try to do something smart:
00881     detecting the root of the checkout and truncating /path/to/checkout from
00882     the name so that we get header guards that don't include things like
00883     "C:\Documents and Settings\..." or "/home/username/..." in them and thus
00884     people on different computers who have checked the source out to different
00885     locations won't see bogus errors.
00886     """
00887     fullname = self.FullName()
00888 
00889     if os.path.exists(fullname):
00890       project_dir = os.path.dirname(fullname)
00891 
00892       if os.path.exists(os.path.join(project_dir, ".svn")):
00893         # If there's a .svn file in the current directory, we recursively look
00894         # up the directory tree for the top of the SVN checkout
00895         root_dir = project_dir
00896         one_up_dir = os.path.dirname(root_dir)
00897         while os.path.exists(os.path.join(one_up_dir, ".svn")):
00898           root_dir = os.path.dirname(root_dir)
00899           one_up_dir = os.path.dirname(one_up_dir)
00900 
00901         prefix = os.path.commonprefix([root_dir, project_dir])
00902         return fullname[len(prefix) + 1:]
00903 
00904       # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
00905       # searching up from the current path.
00906       root_dir = os.path.dirname(fullname)
00907       while (root_dir != os.path.dirname(root_dir) and
00908              not os.path.exists(os.path.join(root_dir, ".git")) and
00909              not os.path.exists(os.path.join(root_dir, ".hg")) and
00910              not os.path.exists(os.path.join(root_dir, ".svn"))):
00911         root_dir = os.path.dirname(root_dir)
00912 
00913       if (os.path.exists(os.path.join(root_dir, ".git")) or
00914           os.path.exists(os.path.join(root_dir, ".hg")) or
00915           os.path.exists(os.path.join(root_dir, ".svn"))):
00916         prefix = os.path.commonprefix([root_dir, project_dir])
00917         return fullname[len(prefix) + 1:]
00918 
00919     # Don't know what to do; header guard warnings may be wrong...
00920     return fullname
00921 
00922   def Split(self):
00923     """Splits the file into the directory, basename, and extension.
00924 
00925     For 'chrome/browser/browser.cc', Split() would
00926     return ('chrome/browser', 'browser', '.cc')
00927 
00928     Returns:
00929       A tuple of (directory, basename, extension).
00930     """
00931 
00932     googlename = self.RepositoryName()
00933     project, rest = os.path.split(googlename)
00934     return (project,) + os.path.splitext(rest)
00935 
00936   def BaseName(self):
00937     """File base name - text after the final slash, before the final period."""
00938     return self.Split()[1]
00939 
00940   def Extension(self):
00941     """File extension - text following the final period."""
00942     return self.Split()[2]
00943 
00944   def NoExtension(self):
00945     """Returns the path with the file extension removed."""
00946     return '/'.join(self.Split()[0:2])
00947 
00948   def IsSource(self):
00949     """File has a source file extension."""
00950     return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
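
  # Illustrative values (not part of the original file): for the repository
  # file 'chrome/browser/browser.cc', BaseName() is 'browser', Extension()
  # is '.cc', NoExtension() is 'chrome/browser/browser' and IsSource() is
  # True.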
00951 
00952 
00953 def _ShouldPrintError(category, confidence, linenum):
00954   """If confidence >= verbose, category passes filter and is not suppressed."""
00955 
00956   # There are three ways we might decide not to print an error message:
00957   # a "NOLINT(category)" comment appears in the source,
00958   # the verbosity level isn't high enough, or the filters filter it out.
00959   if IsErrorSuppressedByNolint(category, linenum):
00960     return False
00961   if confidence < _cpplint_state.verbose_level:
00962     return False
00963 
00964   is_filtered = False
00965   for one_filter in _Filters():
00966     if one_filter.startswith('-'):
00967       if category.startswith(one_filter[1:]):
00968         is_filtered = True
00969     elif one_filter.startswith('+'):
00970       if category.startswith(one_filter[1:]):
00971         is_filtered = False
00972     else:
00973       assert False  # should have been checked for in SetFilters.
00974   if is_filtered:
00975     return False
00976 
00977   return True
00978 
00979 
00980 def Error(filename, linenum, category, confidence, message):
00981   """Logs the fact we've found a lint error.
00982 
00983   We log where the error was found, and also our confidence in the error,
00984   that is, how certain we are this is a legitimate style regression, and
00985   not a misidentification or a use that's sometimes justified.
00986 
00987   False positives can be suppressed by the use of
00988   "NOLINT(category)"  comments on the offending line.  These are
00989   parsed into _error_suppressions.
00990 
00991   Args:
00992     filename: The name of the file containing the error.
00993     linenum: The number of the line containing the error.
00994     category: A string used to describe the "category" this bug
00995       falls under: "whitespace", say, or "runtime".  Categories
00996       may have a hierarchy separated by slashes: "whitespace/indent".
00997     confidence: A number from 1-5 representing a confidence score for
00998       the error, with 5 meaning that we are certain of the problem,
00999       and 1 meaning that it could be a legitimate construct.
01000     message: The error message.
01001   """
01002   if _ShouldPrintError(category, confidence, linenum):
01003     _cpplint_state.IncrementErrorCount(category)
01004     if _cpplint_state.output_format == 'vs7':
01005       sys.stderr.write('%s(%s):  %s  [%s] [%d]\n' % (
01006           filename, linenum, message, category, confidence))
01007     elif _cpplint_state.output_format == 'eclipse':
01008       sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
01009           filename, linenum, message, category, confidence))
01010     else:
01011       sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
01012           filename, linenum, message, category, confidence))
01013 
01014 
01015 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
01016 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
01017     r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
01018 # Matches strings.  Escape codes should already be removed by ESCAPES.
01019 _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
01020 # Matches characters.  Escape codes should already be removed by ESCAPES.
01021 _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
01022 # Matches multi-line C++ comments.
01023 # This RE is a little bit more complicated than one might expect, because we
01024 # have to take care of how spaces are removed, so we can handle comments
01025 # inside statements better.
01026 # The current rule is: We only clear spaces from both sides when we're at the
01027 # end of the line. Otherwise, we try to remove spaces from the right side;
01028 # if that doesn't work, we try the left side, but only if there's a
01029 # non-word character on the right.
01030 _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
01031     r"""(\s*/\*.*\*/\s*$|
01032             /\*.*\*/\s+|
01033          \s+/\*.*\*/(?=\W)|
01034             /\*.*\*/)""", re.VERBOSE)
01035 
01036 
01037 def IsCppString(line):
01038   """Does the line end so that the next symbol is inside a string constant.
01039 
01040   This function does not consider single-line nor multi-line comments.
01041 
01042   Args:
01043     line: a partial line of code, characters 0..n.
01044 
01045   Returns:
01046     True, if next character appended to 'line' is inside a
01047     string constant.
01048   """
01049 
01050   line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
01051   return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
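
# Illustrative behaviour of IsCppString (not part of the original file):
#   IsCppString('x = "abc')    -> True   (one unterminated double quote)
#   IsCppString('x = "abc"')   -> False  (quotes are balanced)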
01052 
01053 
01054 def CleanseRawStrings(raw_lines):
01055   """Removes C++11 raw strings from lines.
01056 
01057     Before:
01058       static const char kData[] = R"(
01059           multi-line string
01060           )";
01061 
01062     After:
01063       static const char kData[] = ""
01064           (replaced by blank line)
01065           "";
01066 
01067   Args:
01068     raw_lines: list of raw lines.
01069 
01070   Returns:
01071     list of lines with C++11 raw strings replaced by empty strings.
01072   """
01073 
01074   delimiter = None
01075   lines_without_raw_strings = []
01076   for line in raw_lines:
01077     if delimiter:
01078       # Inside a raw string, look for the end
01079       end = line.find(delimiter)
01080       if end >= 0:
01081         # Found the end of the string, match leading space for this
01082         # line and resume copying the original lines, and also insert
01083         # a "" on the last line.
01084         leading_space = Match(r'^(\s*)\S', line)
01085         line = leading_space.group(1) + '""' + line[end + len(delimiter):]
01086         delimiter = None
01087       else:
01088         # Haven't found the end yet, append a blank line.
01089         line = ''
01090 
01091     else:
01092       # Look for beginning of a raw string.
01093       # See 2.14.15 [lex.string] for syntax.
01094       matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
01095       if matched:
01096         delimiter = ')' + matched.group(2) + '"'
01097 
01098         end = matched.group(3).find(delimiter)
01099         if end >= 0:
01100           # Raw string ended on same line
01101           line = (matched.group(1) + '""' +
01102                   matched.group(3)[end + len(delimiter):])
01103           delimiter = None
01104         else:
01105           # Start of a multi-line raw string
01106           line = matched.group(1) + '""'
01107 
01108     lines_without_raw_strings.append(line)
01109 
01110   # TODO(unknown): if delimiter is not None here, we might want to
01111   # emit a warning for unterminated string.
01112   return lines_without_raw_strings
01113 
01114 
01115 def FindNextMultiLineCommentStart(lines, lineix):
01116   """Find the beginning marker for a multiline comment."""
01117   while lineix < len(lines):
01118     if lines[lineix].strip().startswith('/*'):
01119       # Only return this marker if the comment goes beyond this line
01120       if lines[lineix].strip().find('*/', 2) < 0:
01121         return lineix
01122     lineix += 1
01123   return len(lines)
01124 
01125 
01126 def FindNextMultiLineCommentEnd(lines, lineix):
01127   """We are inside a comment, find the end marker."""
01128   while lineix < len(lines):
01129     if lines[lineix].strip().endswith('*/'):
01130       return lineix
01131     lineix += 1
01132   return len(lines)
01133 
01134 
01135 def RemoveMultiLineCommentsFromRange(lines, begin, end):
01136   """Clears a range of lines for multi-line comments."""
01137   # Having // dummy comments makes the lines non-empty, so we will not get
01138   # unnecessary blank line warnings later in the code.
01139   for i in range(begin, end):
01140     lines[i] = '// dummy'
01141 
01142 
01143 def RemoveMultiLineComments(filename, lines, error):
01144   """Removes multiline (c-style) comments from lines."""
01145   lineix = 0
01146   while lineix < len(lines):
01147     lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
01148     if lineix_begin >= len(lines):
01149       return
01150     lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
01151     if lineix_end >= len(lines):
01152       error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
01153             'Could not find end of multi-line comment')
01154       return
01155     RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
01156     lineix = lineix_end + 1
01157 
01158 
01159 def CleanseComments(line):
01160   """Removes //-comments and single-line C-style /* */ comments.
01161 
01162   Args:
01163     line: A line of C++ source.
01164 
01165   Returns:
01166     The line with single-line comments removed.
01167   """
01168   commentpos = line.find('//')
01169   if commentpos != -1 and not IsCppString(line[:commentpos]):
01170     line = line[:commentpos].rstrip()
01171   # get rid of /* ... */
01172   return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
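
# Illustrative behaviour of CleanseComments (not part of the original file):
#   CleanseComments('int i = 0;  // counter')  -> 'int i = 0;'
#   CleanseComments('f(a, /* unused */ b);')   -> 'f(a, b);'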
01173 
01174 
01175 class CleansedLines(object):
01176   """Holds 3 copies of all lines with different preprocessing applied to them.
01177 
01178   1) elided member contains lines without strings and comments,
01179   2) lines member contains lines without comments, and
01180   3) raw_lines member contains all the lines without processing.
01181   All three members are of <type 'list'>, and of the same length.
01182   """
01183 
01184   def __init__(self, lines):
01185     self.elided = []
01186     self.lines = []
01187     self.raw_lines = lines
01188     self.num_lines = len(lines)
01189     self.lines_without_raw_strings = CleanseRawStrings(lines)
01190     for linenum in range(len(self.lines_without_raw_strings)):
01191       self.lines.append(CleanseComments(
01192           self.lines_without_raw_strings[linenum]))
01193       elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
01194       self.elided.append(CleanseComments(elided))
01195 
01196   def NumLines(self):
01197     """Returns the number of lines represented."""
01198     return self.num_lines
01199 
01200   @staticmethod
01201   def _CollapseStrings(elided):
01202     """Collapses strings and chars on a line to simple "" or '' blocks.
01203 
01204     We nix strings first so we're not fooled by text like '"http://"'
01205 
01206     Args:
01207       elided: The line being processed.
01208 
01209     Returns:
01210       The line with collapsed strings.
01211     """
01212     if not _RE_PATTERN_INCLUDE.match(elided):
01213       # Remove escaped characters first to make quote/single quote collapsing
01214       # basic.  Things that look like escaped characters shouldn't occur
01215       # outside of strings and chars.
01216       elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
01217       elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
01218       elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
01219     return elided
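
  # Illustrative behaviour (not part of the original file): the source line
  #     printf("%d\n", 'x');
  # is collapsed to
  #     printf("", '');
  # so later checks are not confused by quotes or escape sequences.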
01220 
01221 
01222 def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
01223   """Find the position just after the matching endchar.
01224 
01225   Args:
01226     line: a CleansedLines line.
01227     startpos: start searching at this position.
01228     depth: nesting level at startpos.
01229     startchar: expression opening character.
01230     endchar: expression closing character.
01231 
01232   Returns:
01233     On finding matching endchar: (index just after matching endchar, 0)
01234     Otherwise: (-1, new depth at end of this line)
01235   """
01236   for i in xrange(startpos, len(line)):
01237     if line[i] == startchar:
01238       depth += 1
01239     elif line[i] == endchar:
01240       depth -= 1
01241       if depth == 0:
01242         return (i + 1, 0)
01243   return (-1, depth)
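
# Illustrative behaviour (not part of the original file):
#   FindEndOfExpressionInLine('f(a, (b))', 1, 0, '(', ')')  -> (9, 0)
#   FindEndOfExpressionInLine('f(a,', 1, 0, '(', ')')       -> (-1, 1)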
01244 
01245 
01246 def CloseExpression(clean_lines, linenum, pos):
01247   """If input points to ( or { or [ or <, finds the position that closes it.
01248 
01249   If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
01250   linenum/pos that correspond to the closing of the expression.
01251 
01252   Args:
01253     clean_lines: A CleansedLines instance containing the file.
01254     linenum: The number of the line to check.
01255     pos: A position on the line.
01256 
01257   Returns:
01258     A tuple (line, linenum, pos) pointer *past* the closing brace, or
01259     (line, len(lines), -1) if we never find a close.  Note we ignore
01260     strings and comments when matching; and the line we return is the
01261     'cleansed' line at linenum.
01262   """
01263 
01264   line = clean_lines.elided[linenum]
01265   startchar = line[pos]
01266   if startchar not in '({[<':
01267     return (line, clean_lines.NumLines(), -1)
01268   if startchar == '(': endchar = ')'
01269   if startchar == '[': endchar = ']'
01270   if startchar == '{': endchar = '}'
01271   if startchar == '<': endchar = '>'
01272 
01273   # Check first line
01274   (end_pos, num_open) = FindEndOfExpressionInLine(
01275       line, pos, 0, startchar, endchar)
01276   if end_pos > -1:
01277     return (line, linenum, end_pos)
01278 
01279   # Continue scanning forward
01280   while linenum < clean_lines.NumLines() - 1:
01281     linenum += 1
01282     line = clean_lines.elided[linenum]
01283     (end_pos, num_open) = FindEndOfExpressionInLine(
01284         line, 0, num_open, startchar, endchar)
01285     if end_pos > -1:
01286       return (line, linenum, end_pos)
01287 
01288   # Did not find endchar before end of file, give up
01289   return (line, clean_lines.NumLines(), -1)
01290 
01291 
01292 def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
01293   """Find position at the matching startchar.
01294 
01295   This is almost the reverse of FindEndOfExpressionInLine, but note
01296   that the input position and returned position differ by 1.
01297 
01298   Args:
01299     line: a CleansedLines line.
01300     endpos: start searching at this position.
01301     depth: nesting level at endpos.
01302     startchar: expression opening character.
01303     endchar: expression closing character.
01304 
01305   Returns:
01306     On finding matching startchar: (index at matching startchar, 0)
01307     Otherwise: (-1, new depth at beginning of this line)
01308   """
01309   for i in xrange(endpos, -1, -1):
01310     if line[i] == endchar:
01311       depth += 1
01312     elif line[i] == startchar:
01313       depth -= 1
01314       if depth == 0:
01315         return (i, 0)
01316   return (-1, depth)
01317 
01318 
01319 def ReverseCloseExpression(clean_lines, linenum, pos):
01320   """If input points to ) or } or ] or >, finds the position that opens it.
01321 
01322   If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
01323   linenum/pos that correspond to the opening of the expression.
01324 
01325   Args:
01326     clean_lines: A CleansedLines instance containing the file.
01327     linenum: The number of the line to check.
01328     pos: A position on the line.
01329 
01330   Returns:
01331     A tuple (line, linenum, pos) pointer *at* the opening brace, or
01332     (line, 0, -1) if we never find the matching opening brace.  Note
01333     we ignore strings and comments when matching; and the line we
01334     return is the 'cleansed' line at linenum.
01335   """
01336   line = clean_lines.elided[linenum]
01337   endchar = line[pos]
01338   if endchar not in ')}]>':
01339     return (line, 0, -1)
01340   if endchar == ')': startchar = '('
01341   if endchar == ']': startchar = '['
01342   if endchar == '}': startchar = '{'
01343   if endchar == '>': startchar = '<'
01344 
01345   # Check last line
01346   (start_pos, num_open) = FindStartOfExpressionInLine(
01347       line, pos, 0, startchar, endchar)
01348   if start_pos > -1:
01349     return (line, linenum, start_pos)
01350 
01351   # Continue scanning backward
01352   while linenum > 0:
01353     linenum -= 1
01354     line = clean_lines.elided[linenum]
01355     (start_pos, num_open) = FindStartOfExpressionInLine(
01356         line, len(line) - 1, num_open, startchar, endchar)
01357     if start_pos > -1:
01358       return (line, linenum, start_pos)
01359 
01360   # Did not find startchar before beginning of file, give up
01361   return (line, 0, -1)
01362 
01363 
01364 def CheckForCopyright(filename, lines, error):
01365   """Logs an error if no Copyright message appears at the top of the file."""
01366 
01367   # We'll say it should occur by line 10. Don't forget there's a
01368   # dummy line at the front.
01369   for line in xrange(1, min(len(lines), 11)):
01370     if re.search(r'Copyright', lines[line], re.I): break
01371   else:                       # means no copyright line was found
01372     error(filename, 0, 'legal/copyright', 5,
01373           'No copyright message found.  '
01374           'You should have a line: "Copyright [year] <Copyright Owner>"')
01375 
01376 
01377 def GetHeaderGuardCPPVariable(filename):
01378   """Returns the CPP variable that should be used as a header guard.
01379 
01380   Args:
01381     filename: The name of a C++ header file.
01382 
01383   Returns:
01384     The CPP variable that should be used as a header guard in the
01385     named file.
01386 
01387   """
01388 
01389   # Restores original filename in case that cpplint is invoked from Emacs's
01390   # flymake.
01391   filename = re.sub(r'_flymake\.h$', '.h', filename)
01392   filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
01393 
01394   fileinfo = FileInfo(filename)
01395   file_path_from_root = fileinfo.RepositoryName()
01396   if _root:
01397     file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
01398   return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
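
# Illustrative value (not part of the original file): for a repository file
# chrome/browser/ui/browser.h with no --root flag, this returns
# 'CHROME_BROWSER_UI_BROWSER_H_'.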
01399 
01400 
01401 def CheckForHeaderGuard(filename, lines, error):
01402   """Checks that the file contains a header guard.
01403 
01404   Logs an error if no #ifndef header guard is present.  For other
01405   headers, checks that the full pathname is used.
01406 
01407   Args:
01408     filename: The name of the C++ header file.
01409     lines: An array of strings, each representing a line of the file.
01410     error: The function to call with any errors found.
01411   """
01412 
01413   cppvar = GetHeaderGuardCPPVariable(filename)
01414 
01415   ifndef = None
01416   ifndef_linenum = 0
01417   define = None
01418   endif = None
01419   endif_linenum = 0
01420   for linenum, line in enumerate(lines):
01421     linesplit = line.split()
01422     if len(linesplit) >= 2:
01423       # find the first occurrence of #ifndef and #define, save arg
01424       if not ifndef and linesplit[0] == '#ifndef':
01425         # set ifndef to the header guard presented on the #ifndef line.
01426         ifndef = linesplit[1]
01427         ifndef_linenum = linenum
01428       if not define and linesplit[0] == '#define':
01429         define = linesplit[1]
01430     # find the last occurrence of #endif, save entire line
01431     if line.startswith('#endif'):
01432       endif = line
01433       endif_linenum = linenum
01434 
01435   if not ifndef:
01436     error(filename, 0, 'build/header_guard', 5,
01437           'No #ifndef header guard found, suggested CPP variable is: %s' %
01438           cppvar)
01439     return
01440 
01441   if not define:
01442     error(filename, 0, 'build/header_guard', 5,
01443           'No #define header guard found, suggested CPP variable is: %s' %
01444           cppvar)
01445     return
01446 
01447   # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
01448   # for backward compatibility.
01449   if ifndef != cppvar:
01450     error_level = 0
01451     if ifndef != cppvar + '_':
01452       error_level = 5
01453 
01454     ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
01455                             error)
01456     error(filename, ifndef_linenum, 'build/header_guard', error_level,
01457           '#ifndef header guard has wrong style, please use: %s' % cppvar)
01458 
01459   if define != ifndef:
01460     error(filename, 0, 'build/header_guard', 5,
01461           '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
01462           cppvar)
01463     return
01464 
01465   if endif != ('#endif  // %s' % cppvar):
01466     error_level = 0
01467     if endif != ('#endif  // %s' % (cppvar + '_')):
01468       error_level = 5
01469 
01470     ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
01471                             error)
01472     error(filename, endif_linenum, 'build/header_guard', error_level,
01473           '#endif line should be "#endif  // %s"' % cppvar)
01474 
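# Added annotation (illustrative): a header whose repository-relative path is
# foo/bar.h satisfies the checks above when wrapped as
#   #ifndef FOO_BAR_H_
#   #define FOO_BAR_H_
#   ...
#   #endif  // FOO_BAR_H_
# The legacy form FOO_BAR_H__ (extra trailing underscore) is only reported at
# confidence 0, per the backward-compatibility comment above.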
01475 
01476 def CheckForBadCharacters(filename, lines, error):
01477   """Logs an error for each line containing bad characters.
01478 
01479   Two kinds of bad characters:
01480 
01481   1. Unicode replacement characters: These indicate that either the file
01482   contained invalid UTF-8 (likely) or Unicode replacement characters (which
01483   it shouldn't).  Note that it's possible for this to throw off line
01484   numbering if the invalid UTF-8 occurred adjacent to a newline.
01485 
01486   2. NUL bytes.  These are problematic for some tools.
01487 
01488   Args:
01489     filename: The name of the current file.
01490     lines: An array of strings, each representing a line of the file.
01491     error: The function to call with any errors found.
01492   """
01493   for linenum, line in enumerate(lines):
01494     if u'\ufffd' in line:
01495       error(filename, linenum, 'readability/utf8', 5,
01496             'Line contains invalid UTF-8 (or Unicode replacement character).')
01497     if '\0' in line:
01498       error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
01499 
01500 
01501 def CheckForNewlineAtEOF(filename, lines, error):
01502   """Logs an error if there is no newline char at the end of the file.
01503 
01504   Args:
01505     filename: The name of the current file.
01506     lines: An array of strings, each representing a line of the file.
01507     error: The function to call with any errors found.
01508   """
01509 
01510   # The array lines() was created by adding two newlines to the
01511   # original file (go figure), then splitting on \n.
01512   # To verify that the file ends in \n, we just have to make sure the
01513   # last-but-two element of lines() exists and is empty.
01514   if len(lines) < 3 or lines[-2]:
01515     error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
01516           'Could not find a newline character at the end of the file.')
01517 
01518 
01519 def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
01520   """Logs an error if we see /* ... */ or "..." that extend past one line.
01521 
01522   /* ... */ comments are legit inside macros, for one line.
01523   Otherwise, we prefer // comments, so it's ok to warn about the
01524   other.  Likewise, it's ok for strings to extend across multiple
01525   lines, as long as a line continuation character (backslash)
01526   terminates each line. Although not currently prohibited by the C++
01527   style guide, it's ugly and unnecessary. We don't do well with either
01528   in this lint program, so we warn about both.
01529 
01530   Args:
01531     filename: The name of the current file.
01532     clean_lines: A CleansedLines instance containing the file.
01533     linenum: The number of the line to check.
01534     error: The function to call with any errors found.
01535   """
01536   line = clean_lines.elided[linenum]
01537 
01538   # Remove all \\ (escaped backslashes) from the line. They are OK, and the
01539   # second (escaped) slash may trigger later \" detection erroneously.
01540   line = line.replace('\\\\', '')
01541 
01542   if line.count('/*') > line.count('*/'):
01543     error(filename, linenum, 'readability/multiline_comment', 5,
01544           'Complex multi-line /*...*/-style comment found. '
01545           'Lint may give bogus warnings.  '
01546           'Consider replacing these with //-style comments, '
01547           'with #if 0...#endif, '
01548           'or with more clearly structured multi-line comments.')
01549 
01550   if (line.count('"') - line.count('\\"')) % 2:
01551     error(filename, linenum, 'readability/multiline_string', 5,
01552           'Multi-line string ("...") found.  This lint script doesn\'t '
01553           'do well with such strings, and may give bogus warnings.  '
01554           'Use C++11 raw strings or concatenation instead.')
01555 
01556 
01557 threading_list = (
01558     ('asctime(', 'asctime_r('),
01559     ('ctime(', 'ctime_r('),
01560     ('getgrgid(', 'getgrgid_r('),
01561     ('getgrnam(', 'getgrnam_r('),
01562     ('getlogin(', 'getlogin_r('),
01563     ('getpwnam(', 'getpwnam_r('),
01564     ('getpwuid(', 'getpwuid_r('),
01565     ('gmtime(', 'gmtime_r('),
01566     ('localtime(', 'localtime_r('),
01567     ('rand(', 'rand_r('),
01568     ('strtok(', 'strtok_r('),
01569     ('ttyname(', 'ttyname_r('),
01570     )
01571 
01572 
01573 def CheckPosixThreading(filename, clean_lines, linenum, error):
01574   """Checks for calls to thread-unsafe functions.
01575 
01576   Much code was originally written without multi-threading in mind, and
01577   many engineers learned POSIX before its threading extensions were
01578   added, so they tend to reach for the older, thread-unsafe calls.
01579   These checks steer engineers toward the thread-safe variants when
01580   using POSIX directly.
01581 
01582   Args:
01583     filename: The name of the current file.
01584     clean_lines: A CleansedLines instance containing the file.
01585     linenum: The number of the line to check.
01586     error: The function to call with any errors found.
01587   """
01588   line = clean_lines.elided[linenum]
01589   for single_thread_function, multithread_safe_function in threading_list:
01590     ix = line.find(single_thread_function)
01591     # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
01592     if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
01593                                 line[ix - 1] not in ('_', '.', '>'))):
01594       error(filename, linenum, 'runtime/threadsafe_fn', 2,
01595             'Consider using ' + multithread_safe_function +
01596             '...) instead of ' + single_thread_function +
01597             '...) for improved thread safety.')
01598 
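# Added annotation (illustrative): a statement such as
#   struct tm* t = localtime(&now);
# is reported as runtime/threadsafe_fn with the suggestion to use
# localtime_r(...), while calls like obj.localtime(...) or my_localtime(...)
# are skipped because of the preceding-character test above.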
01599 
01600 def CheckVlogArguments(filename, clean_lines, linenum, error):
01601   """Checks that VLOG() is only used for defining a logging level.
01602 
01603   For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
01604   VLOG(FATAL) are not.
01605 
01606   Args:
01607     filename: The name of the current file.
01608     clean_lines: A CleansedLines instance containing the file.
01609     linenum: The number of the line to check.
01610     error: The function to call with any errors found.
01611   """
01612   line = clean_lines.elided[linenum]
01613   if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
01614     error(filename, linenum, 'runtime/vlog', 5,
01615           'VLOG() should be used with numeric verbosity level.  '
01616           'Use LOG() if you want symbolic severity levels.')
01617 
01618 
01619 # Matches invalid increment: *count++, which moves pointer instead of
01620 # incrementing a value.
01621 _RE_PATTERN_INVALID_INCREMENT = re.compile(
01622     r'^\s*\*\w+(\+\+|--);')
01623 
01624 
01625 def CheckInvalidIncrement(filename, clean_lines, linenum, error):
01626   """Checks for invalid increment *count++.
01627 
01628   For example, the following function:
01629   void increment_counter(int* count) {
01630     *count++;
01631   }
01632   is invalid, because it effectively does count++, moving the pointer,
01633   and should be replaced with ++*count, (*count)++ or *count += 1.
01634 
01635   Args:
01636     filename: The name of the current file.
01637     clean_lines: A CleansedLines instance containing the file.
01638     linenum: The number of the line to check.
01639     error: The function to call with any errors found.
01640   """
01641   line = clean_lines.elided[linenum]
01642   if _RE_PATTERN_INVALID_INCREMENT.match(line):
01643     error(filename, linenum, 'runtime/invalid_increment', 5,
01644           'Changing pointer instead of value (or unused value of operator*).')
01645 
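# Added annotation (illustrative, not part of upstream cpplint): the pattern
# above flags the dereference-then-increment form but accepts the corrected
# forms that the message suggests.
assert _RE_PATTERN_INVALID_INCREMENT.match('  *count++;')
assert not _RE_PATTERN_INVALID_INCREMENT.match('  (*count)++;')
assert not _RE_PATTERN_INVALID_INCREMENT.match('  ++*count;')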
01646 
01647 class _BlockInfo(object):
01648   """Stores information about a generic block of code."""
01649 
01650   def __init__(self, seen_open_brace):
01651     self.seen_open_brace = seen_open_brace
01652     self.open_parentheses = 0
01653     self.inline_asm = _NO_ASM
01654 
01655   def CheckBegin(self, filename, clean_lines, linenum, error):
01656     """Run checks that apply to the text up to the opening brace.
01657 
01658     This is mostly for checking the text after the class identifier
01659     and the "{", usually where the base class is specified.  For other
01660     blocks, there isn't much to check, so we always pass.
01661 
01662     Args:
01663       filename: The name of the current file.
01664       clean_lines: A CleansedLines instance containing the file.
01665       linenum: The number of the line to check.
01666       error: The function to call with any errors found.
01667     """
01668     pass
01669 
01670   def CheckEnd(self, filename, clean_lines, linenum, error):
01671     """Run checks that apply to the text after the closing brace.
01672 
01673     This is mostly used for checking end of namespace comments.
01674 
01675     Args:
01676       filename: The name of the current file.
01677       clean_lines: A CleansedLines instance containing the file.
01678       linenum: The number of the line to check.
01679       error: The function to call with any errors found.
01680     """
01681     pass
01682 
01683 
01684 class _ClassInfo(_BlockInfo):
01685   """Stores information about a class."""
01686 
01687   def __init__(self, name, class_or_struct, clean_lines, linenum):
01688     _BlockInfo.__init__(self, False)
01689     self.name = name
01690     self.starting_linenum = linenum
01691     self.is_derived = False
01692     if class_or_struct == 'struct':
01693       self.access = 'public'
01694       self.is_struct = True
01695     else:
01696       self.access = 'private'
01697       self.is_struct = False
01698 
01699     # Remember initial indentation level for this class.  Using raw_lines here
01700     # instead of elided to account for leading comments.
01701     initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
01702     if initial_indent:
01703       self.class_indent = len(initial_indent.group(1))
01704     else:
01705       self.class_indent = 0
01706 
01707     # Try to find the end of the class.  This will be confused by things like:
01708     #   class A {
01709     #   } *x = { ...
01710     #
01711     # But it's still good enough for CheckSectionSpacing.
01712     self.last_line = 0
01713     depth = 0
01714     for i in range(linenum, clean_lines.NumLines()):
01715       line = clean_lines.elided[i]
01716       depth += line.count('{') - line.count('}')
01717       if not depth:
01718         self.last_line = i
01719         break
01720 
01721   def CheckBegin(self, filename, clean_lines, linenum, error):
01722     # Look for a bare ':'
01723     if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
01724       self.is_derived = True
01725 
01726   def CheckEnd(self, filename, clean_lines, linenum, error):
01727     # Check that closing brace is aligned with beginning of the class.
01728     # Only do this if the closing brace is indented by only whitespaces.
01729     # This means we will not check single-line class definitions.
01730     indent = Match(r'^( *)\}', clean_lines.elided[linenum])
01731     if indent and len(indent.group(1)) != self.class_indent:
01732       if self.is_struct:
01733         parent = 'struct ' + self.name
01734       else:
01735         parent = 'class ' + self.name
01736       error(filename, linenum, 'whitespace/indent', 3,
01737             'Closing brace should be aligned with beginning of %s' % parent)
01738 
01739 
01740 class _NamespaceInfo(_BlockInfo):
01741   """Stores information about a namespace."""
01742 
01743   def __init__(self, name, linenum):
01744     _BlockInfo.__init__(self, False)
01745     self.name = name or ''
01746     self.starting_linenum = linenum
01747 
01748   def CheckEnd(self, filename, clean_lines, linenum, error):
01749     """Check end of namespace comments."""
01750     line = clean_lines.raw_lines[linenum]
01751 
01752     # Check how many lines are enclosed in this namespace.  Don't issue
01753     # a warning for a missing namespace comment if there aren't enough
01754     # lines.  However, do apply the check if an end-of-namespace comment
01755     # is already present and it's incorrect.
01756     #
01757     # TODO(unknown): We always want to check end of namespace comments
01758     # if a namespace is large, but sometimes we also want to apply the
01759     # check if a short namespace contained nontrivial things (something
01760     # other than forward declarations).  There is currently no logic on
01761     # deciding what these nontrivial things are, so this check is
01762     # triggered by namespace size only, which works most of the time.
01763     if (linenum - self.starting_linenum < 10
01764         and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
01765       return
01766 
01767     # Look for matching comment at end of namespace.
01768     #
01769     # Note that we accept C style "/* */" comments for terminating
01770     # namespaces, so that code that terminates namespaces inside
01771     # preprocessor macros can be cpplint clean.
01772     #
01773     # We also accept stuff like "// end of namespace <name>." with the
01774     # period at the end.
01775     #
01776     # Besides these, we don't accept anything else, otherwise we might
01777     # get false negatives when the existing comment is a substring of the
01778     # expected namespace.
01779     if self.name:
01780       # Named namespace
01781       if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
01782                     r'[\*/\.\\\s]*$'),
01783                    line):
01784         error(filename, linenum, 'readability/namespace', 5,
01785               'Namespace should be terminated with "// namespace %s"' %
01786               self.name)
01787     else:
01788       # Anonymous namespace
01789       if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
01790         error(filename, linenum, 'readability/namespace', 5,
01791               'Namespace should be terminated with "// namespace"')
01792 
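# Added annotation (illustrative): for "namespace foo { ... }" the check above
# accepts terminators such as
#   }  // namespace foo
#   }  /* namespace foo */
#   }  // end of namespace foo.
# while a bare "}" or "}  // foo" on a sufficiently long namespace is flagged
# as readability/namespace.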
01793 
01794 class _PreprocessorInfo(object):
01795   """Stores checkpoints of nesting stacks when #if/#else is seen."""
01796 
01797   def __init__(self, stack_before_if):
01798     # The entire nesting stack before #if
01799     self.stack_before_if = stack_before_if
01800 
01801     # The entire nesting stack up to #else
01802     self.stack_before_else = []
01803 
01804     # Whether we have already seen #else or #elif
01805     self.seen_else = False
01806 
01807 
01808 class _NestingState(object):
01809   """Holds states related to parsing braces."""
01810 
01811   def __init__(self):
01812     # Stack for tracking all braces.  An object is pushed whenever we
01813     # see a "{", and popped when we see a "}".  Only 3 types of
01814     # objects are possible:
01815     # - _ClassInfo: a class or struct.
01816     # - _NamespaceInfo: a namespace.
01817     # - _BlockInfo: some other type of block.
01818     self.stack = []
01819 
01820     # Stack of _PreprocessorInfo objects.
01821     self.pp_stack = []
01822 
01823   def SeenOpenBrace(self):
01824     """Check if we have seen the opening brace for the innermost block.
01825 
01826     Returns:
01827       True if we have seen the opening brace, False if the innermost
01828       block is still expecting an opening brace.
01829     """
01830     return (not self.stack) or self.stack[-1].seen_open_brace
01831 
01832   def InNamespaceBody(self):
01833     """Check if we are currently one level inside a namespace body.
01834 
01835     Returns:
01836       True if top of the stack is a namespace block, False otherwise.
01837     """
01838     return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
01839 
01840   def UpdatePreprocessor(self, line):
01841     """Update preprocessor stack.
01842 
01843     We need to handle preprocessors due to classes like this:
01844       #ifdef SWIG
01845       struct ResultDetailsPageElementExtensionPoint {
01846       #else
01847       struct ResultDetailsPageElementExtensionPoint : public Extension {
01848       #endif
01849 
01850     We make the following assumptions (good enough for most files):
01851     - Preprocessor condition evaluates to true from #if up to first
01852       #else/#elif/#endif.
01853 
01854     - Preprocessor condition evaluates to false from #else/#elif up
01855       to #endif.  We still perform lint checks on these lines, but
01856       these do not affect nesting stack.
01857 
01858     Args:
01859       line: current line to check.
01860     """
01861     if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
01862       # Beginning of #if block, save the nesting stack here.  The saved
01863       # stack will allow us to restore the parsing state in the #else case.
01864       self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
01865     elif Match(r'^\s*#\s*(else|elif)\b', line):
01866       # Beginning of #else block
01867       if self.pp_stack:
01868         if not self.pp_stack[-1].seen_else:
01869           # This is the first #else or #elif block.  Remember the
01870           # whole nesting stack up to this point.  This is what we
01871           # keep after the #endif.
01872           self.pp_stack[-1].seen_else = True
01873           self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
01874 
01875         # Restore the stack to how it was before the #if
01876         self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
01877       else:
01878         # TODO(unknown): unexpected #else, issue warning?
01879         pass
01880     elif Match(r'^\s*#\s*endif\b', line):
01881       # End of #if or #else blocks.
01882       if self.pp_stack:
01883         # If we saw an #else, we will need to restore the nesting
01884         # stack to its former state before the #else, otherwise we
01885         # will just continue from where we left off.
01886         if self.pp_stack[-1].seen_else:
01887           # Here we can just use a shallow copy since we are the last
01888           # reference to it.
01889           self.stack = self.pp_stack[-1].stack_before_else
01890         # Drop the corresponding #if
01891         self.pp_stack.pop()
01892       else:
01893         # TODO(unknown): unexpected #endif, issue warning?
01894         pass
01895 
01896   def Update(self, filename, clean_lines, linenum, error):
01897     """Update nesting state with current line.
01898 
01899     Args:
01900       filename: The name of the current file.
01901       clean_lines: A CleansedLines instance containing the file.
01902       linenum: The number of the line to check.
01903       error: The function to call with any errors found.
01904     """
01905     line = clean_lines.elided[linenum]
01906 
01907     # Update pp_stack first
01908     self.UpdatePreprocessor(line)
01909 
01910     # Count parentheses.  This is to avoid adding struct arguments to
01911     # the nesting stack.
01912     if self.stack:
01913       inner_block = self.stack[-1]
01914       depth_change = line.count('(') - line.count(')')
01915       inner_block.open_parentheses += depth_change
01916 
01917       # Also check if we are starting or ending an inline assembly block.
01918       if inner_block.inline_asm in (_NO_ASM, _END_ASM):
01919         if (depth_change != 0 and
01920             inner_block.open_parentheses == 1 and
01921             _MATCH_ASM.match(line)):
01922           # Enter assembly block
01923           inner_block.inline_asm = _INSIDE_ASM
01924         else:
01925           # Not entering assembly block.  If previous line was _END_ASM,
01926           # we will now shift to _NO_ASM state.
01927           inner_block.inline_asm = _NO_ASM
01928       elif (inner_block.inline_asm == _INSIDE_ASM and
01929             inner_block.open_parentheses == 0):
01930         # Exit assembly block
01931         inner_block.inline_asm = _END_ASM
01932 
01933     # Consume namespace declaration at the beginning of the line.  Do
01934     # this in a loop so that we catch same line declarations like this:
01935     #   namespace proto2 { namespace bridge { class MessageSet; } }
01936     while True:
01937       # Match start of namespace.  The "\b\s*" below catches namespace
01938       # declarations even if they aren't followed by whitespace; this
01939       # is so that we don't confuse our namespace checker.  The
01940       # missing spaces will be flagged by CheckSpacing.
01941       namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
01942       if not namespace_decl_match:
01943         break
01944 
01945       new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
01946       self.stack.append(new_namespace)
01947 
01948       line = namespace_decl_match.group(2)
01949       if line.find('{') != -1:
01950         new_namespace.seen_open_brace = True
01951         line = line[line.find('{') + 1:]
01952 
01953     # Look for a class declaration in whatever is left of the line
01954     # after parsing namespaces.  The regexp accounts for decorated classes
01955     # such as in:
01956     #   class LOCKABLE API Object {
01957     #   };
01958     #
01959     # Templates with class arguments may confuse the parser, for example:
01960     #   template <class T
01961     #             class Comparator = less<T>,
01962     #             class Vector = vector<T> >
01963     #   class HeapQueue {
01964     #
01965     # Because this parser has no nesting state about templates, by the
01966     # time it saw "class Comparator", it may think that it's a new class.
01967     # Nested templates have a similar problem:
01968     #   template <
01969     #       typename ExportedType,
01970     #       typename TupleType,
01971     #       template <typename, typename> class ImplTemplate>
01972     #
01973     # To avoid these cases, we ignore classes that are followed by '=' or '>'
01974     class_decl_match = Match(
01975         r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
01976         r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
01977         r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
01978     if (class_decl_match and
01979         (not self.stack or self.stack[-1].open_parentheses == 0)):
01980       self.stack.append(_ClassInfo(
01981           class_decl_match.group(4), class_decl_match.group(2),
01982           clean_lines, linenum))
01983       line = class_decl_match.group(5)
01984 
01985     # If we have not yet seen the opening brace for the innermost block,
01986     # run checks here.
01987     if not self.SeenOpenBrace():
01988       self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
01989 
01990     # Update access control if we are inside a class/struct
01991     if self.stack and isinstance(self.stack[-1], _ClassInfo):
01992       classinfo = self.stack[-1]
01993       access_match = Match(
01994           r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
01995           r':(?:[^:]|$)',
01996           line)
01997       if access_match:
01998         classinfo.access = access_match.group(2)
01999 
02000         # Check that access keywords are indented +1 space.  Skip this
02001         # check if the keywords are not preceded by whitespaces.
02002         indent = access_match.group(1)
02003         if (len(indent) != classinfo.class_indent + 1 and
02004             Match(r'^\s*$', indent)):
02005           if classinfo.is_struct:
02006             parent = 'struct ' + classinfo.name
02007           else:
02008             parent = 'class ' + classinfo.name
02009           slots = ''
02010           if access_match.group(3):
02011             slots = access_match.group(3)
02012           error(filename, linenum, 'whitespace/indent', 3,
02013                 '%s%s: should be indented +1 space inside %s' % (
02014                     access_match.group(2), slots, parent))
02015 
02016     # Consume braces or semicolons from what's left of the line
02017     while True:
02018       # Match first brace, semicolon, or closed parenthesis.
02019       matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
02020       if not matched:
02021         break
02022 
02023       token = matched.group(1)
02024       if token == '{':
02025         # If the namespace or class hasn't seen an opening brace yet,
02026         # mark the namespace/class head as complete.  Otherwise push a
02027         # new block onto the stack.
02028         if not self.SeenOpenBrace():
02029           self.stack[-1].seen_open_brace = True
02030         else:
02031           self.stack.append(_BlockInfo(True))
02032           if _MATCH_ASM.match(line):
02033             self.stack[-1].inline_asm = _BLOCK_ASM
02034       elif token == ';' or token == ')':
02035         # If we haven't seen an opening brace yet, but we already saw
02036         # a semicolon, this is probably a forward declaration.  Pop
02037         # the stack for these.
02038         #
02039         # Similarly, if we haven't seen an opening brace yet, but we
02040         # already saw a closing parenthesis, then these are probably
02041         # function arguments with extra "class" or "struct" keywords.
02042         # Also pop the stack for these.
02043         if not self.SeenOpenBrace():
02044           self.stack.pop()
02045       else:  # token == '}'
02046         # Perform end of block checks and pop the stack.
02047         if self.stack:
02048           self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
02049           self.stack.pop()
02050       line = matched.group(2)
02051 
02052   def InnermostClass(self):
02053     """Get class info on the top of the stack.
02054 
02055     Returns:
02056       A _ClassInfo object if we are inside a class, or None otherwise.
02057     """
02058     for i in range(len(self.stack), 0, -1):
02059       classinfo = self.stack[i - 1]
02060       if isinstance(classinfo, _ClassInfo):
02061         return classinfo
02062     return None
02063 
02064   def CheckCompletedBlocks(self, filename, error):
02065     """Checks that all classes and namespaces have been completely parsed.
02066 
02067     Call this when all lines in a file have been processed.
02068     Args:
02069       filename: The name of the current file.
02070       error: The function to call with any errors found.
02071     """
02072     # Note: This test can result in false positives if #ifdef constructs
02073     # get in the way of brace matching. See the testBuildClass test in
02074     # cpplint_unittest.py for an example of this.
02075     for obj in self.stack:
02076       if isinstance(obj, _ClassInfo):
02077         error(filename, obj.starting_linenum, 'build/class', 5,
02078               'Failed to find complete declaration of class %s' %
02079               obj.name)
02080       elif isinstance(obj, _NamespaceInfo):
02081         error(filename, obj.starting_linenum, 'build/namespaces', 5,
02082               'Failed to find complete declaration of namespace %s' %
02083               obj.name)
02084 
02085 
02086 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
02087                                   nesting_state, error):
02088   r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
02089 
02090   Complain about several constructs which gcc-2 accepts, but which are
02091   not standard C++.  Warning about these in lint is one way to ease the
02092   transition to new compilers.
02093   - put storage class first (e.g. "static const" instead of "const static").
02094   - "%lld" instead of "%qd" in printf-type functions.
02095   - "%1$d" is non-standard in printf-type functions.
02096   - "\%" is an undefined character escape sequence.
02097   - text after #endif is not allowed.
02098   - invalid inner-style forward declaration.
02099   - >? and <? operators, and their >?= and <?= cousins.
02100 
02101   Additionally, check for constructor/destructor style violations and reference
02102   members, as it is very convenient to do so while checking for
02103   gcc-2 compliance.
02104 
02105   Args:
02106     filename: The name of the current file.
02107     clean_lines: A CleansedLines instance containing the file.
02108     linenum: The number of the line to check.
02109     nesting_state: A _NestingState instance which maintains information about
02110                    the current stack of nested blocks being parsed.
02111     error: A callable to which errors are reported, which takes 4 arguments:
02112            filename, line number, error level, and message
02113   """
02114 
02115   # Remove comments from the line, but leave in strings for now.
02116   line = clean_lines.lines[linenum]
02117 
02118   if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
02119     error(filename, linenum, 'runtime/printf_format', 3,
02120           '%q in format strings is deprecated.  Use %ll instead.')
02121 
02122   if Search(r'printf\s*\(.*".*%\d+\$', line):
02123     error(filename, linenum, 'runtime/printf_format', 2,
02124           '%N$ formats are unconventional.  Try rewriting to avoid them.')
02125 
02126   # Remove escaped backslashes before looking for undefined escapes.
02127   line = line.replace('\\\\', '')
02128 
02129   if Search(r'("|\').*\\(%|\[|\(|{)', line):
02130     error(filename, linenum, 'build/printf_format', 3,
02131           '%, [, (, and { are undefined character escapes.  Unescape them.')
02132 
02133   # For the rest, work with both comments and strings removed.
02134   line = clean_lines.elided[linenum]
02135 
02136   if Search(r'\b(const|volatile|void|char|short|int|long'
02137             r'|float|double|signed|unsigned'
02138             r'|schar|u?int8|u?int16|u?int32|u?int64)'
02139             r'\s+(register|static|extern|typedef)\b',
02140             line):
02141     error(filename, linenum, 'build/storage_class', 5,
02142           'Storage class (static, extern, typedef, etc) should be first.')
02143 
02144   if Match(r'\s*#\s*endif\s*[^/\s]+', line):
02145     error(filename, linenum, 'build/endif_comment', 5,
02146           'Uncommented text after #endif is non-standard.  Use a comment.')
02147 
02148   if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
02149     error(filename, linenum, 'build/forward_decl', 5,
02150           'Inner-style forward declarations are invalid.  Remove this line.')
02151 
02152   if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
02153             line):
02154     error(filename, linenum, 'build/deprecated', 3,
02155           '>? and <? (max and min) operators are non-standard and deprecated.')
02156 
02157   if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
02158     # TODO(unknown): Could it be expanded safely to arbitrary references,
02159     # without triggering too many false positives? The first
02160     # attempt triggered 5 warnings for mostly benign code in the regtest, hence
02161     # the restriction.
02162     # Here's the original regexp, for the reference:
02163     # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
02164     # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
02165     error(filename, linenum, 'runtime/member_string_references', 2,
02166           'const string& members are dangerous. It is much better to use '
02167           'alternatives, such as pointers or simple constants.')
02168 
02169   # Everything else in this function operates on class declarations.
02170   # Return early if the top of the nesting stack is not a class, or if
02171   # the class head is not completed yet.
02172   classinfo = nesting_state.InnermostClass()
02173   if not classinfo or not classinfo.seen_open_brace:
02174     return
02175 
02176   # The class may have been declared with namespace or classname qualifiers.
02177   # The constructor and destructor will not have those qualifiers.
02178   base_classname = classinfo.name.split('::')[-1]
02179 
02180   # Look for single-argument constructors that aren't marked explicit.
02181   # Technically a valid construct, but against style.
02182   args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
02183                % re.escape(base_classname),
02184                line)
02185   if (args and
02186       args.group(1) != 'void' and
02187       not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
02188                 % re.escape(base_classname), args.group(1).strip())):
02189     error(filename, linenum, 'runtime/explicit', 5,
02190           'Single-argument constructors should be marked explicit.')
02191 
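# Added annotation (illustrative): inside "class Foo { ... };" a declaration
# such as
#   Foo(int x);
# is reported as runtime/explicit, while the copy constructor
#   Foo(const Foo& other);
# and the no-argument / "(void)" forms are not.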
02192 
02193 def CheckSpacingForFunctionCall(filename, line, linenum, error):
02194   """Checks for the correctness of various spacing around function calls.
02195 
02196   Args:
02197     filename: The name of the current file.
02198     line: The text of the line to check.
02199     linenum: The number of the line to check.
02200     error: The function to call with any errors found.
02201   """
02202 
02203   # Since function calls often occur inside if/for/while/switch
02204   # expressions - which have their own, more liberal conventions - we
02205   # first see if we should be looking inside such an expression for a
02206   # function call, to which we can apply more strict standards.
02207   fncall = line    # if there's no control flow construct, look at whole line
02208   for pattern in (r'\bif\s*\((.*)\)\s*{',
02209                   r'\bfor\s*\((.*)\)\s*{',
02210                   r'\bwhile\s*\((.*)\)\s*[{;]',
02211                   r'\bswitch\s*\((.*)\)\s*{'):
02212     match = Search(pattern, line)
02213     if match:
02214       fncall = match.group(1)    # look inside the parens for function calls
02215       break
02216 
02217   # Except in if/for/while/switch, there should never be space
02218   # immediately inside parens (e.g. "f( 3, 4 )").  We make an exception
02219   # for nested parens ( (a+b) + c ).  Likewise, there should never be
02220   # a space before a ( when it's a function argument.  I assume it's a
02221   # function argument when the char before the whitespace is legal in
02222   # a function name (alnum + _) and we're not starting a macro. Also ignore
02223   # pointers and references to arrays and functions since they're too tricky:
02224   # we use a very simple way to recognize these:
02225   # " (something)(maybe-something)" or
02226   # " (something)(maybe-something," or
02227   # " (something)[something]"
02228   # Note that we assume the contents of [] to be short enough that
02229   # they'll never need to wrap.
02230   if (  # Ignore control structures.
02231       not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
02232                  fncall) and
02233       # Ignore pointers/references to functions.
02234       not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
02235       # Ignore pointers/references to arrays.
02236       not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
02237     if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
02238       error(filename, linenum, 'whitespace/parens', 4,
02239             'Extra space after ( in function call')
02240     elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
02241       error(filename, linenum, 'whitespace/parens', 2,
02242             'Extra space after (')
02243     if (Search(r'\w\s+\(', fncall) and
02244         not Search(r'#\s*define|typedef', fncall) and
02245         not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
02246       error(filename, linenum, 'whitespace/parens', 4,
02247             'Extra space before ( in function call')
02248     # If the ) is followed only by a newline or a { + newline, assume it's
02249     # part of a control statement (if/while/etc), and don't complain
02250     if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
02251       # If the closing parenthesis is preceded by only whitespaces,
02252       # try to give a more descriptive error message.
02253       if Search(r'^\s+\)', fncall):
02254         error(filename, linenum, 'whitespace/parens', 2,
02255               'Closing ) should be moved to the previous line')
02256       else:
02257         error(filename, linenum, 'whitespace/parens', 2,
02258               'Extra space before )')
02259 
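# Added annotation (illustrative): with the checks above,
#   DoStuff( 3, 4 );   // "Extra space after ( in function call" and
#                      // "Extra space before )"
#   DoStuff (3, 4);    // "Extra space before ( in function call"
# For control-flow statements such as "if (foo) {" only the text inside the
# parentheses is inspected, so the space before "(" is not flagged.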
02260 
02261 def IsBlankLine(line):
02262   """Returns true if the given line is blank.
02263 
02264   We consider a line to be blank if the line is empty or consists of
02265   only white spaces.
02266 
02267   Args:
02268     line: A line of a string.
02269 
02270   Returns:
02271     True, if the given line is blank.
02272   """
02273   return not line or line.isspace()
02274 
02275 
02276 def CheckForFunctionLengths(filename, clean_lines, linenum,
02277                             function_state, error):
02278   """Reports errors for long function bodies.
02279 
02280   For an overview why this is done, see:
02281   http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
02282 
02283   Uses a simplistic algorithm assuming other style guidelines
02284   (especially spacing) are followed.
02285   Only checks unindented functions, so class members are unchecked.
02286   Trivial bodies are unchecked, so constructors with huge initializer lists
02287   may be missed.
02288   Blank/comment lines are not counted so as to avoid encouraging the removal
02289   of vertical space and comments just to get through a lint check.
02290   NOLINT *on the last line of a function* disables this check.
02291 
02292   Args:
02293     filename: The name of the current file.
02294     clean_lines: A CleansedLines instance containing the file.
02295     linenum: The number of the line to check.
02296     function_state: Current function name and lines in body so far.
02297     error: The function to call with any errors found.
02298   """
02299   lines = clean_lines.lines
02300   line = lines[linenum]
02301   raw = clean_lines.raw_lines
02302   raw_line = raw[linenum]
02303   joined_line = ''
02304 
02305   starting_func = False
02306   regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
02307   match_result = Match(regexp, line)
02308   if match_result:
02309     # If the name is all caps and underscores, figure it's a macro and
02310     # ignore it, unless it's TEST or TEST_F.
02311     function_name = match_result.group(1).split()[-1]
02312     if function_name == 'TEST' or function_name == 'TEST_F' or (
02313         not Match(r'[A-Z_]+$', function_name)):
02314       starting_func = True
02315 
02316   if starting_func:
02317     body_found = False
02318     for start_linenum in xrange(linenum, clean_lines.NumLines()):
02319       start_line = lines[start_linenum]
02320       joined_line += ' ' + start_line.lstrip()
02321       if Search(r'(;|})', start_line):  # Declarations and trivial functions
02322         body_found = True
02323         break                              # ... ignore
02324       elif Search(r'{', start_line):
02325         body_found = True
02326         function = Search(r'((\w|:)*)\(', line).group(1)
02327         if Match(r'TEST', function):    # Handle TEST... macros
02328           parameter_regexp = Search(r'(\(.*\))', joined_line)
02329           if parameter_regexp:             # Ignore bad syntax
02330             function += parameter_regexp.group(1)
02331         else:
02332           function += '()'
02333         function_state.Begin(function)
02334         break
02335     if not body_found:
02336       # No body for the function (or evidence of a non-function) was found.
02337       error(filename, linenum, 'readability/fn_size', 5,
02338             'Lint failed to find start of function body.')
02339   elif Match(r'^\}\s*$', line):  # function end
02340     function_state.Check(error, filename, linenum)
02341     function_state.End()
02342   elif not Match(r'^\s*$', line):
02343     function_state.Count()  # Count non-blank/non-comment lines.
02344 
02345 
02346 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
02347 
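# Added annotation (illustrative, not part of upstream cpplint): the groups
# that CheckComment() below inspects are the leading whitespace, the
# (username), and the character following the optional colon.
assert (_RE_PATTERN_TODO.match('// TODO(user): fix this').groups()
        == (' ', '(user)', ' '))
assert _RE_PATTERN_TODO.match('//TODO: fix this').group(2) is None  # no username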
02348 
02349 def CheckComment(comment, filename, linenum, error):
02350   """Checks for common mistakes in TODO comments.
02351 
02352   Args:
02353     comment: The text of the comment from the line in question.
02354     filename: The name of the current file.
02355     linenum: The number of the line to check.
02356     error: The function to call with any errors found.
02357   """
02358   match = _RE_PATTERN_TODO.match(comment)
02359   if match:
02360     # One whitespace is correct; zero whitespace is handled elsewhere.
02361     leading_whitespace = match.group(1)
02362     if len(leading_whitespace) > 1:
02363       error(filename, linenum, 'whitespace/todo', 2,
02364             'Too many spaces before TODO')
02365 
02366     username = match.group(2)
02367     if not username:
02368       error(filename, linenum, 'readability/todo', 2,
02369             'Missing username in TODO; it should look like '
02370             '"// TODO(my_username): Stuff."')
02371 
02372     middle_whitespace = match.group(3)
02373     # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
02374     if middle_whitespace != ' ' and middle_whitespace != '':
02375       error(filename, linenum, 'whitespace/todo', 2,
02376             'TODO(my_username) should be followed by a space')
02377 
02378 def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
02379   """Checks for improper use of DISALLOW* macros.
02380 
02381   Args:
02382     filename: The name of the current file.
02383     clean_lines: A CleansedLines instance containing the file.
02384     linenum: The number of the line to check.
02385     nesting_state: A _NestingState instance which maintains information about
02386                    the current stack of nested blocks being parsed.
02387     error: The function to call with any errors found.
02388   """
02389   line = clean_lines.elided[linenum]  # get rid of comments and strings
02390 
02391   matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
02392                    r'DISALLOW_EVIL_CONSTRUCTORS|'
02393                    r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
02394   if not matched:
02395     return
02396   if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
02397     if nesting_state.stack[-1].access != 'private':
02398       error(filename, linenum, 'readability/constructors', 3,
02399             '%s must be in the private: section' % matched.group(1))
02400 
02401   else:
02402     # Found DISALLOW* macro outside a class declaration, or perhaps it
02403     # was used inside a function when it should have been part of the
02404     # class declaration.  We could issue a warning here, but it
02405     # probably resulted in a compiler error already.
02406     pass
02407 
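# Added annotation (illustrative): within a class body, a line such as
#   DISALLOW_COPY_AND_ASSIGN(Foo);
# is reported as readability/constructors unless it appears in the private:
# section of the class.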
02408 
02409 def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
02410   """Find the corresponding > to close a template.
02411 
02412   Args:
02413     clean_lines: A CleansedLines instance containing the file.
02414     linenum: Current line number.
02415     init_suffix: Remainder of the current line after the initial <.
02416 
02417   Returns:
02418     True if a matching bracket exists.
02419   """
02420   line = init_suffix
02421   nesting_stack = ['<']
02422   while True:
02423     # Find the next operator that can tell us whether < is used as an
02424     # opening bracket or as a less-than operator.  We only want to
02425     # warn on the latter case.
02426     #
02427     # We could also check all other operators and terminate the search
02428     # early, e.g. if we got something like this "a<b+c", the "<" is
02429     # most likely a less-than operator, but then we will get false
02430     # positives for default arguments and other template expressions.
02431     match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
02432     if match:
02433       # Found an operator, update nesting stack
02434       operator = match.group(1)
02435       line = match.group(2)
02436 
02437       if nesting_stack[-1] == '<':
02438         # Expecting closing angle bracket
02439         if operator in ('<', '(', '['):
02440           nesting_stack.append(operator)
02441         elif operator == '>':
02442           nesting_stack.pop()
02443           if not nesting_stack:
02444             # Found matching angle bracket
02445             return True
02446         elif operator == ',':
02447           # Got a comma after a bracket, this is most likely a template
02448           # argument.  We have not seen a closing angle bracket yet, but
02449           # it's probably a few lines later if we look for it, so just
02450           # return early here.
02451           return True
02452         else:
02453           # Got some other operator.
02454           return False
02455 
02456       else:
02457         # Expecting closing parenthesis or closing bracket
02458         if operator in ('<', '(', '['):
02459           nesting_stack.append(operator)
02460         elif operator in (')', ']'):
02461           # We don't bother checking for matching () or [].  If we got
02462           # something like (] or [), it would have been a syntax error.
02463           nesting_stack.pop()
02464 
02465     else:
02466       # Scan the next line
02467       linenum += 1
02468       if linenum >= len(clean_lines.elided):
02469         break
02470       line = clean_lines.elided[linenum]
02471 
02472   # Exhausted all remaining lines and still no matching angle bracket.
02473   # Most likely the input was incomplete, otherwise we should have
02474   # seen a semicolon and returned early.
02475   return True
02476 
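# Added annotation (illustrative): given the remainder "Bar> x;" (the text
# after a "<"), the helper above finds the matching ">" and returns True, so
# "Foo<Bar> x;" is treated as a template.  Given "b) {" it hits ")" first and
# returns False, so "if (a<b) {" is reported as a "<" missing spaces.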
02477 
02478 def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
02479   """Find the corresponding < that started a template.
02480 
02481   Args:
02482     clean_lines: A CleansedLines instance containing the file.
02483     linenum: Current line number.
02484     init_prefix: Part of the current line before the initial >.
02485 
02486   Returns:
02487     True if a matching bracket exists.
02488   """
02489   line = init_prefix
02490   nesting_stack = ['>']
02491   while True:
02492     # Find the previous operator
02493     match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
02494     if match:
02495       # Found an operator, update nesting stack
02496       operator = match.group(2)
02497       line = match.group(1)
02498 
02499       if nesting_stack[-1] == '>':
02500         # Expecting opening angle bracket
02501         if operator in ('>', ')', ']'):
02502           nesting_stack.append(operator)
02503         elif operator == '<':
02504           nesting_stack.pop()
02505           if not nesting_stack:
02506             # Found matching angle bracket
02507             return True
02508         elif operator == ',':
02509           # Got a comma before a bracket, this is most likely a
02510           # template argument.  The opening angle bracket is probably
02511           # there if we look for it, so just return early here.
02512           return True
02513         else:
02514           # Got some other operator.
02515           return False
02516 
02517       else:
02518         # Expecting opening parenthesis or opening bracket
02519         if operator in ('>', ')', ']'):
02520           nesting_stack.append(operator)
02521         elif operator in ('(', '['):
02522           nesting_stack.pop()
02523 
02524     else:
02525       # Scan the previous line
02526       linenum -= 1
02527       if linenum < 0:
02528         break
02529       line = clean_lines.elided[linenum]
02530 
02531   # Exhausted all earlier lines and still no matching angle bracket.
02532   return False
02533 
02534 
02535 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
02536   """Checks for the correctness of various spacing issues in the code.
02537 
02538   Things we check for: spaces around operators, spaces after
02539   if/for/while/switch, no spaces around parens in function calls, two
02540   spaces between code and comment, don't start a block with a blank
02541   line, don't end a function with a blank line, don't add a blank line
02542   after public/protected/private, don't have too many blank lines in a row.
02543 
02544   Args:
02545     filename: The name of the current file.
02546     clean_lines: A CleansedLines instance containing the file.
02547     linenum: The number of the line to check.
02548     nesting_state: A _NestingState instance which maintains information about
02549                    the current stack of nested blocks being parsed.
02550     error: The function to call with any errors found.
02551   """
02552 
02553   # Don't use "elided" lines here, otherwise we can't check commented lines.
02554   # Don't want to use "raw" either, because we don't want to check inside C++11
02555   # raw strings.
02556   raw = clean_lines.lines_without_raw_strings
02557   line = raw[linenum]
02558 
02559   # Before nixing comments, check if the line is blank for no good
02560   # reason.  This includes the first line after a block is opened, and
02561   # blank lines at the end of a function (ie, right before a line like '}'
02562   #
02563   # Skip all the blank line checks if we are immediately inside a
02564   # namespace body.  In other words, don't issue blank line warnings
02565   # for this block:
02566   #   namespace {
02567   #
02568   #   }
02569   #
02570   # A warning about missing end of namespace comments will be issued instead.
02571   if IsBlankLine(line) and not nesting_state.InNamespaceBody():
02572     elided = clean_lines.elided
02573     prev_line = elided[linenum - 1]
02574     prevbrace = prev_line.rfind('{')
02575     # TODO(unknown): Don't complain if line before blank line, and line after,
02576     #                both start with alnums and are indented the same amount.
02577     #                This ignores whitespace at the start of a namespace block
02578     #                because those are not usually indented.
02579     if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
02580       # OK, we have a blank line at the start of a code block.  Before we
02581       # complain, we check if it is an exception to the rule: The previous
02582       # non-empty line has the parameters of a function header that are indented
02583       # 4 spaces (because they did not fit in an 80 column line when placed on
02584       # the same line as the function name).  We also check for the case where
02585       # the previous line is indented 6 spaces, which may happen when the
02586       # initializers of a constructor do not fit into an 80 column line.
02587       exception = False
02588       if Match(r' {6}\w', prev_line):  # Initializer list?
02589         # We are looking for the opening column of initializer list, which
02590         # should be indented 4 spaces to cause 6 space indentation afterwards.
02591         search_position = linenum-2
02592         while (search_position >= 0
02593                and Match(r' {6}\w', elided[search_position])):
02594           search_position -= 1
02595         exception = (search_position >= 0
02596                      and elided[search_position][:5] == '    :')
02597       else:
02598         # Search for the function arguments or an initializer list.  We use a
02599         # simple heuristic here: if the line is indented 4 spaces and we have a
02600         # closing paren, without the opening paren, followed by an opening brace
02601         # or colon (for initializer lists) we assume that it is the last line of
02602         # a function header.  If we have a colon indented 4 spaces, it is an
02603         # initializer list.
02604         exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
02605                            prev_line)
02606                      or Match(r' {4}:', prev_line))
02607 
02608       if not exception:
02609         error(filename, linenum, 'whitespace/blank_line', 2,
02610               'Redundant blank line at the start of a code block '
02611               'should be deleted.')
02612     # Ignore blank lines at the end of a block in a long if-else
02613     # chain, like this:
02614     #   if (condition1) {
02615     #     // Something followed by a blank line
02616     #
02617     #   } else if (condition2) {
02618     #     // Something else
02619     #   }
02620     if linenum + 1 < clean_lines.NumLines():
02621       next_line = raw[linenum + 1]
02622       if (next_line
02623           and Match(r'\s*}', next_line)
02624           and next_line.find('} else ') == -1):
02625         error(filename, linenum, 'whitespace/blank_line', 3,
02626               'Redundant blank line at the end of a code block '
02627               'should be deleted.')
02628 
02629     matched = Match(r'\s*(public|protected|private):', prev_line)
02630     if matched:
02631       error(filename, linenum, 'whitespace/blank_line', 3,
02632             'Do not leave a blank line after "%s:"' % matched.group(1))
02633 
02634   # Next, we complain if there's a comment too near the text
02635   commentpos = line.find('//')
02636   if commentpos != -1:
02637     # Check if the // may be in quotes.  If so, ignore it
02638     # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
02639     if (line.count('"', 0, commentpos) -
02640         line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
02641       # Allow one space for new scopes, two spaces otherwise:
02642       if (not Match(r'^\s*{ //', line) and
02643           ((commentpos >= 1 and
02644             line[commentpos-1] not in string.whitespace) or
02645            (commentpos >= 2 and
02646             line[commentpos-2] not in string.whitespace))):
02647         error(filename, linenum, 'whitespace/comments', 2,
02648               'At least two spaces is best between code and comments')
02649       # There should always be a space between the // and the comment
02650       commentend = commentpos + 2
02651       if commentend < len(line) and not line[commentend] == ' ':
02652         # but some lines are exceptions -- e.g. if they're big
02653         # comment delimiters like:
02654         # //----------------------------------------------------------
02655         # or are an empty C++ style Doxygen comment, like:
02656         # ///
02657         # or C++ style Doxygen comments placed after the variable:
02658         # ///<  Header comment
02659         # //!<  Header comment
02660         # or they begin with multiple slashes followed by a space:
02661         # //////// Header comment
02662         match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
02663                  Search(r'^/$', line[commentend:]) or
02664                  Search(r'^!< ', line[commentend:]) or
02665                  Search(r'^/< ', line[commentend:]) or
02666                  Search(r'^/+ ', line[commentend:]))
02667         if not match:
02668           error(filename, linenum, 'whitespace/comments', 4,
02669                 'Should have a space between // and comment')
02670       CheckComment(line[commentpos:], filename, linenum, error)
02671 
02672   line = clean_lines.elided[linenum]  # get rid of comments and strings
02673 
02674   # Don't try to do spacing checks for operator methods
02675   line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
02676 
02677   # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
02678   # Otherwise not.  Note we only check for non-spaces on *both* sides;
02679   # sometimes people put non-spaces on one side when aligning ='s among
02680   # many lines (not that this is behavior that I approve of...)
02681   if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
02682     error(filename, linenum, 'whitespace/operators', 4,
02683           'Missing spaces around =')
02684 
02685   # It's ok not to have spaces around binary operators like + - * /, but if
02686   # there's too little whitespace, we get concerned.  It's hard to tell,
02687   # though, so we punt on this one for now.  TODO.
02688 
02689   # You should always have whitespace around binary operators.
02690   #
02691   # Check <= and >= first to avoid false positives with < and >, then
02692   # check non-include lines for spacing around < and >.
02693   match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
02694   if match:
02695     error(filename, linenum, 'whitespace/operators', 3,
02696           'Missing spaces around %s' % match.group(1))
02697   # We allow no-spaces around << when used like this: 10<<20, but
02698   # not otherwise (particularly, not when used as streams)
02699   # Also ignore using ns::operator<<;
02700   match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
02701   if (match and
02702       not (match.group(1).isdigit() and match.group(2).isdigit()) and
02703       not (match.group(1) == 'operator' and match.group(2) == ';')):
02704     error(filename, linenum, 'whitespace/operators', 3,
02705           'Missing spaces around <<')
02706   elif not Match(r'#.*include', line):
02707     # Avoid false positives on ->
02708     reduced_line = line.replace('->', '')
02709 
02710     # Look for < that is not surrounded by spaces.  This is only
02711     # triggered if both sides are missing spaces, even though
02712     # technically we should flag if at least one side is missing a
02713     # space.  This is done to avoid some false positives with shifts.
02714     match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
02715     if (match and
02716         not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
02717       error(filename, linenum, 'whitespace/operators', 3,
02718             'Missing spaces around <')
02719 
02720     # Look for > that is not surrounded by spaces.  Similar to the
02721     # above, we only trigger if both sides are missing spaces to avoid
02722     # false positives with shifts.
02723     match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
02724     if (match and
02725         not FindPreviousMatchingAngleBracket(clean_lines, linenum,
02726                                              match.group(1))):
02727       error(filename, linenum, 'whitespace/operators', 3,
02728             'Missing spaces around >')
02729 
02730   # We allow no-spaces around >> for almost anything.  This is because
02731   # C++11 allows ">>" to close nested templates, which accounts for
02732   # most cases when ">>" is not followed by a space.
02733   #
02734   # We still warn on ">>" followed by alpha character, because that is
02735   # likely due to ">>" being used for right shifts, e.g.:
02736   #   value >> alpha
02737   #
02738   # When ">>" is used to close templates, the alphanumeric letter that
02739   # follows would be part of an identifier, and there should still be
02740   # a space separating the template type and the identifier.
02741   #   type<type<type>> alpha
02742   match = Search(r'>>[a-zA-Z_]', line)
02743   if match:
02744     error(filename, linenum, 'whitespace/operators', 3,
02745           'Missing spaces around >>')
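  # Illustrative (hypothetical) examples for the operator-spacing checks above:
  #   "if (a==b)"                   -> "Missing spaces around =="
  #   "cout<<42"                    -> "Missing spaces around <<"
  #   "x = 10<<20;"                 -> no warning (both operands are digits)
  #   "if (a<b)"                    -> "Missing spaces around <"
  #   "std::vector<int> v;"         -> no warning (matching '>' is found)
  #   "y = x>>bits;"                -> "Missing spaces around >>"
  #   "std::set<std::set<int>> s;"  -> no warning (C++11 nested template close)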
02746 
02747   # There shouldn't be space around unary operators
02748   match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
02749   if match:
02750     error(filename, linenum, 'whitespace/operators', 4,
02751           'Extra space for operator %s' % match.group(1))
02752 
02753   # A pet peeve of mine: no spaces after an if, while, switch, or for
02754   match = Search(r' (if\(|for\(|while\(|switch\()', line)
02755   if match:
02756     error(filename, linenum, 'whitespace/parens', 5,
02757           'Missing space before ( in %s' % match.group(1))
02758 
02759   # For if/for/while/switch, the left and right parens should be
02760   # consistent about how many spaces are inside the parens, and
02761   # there should either be zero or one spaces inside the parens.
02762   # We don't want: "if ( foo)" or "if ( foo   )".
02763   # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
02764   match = Search(r'\b(if|for|while|switch)\s*'
02765                  r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
02766                  line)
02767   if match:
02768     if len(match.group(2)) != len(match.group(4)):
02769       if not (match.group(3) == ';' and
02770               len(match.group(2)) == 1 + len(match.group(4)) or
02771               not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
02772         error(filename, linenum, 'whitespace/parens', 5,
02773               'Mismatching spaces inside () in %s' % match.group(1))
02774     if len(match.group(2)) not in [0, 1]:
02775       error(filename, linenum, 'whitespace/parens', 5,
02776             'Should have zero or one spaces inside ( and ) in %s' %
02777             match.group(1))
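  # Illustrative (hypothetical) examples for the paren checks above:
  #   "  if(x) {"                -> "Missing space before ( in if("
  #   "  if ( foo) {"            -> "Mismatching spaces inside () in if"
  #   "  if (  foo  ) {"         -> "Should have zero or one spaces inside ( and )"
  #   "  for ( ; i < n; ++i) {"  -> no warning (explicit for-loop exception)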
02778 
02779   # You should always have a space after a comma (either as fn arg or operator)
02780   #
02781   # This does not apply when the non-space character following the
02782   # comma is another comma, since the only time when that happens is
02783   # for empty macro arguments.
02784   #
02785   # We run this check in two passes: first pass on elided lines to
02786   # verify that lines contain missing whitespaces, second pass on raw
02786   # find any missing whitespace, second pass on raw
02787   # lines to confirm that the missing whitespace is not due to
02789   if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
02790     error(filename, linenum, 'whitespace/comma', 3,
02791           'Missing space after ,')
02792 
02793   # You should always have a space after a semicolon
02794   # except for a few corner cases
02795   # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
02796   # space after ;
02797   if Search(r';[^\s};\\)/]', line):
02798     error(filename, linenum, 'whitespace/semicolon', 3,
02799           'Missing space after ;')
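  # Illustrative (hypothetical) examples for the two checks above:
  #   "f(a,b);"                 -> "Missing space after ,"
  #   "for (i = 0;i < n; ++i)"  -> "Missing space after ;"
  #   "if (x) { return 1;}"     -> no "Missing space after ;" (';}' is exempt)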
02800 
02801   # Next we will look for issues with function calls.
02802   CheckSpacingForFunctionCall(filename, line, linenum, error)
02803 
02804   # Except after an opening paren, or after another opening brace (in case of
02805   # an initializer list, for instance), you should have spaces before your
02806   # braces. And since you should never have braces at the beginning of a line,
02807   # this is an easy test.
02808   match = Match(r'^(.*[^ ({]){', line)
02809   if match:
02810     # Try a bit harder to check for brace initialization.  This
02811     # happens in one of the following forms:
02812     #   Constructor() : initializer_list_{} { ... }
02813     #   Constructor{}.MemberFunction()
02814     #   Type variable{};
02815     #   FunctionCall(type{}, ...);
02816     #   LastArgument(..., type{});
02817     #   LOG(INFO) << type{} << " ...";
02818     #   map_of_type[{...}] = ...;
02819     #
02820     # We check for the character following the closing brace, and
02821     # silence the warning if it's one of those listed above, i.e.
02822     # "{.;,)<]".
02823     #
02824     # To account for nested initializer lists, we allow any number of
02825     # closing braces up to "{;,)<".  We can't simply silence the
02826     # warning on first sight of closing brace, because that would
02827     # cause false negatives for things that are not initializer lists.
02828     #   Silence this:         But not this:
02829     #     Outer{                if (...) {
02830     #       Inner{...}            if (...){  // Missing space before {
02831     #     };                    }
02832     #
02833     # There is a false negative with this approach if people inserted
02834     # spurious semicolons, e.g. "if (cond){};", but we will catch the
02835     # spurious semicolon with a separate check.
02836     (endline, endlinenum, endpos) = CloseExpression(
02837         clean_lines, linenum, len(match.group(1)))
02838     trailing_text = ''
02839     if endpos > -1:
02840       trailing_text = endline[endpos:]
02841     for offset in xrange(endlinenum + 1,
02842                          min(endlinenum + 3, clean_lines.NumLines() - 1)):
02843       trailing_text += clean_lines.elided[offset]
02844     if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
02845       error(filename, linenum, 'whitespace/braces', 5,
02846             'Missing space before {')
02847 
02848   # Make sure '} else {' has spaces.
02849   if Search(r'}else', line):
02850     error(filename, linenum, 'whitespace/braces', 5,
02851           'Missing space before else')
02852 
02853   # You shouldn't have spaces before your brackets, except maybe after
02854   # 'delete []' or 'new char * []'.
02855   if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
02856     error(filename, linenum, 'whitespace/braces', 5,
02857           'Extra space before [')
02858 
02859   # You shouldn't have a space before a semicolon at the end of the line.
02860   # There's a special case for "for" since the style guide allows space before
02861   # the semicolon there.
02862   if Search(r':\s*;\s*$', line):
02863     error(filename, linenum, 'whitespace/semicolon', 5,
02864           'Semicolon defining empty statement. Use {} instead.')
02865   elif Search(r'^\s*;\s*$', line):
02866     error(filename, linenum, 'whitespace/semicolon', 5,
02867           'Line contains only semicolon. If this should be an empty statement, '
02868           'use {} instead.')
02869   elif (Search(r'\s+;\s*$', line) and
02870         not Search(r'\bfor\b', line)):
02871     error(filename, linenum, 'whitespace/semicolon', 5,
02872           'Extra space before last semicolon. If this should be an empty '
02873           'statement, use {} instead.')
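  # Illustrative (hypothetical) examples for the semicolon checks above:
  #   "  default:;"         -> "Semicolon defining empty statement. Use {} instead."
  #   "  ;"                 -> "Line contains only semicolon. ..."
  #   "  while (Done()) ;"  -> "Extra space before last semicolon. ..."
  #   "  for (;;) ;"        -> no warning (space before ';' is allowed in for)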
02874 
02875   # In range-based for, we want spaces before and after the colon, but
02876   # not around "::" tokens that might appear.
02877   if (Search('for *\(.*[^:]:[^: ]', line) or
02878       Search('for *\(.*[^: ]:[^:]', line)):
02879     error(filename, linenum, 'whitespace/forcolon', 2,
02880           'Missing space around colon in range-based for loop')
02881 
02882 
02883 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
02884   """Checks for additional blank line issues related to sections.
02885 
02886   Currently the only thing checked here is a blank line before protected/private.
02887 
02888   Args:
02889     filename: The name of the current file.
02890     clean_lines: A CleansedLines instance containing the file.
02891     class_info: A _ClassInfo object.
02892     linenum: The number of the line to check.
02893     error: The function to call with any errors found.
02894   """
02895   # Skip checks if the class is small, where small means 25 lines or less.
02896   # 25 lines seems like a good cutoff since that's the usual height of
02897   # terminals, and any class that can't fit in one screen can't really
02898   # be considered "small".
02899   #
02900   # Also skip checks if we are on the first line.  This accounts for
02901   # classes that look like
02902   #   class Foo { public: ... };
02903   #
02904   # If we didn't find the end of the class, last_line would be zero,
02905   # and the check will be skipped by the first condition.
02906   if (class_info.last_line - class_info.starting_linenum <= 24 or
02907       linenum <= class_info.starting_linenum):
02908     return
02909 
02910   matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
02911   if matched:
02912     # Issue warning if the line before public/protected/private was
02913     # not a blank line, but don't do this if the previous line contains
02914     # "class" or "struct".  This can happen two ways:
02915     #  - We are at the beginning of the class.
02916     #  - We are forward-declaring an inner class that is semantically
02917     #    private, but needed to be public for implementation reasons.
02918     # Also ignores cases where the previous line ends with a backslash, as can be
02919     # common when defining classes in C macros.
02920     prev_line = clean_lines.lines[linenum - 1]
02921     if (not IsBlankLine(prev_line) and
02922         not Search(r'\b(class|struct)\b', prev_line) and
02923         not Search(r'\\$', prev_line)):
02924       # Try a bit harder to find the beginning of the class.  This is to
02925       # account for multi-line base-specifier lists, e.g.:
02926       #   class Derived
02927       #       : public Base {
02928       end_class_head = class_info.starting_linenum
02929       for i in range(class_info.starting_linenum, linenum):
02930         if Search(r'\{\s*$', clean_lines.lines[i]):
02931           end_class_head = i
02932           break
02933       if end_class_head < linenum - 1:
02934         error(filename, linenum, 'whitespace/blank_line', 3,
02935               '"%s:" should be preceded by a blank line' % matched.group(1))
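  # Illustrative (hypothetical) example, assuming the class body spans more
  # than 24 lines:
  #   class Foo {
  #    public:
  #     void Bar();
  #     private:        <- '"private:" should be preceded by a blank line'
  #     int baz_;
  #   };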
02936 
02937 
02938 def GetPreviousNonBlankLine(clean_lines, linenum):
02939   """Return the most recent non-blank line and its line number.
02940 
02941   Args:
02942     clean_lines: A CleansedLines instance containing the file contents.
02943     linenum: The number of the line to check.
02944 
02945   Returns:
02946     A tuple with two elements.  The first element is the contents of the last
02947     non-blank line before the current line, or the empty string if this is the
02948     first non-blank line.  The second is the line number of that line, or -1
02949     if this is the first non-blank line.
02950   """
02951 
02952   prevlinenum = linenum - 1
02953   while prevlinenum >= 0:
02954     prevline = clean_lines.elided[prevlinenum]
02955     if not IsBlankLine(prevline):     # if not a blank line...
02956       return (prevline, prevlinenum)
02957     prevlinenum -= 1
02958   return ('', -1)
02959 
02960 
02961 def CheckBraces(filename, clean_lines, linenum, error):
02962   """Looks for misplaced braces (e.g. at the end of line).
02963 
02964   Args:
02965     filename: The name of the current file.
02966     clean_lines: A CleansedLines instance containing the file.
02967     linenum: The number of the line to check.
02968     error: The function to call with any errors found.
02969   """
02970 
02971   line = clean_lines.elided[linenum]        # get rid of comments and strings
02972 
02973   if Match(r'\s*{\s*$', line):
02974     # We allow an open brace to start a line in the case where someone is using
02975     # braces in a block to explicitly create a new scope, which is commonly used
02976     # to control the lifetime of stack-allocated variables.  Braces are also
02977     # used for brace initializers inside function calls.  We don't detect this
02978     # perfectly: we just don't complain if the last non-whitespace character on
02979     # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
02980     # previous line starts a preprocessor block.
02981     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
02982     if (not Search(r'[,;:}{(]\s*$', prevline) and
02983         not Match(r'\s*#', prevline)):
02984       error(filename, linenum, 'whitespace/braces', 4,
02985             '{ should almost always be at the end of the previous line')
02986 
02987   # An else clause should be on the same line as the preceding closing brace.
02988   if Match(r'\s*else\b', line):
02989     prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
02990     if Match(r'\s*}\s*$', prevline):
02991       error(filename, linenum, 'whitespace/newline', 4,
02992             'An else should appear on the same line as the preceding }')
02993 
02994   # If braces come on one side of an else, they should be on both.
02995   # However, we have to worry about "else if" that spans multiple lines!
02996   if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
02997     if Search(r'}\s*else if([^{]*)$', line):       # could be multi-line if
02998       # find the ( after the if
02999       pos = line.find('else if')
03000       pos = line.find('(', pos)
03001       if pos > 0:
03002         (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
03003         if endline[endpos:].find('{') == -1:    # must be brace after if
03004           error(filename, linenum, 'readability/braces', 5,
03005                 'If an else has a brace on one side, it should have it on both')
03006     else:            # common case: else not followed by a multi-line if
03007       error(filename, linenum, 'readability/braces', 5,
03008             'If an else has a brace on one side, it should have it on both')
03009 
03010   # Likewise, an else should never have the else clause on the same line
03011   if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
03012     error(filename, linenum, 'whitespace/newline', 4,
03013           'Else clause should never be on same line as else (use 2 lines)')
03014 
03015   # In the same way, a do/while should never be on one line
03016   if Match(r'\s*do [^\s{]', line):
03017     error(filename, linenum, 'whitespace/newline', 4,
03018           'do/while clauses should not be on a single line')
03019 
03020   # Block bodies should not be followed by a semicolon.  Due to C++11
03021   # brace initialization, there are more places where semicolons are
03022   # required than not, so we use a whitelist approach to check these
03023   # rather than a blacklist.  These are the places where "};" should
03024   # be replaced by just "}":
03025   # 1. Some flavor of block following closing parenthesis:
03026   #    for (;;) {};
03027   #    while (...) {};
03028   #    switch (...) {};
03029   #    Function(...) {};
03030   #    if (...) {};
03031   #    if (...) else if (...) {};
03032   #
03033   # 2. else block:
03034   #    if (...) else {};
03035   #
03036   # 3. const member function:
03037   #    Function(...) const {};
03038   #
03039   # 4. Block following some statement:
03040   #    x = 42;
03041   #    {};
03042   #
03043   # 5. Block at the beginning of a function:
03044   #    Function(...) {
03045   #      {};
03046   #    }
03047   #
03048   #    Note that naively checking for the preceding "{" will also match
03049   #    braces inside multi-dimensional arrays, but this is fine since
03050   #    that expression will not contain semicolons.
03051   #
03052   # 6. Block following another block:
03053   #    while (true) {}
03054   #    {};
03055   #
03056   # 7. End of namespaces:
03057   #    namespace {};
03058   #
03059   #    These semicolons seem far more common than other kinds of
03060   #    redundant semicolons, possibly due to people converting classes
03061   #    to namespaces.  For now we do not warn for this case.
03062   #
03063   # Try matching case 1 first.
03064   match = Match(r'^(.*\)\s*)\{', line)
03065   if match:
03066     # Matched closing parenthesis (case 1).  Check the token before the
03067     # matching opening parenthesis, and don't warn if it looks like a
03068     # macro.  This avoids these false positives:
03069     #  - macro that defines a base class
03070     #  - multi-line macro that defines a base class
03071     #  - macro that defines the whole class-head
03072     #
03073     # But we still issue warnings for macros that we know are safe to
03074     # warn, specifically:
03075     #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
03076     #  - TYPED_TEST
03077     #  - INTERFACE_DEF
03078     #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
03079     #
03080     # We implement a whitelist of safe macros instead of a blacklist of
03081     # unsafe macros, even though the latter appears less frequently in
03082     # google code and would have been easier to implement.  This is because
03083     # the downside for getting the whitelist wrong means some extra
03084     # the downside of getting the whitelist wrong is only some extra
03085     # semicolons, while the downside of getting the blacklist wrong
03086     # would be compile errors.
03087     # In addition to macros, we also don't want to warn on compound
03088     # literals.
03089     closing_brace_pos = match.group(1).rfind(')')
03090     opening_parenthesis = ReverseCloseExpression(
03091         clean_lines, linenum, closing_brace_pos)
03092     if opening_parenthesis[2] > -1:
03093       line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
03094       macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
03095       if ((macro and
03096            macro.group(1) not in (
03097                'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
03098                'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
03099                'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
03100           Search(r'\s+=\s*$', line_prefix)):
03101         match = None
03102 
03103   else:
03104     # Try matching cases 2-3.
03105     match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
03106     if not match:
03107       # Try matching cases 4-6.  These are always matched on separate lines.
03108       #
03109       # Note that we can't simply concatenate the previous line to the
03110       # current line and do a single match, otherwise we may output
03111       # duplicate warnings for the blank line case:
03112       #   if (cond) {
03113       #     // blank line
03114       #   }
03115       prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
03116       if prevline and Search(r'[;{}]\s*$', prevline):
03117         match = Match(r'^(\s*)\{', line)
03118 
03119   # Check matching closing brace
03120   if match:
03121     (endline, endlinenum, endpos) = CloseExpression(
03122         clean_lines, linenum, len(match.group(1)))
03123     if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
03124       # Current {} pair is eligible for semicolon check, and we have found
03125       # the redundant semicolon, output warning here.
03126       #
03127       # Note: because we are scanning forward for opening braces, and
03128       # outputting warnings for the matching closing brace, if there are
03129       # nested blocks with trailing semicolons, we will get the error
03130       # messages in reversed order.
03131       error(filename, endlinenum, 'readability/braces', 4,
03132             "You don't need a ; after a }")
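  # Illustrative (hypothetical) example for the trailing-semicolon check above:
  #   while (waiting) {
  #     Poll();
  #   };                      <- "You don't need a ; after a }"
  # Class, struct, and namespace definitions legitimately end in "};" and are
  # not flagged.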
03133 
03134 
03135 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
03136   """Look for empty loop/conditional body with only a single semicolon.
03137 
03138   Args:
03139     filename: The name of the current file.
03140     clean_lines: A CleansedLines instance containing the file.
03141     linenum: The number of the line to check.
03142     error: The function to call with any errors found.
03143   """
03144 
03145   # Search for loop keywords at the beginning of the line.  Because only
03146   # whitespace is allowed before the keywords, this will also ignore most
03147   # do-while loops, since those lines should start with a closing brace.
03148   #
03149   # We also check "if" blocks here, since an empty conditional block
03150   # is likely an error.
03151   line = clean_lines.elided[linenum]
03152   matched = Match(r'\s*(for|while|if)\s*\(', line)
03153   if matched:
03154     # Find the end of the conditional expression
03155     (end_line, end_linenum, end_pos) = CloseExpression(
03156         clean_lines, linenum, line.find('('))
03157 
03158     # Output warning if what follows the condition expression is a semicolon.
03159     # No warning for all other cases, including whitespace or newline, since we
03160     # have a separate check for semicolons preceded by whitespace.
03161     if end_pos >= 0 and Match(r';', end_line[end_pos:]):
03162       if matched.group(1) == 'if':
03163         error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
03164               'Empty conditional bodies should use {}')
03165       else:
03166         error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
03167               'Empty loop bodies should use {} or continue')
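  # Illustrative (hypothetical) examples:
  #   "while (Pending());"    -> "Empty loop bodies should use {} or continue"
  #   "if (done);"            -> "Empty conditional bodies should use {}"
  #   "while (Pending()) {}"  -> no warning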
03168 
03169 
03170 def CheckCheck(filename, clean_lines, linenum, error):
03171   """Checks the use of CHECK and EXPECT macros.
03172 
03173   Args:
03174     filename: The name of the current file.
03175     clean_lines: A CleansedLines instance containing the file.
03176     linenum: The number of the line to check.
03177     error: The function to call with any errors found.
03178   """
03179 
03180   # Decide the set of replacement macros that should be suggested
03181   lines = clean_lines.elided
03182   check_macro = None
03183   start_pos = -1
03184   for macro in _CHECK_MACROS:
03185     i = lines[linenum].find(macro)
03186     if i >= 0:
03187       check_macro = macro
03188 
03189       # Find opening parenthesis.  Do a regular expression match here
03190       # to make sure that we are matching the expected CHECK macro, as
03191       # opposed to some other macro that happens to contain the CHECK
03192       # substring.
03193       matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
03194       if not matched:
03195         continue
03196       start_pos = len(matched.group(1))
03197       break
03198   if not check_macro or start_pos < 0:
03199     # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
03200     return
03201 
03202   # Find end of the boolean expression by matching parentheses
03203   (last_line, end_line, end_pos) = CloseExpression(
03204       clean_lines, linenum, start_pos)
03205   if end_pos < 0:
03206     return
03207   if linenum == end_line:
03208     expression = lines[linenum][start_pos + 1:end_pos - 1]
03209   else:
03210     expression = lines[linenum][start_pos + 1:]
03211     for i in xrange(linenum + 1, end_line):
03212       expression += lines[i]
03213     expression += last_line[0:end_pos - 1]
03214 
03215   # Parse expression so that we can take parentheses into account.
03216   # This avoids false positives for inputs like "CHECK((a < 4) == b)",
03217   # which is not replaceable by CHECK_LE.
03218   lhs = ''
03219   rhs = ''
03220   operator = None
03221   while expression:
03222     matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
03223                     r'==|!=|>=|>|<=|<|\()(.*)$', expression)
03224     if matched:
03225       token = matched.group(1)
03226       if token == '(':
03227         # Parenthesized operand
03228         expression = matched.group(2)
03229         (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
03230         if end < 0:
03231           return  # Unmatched parenthesis
03232         lhs += '(' + expression[0:end]
03233         expression = expression[end:]
03234       elif token in ('&&', '||'):
03235         # Logical and/or operators.  This means the expression
03236         # contains more than one term, for example:
03237         #   CHECK(42 < a && a < b);
03238         #
03239         # These are not replaceable with CHECK_LE, so bail out early.
03240         return
03241       elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
03242         # Non-relational operator
03243         lhs += token
03244         expression = matched.group(2)
03245       else:
03246         # Relational operator
03247         operator = token
03248         rhs = matched.group(2)
03249         break
03250     else:
03251       # Unparenthesized operand.  Instead of appending to lhs one character
03252       # at a time, we do another regular expression match to consume several
03253       # characters at once if possible.  Trivial benchmark shows that this
03254       # is more efficient when the operands are longer than a single
03255       # character, which is generally the case.
03256       matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
03257       if not matched:
03258         matched = Match(r'^(\s*\S)(.*)$', expression)
03259         if not matched:
03260           break
03261       lhs += matched.group(1)
03262       expression = matched.group(2)
03263 
03264   # Only apply checks if we got all parts of the boolean expression
03265   if not (lhs and operator and rhs):
03266     return
03267 
03268   # Check that rhs does not contain logical operators.  We already know
03269   # that lhs is fine since the loop above parses out && and ||.
03270   if rhs.find('&&') > -1 or rhs.find('||') > -1:
03271     return
03272 
03273   # At least one of the operands must be a constant literal.  This is
03274   # to avoid suggesting replacements for unprintable things like
03275   # CHECK(variable != iterator)
03276   #
03277   # The following pattern matches decimal, hex integers, strings, and
03278   # characters (in that order).
03279   lhs = lhs.strip()
03280   rhs = rhs.strip()
03281   match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
03282   if Match(match_constant, lhs) or Match(match_constant, rhs):
03283     # Note: since we know both lhs and rhs, we can provide a more
03284     # descriptive error message like:
03285     #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
03286     # Instead of:
03287     #   Consider using CHECK_EQ instead of CHECK(a == b)
03288     #
03289     # We are still keeping the less descriptive message because if lhs
03290     # or rhs gets long, the error message might become unreadable.
03291     error(filename, linenum, 'readability/check', 2,
03292           'Consider using %s instead of %s(a %s b)' % (
03293               _CHECK_REPLACEMENT[check_macro][operator],
03294               check_macro, operator))
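  # Illustrative (hypothetical) examples of the suggestions above (the macro and
  # replacement tables are defined earlier in this file):
  #   "CHECK(x == 42);"      -> "Consider using CHECK_EQ instead of CHECK(a == b)"
  #   "EXPECT_TRUE(n < 5);"  -> "Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)"
  #   "CHECK(a == b);"       -> no warning (neither operand is a constant literal)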
03295 
03296 
03297 def CheckAltTokens(filename, clean_lines, linenum, error):
03298   """Check alternative keywords being used in boolean expressions.
03299 
03300   Args:
03301     filename: The name of the current file.
03302     clean_lines: A CleansedLines instance containing the file.
03303     linenum: The number of the line to check.
03304     error: The function to call with any errors found.
03305   """
03306   line = clean_lines.elided[linenum]
03307 
03308   # Avoid preprocessor lines
03309   if Match(r'^\s*#', line):
03310     return
03311 
03312   # Last ditch effort to avoid multi-line comments.  This will not help
03313   # if the comment started before the current line or ended after the
03314   # current line, but it catches most of the false positives.  At least,
03315   # it provides a way to work around this warning for people who use
03316   # multi-line comments in preprocessor macros.
03317   #
03318   # TODO(unknown): remove this once cpplint has better support for
03319   # multi-line comments.
03320   if line.find('/*') >= 0 or line.find('*/') >= 0:
03321     return
03322 
03323   for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
03324     error(filename, linenum, 'readability/alt_tokens', 2,
03325           'Use operator %s instead of %s' % (
03326               _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
03327 
03328 
03329 def GetLineWidth(line):
03330   """Determines the width of the line in column positions.
03331 
03332   Args:
03333     line: A string, which may be a Unicode string.
03334 
03335   Returns:
03336     The width of the line in column positions, accounting for Unicode
03337     combining characters and wide characters.
03338   """
03339   if isinstance(line, unicode):
03340     width = 0
03341     for uc in unicodedata.normalize('NFC', line):
03342       if unicodedata.east_asian_width(uc) in ('W', 'F'):
03343         width += 2
03344       elif not unicodedata.combining(uc):
03345         width += 1
03346     return width
03347   else:
03348     return len(line)
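# Illustrative (hypothetical) usage: wide CJK characters count as two columns,
# so GetLineWidth(u'\u4e2d\u6587') == 4, while GetLineWidth('abcd') == 4.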
03349 
03350 
03351 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
03352                error):
03353   """Checks rules from the 'C++ style rules' section of cppguide.html.
03354 
03355   Most of these rules are hard to test (naming, comment style), but we
03356   do what we can.  In particular we check for 2-space indents, line lengths,
03357   tab usage, spaces inside code, etc.
03358 
03359   Args:
03360     filename: The name of the current file.
03361     clean_lines: A CleansedLines instance containing the file.
03362     linenum: The number of the line to check.
03363     file_extension: The extension (without the dot) of the filename.
03364     nesting_state: A _NestingState instance which maintains information about
03365                    the current stack of nested blocks being parsed.
03366     error: The function to call with any errors found.
03367   """
03368 
03369   # Don't use "elided" lines here, otherwise we can't check commented lines.
03370   # Don't want to use "raw" either, because we don't want to check inside C++11
03371   # raw strings.
03372   raw_lines = clean_lines.lines_without_raw_strings
03373   line = raw_lines[linenum]
03374 
03375   if line.find('\t') != -1:
03376     error(filename, linenum, 'whitespace/tab', 1,
03377           'Tab found; better to use spaces')
03378 
03379   # One or three blank spaces at the beginning of the line is weird; it's
03380   # hard to reconcile that with 2-space indents.
03381   # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
03382   # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
03383   # if(RLENGTH > 20) complain = 0;
03384   # if(match($0, " +(error|private|public|protected):")) complain = 0;
03385   # if(match(prev, "&& *$")) complain = 0;
03386   # if(match(prev, "\\|\\| *$")) complain = 0;
03387   # if(match(prev, "[\",=><] *$")) complain = 0;
03388   # if(match($0, " <<")) complain = 0;
03389   # if(match(prev, " +for \\(")) complain = 0;
03390   # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
03391   initial_spaces = 0
03392   cleansed_line = clean_lines.elided[linenum]
03393   while initial_spaces < len(line) and line[initial_spaces] == ' ':
03394     initial_spaces += 1
03395   if line and line[-1].isspace():
03396     error(filename, linenum, 'whitespace/end_of_line', 4,
03397           'Line ends in whitespace.  Consider deleting these extra spaces.')
03398   # There are certain situations where we allow one space, notably for section labels
03399   elif ((initial_spaces == 1 or initial_spaces == 3) and
03400         not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
03401     error(filename, linenum, 'whitespace/indent', 3,
03402           'Weird number of spaces at line-start.  '
03403           'Are you using a 2-space indent?')
03404 
03405   # Check if the line is a header guard.
03406   is_header_guard = False
03407   if file_extension == 'h':
03408     cppvar = GetHeaderGuardCPPVariable(filename)
03409     if (line.startswith('#ifndef %s' % cppvar) or
03410         line.startswith('#define %s' % cppvar) or
03411         line.startswith('#endif  // %s' % cppvar)):
03412       is_header_guard = True
03413   # #include lines and header guards can be long, since there's no clean way to
03414   # split them.
03415   #
03416   # URLs can be long too.  It's possible to split these, but it makes them
03417   # harder to cut&paste.
03418   #
03419   # The "$Id:...$" comment may also get very long without it being the
03420   # developer's fault.
03421   if (not line.startswith('#include') and not is_header_guard and
03422       not Match(r'^\s*//.*http(s?)://\S*$', line) and
03423       not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
03424     line_width = GetLineWidth(line)
03425     extended_length = int((_line_length * 1.25))
03426     if line_width > extended_length:
03427       error(filename, linenum, 'whitespace/line_length', 4,
03428             'Lines should very rarely be longer than %i characters' %
03429             extended_length)
03430     elif line_width > _line_length:
03431       error(filename, linenum, 'whitespace/line_length', 2,
03432             'Lines should be <= %i characters long' % _line_length)
03433 
03434   if (cleansed_line.count(';') > 1 and
03435       # for loops are allowed two ;'s (and may run over two lines).
03436       cleansed_line.find('for') == -1 and
03437       (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
03438        GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
03439       # It's ok to have many commands in a switch case that fits in 1 line
03440       not ((cleansed_line.find('case ') != -1 or
03441             cleansed_line.find('default:') != -1) and
03442            cleansed_line.find('break;') != -1)):
03443     error(filename, linenum, 'whitespace/newline', 0,
03444           'More than one command on the same line')
03445 
03446   # Some more style checks
03447   CheckBraces(filename, clean_lines, linenum, error)
03448   CheckEmptyBlockBody(filename, clean_lines, linenum, error)
03449   CheckAccess(filename, clean_lines, linenum, nesting_state, error)
03450   CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
03451   CheckCheck(filename, clean_lines, linenum, error)
03452   CheckAltTokens(filename, clean_lines, linenum, error)
03453   classinfo = nesting_state.InnermostClass()
03454   if classinfo:
03455     CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
03456 
03457 
03458 _RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
03459 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
03460 # Matches the first component of a filename delimited by -s and _s. That is:
03461 #  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
03462 #  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
03463 #  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
03464 #  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
03465 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
03466 
03467 
03468 def _DropCommonSuffixes(filename):
03469   """Drops common suffixes like _test.cc or -inl.h from filename.
03470 
03471   For example:
03472     >>> _DropCommonSuffixes('foo/foo-inl.h')
03473     'foo/foo'
03474     >>> _DropCommonSuffixes('foo/bar/foo.cc')
03475     'foo/bar/foo'
03476     >>> _DropCommonSuffixes('foo/foo_internal.h')
03477     'foo/foo'
03478     >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
03479     'foo/foo_unusualinternal'
03480 
03481   Args:
03482     filename: The input filename.
03483 
03484   Returns:
03485     The filename with the common suffix removed.
03486   """
03487   for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
03488                  'inl.h', 'impl.h', 'internal.h'):
03489     if (filename.endswith(suffix) and len(filename) > len(suffix) and
03490         filename[-len(suffix) - 1] in ('-', '_')):
03491       return filename[:-len(suffix) - 1]
03492   return os.path.splitext(filename)[0]
03493 
03494 
03495 def _IsTestFilename(filename):
03496   """Determines if the given filename has a suffix that identifies it as a test.
03497 
03498   Args:
03499     filename: The input filename.
03500 
03501   Returns:
03502     True if 'filename' looks like a test, False otherwise.
03503   """
03504   if (filename.endswith('_test.cc') or
03505       filename.endswith('_unittest.cc') or
03506       filename.endswith('_regtest.cc')):
03507     return True
03508   else:
03509     return False
03510 
03511 
03512 def _ClassifyInclude(fileinfo, include, is_system):
03513   """Figures out what kind of header 'include' is.
03514 
03515   Args:
03516     fileinfo: The current file cpplint is running over. A FileInfo instance.
03517     include: The path to a #included file.
03518     is_system: True if the #include used <> rather than "".
03519 
03520   Returns:
03521     One of the _XXX_HEADER constants.
03522 
03523   For example:
03524     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
03525     _C_SYS_HEADER
03526     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
03527     _CPP_SYS_HEADER
03528     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
03529     _LIKELY_MY_HEADER
03530     >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
03531     ...                  'bar/foo_other_ext.h', False)
03532     _POSSIBLE_MY_HEADER
03533     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
03534     _OTHER_HEADER
03535   """
03536   # This is a list of all standard c++ header files, except
03537   # those already checked for above.
03538   is_cpp_h = include in _CPP_HEADERS
03539 
03540   if is_system:
03541     if is_cpp_h:
03542       return _CPP_SYS_HEADER
03543     else:
03544       return _C_SYS_HEADER
03545 
03546   # If the target file and the include we're checking share a
03547   # basename when we drop common extensions, and the include
03548   # lives in . , then it's likely to be owned by the target file.
03549   target_dir, target_base = (
03550       os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
03551   include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
03552   if target_base == include_base and (
03553       include_dir == target_dir or
03554       include_dir == os.path.normpath(target_dir + '/../public')):
03555     return _LIKELY_MY_HEADER
03556 
03557   # If the target and include share some initial basename
03558   # component, it's possible the target is implementing the
03559   # include, so it's allowed to be first, but we'll never
03560   # complain if it's not there.
03561   target_first_component = _RE_FIRST_COMPONENT.match(target_base)
03562   include_first_component = _RE_FIRST_COMPONENT.match(include_base)
03563   if (target_first_component and include_first_component and
03564       target_first_component.group(0) ==
03565       include_first_component.group(0)):
03566     return _POSSIBLE_MY_HEADER
03567 
03568   return _OTHER_HEADER
03569 
03570 
03571 
03572 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
03573   """Check rules that are applicable to #include lines.
03574 
03575   Strings on #include lines are NOT removed from the elided line, to make
03576   certain tasks easier. However, to prevent false positives, checks
03577   applicable to #include lines in CheckLanguage must be put here.
03578 
03579   Args:
03580     filename: The name of the current file.
03581     clean_lines: A CleansedLines instance containing the file.
03582     linenum: The number of the line to check.
03583     include_state: An _IncludeState instance in which the headers are inserted.
03584     error: The function to call with any errors found.
03585   """
03586   fileinfo = FileInfo(filename)
03587 
03588   line = clean_lines.lines[linenum]
03589 
03590   # "include" should use the new style "foo/bar.h" instead of just "bar.h"
03591   if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
03592     error(filename, linenum, 'build/include', 4,
03593           'Include the directory when naming .h files')
03594 
03595   # we shouldn't include a file more than once. actually, there are a
03596   # handful of instances where doing so is okay, but in general it's
03597   # not.
03598   match = _RE_PATTERN_INCLUDE.search(line)
03599   if match:
03600     include = match.group(2)
03601     is_system = (match.group(1) == '<')
03602     if include in include_state:
03603       error(filename, linenum, 'build/include', 4,
03604             '"%s" already included at %s:%s' %
03605             (include, filename, include_state[include]))
03606     else:
03607       include_state[include] = linenum
03608 
03609       # We want to ensure that headers appear in the right order:
03610       # 1) for foo.cc, foo.h  (preferred location)
03611       # 2) c system files
03612       # 3) cpp system files
03613       # 4) for foo.cc, foo.h  (deprecated location)
03614       # 5) other google headers
03615       #
03616       # We classify each include statement as one of those 5 types
03617       # using a number of techniques. The include_state object keeps
03618       # track of the highest type seen, and complains if we see a
03619       # lower type after that.
03620       error_message = include_state.CheckNextIncludeOrder(
03621           _ClassifyInclude(fileinfo, include, is_system))
03622       if error_message:
03623         error(filename, linenum, 'build/include_order', 4,
03624               '%s. Should be: %s.h, c system, c++ system, other.' %
03625               (error_message, fileinfo.BaseName()))
03626       canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
03627       if not include_state.IsInAlphabeticalOrder(
03628           clean_lines, linenum, canonical_include):
03629         error(filename, linenum, 'build/include_alpha', 4,
03630               'Include "%s" not in alphabetical order' % include)
03631       include_state.SetLastHeader(canonical_include)
03632 
03633   # Look for any of the stream classes that are part of standard C++.
03634   match = _RE_PATTERN_INCLUDE.match(line)
03635   if match:
03636     include = match.group(2)
03637     if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
03638       # Many unit tests use cout, so we exempt them.
03639       if not _IsTestFilename(filename):
03640         error(filename, linenum, 'readability/streams', 3,
03641               'Streams are highly discouraged.')
03642 
03643 
03644 def _GetTextInside(text, start_pattern):
03645   r"""Retrieves all the text between matching open and close parentheses.
03646 
03647   Given a string of lines and a regular expression string, retrieve all the text
03648   following the expression and between opening punctuation symbols like
03649   (, [, or {, and the matching close-punctuation symbol. This properly handles
03650   nested occurrences of the punctuation, so for text like
03651     printf(a(), b(c()));
03652   a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
03653   start_pattern must match a string that ends with an opening punctuation symbol.
03654 
03655   Args:
03656     text: The text to extract from. Its comments and strings must be elided.
03657            It can be a single line or span multiple lines.
03658     start_pattern: The regexp string indicating where to start extracting
03659                    the text.
03660   Returns:
03661     The extracted text.
03662     None if either the opening string or ending punctuation could not be found.
03663   """
03664   # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
03665   # rewritten to use _GetTextInside (and currently use inferior regexp matching).
03666 
03667   # Map each opening punctuation symbol to its matching closing symbol.
03668   matching_punctuation = {'(': ')', '{': '}', '[': ']'}
03669   closing_punctuation = set(matching_punctuation.itervalues())
03670 
03671   # Find the position to start extracting text.
03672   match = re.search(start_pattern, text, re.M)
03673   if not match:  # start_pattern not found in text.
03674     return None
03675   start_position = match.end(0)
03676 
03677   assert start_position > 0, (
03678       'start_pattern must end with an opening punctuation.')
03679   assert text[start_position - 1] in matching_punctuation, (
03680       'start_pattern must end with an opening punctuation.')
03681   # Stack of closing punctuations we expect to have in text after position.
03682   punctuation_stack = [matching_punctuation[text[start_position - 1]]]
03683   position = start_position
03684   while punctuation_stack and position < len(text):
03685     if text[position] == punctuation_stack[-1]:
03686       punctuation_stack.pop()
03687     elif text[position] in closing_punctuation:
03688       # A closing punctuation without matching opening punctuations.
03689       return None
03690     elif text[position] in matching_punctuation:
03691       punctuation_stack.append(matching_punctuation[text[position]])
03692     position += 1
03693   if punctuation_stack:
03694     # Opening punctuations left without matching close-punctuations.
03695     return None
03696   # punctuations match.
03697   return text[start_position:position - 1]
03698 
03699 
03700 # Patterns for matching call-by-reference parameters.
03701 #
03702 # Supports nested templates up to 2 levels deep using this messy pattern:
03703 #   < (?: < (?: < [^<>]*
03704 #               >
03705 #           |   [^<>] )*
03706 #         >
03707 #     |   [^<>] )*
03708 #   >
03709 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
03710 _RE_PATTERN_TYPE = (
03711     r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
03712     r'(?:\w|'
03713     r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
03714     r'::)+')
03715 # A call-by-reference parameter ends with '& identifier'.
03716 _RE_PATTERN_REF_PARAM = re.compile(
03717     r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
03718     r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
03719 # A call-by-const-reference parameter either ends with 'const& identifier'
03720 # or looks like 'const type& identifier' when 'type' is atomic.
03721 _RE_PATTERN_CONST_REF_PARAM = (
03722     r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
03723     r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
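# Illustrative (hypothetical) parameter declarations and how the patterns above
# treat them:
#   "void F(string& s)"            -> 'string& s' matches _RE_PATTERN_REF_PARAM
#   "void F(const string& s)"      -> also matches _RE_PATTERN_CONST_REF_PARAM
#   "void F(map<int, string>& m)"  -> matched (templates nested up to 2 levels)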
03724 
03725 
03726 def CheckLanguage(filename, clean_lines, linenum, file_extension,
03727                   include_state, nesting_state, error):
03728   """Checks rules from the 'C++ language rules' section of cppguide.html.
03729 
03730   Some of these rules are hard to test (function overloading, using
03731   uint32 inappropriately), but we do the best we can.
03732 
03733   Args:
03734     filename: The name of the current file.
03735     clean_lines: A CleansedLines instance containing the file.
03736     linenum: The number of the line to check.
03737     file_extension: The extension (without the dot) of the filename.
03738     include_state: An _IncludeState instance in which the headers are inserted.
03739     nesting_state: A _NestingState instance which maintains information about
03740                    the current stack of nested blocks being parsed.
03741     error: The function to call with any errors found.
03742   """
03743   # If the line is empty or consists of entirely a comment, no need to
03744   # check it.
03745   line = clean_lines.elided[linenum]
03746   if not line:
03747     return
03748 
03749   match = _RE_PATTERN_INCLUDE.search(line)
03750   if match:
03751     CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
03752     return
03753 
03754   # Reset include state across preprocessor directives.  This is meant
03755   # to silence warnings for conditional includes.
03756   if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
03757     include_state.ResetSection()
03758 
03759   # Make Windows paths like Unix.
03760   fullname = os.path.abspath(filename).replace('\\', '/')
03761 
03762   # TODO(unknown): figure out if they're using default arguments in fn proto.
03763 
03764   # Check to see if they're using a conversion function cast.
03765   # I just try to capture the most common basic types, though there are more.
03766   # Parameterless conversion functions, such as bool(), are allowed as they are
03767   # probably a member operator declaration or default constructor.
03768   match = Search(
03769       r'(\bnew\s+)?\b'  # Grab 'new' operator, if it's there
03770       r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
03771       r'(\([^)].*)', line)
03772   if match:
03773     matched_new = match.group(1)
03774     matched_type = match.group(2)
03775     matched_funcptr = match.group(3)
03776 
03777     # gMock methods are defined using some variant of MOCK_METHODx(name, type)
03778     # where type may be float(), int(string), etc.  Without context they are
03779     # virtually indistinguishable from int(x) casts. Likewise, gMock's
03780     # MockCallback takes a template parameter of the form return_type(arg_type),
03781     # which looks much like the cast we're trying to detect.
03782     #
03783     # std::function<> wrapper has a similar problem.
03784     #
03785     # Return types for function pointers also look like casts if they
03786     # don't have an extra space.
03787     if (matched_new is None and  # If new operator, then this isn't a cast
03788         not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
03789              Search(r'\bMockCallback<.*>', line) or
03790              Search(r'\bstd::function<.*>', line)) and
03791         not (matched_funcptr and
03792              Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
03793                    matched_funcptr))):
03794       # Try a bit harder to catch gmock lines: the only place where
03795       # something looks like an old-style cast is where we declare the
03796       # return type of the mocked method, and the only time when we
03797       # are missing context is if MOCK_METHOD was split across
03798       # multiple lines.  The missing MOCK_METHOD is usually one or two
03799       # lines back, so scan back one or two lines.
03800       #
03801       # It's not possible for gmock macros to appear in the first 2
03802       # lines, since the class head + section name takes up 2 lines.
03803       if (linenum < 2 or
03804           not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
03805                      clean_lines.elided[linenum - 1]) or
03806                Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
03807                      clean_lines.elided[linenum - 2]))):
03808         error(filename, linenum, 'readability/casting', 4,
03809               'Using deprecated casting style.  '
03810               'Use static_cast<%s>(...) instead' %
03811               matched_type)
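  # Illustrative (hypothetical) examples for the deprecated-cast check above:
  #   "int y = int(x);"  -> readability/casting ("Use static_cast<int>(...) instead")
  #   "bool() const;"    -> no warning (parameterless conversion function)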
03812 
03813   CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
03814                   'static_cast',
03815                   r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
03816 
03817   # This doesn't catch all cases. Consider (const char * const)"hello".
03818   #
03819   # (char *) "foo" should always be a const_cast (reinterpret_cast won't
03820   # compile).
03821   if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
03822                      'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
03823     pass
03824   else:
03825     # Check pointer casts for other than string constants
03826     CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
03827                     'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
03828 
03829   # In addition, we look for people taking the address of a cast.  This
03830   # is dangerous -- casts can assign to temporaries, so the pointer doesn't
03831   # point where you think.
03832   match = Search(
03833       r'(?:&\(([^)]+)\)[\w(])|'
03834       r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
03835   if match and match.group(1) != '*':
03836     error(filename, linenum, 'runtime/casting', 4,
03837           ('Are you taking an address of a cast?  '
03838            'This is dangerous: could be a temp var.  '
03839            'Take the address before doing the cast, rather than after'))
03840 
03841   # Create an extended_line, which is the concatenation of the current and
03842   # next lines, for more effective checking of code that may span more than one
03843   # line.
03844   if linenum + 1 < clean_lines.NumLines():
03845     extended_line = line + clean_lines.elided[linenum + 1]
03846   else:
03847     extended_line = line
03848 
03849   # Check for people declaring static/global STL strings at the top level.
03850   # This is dangerous because the C++ language does not guarantee that
03851   # globals with constructors are initialized before the first access.
03852   match = Match(
03853       r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
03854       line)
03855   # Make sure it's not a function.
03856   # Function template specialization looks like: "string foo<Type>(...".
03857   # Class template definitions look like: "string Foo<Type>::Method(...".
03858   #
03859   # Also ignore things that look like operators.  These are matched separately
03860   # because operator names cross non-word boundaries.  If we change the pattern
03861   # above, we would decrease the accuracy of matching identifiers.
03862   if (match and
03863       not Search(r'\boperator\W', line) and
03864       not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
03865     error(filename, linenum, 'runtime/string', 4,
03866           'For a static/global string constant, use a C style string instead: '
03867           '"%schar %s[]".' %
03868           (match.group(1), match.group(2)))
03869 
03870   if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
03871     error(filename, linenum, 'runtime/init', 4,
03872           'You seem to be initializing a member variable with itself.')
03873 
03874   if file_extension == 'h':
03875     # TODO(unknown): check that 1-arg constructors are explicit.
03876     #                How to tell it's a constructor?
03877     #                (handled in CheckForNonStandardConstructs for now)
03878     # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
03879     #                (level 1 error)
03880     pass
03881 
03882   # Check if people are using the verboten C basic types.  The only exception
03883   # we regularly allow is "unsigned short port" for port.
03884   if Search(r'\bshort port\b', line):
03885     if not Search(r'\bunsigned short port\b', line):
03886       error(filename, linenum, 'runtime/int', 4,
03887             'Use "unsigned short" for ports, not "short"')
03888   else:
03889     match = Search(r'\b(short|long(?! +double)|long long)\b', line)
03890     if match:
03891       error(filename, linenum, 'runtime/int', 4,
03892             'Use int16/int64/etc, rather than the C type %s' % match.group(1))
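  # Editor's note -- illustrative, hypothetical declarations for the check above:
  #     unsigned short port;    // allowed: the one regular exception
  #     short port;             // flagged: use "unsigned short" for ports
  #     long long counter;      // flagged: use an int64-style typedef instead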
03893 
03894   # When snprintf is used, the second argument shouldn't be a literal.
03895   match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
03896   if match and match.group(2) != '0':
03897     # If 2nd arg is zero, snprintf is used to calculate size.
03898     error(filename, linenum, 'runtime/printf', 3,
03899           'If you can, use sizeof(%s) instead of %s as the 2nd arg '
03900           'to snprintf.' % (match.group(1), match.group(2)))
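  # Editor's note -- illustrative, hypothetical calls for the check above:
  #     snprintf(buf, 10, "%s", name);             // flagged: literal size argument
  #     snprintf(buf, sizeof(buf), "%s", name);    // preferred form
  #     snprintf(NULL, 0, "%s", name);             // allowed: size calculation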
03901 
03902   # Check if some verboten C functions are being used.
03903   if Search(r'\bsprintf\b', line):
03904     error(filename, linenum, 'runtime/printf', 5,
03905           'Never use sprintf.  Use snprintf instead.')
03906   match = Search(r'\b(strcpy|strcat)\b', line)
03907   if match:
03908     error(filename, linenum, 'runtime/printf', 4,
03909           'Almost always, snprintf is better than %s' % match.group(1))
03910 
03911   # Check if some verboten operator overloading is going on
03912   # TODO(unknown): catch out-of-line unary operator&:
03913   #   class X {};
03914   #   int operator&(const X& x) { return 42; }  // unary operator&
03915   # The trick is it's hard to tell apart from binary operator&:
03916   #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
03917   if Search(r'\boperator\s*&\s*\(\s*\)', line):
03918     error(filename, linenum, 'runtime/operator', 4,
03919           'Unary operator& is dangerous.  Do not use it.')
03920 
03921   # Check for suspicious usage of "if" like
03922   # } if (a == b) {
03923   if Search(r'\}\s*if\s*\(', line):
03924     error(filename, linenum, 'readability/braces', 4,
03925           'Did you mean "else if"? If not, start a new line for "if".')
03926 
03927   # Check for potential format string bugs like printf(foo).
03928   # We constrain the pattern not to pick things like DocidForPrintf(foo).
03929   # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
03930   # TODO(sugawarayu): Catch the following case. Need to change the calling
03931   # convention of the whole function to process multiple line to handle it.
03932   #   printf(
03933   #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
03934   printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
03935   if printf_args:
03936     match = Match(r'([\w.\->()]+)$', printf_args)
03937     if match and match.group(1) != '__VA_ARGS__':
03938       function_name = re.search(r'\b((?:string)?printf)\s*\(',
03939                                 line, re.I).group(1)
03940       error(filename, linenum, 'runtime/printf', 4,
03941             'Potential format string bug. Do %s("%%s", %s) instead.'
03942             % (function_name, match.group(1)))
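  # Editor's note -- an illustrative, hypothetical case for the check above:
  #     printf(message.c_str());          // flagged: potential format string bug
  #     printf("%s", message.c_str());    // suggested form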
03943 
03944   # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
03945   match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
03946   if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
03947     error(filename, linenum, 'runtime/memset', 4,
03948           'Did you mean "memset(%s, 0, %s)"?'
03949           % (match.group(1), match.group(2)))
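  # Editor's note -- an illustrative, hypothetical case for the check above:
  #     memset(buf, sizeof(buf), 0);    // flagged: size and value likely swapped
  #     memset(buf, 0, sizeof(buf));    // suggested form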
03950 
03951   if Search(r'\busing namespace\b', line):
03952     error(filename, linenum, 'build/namespaces', 5,
03953           'Do not use namespace using-directives.  '
03954           'Use using-declarations instead.')
03955 
03956   # Detect variable-length arrays.
03957   match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
03958   if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
03959       match.group(3).find(']') == -1):
03960     # Split the size using space and arithmetic operators as delimiters.
03961     # If any of the resulting tokens are not compile time constants then
03962     # report the error.
03963     tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
03964     is_const = True
03965     skip_next = False
03966     for tok in tokens:
03967       if skip_next:
03968         skip_next = False
03969         continue
03970 
03971       if Search(r'sizeof\(.+\)', tok): continue
03972       if Search(r'arraysize\(\w+\)', tok): continue
03973 
03974       tok = tok.lstrip('(')
03975       tok = tok.rstrip(')')
03976       if not tok: continue
03977       if Match(r'\d+', tok): continue
03978       if Match(r'0[xX][0-9a-fA-F]+', tok): continue
03979       if Match(r'k[A-Z0-9]\w*', tok): continue
03980       if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
03981       if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
03982       # A catch all for tricky sizeof cases, including 'sizeof expression',
03983       # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
03984       # requires skipping the next token because we split on ' ' and '*'.
03985       if tok.startswith('sizeof'):
03986         skip_next = True
03987         continue
03988       is_const = False
03989       break
03990     if not is_const:
03991       error(filename, linenum, 'runtime/arrays', 1,
03992             'Do not use variable-length arrays.  Use an appropriately named '
03993             "('k' followed by CamelCase) compile-time constant for the size.")
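  # Editor's note -- illustrative, hypothetical declarations for the check above:
  #     int scores[num_players];     // flagged: size is not a compile-time constant
  #     int scores[kMaxPlayers];     // allowed: 'k' + CamelCase constant
  #     char buf[sizeof(Header)];    // allowed: sizeof is a compile-time constant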
03994 
03995   # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
03996   # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
03997   # in the class declaration.
03998   match = Match(
03999       (r'\s*'
04000        r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
04001        r'\(.*\);$'),
04002       line)
04003   if match and linenum + 1 < clean_lines.NumLines():
04004     next_line = clean_lines.elided[linenum + 1]
04005     # We allow some, but not all, declarations of variables to be present
04006     # in the statement that defines the class.  The [\w\*,\s]* fragment of
04007     # the regular expression below allows users to declare instances of
04008     # the class or pointers to instances, but not less common types such
04009     # as function pointers or arrays.  It's a tradeoff between allowing
04010     # reasonable code and avoiding trying to parse more C++ using regexps.
04011     if not Search(r'^\s*}[\w\*,\s]*;', next_line):
04012       error(filename, linenum, 'readability/constructors', 3,
04013             match.group(1) + ' should be the last thing in the class')
04014 
04015   # Check for use of unnamed namespaces in header files.  Registration
04016   # macros are typically OK, so we allow use of "namespace {" on lines
04017   # that end with backslashes.
04018   if (file_extension == 'h'
04019       and Search(r'\bnamespace\s*{', line)
04020       and line[-1] != '\\'):
04021     error(filename, linenum, 'build/namespaces', 4,
04022           'Do not use unnamed namespaces in header files.  See '
04023           'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
04024           ' for more information.')
04025 
04026 def CheckForNonConstReference(filename, clean_lines, linenum,
04027                               nesting_state, error):
04028   """Check for non-const references.
04029 
04030   Separate from CheckLanguage since it scans backwards from current
04031   line, instead of scanning forward.
04032 
04033   Args:
04034     filename: The name of the current file.
04035     clean_lines: A CleansedLines instance containing the file.
04036     linenum: The number of the line to check.
04037     nesting_state: A _NestingState instance which maintains information about
04038                    the current stack of nested blocks being parsed.
04039     error: The function to call with any errors found.
04040   """
04041   # Do nothing if there is no '&' on current line.
04042   line = clean_lines.elided[linenum]
04043   if '&' not in line:
04044     return
04045 
04046   # Long type names may be broken across multiple lines, usually in one
04047   # of these forms:
04048   #   LongType
04049   #       ::LongTypeContinued &identifier
04050   #   LongType::
04051   #       LongTypeContinued &identifier
04052   #   LongType<
04053   #       ...>::LongTypeContinued &identifier
04054   #
04055   # If we detected a type split across two lines, join the previous
04056   # line to current line so that we can match const references
04057   # accordingly.
04058   #
04059   # Note that this only scans back one line, since scanning back
04060   # arbitrary number of lines would be expensive.  If you have a type
04061   # that spans more than 2 lines, please use a typedef.
04062   if linenum > 1:
04063     previous = None
04064     if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
04065       # previous_line\n + ::current_line
04066       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
04067                         clean_lines.elided[linenum - 1])
04068     elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
04069       # previous_line::\n + current_line
04070       previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
04071                         clean_lines.elided[linenum - 1])
04072     if previous:
04073       line = previous.group(1) + line.lstrip()
04074     else:
04075       # Check for templated parameter that is split across multiple lines
04076       endpos = line.rfind('>')
04077       if endpos > -1:
04078         (_, startline, startpos) = ReverseCloseExpression(
04079             clean_lines, linenum, endpos)
04080         if startpos > -1 and startline < linenum:
04081           # Found the matching < on an earlier line, collect all
04082           # pieces up to current line.
04083           line = ''
04084           for i in xrange(startline, linenum + 1):
04085             line += clean_lines.elided[i].strip()
04086 
04087   # Check for non-const references in function parameters.  A single '&' may
04088   # be found in the following places:
04089   #   inside expression: binary & for bitwise AND
04090   #   inside expression: unary & for taking the address of something
04091   #   inside declarators: reference parameter
04092   # We will exclude the first two cases by checking that we are not inside a
04093   # function body, including one that was just introduced by a trailing '{'.
04094   # TODO(unknown): Doesn't account for preprocessor directives.
04095   # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
04096   check_params = False
04097   if not nesting_state.stack:
04098     check_params = True  # top level
04099   elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
04100         isinstance(nesting_state.stack[-1], _NamespaceInfo)):
04101     check_params = True  # within class or namespace
04102   elif Match(r'.*{\s*$', line):
04103     if (len(nesting_state.stack) == 1 or
04104         isinstance(nesting_state.stack[-2], _ClassInfo) or
04105         isinstance(nesting_state.stack[-2], _NamespaceInfo)):
04106       check_params = True  # just opened global/class/namespace block
04107   # We allow non-const references in a few standard places, like functions
04108   # called "swap()" or iostream operators like "<<" or ">>".  Do not check
04109   # those function parameters.
04110   #
04111   # We also accept & in static_assert, which looks like a function but
04112   # it's actually a declaration expression.
04113   whitelisted_functions = (r'(?:[sS]wap(?:<[\w:]+>)?|'
04114                            r'operator\s*[<>][<>]|'
04115                            r'static_assert|COMPILE_ASSERT'
04116                            r')\s*\(')
04117   if Search(whitelisted_functions, line):
04118     check_params = False
04119   elif not Search(r'\S+\([^)]*$', line):
04120     # Don't see a whitelisted function on this line.  Actually we
04121     # didn't see any function name on this line, so this is likely a
04122     # multi-line parameter list.  Try a bit harder to catch this case.
04123     for i in xrange(2):
04124       if (linenum > i and
04125           Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
04126         check_params = False
04127         break
04128 
04129   if check_params:
04130     decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
04131     for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
04132       if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
04133         error(filename, linenum, 'runtime/references', 2,
04134               'Is this a non-const reference? '
04135               'If so, make const or use a pointer: ' +
04136               ReplaceAll(' *<', '<', parameter))
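  # Editor's note -- illustrative, hypothetical parameter lists for the check
  # above (the exact regexps, _RE_PATTERN_REF_PARAM and
  # _RE_PATTERN_CONST_REF_PARAM, are defined elsewhere in this file):
  #     void Update(Record& record);          // typically flagged: runtime/references
  #     void Update(const Record& record);    // const reference: not flagged
  #     void swap(Record& a, Record& b);      // whitelisted name: not checked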
04137 
04138 
04139 def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
04140                     error):
04141   """Checks for a C-style cast by looking for the pattern.
04142 
04143   Args:
04144     filename: The name of the current file.
04145     linenum: The number of the line to check.
04146     line: The line of code to check.
04147     raw_line: The raw line of code to check, with comments.
04148     cast_type: The string for the C++ cast to recommend.  This is either
04149       reinterpret_cast, static_cast, or const_cast, depending.
04150     pattern: The regular expression used to find C-style casts.
04151     error: The function to call with any errors found.
04152 
04153   Returns:
04154     True if an error was emitted.
04155     False otherwise.
04156   """
04157   match = Search(pattern, line)
04158   if not match:
04159     return False
04160 
04161   # Exclude lines with sizeof, since sizeof looks like a cast.
04162   sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
04163   if sizeof_match:
04164     return False
04165 
04166   # operator++(int) and operator--(int)
04167   if (line[0:match.start(1) - 1].endswith(' operator++') or
04168       line[0:match.start(1) - 1].endswith(' operator--')):
04169     return False
04170 
04171   # A single unnamed argument for a function tends to look like an
04172   # old-style cast.  If we see one, don't issue warnings for deprecated
04173   # casts; instead issue warnings for unnamed arguments where
04174   # appropriate.
04175   #
04176   # These are things that we want warnings for, since the style guide
04177   # explicitly requires all parameters to be named:
04178   #   Function(int);
04179   #   Function(int) {
04180   #   ConstMember(int) const;
04181   #   ConstMember(int) const {
04182   #   ExceptionMember(int) throw (...);
04183   #   ExceptionMember(int) throw (...) {
04184   #   PureVirtual(int) = 0;
04185   #
04186   # These are functions of some sort, where the compiler would be fine
04187   # if they had named parameters, but people often omit those
04188   # identifiers to reduce clutter:
04189   #   (FunctionPointer)(int);
04190   #   (FunctionPointer)(int) = value;
04191   #   Function((function_pointer_arg)(int))
04192   #   <TemplateArgument(int)>;
04193   #   <(FunctionPointerTemplateArgument)(int)>;
04194   remainder = line[match.end(0):]
04195   if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
04196     # Looks like an unnamed parameter.
04197 
04198     # Don't warn on any kind of template arguments.
04199     if Match(r'^\s*>', remainder):
04200       return False
04201 
04202     # Don't warn on assignments to function pointers, but keep warnings for
04203     # unnamed parameters to pure virtual functions.  Note that this pattern
04204     # will also pass on assignments of "0" to function pointers, but the
04205     # preferred values for those would be "nullptr" or "NULL".
04206     matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
04207     if matched_zero and matched_zero.group(1) != '0':
04208       return False
04209 
04210     # Don't warn on function pointer declarations.  For this we need
04211     # to check what came before the "(type)" string.
04212     if Match(r'.*\)\s*$', line[0:match.start(0)]):
04213       return False
04214 
04215     # Don't warn if the parameter is named with block comments, e.g.:
04216     #  Function(int /*unused_param*/);
04217     if '/*' in raw_line:
04218       return False
04219 
04220     # Passed all filters, issue warning here.
04221     error(filename, linenum, 'readability/function', 3,
04222           'All parameters should be named in a function')
04223     return True
04224 
04225   # At this point, all that should be left is actual casts.
04226   error(filename, linenum, 'readability/casting', 4,
04227         'Using C-style cast.  Use %s<%s>(...) instead' %
04228         (cast_type, match.group(1)))
04229 
04230   return True
04231 
04232 
04233 _HEADERS_CONTAINING_TEMPLATES = (
04234     ('<deque>', ('deque',)),
04235     ('<functional>', ('unary_function', 'binary_function',
04236                       'plus', 'minus', 'multiplies', 'divides', 'modulus',
04237                       'negate',
04238                       'equal_to', 'not_equal_to', 'greater', 'less',
04239                       'greater_equal', 'less_equal',
04240                       'logical_and', 'logical_or', 'logical_not',
04241                       'unary_negate', 'not1', 'binary_negate', 'not2',
04242                       'bind1st', 'bind2nd',
04243                       'pointer_to_unary_function',
04244                       'pointer_to_binary_function',
04245                       'ptr_fun',
04246                       'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
04247                       'mem_fun_ref_t',
04248                       'const_mem_fun_t', 'const_mem_fun1_t',
04249                       'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
04250                       'mem_fun_ref',
04251                      )),
04252     ('<limits>', ('numeric_limits',)),
04253     ('<list>', ('list',)),
04254     ('<map>', ('map', 'multimap',)),
04255     ('<memory>', ('allocator',)),
04256     ('<queue>', ('queue', 'priority_queue',)),
04257     ('<set>', ('set', 'multiset',)),
04258     ('<stack>', ('stack',)),
04259     ('<string>', ('char_traits', 'basic_string',)),
04260     ('<utility>', ('pair',)),
04261     ('<vector>', ('vector',)),
04262 
04263     # gcc extensions.
04264     # Note: std::hash is their hash, ::hash is our hash
04265     ('<hash_map>', ('hash_map', 'hash_multimap',)),
04266     ('<hash_set>', ('hash_set', 'hash_multiset',)),
04267     ('<slist>', ('slist',)),
04268     )
04269 
04270 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
04271 
04272 _re_pattern_algorithm_header = []
04273 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
04274                   'transform'):
04275   # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
04276   # type::max().
04277   _re_pattern_algorithm_header.append(
04278       (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
04279        _template,
04280        '<algorithm>'))
04281 
04282 _re_pattern_templates = []
04283 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
04284   for _template in _templates:
04285     _re_pattern_templates.append(
04286         (re.compile(r'(<|\b)' + _template + r'\s*<'),
04287          _template + '<>',
04288          _header))
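# Editor's sketch (not part of upstream cpplint): a guarded, never-executed
# illustration of how the table above is used.  For the 'map' template listed
# under '<map>', the generated pattern is roughly r'(<|\b)map\s*<', so a line
# such as "std::map<int, string> counts;" makes '<map>' a required header.
if False:  # illustrative only; never runs on import
  for _example_re, _example_name, _example_header in _re_pattern_templates:
    if _example_name == 'map<>':
      # A use of map<...> in a source line makes '<map>' a required header.
      assert _example_re.search('std::map<int, string> counts;')
      assert _example_header == '<map>'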
04289 
04290 
04291 def FilesBelongToSameModule(filename_cc, filename_h):
04292   """Check if these two filenames belong to the same module.
04293 
04294   The concept of a 'module' here is as follows:
04295   foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
04296   same 'module' if they are in the same directory.
04297   some/path/public/xyzzy and some/path/internal/xyzzy are also considered
04298   to belong to the same module here.
04299 
04300   If filename_cc contains a longer path than filename_h (for example,
04301   '/absolute/path/to/base/sysinfo.cc' including 'base/sysinfo.h'), this
04302   function also produces the prefix needed to open the header. The caller
04303   uses this prefix to open the header file more robustly. We don't have
04304   access to the real include paths in this context, so we need this
04305   guesswork here.
04306 
04307   Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
04308   according to this implementation. Because of this, this function gives
04309   some false positives. This should be sufficiently rare in practice.
04310 
04311   Args:
04312     filename_cc: the path of the .cc file
04313     filename_h: the path of the header file
04314 
04315   Returns:
04316     Tuple with a bool and a string:
04317     bool: True if filename_cc and filename_h belong to the same module.
04318     string: the additional prefix needed to open the header file.
04319   """
04320 
04321   if not filename_cc.endswith('.cc'):
04322     return (False, '')
04323   filename_cc = filename_cc[:-len('.cc')]
04324   if filename_cc.endswith('_unittest'):
04325     filename_cc = filename_cc[:-len('_unittest')]
04326   elif filename_cc.endswith('_test'):
04327     filename_cc = filename_cc[:-len('_test')]
04328   filename_cc = filename_cc.replace('/public/', '/')
04329   filename_cc = filename_cc.replace('/internal/', '/')
04330 
04331   if not filename_h.endswith('.h'):
04332     return (False, '')
04333   filename_h = filename_h[:-len('.h')]
04334   if filename_h.endswith('-inl'):
04335     filename_h = filename_h[:-len('-inl')]
04336   filename_h = filename_h.replace('/public/', '/')
04337   filename_h = filename_h.replace('/internal/', '/')
04338 
04339   files_belong_to_same_module = filename_cc.endswith(filename_h)
04340   common_path = ''
04341   if files_belong_to_same_module:
04342     common_path = filename_cc[:-len(filename_h)]
04343   return files_belong_to_same_module, common_path
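  # Editor's note -- an illustrative, hypothetical call, following the sysinfo
  # example from the docstring above:
  #     FilesBelongToSameModule('/absolute/path/to/base/sysinfo_test.cc',
  #                             'base/sysinfo.h')
  # returns (True, '/absolute/path/to/'), so the caller can open
  # '/absolute/path/to/' + 'base/sysinfo.h'.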
04344 
04345 
04346 def UpdateIncludeState(filename, include_state, io=codecs):
04347   """Fill up the include_state with new includes found from the file.
04348 
04349   Args:
04350     filename: the name of the header to read.
04351     include_state: an _IncludeState instance in which the headers are inserted.
04352     io: The io factory to use to read the file. Provided for testability.
04353 
04354   Returns:
04355     True if a header was successfully added. False otherwise.
04356   """
04357   headerfile = None
04358   try:
04359     headerfile = io.open(filename, 'r', 'utf8', 'replace')
04360   except IOError:
04361     return False
04362   linenum = 0
04363   for line in headerfile:
04364     linenum += 1
04365     clean_line = CleanseComments(line)
04366     match = _RE_PATTERN_INCLUDE.search(clean_line)
04367     if match:
04368       include = match.group(2)
04369       # The value formatting is cute, but not really used right now.
04370       # What matters here is that the key is in include_state.
04371       include_state.setdefault(include, '%s:%d' % (filename, linenum))
04372   return True
04373 
04374 
04375 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
04376                               io=codecs):
04377   """Reports missing STL includes.
04378 
04379   This function will output warnings to make sure you are including the headers
04380   necessary for the STL containers and functions that you use. We only give one
04381   reason to include a header. For example, if you use both equal_to<> and
04382   less<> in a .h file, only the one that appears later in the file will be
04383   reported as a reason to include <functional>.
04384 
04385   Args:
04386     filename: The name of the current file.
04387     clean_lines: A CleansedLines instance containing the file.
04388     include_state: An _IncludeState instance.
04389     error: The function to call with any errors found.
04390     io: The IO factory to use to read the header file. Provided for unittest
04391         injection.
04392   """
04393   required = {}  # A map of header name to linenumber and the template entity.
04394                  # Example of required: { '<functional>': (1219, 'less<>') }
04395 
04396   for linenum in xrange(clean_lines.NumLines()):
04397     line = clean_lines.elided[linenum]
04398     if not line or line[0] == '#':
04399       continue
04400 
04401     # String is special -- it is a non-templatized type in STL.
04402     matched = _RE_PATTERN_STRING.search(line)
04403     if matched:
04404       # Don't warn about strings in non-STL namespaces:
04405       # (We check only the first match per line; good enough.)
04406       prefix = line[:matched.start()]
04407       if prefix.endswith('std::') or not prefix.endswith('::'):
04408         required['<string>'] = (linenum, 'string')
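      # Editor's note -- illustrative, hypothetical lines for the check above:
      #     string name;          // requires <string>
      #     std::string name;     // requires <string>
      #     mylib::string name;   // skipped: non-STL namespace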
04409 
04410     for pattern, template, header in _re_pattern_algorithm_header:
04411       if pattern.search(line):
04412         required[header] = (linenum, template)
04413 
04414     # The following check is just a speed-up; no semantics are changed.
04415     if '<' not in line:  # Reduces CPU time by skipping lines without templates.
04416       continue
04417 
04418     for pattern, template, header in _re_pattern_templates:
04419       if pattern.search(line):
04420         required[header] = (linenum, template)
04421 
04422   # The policy is that if you #include something in foo.h you don't need to
04423   # include it again in foo.cc. Here, we will look at possible includes.
04424   # Let's copy the include_state so it is only messed up within this function.
04425   include_state = include_state.copy()
04426 
04427   # Did we find the header for this file (if any) and successfully load it?
04428   header_found = False
04429 
04430   # Use the absolute path so that matching works properly.
04431   abs_filename = FileInfo(filename).FullName()
04432 
04433   # For Emacs's flymake.
04434   # If cpplint is invoked from Emacs's flymake, a temporary file is generated
04435   # by flymake and that file name might end with '_flymake.cc'. In that case,
04436   # restore original file name here so that the corresponding header file can be
04437   # found.
04438   # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
04439   # instead of 'foo_flymake.h'
04440   abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
04441 
04442   # include_state is modified during iteration, so we iterate over a copy of
04443   # the keys.
04444   header_keys = include_state.keys()
04445   for header in header_keys:
04446     (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
04447     fullpath = common_path + header
04448     if same_module and UpdateIncludeState(fullpath, include_state, io):
04449       header_found = True
04450 
04451   # If we can't find the header file for a .cc, assume it's because we don't
04452   # know where to look. In that case we'll give up as we're not sure they
04453   # didn't include it in the .h file.
04454   # TODO(unknown): Do a better job of finding .h files so we are confident that
04455   # not having the .h file means there isn't one.
04456   if filename.endswith('.cc') and not header_found:
04457     return
04458 
04459   # All the lines have been processed, report the errors found.
04460   for required_header_unstripped in required:
04461     template = required[required_header_unstripped][1]
04462     if required_header_unstripped.strip('<>"') not in include_state:
04463       error(filename, required[required_header_unstripped][0],
04464             'build/include_what_you_use', 4,
04465             'Add #include ' + required_header_unstripped + ' for ' + template)
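  # Editor's note -- an illustrative, hypothetical outcome of the check above:
  # if foo.cc uses std::less<int> but neither foo.cc nor foo.h includes
  # <functional>, the offending line is reported with
  # "Add #include <functional> for less<>".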
04466 
04467 
04468 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
04469 
04470 
04471 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
04472   """Check that make_pair's template arguments are deduced.
04473 
04474   G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
04475   specified explicitly, and such use isn't intended in any case.
04476 
04477   Args:
04478     filename: The name of the current file.
04479     clean_lines: A CleansedLines instance containing the file.
04480     linenum: The number of the line to check.
04481     error: The function to call with any errors found.
04482   """
04483   line = clean_lines.elided[linenum]
04484   match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
04485   if match:
04486     error(filename, linenum, 'build/explicit_make_pair',
04487           4,  # 4 = high confidence
04488           'For C++11-compatibility, omit template arguments from make_pair'
04489           ' OR use pair directly OR if appropriate, construct a pair directly')
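  # Editor's note -- illustrative, hypothetical calls for the check above:
  #     counts.insert(make_pair<int, string>(1, name));    // flagged
  #     counts.insert(make_pair(1, name));                 // preferred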
04490 
04491 
04492 def ProcessLine(filename, file_extension, clean_lines, line,
04493                 include_state, function_state, nesting_state, error,
04494                 extra_check_functions=[]):
04495   """Processes a single line in the file.
04496 
04497   Args:
04498     filename: Filename of the file that is being processed.
04499     file_extension: The extension (dot not included) of the file.
04500     clean_lines: An array of strings, each representing a line of the file,
04501                  with comments stripped.
04502     line: Number of line being processed.
04503     include_state: An _IncludeState instance in which the headers are inserted.
04504     function_state: A _FunctionState instance which counts function lines, etc.
04505     nesting_state: A _NestingState instance which maintains information about
04506                    the current stack of nested blocks being parsed.
04507     error: A callable to which errors are reported, which takes 4 arguments:
04508            filename, line number, error level, and message
04509     extra_check_functions: An array of additional check functions that will be
04510                            run on each source line. Each function takes 4
04511                            arguments: filename, clean_lines, line, error
04512   """
04513   raw_lines = clean_lines.raw_lines
04514   ParseNolintSuppressions(filename, raw_lines[line], line, error)
04515   nesting_state.Update(filename, clean_lines, line, error)
04516   if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
04517     return
04518   CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
04519   CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
04520   CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
04521   CheckLanguage(filename, clean_lines, line, file_extension, include_state,
04522                 nesting_state, error)
04523   CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
04524   CheckForNonStandardConstructs(filename, clean_lines, line,
04525                                 nesting_state, error)
04526   CheckVlogArguments(filename, clean_lines, line, error)
04527   CheckPosixThreading(filename, clean_lines, line, error)
04528   CheckInvalidIncrement(filename, clean_lines, line, error)
04529   CheckMakePairUsesDeduction(filename, clean_lines, line, error)
04530   for check_fn in extra_check_functions:
04531     check_fn(filename, clean_lines, line, error)
04532 
04533 def ProcessFileData(filename, file_extension, lines, error,
04534                     extra_check_functions=[]):
04535   """Performs lint checks and reports any errors to the given error function.
04536 
04537   Args:
04538     filename: Filename of the file that is being processed.
04539     file_extension: The extension (dot not included) of the file.
04540     lines: An array of strings, each representing a line of the file, with the
04541            last element being empty if the file is terminated with a newline.
04542     error: A callable to which errors are reported, which takes 4 arguments:
04543            filename, line number, error level, and message
04544     extra_check_functions: An array of additional check functions that will be
04545                            run on each source line. Each function takes 4
04546                            arguments: filename, clean_lines, line, error
04547   """
04548   lines = (['// marker so line numbers and indices both start at 1'] + lines +
04549            ['// marker so line numbers end in a known way'])
04550 
04551   include_state = _IncludeState()
04552   function_state = _FunctionState()
04553   nesting_state = _NestingState()
04554 
04555   ResetNolintSuppressions()
04556 
04557   CheckForCopyright(filename, lines, error)
04558 
04559   if file_extension == 'h':
04560     CheckForHeaderGuard(filename, lines, error)
04561 
04562   RemoveMultiLineComments(filename, lines, error)
04563   clean_lines = CleansedLines(lines)
04564   for line in xrange(clean_lines.NumLines()):
04565     ProcessLine(filename, file_extension, clean_lines, line,
04566                 include_state, function_state, nesting_state, error,
04567                 extra_check_functions)
04568   nesting_state.CheckCompletedBlocks(filename, error)
04569 
04570   CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
04571 
04572   # We check here rather than inside ProcessLine so that we see raw
04573   # lines rather than "cleaned" lines.
04574   CheckForBadCharacters(filename, lines, error)
04575 
04576   CheckForNewlineAtEOF(filename, lines, error)
04577 
04578 def ProcessFile(filename, vlevel, extra_check_functions=[]):
04579   """Does google-lint on a single file.
04580 
04581   Args:
04582     filename: The name of the file to parse.
04583 
04584     vlevel: The level of errors to report.  Every error of confidence
04585     >= verbose_level will be reported.  0 is a good default.
04586 
04587     extra_check_functions: An array of additional check functions that will be
04588                            run on each source line. Each function takes 4
04589                            arguments: filename, clean_lines, line, error
04590   """
04591 
04592   _SetVerboseLevel(vlevel)
04593 
04594   try:
04595     # Support the UNIX convention of using "-" for stdin.  Note that
04596     # we are not opening the file with universal newline support
04597     # (which codecs doesn't support anyway), so the resulting lines do
04598     # contain trailing '\r' characters if we are reading a file that
04599     # has CRLF endings.
04600     # If after the split a trailing '\r' is present, it is removed
04601     # below. If it is not expected to be present (i.e. os.linesep !=
04602     # '\r\n' as in Windows), a warning is issued below if this file
04603     # is processed.
04604 
04605     if filename == '-':
04606       lines = codecs.StreamReaderWriter(sys.stdin,
04607                                         codecs.getreader('utf8'),
04608                                         codecs.getwriter('utf8'),
04609                                         'replace').read().split('\n')
04610     else:
04611       lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
04612 
04613     carriage_return_found = False
04614     # Remove trailing '\r'.
04615     for linenum in range(len(lines)):
04616       if lines[linenum].endswith('\r'):
04617         lines[linenum] = lines[linenum].rstrip('\r')
04618         carriage_return_found = True
04619 
04620   except IOError:
04621     sys.stderr.write(
04622         "Skipping input '%s': Can't open for reading\n" % filename)
04623     return
04624 
04625   # Note, if no dot is found, this will give the entire filename as the ext.
04626   file_extension = filename[filename.rfind('.') + 1:]
04627 
04628   # When reading from stdin, the extension is unknown, so no cpplint tests
04629   # should rely on the extension.
04630   if filename != '-' and file_extension not in _valid_extensions:
04631     sys.stderr.write('Ignoring %s; not a valid file name '
04632                      '(%s)\n' % (filename, ', '.join(_valid_extensions)))
04633   else:
04634     ProcessFileData(filename, file_extension, lines, Error,
04635                     extra_check_functions)
04636     if carriage_return_found and os.linesep != '\r\n':
04637       # Use 0 for linenum since outputting only one error for potentially
04638       # several lines.
04639       Error(filename, 0, 'whitespace/newline', 1,
04640             'One or more unexpected \\r (^M) found; '
04641             'better to use only a \\n')
04642 
04643   # suppress printing
04644   # sys.stderr.write('Done processing %s\n' % filename)
04645 
04646 
04647 def PrintUsage(message):
04648   """Prints a brief usage string and exits, optionally with an error message.
04649 
04650   Args:
04651     message: The optional error message.
04652   """
04653   sys.stderr.write(_USAGE)
04654   if message:
04655     sys.exit('\nFATAL ERROR: ' + message)
04656   else:
04657     sys.exit(1)
04658 
04659 
04660 def PrintCategories():
04661   """Prints a list of all the error-categories used by error messages.
04662 
04663   These are the categories used to filter messages via --filter.
04664   """
04665   sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
04666   sys.exit(0)
04667 
04668 
04669 def ParseArguments(args):
04670   """Parses the command line arguments.
04671 
04672   This may set the output format and verbosity level as side-effects.
04673 
04674   Args:
04675     args: The command line arguments:
04676 
04677   Returns:
04678     The list of filenames to lint.
04679   """
04680   try:
04681     (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
04682                                                  'counting=',
04683                                                  'filter=',
04684                                                  'root=',
04685                                                  'linelength=',
04686                                                  'extensions='])
04687   except getopt.GetoptError:
04688     PrintUsage('Invalid arguments.')
04689 
04690   verbosity = _VerboseLevel()
04691   output_format = _OutputFormat()
04692   filters = ''
04693   counting_style = ''
04694 
04695   for (opt, val) in opts:
04696     if opt == '--help':
04697       PrintUsage(None)
04698     elif opt == '--output':
04699       if val not in ('emacs', 'vs7', 'eclipse'):
04700         PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
04701       output_format = val
04702     elif opt == '--verbose':
04703       verbosity = int(val)
04704     elif opt == '--filter':
04705       filters = val
04706       if not filters:
04707         PrintCategories()
04708     elif opt == '--counting':
04709       if val not in ('total', 'toplevel', 'detailed'):
04710         PrintUsage('Valid counting options are total, toplevel, and detailed')
04711       counting_style = val
04712     elif opt == '--root':
04713       global _root
04714       _root = val
04715     elif opt == '--linelength':
04716       global _line_length
04717       try:
04718         _line_length = int(val)
04719       except ValueError:
04720         PrintUsage('Line length must be digits.')
04721     elif opt == '--extensions':
04722       global _valid_extensions
04723       try:
04724         _valid_extensions = set(val.split(','))
04725       except ValueError:
04726         PrintUsage('Extensions must be a comma-separated list.')
04727 
04728   if not filenames:
04729     PrintUsage('No files were specified.')
04730 
04731   _SetOutputFormat(output_format)
04732   _SetVerboseLevel(verbosity)
04733   _SetFilters(filters)
04734   _SetCountingStyle(counting_style)
04735 
04736   return filenames
04737 
04738 
04739 def main():
04740   filenames = ParseArguments(sys.argv[1:])
04741 
04742   # Change stderr to write with replacement characters so we don't die
04743   # if we try to print something containing non-ASCII characters.
04744   sys.stderr = codecs.StreamReaderWriter(sys.stderr,
04745                                          codecs.getreader('utf8'),
04746                                          codecs.getwriter('utf8'),
04747                                          'replace')
04748 
04749   _cpplint_state.ResetErrorCounts()
04750   for filename in filenames:
04751     ProcessFile(filename, _cpplint_state.verbose_level)
04752   _cpplint_state.PrintErrorCounts()
04753 
04754   sys.exit(_cpplint_state.error_count > 0)
04755 
04756 
04757 if __name__ == '__main__':
04758   main()

