cpplint.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2009 Google Inc. All rights reserved.
4 #
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
7 # met:
8 #
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above
12 # copyright notice, this list of conditions and the following disclaimer
13 # in the documentation and/or other materials provided with the
14 # distribution.
15 # * Neither the name of Google Inc. nor the names of its
16 # contributors may be used to endorse or promote products derived from
17 # this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 
32 # April 2014, Greg Horn:
33 # Edited the original file to suppress "Done processing xxx"
34 
35 """Does google-lint on c++ files.
36 
37 The goal of this script is to identify places in the code that *may*
38 be in non-compliance with google style. It does not attempt to fix
39 up these problems -- the point is to educate. It also does not
40 attempt to find all problems, nor to ensure that everything it does
41 find is legitimately a problem.
42 
43 In particular, we can get very confused by /* and // inside strings!
44 We do a small hack, which is to ignore //'s with "'s after them on the
45 same line, but it is far from perfect (in either direction).
46 """
47 
48 import codecs
49 import copy
50 import getopt
51 import math # for log
52 import os
53 import re
54 import sre_compile
55 import string
56 import sys
57 import unicodedata
58 
59 
60 _USAGE = """
61 Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
62  [--counting=total|toplevel|detailed] [--root=subdir]
63  [--linelength=digits]
64  <file> [file] ...
65 
66  The style guidelines this tries to follow are those in
67  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
68 
69  Every problem is given a confidence score from 1-5, with 5 meaning we are
70  certain of the problem, and 1 meaning it could be a legitimate construct.
71  This will miss some errors, and is not a substitute for a code review.
72 
73  To suppress false-positive errors of a certain category, add a
74  'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
75  suppresses errors of all categories on that line.
76 
77  The files passed in will be linted; at least one file must be provided.
78  Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
79  extensions with the --extensions flag.
80 
81  Flags:
82 
83  output=vs7
84  By default, the output is formatted to ease emacs parsing. Visual Studio
85  compatible output (vs7) may also be used. Other formats are unsupported.
86 
87  verbose=#
88  Specify a number 0-5 to restrict errors to certain verbosity levels.
89 
90  filter=-x,+y,...
91  Specify a comma-separated list of category-filters to apply: only
92  error messages whose category names pass the filters will be printed.
93  (Category names are printed with the message and look like
94  "[whitespace/indent]".) Filters are evaluated left to right.
95  "-FOO" and "FOO" means "do not print categories that start with FOO".
96  "+FOO" means "do print categories that start with FOO".
97 
98  Examples: --filter=-whitespace,+whitespace/braces
99  --filter=whitespace,runtime/printf,+runtime/printf_format
100  --filter=-,+build/include_what_you_use
101 
102  To see a list of all the categories used in cpplint, pass no arg:
103  --filter=
104 
105  counting=total|toplevel|detailed
106  The total number of errors found is always printed. If
107  'toplevel' is provided, then the count of errors in each of
108  the top-level categories like 'build' and 'whitespace' will
109  also be printed. If 'detailed' is provided, then a count
110  is provided for each category like 'build/class'.
111 
112  root=subdir
113  The root directory used for deriving header guard CPP variable.
114  By default, the header guard CPP variable is calculated as the relative
115  path to the directory that contains .git, .hg, or .svn. When this flag
116  is specified, the relative path is calculated from the specified
117  directory. If the specified directory does not exist, this flag is
118  ignored.
119 
120  Examples:
121  Assuming that src/.git exists, the header guard CPP variables for
122  src/chrome/browser/ui/browser.h are:
123 
124  No flag => CHROME_BROWSER_UI_BROWSER_H_
125  --root=chrome => BROWSER_UI_BROWSER_H_
126  --root=chrome/browser => UI_BROWSER_H_
127 
128  linelength=digits
129  This is the allowed line length for the project. The default value is
130  80 characters.
131 
132  Examples:
133  --linelength=120
134 
135  extensions=extension,extension,...
136  The allowed file extensions that cpplint will check
137 
138  Examples:
139  --extensions=hpp,cpp
140 """
141 
142 # We categorize each error message we print. Here are the categories.
143 # We want an explicit list so we can list them all in cpplint --filter=.
144 # If you add a new error message with a new category, add it to the list
145 # here! cpplint_unittest.py should tell you if you forget to do this.
146 _ERROR_CATEGORIES = [
147  'build/class',
148  'build/deprecated',
149  'build/endif_comment',
150  'build/explicit_make_pair',
151  'build/forward_decl',
152  'build/header_guard',
153  'build/include',
154  'build/include_alpha',
155  'build/include_order',
156  'build/include_what_you_use',
157  'build/namespaces',
158  'build/printf_format',
159  'build/storage_class',
160  'legal/copyright',
161  'readability/alt_tokens',
162  'readability/braces',
163  'readability/casting',
164  'readability/check',
165  'readability/constructors',
166  'readability/fn_size',
167  'readability/function',
168  'readability/multiline_comment',
169  'readability/multiline_string',
170  'readability/namespace',
171  'readability/nolint',
172  'readability/nul',
173  'readability/streams',
174  'readability/todo',
175  'readability/utf8',
176  'runtime/arrays',
177  'runtime/casting',
178  'runtime/explicit',
179  'runtime/int',
180  'runtime/init',
181  'runtime/invalid_increment',
182  'runtime/member_string_references',
183  'runtime/memset',
184  'runtime/operator',
185  'runtime/printf',
186  'runtime/printf_format',
187  'runtime/references',
188  'runtime/string',
189  'runtime/threadsafe_fn',
190  'runtime/vlog',
191  'whitespace/blank_line',
192  'whitespace/braces',
193  'whitespace/comma',
194  'whitespace/comments',
195  'whitespace/empty_conditional_body',
196  'whitespace/empty_loop_body',
197  'whitespace/end_of_line',
198  'whitespace/ending_newline',
199  'whitespace/forcolon',
200  'whitespace/indent',
201  'whitespace/line_length',
202  'whitespace/newline',
203  'whitespace/operators',
204  'whitespace/parens',
205  'whitespace/semicolon',
206  'whitespace/tab',
207  'whitespace/todo'
208  ]
209 
210 # The default state of the category filter. This is overridden by the --filter=
211 # flag. By default all errors are on, so only add here categories that should be
212 # off by default (i.e., categories that must be enabled by the --filter= flags).
213 # All entries here should start with a '-' or '+', as in the --filter= flag.
214 _DEFAULT_FILTERS = ['-build/include_alpha']
215 
216 # We used to check for high-bit characters, but after much discussion we
217 # decided those were OK, as long as they were in UTF-8 and didn't represent
218 # hard-coded international strings, which belong in a separate i18n file.
219 
220 
221 # C++ headers
222 _CPP_HEADERS = frozenset([
223  # Legacy
224  'algobase.h',
225  'algo.h',
226  'alloc.h',
227  'builtinbuf.h',
228  'bvector.h',
229  'complex.h',
230  'defalloc.h',
231  'deque.h',
232  'editbuf.h',
233  'fstream.h',
234  'function.h',
235  'hash_map',
236  'hash_map.h',
237  'hash_set',
238  'hash_set.h',
239  'hashtable.h',
240  'heap.h',
241  'indstream.h',
242  'iomanip.h',
243  'iostream.h',
244  'istream.h',
245  'iterator.h',
246  'list.h',
247  'map.h',
248  'multimap.h',
249  'multiset.h',
250  'ostream.h',
251  'pair.h',
252  'parsestream.h',
253  'pfstream.h',
254  'procbuf.h',
255  'pthread_alloc',
256  'pthread_alloc.h',
257  'rope',
258  'rope.h',
259  'ropeimpl.h',
260  'set.h',
261  'slist',
262  'slist.h',
263  'stack.h',
264  'stdiostream.h',
265  'stl_alloc.h',
266  'stl_relops.h',
267  'streambuf.h',
268  'stream.h',
269  'strfile.h',
270  'strstream.h',
271  'tempbuf.h',
272  'tree.h',
273  'type_traits.h',
274  'vector.h',
275  # 17.6.1.2 C++ library headers
276  'algorithm',
277  'array',
278  'atomic',
279  'bitset',
280  'chrono',
281  'codecvt',
282  'complex',
283  'condition_variable',
284  'deque',
285  'exception',
286  'forward_list',
287  'fstream',
288  'functional',
289  'future',
290  'initializer_list',
291  'iomanip',
292  'ios',
293  'iosfwd',
294  'iostream',
295  'istream',
296  'iterator',
297  'limits',
298  'list',
299  'locale',
300  'map',
301  'memory',
302  'mutex',
303  'new',
304  'numeric',
305  'ostream',
306  'queue',
307  'random',
308  'ratio',
309  'regex',
310  'set',
311  'sstream',
312  'stack',
313  'stdexcept',
314  'streambuf',
315  'string',
316  'strstream',
317  'system_error',
318  'thread',
319  'tuple',
320  'typeindex',
321  'typeinfo',
322  'type_traits',
323  'unordered_map',
324  'unordered_set',
325  'utility',
326  'valarray',
327  'vector',
328  # 17.6.1.2 C++ headers for C library facilities
329  'cassert',
330  'ccomplex',
331  'cctype',
332  'cerrno',
333  'cfenv',
334  'cfloat',
335  'cinttypes',
336  'ciso646',
337  'climits',
338  'clocale',
339  'cmath',
340  'csetjmp',
341  'csignal',
342  'cstdalign',
343  'cstdarg',
344  'cstdbool',
345  'cstddef',
346  'cstdint',
347  'cstdio',
348  'cstdlib',
349  'cstring',
350  'ctgmath',
351  'ctime',
352  'cuchar',
353  'cwchar',
354  'cwctype',
355  ])
356 
357 # Assertion macros. These are defined in base/logging.h and
358 # testing/base/gunit.h. Note that the _M versions need to come first
359 # for substring matching to work.
360 _CHECK_MACROS = [
361  'DCHECK', 'CHECK',
362  'EXPECT_TRUE_M', 'EXPECT_TRUE',
363  'ASSERT_TRUE_M', 'ASSERT_TRUE',
364  'EXPECT_FALSE_M', 'EXPECT_FALSE',
365  'ASSERT_FALSE_M', 'ASSERT_FALSE',
366  ]
367 
368 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
369 _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
370 
371 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
372  ('>=', 'GE'), ('>', 'GT'),
373  ('<=', 'LE'), ('<', 'LT')]:
374  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
375  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
376  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
377  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
378  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
379  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
380 
381 for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
382  ('>=', 'LT'), ('>', 'LE'),
383  ('<=', 'GT'), ('<', 'GE')]:
384  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
385  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
386  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
387  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
388 
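# Illustrative sketch (added commentary, not part of upstream cpplint): the
# loops above build a lookup from macro name and comparison operator to the
# suggested typed macro, e.g.
#   >>> _CHECK_REPLACEMENT['CHECK']['==']
#   'CHECK_EQ'
#   >>> _CHECK_REPLACEMENT['EXPECT_FALSE']['<']
#   'EXPECT_GE'
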
389 # Alternative tokens and their replacements. For full list, see section 2.5
390 # Alternative tokens [lex.digraph] in the C++ standard.
391 #
392 # Digraphs (such as '%:') are not included here since it's a mess to
393 # match those on a word boundary.
394 _ALT_TOKEN_REPLACEMENT = {
395  'and': '&&',
396  'bitor': '|',
397  'or': '||',
398  'xor': '^',
399  'compl': '~',
400  'bitand': '&',
401  'and_eq': '&=',
402  'or_eq': '|=',
403  'xor_eq': '^=',
404  'not': '!',
405  'not_eq': '!='
406  }
407 
408 # Compile regular expression that matches all the above keywords. The "[ =()]"
409 # bit is meant to avoid matching these keywords outside of boolean expressions.
410 #
411 # False positives include C-style multi-line comments and multi-line strings
412 # but those have always been troublesome for cpplint.
413 _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
414  r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
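
# Illustrative sketch (added commentary, not part of upstream cpplint): the
# pattern captures an alternative token only when it is preceded by a space,
# '=', or parenthesis and followed by a space, '(' or end of line, e.g.
#   >>> m = _ALT_TOKEN_REPLACEMENT_PATTERN.search('if (x and y) {')
#   >>> (m.group(1), _ALT_TOKEN_REPLACEMENT[m.group(1)])
#   ('and', '&&')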
415 
416 
417 # These constants define types of headers for use with
418 # _IncludeState.CheckNextIncludeOrder().
419 _C_SYS_HEADER = 1
420 _CPP_SYS_HEADER = 2
421 _LIKELY_MY_HEADER = 3
422 _POSSIBLE_MY_HEADER = 4
423 _OTHER_HEADER = 5
424 
425 # These constants define the current inline assembly state
426 _NO_ASM = 0 # Outside of inline assembly block
427 _INSIDE_ASM = 1 # Inside inline assembly block
428 _END_ASM = 2 # Last line of inline assembly block
429 _BLOCK_ASM = 3 # The whole block is an inline assembly block
430 
431 # Match start of assembly blocks
432 _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
433  r'(?:\s+(volatile|__volatile__))?'
434  r'\s*[{(]')
435 
436 
437 _regexp_compile_cache = {}
438 
439 # Finds occurrences of NOLINT or NOLINT(...).
440 _RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
441 
442 # {str, set(int)}: a map from error categories to sets of linenumbers
443 # on which those errors are expected and should be suppressed.
444 _error_suppressions = {}
445 
446 # The root directory used for deriving header guard CPP variable.
447 # This is set by --root flag.
448 _root = None
449 
450 # The allowed line length of files.
451 # This is set by --linelength flag.
452 _line_length = 80
453 
454 # The allowed extensions for file names
455 # This is set by --extensions flag.
456 _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
457 
458 def ParseNolintSuppressions(filename, raw_line, linenum, error):
459  """Updates the global list of error-suppressions.
460 
461  Parses any NOLINT comments on the current line, updating the global
462  error_suppressions store. Reports an error if the NOLINT comment
463  was malformed.
464 
465  Args:
466  filename: str, the name of the input file.
467  raw_line: str, the line of input text, with comments.
468  linenum: int, the number of the current line.
469  error: function, an error handler.
470  """
471  # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
472  matched = _RE_SUPPRESSION.search(raw_line)
473  if matched:
474  category = matched.group(1)
475  if category in (None, '(*)'): # => "suppress all"
476  _error_suppressions.setdefault(None, set()).add(linenum)
477  else:
478  if category.startswith('(') and category.endswith(')'):
479  category = category[1:-1]
480  if category in _ERROR_CATEGORIES:
481  _error_suppressions.setdefault(category, set()).add(linenum)
482  else:
483  error(filename, linenum, 'readability/nolint', 5,
484  'Unknown NOLINT error category: %s' % category)
485 
486 
488  "Resets the set of NOLINT suppressions to empty."
489  _error_suppressions.clear()
490 
491 
492 def IsErrorSuppressedByNolint(category, linenum):
493  """Returns true if the specified error category is suppressed on this line.
494 
495  Consults the global error_suppressions map populated by
496  ParseNolintSuppressions/ResetNolintSuppressions.
497 
498  Args:
499  category: str, the category of the error.
500  linenum: int, the current line number.
501  Returns:
502  bool, True iff the error should be suppressed due to a NOLINT comment.
503  """
504  return (linenum in _error_suppressions.get(category, set()) or
505  linenum in _error_suppressions.get(None, set()))
506 
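# Illustrative sketch (added commentary, not part of upstream cpplint): the
# NOLINT flow ties these two helpers together; parsing a line records the
# suppression, and later checks consult it, e.g.
#   >>> ParseNolintSuppressions('f.cc', 'int x;  // NOLINT(runtime/int)', 12, Error)
#   >>> IsErrorSuppressedByNolint('runtime/int', 12)
#   True
#   >>> IsErrorSuppressedByNolint('whitespace/tab', 12)
#   False
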
507 def Match(pattern, s):
508  """Matches the string with the pattern, caching the compiled regexp."""
509  # The regexp compilation caching is inlined in both Match and Search for
510  # performance reasons; factoring it out into a separate function turns out
511  # to be noticeably expensive.
512  if pattern not in _regexp_compile_cache:
513  _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
514  return _regexp_compile_cache[pattern].match(s)
515 
516 
517 def ReplaceAll(pattern, rep, s):
518  """Replaces instances of pattern in a string with a replacement.
519 
520  The compiled regex is kept in a cache shared by Match and Search.
521 
522  Args:
523  pattern: regex pattern
524  rep: replacement text
525  s: search string
526 
527  Returns:
528  string with replacements made (or original string if no replacements)
529  """
530  if pattern not in _regexp_compile_cache:
531  _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
532  return _regexp_compile_cache[pattern].sub(rep, s)
533 
534 
535 def Search(pattern, s):
536  """Searches the string for the pattern, caching the compiled regexp."""
537  if pattern not in _regexp_compile_cache:
538  _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
539  return _regexp_compile_cache[pattern].search(s)
540 
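# Illustrative sketch (added commentary, not part of upstream cpplint):
# Match, Search and ReplaceAll share _regexp_compile_cache, so repeated calls
# with the same pattern string compile the regexp only once, e.g.
#   >>> Match(r'#include\s+"(.*)"', '#include "foo/bar.h"').group(1)
#   'foo/bar.h'
#   >>> ReplaceAll(r'\s+', ' ', 'int   x ;')
#   'int x ;'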
541 
542 class _IncludeState(dict):
543  """Tracks line numbers for includes, and the order in which includes appear.
544 
545  As a dict, an _IncludeState object serves as a mapping between include
546  filename and line number on which that file was included.
547 
548  Call CheckNextIncludeOrder() once for each header in the file, passing
549  in the type constants defined above. Calls in an illegal order will
550  raise an _IncludeError with an appropriate error message.
551 
552  """
553  # self._section will move monotonically through this set. If it ever
554  # needs to move backwards, CheckNextIncludeOrder will raise an error.
555  _INITIAL_SECTION = 0
556  _MY_H_SECTION = 1
557  _C_SECTION = 2
558  _CPP_SECTION = 3
559  _OTHER_H_SECTION = 4
560 
561  _TYPE_NAMES = {
562  _C_SYS_HEADER: 'C system header',
563  _CPP_SYS_HEADER: 'C++ system header',
564  _LIKELY_MY_HEADER: 'header this file implements',
565  _POSSIBLE_MY_HEADER: 'header this file may implement',
566  _OTHER_HEADER: 'other header',
567  }
568  _SECTION_NAMES = {
569  _INITIAL_SECTION: "... nothing. (This can't be an error.)",
570  _MY_H_SECTION: 'a header this file implements',
571  _C_SECTION: 'C system header',
572  _CPP_SECTION: 'C++ system header',
573  _OTHER_H_SECTION: 'other header',
574  }
575 
576  def __init__(self):
577  dict.__init__(self)
578  self.ResetSection()
579 
580  def ResetSection(self):
581  # The name of the current section.
583  # The path of last found header.
584  self._last_header = ''
585 
586  def SetLastHeader(self, header_path):
587  self._last_header = header_path
588 
589  def CanonicalizeAlphabeticalOrder(self, header_path):
590  """Returns a path canonicalized for alphabetical comparison.
591 
592  - replaces "-" with "_" so they both cmp the same.
593  - removes '-inl' since we don't require them to be after the main header.
594  - lowercase everything, just in case.
595 
596  Args:
597  header_path: Path to be canonicalized.
598 
599  Returns:
600  Canonicalized path.
601  """
602  return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
603 
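  # Illustrative sketch (added commentary, not part of upstream cpplint):
  # canonicalization makes '-' vs '_' and '-inl' suffixes compare equal, e.g.
  #   >>> _IncludeState().CanonicalizeAlphabeticalOrder('Foo/Bar-inl.h')
  #   'foo/bar.h'
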
604  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
605  """Check if a header is in alphabetical order with the previous header.
606 
607  Args:
608  clean_lines: A CleansedLines instance containing the file.
609  linenum: The number of the line to check.
610  header_path: Canonicalized header to be checked.
611 
612  Returns:
613  Returns true if the header is in alphabetical order.
614  """
615  # If previous section is different from current section, _last_header will
616  # be reset to empty string, so it's always less than current header.
617  #
618  # If previous line was a blank line, assume that the headers are
619  # intentionally sorted the way they are.
620  if (self._last_header > header_path and
621  not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
622  return False
623  return True
624 
625  def CheckNextIncludeOrder(self, header_type):
626  """Returns a non-empty error message if the next header is out of order.
627 
628  This function also updates the internal state to be ready to check
629  the next include.
630 
631  Args:
632  header_type: One of the _XXX_HEADER constants defined above.
633 
634  Returns:
635  The empty string if the header is in the right order, or an
636  error message describing what's wrong.
637 
638  """
639  error_message = ('Found %s after %s' %
640  (self._TYPE_NAMES[header_type],
641  self._SECTION_NAMES[self._section]))
642 
643  last_section = self._section
644 
645  if header_type == _C_SYS_HEADER:
646  if self._section <= self._C_SECTION:
647  self._section = self._C_SECTION
648  else:
649  self._last_header = ''
650  return error_message
651  elif header_type == _CPP_SYS_HEADER:
652  if self._section <= self._CPP_SECTION:
653  self._section = self._CPP_SECTION
654  else:
655  self._last_header = ''
656  return error_message
657  elif header_type == _LIKELY_MY_HEADER:
658  if self._section <= self._MY_H_SECTION:
659  self._section = self._MY_H_SECTION
660  else:
661  self._section = self._OTHER_H_SECTION
662  elif header_type == _POSSIBLE_MY_HEADER:
663  if self._section <= self._MY_H_SECTION:
664  self._section = self._MY_H_SECTION
665  else:
666  # This will always be the fallback because we're not sure
667  # enough that the header is associated with this file.
668  self._section = self._OTHER_H_SECTION
669  else:
670  assert header_type == _OTHER_HEADER
671  self._section = self._OTHER_H_SECTION
672 
673  if last_section != self._section:
674  self._last_header = ''
675 
676  return ''
677 
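# Illustrative sketch (added commentary, not part of upstream cpplint): the
# section state only moves forward, so a C system header that appears after a
# C++ system header is reported, e.g.
#   >>> state = _IncludeState()
#   >>> state.CheckNextIncludeOrder(_CPP_SYS_HEADER)
#   ''
#   >>> state.CheckNextIncludeOrder(_C_SYS_HEADER)
#   'Found C system header after C++ system header'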
678 
679 class _CppLintState(object):
680  """Maintains module-wide state.."""
681 
682  def __init__(self):
683  self.verbose_level = 1 # global setting.
684  self.error_count = 0 # global count of reported errors
685  # filters to apply when emitting error messages
686  self.filters = _DEFAULT_FILTERS[:]
687  self.counting = 'total' # In what way are we counting errors?
688  self.errors_by_category = {} # string to int dict storing error counts
689 
690  # output format:
691  # "emacs" - format that emacs can parse (default)
692  # "vs7" - format that Microsoft Visual Studio 7 can parse
693  self.output_format = 'emacs'
694 
695  def SetOutputFormat(self, output_format):
696  """Sets the output format for errors."""
697  self.output_format = output_format
698 
699  def SetVerboseLevel(self, level):
700  """Sets the module's verbosity, and returns the previous setting."""
701  last_verbose_level = self.verbose_level
702  self.verbose_level = level
703  return last_verbose_level
704 
705  def SetCountingStyle(self, counting_style):
706  """Sets the module's counting options."""
707  self.counting = counting_style
708 
709  def SetFilters(self, filters):
710  """Sets the error-message filters.
711 
712  These filters are applied when deciding whether to emit a given
713  error message.
714 
715  Args:
716  filters: A string of comma-separated filters (eg "+whitespace/indent").
717  Each filter should start with + or -; else we die.
718 
719  Raises:
720  ValueError: The comma-separated filters did not all start with '+' or '-'.
721  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
722  """
723  # Default filters always have less priority than the flag ones.
724  self.filters = _DEFAULT_FILTERS[:]
725  for filt in filters.split(','):
726  clean_filt = filt.strip()
727  if clean_filt:
728  self.filters.append(clean_filt)
729  for filt in self.filters:
730  if not (filt.startswith('+') or filt.startswith('-')):
731  raise ValueError('Every filter in --filters must start with + or -'
732  ' (%s does not)' % filt)
733 
734  def ResetErrorCounts(self):
735  """Sets the module's error statistic back to zero."""
736  self.error_count = 0
737  self.errors_by_category = {}
738 
739  def IncrementErrorCount(self, category):
740  """Bumps the module's error statistic."""
741  self.error_count += 1
742  if self.counting in ('toplevel', 'detailed'):
743  if self.counting != 'detailed':
744  category = category.split('/')[0]
745  if category not in self.errors_by_category:
746  self.errors_by_category[category] = 0
747  self.errors_by_category[category] += 1
748 
749  def PrintErrorCounts(self):
750  """Print a summary of errors by category, and the total."""
751  for category, count in self.errors_by_category.iteritems():
752  sys.stderr.write('Category \'%s\' errors found: %d\n' %
753  (category, count))
754  sys.stderr.write('Total errors found: %d\n' % self.error_count)
755 
756 _cpplint_state = _CppLintState()
757 
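# Illustrative sketch (added commentary, not part of upstream cpplint):
# SetFilters always starts from _DEFAULT_FILTERS and appends the user's
# filters, e.g.
#   >>> _cpplint_state.SetFilters('-whitespace,+whitespace/braces')
#   >>> _cpplint_state.filters
#   ['-build/include_alpha', '-whitespace', '+whitespace/braces']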
758 
760  """Gets the module's output format."""
761  return _cpplint_state.output_format
762 
763 
764 def _SetOutputFormat(output_format):
765  """Sets the module's output format."""
766  _cpplint_state.SetOutputFormat(output_format)
767 
768 
770  """Returns the module's verbosity setting."""
771  return _cpplint_state.verbose_level
772 
773 
774 def _SetVerboseLevel(level):
775  """Sets the module's verbosity, and returns the previous setting."""
776  return _cpplint_state.SetVerboseLevel(level)
777 
778 
779 def _SetCountingStyle(level):
780  """Sets the module's counting options."""
781  _cpplint_state.SetCountingStyle(level)
782 
783 
784 def _Filters():
785  """Returns the module's list of output filters, as a list."""
786  return _cpplint_state.filters
787 
788 
789 def _SetFilters(filters):
790  """Sets the module's error-message filters.
791 
792  These filters are applied when deciding whether to emit a given
793  error message.
794 
795  Args:
796  filters: A string of comma-separated filters (eg "whitespace/indent").
797  Each filter should start with + or -; else we die.
798  """
799  _cpplint_state.SetFilters(filters)
800 
801 
802 class _FunctionState(object):
803  """Tracks current function name and the number of lines in its body."""
804 
805  _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
806  _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
807 
808  def __init__(self):
809  self.in_a_function = False
810  self.lines_in_function = 0
811  self.current_function = ''
812 
813  def Begin(self, function_name):
814  """Start analyzing function body.
815 
816  Args:
817  function_name: The name of the function being tracked.
818  """
819  self.in_a_function = True
820  self.lines_in_function = 0
821  self.current_function = function_name
822 
823  def Count(self):
824  """Count line in current function body."""
825  if self.in_a_function:
826  self.lines_in_function += 1
827 
828  def Check(self, error, filename, linenum):
829  """Report if too many lines in function body.
830 
831  Args:
832  error: The function to call with any errors found.
833  filename: The name of the current file.
834  linenum: The number of the line to check.
835  """
836  if Match(r'T(EST|est)', self.current_function):
837  base_trigger = self._TEST_TRIGGER
838  else:
839  base_trigger = self._NORMAL_TRIGGER
840  trigger = base_trigger * 2**_VerboseLevel()
841 
842  if self.lines_in_function > trigger:
843  error_level = int(math.log(self.lines_in_function / base_trigger, 2))
844  # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
845  if error_level > 5:
846  error_level = 5
847  error(filename, linenum, 'readability/fn_size', error_level,
848  'Small and focused functions are preferred:'
849  ' %s has %d non-comment lines'
850  ' (error triggered by exceeding %d lines).' % (
851  self.current_function, self.lines_in_function, trigger))
852 
853  def End(self):
854  """Stop analyzing function body."""
855  self.in_a_function = False
856 
857 
858 class _IncludeError(Exception):
859  """Indicates a problem with the include order in a file."""
860  pass
861 
862 
863 class FileInfo:
864  """Provides utility functions for filenames.
865 
866  FileInfo provides easy access to the components of a file's path
867  relative to the project root.
868  """
869 
870  def __init__(self, filename):
871  self._filename = filename
872 
873  def FullName(self):
874  """Make Windows paths like Unix."""
875  return os.path.abspath(self._filename).replace('\\', '/')
876 
877  def RepositoryName(self):
878  """FullName after removing the local path to the repository.
879 
880  If we have a real absolute path name here we can try to do something smart:
881  detecting the root of the checkout and truncating /path/to/checkout from
882  the name so that we get header guards that don't include things like
883  "C:\Documents and Settings\..." or "/home/username/..." in them and thus
884  people on different computers who have checked the source out to different
885  locations won't see bogus errors.
886  """
887  fullname = self.FullName()
888 
889  if os.path.exists(fullname):
890  project_dir = os.path.dirname(fullname)
891 
892  if os.path.exists(os.path.join(project_dir, ".svn")):
893  # If there's a .svn file in the current directory, we recursively look
894  # up the directory tree for the top of the SVN checkout
895  root_dir = project_dir
896  one_up_dir = os.path.dirname(root_dir)
897  while os.path.exists(os.path.join(one_up_dir, ".svn")):
898  root_dir = os.path.dirname(root_dir)
899  one_up_dir = os.path.dirname(one_up_dir)
900 
901  prefix = os.path.commonprefix([root_dir, project_dir])
902  return fullname[len(prefix) + 1:]
903 
904  # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
905  # searching up from the current path.
906  root_dir = os.path.dirname(fullname)
907  while (root_dir != os.path.dirname(root_dir) and
908  not os.path.exists(os.path.join(root_dir, ".git")) and
909  not os.path.exists(os.path.join(root_dir, ".hg")) and
910  not os.path.exists(os.path.join(root_dir, ".svn"))):
911  root_dir = os.path.dirname(root_dir)
912 
913  if (os.path.exists(os.path.join(root_dir, ".git")) or
914  os.path.exists(os.path.join(root_dir, ".hg")) or
915  os.path.exists(os.path.join(root_dir, ".svn"))):
916  prefix = os.path.commonprefix([root_dir, project_dir])
917  return fullname[len(prefix) + 1:]
918 
919  # Don't know what to do; header guard warnings may be wrong...
920  return fullname
921 
922  def Split(self):
923  """Splits the file into the directory, basename, and extension.
924 
925  For 'chrome/browser/browser.cc', Split() would
926  return ('chrome/browser', 'browser', '.cc')
927 
928  Returns:
929  A tuple of (directory, basename, extension).
930  """
931 
932  googlename = self.RepositoryName()
933  project, rest = os.path.split(googlename)
934  return (project,) + os.path.splitext(rest)
935 
936  def BaseName(self):
937  """File base name - text after the final slash, before the final period."""
938  return self.Split()[1]
939 
940  def Extension(self):
941  """File extension - text following the final period."""
942  return self.Split()[2]
943 
944  def NoExtension(self):
945  """File has no source file extension."""
946  return '/'.join(self.Split()[0:2])
947 
948  def IsSource(self):
949  """File has a source file extension."""
950  return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
951 
952 
953 def _ShouldPrintError(category, confidence, linenum):
954  """If confidence >= verbose, category passes filter and is not suppressed."""
955 
956  # There are three ways we might decide not to print an error message:
957  # a "NOLINT(category)" comment appears in the source,
958  # the verbosity level isn't high enough, or the filters filter it out.
959  if IsErrorSuppressedByNolint(category, linenum):
960  return False
961  if confidence < _cpplint_state.verbose_level:
962  return False
963 
964  is_filtered = False
965  for one_filter in _Filters():
966  if one_filter.startswith('-'):
967  if category.startswith(one_filter[1:]):
968  is_filtered = True
969  elif one_filter.startswith('+'):
970  if category.startswith(one_filter[1:]):
971  is_filtered = False
972  else:
973  assert False # should have been checked for in SetFilter.
974  if is_filtered:
975  return False
976 
977  return True
978 
979 
980 def Error(filename, linenum, category, confidence, message):
981  """Logs the fact we've found a lint error.
982 
983  We log where the error was found, and also our confidence in the error,
984  that is, how certain we are this is a legitimate style regression, and
985  not a misidentification or a use that's sometimes justified.
986 
987  False positives can be suppressed by the use of
988  "cpplint(category)" comments on the offending line. These are
989  parsed into _error_suppressions.
990 
991  Args:
992  filename: The name of the file containing the error.
993  linenum: The number of the line containing the error.
994  category: A string used to describe the "category" this bug
995  falls under: "whitespace", say, or "runtime". Categories
996  may have a hierarchy separated by slashes: "whitespace/indent".
997  confidence: A number from 1-5 representing a confidence score for
998  the error, with 5 meaning that we are certain of the problem,
999  and 1 meaning that it could be a legitimate construct.
1000  message: The error message.
1001  """
1002  if _ShouldPrintError(category, confidence, linenum):
1003  _cpplint_state.IncrementErrorCount(category)
1004  if _cpplint_state.output_format == 'vs7':
1005  sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
1006  filename, linenum, message, category, confidence))
1007  elif _cpplint_state.output_format == 'eclipse':
1008  sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
1009  filename, linenum, message, category, confidence))
1010  else:
1011  sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
1012  filename, linenum, message, category, confidence))
1013 
1014 
1015 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
1016 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
1017  r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
1018 # Matches strings. Escape codes should already be removed by ESCAPES.
1019 _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
1020 # Matches characters. Escape codes should already be removed by ESCAPES.
1021 _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
1022 # Matches multi-line C++ comments.
1023 # This RE is a little bit more complicated than one might expect, because we
1024 # have to take care of space removal so we can handle comments inside
1025 # statements better.
1026 # The current rule is: We only clear spaces from both sides when we're at the
1027 # end of the line. Otherwise, we try to remove spaces from the right side;
1028 # if this doesn't work, we try the left side, but only if there's a
1029 # non-word character on the right.
1030 _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
1031  r"""(\s*/\*.*\*/\s*$|
1032  /\*.*\*/\s+|
1033  \s+/\*.*\*/(?=\W)|
1034  /\*.*\*/)""", re.VERBOSE)
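
# Illustrative sketch (added commentary, not part of upstream cpplint): a
# single-line C-style comment is stripped while the statement is kept, e.g.
#   >>> _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', 'int x = 0;  /* init */')
#   'int x = 0;'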
1035 
1036 
1037 def IsCppString(line):
1038  """Does line terminate so, that the next symbol is in string constant.
1039 
1040  This function does not consider single-line nor multi-line comments.
1041 
1042  Args:
1043  line: a partial line of code, from position 0 up to some index n.
1044 
1045  Returns:
1046  True, if next character appended to 'line' is inside a
1047  string constant.
1048  """
1049 
1050  line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
1051  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
1052 
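# Illustrative sketch (added commentary, not part of upstream cpplint): a
# line with an unterminated double quote leaves the next character inside a
# string constant, e.g.
#   >>> IsCppString('printf("hello')
#   True
#   >>> IsCppString('printf("hello")')
#   False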
1053 
1054 def CleanseRawStrings(raw_lines):
1055  """Removes C++11 raw strings from lines.
1056 
1057  Before:
1058  static const char kData[] = R"(
1059  multi-line string
1060  )";
1061 
1062  After:
1063  static const char kData[] = ""
1064  (replaced by blank line)
1065  "";
1066 
1067  Args:
1068  raw_lines: list of raw lines.
1069 
1070  Returns:
1071  list of lines with C++11 raw strings replaced by empty strings.
1072  """
1073 
1074  delimiter = None
1075  lines_without_raw_strings = []
1076  for line in raw_lines:
1077  if delimiter:
1078  # Inside a raw string, look for the end
1079  end = line.find(delimiter)
1080  if end >= 0:
1081  # Found the end of the string, match leading space for this
1082  # line and resume copying the original lines, and also insert
1083  # a "" on the last line.
1084  leading_space = Match(r'^(\s*)\S', line)
1085  line = leading_space.group(1) + '""' + line[end + len(delimiter):]
1086  delimiter = None
1087  else:
1088  # Haven't found the end yet, append a blank line.
1089  line = ''
1090 
1091  else:
1092  # Look for beginning of a raw string.
1093  # See 2.14.15 [lex.string] for syntax.
1094  matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
1095  if matched:
1096  delimiter = ')' + matched.group(2) + '"'
1097 
1098  end = matched.group(3).find(delimiter)
1099  if end >= 0:
1100  # Raw string ended on same line
1101  line = (matched.group(1) + '""' +
1102  matched.group(3)[end + len(delimiter):])
1103  delimiter = None
1104  else:
1105  # Start of a multi-line raw string
1106  line = matched.group(1) + '""'
1107 
1108  lines_without_raw_strings.append(line)
1109 
1110  # TODO(unknown): if delimiter is not None here, we might want to
1111  # emit a warning for unterminated string.
1112  return lines_without_raw_strings
1113 
1114 
1116  """Find the beginning marker for a multiline comment."""
1117  while lineix < len(lines):
1118  if lines[lineix].strip().startswith('/*'):
1119  # Only return this marker if the comment goes beyond this line
1120  if lines[lineix].strip().find('*/', 2) < 0:
1121  return lineix
1122  lineix += 1
1123  return len(lines)
1124 
1125 
1126 def FindNextMultiLineCommentEnd(lines, lineix):
1127  """We are inside a comment, find the end marker."""
1128  while lineix < len(lines):
1129  if lines[lineix].strip().endswith('*/'):
1130  return lineix
1131  lineix += 1
1132  return len(lines)
1133 
1134 
1135 def RemoveMultiLineCommentsFromRange(lines, begin, end):
1136  """Clears a range of lines for multi-line comments."""
1137  # Having // dummy comments makes the lines non-empty, so we will not get
1138  # unnecessary blank line warnings later in the code.
1139  for i in range(begin, end):
1140  lines[i] = '// dummy'
1141 
1142 
1143 def RemoveMultiLineComments(filename, lines, error):
1144  """Removes multiline (c-style) comments from lines."""
1145  lineix = 0
1146  while lineix < len(lines):
1147  lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
1148  if lineix_begin >= len(lines):
1149  return
1150  lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
1151  if lineix_end >= len(lines):
1152  error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
1153  'Could not find end of multi-line comment')
1154  return
1155  RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
1156  lineix = lineix_end + 1
1157 
1158 
1160  """Removes //-comments and single-line C-style /* */ comments.
1161 
1162  Args:
1163  line: A line of C++ source.
1164 
1165  Returns:
1166  The line with single-line comments removed.
1167  """
1168  commentpos = line.find('//')
1169  if commentpos != -1 and not IsCppString(line[:commentpos]):
1170  line = line[:commentpos].rstrip()
1171  # get rid of /* ... */
1172  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
1173 
1174 
1175 class CleansedLines(object):
1176  """Holds 3 copies of all lines with different preprocessing applied to them.
1177 
1178  1) elided member contains lines without strings and comments,
1179  2) lines member contains lines without comments, and
1180  3) raw_lines member contains all the lines without processing.
1181  All three of these members are lists of the same length.
1182  """
1183 
1184  def __init__(self, lines):
1185  self.elided = []
1186  self.lines = []
1187  self.raw_lines = lines
1188  self.num_lines = len(lines)
1189  self.lines_without_raw_strings = CleanseRawStrings(lines)
1190  for linenum in range(len(self.lines_without_raw_strings)):
1191  self.lines.append(CleanseComments(
1192  self.lines_without_raw_strings[linenum]))
1193  elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
1194  self.elided.append(CleanseComments(elided))
1195 
1196  def NumLines(self):
1197  """Returns the number of lines represented."""
1198  return self.num_lines
1199 
1200  @staticmethod
1201  def _CollapseStrings(elided):
1202  """Collapses strings and chars on a line to simple "" or '' blocks.
1203 
1204  We nix strings first so we're not fooled by text like '"http://"'
1205 
1206  Args:
1207  elided: The line being processed.
1208 
1209  Returns:
1210  The line with collapsed strings.
1211  """
1212  if not _RE_PATTERN_INCLUDE.match(elided):
1213  # Remove escaped characters first to make quote/single quote collapsing
1214  # basic. Things that look like escaped characters shouldn't occur
1215  # outside of strings and chars.
1216  elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
1217  elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
1218  elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
1219  return elided
1220 
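# Illustrative sketch (added commentary, not part of upstream cpplint):
# collapsing replaces string and char literals with empty ones so later
# checks are not fooled by their contents, e.g.
#   >>> CleansedLines._CollapseStrings('s = "http://x";  c = \'y\';')
#   's = "";  c = \'\';'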
1221 
1222 def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
1223  """Find the position just after the matching endchar.
1224 
1225  Args:
1226  line: a CleansedLines line.
1227  startpos: start searching at this position.
1228  depth: nesting level at startpos.
1229  startchar: expression opening character.
1230  endchar: expression closing character.
1231 
1232  Returns:
1233  On finding matching endchar: (index just after matching endchar, 0)
1234  Otherwise: (-1, new depth at end of this line)
1235  """
1236  for i in xrange(startpos, len(line)):
1237  if line[i] == startchar:
1238  depth += 1
1239  elif line[i] == endchar:
1240  depth -= 1
1241  if depth == 0:
1242  return (i + 1, 0)
1243  return (-1, depth)
1244 
1245 
1246 def CloseExpression(clean_lines, linenum, pos):
1247  """If input points to ( or { or [ or <, finds the position that closes it.
1248 
1249  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
1250  linenum/pos that correspond to the closing of the expression.
1251 
1252  Args:
1253  clean_lines: A CleansedLines instance containing the file.
1254  linenum: The number of the line to check.
1255  pos: A position on the line.
1256 
1257  Returns:
1258  A tuple (line, linenum, pos) pointer *past* the closing brace, or
1259  (line, len(lines), -1) if we never find a close. Note we ignore
1260  strings and comments when matching; and the line we return is the
1261  'cleansed' line at linenum.
1262  """
1263 
1264  line = clean_lines.elided[linenum]
1265  startchar = line[pos]
1266  if startchar not in '({[<':
1267  return (line, clean_lines.NumLines(), -1)
1268  if startchar == '(': endchar = ')'
1269  if startchar == '[': endchar = ']'
1270  if startchar == '{': endchar = '}'
1271  if startchar == '<': endchar = '>'
1272 
1273  # Check first line
1274  (end_pos, num_open) = FindEndOfExpressionInLine(
1275  line, pos, 0, startchar, endchar)
1276  if end_pos > -1:
1277  return (line, linenum, end_pos)
1278 
1279  # Continue scanning forward
1280  while linenum < clean_lines.NumLines() - 1:
1281  linenum += 1
1282  line = clean_lines.elided[linenum]
1283  (end_pos, num_open) = FindEndOfExpressionInLine(
1284  line, 0, num_open, startchar, endchar)
1285  if end_pos > -1:
1286  return (line, linenum, end_pos)
1287 
1288  # Did not find endchar before end of file, give up
1289  return (line, clean_lines.NumLines(), -1)
1290 
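# Illustrative sketch (added commentary, not part of upstream cpplint): for a
# cleansed line 'f(a, (b + c), d);', starting at the first '(' returns the
# position just past its matching ')', e.g.
#   >>> _, linenum, end_pos = CloseExpression(CleansedLines(['f(a, (b + c), d);']), 0, 1)
#   >>> (linenum, end_pos)
#   (0, 16)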
1291 
1292 def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
1293  """Find position at the matching startchar.
1294 
1295  This is almost the reverse of FindEndOfExpressionInLine, but note
1296  that the input position and returned position differs by 1.
1297 
1298  Args:
1299  line: a CleansedLines line.
1300  endpos: start searching at this position.
1301  depth: nesting level at endpos.
1302  startchar: expression opening character.
1303  endchar: expression closing character.
1304 
1305  Returns:
1306  On finding matching startchar: (index at matching startchar, 0)
1307  Otherwise: (-1, new depth at beginning of this line)
1308  """
1309  for i in xrange(endpos, -1, -1):
1310  if line[i] == endchar:
1311  depth += 1
1312  elif line[i] == startchar:
1313  depth -= 1
1314  if depth == 0:
1315  return (i, 0)
1316  return (-1, depth)
1317 
1318 
1319 def ReverseCloseExpression(clean_lines, linenum, pos):
1320  """If input points to ) or } or ] or >, finds the position that opens it.
1321 
1322  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
1323  linenum/pos that correspond to the opening of the expression.
1324 
1325  Args:
1326  clean_lines: A CleansedLines instance containing the file.
1327  linenum: The number of the line to check.
1328  pos: A position on the line.
1329 
1330  Returns:
1331  A tuple (line, linenum, pos) pointer *at* the opening brace, or
1332  (line, 0, -1) if we never find the matching opening brace. Note
1333  we ignore strings and comments when matching; and the line we
1334  return is the 'cleansed' line at linenum.
1335  """
1336  line = clean_lines.elided[linenum]
1337  endchar = line[pos]
1338  if endchar not in ')}]>':
1339  return (line, 0, -1)
1340  if endchar == ')': startchar = '('
1341  if endchar == ']': startchar = '['
1342  if endchar == '}': startchar = '{'
1343  if endchar == '>': startchar = '<'
1344 
1345  # Check last line
1346  (start_pos, num_open) = FindStartOfExpressionInLine(
1347  line, pos, 0, startchar, endchar)
1348  if start_pos > -1:
1349  return (line, linenum, start_pos)
1350 
1351  # Continue scanning backward
1352  while linenum > 0:
1353  linenum -= 1
1354  line = clean_lines.elided[linenum]
1355  (start_pos, num_open) = FindStartOfExpressionInLine(
1356  line, len(line) - 1, num_open, startchar, endchar)
1357  if start_pos > -1:
1358  return (line, linenum, start_pos)
1359 
1360  # Did not find startchar before beginning of file, give up
1361  return (line, 0, -1)
1362 
1363 
1364 def CheckForCopyright(filename, lines, error):
1365  """Logs an error if no Copyright message appears at the top of the file."""
1366 
1367  # We'll say it should occur by line 10. Don't forget there's a
1368  # dummy line at the front.
1369  for line in xrange(1, min(len(lines), 11)):
1370  if re.search(r'Copyright', lines[line], re.I): break
1371  else: # means no copyright line was found
1372  error(filename, 0, 'legal/copyright', 5,
1373  'No copyright message found. '
1374  'You should have a line: "Copyright [year] <Copyright Owner>"')
1375 
1376 
1378  """Returns the CPP variable that should be used as a header guard.
1379 
1380  Args:
1381  filename: The name of a C++ header file.
1382 
1383  Returns:
1384  The CPP variable that should be used as a header guard in the
1385  named file.
1386 
1387  """
1388 
1389  # Restores original filename in case that cpplint is invoked from Emacs's
1390  # flymake.
1391  filename = re.sub(r'_flymake\.h$', '.h', filename)
1392  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
1393 
1394  fileinfo = FileInfo(filename)
1395  file_path_from_root = fileinfo.RepositoryName()
1396  if _root:
1397  file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
1398  return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
1399 
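# Illustrative sketch (added commentary, not part of upstream cpplint): the
# final substitution turns a repository-relative path into the guard name,
# e.g.
#   >>> re.sub(r'[-./\s]', '_', 'chrome/browser/ui/browser.h').upper() + '_'
#   'CHROME_BROWSER_UI_BROWSER_H_'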
1400 
1401 def CheckForHeaderGuard(filename, lines, error):
1402  """Checks that the file contains a header guard.
1403 
1404  Logs an error if no #ifndef header guard is present. For other
1405  headers, checks that the full pathname is used.
1406 
1407  Args:
1408  filename: The name of the C++ header file.
1409  lines: An array of strings, each representing a line of the file.
1410  error: The function to call with any errors found.
1411  """
1412 
1413  cppvar = GetHeaderGuardCPPVariable(filename)
1414 
1415  ifndef = None
1416  ifndef_linenum = 0
1417  define = None
1418  endif = None
1419  endif_linenum = 0
1420  for linenum, line in enumerate(lines):
1421  linesplit = line.split()
1422  if len(linesplit) >= 2:
1423  # find the first occurrence of #ifndef and #define, save arg
1424  if not ifndef and linesplit[0] == '#ifndef':
1425  # set ifndef to the header guard presented on the #ifndef line.
1426  ifndef = linesplit[1]
1427  ifndef_linenum = linenum
1428  if not define and linesplit[0] == '#define':
1429  define = linesplit[1]
1430  # find the last occurrence of #endif, save entire line
1431  if line.startswith('#endif'):
1432  endif = line
1433  endif_linenum = linenum
1434 
1435  if not ifndef:
1436  error(filename, 0, 'build/header_guard', 5,
1437  'No #ifndef header guard found, suggested CPP variable is: %s' %
1438  cppvar)
1439  return
1440 
1441  if not define:
1442  error(filename, 0, 'build/header_guard', 5,
1443  'No #define header guard found, suggested CPP variable is: %s' %
1444  cppvar)
1445  return
1446 
1447  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
1448  # for backward compatibility.
1449  if ifndef != cppvar:
1450  error_level = 0
1451  if ifndef != cppvar + '_':
1452  error_level = 5
1453 
1454  ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
1455  error)
1456  error(filename, ifndef_linenum, 'build/header_guard', error_level,
1457  '#ifndef header guard has wrong style, please use: %s' % cppvar)
1458 
1459  if define != ifndef:
1460  error(filename, 0, 'build/header_guard', 5,
1461  '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
1462  cppvar)
1463  return
1464 
1465  if endif != ('#endif // %s' % cppvar):
1466  error_level = 0
1467  if endif != ('#endif // %s' % (cppvar + '_')):
1468  error_level = 5
1469 
1470  ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
1471  error)
1472  error(filename, endif_linenum, 'build/header_guard', error_level,
1473  '#endif line should be "#endif // %s"' % cppvar)
1474 
1475 
1476 def CheckForBadCharacters(filename, lines, error):
1477  """Logs an error for each line containing bad characters.
1478 
1479  Two kinds of bad characters:
1480 
1481  1. Unicode replacement characters: These indicate that either the file
1482  contained invalid UTF-8 (likely) or Unicode replacement characters (which
1483  it shouldn't). Note that it's possible for this to throw off line
1484  numbering if the invalid UTF-8 occurred adjacent to a newline.
1485 
1486  2. NUL bytes. These are problematic for some tools.
1487 
1488  Args:
1489  filename: The name of the current file.
1490  lines: An array of strings, each representing a line of the file.
1491  error: The function to call with any errors found.
1492  """
1493  for linenum, line in enumerate(lines):
1494  if u'\ufffd' in line:
1495  error(filename, linenum, 'readability/utf8', 5,
1496  'Line contains invalid UTF-8 (or Unicode replacement character).')
1497  if '\0' in line:
1498  error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
1499 
1500 
1501 def CheckForNewlineAtEOF(filename, lines, error):
1502  """Logs an error if there is no newline char at the end of the file.
1503 
1504  Args:
1505  filename: The name of the current file.
1506  lines: An array of strings, each representing a line of the file.
1507  error: The function to call with any errors found.
1508  """
1509 
1510  # The array lines() was created by adding two newlines to the
1511  # original file (go figure), then splitting on \n.
1512  # To verify that the file ends in \n, we just have to make sure the
1513  # last-but-two element of lines() exists and is empty.
1514  if len(lines) < 3 or lines[-2]:
1515  error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
1516  'Could not find a newline character at the end of the file.')
1517 
1518 
1519 def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
1520  """Logs an error if we see /* ... */ or "..." that extend past one line.
1521 
1522  /* ... */ comments are legit inside macros, for one line.
1523  Otherwise, we prefer // comments, so it's ok to warn about the
1524  other. Likewise, it's ok for strings to extend across multiple
1525  lines, as long as a line continuation character (backslash)
1526  terminates each line. Although not currently prohibited by the C++
1527  style guide, it's ugly and unnecessary. We don't do well with either
1528  in this lint program, so we warn about both.
1529 
1530  Args:
1531  filename: The name of the current file.
1532  clean_lines: A CleansedLines instance containing the file.
1533  linenum: The number of the line to check.
1534  error: The function to call with any errors found.
1535  """
1536  line = clean_lines.elided[linenum]
1537 
1538  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
1539  # second (escaped) slash may trigger later \" detection erroneously.
1540  line = line.replace('\\\\', '')
1541 
1542  if line.count('/*') > line.count('*/'):
1543  error(filename, linenum, 'readability/multiline_comment', 5,
1544  'Complex multi-line /*...*/-style comment found. '
1545  'Lint may give bogus warnings. '
1546  'Consider replacing these with //-style comments, '
1547  'with #if 0...#endif, '
1548  'or with more clearly structured multi-line comments.')
1549 
1550  if (line.count('"') - line.count('\\"')) % 2:
1551  error(filename, linenum, 'readability/multiline_string', 5,
1552  'Multi-line string ("...") found. This lint script doesn\'t '
1553  'do well with such strings, and may give bogus warnings. '
1554  'Use C++11 raw strings or concatenation instead.')
1555 
1556 
1557 threading_list = (
1558  ('asctime(', 'asctime_r('),
1559  ('ctime(', 'ctime_r('),
1560  ('getgrgid(', 'getgrgid_r('),
1561  ('getgrnam(', 'getgrnam_r('),
1562  ('getlogin(', 'getlogin_r('),
1563  ('getpwnam(', 'getpwnam_r('),
1564  ('getpwuid(', 'getpwuid_r('),
1565  ('gmtime(', 'gmtime_r('),
1566  ('localtime(', 'localtime_r('),
1567  ('rand(', 'rand_r('),
1568  ('strtok(', 'strtok_r('),
1569  ('ttyname(', 'ttyname_r('),
1570  )
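
# Illustrative note (added commentary, not part of upstream cpplint): a line
# like 'tok = strtok(s, ",");' is flagged by CheckPosixThreading below with
# the suggestion to use strtok_r(...) instead, while 'my_strtok(' or
# 'obj.strtok(' are not flagged, because the character preceding the match
# is checked.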
1571 
1572 
1573 def CheckPosixThreading(filename, clean_lines, linenum, error):
1574  """Checks for calls to thread-unsafe functions.
1575 
1576  Much code was originally written without consideration of
1577  multi-threading. Also, engineers rely on their old experience;
1578  they learned POSIX before the threading extensions were added. These
1579  tests guide engineers to use thread-safe functions (when using
1580  POSIX directly).
1581 
1582  Args:
1583  filename: The name of the current file.
1584  clean_lines: A CleansedLines instance containing the file.
1585  linenum: The number of the line to check.
1586  error: The function to call with any errors found.
1587  """
1588  line = clean_lines.elided[linenum]
1589  for single_thread_function, multithread_safe_function in threading_list:
1590  ix = line.find(single_thread_function)
1591  # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
1592  if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
1593  line[ix - 1] not in ('_', '.', '>'))):
1594  error(filename, linenum, 'runtime/threadsafe_fn', 2,
1595  'Consider using ' + multithread_safe_function +
1596  '...) instead of ' + single_thread_function +
1597  '...) for improved thread safety.')
1598 
1599 
1600 def CheckVlogArguments(filename, clean_lines, linenum, error):
1601  """Checks that VLOG() is only used for defining a logging level.
1602 
1603  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
1604  VLOG(FATAL) are not.
1605 
1606  Args:
1607  filename: The name of the current file.
1608  clean_lines: A CleansedLines instance containing the file.
1609  linenum: The number of the line to check.
1610  error: The function to call with any errors found.
1611  """
1612  line = clean_lines.elided[linenum]
1613  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
1614  error(filename, linenum, 'runtime/vlog', 5,
1615  'VLOG() should be used with numeric verbosity level. '
1616  'Use LOG() if you want symbolic severity levels.')
1617 
1618 
1619 # Matches invalid increment: *count++, which moves pointer instead of
1620 # incrementing a value.
1621 _RE_PATTERN_INVALID_INCREMENT = re.compile(
1622  r'^\s*\*\w+(\+\+|--);')
1623 
1624 
1625 def CheckInvalidIncrement(filename, clean_lines, linenum, error):
1626  """Checks for invalid increment *count++.
1627 
1628  For example, the following function:
1629  void increment_counter(int* count) {
1630  *count++;
1631  }
1632  is invalid, because it effectively does count++, moving the pointer, and should
1633  be replaced with ++*count, (*count)++ or *count += 1.
1634 
1635  Args:
1636  filename: The name of the current file.
1637  clean_lines: A CleansedLines instance containing the file.
1638  linenum: The number of the line to check.
1639  error: The function to call with any errors found.
1640  """
1641  line = clean_lines.elided[linenum]
1642  if _RE_PATTERN_INVALID_INCREMENT.match(line):
1643  error(filename, linenum, 'runtime/invalid_increment', 5,
1644  'Changing pointer instead of value (or unused value of operator*).')
1645 
1646 
1647 class _BlockInfo(object):
1648  """Stores information about a generic block of code."""
1649 
1650  def __init__(self, seen_open_brace):
1651  self.seen_open_brace = seen_open_brace
1652  self.open_parentheses = 0
1653  self.inline_asm = _NO_ASM
1654 
1655  def CheckBegin(self, filename, clean_lines, linenum, error):
1656  """Run checks that apply to text up to the opening brace.
1657 
1658  This is mostly for checking the text after the class identifier
1659  and the "{", usually where the base class is specified. For other
1660  blocks, there isn't much to check, so we always pass.
1661 
1662  Args:
1663  filename: The name of the current file.
1664  clean_lines: A CleansedLines instance containing the file.
1665  linenum: The number of the line to check.
1666  error: The function to call with any errors found.
1667  """
1668  pass
1669 
1670  def CheckEnd(self, filename, clean_lines, linenum, error):
1671  """Run checks that apply to text after the closing brace.
1672 
1673  This is mostly used for checking end of namespace comments.
1674 
1675  Args:
1676  filename: The name of the current file.
1677  clean_lines: A CleansedLines instance containing the file.
1678  linenum: The number of the line to check.
1679  error: The function to call with any errors found.
1680  """
1681  pass
1682 
1683 
1684 class _ClassInfo(_BlockInfo):
1685  """Stores information about a class."""
1686 
1687  def __init__(self, name, class_or_struct, clean_lines, linenum):
1688  _BlockInfo.__init__(self, False)
1689  self.name = name
1690  self.starting_linenum = linenum
1691  self.is_derived = False
1692  if class_or_struct == 'struct':
1693  self.access = 'public'
1694  self.is_struct = True
1695  else:
1696  self.access = 'private'
1697  self.is_struct = False
1698 
1699  # Remember initial indentation level for this class. Using raw_lines here
1700  # instead of elided to account for leading comments.
1701  initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
1702  if initial_indent:
1703  self.class_indent = len(initial_indent.group(1))
1704  else:
1705  self.class_indent = 0
1706 
1707  # Try to find the end of the class. This will be confused by things like:
1708  # class A {
1709  # } *x = { ...
1710  #
1711  # But it's still good enough for CheckSectionSpacing.
1712  self.last_line = 0
1713  depth = 0
1714  for i in range(linenum, clean_lines.NumLines()):
1715  line = clean_lines.elided[i]
1716  depth += line.count('{') - line.count('}')
1717  if not depth:
1718  self.last_line = i
1719  break
1720 
1721  def CheckBegin(self, filename, clean_lines, linenum, error):
1722  # Look for a bare ':'
1723  if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
1724  self.is_derived = True
1725 
1726  def CheckEnd(self, filename, clean_lines, linenum, error):
1727  # Check that closing brace is aligned with beginning of the class.
1728  # Only do this if the closing brace is indented by only whitespaces.
1729  # This means we will not check single-line class definitions.
1730  indent = Match(r'^( *)\}', clean_lines.elided[linenum])
1731  if indent and len(indent.group(1)) != self.class_indent:
1732  if self.is_struct:
1733  parent = 'struct ' + self.name
1734  else:
1735  parent = 'class ' + self.name
1736  error(filename, linenum, 'whitespace/indent', 3,
1737  'Closing brace should be aligned with beginning of %s' % parent)
1738 
1739 
1740 class _NamespaceInfo(_BlockInfo):
1741  """Stores information about a namespace."""
1742 
1743  def __init__(self, name, linenum):
1744  _BlockInfo.__init__(self, False)
1745  self.name = name or ''
1746  self.starting_linenum = linenum
1747 
1748  def CheckEnd(self, filename, clean_lines, linenum, error):
1749  """Check end of namespace comments."""
1750  line = clean_lines.raw_lines[linenum]
1751 
1752  # Check how many lines are enclosed in this namespace. Don't issue
1753  # warning for missing namespace comments if there aren't enough
1754  # lines. However, do apply checks if there is already an end of
1755  # namespace comment and it's incorrect.
1756  #
1757  # TODO(unknown): We always want to check end of namespace comments
1758  # if a namespace is large, but sometimes we also want to apply the
1759  # check if a short namespace contained nontrivial things (something
1760  # other than forward declarations). There is currently no logic on
1761  # deciding what these nontrivial things are, so this check is
1762  # triggered by namespace size only, which works most of the time.
1763  if (linenum - self.starting_linenum < 10
1764  and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
1765  return
1766 
1767  # Look for matching comment at end of namespace.
1768  #
1769  # Note that we accept C style "/* */" comments for terminating
1770  # namespaces, so that code that terminates namespaces inside
1771  # preprocessor macros can be cpplint clean.
1772  #
1773  # We also accept stuff like "// end of namespace <name>." with the
1774  # period at the end.
1775  #
1776  # Besides these, we don't accept anything else, otherwise we might
1777  # get false negatives when the existing comment is a substring of the
1778  # expected namespace.
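  # Illustrative examples of accepted terminators for "namespace foo":
  #   }  // namespace foo
  #   }  /* namespace foo */
  #   }  // end of namespace foo.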
1779  if self.name:
1780  # Named namespace
1781  if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
1782  r'[\*/\.\\\s]*$'),
1783  line):
1784  error(filename, linenum, 'readability/namespace', 5,
1785  'Namespace should be terminated with "// namespace %s"' %
1786  self.name)
1787  else:
1788  # Anonymous namespace
1789  if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
1790  error(filename, linenum, 'readability/namespace', 5,
1791  'Namespace should be terminated with "// namespace"')
1792 
1793 
1794 class _PreprocessorInfo(object):
1795  """Stores checkpoints of nesting stacks when #if/#else is seen."""
1796 
1797  def __init__(self, stack_before_if):
1798  # The entire nesting stack before #if
1799  self.stack_before_if = stack_before_if
1800 
1801  # The entire nesting stack up to #else
1802  self.stack_before_else = []
1803 
1804  # Whether we have already seen #else or #elif
1805  self.seen_else = False
1806 
1807 
1808 class _NestingState(object):
1809  """Holds states related to parsing braces."""
1810 
1811  def __init__(self):
1812  # Stack for tracking all braces. An object is pushed whenever we
1813  # see a "{", and popped when we see a "}". Only 3 types of
1814  # objects are possible:
1815  # - _ClassInfo: a class or struct.
1816  # - _NamespaceInfo: a namespace.
1817  # - _BlockInfo: some other type of block.
1818  self.stack = []
1819 
1820  # Stack of _PreprocessorInfo objects.
1821  self.pp_stack = []
1822 
1823  def SeenOpenBrace(self):
1824  """Check if we have seen the opening brace for the innermost block.
1825 
1826  Returns:
1827  True if we have seen the opening brace, False if the innermost
1828  block is still expecting an opening brace.
1829  """
1830  return (not self.stack) or self.stack[-1].seen_open_brace
1831 
1832  def InNamespaceBody(self):
1833  """Check if we are currently one level inside a namespace body.
1834 
1835  Returns:
1836  True if top of the stack is a namespace block, False otherwise.
1837  """
1838  return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
1839 
1840  def UpdatePreprocessor(self, line):
1841  """Update preprocessor stack.
1842 
1843  We need to handle preprocessors due to classes like this:
1844  #ifdef SWIG
1845  struct ResultDetailsPageElementExtensionPoint {
1846  #else
1847  struct ResultDetailsPageElementExtensionPoint : public Extension {
1848  #endif
1849 
1850  We make the following assumptions (good enough for most files):
1851  - Preprocessor condition evaluates to true from #if up to first
1852  #else/#elif/#endif.
1853 
1854  - Preprocessor condition evaluates to false from #else/#elif up
1855  to #endif. We still perform lint checks on these lines, but
1856  these do not affect nesting stack.
1857 
1858  Args:
1859  line: current line to check.
1860  """
1861  if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
1862  # Beginning of #if block, save the nesting stack here. The saved
1863  # stack will allow us to restore the parsing state in the #else case.
1864  self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
1865  elif Match(r'^\s*#\s*(else|elif)\b', line):
1866  # Beginning of #else block
1867  if self.pp_stack:
1868  if not self.pp_stack[-1].seen_else:
1869  # This is the first #else or #elif block. Remember the
1870  # whole nesting stack up to this point. This is what we
1871  # keep after the #endif.
1872  self.pp_stack[-1].seen_else = True
1873  self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
1874 
1875  # Restore the stack to how it was before the #if
1876  self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
1877  else:
1878  # TODO(unknown): unexpected #else, issue warning?
1879  pass
1880  elif Match(r'^\s*#\s*endif\b', line):
1881  # End of #if or #else blocks.
1882  if self.pp_stack:
1883  # If we saw an #else, we will need to restore the nesting
1884  # stack to its former state before the #else, otherwise we
1885  # will just continue from where we left off.
1886  if self.pp_stack[-1].seen_else:
1887  # Here we can just use a shallow copy since we are the last
1888  # reference to it.
1889  self.stack = self.pp_stack[-1].stack_before_else
1890  # Drop the corresponding #if
1891  self.pp_stack.pop()
1892  else:
1893  # TODO(unknown): unexpected #endif, issue warning?
1894  pass
1895 
1896  def Update(self, filename, clean_lines, linenum, error):
1897  """Update nesting state with current line.
1898 
1899  Args:
1900  filename: The name of the current file.
1901  clean_lines: A CleansedLines instance containing the file.
1902  linenum: The number of the line to check.
1903  error: The function to call with any errors found.
1904  """
1905  line = clean_lines.elided[linenum]
1906 
1907  # Update pp_stack first
1908  self.UpdatePreprocessor(line)
1909 
1910  # Count parentheses. This is to avoid adding struct arguments to
1911  # the nesting stack.
1912  if self.stack:
1913  inner_block = self.stack[-1]
1914  depth_change = line.count('(') - line.count(')')
1915  inner_block.open_parentheses += depth_change
1916 
1917  # Also check if we are starting or ending an inline assembly block.
1918  if inner_block.inline_asm in (_NO_ASM, _END_ASM):
1919  if (depth_change != 0 and
1920  inner_block.open_parentheses == 1 and
1921  _MATCH_ASM.match(line)):
1922  # Enter assembly block
1923  inner_block.inline_asm = _INSIDE_ASM
1924  else:
1925  # Not entering assembly block. If previous line was _END_ASM,
1926  # we will now shift to _NO_ASM state.
1927  inner_block.inline_asm = _NO_ASM
1928  elif (inner_block.inline_asm == _INSIDE_ASM and
1929  inner_block.open_parentheses == 0):
1930  # Exit assembly block
1931  inner_block.inline_asm = _END_ASM
1932 
1933  # Consume namespace declaration at the beginning of the line. Do
1934  # this in a loop so that we catch same line declarations like this:
1935  # namespace proto2 { namespace bridge { class MessageSet; } }
1936  while True:
1937  # Match start of namespace. The "\b\s*" below catches namespace
1938  # declarations even if they aren't followed by whitespace; this
1939  # is so that we don't confuse our namespace checker. The
1940  # missing spaces will be flagged by CheckSpacing.
1941  namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
1942  if not namespace_decl_match:
1943  break
1944 
1945  new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
1946  self.stack.append(new_namespace)
1947 
1948  line = namespace_decl_match.group(2)
1949  if line.find('{') != -1:
1950  new_namespace.seen_open_brace = True
1951  line = line[line.find('{') + 1:]
1952 
1953  # Look for a class declaration in whatever is left of the line
1954  # after parsing namespaces. The regexp accounts for decorated classes
1955  # such as in:
1956  # class LOCKABLE API Object {
1957  # };
1958  #
1959  # Templates with class arguments may confuse the parser, for example:
1960  # template <class T
1961  # class Comparator = less<T>,
1962  # class Vector = vector<T> >
1963  # class HeapQueue {
1964  #
1965  # Because this parser has no nesting state about templates, by the
1966  # time it sees "class Comparator", it may think that it's a new class.
1967  # Nested templates have a similar problem:
1968  # template <
1969  # typename ExportedType,
1970  # typename TupleType,
1971  # template <typename, typename> class ImplTemplate>
1972  #
1973  # To avoid these cases, we ignore classes that are followed by '=' or '>'
1974  class_decl_match = Match(
1975  r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
1976  r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
1977  r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
1978  if (class_decl_match and
1979  (not self.stack or self.stack[-1].open_parentheses == 0)):
1980  self.stack.append(_ClassInfo(
1981  class_decl_match.group(4), class_decl_match.group(2),
1982  clean_lines, linenum))
1983  line = class_decl_match.group(5)
1984 
1985  # If we have not yet seen the opening brace for the innermost block,
1986  # run checks here.
1987  if not self.SeenOpenBrace():
1988  self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
1989 
1990  # Update access control if we are inside a class/struct
1991  if self.stack and isinstance(self.stack[-1], _ClassInfo):
1992  classinfo = self.stack[-1]
1993  access_match = Match(
1994  r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
1995  r':(?:[^:]|$)',
1996  line)
1997  if access_match:
1998  classinfo.access = access_match.group(2)
1999 
2000  # Check that access keywords are indented +1 space. Skip this
2001  # check if the keywords are not preceded by whitespaces.
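  # Illustrative example: inside "class Foo {" declared at column 0,
  # " public:" (one leading space) is accepted, while "public:" or
  # "  public:" is reported below.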
2002  indent = access_match.group(1)
2003  if (len(indent) != classinfo.class_indent + 1 and
2004  Match(r'^\s*$', indent)):
2005  if classinfo.is_struct:
2006  parent = 'struct ' + classinfo.name
2007  else:
2008  parent = 'class ' + classinfo.name
2009  slots = ''
2010  if access_match.group(3):
2011  slots = access_match.group(3)
2012  error(filename, linenum, 'whitespace/indent', 3,
2013  '%s%s: should be indented +1 space inside %s' % (
2014  access_match.group(2), slots, parent))
2015 
2016  # Consume braces or semicolons from what's left of the line
2017  while True:
2018  # Match first brace, semicolon, or closed parenthesis.
2019  matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
2020  if not matched:
2021  break
2022 
2023  token = matched.group(1)
2024  if token == '{':
2025  # If namespace or class hasn't seen an opening brace yet, mark
2026  # namespace/class head as complete. Push a new block onto the
2027  # stack otherwise.
2028  if not self.SeenOpenBrace():
2029  self.stack[-1].seen_open_brace = True
2030  else:
2031  self.stack.append(_BlockInfo(True))
2032  if _MATCH_ASM.match(line):
2033  self.stack[-1].inline_asm = _BLOCK_ASM
2034  elif token == ';' or token == ')':
2035  # If we haven't seen an opening brace yet, but we already saw
2036  # a semicolon, this is probably a forward declaration. Pop
2037  # the stack for these.
2038  #
2039  # Similarly, if we haven't seen an opening brace yet, but we
2040  # already saw a closing parenthesis, then these are probably
2041  # function arguments with extra "class" or "struct" keywords.
2042  # Also pop the stack for these.
2043  if not self.SeenOpenBrace():
2044  self.stack.pop()
2045  else: # token == '}'
2046  # Perform end of block checks and pop the stack.
2047  if self.stack:
2048  self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
2049  self.stack.pop()
2050  line = matched.group(2)
2051 
2052  def InnermostClass(self):
2053  """Get class info on the top of the stack.
2054 
2055  Returns:
2056  A _ClassInfo object if we are inside a class, or None otherwise.
2057  """
2058  for i in range(len(self.stack), 0, -1):
2059  classinfo = self.stack[i - 1]
2060  if isinstance(classinfo, _ClassInfo):
2061  return classinfo
2062  return None
2063 
2064  def CheckCompletedBlocks(self, filename, error):
2065  """Checks that all classes and namespaces have been completely parsed.
2066 
2067  Call this when all lines in a file have been processed.
2068  Args:
2069  filename: The name of the current file.
2070  error: The function to call with any errors found.
2071  """
2072  # Note: This test can result in false positives if #ifdef constructs
2073  # get in the way of brace matching. See the testBuildClass test in
2074  # cpplint_unittest.py for an example of this.
2075  for obj in self.stack:
2076  if isinstance(obj, _ClassInfo):
2077  error(filename, obj.starting_linenum, 'build/class', 5,
2078  'Failed to find complete declaration of class %s' %
2079  obj.name)
2080  elif isinstance(obj, _NamespaceInfo):
2081  error(filename, obj.starting_linenum, 'build/namespaces', 5,
2082  'Failed to find complete declaration of namespace %s' %
2083  obj.name)
2084 
2085 
2086 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
2087  nesting_state, error):
2088  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
2089 
2090  Complain about several constructs which gcc-2 accepts, but which are
2091  not standard C++. Warning about these in lint is one way to ease the
2092  transition to new compilers.
2093  - put storage class first (e.g. "static const" instead of "const static").
2094  - "%lld" instead of "%qd" in printf-type functions.
2095  - "%1$d" is non-standard in printf-type functions.
2096  - "\%" is an undefined character escape sequence.
2097  - text after #endif is not allowed.
2098  - invalid inner-style forward declaration.
2099  - >? and <? operators, and their >?= and <?= cousins.
2100 
2101  Additionally, check for constructor/destructor style violations and reference
2102  members, as it is very convenient to do so while checking for
2103  gcc-2 compliance.
2104 
2105  Args:
2106  filename: The name of the current file.
2107  clean_lines: A CleansedLines instance containing the file.
2108  linenum: The number of the line to check.
2109  nesting_state: A _NestingState instance which maintains information about
2110  the current stack of nested blocks being parsed.
2111  error: A callable to which errors are reported, which takes 4 arguments:
2112  filename, line number, error level, and message
2113  """
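  # Illustrative examples: printf("%qd\n", v) is reported in favor of "%lld",
  # printf("%1$d", v) is reported as unconventional, and a "\%" inside a
  # string literal is reported as an undefined escape.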
2114 
2115  # Remove comments from the line, but leave in strings for now.
2116  line = clean_lines.lines[linenum]
2117 
2118  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
2119  error(filename, linenum, 'runtime/printf_format', 3,
2120  '%q in format strings is deprecated. Use %ll instead.')
2121 
2122  if Search(r'printf\s*\(.*".*%\d+\$', line):
2123  error(filename, linenum, 'runtime/printf_format', 2,
2124  '%N$ formats are unconventional. Try rewriting to avoid them.')
2125 
2126  # Remove escaped backslashes before looking for undefined escapes.
2127  line = line.replace('\\\\', '')
2128 
2129  if Search(r'("|\').*\\(%|\[|\(|{)', line):
2130  error(filename, linenum, 'build/printf_format', 3,
2131  '%, [, (, and { are undefined character escapes. Unescape them.')
2132 
2133  # For the rest, work with both comments and strings removed.
2134  line = clean_lines.elided[linenum]
2135 
2136  if Search(r'\b(const|volatile|void|char|short|int|long'
2137  r'|float|double|signed|unsigned'
2138  r'|schar|u?int8|u?int16|u?int32|u?int64)'
2139  r'\s+(register|static|extern|typedef)\b',
2140  line):
2141  error(filename, linenum, 'build/storage_class', 5,
2142  'Storage class (static, extern, typedef, etc) should be first.')
2143 
2144  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
2145  error(filename, linenum, 'build/endif_comment', 5,
2146  'Uncommented text after #endif is non-standard. Use a comment.')
2147 
2148  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
2149  error(filename, linenum, 'build/forward_decl', 5,
2150  'Inner-style forward declarations are invalid. Remove this line.')
2151 
2152  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
2153  line):
2154  error(filename, linenum, 'build/deprecated', 3,
2155  '>? and <? (max and min) operators are non-standard and deprecated.')
2156 
2157  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
2158  # TODO(unknown): Could it be expanded safely to arbitrary references,
2159  # without triggering too many false positives? The first
2160  # attempt triggered 5 warnings for mostly benign code in the regtest, hence
2161  # the restriction.
2162  # Here's the original regexp, for the reference:
2163  # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
2164  # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
2165  error(filename, linenum, 'runtime/member_string_references', 2,
2166  'const string& members are dangerous. It is much better to use '
2167  'alternatives, such as pointers or simple constants.')
2168 
2169  # Everything else in this function operates on class declarations.
2170  # Return early if the top of the nesting stack is not a class, or if
2171  # the class head is not completed yet.
2172  classinfo = nesting_state.InnermostClass()
2173  if not classinfo or not classinfo.seen_open_brace:
2174  return
2175 
2176  # The class may have been declared with namespace or classname qualifiers.
2177  # The constructor and destructor will not have those qualifiers.
2178  base_classname = classinfo.name.split('::')[-1]
2179 
2180  # Look for single-argument constructors that aren't marked explicit.
2181  # Technically a valid construct, but against style.
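  # Illustrative example: inside "class Foo", a line "  Foo(int x);" is
  # reported, while "  explicit Foo(int x);" is not, because the anchored
  # Match below no longer sees the class name right after the indentation.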
2182  args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
2183  % re.escape(base_classname),
2184  line)
2185  if (args and
2186  args.group(1) != 'void' and
2187  not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
2188  % re.escape(base_classname), args.group(1).strip())):
2189  error(filename, linenum, 'runtime/explicit', 5,
2190  'Single-argument constructors should be marked explicit.')
2191 
2192 
2193 def CheckSpacingForFunctionCall(filename, line, linenum, error):
2194  """Checks for the correctness of various spacing around function calls.
2195 
2196  Args:
2197  filename: The name of the current file.
2198  line: The text of the line to check.
2199  linenum: The number of the line to check.
2200  error: The function to call with any errors found.
2201  """
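  # Illustrative examples: "Foo( bar)" and "Foo (bar)" are reported by the
  # checks below, while "if (bar)" and "Foo(bar)" are not.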
2202 
2203  # Since function calls often occur inside if/for/while/switch
2204  # expressions - which have their own, more liberal conventions - we
2205  # first see if we should be looking inside such an expression for a
2206  # function call, to which we can apply more strict standards.
2207  fncall = line # if there's no control flow construct, look at whole line
2208  for pattern in (r'\bif\s*\((.*)\)\s*{',
2209  r'\bfor\s*\((.*)\)\s*{',
2210  r'\bwhile\s*\((.*)\)\s*[{;]',
2211  r'\bswitch\s*\((.*)\)\s*{'):
2212  match = Search(pattern, line)
2213  if match:
2214  fncall = match.group(1) # look inside the parens for function calls
2215  break
2216 
2217  # Except in if/for/while/switch, there should never be space
2218  # immediately inside parens (e.g. "f( 3, 4 )"). We make an exception
2219  # for nested parens ( (a+b) + c ). Likewise, there should never be
2220  # a space before a ( when it's a function argument. I assume it's a
2221  # function argument when the char before the whitespace is legal in
2222  # a function name (alnum + _) and we're not starting a macro. Also ignore
2223  # pointers and references to arrays and functions because they're too tricky:
2224  # we use a very simple way to recognize these:
2225  # " (something)(maybe-something)" or
2226  # " (something)(maybe-something," or
2227  # " (something)[something]"
2228  # Note that we assume the contents of [] to be short enough that
2229  # they'll never need to wrap.
2230  if ( # Ignore control structures.
2231  not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
2232  fncall) and
2233  # Ignore pointers/references to functions.
2234  not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
2235  # Ignore pointers/references to arrays.
2236  not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
2237  if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
2238  error(filename, linenum, 'whitespace/parens', 4,
2239  'Extra space after ( in function call')
2240  elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
2241  error(filename, linenum, 'whitespace/parens', 2,
2242  'Extra space after (')
2243  if (Search(r'\w\s+\(', fncall) and
2244  not Search(r'#\s*define|typedef', fncall) and
2245  not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
2246  error(filename, linenum, 'whitespace/parens', 4,
2247  'Extra space before ( in function call')
2248  # If the ) is followed only by a newline or a { + newline, assume it's
2249  # part of a control statement (if/while/etc), and don't complain
2250  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
2251  # If the closing parenthesis is preceded by only whitespaces,
2252  # try to give a more descriptive error message.
2253  if Search(r'^\s+\)', fncall):
2254  error(filename, linenum, 'whitespace/parens', 2,
2255  'Closing ) should be moved to the previous line')
2256  else:
2257  error(filename, linenum, 'whitespace/parens', 2,
2258  'Extra space before )')
2259 
2260 
2261 def IsBlankLine(line):
2262  """Returns true if the given line is blank.
2263 
2264  We consider a line to be blank if the line is empty or consists of
2265  only white spaces.
2266 
2267  Args:
2268  line: A line of a string.
2269 
2270  Returns:
2271  True, if the given line is blank.
2272  """
2273  return not line or line.isspace()
2274 
2275 
2276 def CheckForFunctionLengths(filename, clean_lines, linenum,
2277  function_state, error):
2278  """Reports long function bodies.
2279 
2280  For an overview why this is done, see:
2281  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
2282 
2283  Uses a simplistic algorithm assuming other style guidelines
2284  (especially spacing) are followed.
2285  Only checks unindented functions, so class members are unchecked.
2286  Trivial bodies are unchecked, so constructors with huge initializer lists
2287  may be missed.
2288  Blank/comment lines are not counted so as to avoid encouraging the removal
2289  of vertical space and comments just to get through a lint check.
2290  NOLINT *on the last line of a function* disables this check.
2291 
2292  Args:
2293  filename: The name of the current file.
2294  clean_lines: A CleansedLines instance containing the file.
2295  linenum: The number of the line to check.
2296  function_state: Current function name and lines in body so far.
2297  error: The function to call with any errors found.
2298  """
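  # Illustrative sketch of the flow: a line such as "void Frobnicate() {"
  # starts tracking via function_state.Begin(), a line containing only "}"
  # ends it via function_state.Check() and function_state.End(), and other
  # non-blank lines are counted with function_state.Count().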
2299  lines = clean_lines.lines
2300  line = lines[linenum]
2301  raw = clean_lines.raw_lines
2302  raw_line = raw[linenum]
2303  joined_line = ''
2304 
2305  starting_func = False
2306  regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
2307  match_result = Match(regexp, line)
2308  if match_result:
2309  # If the name is all caps and underscores, figure it's a macro and
2310  # ignore it, unless it's TEST or TEST_F.
2311  function_name = match_result.group(1).split()[-1]
2312  if function_name == 'TEST' or function_name == 'TEST_F' or (
2313  not Match(r'[A-Z_]+$', function_name)):
2314  starting_func = True
2315 
2316  if starting_func:
2317  body_found = False
2318  for start_linenum in xrange(linenum, clean_lines.NumLines()):
2319  start_line = lines[start_linenum]
2320  joined_line += ' ' + start_line.lstrip()
2321  if Search(r'(;|})', start_line): # Declarations and trivial functions
2322  body_found = True
2323  break # ... ignore
2324  elif Search(r'{', start_line):
2325  body_found = True
2326  function = Search(r'((\w|:)*)\(', line).group(1)
2327  if Match(r'TEST', function): # Handle TEST... macros
2328  parameter_regexp = Search(r'(\(.*\))', joined_line)
2329  if parameter_regexp: # Ignore bad syntax
2330  function += parameter_regexp.group(1)
2331  else:
2332  function += '()'
2333  function_state.Begin(function)
2334  break
2335  if not body_found:
2336  # No body for the function (or evidence of a non-function) was found.
2337  error(filename, linenum, 'readability/fn_size', 5,
2338  'Lint failed to find start of function body.')
2339  elif Match(r'^\}\s*$', line): # function end
2340  function_state.Check(error, filename, linenum)
2341  function_state.End()
2342  elif not Match(r'^\s*$', line):
2343  function_state.Count() # Count non-blank/non-comment lines.
2344 
2345 
2346 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
2347 
2348 
2349 def CheckComment(comment, filename, linenum, error):
2350  """Checks for common mistakes in TODO comments.
2351 
2352  Args:
2353  comment: The text of the comment from the line in question.
2354  filename: The name of the current file.
2355  linenum: The number of the line to check.
2356  error: The function to call with any errors found.
2357  """
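  # Illustrative example: for "// TODO(bob): fix", the pattern captures one
  # leading space, the username "(bob)", and a single space after the colon,
  # so none of the warnings below fire.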
2358  match = _RE_PATTERN_TODO.match(comment)
2359  if match:
2360  # One whitespace is correct; zero whitespace is handled elsewhere.
2361  leading_whitespace = match.group(1)
2362  if len(leading_whitespace) > 1:
2363  error(filename, linenum, 'whitespace/todo', 2,
2364  'Too many spaces before TODO')
2365 
2366  username = match.group(2)
2367  if not username:
2368  error(filename, linenum, 'readability/todo', 2,
2369  'Missing username in TODO; it should look like '
2370  '"// TODO(my_username): Stuff."')
2371 
2372  middle_whitespace = match.group(3)
2373  # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
2374  if middle_whitespace != ' ' and middle_whitespace != '':
2375  error(filename, linenum, 'whitespace/todo', 2,
2376  'TODO(my_username) should be followed by a space')
2377 
2378 def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
2379  """Checks for improper use of DISALLOW* macros.
2380 
2381  Args:
2382  filename: The name of the current file.
2383  clean_lines: A CleansedLines instance containing the file.
2384  linenum: The number of the line to check.
2385  nesting_state: A _NestingState instance which maintains information about
2386  the current stack of nested blocks being parsed.
2387  error: The function to call with any errors found.
2388  """
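  # Illustrative example: a DISALLOW_COPY_AND_ASSIGN(Foo) line in a public:
  # or protected: section of class Foo is reported; in a private: section it
  # is accepted.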
2389  line = clean_lines.elided[linenum] # get rid of comments and strings
2390 
2391  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
2392  r'DISALLOW_EVIL_CONSTRUCTORS|'
2393  r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
2394  if not matched:
2395  return
2396  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
2397  if nesting_state.stack[-1].access != 'private':
2398  error(filename, linenum, 'readability/constructors', 3,
2399  '%s must be in the private: section' % matched.group(1))
2400 
2401  else:
2402  # Found DISALLOW* macro outside a class declaration, or perhaps it
2403  # was used inside a function when it should have been part of the
2404  # class declaration. We could issue a warning here, but it
2405  # probably resulted in a compiler error already.
2406  pass
2407 
2408 
2409 def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
2410  """Find the corresponding > to close a template.
2411 
2412  Args:
2413  clean_lines: A CleansedLines instance containing the file.
2414  linenum: Current line number.
2415  init_suffix: Remainder of the current line after the initial <.
2416 
2417  Returns:
2418  True if a matching bracket exists.
2419  """
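  # Illustrative example: starting after the '<' in "set<int> s;", the scan
  # below finds the matching '>' and returns True; for the '<' in "if (a<b)"
  # it hits ')' first while still expecting '>' and returns False.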
2420  line = init_suffix
2421  nesting_stack = ['<']
2422  while True:
2423  # Find the next operator that can tell us whether < is used as an
2424  # opening bracket or as a less-than operator. We only want to
2425  # warn on the latter case.
2426  #
2427  # We could also check all other operators and terminate the search
2428  # early, e.g. if we got something like this "a<b+c", the "<" is
2429  # most likely a less-than operator, but then we will get false
2430  # positives for default arguments and other template expressions.
2431  match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
2432  if match:
2433  # Found an operator, update nesting stack
2434  operator = match.group(1)
2435  line = match.group(2)
2436 
2437  if nesting_stack[-1] == '<':
2438  # Expecting closing angle bracket
2439  if operator in ('<', '(', '['):
2440  nesting_stack.append(operator)
2441  elif operator == '>':
2442  nesting_stack.pop()
2443  if not nesting_stack:
2444  # Found matching angle bracket
2445  return True
2446  elif operator == ',':
2447  # Got a comma after a bracket, this is most likely a template
2448  # argument. We have not seen a closing angle bracket yet, but
2449  # it's probably a few lines later if we look for it, so just
2450  # return early here.
2451  return True
2452  else:
2453  # Got some other operator.
2454  return False
2455 
2456  else:
2457  # Expecting closing parenthesis or closing bracket
2458  if operator in ('<', '(', '['):
2459  nesting_stack.append(operator)
2460  elif operator in (')', ']'):
2461  # We don't bother checking for matching () or []. If we got
2462  # something like (] or [), it would have been a syntax error.
2463  nesting_stack.pop()
2464 
2465  else:
2466  # Scan the next line
2467  linenum += 1
2468  if linenum >= len(clean_lines.elided):
2469  break
2470  line = clean_lines.elided[linenum]
2471 
2472  # Exhausted all remaining lines and still no matching angle bracket.
2473  # Most likely the input was incomplete, otherwise we should have
2474  # seen a semicolon and returned early.
2475  return True
2476 
2477 
2478 def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
2479  """Find the corresponding < that started a template.
2480 
2481  Args:
2482  clean_lines: A CleansedLines instance containing the file.
2483  linenum: Current line number.
2484  init_prefix: Part of the current line before the initial >.
2485 
2486  Returns:
2487  True if a matching bracket exists.
2488  """
2489  line = init_prefix
2490  nesting_stack = ['>']
2491  while True:
2492  # Find the previous operator
2493  match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
2494  if match:
2495  # Found an operator, update nesting stack
2496  operator = match.group(2)
2497  line = match.group(1)
2498 
2499  if nesting_stack[-1] == '>':
2500  # Expecting opening angle bracket
2501  if operator in ('>', ')', ']'):
2502  nesting_stack.append(operator)
2503  elif operator == '<':
2504  nesting_stack.pop()
2505  if not nesting_stack:
2506  # Found matching angle bracket
2507  return True
2508  elif operator == ',':
2509  # Got a comma before a bracket, this is most likely a
2510  # template argument. The opening angle bracket is probably
2511  # there if we look for it, so just return early here.
2512  return True
2513  else:
2514  # Got some other operator.
2515  return False
2516 
2517  else:
2518  # Expecting opening parenthesis or opening bracket
2519  if operator in ('>', ')', ']'):
2520  nesting_stack.append(operator)
2521  elif operator in ('(', '['):
2522  nesting_stack.pop()
2523 
2524  else:
2525  # Scan the previous line
2526  linenum -= 1
2527  if linenum < 0:
2528  break
2529  line = clean_lines.elided[linenum]
2530 
2531  # Exhausted all earlier lines and still no matching angle bracket.
2532  return False
2533 
2534 
2535 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
2536  """Checks for the correctness of various spacing issues in the code.
2537 
2538  Things we check for: spaces around operators, spaces after
2539  if/for/while/switch, no spaces around parens in function calls, two
2540  spaces between code and comment, don't start a block with a blank
2541  line, don't end a function with a blank line, don't add a blank line
2542  after public/protected/private, don't have too many blank lines in a row.
2543 
2544  Args:
2545  filename: The name of the current file.
2546  clean_lines: A CleansedLines instance containing the file.
2547  linenum: The number of the line to check.
2548  nesting_state: A _NestingState instance which maintains information about
2549  the current stack of nested blocks being parsed.
2550  error: The function to call with any errors found.
2551  """
2552 
2553  # Don't use "elided" lines here, otherwise we can't check commented lines.
2554  # Don't want to use "raw" either, because we don't want to check inside C++11
2555  # raw strings.
2556  raw = clean_lines.lines_without_raw_strings
2557  line = raw[linenum]
2558 
2559  # Before nixing comments, check if the line is blank for no good
2560  # reason. This includes the first line after a block is opened, and
2561  # blank lines at the end of a function (i.e., right before a line like '}').
2562  #
2563  # Skip all the blank line checks if we are immediately inside a
2564  # namespace body. In other words, don't issue blank line warnings
2565  # for this block:
2566  # namespace {
2567  #
2568  # }
2569  #
2570  # A warning about missing end of namespace comments will be issued instead.
2571  if IsBlankLine(line) and not nesting_state.InNamespaceBody():
2572  elided = clean_lines.elided
2573  prev_line = elided[linenum - 1]
2574  prevbrace = prev_line.rfind('{')
2575  # TODO(unknown): Don't complain if line before blank line, and line after,
2576  # both start with alnums and are indented the same amount.
2577  # This ignores whitespace at the start of a namespace block
2578  # because those are not usually indented.
2579  if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
2580  # OK, we have a blank line at the start of a code block. Before we
2581  # complain, we check if it is an exception to the rule: The previous
2582  # non-empty line has the parameters of a function header that are indented
2583  # 4 spaces (because they did not fit in an 80 column line when placed on
2584  # the same line as the function name). We also check for the case where
2585  # the previous line is indented 6 spaces, which may happen when the
2586  # initializers of a constructor do not fit into an 80 column line.
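  # Illustrative example of the exception below: a blank line right after a
  # wrapped function header such as
  #   void Frobnicate(int a,
  #       int b) {
  # is not reported, because the previous non-blank line (indented four
  # spaces and ending in ") {") matches the heuristic.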
2587  exception = False
2588  if Match(r' {6}\w', prev_line): # Initializer list?
2589  # We are looking for the opening column of initializer list, which
2590  # should be indented 4 spaces to cause 6 space indentation afterwards.
2591  search_position = linenum-2
2592  while (search_position >= 0
2593  and Match(r' {6}\w', elided[search_position])):
2594  search_position -= 1
2595  exception = (search_position >= 0
2596  and elided[search_position][:5] == ' :')
2597  else:
2598  # Search for the function arguments or an initializer list. We use a
2599  # simple heuristic here: if the line is indented 4 spaces and we have a
2600  # closing paren, without the opening paren, followed by an opening brace
2601  # or colon (for initializer lists) we assume that it is the last line of
2602  # a function header. If we have a colon indented 4 spaces, it is an
2603  # initializer list.
2604  exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
2605  prev_line)
2606  or Match(r' {4}:', prev_line))
2607 
2608  if not exception:
2609  error(filename, linenum, 'whitespace/blank_line', 2,
2610  'Redundant blank line at the start of a code block '
2611  'should be deleted.')
2612  # Ignore blank lines at the end of a block in a long if-else
2613  # chain, like this:
2614  # if (condition1) {
2615  # // Something followed by a blank line
2616  #
2617  # } else if (condition2) {
2618  # // Something else
2619  # }
2620  if linenum + 1 < clean_lines.NumLines():
2621  next_line = raw[linenum + 1]
2622  if (next_line
2623  and Match(r'\s*}', next_line)
2624  and next_line.find('} else ') == -1):
2625  error(filename, linenum, 'whitespace/blank_line', 3,
2626  'Redundant blank line at the end of a code block '
2627  'should be deleted.')
2628 
2629  matched = Match(r'\s*(public|protected|private):', prev_line)
2630  if matched:
2631  error(filename, linenum, 'whitespace/blank_line', 3,
2632  'Do not leave a blank line after "%s:"' % matched.group(1))
2633 
2634  # Next, we complain if there's a comment too near the text
2635  commentpos = line.find('//')
2636  if commentpos != -1:
2637  # Check if the // may be in quotes. If so, ignore it
2638  # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
2639  if (line.count('"', 0, commentpos) -
2640  line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
2641  # Allow one space for new scopes, two spaces otherwise:
2642  if (not Match(r'^\s*{ //', line) and
2643  ((commentpos >= 1 and
2644  line[commentpos-1] not in string.whitespace) or
2645  (commentpos >= 2 and
2646  line[commentpos-2] not in string.whitespace))):
2647  error(filename, linenum, 'whitespace/comments', 2,
2648  'At least two spaces is best between code and comments')
2649  # There should always be a space between the // and the comment
2650  commentend = commentpos + 2
2651  if commentend < len(line) and not line[commentend] == ' ':
2652  # but some lines are exceptions -- e.g. if they're big
2653  # comment delimiters like:
2654  # //----------------------------------------------------------
2655  # or are an empty C++ style Doxygen comment, like:
2656  # ///
2657  # or C++ style Doxygen comments placed after the variable:
2658  # ///< Header comment
2659  # //!< Header comment
2660  # or they begin with multiple slashes followed by a space:
2661  # //////// Header comment
2662  match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
2663  Search(r'^/$', line[commentend:]) or
2664  Search(r'^!< ', line[commentend:]) or
2665  Search(r'^/< ', line[commentend:]) or
2666  Search(r'^/+ ', line[commentend:]))
2667  if not match:
2668  error(filename, linenum, 'whitespace/comments', 4,
2669  'Should have a space between // and comment')
2670  CheckComment(line[commentpos:], filename, linenum, error)
2671 
2672  line = clean_lines.elided[linenum] # get rid of comments and strings
2673 
2674  # Don't try to do spacing checks for operator methods
2675  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
2676 
2677  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
2678  # Otherwise not. Note we only check for non-spaces on *both* sides;
2679  # sometimes people put non-spaces on one side when aligning ='s among
2680  # many lines (not that this is behavior that I approve of...)
2681  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
2682  error(filename, linenum, 'whitespace/operators', 4,
2683  'Missing spaces around =')
2684 
2685  # It's ok not to have spaces around binary operators like + - * /, but if
2686  # there's too little whitespace, we get concerned. It's hard to tell,
2687  # though, so we punt on this one for now. TODO.
2688 
2689  # You should always have whitespace around binary operators.
2690  #
2691  # Check <= and >= first to avoid false positives with < and >, then
2692  # check non-include lines for spacing around < and >.
2693  match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
2694  if match:
2695  error(filename, linenum, 'whitespace/operators', 3,
2696  'Missing spaces around %s' % match.group(1))
2697  # We allow no-spaces around << when used like this: 10<<20, but
2698  # not otherwise (particularly, not when used as streams)
2699  # Also ignore using ns::operator<<;
2700  match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
2701  if (match and
2702  not (match.group(1).isdigit() and match.group(2).isdigit()) and
2703  not (match.group(1) == 'operator' and match.group(2) == ';')):
2704  error(filename, linenum, 'whitespace/operators', 3,
2705  'Missing spaces around <<')
2706  elif not Match(r'#.*include', line):
2707  # Avoid false positives on ->
2708  reduced_line = line.replace('->', '')
2709 
2710  # Look for < that is not surrounded by spaces. This is only
2711  # triggered if both sides are missing spaces, even though
2712  # technically we should flag if at least one side is missing a
2713  # space. This is done to avoid some false positives with shifts.
2714  match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
2715  if (match and
2716  not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
2717  error(filename, linenum, 'whitespace/operators', 3,
2718  'Missing spaces around <')
2719 
2720  # Look for > that is not surrounded by spaces. Similar to the
2721  # above, we only trigger if both sides are missing spaces to avoid
2722  # false positives with shifts.
2723  match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
2724  if (match and
2725  not FindPreviousMatchingAngleBracket(clean_lines, linenum,
2726  match.group(1))):
2727  error(filename, linenum, 'whitespace/operators', 3,
2728  'Missing spaces around >')
2729 
2730  # We allow no-spaces around >> for almost anything. This is because
2731  # C++11 allows ">>" to close nested templates, which accounts for
2732  # most cases when ">>" is not followed by a space.
2733  #
2734  # We still warn on ">>" followed by alpha character, because that is
2735  # likely due to ">>" being used for right shifts, e.g.:
2736  # value >> alpha
2737  #
2738  # When ">>" is used to close templates, the alphanumeric letter that
2739  # follows would be part of an identifier, and there should still be
2740  # a space separating the template type and the identifier.
2741  # type<type<type>> alpha
2742  match = Search(r'>>[a-zA-Z_]', line)
2743  if match:
2744  error(filename, linenum, 'whitespace/operators', 3,
2745  'Missing spaces around >>')
2746 
2747  # There shouldn't be space around unary operators
2748  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
2749  if match:
2750  error(filename, linenum, 'whitespace/operators', 4,
2751  'Extra space for operator %s' % match.group(1))
2752 
2753  # A pet peeve of mine: no spaces after an if, while, switch, or for
2754  match = Search(r' (if\(|for\(|while\(|switch\()', line)
2755  if match:
2756  error(filename, linenum, 'whitespace/parens', 5,
2757  'Missing space before ( in %s' % match.group(1))
2758 
2759  # For if/for/while/switch, the left and right parens should be
2760  # consistent about how many spaces are inside the parens, and
2761  # there should either be zero or one spaces inside the parens.
2762  # We don't want: "if ( foo)" or "if ( foo )".
2763  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
2764  match = Search(r'\b(if|for|while|switch)\s*'
2765  r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
2766  line)
2767  if match:
2768  if len(match.group(2)) != len(match.group(4)):
2769  if not (match.group(3) == ';' and
2770  len(match.group(2)) == 1 + len(match.group(4)) or
2771  not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
2772  error(filename, linenum, 'whitespace/parens', 5,
2773  'Mismatching spaces inside () in %s' % match.group(1))
2774  if len(match.group(2)) not in [0, 1]:
2775  error(filename, linenum, 'whitespace/parens', 5,
2776  'Should have zero or one spaces inside ( and ) in %s' %
2777  match.group(1))
2778 
2779  # You should always have a space after a comma (either as fn arg or operator)
2780  #
2781  # This does not apply when the non-space character following the
2782  # comma is another comma, since the only time when that happens is
2783  # for empty macro arguments.
2784  #
2785  # We run this check in two passes: first pass on elided lines to
2786  # verify that lines contain missing whitespaces, second pass on raw
2787  # lines to confirm that those missing whitespaces are not due to
2788  # elided comments.
2789  if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
2790  error(filename, linenum, 'whitespace/comma', 3,
2791  'Missing space after ,')
2792 
2793  # You should always have a space after a semicolon
2794  # except for few corner cases
2795  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
2796  # space after ;
2797  if Search(r';[^\s};\\)/]', line):
2798  error(filename, linenum, 'whitespace/semicolon', 3,
2799  'Missing space after ;')
2800 
2801  # Next we will look for issues with function calls.
2802  CheckSpacingForFunctionCall(filename, line, linenum, error)
2803 
2804  # Except after an opening paren, or after another opening brace (in case of
2805  # an initializer list, for instance), you should have spaces before your
2806  # braces. And since you should never have braces at the beginning of a line,
2807  # this is an easy test.
2808  match = Match(r'^(.*[^ ({]){', line)
2809  if match:
2810  # Try a bit harder to check for brace initialization. This
2811  # happens in one of the following forms:
2812  # Constructor() : initializer_list_{} { ... }
2813  # Constructor{}.MemberFunction()
2814  # Type variable{};
2815  # FunctionCall(type{}, ...);
2816  # LastArgument(..., type{});
2817  # LOG(INFO) << type{} << " ...";
2818  # map_of_type[{...}] = ...;
2819  #
2820  # We check for the character following the closing brace, and
2821  # silence the warning if it's one of those listed above, i.e.
2822  # "{.;,)<]".
2823  #
2824  # To account for nested initializer list, we allow any number of
2825  # closing braces up to "{;,)<". We can't simply silence the
2826  # warning on first sight of closing brace, because that would
2827  # cause false negatives for things that are not initializer lists.
2828  # Silence this: But not this:
2829  # Outer{ if (...) {
2830  # Inner{...} if (...){ // Missing space before {
2831  # }; }
2832  #
2833  # There is a false negative with this approach if people inserted
2834  # spurious semicolons, e.g. "if (cond){};", but we will catch the
2835  # spurious semicolon with a separate check.
2836  (endline, endlinenum, endpos) = CloseExpression(
2837  clean_lines, linenum, len(match.group(1)))
2838  trailing_text = ''
2839  if endpos > -1:
2840  trailing_text = endline[endpos:]
2841  for offset in xrange(endlinenum + 1,
2842  min(endlinenum + 3, clean_lines.NumLines() - 1)):
2843  trailing_text += clean_lines.elided[offset]
2844  if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
2845  error(filename, linenum, 'whitespace/braces', 5,
2846  'Missing space before {')
2847 
2848  # Make sure '} else {' has spaces.
2849  if Search(r'}else', line):
2850  error(filename, linenum, 'whitespace/braces', 5,
2851  'Missing space before else')
2852 
2853  # You shouldn't have spaces before your brackets, except maybe after
2854  # 'delete []' or 'new char * []'.
2855  if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
2856  error(filename, linenum, 'whitespace/braces', 5,
2857  'Extra space before [')
2858 
2859  # You shouldn't have a space before a semicolon at the end of the line.
2860  # There's a special case for "for" since the style guide allows space before
2861  # the semicolon there.
2862  if Search(r':\s*;\s*$', line):
2863  error(filename, linenum, 'whitespace/semicolon', 5,
2864  'Semicolon defining empty statement. Use {} instead.')
2865  elif Search(r'^\s*;\s*$', line):
2866  error(filename, linenum, 'whitespace/semicolon', 5,
2867  'Line contains only semicolon. If this should be an empty statement, '
2868  'use {} instead.')
2869  elif (Search(r'\s+;\s*$', line) and
2870  not Search(r'\bfor\b', line)):
2871  error(filename, linenum, 'whitespace/semicolon', 5,
2872  'Extra space before last semicolon. If this should be an empty '
2873  'statement, use {} instead.')
2874 
2875  # In range-based for, we want spaces before and after the colon, but
2876  # not around "::" tokens that might appear.
2877  if (Search('for *\(.*[^:]:[^: ]', line) or
2878  Search('for *\(.*[^: ]:[^:]', line)):
2879  error(filename, linenum, 'whitespace/forcolon', 2,
2880  'Missing space around colon in range-based for loop')
2881 
2882 
2883 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
2884  """Checks for additional blank line issues related to sections.
2885 
2886  Currently the only thing checked here is blank line before protected/private.
2887 
2888  Args:
2889  filename: The name of the current file.
2890  clean_lines: A CleansedLines instance containing the file.
2891  class_info: A _ClassInfo objects.
2892  linenum: The number of the line to check.
2893  error: The function to call with any errors found.
2894  """
2895  # Skip checks if the class is small, where small means 25 lines or less.
2896  # 25 lines seems like a good cutoff since that's the usual height of
2897  # terminals, and any class that can't fit in one screen can't really
2898  # be considered "small".
2899  #
2900  # Also skip checks if we are on the first line. This accounts for
2901  # classes that look like
2902  # class Foo { public: ... };
2903  #
2904  # If we didn't find the end of the class, last_line would be zero,
2905  # and the check will be skipped by the first condition.
2906  if (class_info.last_line - class_info.starting_linenum <= 24 or
2907  linenum <= class_info.starting_linenum):
2908  return
2909 
2910  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
2911  if matched:
2912  # Issue warning if the line before public/protected/private was
2913  # not a blank line, but don't do this if the previous line contains
2914  # "class" or "struct". This can happen two ways:
2915  # - We are at the beginning of the class.
2916  # - We are forward-declaring an inner class that is semantically
2917  # private, but needed to be public for implementation reasons.
2918  # Also ignores cases where the previous line ends with a backslash as can be
2919  # common when defining classes in C macros.
2920  prev_line = clean_lines.lines[linenum - 1]
2921  if (not IsBlankLine(prev_line) and
2922  not Search(r'\b(class|struct)\b', prev_line) and
2923  not Search(r'\\$', prev_line)):
2924  # Try a bit harder to find the beginning of the class. This is to
2925  # account for multi-line base-specifier lists, e.g.:
2926  # class Derived
2927  # : public Base {
2928  end_class_head = class_info.starting_linenum
2929  for i in range(class_info.starting_linenum, linenum):
2930  if Search(r'\{\s*$', clean_lines.lines[i]):
2931  end_class_head = i
2932  break
2933  if end_class_head < linenum - 1:
2934  error(filename, linenum, 'whitespace/blank_line', 3,
2935  '"%s:" should be preceded by a blank line' % matched.group(1))
2936 
2937 
2938 def GetPreviousNonBlankLine(clean_lines, linenum):
2939  """Return the most recent non-blank line and its line number.
2940 
2941  Args:
2942  clean_lines: A CleansedLines instance containing the file contents.
2943  linenum: The number of the line to check.
2944 
2945  Returns:
2946  A tuple with two elements. The first element is the contents of the last
2947  non-blank line before the current line, or the empty string if this is the
2948  first non-blank line. The second is the line number of that line, or -1
2949  if this is the first non-blank line.
2950  """
2951 
2952  prevlinenum = linenum - 1
2953  while prevlinenum >= 0:
2954  prevline = clean_lines.elided[prevlinenum]
2955  if not IsBlankLine(prevline): # if not a blank line...
2956  return (prevline, prevlinenum)
2957  prevlinenum -= 1
2958  return ('', -1)
2959 
2960 
2961 def CheckBraces(filename, clean_lines, linenum, error):
2962  """Looks for misplaced braces (e.g. at the end of line).
2963 
2964  Args:
2965  filename: The name of the current file.
2966  clean_lines: A CleansedLines instance containing the file.
2967  linenum: The number of the line to check.
2968  error: The function to call with any errors found.
2969  """
2970 
2971  line = clean_lines.elided[linenum] # get rid of comments and strings
2972 
2973  if Match(r'\s*{\s*$', line):
2974  # We allow an open brace to start a line in the case where someone is using
2975  # braces in a block to explicitly create a new scope, which is commonly used
2976  # to control the lifetime of stack-allocated variables. Braces are also
2977  # used for brace initializers inside function calls. We don't detect this
2978  # perfectly: we just don't complain if the last non-whitespace character on
2979  # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
2980  # previous line starts a preprocessor block.
2981  prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
2982  if (not Search(r'[,;:}{(]\s*$', prevline) and
2983  not Match(r'\s*#', prevline)):
2984  error(filename, linenum, 'whitespace/braces', 4,
2985  '{ should almost always be at the end of the previous line')
2986 
2987  # An else clause should be on the same line as the preceding closing brace.
2988  if Match(r'\s*else\s*', line):
2989  prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
2990  if Match(r'\s*}\s*$', prevline):
2991  error(filename, linenum, 'whitespace/newline', 4,
2992  'An else should appear on the same line as the preceding }')
2993 
2994  # If braces come on one side of an else, they should be on both.
2995  # However, we have to worry about "else if" that spans multiple lines!
2996  if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
2997  if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
2998  # find the ( after the if
2999  pos = line.find('else if')
3000  pos = line.find('(', pos)
3001  if pos > 0:
3002  (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
3003  if endline[endpos:].find('{') == -1: # must be brace after if
3004  error(filename, linenum, 'readability/braces', 5,
3005  'If an else has a brace on one side, it should have it on both')
3006  else: # common case: else not followed by a multi-line if
3007  error(filename, linenum, 'readability/braces', 5,
3008  'If an else has a brace on one side, it should have it on both')
3009 
3010  # Likewise, an else should never have the else clause on the same line
3011  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
3012  error(filename, linenum, 'whitespace/newline', 4,
3013  'Else clause should never be on same line as else (use 2 lines)')
3014 
3015  # In the same way, a do/while should never be on one line
3016  if Match(r'\s*do [^\s{]', line):
3017  error(filename, linenum, 'whitespace/newline', 4,
3018  'do/while clauses should not be on a single line')
3019 
3020  # Block bodies should not be followed by a semicolon. Due to C++11
3021  # brace initialization, there are more places where semicolons are
3022  # required than not, so we use a whitelist approach to check these
3023  # rather than a blacklist. These are the places where "};" should
3024  # be replaced by just "}":
3025  # 1. Some flavor of block following closing parenthesis:
3026  # for (;;) {};
3027  # while (...) {};
3028  # switch (...) {};
3029  # Function(...) {};
3030  # if (...) {};
3031  # if (...) else if (...) {};
3032  #
3033  # 2. else block:
3034  # if (...) else {};
3035  #
3036  # 3. const member function:
3037  # Function(...) const {};
3038  #
3039  # 4. Block following some statement:
3040  # x = 42;
3041  # {};
3042  #
3043  # 5. Block at the beginning of a function:
3044  # Function(...) {
3045  # {};
3046  # }
3047  #
3048  # Note that naively checking for the preceding "{" will also match
3049  # braces inside multi-dimensional arrays, but this is fine since
3050  # that expression will not contain semicolons.
3051  #
3052  # 6. Block following another block:
3053  # while (true) {}
3054  # {};
3055  #
3056  # 7. End of namespaces:
3057  # namespace {};
3058  #
3059  # These semicolons seem far more common than other kinds of
3060  # redundant semicolons, possibly due to people converting classes
3061  # to namespaces. For now we do not warn for this case.
3062  #
3063  # Try matching case 1 first.
3064  match = Match(r'^(.*\)\s*)\{', line)
3065  if match:
3066  # Matched closing parenthesis (case 1). Check the token before the
3067  # matching opening parenthesis, and don't warn if it looks like a
3068  # macro. This avoids these false positives:
3069  # - macro that defines a base class
3070  # - multi-line macro that defines a base class
3071  # - macro that defines the whole class-head
3072  #
3073  # But we still issue warnings for macros that we know are safe to
3074  # warn, specifically:
3075  # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
3076  # - TYPED_TEST
3077  # - INTERFACE_DEF
3078  # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
3079  #
3080  # We implement a whitelist of safe macros instead of a blacklist of
3081  # unsafe macros, even though the latter appears less frequently in
3082  # google code and would have been easier to implement. This is because
3083  # the cost of getting the whitelist wrong is only some extra
3084  # semicolons, while the cost of getting the blacklist wrong
3085  # would be compile errors.
3086  #
3087  # In addition to macros, we also don't want to warn on compound
3088  # literals.
3089  closing_brace_pos = match.group(1).rfind(')')
3090  opening_parenthesis = ReverseCloseExpression(
3091  clean_lines, linenum, closing_brace_pos)
3092  if opening_parenthesis[2] > -1:
3093  line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
3094  macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
3095  if ((macro and
3096  macro.group(1) not in (
3097  'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
3098  'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
3099  'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
3100  Search(r'\s+=\s*$', line_prefix)):
3101  match = None
3102 
3103  else:
3104  # Try matching cases 2-3.
3105  match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
3106  if not match:
3107  # Try matching cases 4-6. These are always matched on separate lines.
3108  #
3109  # Note that we can't simply concatenate the previous line to the
3110  # current line and do a single match, otherwise we may output
3111  # duplicate warnings for the blank line case:
3112  # if (cond) {
3113  # // blank line
3114  # }
3115  prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3116  if prevline and Search(r'[;{}]\s*$', prevline):
3117  match = Match(r'^(\s*)\{', line)
3118 
3119  # Check matching closing brace
3120  if match:
3121  (endline, endlinenum, endpos) = CloseExpression(
3122  clean_lines, linenum, len(match.group(1)))
3123  if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
3124  # Current {} pair is eligible for semicolon check, and we have found
3125  # the redundant semicolon, output warning here.
3126  #
3127  # Note: because we are scanning forward for opening braces, and
3128  # outputting warnings for the matching closing brace, if there are
3129  # nested blocks with trailing semicolons, we will get the error
3130  # messages in reversed order.
3131  error(filename, endlinenum, 'readability/braces', 4,
3132  "You don't need a ; after a }")
3133 
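# Illustrative examples (not part of cpplint): constructs flagged by
# CheckBraces above:
#
#   if (x)
#   {              // '{ should almost always be at the end of the previous line'
#     Foo();
#   } else         // 'If an else has a brace on one side, it should have it on both'
#     Bar();
#
#   while (done) {};   // "You don't need a ; after a }"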
3134 
3135 def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
3136  """Look for empty loop/conditional body with only a single semicolon.
3137 
3138  Args:
3139  filename: The name of the current file.
3140  clean_lines: A CleansedLines instance containing the file.
3141  linenum: The number of the line to check.
3142  error: The function to call with any errors found.
3143  """
3144 
3145  # Search for loop keywords at the beginning of the line. Because only
3146  # whitespace is allowed before the keywords, this will also ignore most
3147  # do-while-loops, since those lines should start with a closing brace.
3148  #
3149  # We also check "if" blocks here, since an empty conditional block
3150  # is likely an error.
3151  line = clean_lines.elided[linenum]
3152  matched = Match(r'\s*(for|while|if)\s*\(', line)
3153  if matched:
3154  # Find the end of the conditional expression
3155  (end_line, end_linenum, end_pos) = CloseExpression(
3156  clean_lines, linenum, line.find('('))
3157 
3158  # Output warning if what follows the condition expression is a semicolon.
3159  # No warning for all other cases, including whitespace or newline, since we
3160  # have a separate check for semicolons preceded by whitespace.
3161  if end_pos >= 0 and Match(r';', end_line[end_pos:]):
3162  if matched.group(1) == 'if':
3163  error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
3164  'Empty conditional bodies should use {}')
3165  else:
3166  error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
3167  'Empty loop bodies should use {} or continue')
3168 
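# Illustrative examples (not part of cpplint): lines flagged by
# CheckEmptyBlockBody above:
#
#   if (error);        // 'Empty conditional bodies should use {}'
#   while (count--);   // 'Empty loop bodies should use {} or continue'
#
# A do-while such as '} while (count--);' is not matched because the keyword
# is not at the beginning of the line.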
3169 
3170 def CheckCheck(filename, clean_lines, linenum, error):
3171  """Checks the use of CHECK and EXPECT macros.
3172 
3173  Args:
3174  filename: The name of the current file.
3175  clean_lines: A CleansedLines instance containing the file.
3176  linenum: The number of the line to check.
3177  error: The function to call with any errors found.
3178  """
3179 
3180  # Decide the set of replacement macros that should be suggested
3181  lines = clean_lines.elided
3182  check_macro = None
3183  start_pos = -1
3184  for macro in _CHECK_MACROS:
3185  i = lines[linenum].find(macro)
3186  if i >= 0:
3187  check_macro = macro
3188 
3189  # Find opening parenthesis. Do a regular expression match here
3190  # to make sure that we are matching the expected CHECK macro, as
3191  # opposed to some other macro that happens to contain the CHECK
3192  # substring.
3193  matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
3194  if not matched:
3195  continue
3196  start_pos = len(matched.group(1))
3197  break
3198  if not check_macro or start_pos < 0:
3199  # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
3200  return
3201 
3202  # Find end of the boolean expression by matching parentheses
3203  (last_line, end_line, end_pos) = CloseExpression(
3204  clean_lines, linenum, start_pos)
3205  if end_pos < 0:
3206  return
3207  if linenum == end_line:
3208  expression = lines[linenum][start_pos + 1:end_pos - 1]
3209  else:
3210  expression = lines[linenum][start_pos + 1:]
3211  for i in xrange(linenum + 1, end_line):
3212  expression += lines[i]
3213  expression += last_line[0:end_pos - 1]
3214 
3215  # Parse expression so that we can take parentheses into account.
3216  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
3217  # which is not replaceable by CHECK_LE.
3218  lhs = ''
3219  rhs = ''
3220  operator = None
3221  while expression:
3222  matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
3223  r'==|!=|>=|>|<=|<|\()(.*)$', expression)
3224  if matched:
3225  token = matched.group(1)
3226  if token == '(':
3227  # Parenthesized operand
3228  expression = matched.group(2)
3229  (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
3230  if end < 0:
3231  return # Unmatched parenthesis
3232  lhs += '(' + expression[0:end]
3233  expression = expression[end:]
3234  elif token in ('&&', '||'):
3235  # Logical and/or operators. This means the expression
3236  # contains more than one term, for example:
3237  # CHECK(42 < a && a < b);
3238  #
3239  # These are not replaceable with CHECK_LE, so bail out early.
3240  return
3241  elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
3242  # Non-relational operator
3243  lhs += token
3244  expression = matched.group(2)
3245  else:
3246  # Relational operator
3247  operator = token
3248  rhs = matched.group(2)
3249  break
3250  else:
3251  # Unparenthesized operand. Instead of appending to lhs one character
3252  # at a time, we do another regular expression match to consume several
3253  # characters at once if possible. Trivial benchmark shows that this
3254  # is more efficient when the operands are longer than a single
3255  # character, which is generally the case.
3256  matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
3257  if not matched:
3258  matched = Match(r'^(\s*\S)(.*)$', expression)
3259  if not matched:
3260  break
3261  lhs += matched.group(1)
3262  expression = matched.group(2)
3263 
3264  # Only apply checks if we got all parts of the boolean expression
3265  if not (lhs and operator and rhs):
3266  return
3267 
3268  # Check that rhs does not contain logical operators. We already know
3269  # that lhs is fine since the loop above parses out && and ||.
3270  if rhs.find('&&') > -1 or rhs.find('||') > -1:
3271  return
3272 
3273  # At least one of the operands must be a constant literal. This is
3274  # to avoid suggesting replacements for unprintable things like
3275  # CHECK(variable != iterator)
3276  #
3277  # The following pattern matches decimal, hex integers, strings, and
3278  # characters (in that order).
3279  lhs = lhs.strip()
3280  rhs = rhs.strip()
3281  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
3282  if Match(match_constant, lhs) or Match(match_constant, rhs):
3283  # Note: since we know both lhs and rhs, we can provide a more
3284  # descriptive error message like:
3285  # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
3286  # Instead of:
3287  # Consider using CHECK_EQ instead of CHECK(a == b)
3288  #
3289  # We are still keeping the less descriptive message because if lhs
3290  # or rhs gets long, the error message might become unreadable.
3291  error(filename, linenum, 'readability/check', 2,
3292  'Consider using %s instead of %s(a %s b)' % (
3293  _CHECK_REPLACEMENT[check_macro][operator],
3294  check_macro, operator))
3295 
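# Illustrative examples (not part of cpplint), assuming _CHECK_REPLACEMENT maps
# CHECK with '==' to CHECK_EQ and so on:
#
#   CHECK(x == 42);              // 'Consider using CHECK_EQ instead of CHECK(a == b)'
#   CHECK(42 < a && a < b);      // not flagged: '&&' means more than one comparison
#   CHECK(variable != iterator)  // not flagged: neither operand is a constant literal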
3296 
3297 def CheckAltTokens(filename, clean_lines, linenum, error):
3298  """Check alternative keywords being used in boolean expressions.
3299 
3300  Args:
3301  filename: The name of the current file.
3302  clean_lines: A CleansedLines instance containing the file.
3303  linenum: The number of the line to check.
3304  error: The function to call with any errors found.
3305  """
3306  line = clean_lines.elided[linenum]
3307 
3308  # Avoid preprocessor lines
3309  if Match(r'^\s*#', line):
3310  return
3311 
3312  # Last ditch effort to avoid multi-line comments. This will not help
3313  # if the comment started before the current line or ended after the
3314  # current line, but it catches most of the false positives. At least,
3315  # it provides a way to work around this warning for people who use
3316  # multi-line comments in preprocessor macros.
3317  #
3318  # TODO(unknown): remove this once cpplint has better support for
3319  # multi-line comments.
3320  if line.find('/*') >= 0 or line.find('*/') >= 0:
3321  return
3322 
3323  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
3324  error(filename, linenum, 'readability/alt_tokens', 2,
3325  'Use operator %s instead of %s' % (
3326  _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
3327 
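# Illustrative example (not part of cpplint), assuming _ALT_TOKEN_REPLACEMENT
# maps 'and' to '&&', 'not' to '!', and so on:
#
#   if (a and not b) {   // 'Use operator && instead of and',
#                        // 'Use operator ! instead of not'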
3328 
3329 def GetLineWidth(line):
3330  """Determines the width of the line in column positions.
3331 
3332  Args:
3333  line: A string, which may be a Unicode string.
3334 
3335  Returns:
3336  The width of the line in column positions, accounting for Unicode
3337  combining characters and wide characters.
3338  """
3339  if isinstance(line, unicode):
3340  width = 0
3341  for uc in unicodedata.normalize('NFC', line):
3342  if unicodedata.east_asian_width(uc) in ('W', 'F'):
3343  width += 2
3344  elif not unicodedata.combining(uc):
3345  width += 1
3346  return width
3347  else:
3348  return len(line)
3349 
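# Illustrative examples (not part of cpplint): how GetLineWidth counts columns:
#
#   GetLineWidth('int x;')          == 6   # plain ASCII: one column per character
#   GetLineWidth(u'\u4f60\u597d')   == 4   # wide CJK characters count as two columns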
3350 
3351 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
3352  error):
3353  """Checks rules from the 'C++ style rules' section of cppguide.html.
3354 
3355  Most of these rules are hard to test (naming, comment style), but we
3356  do what we can. In particular we check for 2-space indents, line lengths,
3357  tab usage, spaces inside code, etc.
3358 
3359  Args:
3360  filename: The name of the current file.
3361  clean_lines: A CleansedLines instance containing the file.
3362  linenum: The number of the line to check.
3363  file_extension: The extension (without the dot) of the filename.
3364  nesting_state: A _NestingState instance which maintains information about
3365  the current stack of nested blocks being parsed.
3366  error: The function to call with any errors found.
3367  """
3368 
3369  # Don't use "elided" lines here, otherwise we can't check commented lines.
3370  # Don't want to use "raw" either, because we don't want to check inside C++11
3371  # raw strings.
3372  raw_lines = clean_lines.lines_without_raw_strings
3373  line = raw_lines[linenum]
3374 
3375  if line.find('\t') != -1:
3376  error(filename, linenum, 'whitespace/tab', 1,
3377  'Tab found; better to use spaces')
3378 
3379  # One or three blank spaces at the beginning of the line is weird; it's
3380  # hard to reconcile that with 2-space indents.
3381  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
3382  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
3383  # if(RLENGTH > 20) complain = 0;
3384  # if(match($0, " +(error|private|public|protected):")) complain = 0;
3385  # if(match(prev, "&& *$")) complain = 0;
3386  # if(match(prev, "\\|\\| *$")) complain = 0;
3387  # if(match(prev, "[\",=><] *$")) complain = 0;
3388  # if(match($0, " <<")) complain = 0;
3389  # if(match(prev, " +for \\(")) complain = 0;
3390  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
3391  initial_spaces = 0
3392  cleansed_line = clean_lines.elided[linenum]
3393  while initial_spaces < len(line) and line[initial_spaces] == ' ':
3394  initial_spaces += 1
3395  if line and line[-1].isspace():
3396  error(filename, linenum, 'whitespace/end_of_line', 4,
3397  'Line ends in whitespace. Consider deleting these extra spaces.')
3398  # There are certain situations in which we allow one space, notably for section labels
3399  elif ((initial_spaces == 1 or initial_spaces == 3) and
3400  not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
3401  error(filename, linenum, 'whitespace/indent', 3,
3402  'Weird number of spaces at line-start. '
3403  'Are you using a 2-space indent?')
3404 
3405  # Check if the line is a header guard.
3406  is_header_guard = False
3407  if file_extension == 'h':
3408  cppvar = GetHeaderGuardCPPVariable(filename)
3409  if (line.startswith('#ifndef %s' % cppvar) or
3410  line.startswith('#define %s' % cppvar) or
3411  line.startswith('#endif // %s' % cppvar)):
3412  is_header_guard = True
3413  # #include lines and header guards can be long, since there's no clean way to
3414  # split them.
3415  #
3416  # URLs can be long too. It's possible to split these, but it makes them
3417  # harder to cut&paste.
3418  #
3419  # The "$Id:...$" comment may also get very long without it being the
3420  # developer's fault.
3421  if (not line.startswith('#include') and not is_header_guard and
3422  not Match(r'^\s*//.*http(s?)://\S*$', line) and
3423  not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
3424  line_width = GetLineWidth(line)
3425  extended_length = int((_line_length * 1.25))
3426  if line_width > extended_length:
3427  error(filename, linenum, 'whitespace/line_length', 4,
3428  'Lines should very rarely be longer than %i characters' %
3429  extended_length)
3430  elif line_width > _line_length:
3431  error(filename, linenum, 'whitespace/line_length', 2,
3432  'Lines should be <= %i characters long' % _line_length)
3433 
3434  if (cleansed_line.count(';') > 1 and
3435  # for loops are allowed two ;'s (and may run over two lines).
3436  cleansed_line.find('for') == -1 and
3437  (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
3438  GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
3439  # It's ok to have many commands in a switch case that fits in 1 line
3440  not ((cleansed_line.find('case ') != -1 or
3441  cleansed_line.find('default:') != -1) and
3442  cleansed_line.find('break;') != -1)):
3443  error(filename, linenum, 'whitespace/newline', 0,
3444  'More than one command on the same line')
3445 
3446  # Some more style checks
3447  CheckBraces(filename, clean_lines, linenum, error)
3448  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
3449  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
3450  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
3451  CheckCheck(filename, clean_lines, linenum, error)
3452  CheckAltTokens(filename, clean_lines, linenum, error)
3453  classinfo = nesting_state.InnermostClass()
3454  if classinfo:
3455  CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
3456 
3457 
3458 _RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
3459 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
3460 # Matches the first component of a filename delimited by -s and _s. That is:
3461 # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
3462 # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
3463 # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
3464 # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
3465 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
3466 
3467 
3468 def _DropCommonSuffixes(filename):
3469  """Drops common suffixes like _test.cc or -inl.h from filename.
3470 
3471  For example:
3472  >>> _DropCommonSuffixes('foo/foo-inl.h')
3473  'foo/foo'
3474  >>> _DropCommonSuffixes('foo/bar/foo.cc')
3475  'foo/bar/foo'
3476  >>> _DropCommonSuffixes('foo/foo_internal.h')
3477  'foo/foo'
3478  >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
3479  'foo/foo_unusualinternal'
3480 
3481  Args:
3482  filename: The input filename.
3483 
3484  Returns:
3485  The filename with the common suffix removed.
3486  """
3487  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
3488  'inl.h', 'impl.h', 'internal.h'):
3489  if (filename.endswith(suffix) and len(filename) > len(suffix) and
3490  filename[-len(suffix) - 1] in ('-', '_')):
3491  return filename[:-len(suffix) - 1]
3492  return os.path.splitext(filename)[0]
3493 
3494 
3495 def _IsTestFilename(filename):
3496  """Determines if the given filename has a suffix that identifies it as a test.
3497 
3498  Args:
3499  filename: The input filename.
3500 
3501  Returns:
3502  True if 'filename' looks like a test, False otherwise.
3503  """
3504  if (filename.endswith('_test.cc') or
3505  filename.endswith('_unittest.cc') or
3506  filename.endswith('_regtest.cc')):
3507  return True
3508  else:
3509  return False
3510 
3511 
3512 def _ClassifyInclude(fileinfo, include, is_system):
3513  """Figures out what kind of header 'include' is.
3514 
3515  Args:
3516  fileinfo: The current file cpplint is running over. A FileInfo instance.
3517  include: The path to a #included file.
3518  is_system: True if the #include used <> rather than "".
3519 
3520  Returns:
3521  One of the _XXX_HEADER constants.
3522 
3523  For example:
3524  >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
3525  _C_SYS_HEADER
3526  >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
3527  _CPP_SYS_HEADER
3528  >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
3529  _LIKELY_MY_HEADER
3530  >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
3531  ... 'bar/foo_other_ext.h', False)
3532  _POSSIBLE_MY_HEADER
3533  >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
3534  _OTHER_HEADER
3535  """
3536  # This is a list of all standard c++ header files, except
3537  # those already checked for above.
3538  is_cpp_h = include in _CPP_HEADERS
3539 
3540  if is_system:
3541  if is_cpp_h:
3542  return _CPP_SYS_HEADER
3543  else:
3544  return _C_SYS_HEADER
3545 
3546  # If the target file and the include we're checking share a
3547  # basename when we drop common extensions, and the include
3548  # lives in . , then it's likely to be owned by the target file.
3549  target_dir, target_base = (
3550  os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
3551  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
3552  if target_base == include_base and (
3553  include_dir == target_dir or
3554  include_dir == os.path.normpath(target_dir + '/../public')):
3555  return _LIKELY_MY_HEADER
3556 
3557  # If the target and include share some initial basename
3558  # component, it's possible the target is implementing the
3559  # include, so it's allowed to be first, but we'll never
3560  # complain if it's not there.
3561  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
3562  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
3563  if (target_first_component and include_first_component and
3564  target_first_component.group(0) ==
3565  include_first_component.group(0)):
3566  return _POSSIBLE_MY_HEADER
3567 
3568  return _OTHER_HEADER
3569 
3570 
3571 
3572 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
3573  """Check rules that are applicable to #include lines.
3574 
3575  Strings on #include lines are NOT removed from elided line, to make
3576  certain tasks easier. However, to prevent false positives, checks
3577  applicable to #include lines in CheckLanguage must be put here.
3578 
3579  Args:
3580  filename: The name of the current file.
3581  clean_lines: A CleansedLines instance containing the file.
3582  linenum: The number of the line to check.
3583  include_state: An _IncludeState instance in which the headers are inserted.
3584  error: The function to call with any errors found.
3585  """
3586  fileinfo = FileInfo(filename)
3587 
3588  line = clean_lines.lines[linenum]
3589 
3590  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
3591  if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
3592  error(filename, linenum, 'build/include', 4,
3593  'Include the directory when naming .h files')
3594 
3595  # We shouldn't include a file more than once. Actually, there are a
3596  # handful of instances where doing so is okay, but in general it's
3597  # not.
3598  match = _RE_PATTERN_INCLUDE.search(line)
3599  if match:
3600  include = match.group(2)
3601  is_system = (match.group(1) == '<')
3602  if include in include_state:
3603  error(filename, linenum, 'build/include', 4,
3604  '"%s" already included at %s:%s' %
3605  (include, filename, include_state[include]))
3606  else:
3607  include_state[include] = linenum
3608 
3609  # We want to ensure that headers appear in the right order:
3610  # 1) for foo.cc, foo.h (preferred location)
3611  # 2) c system files
3612  # 3) cpp system files
3613  # 4) for foo.cc, foo.h (deprecated location)
3614  # 5) other google headers
3615  #
3616  # We classify each include statement as one of those 5 types
3617  # using a number of techniques. The include_state object keeps
3618  # track of the highest type seen, and complains if we see a
3619  # lower type after that.
3620  error_message = include_state.CheckNextIncludeOrder(
3621  _ClassifyInclude(fileinfo, include, is_system))
3622  if error_message:
3623  error(filename, linenum, 'build/include_order', 4,
3624  '%s. Should be: %s.h, c system, c++ system, other.' %
3625  (error_message, fileinfo.BaseName()))
3626  canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
3627  if not include_state.IsInAlphabeticalOrder(
3628  clean_lines, linenum, canonical_include):
3629  error(filename, linenum, 'build/include_alpha', 4,
3630  'Include "%s" not in alphabetical order' % include)
3631  include_state.SetLastHeader(canonical_include)
3632 
3633  # Look for any of the stream classes that are part of standard C++.
3634  match = _RE_PATTERN_INCLUDE.match(line)
3635  if match:
3636  include = match.group(2)
3637  if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
3638  # Many unit tests use cout, so we exempt them.
3639  if not _IsTestFilename(filename):
3640  error(filename, linenum, 'readability/streams', 3,
3641  'Streams are highly discouraged.')
3642 
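# Illustrative example (not part of cpplint): include order expected by
# CheckIncludeLine for a file named foo.cc:
#
#   #include "foo/foo.h"    // 1) the corresponding header first
#   #include <stdio.h>      // 2) C system headers
#   #include <string>       // 3) C++ system headers
#   #include "bar/util.h"   // 4) other headers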
3643 
3644 def _GetTextInside(text, start_pattern):
3645  r"""Retrieves all the text between matching open and close parentheses.
3646 
3647  Given a string of lines and a regular expression string, retrieve all the text
3648  following the expression and between opening punctuation symbols like
3649  (, [, or {, and the matching close-punctuation symbol. This properly handles
3650  nested occurrences of the punctuation, so for text like
3651  printf(a(), b(c()));
3652  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
3653  start_pattern must match string having an open punctuation symbol at the end.
3654 
3655  Args:
3656  text: The lines to extract text from. Its comments and strings must be elided.
3657  It can be a single line or span multiple lines.
3658  start_pattern: The regexp string indicating where to start extracting
3659  the text.
3660  Returns:
3661  The extracted text.
3662  None if either the opening string or ending punctuation could not be found.
3663  """
3664  # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
3665  # rewritten to use _GetTextInside (and use inferior regexp matching today).
3666 
3667  # Map each opening punctuation to its matching close-punctuation.
3668  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
3669  closing_punctuation = set(matching_punctuation.itervalues())
3670 
3671  # Find the position to start extracting text.
3672  match = re.search(start_pattern, text, re.M)
3673  if not match: # start_pattern not found in text.
3674  return None
3675  start_position = match.end(0)
3676 
3677  assert start_position > 0, (
3678  'start_pattern must end with an opening punctuation.')
3679  assert text[start_position - 1] in matching_punctuation, (
3680  'start_pattern must end with an opening punctuation.')
3681  # Stack of closing punctuations we expect to have in text after position.
3682  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
3683  position = start_position
3684  while punctuation_stack and position < len(text):
3685  if text[position] == punctuation_stack[-1]:
3686  punctuation_stack.pop()
3687  elif text[position] in closing_punctuation:
3688  # A closing punctuation without matching opening punctuations.
3689  return None
3690  elif text[position] in matching_punctuation:
3691  punctuation_stack.append(matching_punctuation[text[position]])
3692  position += 1
3693  if punctuation_stack:
3694  # Opening punctuations left without matching close-punctuations.
3695  return None
3696  # punctuations match.
3697  return text[start_position:position - 1]
3698 
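# Illustrative usage (not part of cpplint): the input must already have its
# comments and strings elided:
#
#   _GetTextInside('printf(a(), b(c()));', r'printf\(')  ->  'a(), b(c())'
#   _GetTextInside('printf(a(();', r'printf\(')          ->  None  (unbalanced)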
3699 
3700 # Patterns for matching call-by-reference parameters.
3701 #
3702 # Supports nested templates up to 2 levels deep using this messy pattern:
3703 # < (?: < (?: < [^<>]*
3704 # >
3705 # | [^<>] )*
3706 # >
3707 # | [^<>] )*
3708 # >
3709 _RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
3710 _RE_PATTERN_TYPE = (
3711  r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
3712  r'(?:\w|'
3713  r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
3714  r'::)+')
3715 # A call-by-reference parameter ends with '& identifier'.
3716 _RE_PATTERN_REF_PARAM = re.compile(
3717  r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
3718  r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
3719 # A call-by-const-reference parameter either ends with 'const& identifier'
3720 # or looks like 'const type& identifier' when 'type' is atomic.
3721 _RE_PATTERN_CONST_REF_PARAM = (
3722  r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
3723  r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
3724 
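# Illustrative examples (not part of cpplint): parameter text matched by the
# patterns above:
#
#   'string &s)'        matches _RE_PATTERN_REF_PARAM (a non-const reference)
#   'const string &s)'  matches both patterns, so CheckForNonConstReference
#                       below will not report it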
3725 
3726 def CheckLanguage(filename, clean_lines, linenum, file_extension,
3727  include_state, nesting_state, error):
3728  """Checks rules from the 'C++ language rules' section of cppguide.html.
3729 
3730  Some of these rules are hard to test (function overloading, using
3731  uint32 inappropriately), but we do the best we can.
3732 
3733  Args:
3734  filename: The name of the current file.
3735  clean_lines: A CleansedLines instance containing the file.
3736  linenum: The number of the line to check.
3737  file_extension: The extension (without the dot) of the filename.
3738  include_state: An _IncludeState instance in which the headers are inserted.
3739  nesting_state: A _NestingState instance which maintains information about
3740  the current stack of nested blocks being parsed.
3741  error: The function to call with any errors found.
3742  """
3743  # If the line is empty or consists of entirely a comment, no need to
3744  # check it.
3745  line = clean_lines.elided[linenum]
3746  if not line:
3747  return
3748 
3749  match = _RE_PATTERN_INCLUDE.search(line)
3750  if match:
3751  CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
3752  return
3753 
3754  # Reset include state across preprocessor directives. This is meant
3755  # to silence warnings for conditional includes.
3756  if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
3757  include_state.ResetSection()
3758 
3759  # Make Windows paths like Unix.
3760  fullname = os.path.abspath(filename).replace('\\', '/')
3761 
3762  # TODO(unknown): figure out if they're using default arguments in fn proto.
3763 
3764  # Check to see if they're using a conversion function cast.
3765  # I just try to capture the most common basic types, though there are more.
3766  # Parameterless conversion functions, such as bool(), are allowed as they are
3767  # probably a member operator declaration or default constructor.
3768  match = Search(
3769  r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
3770  r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
3771  r'(\([^)].*)', line)
3772  if match:
3773  matched_new = match.group(1)
3774  matched_type = match.group(2)
3775  matched_funcptr = match.group(3)
3776 
3777  # gMock methods are defined using some variant of MOCK_METHODx(name, type)
3778  # where type may be float(), int(string), etc. Without context they are
3779  # virtually indistinguishable from int(x) casts. Likewise, gMock's
3780  # MockCallback takes a template parameter of the form return_type(arg_type),
3781  # which looks much like the cast we're trying to detect.
3782  #
3783  # std::function<> wrapper has a similar problem.
3784  #
3785  # Return types for function pointers also look like casts if they
3786  # don't have an extra space.
3787  if (matched_new is None and # If new operator, then this isn't a cast
3788  not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
3789  Search(r'\bMockCallback<.*>', line) or
3790  Search(r'\bstd::function<.*>', line)) and
3791  not (matched_funcptr and
3792  Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
3793  matched_funcptr))):
3794  # Try a bit harder to catch gmock lines: the only place where
3795  # something looks like an old-style cast is where we declare the
3796  # return type of the mocked method, and the only time when we
3797  # are missing context is if MOCK_METHOD was split across
3798  # multiple lines. The missing MOCK_METHOD is usually one or two
3799  # lines back, so scan back one or two lines.
3800  #
3801  # It's not possible for gmock macros to appear in the first 2
3802  # lines, since the class head + section name takes up 2 lines.
3803  if (linenum < 2 or
3804  not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
3805  clean_lines.elided[linenum - 1]) or
3806  Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
3807  clean_lines.elided[linenum - 2]))):
3808  error(filename, linenum, 'readability/casting', 4,
3809  'Using deprecated casting style. '
3810  'Use static_cast<%s>(...) instead' %
3811  matched_type)
3812 
3813  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
3814  'static_cast',
3815  r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
3816 
3817  # This doesn't catch all cases. Consider (const char * const)"hello".
3818  #
3819  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
3820  # compile).
3821  if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
3822  'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
3823  pass
3824  else:
3825  # Check pointer casts for other than string constants
3826  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
3827  'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
3828 
3829  # In addition, we look for people taking the address of a cast. This
3830  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
3831  # point where you think.
3832  match = Search(
3833  r'(?:&\(([^)]+)\)[\w(])|'
3834  r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
3835  if match and match.group(1) != '*':
3836  error(filename, linenum, 'runtime/casting', 4,
3837  ('Are you taking an address of a cast? '
3838  'This is dangerous: could be a temp var. '
3839  'Take the address before doing the cast, rather than after'))
3840 
3841  # Create an extended_line, which is the concatenation of the current and
3842  # next lines, for more effective checking of code that may span more than one
3843  # line.
3844  if linenum + 1 < clean_lines.NumLines():
3845  extended_line = line + clean_lines.elided[linenum + 1]
3846  else:
3847  extended_line = line
3848 
3849  # Check for people declaring static/global STL strings at the top level.
3850  # This is dangerous because the C++ language does not guarantee that
3851  # globals with constructors are initialized before the first access.
3852  match = Match(
3853  r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
3854  line)
3855  # Make sure it's not a function.
3856  # Function template specialization looks like: "string foo<Type>(...".
3857  # Class template definitions look like: "string Foo<Type>::Method(...".
3858  #
3859  # Also ignore things that look like operators. These are matched separately
3860  # because operator names cross non-word boundaries. If we change the pattern
3861  # above, we would decrease the accuracy of matching identifiers.
3862  if (match and
3863  not Search(r'\boperator\W', line) and
3864  not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
3865  error(filename, linenum, 'runtime/string', 4,
3866  'For a static/global string constant, use a C style string instead: '
3867  '"%schar %s[]".' %
3868  (match.group(1), match.group(2)))
3869 
3870  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
3871  error(filename, linenum, 'runtime/init', 4,
3872  'You seem to be initializing a member variable with itself.')
3873 
3874  if file_extension == 'h':
3875  # TODO(unknown): check that 1-arg constructors are explicit.
3876  # How to tell it's a constructor?
3877  # (handled in CheckForNonStandardConstructs for now)
3878  # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
3879  # (level 1 error)
3880  pass
3881 
3882  # Check if people are using the verboten C basic types. The only exception
3883  # we regularly allow is "unsigned short port" for port.
3884  if Search(r'\bshort port\b', line):
3885  if not Search(r'\bunsigned short port\b', line):
3886  error(filename, linenum, 'runtime/int', 4,
3887  'Use "unsigned short" for ports, not "short"')
3888  else:
3889  match = Search(r'\b(short|long(?! +double)|long long)\b', line)
3890  if match:
3891  error(filename, linenum, 'runtime/int', 4,
3892  'Use int16/int64/etc, rather than the C type %s' % match.group(1))
3893 
3894  # When snprintf is used, the second argument shouldn't be a literal.
3895  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
3896  if match and match.group(2) != '0':
3897  # If 2nd arg is zero, snprintf is used to calculate size.
3898  error(filename, linenum, 'runtime/printf', 3,
3899  'If you can, use sizeof(%s) instead of %s as the 2nd arg '
3900  'to snprintf.' % (match.group(1), match.group(2)))
3901 
3902  # Check if some verboten C functions are being used.
3903  if Search(r'\bsprintf\b', line):
3904  error(filename, linenum, 'runtime/printf', 5,
3905  'Never use sprintf. Use snprintf instead.')
3906  match = Search(r'\b(strcpy|strcat)\b', line)
3907  if match:
3908  error(filename, linenum, 'runtime/printf', 4,
3909  'Almost always, snprintf is better than %s' % match.group(1))
3910 
3911  # Check if some verboten operator overloading is going on
3912  # TODO(unknown): catch out-of-line unary operator&:
3913  # class X {};
3914  # int operator&(const X& x) { return 42; } // unary operator&
3915  # The trick is it's hard to tell apart from binary operator&:
3916  # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
3917  if Search(r'\boperator\s*&\s*\(\s*\)', line):
3918  error(filename, linenum, 'runtime/operator', 4,
3919  'Unary operator& is dangerous. Do not use it.')
3920 
3921  # Check for suspicious usage of "if" like
3922  # } if (a == b) {
3923  if Search(r'\}\s*if\s*\(', line):
3924  error(filename, linenum, 'readability/braces', 4,
3925  'Did you mean "else if"? If not, start a new line for "if".')
3926 
3927  # Check for potential format string bugs like printf(foo).
3928  # We constrain the pattern not to pick things like DocidForPrintf(foo).
3929  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
3930  # TODO(sugawarayu): Catch the following case. Need to change the calling
3931  # convention of the whole function to process multiple line to handle it.
3932  # printf(
3933  # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
3934  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
3935  if printf_args:
3936  match = Match(r'([\w.\->()]+)$', printf_args)
3937  if match and match.group(1) != '__VA_ARGS__':
3938  function_name = re.search(r'\b((?:string)?printf)\s*\(',
3939  line, re.I).group(1)
3940  error(filename, linenum, 'runtime/printf', 4,
3941  'Potential format string bug. Do %s("%%s", %s) instead.'
3942  % (function_name, match.group(1)))
3943 
3944  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
3945  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
3946  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
3947  error(filename, linenum, 'runtime/memset', 4,
3948  'Did you mean "memset(%s, 0, %s)"?'
3949  % (match.group(1), match.group(2)))
3950 
3951  if Search(r'\busing namespace\b', line):
3952  error(filename, linenum, 'build/namespaces', 5,
3953  'Do not use namespace using-directives. '
3954  'Use using-declarations instead.')
3955 
3956  # Detect variable-length arrays.
3957  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
3958  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
3959  match.group(3).find(']') == -1):
3960  # Split the size using space and arithmetic operators as delimiters.
3961  # If any of the resulting tokens are not compile time constants then
3962  # report the error.
3963  tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
3964  is_const = True
3965  skip_next = False
3966  for tok in tokens:
3967  if skip_next:
3968  skip_next = False
3969  continue
3970 
3971  if Search(r'sizeof\(.+\)', tok): continue
3972  if Search(r'arraysize\(\w+\)', tok): continue
3973 
3974  tok = tok.lstrip('(')
3975  tok = tok.rstrip(')')
3976  if not tok: continue
3977  if Match(r'\d+', tok): continue
3978  if Match(r'0[xX][0-9a-fA-F]+', tok): continue
3979  if Match(r'k[A-Z0-9]\w*', tok): continue
3980  if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
3981  if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
3982  # A catch all for tricky sizeof cases, including 'sizeof expression',
3983  # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
3984  # requires skipping the next token because we split on ' ' and '*'.
3985  if tok.startswith('sizeof'):
3986  skip_next = True
3987  continue
3988  is_const = False
3989  break
3990  if not is_const:
3991  error(filename, linenum, 'runtime/arrays', 1,
3992  'Do not use variable-length arrays. Use an appropriately named '
3993  "('k' followed by CamelCase) compile-time constant for the size.")
3994 
3995  # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
3996  # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
3997  # in the class declaration.
3998  match = Match(
3999  (r'\s*'
4000  r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
4001  r'\(.*\);$'),
4002  line)
4003  if match and linenum + 1 < clean_lines.NumLines():
4004  next_line = clean_lines.elided[linenum + 1]
4005  # We allow some, but not all, declarations of variables to be present
4006  # in the statement that defines the class. The [\w\*,\s]* fragment of
4007  # the regular expression below allows users to declare instances of
4008  # the class or pointers to instances, but not less common types such
4009  # as function pointers or arrays. It's a tradeoff between allowing
4010  # reasonable code and avoiding trying to parse more C++ using regexps.
4011  if not Search(r'^\s*}[\w\*,\s]*;', next_line):
4012  error(filename, linenum, 'readability/constructors', 3,
4013  match.group(1) + ' should be the last thing in the class')
4014 
4015  # Check for use of unnamed namespaces in header files. Registration
4016  # macros are typically OK, so we allow use of "namespace {" on lines
4017  # that end with backslashes.
4018  if (file_extension == 'h'
4019  and Search(r'\bnamespace\s*{', line)
4020  and line[-1] != '\\'):
4021  error(filename, linenum, 'build/namespaces', 4,
4022  'Do not use unnamed namespaces in header files. See '
4023  'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
4024  ' for more information.')
4025 
4026 def CheckForNonConstReference(filename, clean_lines, linenum,
4027  nesting_state, error):
4028  """Check for non-const references.
4029 
4030  Separate from CheckLanguage since it scans backwards from current
4031  line, instead of scanning forward.
4032 
4033  Args:
4034  filename: The name of the current file.
4035  clean_lines: A CleansedLines instance containing the file.
4036  linenum: The number of the line to check.
4037  nesting_state: A _NestingState instance which maintains information about
4038  the current stack of nested blocks being parsed.
4039  error: The function to call with any errors found.
4040  """
4041  # Do nothing if there is no '&' on current line.
4042  line = clean_lines.elided[linenum]
4043  if '&' not in line:
4044  return
4045 
4046  # Long type names may be broken across multiple lines, usually in one
4047  # of these forms:
4048  # LongType
4049  # ::LongTypeContinued &identifier
4050  # LongType::
4051  # LongTypeContinued &identifier
4052  # LongType<
4053  # ...>::LongTypeContinued &identifier
4054  #
4055  # If we detected a type split across two lines, join the previous
4056  # line to current line so that we can match const references
4057  # accordingly.
4058  #
4059  # Note that this only scans back one line, since scanning back
4060  # arbitrary number of lines would be expensive. If you have a type
4061  # that spans more than 2 lines, please use a typedef.
4062  if linenum > 1:
4063  previous = None
4064  if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
4065  # previous_line\n + ::current_line
4066  previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
4067  clean_lines.elided[linenum - 1])
4068  elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
4069  # previous_line::\n + current_line
4070  previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
4071  clean_lines.elided[linenum - 1])
4072  if previous:
4073  line = previous.group(1) + line.lstrip()
4074  else:
4075  # Check for templated parameter that is split across multiple lines
4076  endpos = line.rfind('>')
4077  if endpos > -1:
4078  (_, startline, startpos) = ReverseCloseExpression(
4079  clean_lines, linenum, endpos)
4080  if startpos > -1 and startline < linenum:
4081  # Found the matching < on an earlier line, collect all
4082  # pieces up to current line.
4083  line = ''
4084  for i in xrange(startline, linenum + 1):
4085  line += clean_lines.elided[i].strip()
4086 
4087  # Check for non-const references in function parameters. A single '&' may
4088  # be found in the following places:
4089  # inside expression: binary & for bitwise AND
4090  # inside expression: unary & for taking the address of something
4091  # inside declarators: reference parameter
4092  # We will exclude the first two cases by checking that we are not inside a
4093  # function body, including one that was just introduced by a trailing '{'.
4094  # TODO(unknown): Doesn't account for preprocessor directives.
4095  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
4096  check_params = False
4097  if not nesting_state.stack:
4098  check_params = True # top level
4099  elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
4100  isinstance(nesting_state.stack[-1], _NamespaceInfo)):
4101  check_params = True # within class or namespace
4102  elif Match(r'.*{\s*$', line):
4103  if (len(nesting_state.stack) == 1 or
4104  isinstance(nesting_state.stack[-2], _ClassInfo) or
4105  isinstance(nesting_state.stack[-2], _NamespaceInfo)):
4106  check_params = True # just opened global/class/namespace block
4107  # We allow non-const references in a few standard places, like functions
4108  # called "swap()" or iostream operators like "<<" or ">>". Do not check
4109  # those function parameters.
4110  #
4111  # We also accept & in static_assert, which looks like a function but
4112  # it's actually a declaration expression.
4113  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
4114  r'operator\s*[<>][<>]|'
4115  r'static_assert|COMPILE_ASSERT'
4116  r')\s*\(')
4117  if Search(whitelisted_functions, line):
4118  check_params = False
4119  elif not Search(r'\S+\([^)]*$', line):
4120  # Don't see a whitelisted function on this line. Actually we
4121  # didn't see any function name on this line, so this is likely a
4122  # multi-line parameter list. Try a bit harder to catch this case.
4123  for i in xrange(2):
4124  if (linenum > i and
4125  Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
4126  check_params = False
4127  break
4128 
4129  if check_params:
4130  decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
4131  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
4132  if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
4133  error(filename, linenum, 'runtime/references', 2,
4134  'Is this a non-const reference? '
4135  'If so, make const or use a pointer: ' +
4136  ReplaceAll(' *<', '<', parameter))
4137 
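# Illustrative examples (not part of cpplint): declarations seen at
# namespace/class scope by CheckForNonConstReference:
#
#   void Swap(Foo& a, Foo& b);    // not flagged: swap() is whitelisted
#   void Update(Foo& foo);        // flagged: 'Is this a non-const reference? ...'
#   void Print(const Foo& foo);   // not flagged: const reference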
4138 
4139 def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
4140  error):
4141  """Checks for a C-style cast by looking for the pattern.
4142 
4143  Args:
4144  filename: The name of the current file.
4145  linenum: The number of the line to check.
4146  line: The line of code to check.
4147  raw_line: The raw line of code to check, with comments.
4148  cast_type: The string for the C++ cast to recommend. This is either
4149  reinterpret_cast, static_cast, or const_cast, depending.
4150  pattern: The regular expression used to find C-style casts.
4151  error: The function to call with any errors found.
4152 
4153  Returns:
4154  True if an error was emitted.
4155  False otherwise.
4156  """
4157  match = Search(pattern, line)
4158  if not match:
4159  return False
4160 
4161  # Exclude lines with sizeof, since sizeof looks like a cast.
4162  sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
4163  if sizeof_match:
4164  return False
4165 
4166  # operator++(int) and operator--(int)
4167  if (line[0:match.start(1) - 1].endswith(' operator++') or
4168  line[0:match.start(1) - 1].endswith(' operator--')):
4169  return False
4170 
4171  # A single unnamed argument for a function tends to look like an old-
4172  # style cast. If we see those, don't issue warnings for deprecated
4173  # casts, instead issue warnings for unnamed arguments where
4174  # appropriate.
4175  #
4176  # These are things that we want warnings for, since the style guide
4177  # explicitly requires all parameters to be named:
4178  # Function(int);
4179  # Function(int) {
4180  # ConstMember(int) const;
4181  # ConstMember(int) const {
4182  # ExceptionMember(int) throw (...);
4183  # ExceptionMember(int) throw (...) {
4184  # PureVirtual(int) = 0;
4185  #
4186  # These are functions of some sort, where the compiler would be fine
4187  # if they had named parameters, but people often omit those
4188  # identifiers to reduce clutter:
4189  # (FunctionPointer)(int);
4190  # (FunctionPointer)(int) = value;
4191  # Function((function_pointer_arg)(int))
4192  # <TemplateArgument(int)>;
4193  # <(FunctionPointerTemplateArgument)(int)>;
4194  remainder = line[match.end(0):]
4195  if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
4196  # Looks like an unnamed parameter.
4197 
4198  # Don't warn on any kind of template arguments.
4199  if Match(r'^\s*>', remainder):
4200  return False
4201 
4202  # Don't warn on assignments to function pointers, but keep warnings for
4203  # unnamed parameters to pure virtual functions. Note that this pattern
4204  # will also pass on assignments of "0" to function pointers, but the
4205  # preferred values for those would be "nullptr" or "NULL".
4206  matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
4207  if matched_zero and matched_zero.group(1) != '0':
4208  return False
4209 
4210  # Don't warn on function pointer declarations. For this we need
4211  # to check what came before the "(type)" string.
4212  if Match(r'.*\)\s*$', line[0:match.start(0)]):
4213  return False
4214 
4215  # Don't warn if the parameter is named with block comments, e.g.:
4216  # Function(int /*unused_param*/);
4217  if '/*' in raw_line:
4218  return False
4219 
4220  # Passed all filters, issue warning here.
4221  error(filename, linenum, 'readability/function', 3,
4222  'All parameters should be named in a function')
4223  return True
4224 
4225  # At this point, all that should be left is actual casts.
4226  error(filename, linenum, 'readability/casting', 4,
4227  'Using C-style cast. Use %s<%s>(...) instead' %
4228  (cast_type, match.group(1)))
4229 
4230  return True
4231 
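# Illustrative examples (not part of cpplint): behavior of CheckCStyleCast when
# called from CheckLanguage with the patterns shown there:
#
#   int x = (int)f;       // 'Using C-style cast. Use static_cast<int>(...) instead'
#   void Function(int);   // 'All parameters should be named in a function'
#   sizeof(int);          // not flagged: sizeof is explicitly excluded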
4232 
4233 _HEADERS_CONTAINING_TEMPLATES = (
4234  ('<deque>', ('deque',)),
4235  ('<functional>', ('unary_function', 'binary_function',
4236  'plus', 'minus', 'multiplies', 'divides', 'modulus',
4237  'negate',
4238  'equal_to', 'not_equal_to', 'greater', 'less',
4239  'greater_equal', 'less_equal',
4240  'logical_and', 'logical_or', 'logical_not',
4241  'unary_negate', 'not1', 'binary_negate', 'not2',
4242  'bind1st', 'bind2nd',
4243  'pointer_to_unary_function',
4244  'pointer_to_binary_function',
4245  'ptr_fun',
4246  'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
4247  'mem_fun_ref_t',
4248  'const_mem_fun_t', 'const_mem_fun1_t',
4249  'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
4250  'mem_fun_ref',
4251  )),
4252  ('<limits>', ('numeric_limits',)),
4253  ('<list>', ('list',)),
4254  ('<map>', ('map', 'multimap',)),
4255  ('<memory>', ('allocator',)),
4256  ('<queue>', ('queue', 'priority_queue',)),
4257  ('<set>', ('set', 'multiset',)),
4258  ('<stack>', ('stack',)),
4259  ('<string>', ('char_traits', 'basic_string',)),
4260  ('<utility>', ('pair',)),
4261  ('<vector>', ('vector',)),
4262 
4263  # gcc extensions.
4264  # Note: std::hash is their hash, ::hash is our hash
4265  ('<hash_map>', ('hash_map', 'hash_multimap',)),
4266  ('<hash_set>', ('hash_set', 'hash_multiset',)),
4267  ('<slist>', ('slist',)),
4268  )
4269 
4270 _RE_PATTERN_STRING = re.compile(r'\bstring\b')
4271 
4272 _re_pattern_algorithm_header = []
4273 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
4274  'transform'):
4275  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
4276  # type::max().
4277  _re_pattern_algorithm_header.append(
4278  (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
4279  _template,
4280  '<algorithm>'))
4281 
4282 _re_pattern_templates = []
4283 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
4284  for _template in _templates:
4285  _re_pattern_templates.append(
4286  (re.compile(r'(<|\b)' + _template + r'\s*<'),
4287  _template + '<>',
4288  _header))
4289 
4290 
4291 def FilesBelongToSameModule(filename_cc, filename_h):
4292  """Check if these two filenames belong to the same module.
4293 
4294  The concept of a 'module' here is as follows:
4295  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
4296  same 'module' if they are in the same directory.
4297  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
4298  to belong to the same module here.
4299 
4300  If the filename_cc contains a longer path than the filename_h, for example,
4301  '/absolute/path/to/base/sysinfo.cc', and this file would include
4302  'base/sysinfo.h', this function also produces the prefix needed to open the
4303  header. This is used by the caller of this function to more robustly open the
4304  header file. We don't have access to the real include paths in this context,
4305  so we need this guesswork here.
4306 
4307  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
4308  according to this implementation. Because of this, this function gives
4309  some false positives. This should be sufficiently rare in practice.
4310 
4311  Args:
4312  filename_cc: is the path for the .cc file
4313  filename_h: is the path for the header file
4314 
4315  Returns:
4316  Tuple with a bool and a string:
4317  bool: True if filename_cc and filename_h belong to the same module.
4318  string: the additional prefix needed to open the header file.
4319  """
4320 
4321  if not filename_cc.endswith('.cc'):
4322  return (False, '')
4323  filename_cc = filename_cc[:-len('.cc')]
4324  if filename_cc.endswith('_unittest'):
4325  filename_cc = filename_cc[:-len('_unittest')]
4326  elif filename_cc.endswith('_test'):
4327  filename_cc = filename_cc[:-len('_test')]
4328  filename_cc = filename_cc.replace('/public/', '/')
4329  filename_cc = filename_cc.replace('/internal/', '/')
4330 
4331  if not filename_h.endswith('.h'):
4332  return (False, '')
4333  filename_h = filename_h[:-len('.h')]
4334  if filename_h.endswith('-inl'):
4335  filename_h = filename_h[:-len('-inl')]
4336  filename_h = filename_h.replace('/public/', '/')
4337  filename_h = filename_h.replace('/internal/', '/')
4338 
4339  files_belong_to_same_module = filename_cc.endswith(filename_h)
4340  common_path = ''
4341  if files_belong_to_same_module:
4342  common_path = filename_cc[:-len(filename_h)]
4343  return files_belong_to_same_module, common_path
4344 
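# Illustrative only -- not part of the original source. Expected behaviour of
# FilesBelongToSameModule under the rules described in its docstring:
#   FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo.h')
#       -> (True, '')
#   FilesBelongToSameModule('/abs/path/base/sysinfo.cc', 'base/sysinfo.h')
#       -> (True, '/abs/path/')
#   FilesBelongToSameModule('a/b/foo.cc', 'a/b/bar.h')
#       -> (False, '')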
4345 
4346 def UpdateIncludeState(filename, include_state, io=codecs):
4347  """Fill up the include_state with new includes found from the file.
4348 
4349  Args:
4350  filename: the name of the header to read.
4351  include_state: an _IncludeState instance in which the headers are inserted.
4352  io: The io factory to use to read the file. Provided for testability.
4353 
4354  Returns:
4355  True if a header was successfully added. False otherwise.
4356  """
4357  headerfile = None
4358  try:
4359  headerfile = io.open(filename, 'r', 'utf8', 'replace')
4360  except IOError:
4361  return False
4362  linenum = 0
4363  for line in headerfile:
4364  linenum += 1
4365  clean_line = CleanseComments(line)
4366  match = _RE_PATTERN_INCLUDE.search(clean_line)
4367  if match:
4368  include = match.group(2)
4369  # The value formatting is cute, but not really used right now.
4370  # What matters here is that the key is in include_state.
4371  include_state.setdefault(include, '%s:%d' % (filename, linenum))
4372  return True
4373 
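# Illustrative only -- not part of the original source. After a successful
# UpdateIncludeState('foo/bar.h', include_state) call, include_state maps each
# header found in bar.h's #include lines to a 'filename:linenum' string, e.g.
#   {'vector': 'foo/bar.h:12', 'base/baz.h': 'foo/bar.h:13'}
# Only the presence of the key matters to CheckForIncludeWhatYouUse below.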
4374 
4375 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
4376  io=codecs):
4377  """Reports missing STL includes.
4378 
4379  This function will output warnings to make sure you are including the headers
4380  necessary for the STL containers and functions that you use. We only give one
4381  reason to include a header. For example, if you use both equal_to<> and
4382  less<> in a .h file, only one of these (whichever appears later in the file)
4383  will be reported as a reason to include <functional>.
4384 
4385  Args:
4386  filename: The name of the current file.
4387  clean_lines: A CleansedLines instance containing the file.
4388  include_state: An _IncludeState instance.
4389  error: The function to call with any errors found.
4390  io: The IO factory to use to read the header file. Provided for unittest
4391  injection.
4392  """
4393  required = {} # A map of header name to line number and the template entity.
4394  # Example of required: { '<functional>': (1219, 'less<>') }
4395 
4396  for linenum in xrange(clean_lines.NumLines()):
4397  line = clean_lines.elided[linenum]
4398  if not line or line[0] == '#':
4399  continue
4400 
4401  # String is special -- it is a non-templatized type in STL.
4402  matched = _RE_PATTERN_STRING.search(line)
4403  if matched:
4404  # Don't warn about strings in non-STL namespaces:
4405  # (We check only the first match per line; good enough.)
4406  prefix = line[:matched.start()]
4407  if prefix.endswith('std::') or not prefix.endswith('::'):
4408  required['<string>'] = (linenum, 'string')
4409 
4410  for pattern, template, header in _re_pattern_algorithm_header:
4411  if pattern.search(line):
4412  required[header] = (linenum, template)
4413 
4414  # The following check is just a speed-up; it does not change semantics.
4415  if '<' not in line: # Reduces CPU time by skipping lines with no templates.
4416  continue
4417 
4418  for pattern, template, header in _re_pattern_templates:
4419  if pattern.search(line):
4420  required[header] = (linenum, template)
4421 
4422  # The policy is that if you #include something in foo.h you don't need to
4423  # include it again in foo.cc. Here, we will look at possible includes.
4424  # Let's copy the include_state so it is only messed up within this function.
4425  include_state = include_state.copy()
4426 
4427  # Did we find the header for this file (if any) and successfully load it?
4428  header_found = False
4429 
4430  # Use the absolute path so that matching works properly.
4431  abs_filename = FileInfo(filename).FullName()
4432 
4433  # For Emacs's flymake.
4434  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
4435  # by flymake and that file name might end with '_flymake.cc'. In that case,
4436  # restore original file name here so that the corresponding header file can be
4437  # found.
4438  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
4439  # instead of 'foo_flymake.h'
4440  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
4441 
4442  # include_state is modified during iteration, so we iterate over a copy of
4443  # the keys.
4444  header_keys = include_state.keys()
4445  for header in header_keys:
4446  (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
4447  fullpath = common_path + header
4448  if same_module and UpdateIncludeState(fullpath, include_state, io):
4449  header_found = True
4450 
4451  # If we can't find the header file for a .cc, assume it's because we don't
4452  # know where to look. In that case we'll give up as we're not sure they
4453  # didn't include it in the .h file.
4454  # TODO(unknown): Do a better job of finding .h files so we are confident that
4455  # not having the .h file means there isn't one.
4456  if filename.endswith('.cc') and not header_found:
4457  return
4458 
4459  # All the lines have been processed, report the errors found.
4460  for required_header_unstripped in required:
4461  template = required[required_header_unstripped][1]
4462  if required_header_unstripped.strip('<>"') not in include_state:
4463  error(filename, required[required_header_unstripped][0],
4464  'build/include_what_you_use', 4,
4465  'Add #include ' + required_header_unstripped + ' for ' + template)
4466 
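# Illustrative only -- not part of the original source. A foo.cc that uses
#   std::map<int, string> counts;
#   std::sort(v.begin(), v.end());
# but includes neither <map> nor <algorithm> (directly or via foo.h) would get
# two build/include_what_you_use warnings from the check above, e.g.
#   "Add #include <algorithm> for sort"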
4467 
4468 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
4469 
4470 
4471 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
4472  """Check that make_pair's template arguments are deduced.
4473 
4474  G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
4475  specified explicitly, and such use isn't intended in any case.
4476 
4477  Args:
4478  filename: The name of the current file.
4479  clean_lines: A CleansedLines instance containing the file.
4480  linenum: The number of the line to check.
4481  error: The function to call with any errors found.
4482  """
4483  line = clean_lines.elided[linenum]
4484  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
4485  if match:
4486  error(filename, linenum, 'build/explicit_make_pair',
4487  4, # 4 = high confidence
4488  'For C++11-compatibility, omit template arguments from make_pair'
4489  ' OR use pair directly OR if appropriate, construct a pair directly')
4490 
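# Illustrative only -- not part of the original source. The check above flags
# explicit template arguments on make_pair:
#   m.insert(std::make_pair<int, string>(1, "one"));   // flagged
#   m.insert(std::make_pair(1, string("one")));        // OK, arguments deduced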
4491 
4492 def ProcessLine(filename, file_extension, clean_lines, line,
4493  include_state, function_state, nesting_state, error,
4494  extra_check_functions=[]):
4495  """Processes a single line in the file.
4496 
4497  Args:
4498  filename: Filename of the file that is being processed.
4499  file_extension: The extension (dot not included) of the file.
4500  clean_lines: An array of strings, each representing a line of the file,
4501  with comments stripped.
4502  line: Number of line being processed.
4503  include_state: An _IncludeState instance in which the headers are inserted.
4504  function_state: A _FunctionState instance which counts function lines, etc.
4505  nesting_state: A _NestingState instance which maintains information about
4506  the current stack of nested blocks being parsed.
4507  error: A callable to which errors are reported, which takes 4 arguments:
4508  filename, line number, error level, and message
4509  extra_check_functions: An array of additional check functions that will be
4510  run on each source line. Each function takes 4
4511  arguments: filename, clean_lines, line, error
4512  """
4513  raw_lines = clean_lines.raw_lines
4514  ParseNolintSuppressions(filename, raw_lines[line], line, error)
4515  nesting_state.Update(filename, clean_lines, line, error)
4516  if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
4517  return
4518  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
4519  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
4520  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
4521  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
4522  nesting_state, error)
4523  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
4524  CheckForNonStandardConstructs(filename, clean_lines, line,
4525  nesting_state, error)
4526  CheckVlogArguments(filename, clean_lines, line, error)
4527  CheckPosixThreading(filename, clean_lines, line, error)
4528  CheckInvalidIncrement(filename, clean_lines, line, error)
4529  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
4530  for check_fn in extra_check_functions:
4531  check_fn(filename, clean_lines, line, error)
4532 
4533 def ProcessFileData(filename, file_extension, lines, error,
4534  extra_check_functions=[]):
4535  """Performs lint checks and reports any errors to the given error function.
4536 
4537  Args:
4538  filename: Filename of the file that is being processed.
4539  file_extension: The extension (dot not included) of the file.
4540  lines: An array of strings, each representing a line of the file, with the
4541  last element being empty if the file is terminated with a newline.
4542  error: A callable to which errors are reported, which takes 4 arguments:
4543  filename, line number, error level, and message
4544  extra_check_functions: An array of additional check functions that will be
4545  run on each source line. Each function takes 4
4546  arguments: filename, clean_lines, line, error
4547  """
4548  lines = (['// marker so line numbers and indices both start at 1'] + lines +
4549  ['// marker so line numbers end in a known way'])
4550 
4551  include_state = _IncludeState()
4552  function_state = _FunctionState()
4553  nesting_state = _NestingState()
4554 
4555  ResetNolintSuppressions()
4556 
4557  CheckForCopyright(filename, lines, error)
4558 
4559  if file_extension == 'h':
4560  CheckForHeaderGuard(filename, lines, error)
4561 
4562  RemoveMultiLineComments(filename, lines, error)
4563  clean_lines = CleansedLines(lines)
4564  for line in xrange(clean_lines.NumLines()):
4565  ProcessLine(filename, file_extension, clean_lines, line,
4566  include_state, function_state, nesting_state, error,
4567  extra_check_functions)
4568  nesting_state.CheckCompletedBlocks(filename, error)
4569 
4570  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
4571 
4572  # We check here rather than inside ProcessLine so that we see raw
4573  # lines rather than "cleaned" lines.
4574  CheckForBadCharacters(filename, lines, error)
4575 
4576  CheckForNewlineAtEOF(filename, lines, error)
4577 
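# Illustrative only -- not part of the original source. An entry in the
# extra_check_functions list accepted by ProcessLine/ProcessFileData above
# must take (filename, clean_lines, line, error); a hypothetical example:
#   def CheckNoTodoExample(filename, clean_lines, line, error):
#     if 'TODO' in clean_lines.elided[line]:
#       error(filename, line, 'readability/todo', 2, 'Avoid bare TODOs')
#   ProcessFileData('foo.cc', 'cc', lines, Error, [CheckNoTodoExample])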
4578 def ProcessFile(filename, vlevel, extra_check_functions=[]):
4579  """Does google-lint on a single file.
4580 
4581  Args:
4582  filename: The name of the file to parse.
4583 
4584  vlevel: The level of errors to report. Every error of confidence
4585  >= verbose_level will be reported. 0 is a good default.
4586 
4587  extra_check_functions: An array of additional check functions that will be
4588  run on each source line. Each function takes 4
4589  arguments: filename, clean_lines, line, error
4590  """
4591 
4592  _SetVerboseLevel(vlevel)
4593 
4594  try:
4595  # Support the UNIX convention of using "-" for stdin. Note that
4596  # we are not opening the file with universal newline support
4597  # (which codecs doesn't support anyway), so the resulting lines do
4598  # contain trailing '\r' characters if we are reading a file that
4599  # has CRLF endings.
4600  # If a trailing '\r' is present after the split, it is removed
4601  # below. If it is not expected to be present on this platform
4602  # (i.e. os.linesep != '\r\n', unlike on Windows), a warning is
4603  # issued below when this file is processed.
4604 
4605  if filename == '-':
4606  lines = codecs.StreamReaderWriter(sys.stdin,
4607  codecs.getreader('utf8'),
4608  codecs.getwriter('utf8'),
4609  'replace').read().split('\n')
4610  else:
4611  lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
4612 
4613  carriage_return_found = False
4614  # Remove trailing '\r'.
4615  for linenum in range(len(lines)):
4616  if lines[linenum].endswith('\r'):
4617  lines[linenum] = lines[linenum].rstrip('\r')
4618  carriage_return_found = True
4619 
4620  except IOError:
4621  sys.stderr.write(
4622  "Skipping input '%s': Can't open for reading\n" % filename)
4623  return
4624 
4625  # Note, if no dot is found, this will give the entire filename as the ext.
4626  file_extension = filename[filename.rfind('.') + 1:]
4627 
4628  # When reading from stdin, the extension is unknown, so no cpplint tests
4629  # should rely on the extension.
4630  if filename != '-' and file_extension not in _valid_extensions:
4631  sys.stderr.write('Ignoring %s; not a valid file name '
4632  '(%s)\n' % (filename, ', '.join(_valid_extensions)))
4633  else:
4634  ProcessFileData(filename, file_extension, lines, Error,
4635  extra_check_functions)
4636  if carriage_return_found and os.linesep != '\r\n':
4637  # Use 0 for linenum since outputting only one error for potentially
4638  # several lines.
4639  Error(filename, 0, 'whitespace/newline', 1,
4640  'One or more unexpected \\r (^M) found; '
4641  'better to use only a \\n')
4642 
4643  # suppress printing
4644  # sys.stderr.write('Done processing %s\n' % filename)
4645 
4646 
4647 def PrintUsage(message):
4648  """Prints a brief usage string and exits, optionally with an error message.
4649 
4650  Args:
4651  message: The optional error message.
4652  """
4653  sys.stderr.write(_USAGE)
4654  if message:
4655  sys.exit('\nFATAL ERROR: ' + message)
4656  else:
4657  sys.exit(1)
4658 
4659 
4660 def PrintCategories():
4661  """Prints a list of all the error-categories used by error messages.
4662 
4663  These are the categories used to filter messages via --filter.
4664  """
4665  sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
4666  sys.exit(0)
4667 
4668 
4669 def ParseArguments(args):
4670  """Parses the command line arguments.
4671 
4672  This may set the output format and verbosity level as side-effects.
4673 
4674  Args:
4675  args: The command line arguments:
4676 
4677  Returns:
4678  The list of filenames to lint.
4679  """
4680  try:
4681  (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
4682  'counting=',
4683  'filter=',
4684  'root=',
4685  'linelength=',
4686  'extensions='])
4687  except getopt.GetoptError:
4688  PrintUsage('Invalid arguments.')
4689 
4690  verbosity = _VerboseLevel()
4691  output_format = _OutputFormat()
4692  filters = ''
4693  counting_style = ''
4694 
4695  for (opt, val) in opts:
4696  if opt == '--help':
4697  PrintUsage(None)
4698  elif opt == '--output':
4699  if val not in ('emacs', 'vs7', 'eclipse'):
4700  PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
4701  output_format = val
4702  elif opt == '--verbose':
4703  verbosity = int(val)
4704  elif opt == '--filter':
4705  filters = val
4706  if not filters:
4707  PrintCategories()
4708  elif opt == '--counting':
4709  if val not in ('total', 'toplevel', 'detailed'):
4710  PrintUsage('Valid counting options are total, toplevel, and detailed')
4711  counting_style = val
4712  elif opt == '--root':
4713  global _root
4714  _root = val
4715  elif opt == '--linelength':
4716  global _line_length
4717  try:
4718  _line_length = int(val)
4719  except ValueError:
4720  PrintUsage('Line length must be digits.')
4721  elif opt == '--extensions':
4722  global _valid_extensions
4723  try:
4724  _valid_extensions = set(val.split(','))
4725  except ValueError:
4726  PrintUsage('Extensions must be a comma-separated list.')
4727 
4728  if not filenames:
4729  PrintUsage('No files were specified.')
4730 
4731  _SetOutputFormat(output_format)
4732  _SetVerboseLevel(verbosity)
4733  _SetFilters(filters)
4734  _SetCountingStyle(counting_style)
4735 
4736  return filenames
4737 
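# Illustrative only -- not part of the original source. Typical invocations
# accepted by ParseArguments above (options must precede the file names):
#   cpplint.py --verbose=3 foo.cc bar.h
#   cpplint.py --filter=-whitespace,+whitespace/braces --linelength=100 foo.cc
#   cpplint.py --extensions=cc,h,cu,cuh --counting=detailed src/foo.cc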
4738 
4739 def main():
4740  filenames = ParseArguments(sys.argv[1:])
4741 
4742  # Change stderr to write with replacement characters so we don't die
4743  # if we try to print something containing non-ASCII characters.
4744  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
4745  codecs.getreader('utf8'),
4746  codecs.getwriter('utf8'),
4747  'replace')
4748 
4749  _cpplint_state.ResetErrorCounts()
4750  for filename in filenames:
4751  ProcessFile(filename, _cpplint_state.verbose_level)
4752  _cpplint_state.PrintErrorCounts()
4753 
4754  sys.exit(_cpplint_state.error_count > 0)
4755 
4756 
4757 if __name__ == '__main__':
4758  main()

