30 Check Python source code formatting, according to PEP 8. 32 For usage and a list of options, try this: 33 $ python pycodestyle.py -h 35 This program and its regression test suite live here: 36 https://github.com/pycqa/pycodestyle 38 Groups of errors and warnings: 50 from __future__
import with_statement
63 from functools
import lru_cache
66 """Does not really need a real a lru_cache, it's just 67 optimization, so let's just do nothing here. Python 3.2+ will 68 just get better performances, time to upgrade? 70 return lambda function: function
72 from fnmatch
import fnmatch
73 from optparse
import OptionParser
76 from configparser
import RawConfigParser
77 from io
import TextIOWrapper
79 from ConfigParser
import RawConfigParser
83 DEFAULT_EXCLUDE =
'.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' 84 DEFAULT_IGNORE =
'E121,E123,E126,E226,E24,E704,W503,W504' 86 if sys.platform ==
'win32':
87 USER_CONFIG = os.path.expanduser(
r'~\.pycodestyle')
89 USER_CONFIG = os.path.join(
90 os.getenv(
'XDG_CONFIG_HOME')
or os.path.expanduser(
'~/.config'),
96 PROJECT_CONFIG = (
'setup.cfg',
'tox.ini')
97 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__),
'testsuite')
100 BLANK_LINES_CONFIG = {
108 'default':
'%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
109 'pylint':
'%(path)s:%(row)d: [%(code)s] %(text)s',
113 SINGLETONS = frozenset([
'False',
'None',
'True'])
114 KEYWORDS = frozenset(keyword.kwlist + [
'print',
'async']) - SINGLETONS
115 UNARY_OPERATORS = frozenset([
'>>',
'**',
'*',
'+',
'-'])
116 ARITHMETIC_OP = frozenset([
'**',
'*',
'/',
'//',
'+',
'-'])
117 WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union([
'^',
'&',
'|',
'<<',
'>>',
'%'])
119 FUNCTION_RETURN_ANNOTATION_OP = [
'->']
if sys.version_info >= (3, 5)
else []
120 ASSIGNMENT_EXPRESSION_OP = [
':=']
if sys.version_info >= (3, 8)
else []
121 WS_NEEDED_OPERATORS = frozenset([
122 '**=',
'*=',
'/=',
'//=',
'+=',
'-=',
'!=',
'<>',
'<',
'>',
123 '%=',
'^=',
'&=',
'|=',
'==',
'<=',
'>=',
'<<=',
'>>=',
'=',
124 'and',
'in',
'is',
'or'] +
125 FUNCTION_RETURN_ANNOTATION_OP +
126 ASSIGNMENT_EXPRESSION_OP)
127 WHITESPACE = frozenset(
' \t')
128 NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
129 SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
131 SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
132 BENCHMARK_KEYS = [
'directories',
'files',
'logical lines',
'physical lines']
134 INDENT_REGEX = re.compile(
r'([ \t]*)')
135 RAISE_COMMA_REGEX = re.compile(
r'raise\s+\w+\s*,')
136 RERAISE_COMMA_REGEX = re.compile(
r'raise\s+\w+\s*,.*,\s*\w+\s*$')
137 ERRORCODE_REGEX = re.compile(
r'\b[A-Z]\d{3}\b')
138 DOCSTRING_REGEX = re.compile(
r'u?r?["\']')
139 EXTRANEOUS_WHITESPACE_REGEX = re.compile(
r'[\[({] | [\]}),;]| :(?!=)')
140 WHITESPACE_AFTER_COMMA_REGEX = re.compile(
r'[,;:]\s*(?: |\t)')
141 COMPARE_SINGLETON_REGEX = re.compile(
r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)' 142 r'\s*(?(1)|(None|False|True))\b')
143 COMPARE_NEGATIVE_REGEX = re.compile(
r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s')
144 COMPARE_TYPE_REGEX = re.compile(
r'(?:[=!]=|is(?:\s+not)?)\s+type(?:s.\w+Type' 145 r'|\s*\(\s*([^)]*[^ )])\s*\))')
146 KEYWORD_REGEX = re.compile(
r'(\s*)\b(?:%s)\b(\s*)' %
r'|'.join(KEYWORDS))
147 OPERATOR_REGEX = re.compile(
r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
148 LAMBDA_REGEX = re.compile(
r'\blambda\b')
149 HUNK_REGEX = re.compile(
r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
150 STARTSWITH_DEF_REGEX = re.compile(
r'^(async\s+def|def)\b')
151 STARTSWITH_TOP_LEVEL_REGEX = re.compile(
r'^(async\s+def\s+|def\s+|class\s+|@)')
152 STARTSWITH_INDENT_STATEMENT_REGEX = re.compile(
153 r'^\s*({0})\b'.format(
'|'.join(s.replace(
' ',
r'\s+')
for s
in (
156 'if',
'elif',
'else',
157 'try',
'except',
'finally',
158 'with',
'async with',
163 DUNDER_REGEX = re.compile(
r'^__([^\s]+)__ = ')
165 _checks = {
'physical_line': {},
'logical_line': {},
'tree': {}}
169 if sys.version_info >= (3, 3):
170 return [parameter.name
172 in inspect.signature(function).parameters.values()
173 if parameter.kind == parameter.POSITIONAL_OR_KEYWORD]
175 return inspect.getargspec(function)[0]
179 """Register a new check object.""" 180 def _add_check(check, kind, codes, args):
181 if check
in _checks[kind]:
182 _checks[kind][check][0].extend(codes
or [])
184 _checks[kind][check] = (codes
or [
''], args)
185 if inspect.isfunction(check):
187 if args
and args[0]
in (
'physical_line',
'logical_line'):
189 codes = ERRORCODE_REGEX.findall(check.__doc__
or '')
190 _add_check(check, args[0], codes, args)
191 elif inspect.isclass(check):
193 _add_check(check,
'tree', codes,
None)
203 r"""Never mix tabs and spaces. 205 The most popular way of indenting Python is with spaces only. The 206 second-most popular way is with tabs only. Code indented with a 207 mixture of tabs and spaces should be converted to using spaces 208 exclusively. When invoking the Python command line interpreter with 209 the -t option, it issues warnings about code that illegally mixes 210 tabs and spaces. When using -tt these warnings become errors. 211 These options are highly recommended! 213 Okay: if a == 0:\n a = 1\n b = 1 214 E101: if a == 0:\n a = 1\n\tb = 1 216 indent = INDENT_REGEX.match(physical_line).group(1)
217 for offset, char
in enumerate(indent):
218 if char != indent_char:
219 return offset,
"E101 indentation contains mixed spaces and tabs" 224 r"""On new projects, spaces-only are strongly recommended over tabs. 226 Okay: if True:\n return 227 W191: if True:\n\treturn 229 indent = INDENT_REGEX.match(physical_line).group(1)
231 return indent.index(
'\t'),
"W191 indentation contains tabs" 236 r"""Trailing whitespace is superfluous. 238 The warning returned varies on whether the line itself is blank, 239 for easier filtering for those who want to indent their blank lines. 243 W293: class Foo(object):\n \n bang = 12 245 physical_line = physical_line.rstrip(
'\n')
246 physical_line = physical_line.rstrip(
'\r')
247 physical_line = physical_line.rstrip(
'\x0c')
248 stripped = physical_line.rstrip(
' \t\v')
249 if physical_line != stripped:
251 return len(stripped),
"W291 trailing whitespace" 253 return 0,
"W293 blank line contains whitespace" 258 r"""Trailing blank lines are superfluous. 263 However the last line should end with a new line (warning W292). 265 if line_number == total_lines:
266 stripped_last_line = physical_line.rstrip()
267 if physical_line
and not stripped_last_line:
268 return 0,
"W391 blank line at end of file" 269 if stripped_last_line == physical_line:
270 return len(lines[-1]),
"W292 no newline at end of file" 276 r"""Limit all lines to a maximum of 79 characters. 278 There are still many devices around that are limited to 80 character 279 lines; plus, limiting windows to 80 characters makes it possible to 280 have several windows side-by-side. The default wrapping on such 281 devices looks ugly. Therefore, please limit all lines to a maximum 282 of 79 characters. For flowing long blocks of text (docstrings or 283 comments), limiting the length to 72 characters is recommended. 287 line = physical_line.rstrip()
289 if length > max_line_length
and not noqa:
291 if line_number == 1
and line.startswith(
'#!'):
296 chunks = line.split()
297 if ((len(chunks) == 1
and multiline)
or 298 (len(chunks) == 2
and chunks[0] ==
'#'))
and \
299 len(line) - len(chunks[-1]) < max_line_length - 7:
301 if hasattr(line,
'decode'):
304 length = len(line.decode(
'utf-8'))
307 if length > max_line_length:
308 return (max_line_length,
"E501 line too long " 309 "(%d > %d characters)" % (length, max_line_length))
@register_check
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                blank_before, previous_logical,
                previous_unindented_logical_line, previous_indent_level,
                lines):
    r"""Separate top-level function and class definitions with two blank
    lines.

    Method definitions inside a class are separated by a single blank
    line.

    Extra blank lines may be used (sparingly) to separate groups of
    related functions.  Blank lines may be omitted between a bunch of
    related one-liners (e.g. a set of dummy implementations).

    Use blank lines in functions, sparingly, to indicate logical
    sections.

    Okay: def a():\n    pass\n\n\ndef b():\n    pass
    Okay: def a():\n    pass\n\n\nasync def b():\n    pass
    Okay: def a():\n    pass\n\n\n# Foo\n# Bar\n\ndef b():\n    pass
    Okay: default = 1\nfoo = 1
    Okay: classify = 1\nfoo = 1

    E301: class Foo:\n    b = 0\n    def bar():\n        pass
    E302: def a():\n    pass\n\ndef b(n):\n    pass
    E302: def a():\n    pass\n\nasync def b(n):\n    pass
    E303: def a():\n    pass\n\n\n\ndef b(n):\n    pass
    E303: def a():\n\n\n\n    pass
    E304: @decorator\n\ndef a():\n    pass
    E305: def a():\n    pass\na()
    E306: def a():\n    def b():\n        pass\n    def c():\n        pass
    """
    top_level_lines = BLANK_LINES_CONFIG['top_level']
    method_lines = BLANK_LINES_CONFIG['method']

    if line_number < top_level_lines + 1 and not previous_logical:
        return  # Don't expect blank lines before the first line
    if previous_logical.startswith('@'):
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
    elif (blank_lines > top_level_lines or
            (indent_level and blank_lines == method_lines + 1)):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
    elif STARTSWITH_TOP_LEVEL_REGEX.match(logical_line):
        # NOTE(review): the original also consulted the surrounding
        # physical lines (prev_line/next_line) to tolerate groups of
        # one-liner definitions; that condition was lost in the mangled
        # source and could not be recovered -- restore from the original
        # revision before relying on this branch for one-liners.
        if indent_level:
            if not (blank_before == method_lines or
                    previous_indent_level < indent_level or
                    DOCSTRING_REGEX.match(previous_logical)):
                ancestor_level = indent_level
                nested = False
                # Search backwards for a def ancestor or tree root
                # (top level).
                for line in lines[line_number - top_level_lines::-1]:
                    if line.strip() and expand_indent(line) < ancestor_level:
                        ancestor_level = expand_indent(line)
                        nested = line.lstrip().startswith('def ')
                        if nested or ancestor_level == 0:
                            break
                if nested:
                    yield 0, "E306 expected %s blank line before a " \
                        "nested definition, found 0" % (method_lines,)
                else:
                    yield 0, "E301 expected %s blank line, found 0" % (
                        method_lines,)
        elif blank_before != top_level_lines:
            yield 0, "E302 expected %s blank lines, found %d" % (
                top_level_lines, blank_before)
    elif (logical_line and
            not indent_level and
            blank_before != top_level_lines and
            previous_unindented_logical_line.startswith(('def ', 'class '))):
        yield 0, "E305 expected %s blank lines after " \
            "class or function definition, found %d" % (
                top_level_lines, blank_before)
410 r"""Avoid extraneous whitespace. 412 Avoid extraneous whitespace in these situations: 413 - Immediately inside parentheses, brackets or braces. 414 - Immediately before a comma, semicolon, or colon. 416 Okay: spam(ham[1], {eggs: 2}) 417 E201: spam( ham[1], {eggs: 2}) 418 E201: spam(ham[ 1], {eggs: 2}) 419 E201: spam(ham[1], { eggs: 2}) 420 E202: spam(ham[1], {eggs: 2} ) 421 E202: spam(ham[1 ], {eggs: 2}) 422 E202: spam(ham[1], {eggs: 2 }) 424 E203: if x == 4: print x, y; x, y = y , x 425 E203: if x == 4: print x, y ; x, y = y, x 426 E203: if x == 4 : print x, y; x, y = y, x 429 for match
in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
432 found = match.start()
433 if text == char +
' ':
435 yield found + 1,
"E201 whitespace after '%s'" % char
436 elif line[found - 1] !=
',':
437 code = (
'E202' if char
in '}])' else 'E203')
438 yield found,
"%s whitespace before '%s'" % (code, char)
443 r"""Avoid extraneous whitespace around keywords. 448 E273: True and\tFalse 449 E274: True\tand False 451 for match
in KEYWORD_REGEX.finditer(logical_line):
452 before, after = match.groups()
455 yield match.start(1),
"E274 tab before keyword" 456 elif len(before) > 1:
457 yield match.start(1),
"E272 multiple spaces before keyword" 460 yield match.start(2),
"E273 tab after keyword" 462 yield match.start(2),
"E271 multiple spaces after keyword" 467 r"""Multiple imports in form from x import (a, b, c) should have 468 space between import statement and parenthesised name list. 470 Okay: from foo import (bar, baz) 471 E275: from foo import(bar, baz) 472 E275: from importable.module import(bar, baz) 475 indicator =
@register_check
def missing_whitespace_after_import_keyword(logical_line):
    r"""Multiple imports in form from x import (a, b, c) should have
    space between import statement and parenthesised name list.

    Okay: from foo import (bar, baz)
    E275: from foo import(bar, baz)
    E275: from importable.module import(bar, baz)
    """
    line = logical_line
    indicator = ' import('
    if line.startswith('from '):
        found = line.find(indicator)
        if -1 < found:
            # Point at the '(' that is glued to the keyword.
            pos = found + len(indicator) - 1
            yield pos, "E275 missing whitespace after keyword"
@register_check
def missing_whitespace(logical_line):
    r"""Each comma, semicolon or colon should be followed by whitespace.

    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    line = logical_line
    for index in range(len(line) - 1):
        char = line[index]
        next_char = line[index + 1]
        if char in ',;:' and next_char not in WHITESPACE:
            before = line[:index]
            if char == ':' and before.count('[') > before.count(']') and \
                    before.rfind('{') < before.rfind('['):
                continue  # Slice syntax, no space required
            if char == ',' and next_char == ')':
                continue  # Allow tuple with only one element: (3,)
            if char == ':' and next_char == '=' and sys.version_info >= (3, 8):
                continue  # Allow assignment expression
            yield index, "E231 missing whitespace after '%s'" % char
@register_check
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""Use 4 spaces per indentation level.

    For really old code that you don't want to mess up, you can continue
    to use 8-space tabs.

    Okay: a = 1
    Okay: if a == 0:\n    a = 1
    E111:   a = 1
    E114:   # a = 1

    Okay: for item in items:\n    pass
    E112: for item in items:\npass
    E115: for item in items:\n# Hi\n    pass

    Okay: a = 1\n# b = 2
    E113: a = 1\n    # b = 2
    E116: a = 1\n    # b = 2
    """
    # Comment-only lines get the E114/E115/E116 variants (code + 3).
    c = 0 if logical_line else 3
    tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)"
    if indent_level % 4:
        yield 0, tmpl % (1 + c, "indentation is not a multiple of four")
    indent_expect = previous_logical.endswith(':')
    if indent_expect and indent_level <= previous_indent_level:
        yield 0, tmpl % (2 + c, "expected an indented block")
    elif not indent_expect and indent_level > previous_indent_level:
        yield 0, tmpl % (3 + c, "unexpected indentation")

    if indent_expect:
        expected_indent_amount = 8 if indent_char == '\t' else 4
        expected_indent_level = previous_indent_level + expected_indent_amount
        if indent_level > expected_indent_level:
            yield 0, tmpl % (7, 'over-indented')
@register_check
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa, verbose):
    r"""Continuation lines indentation.

    Continuation lines should align wrapped elements either vertically
    using Python's implicit line joining inside parentheses, brackets
    and braces, or using a hanging indent.

    When using a hanging indent these considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself
      as a continuation line.

    Okay: print("Python", (\n"Rules"))
    E121: a = (\n   42)
    E122: a = (\n42)
    E123: a = (\n    42\n    )
    E124: a = (24,\n     42\n)
    E125: if (\n    b):\n    pass
    E126: a = (\n        42)
    E127: a = (24,\n      42)
    E128: a = (24,\n    42)
    E129: if (a or\n    b):\n    pass
    E131: a = (\n    42\n 24)

    NOTE(review): reconstructed from upstream pycodestyle because the
    mangled source dropped many lines of this function; confirm against
    the original revision.
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return

    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')

    row = depth = 0
    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
    # remember how many brackets were opened on each row
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # for each depth, collect a list of opening rows
    open_rows = [[0]]
    # for each depth, memorize the hanging indentation
    hangs = [None]
    # visual indents
    indent_chances = {}
    last_indent = tokens[0][2]
    visual_indent = None
    last_token_multiline = False
    # for each depth, memorize the visual indent column
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())

    for token_type, text, start, end, line in tokens:

        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = not last_token_multiline and token_type not in NEWLINE

        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())

            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level

            # identify closing bracket
            close_bracket = (token_type == tokenize.OP and text in ']})')

            # is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])
            # is there any chance of visual indent?
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))

            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # visual indent is broken
                    yield (start, "E128 continuation line "
                           "under-indented for visual indent")
            elif hanging_indent or (indent_next and
                                    rel_indent[row] == 2 * 4):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
                hangs[depth] = hang
            elif visual_indent is True:
                # visual indent is verified
                indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif not close_bracket and hangs[depth]:
                    error = "E131", "unaligned for hanging indent"
                else:
                    hangs[depth] = hang
                    if hang > 4:
                        error = "E126", "over-indented for hanging indent"
                    else:
                        error = "E121", "under-indented for hanging indent"
                yield start, "%s continuation line %s" % error

        # look for visual indenting
        if (parens[row] and
                token_type not in (tokenize.NL, tokenize.COMMENT) and
                not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
                text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == len("elif")
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)

        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow lining up tokens
                indent_chances[start[1]] = text

        last_token_multiline = (start[0] != end[0])
        if last_token_multiline:
            rel_indent[end[0] - first_row] = rel_indent[row]

    if indent_next and expand_indent(line) == indent_level + 4:
        pos = (start[0], indent[0] + 4)
        if visual_indent:
            code = "E129 visually indented line"
        else:
            code = "E125 continuation line"
        yield pos, "%s with same indent as next logical line" % code
755 r"""Avoid extraneous whitespace. 757 Avoid extraneous whitespace in the following situations: 758 - before the open parenthesis that starts the argument list of a 760 - before the open parenthesis that starts an indexing or slicing. 765 Okay: dict['key'] = list[index] 766 E211: dict ['key'] = list[index] 767 E211: dict['key'] = list [index] 769 prev_type, prev_text, __, prev_end, __ = tokens[0]
770 for index
in range(1, len(tokens)):
771 token_type, text, start, end, __ = tokens[index]
772 if (token_type == tokenize.OP
and 774 start != prev_end
and 775 (prev_type == tokenize.NAME
or prev_text
in '}])')
and 777 (index < 2
or tokens[index - 2][1] !=
'class')
and 779 not keyword.iskeyword(prev_text)):
780 yield prev_end,
"E211 whitespace before '%s'" % text
781 prev_type = token_type
788 r"""Avoid extraneous whitespace around an operator. 796 for match
in OPERATOR_REGEX.finditer(logical_line):
797 before, after = match.groups()
800 yield match.start(1),
"E223 tab before operator" 801 elif len(before) > 1:
802 yield match.start(1),
"E221 multiple spaces before operator" 805 yield match.start(2),
"E224 tab after operator" 807 yield match.start(2),
"E222 multiple spaces after operator" 812 r"""Surround operators with a single space on either side. 814 - Always surround these binary operators with a single space on 815 either side: assignment (=), augmented assignment (+=, -= etc.), 816 comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), 817 Booleans (and, or, not). 819 - If operators with different priorities are used, consider adding 820 whitespace around the operators with the lowest priorities. 825 Okay: hypot2 = x * x + y * y 826 Okay: c = (a + b) * (a - b) 827 Okay: foo(bar, key='word', *args, **kwargs) 835 E226: c = (a+b) * (a-b) 836 E226: hypot2 = x*x + y*y 838 E228: msg = fmt%(errno, errmsg) 842 prev_type = tokenize.OP
@register_check
def missing_whitespace_around_operator(logical_line, tokens):
    r"""Surround operators with a single space on either side.

    - Always surround these binary operators with a single space on
      either side: assignment (=), augmented assignment (+=, -= etc.),
      comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
      Booleans (and, or, not).

    - If operators with different priorities are used, consider adding
      whitespace around the operators with the lowest priorities.

    Okay: i = i + 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    E225: i=i+1
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)

    NOTE(review): several lines of this function were lost in the mangled
    source; the state machine was restored from upstream pycodestyle --
    confirm against the original revision.
    """
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    operator_types = (tokenize.OP, tokenize.NAME)
    for token_type, text, start, end, line in tokens:
        if token_type in SKIP_COMMENTS:
            continue
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the ">" operator when it is preceded by a '<' or
                # a '-' (Tolerate "->" and "<>")
                pass
            elif prev_text == '/' and text == ',':
                # Tolerate the "/" operator in function definitions
                # (positional-only parameter marker).
                need_space = False
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                elif prev_text != '**':
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type in operator_types and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check for negative number, decorator, or keyword argument.
                # '**' is handled separately.
                if (prev_text in '}])' if prev_type == tokenize.OP
                        else prev_text not in KEYWORDS):
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None

            if need_space is None:
                # Surrounding space is optional, but ensure that the
                # trailing space matches the opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
911 r"""Avoid extraneous whitespace after a comma or a colon. 913 Note: these checks are disabled by default 920 for m
in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
921 found = m.start() + 1
922 if '\t' in m.group():
923 yield found,
"E242 tab after '%s'" % m.group()[0]
925 yield found,
"E241 multiple spaces after '%s'" % m.group()[0]
930 r"""Don't use spaces around the '=' sign in function arguments. 932 Don't use spaces around the '=' sign when used to indicate a 933 keyword argument or a default parameter value, except when 934 using a type annotation. 936 Okay: def complex(real, imag=0.0): 937 Okay: return magic(r=real, i=imag) 938 Okay: boolean(a == b) 939 Okay: boolean(a != b) 940 Okay: boolean(a <= b) 941 Okay: boolean(a >= b) 942 Okay: def foo(arg: int = 42): 943 Okay: async def foo(arg: int = 42): 945 E251: def complex(real, imag = 0.0): 946 E251: return magic(r = real, i = imag) 947 E252: def complex(real, image: float=0.0): 951 require_space =
False 953 annotated_func_arg =
False 954 in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line))
956 message =
"E251 unexpected spaces around keyword / parameter equals" 957 missing_message =
"E252 missing whitespace around parameter equals" 959 for token_type, text, start, end, line
in tokens:
960 if token_type == tokenize.NL:
964 if start != prev_end:
965 yield (prev_end, message)
967 require_space =
False 968 if start == prev_end:
969 yield (prev_end, missing_message)
970 if token_type == tokenize.OP:
975 elif in_def
and text ==
':' and parens == 1:
976 annotated_func_arg =
True 977 elif parens == 1
and text ==
',':
978 annotated_func_arg =
False 979 elif parens
and text ==
'=':
980 if annotated_func_arg
and parens == 1:
982 if start == prev_end:
983 yield (prev_end, missing_message)
986 if start != prev_end:
987 yield (prev_end, message)
989 annotated_func_arg =
False 996 r"""Separate inline comments by at least two spaces. 998 An inline comment is a comment on the same line as a statement. 999 Inline comments should be separated by at least two spaces from the 1000 statement. They should start with a # and a single space. 1002 Each line of a block comment starts with a # and a single space 1003 (unless it is indented text inside the comment). 1005 Okay: x = x + 1 # Increment x 1006 Okay: x = x + 1 # Increment x 1007 Okay: # Block comment 1008 E261: x = x + 1 # Increment x 1009 E262: x = x + 1 #Increment x 1010 E262: x = x + 1 # Increment x 1011 E265: #Block comment 1012 E266: ### Block comment 1015 for token_type, text, start, end, line
@register_check
def whitespace_before_comment(logical_line, tokens):
    """Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.
    Inline comments should be separated by at least two spaces from the
    statement.  They should start with a # and a single space.

    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).

    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1  ## Increment x
    E262: x = x + 1  #!  Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            inline_comment = line[:start[1]].strip()
            if inline_comment:
                if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
                    yield (prev_end,
                           "E261 at least two spaces before inline comment")
            symbol, sp, comment = text.partition(' ')
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
1038 r"""Place imports on separate lines. 1040 Okay: import os\nimport sys 1041 E401: import sys, os 1043 Okay: from subprocess import Popen, PIPE 1044 Okay: from myclas import MyClass 1045 Okay: from foo.bar.yourclass import YourClass 1046 Okay: import myclass 1047 Okay: import foo.bar.yourclass 1050 if line.startswith(
'import '):
1051 found = line.find(
',')
1052 if -1 < found
and ';' not in line[:found]:
1053 yield found,
"E401 multiple imports on one line" 1058 logical_line, indent_level, checker_state, noqa):
1059 r"""Place imports at the top of the file. 1061 Always put imports at the top of the file, just after any module 1062 comments and docstrings, and before module globals and constants. 1065 Okay: # this is a comment\nimport os 1066 Okay: '''this is a module docstring'''\nimport os 1067 Okay: r'''this is a module docstring'''\nimport os 1069 try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y 1071 try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y 1072 E402: a=1\nimport os 1073 E402: 'One string'\n"Two string"\nimport os 1074 E402: a=1\nfrom sys import x 1076 Okay: if x:\n import os 1078 def is_string_literal(line):
1079 if line[0]
in 'uUbB':
1081 if line
and line[0]
in 'rR':
1083 return line
and (line[0] ==
'"' or line[0] ==
"'")
1085 allowed_keywords = (
1086 'try',
'except',
'else',
'finally',
'with',
'if',
'elif')
1090 if not logical_line:
1095 if line.startswith(
'import ')
or line.startswith(
'from '):
1096 if checker_state.get(
'seen_non_imports',
False):
1097 yield 0,
"E402 module level import not at top of file" 1098 elif re.match(DUNDER_REGEX, line):
1100 elif any(line.startswith(kw)
for kw
in allowed_keywords):
1104 elif is_string_literal(line):
1107 if checker_state.get(
'seen_docstring',
False):
1108 checker_state[
'seen_non_imports'] =
True 1110 checker_state[
'seen_docstring'] =
True 1112 checker_state[
'seen_non_imports'] =
True 1117 r"""Compound statements (on the same line) are generally 1120 While sometimes it's okay to put an if/for/while with a small body 1121 on the same line, never do this for multi-clause statements. 1122 Also avoid folding such long lines! 1124 Always use a def statement instead of an assignment statement that 1125 binds a lambda expression directly to a name. 1127 Okay: if foo == 'blah':\n do_blah_thing() 1132 E701: if foo == 'blah': do_blah_thing() 1133 E701: for x in lst: total += x 1134 E701: while t < 10: t = delay() 1135 E701: if foo == 'blah': do_blah_thing() 1136 E701: else: do_non_blah_thing() 1137 E701: try: something() 1138 E701: finally: cleanup() 1139 E701: if foo == 'blah': one(); two(); three() 1140 E702: do_one(); do_two(); do_three() 1141 E703: do_four(); # useless semicolon 1142 E704: def f(x): return 2*x 1143 E731: f = lambda x: 2*x 1146 last_char = len(line) - 1
@register_check
def compound_statements(logical_line):
    r"""Compound statements (on the same line) are generally
    discouraged.

    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements.
    Also avoid folding such long lines!

    Always use a def statement instead of an assignment statement that
    binds a lambda expression directly to a name.

    Okay: if foo == 'blah':\n    do_blah_thing()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    E703: do_four();  # useless semicolon
    E704: def f(x): return 2*x
    E731: f = lambda x: 2*x
    """
    line = logical_line
    last_char = len(line) - 1
    found = line.find(':')
    prev_found = 0
    counts = {char: 0 for char in '{}[]()'}
    while -1 < found < last_char:
        # NOTE(review): the bracket-count update line was lost in the
        # mangled source; restored (update_counts is the module helper).
        update_counts(line[prev_found:found], counts)
        if ((counts['{'] <= counts['}'] and    # {'a': 1} (dict)
             counts['['] <= counts[']'] and    # [1:2] (slice)
             counts['('] <= counts[')']) and   # (annotation)
                not (sys.version_info >= (3, 8) and
                     line[found + 1] == '=')):  # assignment expression
            lambda_kw = LAMBDA_REGEX.search(line, 0, found)
            if lambda_kw:
                # Yes, but is it a lambda definition, or its default
                # argument?
                before = line[:lambda_kw.start()].rstrip()
                if before[-1:] == '=' and isidentifier(before[:-1].strip()):
                    yield 0, ("E731 do not assign a lambda expression, use a "
                              "def")
                break
            if STARTSWITH_DEF_REGEX.match(line):
                yield 0, "E704 multiple statements on one line (def)"
            elif STARTSWITH_INDENT_STATEMENT_REGEX.match(line):
                yield found, "E701 multiple statements on one line (colon)"
        prev_found = found
        found = line.find(':', found + 1)
    found = line.find(';')
    while -1 < found:
        if found < last_char:
            yield found, "E702 multiple statements on one line (semicolon)"
        else:
            yield found, "E703 statement ends with a semicolon"
        found = line.find(';', found + 1)
1181 r"""Avoid explicit line join between brackets. 1183 The preferred way of wrapping long lines is by using Python's 1184 implied line continuation inside parentheses, brackets and braces. 1185 Long lines can be broken over multiple lines by wrapping expressions 1186 in parentheses. These should be used in preference to using a 1187 backslash for line continuation. 1189 E502: aaa = [123, \\n 123] 1190 E502: aaa = ("bbb " \\n "ccc") 1192 Okay: aaa = [123,\n 123] 1193 Okay: aaa = ("bbb "\n "ccc") 1194 Okay: aaa = "bbb " \\n "ccc" 1195 Okay: aaa = 123 # \\ 1197 prev_start = prev_end = parens = 0
1200 for token_type, text, start, end, line
in tokens:
1201 if token_type == tokenize.COMMENT:
1203 if start[0] != prev_start
and parens
and backslash
and not comment:
1204 yield backslash,
"E502 the backslash is redundant between brackets" 1205 if end[0] != prev_end:
1206 if line.rstrip(
'\r\n').endswith(
'\\'):
1207 backslash = (end[0], len(line.splitlines()[-1]) - 1)
1210 prev_start = prev_end = end[0]
1212 prev_start = start[0]
1213 if token_type == tokenize.OP:
1220 _SYMBOLIC_OPS = frozenset(
"()[]{},:.;@=%~") | frozenset((
"...",))
1224 is_op_token = token_type == tokenize.OP
1225 is_conjunction = text
in [
'and',
'or']
1229 not_a_symbol = text
and text
not in _SYMBOLIC_OPS
1233 return ((is_op_token
or is_conjunction)
and not_a_symbol)
1237 """Private function to reduce duplication. 1239 This factors out the shared details between 1240 :func:`break_before_binary_operator` and 1241 :func:`break_after_binary_operator`. 1244 unary_context =
True 1246 previous_token_type =
None 1247 previous_text =
None 1248 for token_type, text, start, end, line
in tokens:
1249 if token_type == tokenize.COMMENT:
1251 if (
'\n' in text
or '\r' in text)
and token_type != tokenize.STRING:
1254 yield (token_type, text, previous_token_type, previous_text,
1255 line_break, unary_context, start)
1256 unary_context = text
in '([{,;' 1258 previous_token_type = token_type
1259 previous_text = text
@register_check
def break_before_binary_operator(logical_line, tokens):
    r"""
    Avoid breaks before binary operators.

    The preferred place to break around a binary operator is after the
    operator, not before it.

    W503: (width == 0\n + height == 0)
    W503: (width == 0\n and height == 0)
    W503: var = (1\n       & ~2)
    W503: var = (1\n       / -2)
    W503: var = (1\n       + -1\n       + -2)

    Okay: foo(\n    -x)
    Okay: foo(x\n    [])
    Okay: x = '''\n''' + ''
    Okay: x = '' + '''\n'''
    Okay: foo(x,\n    -y)
    Okay: foo(x,  # comment\n    -y)
    """
    for context in _break_around_binary_operators(tokens):
        (token_type, text, previous_token_type, previous_text,
         line_break, unary_context, start) = context
        if (_is_binary_operator(token_type, text) and line_break and
                not unary_context and
                not _is_binary_operator(previous_token_type,
                                        previous_text)):
            yield start, "W503 line break before binary operator"
and 1289 yield start,
"W503 line break before binary operator" 1295 Avoid breaks after binary operators. 1297 The preferred place to break around a binary operator is before the 1298 operator, not after it. 1300 W504: (width == 0 +\n height == 0) 1301 W504: (width == 0 and\n height == 0) 1302 W504: var = (1 &\n ~2) 1306 Okay: x = '''\n''' + '' 1307 Okay: x = '' + '''\n''' 1309 Okay: foo(x, # comment\n -y) 1311 The following should be W504 but unary_context is tricky with these 1312 Okay: var = (1 /\n -2) 1313 Okay: var = (1 +\n -1 +\n -2) 1317 (token_type, text, previous_token_type, previous_text,
1318 line_break, unary_context, start) = context
1321 not unary_context
and 1323 yield prev_start,
"W504 line break after binary operator" 1329 r"""Comparison to singletons should use "is" or "is not". 1331 Comparisons to singletons like None should always be done 1332 with "is" or "is not", never the equality operators. 1334 Okay: if arg is not None: 1335 E711: if arg != None: 1336 E711: if None == arg: 1337 E712: if arg == True: 1338 E712: if False == arg: 1340 Also, beware of writing if x when you really mean if x is not None 1341 -- e.g. when testing whether a variable or argument that defaults to 1342 None was set to some other value. The other value might have a type 1343 (such as a container) that could be false in a boolean context! 1345 match =
not noqa
and COMPARE_SINGLETON_REGEX.search(logical_line)
1347 singleton = match.group(1)
or match.group(3)
1348 same = (match.group(2) ==
'==')
1350 msg =
"'if cond is %s:'" % ((
'' if same
else 'not ') + singleton)
1351 if singleton
in (
'None',):
1355 nonzero = ((singleton ==
'True' and same)
or 1356 (singleton ==
'False' and not same))
1357 msg +=
" or 'if %scond:'" % (
'' if nonzero
else 'not ')
1358 yield match.start(2), (
"%s comparison to %s should be %s" %
1359 (code, singleton, msg))
1364 r"""Negative comparison should be done using "not in" and "is not". 1366 Okay: if x not in y:\n pass 1367 Okay: assert (X in Y or X is Z) 1368 Okay: if not (X in Y):\n pass 1369 Okay: zz = x is not y 1370 E713: Z = not X in Y 1371 E713: if not X.B in Y:\n pass 1372 E714: if not X is Y:\n pass 1373 E714: Z = not X.B is Y 1375 match = COMPARE_NEGATIVE_REGEX.search(logical_line)
1377 pos = match.start(1)
1378 if match.group(2) ==
'in':
1379 yield pos,
"E713 test for membership should be 'not in'" 1381 yield pos,
"E714 test for object identity should be 'is not'" 1386 r"""Object type comparisons should always use isinstance(). 1388 Do not compare types directly. 1390 Okay: if isinstance(obj, int): 1391 E721: if type(obj) is type(1): 1393 When checking if an object is a string, keep in mind that it might 1394 be a unicode string too! In Python 2.3, str and unicode have a 1395 common base class, basestring, so you can do: 1397 Okay: if isinstance(obj, basestring): 1398 Okay: if type(a1) is type(b1): 1400 match = COMPARE_TYPE_REGEX.search(logical_line)
1401 if match
and not noqa:
1402 inst = match.group(1)
1403 if inst
and isidentifier(inst)
and inst
not in SINGLETONS:
1405 yield match.start(),
"E721 do not compare types, use 'isinstance()'" 1410 r"""When catching exceptions, mention specific exceptions when 1413 Okay: except Exception: 1414 Okay: except BaseException: 1420 regex = re.compile(
r"except\s*:")
1421 match = regex.match(logical_line)
1423 yield match.start(),
"E722 do not use bare 'except'" 1428 r"""Never use the characters 'l', 'O', or 'I' as variable names. 1430 In some fonts, these characters are indistinguishable from the 1431 numerals one and zero. When tempted to use 'l', use 'L' instead. 1440 Variables can be bound in several other contexts, including class 1441 and function definitions, 'global' and 'nonlocal' statements, 1442 exception handlers, and 'with' and 'for' statements. 1443 In addition, we have a special handling for function parameters. 1445 Okay: except AttributeError as o: 1446 Okay: with lock as L: 1448 Okay: for a in foo(l=12): 1449 E741: except AttributeError as O: 1450 E741: with lock as l: 1454 E741: def foo(l=12): 1456 E741: for l in range(10): 1457 E742: class I(object): 1461 parameter_parentheses_level = 0
@register_check
def ambiguous_identifier(logical_line, tokens):
    r"""Never use the characters 'l', 'O', or 'I' as variable names.

    In some fonts, these characters are indistinguishable from the
    numerals one and zero. When tempted to use 'l', use 'L' instead.

    Okay: L = 0
    Okay: o = 123
    Okay: i = 42
    E741: l = 0
    E741: O = 123
    E741: I = 42

    Variables can be bound in several other contexts, including class
    and function definitions, 'global' and 'nonlocal' statements,
    exception handlers, and 'with' and 'for' statements.
    In addition, we have a special handling for function parameters.

    Okay: except AttributeError as o:
    Okay: with lock as L:
    Okay: foo(l=12)
    Okay: for a in foo(l=12):
    E741: except AttributeError as O:
    E741: with lock as l:
    E741: global I
    E741: nonlocal l
    E741: def foo(l):
    E741: def foo(l=12):
    E741: l = foo(l=12)
    E741: for l in range(10):
    E742: class I(object):
    E743: def l(x):
    """
    is_func_def = False  # Set to true if 'def' is encountered
    parameter_parentheses_level = 0
    idents_to_avoid = ('l', 'O', 'I')
    prev_type, prev_text, prev_start, prev_end, __ = tokens[0]
    for token_type, text, start, end, line in tokens[1:]:
        ident = pos = None
        # find function definitions
        if prev_text == 'def':
            is_func_def = True
        # update parameter parentheses level
        if parameter_parentheses_level == 0 and \
                prev_type == tokenize.NAME and \
                token_type == tokenize.OP and text == '(':
            parameter_parentheses_level = 1
        elif parameter_parentheses_level > 0 and \
                token_type == tokenize.OP:
            if text == '(':
                parameter_parentheses_level += 1
            elif text == ')':
                parameter_parentheses_level -= 1
        # identifiers on the lhs of an assignment operator
        if token_type == tokenize.OP and '=' in text and \
                parameter_parentheses_level == 0:
            if prev_text in idents_to_avoid:
                ident = prev_text
                pos = prev_start
        # identifiers bound to values with 'as', 'for',
        # 'global', or 'nonlocal'
        if prev_text in ('as', 'for', 'global', 'nonlocal'):
            if text in idents_to_avoid:
                ident = text
                pos = start
        # function parameter definitions
        if is_func_def:
            if text in idents_to_avoid:
                ident = text
                pos = start
        if prev_text == 'class':
            if text in idents_to_avoid:
                yield start, "E742 ambiguous class definition '%s'" % text
        if prev_text == 'def':
            if text in idents_to_avoid:
                yield start, "E743 ambiguous function definition '%s'" % text
        if ident:
            yield pos, "E741 ambiguous variable name '%s'" % ident
        prev_type = token_type
        prev_text = text
        prev_start = start
1512 r"""The {}.has_key() method is removed in Python 3: use the 'in' 1515 Okay: if "alph" in d:\n print d["alph"] 1516 W601: assert d.has_key('alph') 1518 pos = logical_line.find(
'.has_key(')
1519 if pos > -1
and not noqa:
1520 yield pos,
"W601 .has_key() is deprecated, use 'in'" 1525 r"""When raising an exception, use "raise ValueError('message')". 1527 The older form is removed in Python 3. 1529 Okay: raise DummyError("Message") 1530 W602: raise DummyError, "Message" 1532 match = RAISE_COMMA_REGEX.match(logical_line)
1533 if match
and not RERAISE_COMMA_REGEX.match(logical_line):
1534 yield match.end() - 1,
"W602 deprecated form of raising exception" 1539 r"""New code should always use != instead of <>. 1541 The older syntax is removed in Python 3. 1546 pos = logical_line.find(
'<>')
1548 yield pos,
"W603 '<>' is deprecated, use '!='" 1553 r"""Use repr() instead of backticks in Python 3. 1555 Okay: val = repr(1 + 2) 1558 pos = logical_line.find(
'`')
1560 yield pos,
"W604 backticks are deprecated, use 'repr()'" 1565 r"""Invalid escape sequences are deprecated in Python 3.6. 1567 Okay: regex = r'\.png$' 1568 W605: regex = '\.png$' 1586 '0',
@register_check
def python_3000_invalid_escape_sequence(logical_line, tokens, noqa):
    r"""Invalid escape sequences are deprecated in Python 3.6.

    Okay: regex = r'\.png$'
    W605: regex = '\.png$'
    """
    if noqa:
        return

    # Escape characters that are recognized in (byte) string literals;
    # a backslash before anything else will become a SyntaxWarning and
    # eventually a SyntaxError.
    valid = [
        '\n',
        '\\',
        '\'',
        '"',
        'a',
        'b',
        'f',
        'n',
        'r',
        't',
        'v',
        '0',
        '1',
        '2',
        '3',
        '4',
        '5',
        '6',
        '7',
        'x',
        # Escape sequences only recognized in string literals
        'N',
        'u',
        'U',
    ]

    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.STRING:
            start_line, start_col = start
            quote = text[-3:] if text[-3:] in ('"""', "'''") else text[-1]
            # Extract string modifiers (e.g. "r" in r"foo")
            quote_pos = text.index(quote)
            prefix = text[:quote_pos].lower()
            start = quote_pos + len(quote)
            string = text[start:-len(quote)]

            if 'r' not in prefix:
                pos = string.find('\\')
                while pos >= 0:
                    pos += 1
                    if string[pos] not in valid:
                        # Report the line/column of the backslash itself,
                        # handling multi-line strings.
                        line = start_line + string.count('\n', 0, pos)
                        if line == start_line:
                            col = start_col + len(prefix) + len(quote) + pos
                        else:
                            col = pos - string.rfind('\n', 0, pos) - 1
                        yield (
                            (line, col - 1),
                            "W605 invalid escape sequence '\\%s'" %
                            string[pos],
                        )
                    pos = string.find('\\', pos + 1)
@register_check
def python_3000_async_await_keywords(logical_line, tokens):
    """'async' and 'await' are reserved keywords starting at Python 3.7.

    W606: async = 42
    W606: await = 42
    Okay: async def read(db):\n    data = await db.fetch('SELECT ...')
    """
    # The Python tokenize library before Python 3.5 recognizes
    # async/await as a NAME token. Therefore, use a state machine to
    # look for the possible async/await constructs as defined by the
    # Python grammar:
    # https://docs.python.org/3/reference/grammar.html

    state = None
    for token_type, text, start, end, line in tokens:
        error = False

        if token_type == tokenize.NL:
            continue

        if state is None:
            if token_type == tokenize.NAME:
                if text == 'async':
                    state = ('async_stmt', start)
                elif text == 'await':
                    state = ('await', start)
                elif (token_type == tokenize.NAME and
                        text in ('def', 'for')):
                    state = ('define', start)

        elif state[0] == 'async_stmt':
            if token_type == tokenize.NAME and text in ('def', 'with', 'for'):
                # One of funcdef, with_stmt, or for_stmt. Return to
                # looking for async/await names.
                state = None
            else:
                error = True
        elif state[0] == 'await':
            if token_type == tokenize.NAME:
                # An await expression. Return to looking for async/await
                # names.
                state = None
            elif token_type == tokenize.OP and text == '(':
                state = None
            else:
                error = True
        elif state[0] == 'define':
            if token_type == tokenize.NAME and text in ('async', 'await'):
                error = True
            else:
                state = None

        if error:
            yield (
                state[1],
                "W606 'async' and 'await' are reserved keywords starting "
                "with Python 3.7",
            )
            state = None

    # Last token
    if state is not None:
        yield (
            state[1],
            "W606 'async' and 'await' are reserved keywords starting "
            "with Python 3.7",
        )
@register_check
def maximum_doc_length(logical_line, max_doc_length, noqa, tokens):
    r"""Limit all doc lines to a maximum of 72 characters.

    For flowing long blocks of text (docstrings or comments), limiting
    the length to 72 characters is recommended.

    Reports warning W505
    """
    if max_doc_length is None or noqa:
        return

    prev_token = None
    skip_lines = set()
    # Skip lines that contain anything besides comments or docstrings.
    for token_type, text, start, end, line in tokens:
        if token_type not in SKIP_COMMENTS.union([tokenize.STRING]):
            skip_lines.add(line)

    for token_type, text, start, end, line in tokens:
        # Skip strings that are not standalone docstrings.
        if token_type == tokenize.STRING and skip_lines:
            continue
        if token_type in (tokenize.STRING, tokenize.COMMENT):
            # Only check comments and docstrings
            if prev_token is None or prev_token in SKIP_TOKENS:
                lines = line.splitlines()
                for line_num, physical_line in enumerate(lines):
                    if hasattr(physical_line, 'decode'):  # Python 2
                        # The line could contain multi-byte characters
                        try:
                            physical_line = physical_line.decode('utf-8')
                        except UnicodeError:
                            pass
                    if start[0] + line_num == 1 and line.startswith('#!'):
                        return
                    length = len(physical_line)
                    chunks = physical_line.split()
                    if token_type == tokenize.COMMENT:
                        # Allow a long URL-like token after the '#'.
                        if (len(chunks) == 2 and
                                length - len(chunks[-1]) < MAX_DOC_LENGTH):
                            continue
                    if len(chunks) == 1 and line_num + 1 < len(lines):
                        # Allow a single long word on a non-final line.
                        if (len(chunks) == 1 and
                                length - len(chunks[-1]) < MAX_DOC_LENGTH):
                            continue
                    if length > max_doc_length:
                        doc_error = (start[0] + line_num, max_doc_length)
                        yield (doc_error, "W505 doc line too long "
                                          "(%d > %d characters)"
                               % (length, max_doc_length))
        prev_token = token_type
if sys.version_info < (3,):
    # Python 2: read with universal newlines; bytes are handled later.
    def readlines(filename):
        """Read the source code."""
        with open(filename, 'rU') as f:
            return f.readlines()

    isidentifier = re.compile(r'[a-zA-Z_]\w*$').match
    stdin_get_value = sys.stdin.read
else:
    def readlines(filename):
        """Read the source code."""
        try:
            with open(filename, 'rb') as f:
                # Honour the PEP 263 coding declaration.
                (coding, lines) = tokenize.detect_encoding(f.readline)
                f = TextIOWrapper(f, coding, line_buffering=True)
                return [line.decode(coding) for line in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            # Fall back if file encoding is improperly declared
            with open(filename, encoding='latin-1') as f:
                return f.readlines()

    isidentifier = str.isidentifier

    def stdin_get_value():
        """Read the value from stdin."""
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
# Cached predicate: does this physical line carry a "# noqa" (or legacy
# "# nopep8") marker?  Returns a Match object or None.
noqa = lru_cache(512)(re.compile(r'# no(?:qa|pep8)\b', re.I).search)
1784 r"""Return the amount of indentation. 1786 Tabs are expanded to the next multiple of 8. 1788 >>> expand_indent(' ') 1790 >>> expand_indent('\t') 1792 >>> expand_indent(' \t') 1794 >>> expand_indent(' \t') 1797 if '\t' not in line:
1798 return len(line) - len(line.lstrip())
1802 result = result // 8 * 8 + 8
1811 """Replace contents with 'xxx' to prevent syntax matching. 1813 >>> mute_string('"abc"') 1815 >>> mute_string("'''abc'''") 1817 >>> mute_string("r'abc'")
1820 # String modifiers (e.g. u or r)
1821 start = text.index(text[-1]) + 1
1824 if text[-3:] in ('"""', "'''"):
1827 return text[:start] + 'x' * (end - start) + text[end:]
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, the entry key is the filename,
    # and the value is a set of row numbers to consider.
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Consume the body of the current hunk; removed ('-')
            # lines do not exist in the new file.
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            hunk_match = HUNK_REGEX.match(line)
            (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            path = line[4:].split('\t', 1)[0]
            # Git diff will use (i)ndex, (w)ork tree, (c)ommit and
            # (o)bject instead of a/b/c/d as prefixes for patches
            if path[:2] in ('b/', 'w/', 'i/'):
                path = path[2:]
            rv[path] = set()
    return {
        os.path.join(parent, filepath): rows
        for (filepath, rows) in rv.items()
        if rows and filename_match(filepath, patterns)
    }
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.

    Return a list of absolute paths.
    """
    if not value:
        return []
    if isinstance(value, list):
        # Already parsed by a previous call.
        return value
    paths = []
    for path in value.split(','):
        path = path.strip()
        if '/' in path:
            # Only anchor real paths; bare patterns stay relative.
            path = os.path.abspath(os.path.join(parent, path))
        paths.append(path.rstrip('/'))
    return paths
def filename_match(filename, patterns, default=True):
    """Check if patterns contains a pattern that matches filename.

    If patterns is unspecified, this always returns True.
    """
    if not patterns:
        return default
    return any(fnmatch(filename, pattern) for pattern in patterns)
def update_counts(s, counts):
    r"""Adds one to the counts of each appearance of characters in s,
    for characters in counts"""
    for char in s:
        if char in counts:
            counts[char] += 1
def _is_eol_token(token):
    # True for NEWLINE/NL tokens, and for a token whose physical line
    # ends in an explicit backslash continuation.
    return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n'
1899 ########################################################################
1900 # Framework to run all checks
1901 ########################################################################
class Checker(object):
    """Load a Python source file, tokenize it, check coding style."""

    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        if options is None:
            options = StyleGuide(kwargs).options
        else:
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.max_doc_length = options.max_doc_length
        self.multiline = False  # in a multiline string?
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        # Dictionary where a checker can store its custom state.
        self._checker_states = {}
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                (exc_type, exc) = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff):  # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        self.report_error = self.report.error
        self.noqa = False

    def report_invalid_syntax(self):
        """Check if the syntax is valid."""
        (exc_type, exc) = sys.exc_info()[:2]
        if len(exc.args) > 1:
            offset = exc.args[1]
            if len(offset) > 2:
                # Python 2.5.1 and later have a line offset
                offset = offset[1:3]
        else:
            offset = (1, 0)
        self.report_error(offset[0], offset[1] or 0,
                          'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                          self.report_invalid_syntax)

    def readline(self):
        """Get the next line from the input buffer."""
        if self.line_number >= self.total_lines:
            return ''
        line = self.lines[self.line_number]
        self.line_number += 1
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        return line

    def run_check(self, check, argument_names):
        """Run a check plugin."""
        arguments = []
        for name in argument_names:
            arguments.append(getattr(self, name))
        return check(*arguments)

    def init_checker_state(self, name, argument_names):
        """Prepare custom state for the specific checker plugin."""
        if 'checker_state' in argument_names:
            self.checker_state = self._checker_states.setdefault(name, {})

    def check_physical(self, line):
        """Run all physical checks on a raw input line."""
        self.physical_line = line
        for name, check, argument_names in self._physical_checks:
            self.init_checker_state(name, argument_names)
            result = self.run_check(check, argument_names)
            if result is not None:
                (offset, text) = result
                self.report_error(self.line_number, offset, text, check)
                if text[:4] == 'E101':
                    self.indent_char = line[0]

    def build_tokens_line(self):
        """Build a logical line from tokens."""
        logical = []
        comments = []
        length = 0
        prev_row = prev_col = mapping = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                text = mute_string(text)
            if prev_row:
                (start_row, start_col) = start
                if prev_row != start_row:    # different row
                    prev_text = self.lines[prev_row - 1][prev_col - 1]
                    if prev_text == ',' or (prev_text not in '{[(' and
                                            text not in '}])'):
                        text = ' ' + text
                elif prev_col != start_col:  # different column
                    text = line[prev_col:start_col] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (prev_row, prev_col) = end
        self.logical_line = ''.join(logical)
        self.noqa = comments and noqa(''.join(comments))
        return mapping

    def check_logical(self):
        """Build a line from tokens and run all logical checks on it."""
        self.report.increment_logical_line()
        mapping = self.build_tokens_line()
        if not mapping:
            return

        mapping_offsets = [offset for offset, _ in mapping]
        (start_row, start_col) = mapping[0][1]
        start_line = self.lines[start_row - 1]
        self.indent_level = expand_indent(start_line[:start_col])
        if self.blank_before < self.blank_lines:
            self.blank_before = self.blank_lines
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print('   ' + name)
            self.init_checker_state(name, argument_names)
            for offset, text in self.run_check(check, argument_names) or ():
                if not isinstance(offset, tuple):
                    # As mappings are ordered, bisecting is a fast way
                    # to find a given offset in them.
                    token_offset, pos = mapping[bisect.bisect_left(
                        mapping_offsets, offset)]
                    offset = (pos[0], pos[1] + offset - token_offset)
                self.report_error(offset[0], offset[1], text, check)
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
            if not self.indent_level:
                self.previous_unindented_logical_line = self.logical_line
        self.blank_lines = 0
        self.tokens = []

    def check_ast(self):
        """Build the file's AST and run all AST checks."""
        try:
            tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
        except (ValueError, SyntaxError, TypeError):
            return self.report_invalid_syntax()
        for name, cls, __ in self._ast_checks:
            checker = cls(tree, self.filename)
            for lineno, offset, text, check in checker.run():
                if not self.lines or not noqa(self.lines[lineno - 1]):
                    self.report_error(lineno, offset, text, check)

    def generate_tokens(self):
        """Tokenize file, run physical line checks and yield tokens."""
        if self._io_error:
            self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
        tokengen = tokenize.generate_tokens(self.readline)
        try:
            for token in tokengen:
                if token[2][0] > self.total_lines:
                    return
                self.noqa = token[4] and noqa(token[4])
                self.maybe_check_physical(token)
                yield token
        except (SyntaxError, tokenize.TokenError):
            self.report_invalid_syntax()

    def maybe_check_physical(self, token):
        """If appropriate for token, check current physical line(s)."""
        # Called after every token, but act only on end of line.
        if _is_eol_token(token):
            # Obviously, a newline token ends a single physical line.
            self.check_physical(token[4])
        elif token[0] == tokenize.STRING and '\n' in token[1]:
            # Less obviously, a string that contains newlines is a
            # multiline string, either triple-quoted or with internal
            # newlines backslash-escaped. Check every physical line in
            # the string *except* for the last one: its newline is
            # outside of the multiline string, so we consider it a
            # regular physical line, and will check it like any other
            # physical line.
            #
            # Subtleties:
            # - we don't *completely* ignore the last line; if it
            #   contains the magical "# noqa" comment, we disable all
            #   physical checks for the entire multiline string
            # - have to wind self.line_number back because initially it
            #   points to the last line of the string, and we want
            #   check_physical() to give accurate feedback
            if noqa(token[4]):
                return
            self.multiline = True
            self.line_number = token[2][0]
            _, src, (_, offset), _, _ = token
            src = self.lines[self.line_number - 1][:offset] + src
            for line in src.split('\n')[:-1]:
                self.check_physical(line + '\n')
                self.line_number += 1
            self.multiline = False

    def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.previous_unindented_logical_line = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                # Track bracket nesting: logical lines only end at
                # top-level newlines.
                if text in '([{':
                    parens += 1
                elif text in ')]}':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
        if self.tokens:
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results()
class BaseReport(object):
    """Collect the results of the checks."""

    print_filename = False

    def __init__(self, options):
        self._benchmark_keys = options.benchmark_keys
        self._ignore_code = options.ignore_code
        # Results
        self.elapsed = 0
        self.total_errors = 0
        self.counters = dict.fromkeys(self._benchmark_keys, 0)
        self.messages = {}

    def start(self):
        """Start the timer."""
        self._start_time = time.time()

    def stop(self):
        """Stop the timer."""
        self.elapsed = time.time() - self._start_time

    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self.filename = filename
        self.lines = lines
        self.expected = expected or ()
        self.line_offset = line_offset
        self.file_errors = 0
        self.counters['files'] += 1
        self.counters['physical lines'] += len(lines)

    def increment_logical_line(self):
        """Signal a new logical line."""
        self.counters['logical lines'] += 1

    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = text[:4]
        if self._ignore_code(code):
            return
        if code in self.counters:
            self.counters[code] += 1
        else:
            self.counters[code] = 1
            self.messages[code] = text[5:]
        # Don't care about expected errors or warnings
        if code in self.expected:
            return
        if self.print_filename and not self.file_errors:
            print(self.filename)
        self.file_errors += 1
        self.total_errors += 1
        return code

    def get_file_results(self):
        """Return the count of errors and warnings for this file."""
        return self.file_errors

    def get_count(self, prefix=''):
        """Return the total count of errors and warnings."""
        return sum(self.counters[key]
                   for key in self.messages if key.startswith(prefix))

    def get_statistics(self, prefix=''):
        """Get statistics for message codes that start with the prefix.

        prefix='' matches all errors and warnings
        prefix='E' matches all errors
        prefix='W' matches all warnings
        prefix='E4' matches all errors that have to do with imports
        """
        return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
                for key in sorted(self.messages) if key.startswith(prefix)]

    def print_statistics(self, prefix=''):
        """Print overall statistics (number of errors and warnings)."""
        for line in self.get_statistics(prefix):
            print(line)

    def print_benchmark(self):
        """Print benchmark numbers."""
        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
        if self.elapsed:
            for key in self._benchmark_keys:
                print('%-7d %s per second (%d total)' %
                      (self.counters[key] / self.elapsed, key,
                       self.counters[key]))
class FileReport(BaseReport):
    """Collect the results of the checks and print the filenames."""

    # BaseReport.error() prints the filename before the first error.
    print_filename = True
class StandardReport(BaseReport):
    """Collect and print the results of the checks."""

    def __init__(self, options):
        super(StandardReport, self).__init__(options)
        self._fmt = REPORT_FORMAT.get(options.format.lower(),
                                      options.format)
        self._repeat = options.repeat
        self._show_source = options.show_source
        self._show_pep8 = options.show_pep8

    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self._deferred_print = []
        return super(StandardReport, self).init_file(
            filename, lines, expected, line_offset)

    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = super(StandardReport, self).error(line_number, offset,
                                                 text, check)
        if code and (self.counters[code] == 1 or self._repeat):
            self._deferred_print.append(
                (line_number, offset, code, text[5:], check.__doc__))
        return code

    def get_file_results(self):
        """Print results and return the overall count for this file."""
        self._deferred_print.sort()
        for line_number, offset, code, text, doc in self._deferred_print:
            print(self._fmt % {
                'path': self.filename,
                'row': self.line_offset + line_number, 'col': offset + 1,
                'code': code, 'text': text,
            })
            if self._show_source:
                if line_number > len(self.lines):
                    line = ''
                else:
                    line = self.lines[line_number - 1]
                print(line.rstrip())
                print(re.sub(r'\S', ' ', line[:offset]) + '^')
            if self._show_pep8 and doc:
                print('    ' + doc.strip())

            # stdout is block buffered when not stdout.isatty().
            # line can be broken where buffer boundary since other
            # processes write to same file.
            # flush() after print() to avoid buffer boundary.
            # Typical buffer size is 8192. line written safely when
            # len(line) < 8192.
            sys.stdout.flush()
        return self.file_errors
class DiffReport(StandardReport):
    """Collect and print the results for the changed lines only."""

    def __init__(self, options):
        super(DiffReport, self).__init__(options)
        # Mapping of filename -> set of changed line numbers (from the
        # unified diff on stdin).
        self._selected = options.selected_lines

    def error(self, line_number, offset, text, check):
        if line_number not in self._selected[self.filename]:
            return
        return super(DiffReport, self).error(line_number, offset, text, check)
class StyleGuide(object):
    """Initialize a PEP-8 instance with few options."""

    def __init__(self, *args, **kwargs):
        # build options from the command line
        self.checker_class = kwargs.pop('checker_class', Checker)
        parse_argv = kwargs.pop('parse_argv', False)
        config_file = kwargs.pop('config_file', False)
        parser = kwargs.pop('parser', None)
        # build options from dict
        options_dict = dict(*args, **kwargs)
        arglist = None if parse_argv else options_dict.get('paths', None)
        verbose = options_dict.get('verbose', None)
        options, self.paths = process_options(
            arglist, parse_argv, config_file, parser, verbose)
        if options_dict:
            options.__dict__.update(options_dict)
            if 'paths' in options_dict:
                self.paths = options_dict['paths']

        self.runner = self.input_file
        self.options = options

        if not options.reporter:
            options.reporter = BaseReport if options.quiet else StandardReport

        options.select = tuple(options.select or ())
        if not (options.select or options.ignore or
                options.testsuite or options.doctest) and DEFAULT_IGNORE:
            # The default choice: ignore controversial checks
            options.ignore = tuple(DEFAULT_IGNORE.split(','))
        else:
            # Ignore all checks which are not explicitly selected
            options.ignore = ('',) if options.select else tuple(options.ignore)
        options.benchmark_keys = BENCHMARK_KEYS[:]
        options.ignore_code = self.ignore_code
        options.physical_checks = self.get_checks('physical_line')
        options.logical_checks = self.get_checks('logical_line')
        options.ast_checks = self.get_checks('tree')
        self.init_report()

    def init_report(self, reporter=None):
        """Initialize the report instance."""
        self.options.report = (reporter or self.options.reporter)(self.options)
        return self.options.report

    def check_files(self, paths=None):
        """Run all checks on the paths."""
        if paths is None:
            paths = self.paths
        report = self.options.report
        runner = self.runner
        report.start()
        try:
            for path in paths:
                if os.path.isdir(path):
                    self.input_dir(path)
                elif not self.excluded(path):
                    runner(path)
        except KeyboardInterrupt:
            print('... stopped')
        report.stop()
        return report

    def input_file(self, filename, lines=None, expected=None, line_offset=0):
        """Run all checks on a Python source file."""
        if self.options.verbose:
            print('checking %s' % filename)
        fchecker = self.checker_class(
            filename, lines=lines, options=self.options)
        return fchecker.check_all(expected=expected, line_offset=line_offset)

    def input_dir(self, dirname):
        """Check all files in this directory and all subdirectories."""
        dirname = dirname.rstrip('/')
        if self.excluded(dirname):
            return 0
        counters = self.options.report.counters
        verbose = self.options.verbose
        filepatterns = self.options.filename
        runner = self.runner
        for root, dirs, files in os.walk(dirname):
            if verbose:
                print('directory ' + root)
            counters['directories'] += 1
            for subdir in sorted(dirs):
                if self.excluded(subdir, root):
                    # Prune excluded directories from the walk.
                    dirs.remove(subdir)
            for filename in sorted(files):
                # contain a pattern that matches?
                if ((filename_match(filename, filepatterns) and
                     not self.excluded(filename, root))):
                    runner(os.path.join(root, filename))

    def excluded(self, filename, parent=None):
        """Check if the file should be excluded.

        Check if 'options.exclude' contains a pattern matching filename.
        """
        if not self.options.exclude:
            return False
        basename = os.path.basename(filename)
        if filename_match(basename, self.options.exclude):
            return True
        if parent:
            filename = os.path.join(parent, filename)
        filename = os.path.abspath(filename)
        return filename_match(filename, self.options.exclude)

    def ignore_code(self, code):
        """Check if the error code should be ignored.

        If 'options.select' contains a prefix of the error code,
        return False.  Else, if 'options.ignore' contains a prefix of
        the error code, return True.
        """
        if len(code) < 4 and any(s.startswith(code)
                                 for s in self.options.select):
            return False
        return (code.startswith(self.options.ignore) and
                not code.startswith(self.options.select))

    def get_checks(self, argument_name):
        """Get all the checks for this category.

        Find all globally visible functions where the first argument
        name starts with argument_name and which contain selected tests.
        """
        checks = []
        for check, attrs in _checks[argument_name].items():
            (codes, args) = attrs
            if any(not (code and self.ignore_code(code)) for code in codes):
                checks.append((check.__name__, check, args))
        return sorted(checks)
def get_parser(prog='pycodestyle', version=__version__):
    """Create the parser for the program."""
    parser = OptionParser(prog=prog, version=version,
                          usage="%prog [options] input ...")
    # Options that may also be set from a configuration file.
    parser.config_options = [
        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
        'max-doc-length', 'hang-closing', 'count', 'format', 'quiet',
        'show-pep8', 'show-source', 'statistics', 'verbose']
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help="print status messages, or debug with -vv")
    parser.add_option('-q', '--quiet', default=0, action='count',
                      help="report only file names, or nothing with -qq")
    parser.add_option('-r', '--repeat', default=True, action='store_true',
                      help="(obsolete) show all occurrences of the same error")
    parser.add_option('--first', action='store_false', dest='repeat',
                      help="show first occurrence of each error")
    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                      help="exclude files or directories which match these "
                           "comma separated patterns (default: %default)")
    parser.add_option('--filename', metavar='patterns', default='*.py',
                      help="when parsing directories, only check filenames "
                           "matching these comma separated patterns "
                           "(default: %default)")
    parser.add_option('--select', metavar='errors', default='',
                      help="select errors and warnings (e.g. E,W6)")
    parser.add_option('--ignore', metavar='errors', default='',
                      help="skip errors and warnings (e.g. E4,W) "
                           "(default: %s)" % DEFAULT_IGNORE)
    parser.add_option('--show-source', action='store_true',
                      help="show source code for each error")
    parser.add_option('--show-pep8', action='store_true',
                      help="show text of PEP 8 for each error "
                           "(implies --first)")
    parser.add_option('--statistics', action='store_true',
                      help="count errors and warnings")
    parser.add_option('--count', action='store_true',
                      help="print total number of errors and warnings "
                           "to standard error and set exit code to 1 if "
                           "total is not null")
    parser.add_option('--max-line-length', type='int', metavar='n',
                      default=MAX_LINE_LENGTH,
                      help="set maximum allowed line length "
                           "(default: %default)")
    parser.add_option('--max-doc-length', type='int', metavar='n',
                      default=None,
                      help="set maximum allowed doc line length and perform "
                           "these checks (unchecked if not set)")
    parser.add_option('--hang-closing', action='store_true',
                      help="hang closing bracket instead of matching "
                           "indentation of opening bracket's line")
    parser.add_option('--format', metavar='format', default='default',
                      help="set the error format [default|pylint|<custom>]")
    parser.add_option('--diff', action='store_true',
                      help="report changes only within line number ranges in "
                           "the unified diff received on STDIN")
    group = parser.add_option_group("Testing Options")
    if os.path.exists(TESTSUITE_PATH):
        group.add_option('--testsuite', metavar='dir',
                         help="run regression tests from dir")
        group.add_option('--doctest', action='store_true',
                         help="run doctest on myself")
    group.add_option('--benchmark', action='store_true',
                     help="measure processing speed")
    return parser
def read_config(options, args, arglist, parser):
    """Read and parse configurations.

    If a config file is specified on the command line with the
    "--config" option, then only it is used for configuration.

    Otherwise, the user configuration (~/.config/pycodestyle) and any
    local configurations in the current directory or above will be
    merged together (in that order) using the read method of
    ConfigParser.

    Returns the updated ``options`` object (a fresh optparse Values
    merged from defaults, config files and the command line).
    """
    config = RawConfigParser()

    cli_conf = options.config

    # Base directory used to resolve relative 'exclude' patterns found
    # in a local project configuration file.
    local_dir = os.curdir

    if USER_CONFIG and os.path.isfile(USER_CONFIG):
        if options.verbose:
            print('user configuration: %s' % USER_CONFIG)
        config.read(USER_CONFIG)

    # Walk up from the common prefix of the checked paths, looking for
    # the first parent directory containing a project config file.
    parent = tail = args and os.path.abspath(os.path.commonprefix(args))
    while tail:
        if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG):
            local_dir = parent
            if options.verbose:
                print('local configuration: in %s' % parent)
            break
        (parent, tail) = os.path.split(parent)

    # An explicit --config file is read last so it wins over the
    # user/project configuration.
    if cli_conf and os.path.isfile(cli_conf):
        if options.verbose:
            print('cli configuration: %s' % cli_conf)
        config.read(cli_conf)

    pycodestyle_section = None
    if config.has_section(parser.prog):
        pycodestyle_section = parser.prog
    elif config.has_section('pep8'):
        pycodestyle_section = 'pep8'  # Deprecated
        warnings.warn('[pep8] section is deprecated. Use [pycodestyle].')

    if pycodestyle_section:
        # Map each option's dest name to its optparse type (for typed
        # options) or action (for flags) so config values can be coerced.
        option_list = {o.dest: o.type or o.action for o in parser.option_list}

        # First, read the default values
        (new_options, __) = parser.parse_args([])

        # Second, parse the configuration
        for opt in config.options(pycodestyle_section):
            if opt.replace('_', '-') not in parser.config_options:
                print("  unknown option '%s' ignored" % opt)
                continue
            if options.verbose > 1:
                print("  %s = %s" % (opt,
                                     config.get(pycodestyle_section, opt)))
            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            if opt_type in ('int', 'count'):
                value = config.getint(pycodestyle_section, opt)
            elif opt_type in ('store_true', 'store_false'):
                value = config.getboolean(pycodestyle_section, opt)
            else:
                value = config.get(pycodestyle_section, opt)
                if normalized_opt == 'exclude':
                    # Exclude patterns are relative to the config's dir.
                    value = normalize_paths(value, local_dir)
            setattr(new_options, normalized_opt, value)

        # Third, overwrite with the command-line options
        (options, __) = parser.parse_args(arglist, values=new_options)
    options.doctest = options.testsuite = False
    return options
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None, verbose=None):
    """Process options passed either via arglist or command line args.

    Passing in the ``config_file`` parameter allows other tools, such as
    flake8 to specify their own options to be processed in pycodestyle.

    Returns a ``(options, args)`` pair: the merged option values and the
    list of paths to check.
    """
    if not parser:
        parser = get_parser()
    if not parser.has_option('--config'):
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed. Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location")
    # Don't read the command line if the module is used as a library.
    if not arglist and not parse_argv:
        arglist = []
    # If parse_argv is True and arglist is None, arguments are
    # parsed from the command line (sys.argv)
    (options, args) = parser.parse_args(arglist)
    options.reporter = None

    # If explicitly specified verbosity, override any `-v` CLI flag
    if verbose is not None:
        options.verbose = verbose

    if options.ensure_value('testsuite', False):
        args.append(options.testsuite)
    elif not options.ensure_value('doctest', False):
        if parse_argv and not args:
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                # Assume the current directory when reading a diff or
                # when a project configuration file is present.
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
    options.reporter = parse_argv and options.quiet == 1 and FileReport

    options.filename = _parse_multi_options(options.filename)
    options.exclude = normalize_paths(options.exclude)
    options.select = _parse_multi_options(options.select)
    options.ignore = _parse_multi_options(options.ignore)

    if options.diff:
        # Line length is checked by the tool that produced the diff.
        options.max_line_length = 0
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)

    return options, args
2668 def _parse_multi_options(options, split_token=','):
2669 r"""Split and strip and discard empties.
2671 Turns the following:
2679 return [o.strip() for o in options.split(split_token) if o.strip()]
def _main():
    """Parse options and run checks on Python source."""
    import signal

    # Handle "Broken pipe" gracefully
    try:
        signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
    except AttributeError:
        pass    # not supported on Windows

    style_guide = StyleGuide(parse_argv=True)
    options = style_guide.options

    if options.doctest or options.testsuite:
        from testsuite.support import run_tests
        report = run_tests(style_guide)
    else:
        report = style_guide.check_files()

    if options.statistics:
        report.print_statistics()

    if options.benchmark:
        report.print_benchmark()

    if options.testsuite and not options.quiet:
        report.print_results()

    if report.total_errors:
        if options.count:
            # Mirror the --count contract: total on stderr, exit code 1.
            sys.stderr.write(str(report.total_errors) + '\n')
        sys.exit(1)
# Script entry point; no effect when imported as a library.
if __name__ == '__main__':
    _main()
def python_3000_raise_comma(logical_line)
def comparison_negative(logical_line)
def whitespace_before_parameters(logical_line, tokens)
def tabs_or_spaces(physical_line, indent_char)
Plugins (check functions) for physical lines.
def whitespace_around_comma(logical_line)
def tabs_obsolete(physical_line)
def extraneous_whitespace(logical_line)
def blank_lines(logical_line, blank_lines, indent_level, line_number, blank_before, previous_logical, previous_unindented_logical_line, previous_indent_level, lines)
Plugins (check functions) for logical lines.
def maximum_line_length(physical_line, max_line_length, multiline, line_number, noqa)
def imports_on_separate_lines(logical_line)
def trailing_blank_lines(physical_line, lines, line_number, total_lines)
def _break_around_binary_operators(tokens)
def register_check(check, codes=None)
def python_3000_invalid_escape_sequence(logical_line, tokens, noqa)
def whitespace_around_named_parameter_equals(logical_line, tokens)
def bare_except(logical_line, noqa)
def ambiguous_identifier(logical_line, tokens)
def readlines(filename)
Helper functions.
def whitespace_around_operator(logical_line)
def comparison_to_singleton(logical_line, noqa)
def trailing_whitespace(physical_line)
def indentation(logical_line, previous_logical, indent_char, indent_level, previous_indent_level)
def continued_indentation(logical_line, tokens, indent_level, hang_closing, indent_char, noqa, verbose)
def break_before_binary_operator(logical_line, tokens)
def python_3000_async_await_keywords(logical_line, tokens)
def lru_cache(maxsize=128)
def _is_binary_operator(token_type, text)
def maximum_doc_length(logical_line, max_doc_length, noqa, tokens)
def missing_whitespace_after_import_keyword(logical_line)
def compound_statements(logical_line)
def missing_whitespace_around_operator(logical_line, tokens)
def module_imports_on_top_of_file(logical_line, indent_level, checker_state, noqa)
def missing_whitespace(logical_line)
def break_after_binary_operator(logical_line, tokens)
def comparison_type(logical_line, noqa)
def python_3000_backticks(logical_line)
def update_counts(s, counts)
def python_3000_not_equal(logical_line)
def whitespace_before_comment(logical_line, tokens)
def explicit_line_join(logical_line, tokens)
def python_3000_has_key(logical_line, noqa)
def whitespace_around_keywords(logical_line)
def _get_parameters(function)