00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032 """pump v0.2.0 - Pretty Useful for Meta Programming.
00033
00034 A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various numbers of arguments.
00038
00039 USAGE:
00040 pump.py SOURCE_FILE
00041
00042 EXAMPLES:
00043 pump.py foo.cc.pump
00044 Converts foo.cc.pump to foo.cc.
00045
00046 GRAMMAR:
00047 CODE ::= ATOMIC_CODE*
00048 ATOMIC_CODE ::= $var ID = EXPRESSION
00049 | $var ID = [[ CODE ]]
00050 | $range ID EXPRESSION..EXPRESSION
00051 | $for ID SEPARATOR [[ CODE ]]
00052 | $($)
00053 | $ID
00054 | $(EXPRESSION)
00055 | $if EXPRESSION [[ CODE ]] ELSE_BRANCH
00056 | [[ CODE ]]
00057 | RAW_CODE
00058 SEPARATOR ::= RAW_CODE | EMPTY
00059 ELSE_BRANCH ::= $else [[ CODE ]]
00060 | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
00061 | EMPTY
00062 EXPRESSION has Python syntax.
00063 """
00064
00065 __author__ = 'wan@google.com (Zhanyong Wan)'
00066
00067 import os
00068 import re
00069 import sys
00070
00071
# Regexes matching the start of each Pump directive, paired with the token
# type they produce.  Order matters: when two patterns match at the same
# column, FindFirstInLine keeps the entry listed first (it only replaces the
# best match on a strictly smaller start column).  So the specific keyword
# patterns ($var, $elif, ...) must precede the generic '$id' and '$' entries.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
]
00085
00086
class Cursor:
  """A (line, column) position in a text file.

  Cursors are totally ordered: first by line, then by column.  The
  special value Cursor(-1, -1) (see Eof()) denotes end-of-file.
  """

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    # Lexicographic (line, column) comparison.
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return self < rhs or self == rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    # Lines are displayed 1-based; columns 0-based.
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
00129
00130
00131
def Eof():
  """Returns the special cursor value denoting end-of-file."""
  # By convention (-1, -1) never corresponds to a real file position.
  return Cursor(-1, -1)
00135
00136
class Token:
  """A lexical token in a Pump source file.

  Holds the token's [start, end) cursor span, its text (value), and its
  kind (token_type, e.g. 'code', '$var', '[[').  Missing cursors default
  to the end-of-file sentinel.
  """

  def __init__(self, start=None, end=None, value=None, token_type=None):
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self (cursors are cloned; value is shared)."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
00161
00162
def StartsWith(lines, pos, string):
  """Returns True iff the text at position pos in lines begins with string."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
00167
00168
def FindFirstInLine(line, token_table):
  """Finds the earliest match of any token_table regex in line.

  Returns a (start_column, length, token_type) tuple for the match that
  begins closest to the start of line; ties go to the entry listed first
  in token_table.  Returns None when nothing matches.
  """
  best = None
  for (regex, token_type) in token_table:
    m = regex.search(line)
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
00184
00185
def FindFirst(lines, token_table, cursor):
  """Finds the first token from token_table at or after cursor in lines.

  Returns the match as a Token, or None if no pattern matches anywhere in
  the remainder of the file.
  """
  start = cursor.Clone()
  for line_number in range(start.line, len(lines)):
    line = lines[line_number]
    offset = 0
    if line_number == start.line:
      # On the first line, only search at or after the cursor column.
      offset = start.column
      line = line[offset:]
    found = FindFirstInLine(line, token_table)
    if found:
      (column, length, token_type) = found
      token_start = Cursor(line_number, column + offset)
      return MakeToken(lines, token_start, token_start + length, token_type)
  return None
00206
00207
def SubString(lines, start, end):
  """Returns the text between cursors start (inclusive) and end (exclusive).

  An end of Eof() means "through the last character of the file".
  """
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    # Both cursors on one line: a single slice suffices.
    return lines[start.line][start.column:end.column]

  # Multi-line span: first-line tail, whole middle lines, last-line head.
  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
00224
00225
def StripMetaComments(text):
  """Strips meta comments ('$$' to end of line) from the given string.

  A line consisting of nothing but a meta comment disappears entirely,
  including its newline; a trailing meta comment on a contentful line is
  removed together with the whitespace preceding it.

  The parameter was renamed from 'str', which shadowed the built-in type.
  """
  # Remove a whole-line meta comment at the very start of the string.
  # (Without re.M, '^' only matches there; whole-line comments elsewhere
  # are handled by the second substitution, whose leading \s* consumes
  # the newline that precedes them.)
  text = re.sub(r'^\s*\$\$.*\n', '', text)

  # Remove every remaining meta comment along with preceding whitespace.
  return re.sub(r'\s*\$\$.*', '', text)
00235
00236
def MakeToken(lines, start, end, token_type):
  """Creates a Token of the given type spanning [start, end) in lines."""
  value = SubString(lines, start, end)
  return Token(start, end, value, token_type)
00241
00242
def ParseToken(lines, pos, regex, token_type):
  """Parses a token of the given type starting exactly at pos.

  The regex must match at offset 0 of the remainder of the line;
  otherwise an error is reported and the program exits.
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    # print() call form works in both Python 2 and Python 3.
    print('ERROR: %s expected at %s.' % (token_type, pos))
    sys.exit(1)
00251
00252
# Helper regexes used by the tokenizer below.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')        # a meta-variable identifier
EQ_REGEX = re.compile(r'=')                   # the '=' in '$var ID = ...'
# Everything up to the end of the line or the start of a '$$' meta comment.
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')           # the '..' in a $range directive
00259
00260
def Skip(lines, pos, regex):
  """Returns the position just past a match of regex starting exactly at pos.

  If regex does not match at pos, returns pos unchanged.
  """
  rest_of_line = lines[pos.line][pos.column:]
  m = re.search(regex, rest_of_line)
  if m and not m.start():
    return pos + m.end()
  return pos
00268
00269
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first match of regex at or after pos.

  Reports an error naming token_type and exits when regex never matches
  in the rest of the line.
  """
  rest_of_line = lines[pos.line][pos.column:]
  m = re.search(regex, rest_of_line)
  if not m:
    print ('ERROR: %s expected on line %s after column %s.' %
           (token_type, pos.line + 1, pos.column))
    sys.exit(1)
  return pos + m.start()
00279
00280
def ParseExpTokenInParens(lines, pos):
  """Parses the parenthesized expression of a $(EXPRESSION) construct.

  pos points just past the '$'.  Returns an 'exp' Token covering the
  whole '(...)' group, with nested parentheses kept balanced.
  """
  def ParseInParens(pos):
    # Consume optional whitespace, '(', the (possibly nested) contents,
    # and the closing ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Scan to the next parenthesis: recurse on '(' to balance nesting,
    # stop (without consuming) on the matching ')'.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
00301
00302
def RStripNewLineFromToken(token):
  """Returns token with one trailing newline removed from its value.

  Returns the token object itself (not a copy) when its value has no
  trailing newline.
  """
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
00308
00309
def TokenizeLines(lines, pos):
  """Generates the tokens of the Pump source in lines, starting at pos.

  Raw text between directives is yielded as 'code' tokens.  For
  directives with positional arguments ($var, $for, $range), the
  argument tokens ('id', '=', 'exp', '..') are produced here as well,
  since their grammar is positional rather than regex-driven.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No directive left: the rest of the file is one raw code token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      # The raw code between the current position and the directive.
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # $var ID = EXPRESSION: the expression is the rest of the line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
      # Otherwise pos rests on '[[', which the next iteration will find.
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      # $range ID EXP..EXP: the second expression runs to end of line.
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      # $(EXPRESSION): preceding code keeps its newline (no rstrip).
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      # These directives swallow the newline of the preceding raw code.
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
00380
00381
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s == '':
    return
  for token in TokenizeLines(s.splitlines(True), Cursor(0, 0)):
    yield token
00388
00389
class CodeNode:
  """AST node: a sequence of atomic code nodes (the CODE production)."""
  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
00393
00394
class VarNode:
  """AST node: '$var ID = ...'; atomic_code is an ExpNode or a CodeNode."""
  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
00399
00400
class RangeNode:
  """AST node: '$range ID exp1..exp2' (an inclusive integer range)."""
  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
00406
00407
class ForNode:
  """AST node: '$for ID SEPARATOR [[ CODE ]]'; sep may be None."""
  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
00413
00414
class ElseNode:
  """AST node for an else branch.

  NOTE(review): appears unused in this file -- ParseElseNode returns
  CodeNode/IfNode structures instead.  Confirm before removing.
  """
  def __init__(self, else_branch=None):
    self.else_branch = else_branch
00418
00419
class IfNode:
  """AST node: '$if EXP [[ CODE ]] ELSE_BRANCH'; else_branch may be None."""
  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
00425
00426
class RawCodeNode:
  """AST node: raw source text copied verbatim to the output."""
  def __init__(self, token=None):
    self.raw_code = token
00430
00431
class LiteralDollarNode:
  """AST node: the '$($)' escape, which expands to a literal '$'."""
  def __init__(self, token):
    self.token = token
00435
00436
class ExpNode:
  """AST node: a meta expression.

  python_exp is the source expression with every identifier rewritten to
  a self.GetValue("...") call (see ParseExpNode), ready for Env.EvalExp.
  """
  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
00441
00442
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
00447
00448
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list, in place."""
  a_list.insert(0, elem)
00451
00452
def PopToken(a_list, token_type=None):
  """Removes and returns the first token in a_list.

  If token_type is given, reports an error and exits unless the popped
  token has exactly that type.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    # print() call form works in both Python 2 and Python 3.
    print('ERROR: %s expected at %s' % (token_type, token.start))
    print('ERROR: %s found instead' % (token,))
    sys.exit(1)

  return token
00461
00462
def PeekToken(a_list):
  """Returns the first element of a_list without removing it (None if empty)."""
  return a_list[0] if a_list else None
00468
00469
def ParseExpNode(token):
  """Turns an 'exp' token into an ExpNode.

  Every identifier in the expression is rewritten to a
  self.GetValue("...") call so that Env.EvalExp can later evaluate it
  against an environment.
  """
  identifier_pattern = r'([_A-Za-z]\w*)'
  python_exp = re.sub(identifier_pattern, r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
00473
00474
def ParseElseNode(tokens):
  """Parses an ELSE_BRANCH ($else / $elif chain) from the front of tokens.

  Returns the CodeNode to execute when the $if condition is false, or
  None when no else branch follows.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)  # NOTE: shadows the built-in next().
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')  # The condition is tokenized as raw code.
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    # Desugar '$elif' into a nested $if inside the else branch.
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Skip whitespace-only code between ']]' and the $else/$elif.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
00501
00502
def ParseAtomicCodeNode(tokens):
  """Parses one ATOMIC_CODE production from the front of tokens.

  Returns the corresponding AST node, or None (with tokens unchanged)
  when the next token cannot start an atomic code construct, e.g. a
  ']]' that the caller should consume.
  """
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    # Raw source text copied verbatim to the output.
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)  # NOTE: shadows the built-in next().
    if next.token_type == 'exp':
      # $var ID = EXPRESSION
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    # $var ID = [[ CODE ]]
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      # Raw code between '$for ID' and '[[' is the separator.
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')  # The condition is tokenized as raw code.
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # $ID: drop the leading '$' and treat the rest as an expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    # Escape sequence for a literal '$'.
    return LiteralDollarNode(head)
  elif t == '$':
    # $(EXPRESSION)
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    # Nested scope: [[ CODE ]]
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not an atomic code construct; push the token back for the caller.
    PushFront(tokens, head)
    return None
00562
00563
def ParseCodeNode(tokens):
  """Parses the CODE production: as many atomic code nodes as possible."""
  atomic_code_list = []
  while tokens:
    node = ParseAtomicCodeNode(tokens)
    if not node:
      break
    atomic_code_list.append(node)
  return CodeNode(atomic_code_list)
00575
00576
def ParseToAST(pump_src_text):
  """Converts the given Pump source text into an AST (a CodeNode)."""
  return ParseCodeNode(list(Tokenize(pump_src_text)))
00582
00583
class Env:
  """The evaluation environment: meta-variable and range bindings.

  Both stacks are lists with the most recent binding at index 0, so an
  inner binding shadows an outer one with the same name.
  """

  def __init__(self):
    self.variables = []
    self.ranges = []

  def Clone(self):
    """Returns a copy whose binding stacks can be pushed to independently."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    """Binds var to value, shadowing any existing binding.

    A string value that is exactly the canonical form of an integer is
    converted to int, so the meta language can do arithmetic on it.
    """
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    """Removes the innermost variable binding."""
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    """Binds var to the inclusive integer range [lower, upper]."""
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    """Removes the innermost range binding."""
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the value bound to identifier; exits if it is unbound."""
    for (var, value) in self.variables:
      if identifier == var:
        return value

    print('ERROR: meta variable %s is undefined.' % (identifier,))
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates the given ExpNode against this environment.

    SECURITY NOTE: this calls eval() on text from the Pump source file,
    so Pump input must be trusted.
    """
    try:
      result = eval(exp.python_exp)
    except Exception as e:  # 'as' form: valid in both Python 2.6+ and 3.
      print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
      print('ERROR: failed to evaluate meta expression %s at %s' %
            (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns the (lower, upper) range bound to identifier; exits if unbound."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print('ERROR: range %s is undefined.' % (identifier,))
    sys.exit(1)
00639
00640
class Output:
  """Accumulates the generated output text in the 'string' attribute."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the last newline ('' when there is none)."""
    _, newline, tail = self.string.rpartition('\n')
    if not newline:
      # No newline at all -- by design this returns '' even when the
      # buffer is non-empty.
      return ''
    return tail

  def Append(self, s):
    """Appends s to the accumulated output."""
    self.string += s
00654
00655
def RunAtomicCode(env, node, output):
  """Executes one AST node, appending its expansion to output.

  env may be mutated: $var and $range push bindings into the current
  environment.  Nested scopes run against clones of env, so their
  bindings do not leak out.
  """
  if isinstance(node, VarNode):
    identifier = node.identifier.value.strip()
    # Expand the variable's body in a child scope; bind the result text.
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      # Each iteration gets its own scope with the loop variable bound.
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # print() call form works in both Python 2 and Python 3.
    print('BAD')
    print(node)
    sys.exit(1)
00700
00701
def RunCode(env, code_node, output):
  """Executes every atomic node in code_node against env, in order."""
  for node in code_node.atomic_code:
    RunAtomicCode(env, node, output)
00705
00706
def IsSingleLineComment(cur_line):
  """Returns True iff cur_line contains a '//' comment marker."""
  return cur_line.find('//') >= 0
00709
00710
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Returns a truthy value iff cur_line belongs to a preprocessor directive.

  Either the line itself starts with '#', or the previously emitted line
  ends with a backslash continuation.
  """
  stripped = cur_line.lstrip()
  if stripped.startswith('#'):
    return True
  # Continuation of a directive begun on the previous line.
  return prev_lines and prev_lines[-1].endswith('\\')
00715
00716
def WrapComment(line, output):
  """Wraps a long '//' comment line to 80 columns, appending to output.

  Any code before the comment is emitted on its own line first; the
  comment text is then re-flowed word by word under a '// ' prefix that
  preserves the comment's indentation.
  """
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    indent = loc
  else:
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into words, each keeping its trailing punctuation/whitespace.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
00739
00740
def WrapCode(line, line_concat, output):
  """Wraps a long code line at 80 columns, appending pieces to output.

  line_concat is appended to every piece but the last (e.g. ' \\' for
  preprocessor directives).  Continuation pieces are indented 4 extra
  spaces.  Splitting prefers ',' and ';' boundaries, falling back to
  spaces for a single segment that is still too long.
  """
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '
  max_len = 80 - indent - len(line_concat)
  new_prefix = prefix + 4*' '
  new_max_len = max_len - 4

  # Split the line into segments ending at a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''
  for seg in segs:
    # A single segment longer than the limit must be split at spaces.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
00769
00770
def WrapPreprocessorDirective(line, output):
  """Wraps a long preprocessor line, continuing each piece with ' \\'."""
  WrapCode(line, ' \\', output)
00773
00774
def WrapPlainCode(line, output):
  """Wraps a long plain code line with no continuation marker."""
  WrapCode(line, '', output)
00777
00778
def IsMultiLineIWYUPragma(line):
  """Returns a truthy value iff line contains a '/* IWYU pragma' comment."""
  pattern = r'/\* IWYU pragma: '
  return re.search(pattern, line)
00781
00782
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Returns a truthy value iff line must never be re-wrapped.

  Matches header-guard directives (#ifndef/#define/#endif //), #include
  lines, and one-line '// IWYU pragma' comments.
  """
  header_guard_pattern = r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$'
  include_pattern = r'^#include\s'
  iwyu_pragma_pattern = r'// IWYU pragma: '
  return (re.match(header_guard_pattern, line) or
          re.match(include_pattern, line) or
          re.search(iwyu_pragma_pattern, line))
00788
00789
def WrapLongLine(line, output):
  """Appends line to output, wrapping it when it exceeds 80 columns.

  Dispatches to the comment, preprocessor, or plain-code wrapper as
  appropriate; lines that must stay intact (header guards, #includes,
  IWYU pragmas) are appended unchanged.
  """
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
    return
  if IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
00812
00813
def BeautifyCode(string):
  """Wraps long lines and strips trailing whitespace from the given text."""
  wrapped = []
  for line in string.splitlines():
    WrapLongLine(line, wrapped)
  return '\n'.join(line.rstrip() for line in wrapped) + '\n'
00821
00822
def ConvertFromPumpSource(src_text):
  """Returns the text generated from the given Pump source text."""
  ast = ParseToAST(StripMetaComments(src_text))
  result = Output()
  RunCode(Env(), ast, result)
  return BeautifyCode(result.string)
00829
00830
def main(argv):
  """Entry point: converts the Pump file named on the command line.

  'foo.pump' is converted into 'foo'; any other input name writes the
  generated text to stdout instead.
  """
  if len(argv) == 1:
    print(__doc__)
    sys.exit(1)

  file_path = argv[-1]
  # open() instead of the Python-2-only file() builtin; the context
  # manager guarantees the handle is closed.
  with open(file_path, 'r') as source_file:
    output_str = ConvertFromPumpSource(source_file.read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    # No trailing newline is added (the Python 2 code used "print s,").
    sys.stdout.write(output_str)
  else:
    with open(output_file_path, 'w') as output_file:
      output_file.write('// This file was GENERATED by command:\n')
      output_file.write('// %s %s\n' %
                        (os.path.basename(__file__),
                         os.path.basename(file_path)))
      output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
      output_file.write(output_str)
00852
00853
# Allow use both as a command-line script and as an importable module.
if __name__ == '__main__':
  main(sys.argv)