1"""Token constants (from "token.h").""" 2 3__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF'] 4 5# This file is automatically generated; please don't muck it up! 6# 7# To update the symbols in this file, 'cd' to the top directory of 8# the python source tree after building the interpreter and run: 9# 10# ./python Lib/token.py 11 12#--start constants-- 13ENDMARKER = 0 14NAME = 1 15NUMBER = 2 16STRING = 3 17NEWLINE = 4 18INDENT = 5 19DEDENT = 6 20LPAR = 7 21RPAR = 8 22LSQB = 9 23RSQB = 10 24COLON = 11 25COMMA = 12 26SEMI = 13 27PLUS = 14 28MINUS = 15 29STAR = 16 30SLASH = 17 31VBAR = 18 32AMPER = 19 33LESS = 20 34GREATER = 21 35EQUAL = 22 36DOT = 23 37PERCENT = 24 38LBRACE = 25 39RBRACE = 26 40EQEQUAL = 27 41NOTEQUAL = 28 42LESSEQUAL = 29 43GREATEREQUAL = 30 44TILDE = 31 45CIRCUMFLEX = 32 46LEFTSHIFT = 33 47RIGHTSHIFT = 34 48DOUBLESTAR = 35 49PLUSEQUAL = 36 50MINEQUAL = 37 51STAREQUAL = 38 52SLASHEQUAL = 39 53PERCENTEQUAL = 40 54AMPEREQUAL = 41 55VBAREQUAL = 42 56CIRCUMFLEXEQUAL = 43 57LEFTSHIFTEQUAL = 44 58RIGHTSHIFTEQUAL = 45 59DOUBLESTAREQUAL = 46 60DOUBLESLASH = 47 61DOUBLESLASHEQUAL = 48 62AT = 49 63ATEQUAL = 50 64RARROW = 51 65ELLIPSIS = 52 66# Don't forget to update the table _PyParser_TokenNames in tokenizer.c! 67OP = 53 68ERRORTOKEN = 54 69# These aren't used by the C tokenizer but are needed for tokenize.py 70COMMENT = 55 71NL = 56 72ENCODING = 57 73N_TOKENS = 58 74# Special definitions for cooperation with parser 75NT_OFFSET = 256 76#--end constants-- 77 78tok_name = {value: name 79 for name, value in globals().items() 80 if isinstance(value, int) and not name.startswith('_')} 81__all__.extend(tok_name.values()) 82 83def ISTERMINAL(x): 84 return x < NT_OFFSET 85 86def ISNONTERMINAL(x): 87 return x >= NT_OFFSET 88 89def ISEOF(x): 90 return x == ENDMARKER 91 92 93def _main(): 94 import re 95 import sys 96 args = sys.argv[1:] 97 inFileName = args and args[0] or "Include/token.h" 98 outFileName = "Lib/token.py" 99 if len(args) > 1: 100 outFileName = args[1] 101 try: 102 fp = open(inFileName) 103 except OSError as err: 104 sys.stdout.write("I/O error: %s\n" % str(err)) 105 sys.exit(1) 106 with fp: 107 lines = fp.read().split("\n") 108 prog = re.compile( 109 r"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)", 110 re.IGNORECASE) 111 comment_regex = re.compile( 112 r"^\s*/\*\s*(.+?)\s*\*/\s*$", 113 re.IGNORECASE) 114 115 tokens = {} 116 prev_val = None 117 for line in lines: 118 match = prog.match(line) 119 if match: 120 name, val = match.group(1, 2) 121 val = int(val) 122 tokens[val] = {'token': name} # reverse so we can sort them... 
123 prev_val = val 124 else: 125 comment_match = comment_regex.match(line) 126 if comment_match and prev_val is not None: 127 comment = comment_match.group(1) 128 tokens[prev_val]['comment'] = comment 129 keys = sorted(tokens.keys()) 130 # load the output skeleton from the target: 131 try: 132 fp = open(outFileName) 133 except OSError as err: 134 sys.stderr.write("I/O error: %s\n" % str(err)) 135 sys.exit(2) 136 with fp: 137 format = fp.read().split("\n") 138 try: 139 start = format.index("#--start constants--") + 1 140 end = format.index("#--end constants--") 141 except ValueError: 142 sys.stderr.write("target does not contain format markers") 143 sys.exit(3) 144 lines = [] 145 for key in keys: 146 lines.append("%s = %d" % (tokens[key]["token"], key)) 147 if "comment" in tokens[key]: 148 lines.append("# %s" % tokens[key]["comment"]) 149 format[start:end] = lines 150 try: 151 fp = open(outFileName, 'w') 152 except OSError as err: 153 sys.stderr.write("I/O error: %s\n" % str(err)) 154 sys.exit(4) 155 with fp: 156 fp.write("\n".join(format)) 157 158 159if __name__ == "__main__": 160 _main() 161
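
# A brief usage sketch (comments only, so the generated module itself is
# unchanged): it shows how the constants above combine with the reverse
# mapping tok_name and the ISTERMINAL / ISNONTERMINAL / ISEOF helpers,
# assuming this file is importable as the standard `token` module.
#
#     >>> import token
#     >>> token.tok_name[token.NAME]
#     'NAME'
#     >>> token.ISTERMINAL(token.NAME)        # terminal token values sit below NT_OFFSET (256)
#     True
#     >>> token.ISNONTERMINAL(token.NT_OFFSET)
#     True
#     >>> token.ISEOF(token.ENDMARKER)        # ENDMARKER marks end of input
#     True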