--- a/pyparser/pytokenizer.py	Sun Jan 08 21:15:30 2017 +0100
+++ b/pyparser/pytokenizer.py	Sun Jan 08 23:28:19 2017 +0100
@@ -103,7 +103,7 @@
             endmatch = endDFA.recognize(line)
             if endmatch >= 0:
                 pos = end = endmatch
-                tok = (tokens.STRING, contstr + line[:end], strstart[0],
+                tok = (tokens["STRING"], contstr + line[:end], strstart[0],
                        strstart[1], line)
                 token_list.append(tok)
                 last_comment = ''
@@ -111,7 +111,7 @@
                 contline = None
             elif (needcont and not line.endswith('\\\n') and
                                not line.endswith('\\\r\n')):
-                tok = (tokens.ERRORTOKEN, contstr + line, strstart[0],
+                tok = (tokens["ERRORTOKEN"], contstr + line, strstart[0],
                        strstart[1], line)
                 token_list.append(tok)
                 last_comment = ''
@@ -140,11 +140,11 @@
 
             if column > indents[-1]:           # count indents or dedents
                 indents.append(column)
-                token_list.append((tokens.INDENT, line[:pos], lnum, 0, line))
+                token_list.append((tokens["INDENT"], line[:pos], lnum, 0, line))
                 last_comment = ''
             while column < indents[-1]:
                 indents = indents[:-1]
-                token_list.append((tokens.DEDENT, '', lnum, pos, line))
+                token_list.append((tokens["DEDENT"], '', lnum, pos, line))
                 last_comment = ''
             if column != indents[-1]:
                 err = "unindent does not match any outer indentation level"
@@ -177,11 +177,11 @@
                 token, initial = line[start:end], line[start]
                 if initial in numchars or \
                    (initial == '.' and token != '.'):      # ordinary number
-                    token_list.append((tokens.NUMBER, token, lnum, start, line))
+                    token_list.append((tokens["NUMBER"], token, lnum, start, line))
                     last_comment = ''
                 elif initial in '\r\n':
                     if parenlev <= 0:
-                        tok = (tokens.NEWLINE, last_comment, lnum, start, line)
+                        tok = (tokens["NEWLINE"], last_comment, lnum, start, line)
                         token_list.append(tok)
                     last_comment = ''
                 elif initial == '#':
@@ -193,7 +193,7 @@
                     if endmatch >= 0:                     # all on one line
                         pos = endmatch
                         token = line[start:pos]
-                        tok = (tokens.STRING, token, lnum, start, line)
+                        tok = (tokens["STRING"], token, lnum, start, line)
                         token_list.append(tok)
                         last_comment = ''
                     else:
@@ -212,11 +212,11 @@
                         contline = line
                         break
                     else:                                  # ordinary string
-                        tok = (tokens.STRING, token, lnum, start, line)
+                        tok = (tokens["STRING"], token, lnum, start, line)
                         token_list.append(tok)
                         last_comment = ''
                 elif initial in namechars:                 # ordinary name
-                    token_list.append((tokens.NAME, token, lnum, start, line))
+                    token_list.append((tokens["NAME"], token, lnum, start, line))
                     last_comment = ''
                 elif initial == '\\':                      # continued stmt
                     continued = 1
@@ -233,7 +233,7 @@
                     if token in python_opmap:
                         punct = python_opmap[token]
                     else:
-                        punct = tokens.OP
+                        punct = tokens["OP"]
                     token_list.append((punct, token, lnum, start, line))
                     last_comment = ''
                 else:
@@ -243,22 +243,22 @@
                 if start<max and line[start] in single_quoted:
                     raise TokenError("EOL while scanning string literal",
                                      line, lnum, start+1, token_list)
-                tok = (tokens.ERRORTOKEN, line[pos], lnum, pos, line)
+                tok = (tokens["ERRORTOKEN"], line[pos], lnum, pos, line)
                 token_list.append(tok)
                 last_comment = ''
                 pos = pos + 1
 
     lnum -= 1
     if not (flags & consts.PyCF_DONT_IMPLY_DEDENT):
-        if token_list and token_list[-1][0] != tokens.NEWLINE:
-            tok = (tokens.NEWLINE, '', lnum, 0, '\n')
+        if token_list and token_list[-1][0] != tokens["NEWLINE"]:
+            tok = (tokens["NEWLINE"], '', lnum, 0, '\n')
             token_list.append(tok)
         for indent in indents[1:]:                # pop remaining indent levels
-            token_list.append((tokens.DEDENT, '', lnum, pos, line))
-    tok = (tokens.NEWLINE, '', lnum, 0, '\n')
+            token_list.append((tokens["DEDENT"], '', lnum, pos, line))
+    tok = (tokens["NEWLINE"], '', lnum, 0, '\n')
     token_list.append(tok)
 
-    token_list.append((tokens.ENDMARKER, '', lnum, pos, line))
+    token_list.append((tokens["ENDMARKER"], '', lnum, pos, line))
     return token_list
 
 
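Every hunk in this changeset makes the same mechanical substitution: token codes are looked up by subscription (tokens["STRING"]) instead of attribute access (tokens.STRING), so the pyparser tokens object now evidently supports dict-style lookup keyed by token name. Below is a minimal sketch of the before/after shape this implies; the _Tokens class, the name list, and the numeric codes are illustrative assumptions, not the actual pyparser/pygram definitions:

    # Before (sketch): a namespace-style object, one attribute per token type.
    class _Tokens(object):
        ENDMARKER = 0
        NAME = 1
        NUMBER = 2
        STRING = 3

    tokens = _Tokens()
    assert tokens.STRING == 3     # attribute access, as on the "-" lines

    # After (sketch): a plain dict built from a list of names, so the table
    # can be generated from data instead of hand-maintained attributes.
    token_names = ["ENDMARKER", "NAME", "NUMBER", "STRING"]  # illustrative subset
    tokens = {name: code for code, name in enumerate(token_names)}
    assert tokens["STRING"] == 3  # subscription, as on the "+" lines

A side effect worth noting: with the dict form, a misspelled token name fails with a KeyError at the lookup site rather than an AttributeError, and new token types can be registered by extending the name list instead of editing a class body.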