bpo-33338: [tokenize] Minor code cleanup (#6573)

This change contains minor things that make diffing between Lib/tokenize.py and
Lib/lib2to3/pgen2/tokenize.py cleaner.
Łukasz Langa 2018-04-23 01:07:11 -07:00 committed by GitHub
parent d5a2377c3d
commit c2d384dbd7
1 changed file with 8 additions and 11 deletions


@@ -28,7 +28,6 @@ from builtins import open as _builtin_open
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-from itertools import chain
 import itertools as _itertools
 import re
 import sys
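The only change in this hunk is dropping the direct `from itertools import chain` import; the module-level alias `import itertools as _itertools` (already present, as the unchanged context line above shows) is used for every call site instead, matching the style of Lib/lib2to3/pgen2/tokenize.py. A minimal sketch of the style the commit standardizes on, using only the stdlib:

# Sketch: bind itertools under a private alias and qualify each call,
# rather than importing names like `chain` directly into the module
# namespace. This keeps the module's globals clean and call sites explicit.
import itertools as _itertools

merged = _itertools.chain([0], [1, 2])  # qualified call, no bare `chain`
print(list(merged))                     # [0, 1, 2]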
@@ -278,7 +277,7 @@ class Untokenizer:
         startline = token[0] in (NEWLINE, NL)
         prevstring = False
 
-        for tok in chain([token], iterable):
+        for tok in _itertools.chain([token], iterable):
             toknum, tokval = tok[:2]
             if toknum == ENCODING:
                 self.encoding = tokval
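In Untokenizer.compat(), the first token has already been pulled off the stream by the caller, so chain([token], iterable) glues it back in front of the remaining tokens; only the spelling of the call changes here. A self-contained illustration of that peek-and-reattach pattern (the `first`/`compat_like` names are illustrative, not from tokenize.py):

import itertools as _itertools

def compat_like(iterable):
    it = iter(iterable)
    first = next(it)                   # caller peeks at the first item
    # ... decide something based on `first` ...
    for tok in _itertools.chain([first], it):
        print(tok)                     # the loop still sees every item

compat_like(["ENCODING", "NAME", "NEWLINE"])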
@@ -475,13 +474,10 @@ def tokenize(readline):
     The first token sequence will always be an ENCODING token
     which tells you which encoding was used to decode the bytes stream.
     """
-    # This import is here to avoid problems when the itertools module is not
-    # built yet and tokenize is imported.
-    from itertools import chain, repeat
     encoding, consumed = detect_encoding(readline)
-    rl_gen = iter(readline, b"")
-    empty = repeat(b"")
-    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+    empty = _itertools.repeat(b"")
+    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
+    return _tokenize(rl_gen.__next__, encoding)
 
 
 def _tokenize(readline, encoding):
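The rewritten body builds the readline input for _tokenize() from three pieces: the lines detect_encoding() already consumed, a live iterator over the real readline (iter(readline, b"") stops at the first empty read), and _itertools.repeat(b"") so the resulting __next__ keeps returning b"" at EOF instead of raising StopIteration. A standalone sketch of the same construction, using io.BytesIO as a stand-in for a real binary file:

import io
import itertools as _itertools

buf = io.BytesIO(b"x = 1\ny = 2\n")
readline = buf.readline

consumed = [b"# coding: utf-8\n"]   # stand-in for detect_encoding() output
empty = _itertools.repeat(b"")      # endless b"" once the file is exhausted
rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)

next_line = rl_gen.__next__
print(next_line())  # b'# coding: utf-8\n' -- the consumed line first
print(next_line())  # b'x = 1\n'
print(next_line())  # b'y = 2\n'
print(next_line())  # b'' -- and every later call keeps returning b''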
@@ -667,7 +663,8 @@ def main():
 
     # Helper error handling routines
     def perror(message):
-        print(message, file=sys.stderr)
+        sys.stderr.write(message)
+        sys.stderr.write('\n')
 
     def error(message, filename=None, location=None):
         if location:
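The two-write form mirrors the perror() in Lib/lib2to3/pgen2/ and emits the same characters as the print() call it replaces: the message followed by a single newline. A quick equivalence check (hypothetical helper names, not part of the commit):

import io

def perror_old(message, stream):
    print(message, file=stream)

def perror_new(message, stream):
    stream.write(message)
    stream.write('\n')

old, new = io.StringIO(), io.StringIO()
perror_old("tokenize error", old)
perror_new("tokenize error", new)
assert old.getvalue() == new.getvalue() == "tokenize error\n"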