bpo-33338: [tokenize] Minor code cleanup (#6573)
This change contains minor things that make diffing between Lib/tokenize.py and Lib/lib2to3/pgen2/tokenize.py cleaner.
commit c2d384dbd7
parent d5a2377c3d
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -28,7 +28,6 @@ from builtins import open as _builtin_open
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
-from itertools import chain
 import itertools as _itertools
 import re
 import sys
@@ -278,7 +277,7 @@ class Untokenizer:
         startline = token[0] in (NEWLINE, NL)
         prevstring = False
 
-        for tok in chain([token], iterable):
+        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
             if toknum == ENCODING:
                 self.encoding = tokval
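The only change in this hunk is spelling out the module alias; the underlying pattern is untouched: chain([token], iterable) puts the first token, which the caller has already pulled off the iterator, back in front of the rest of the stream. A minimal sketch of that pattern (illustrative names, not part of the commit):

    # The "push back the first item" pattern used in Untokenizer.compat():
    # the first token was already consumed so it could be inspected, and
    # chain() lets the loop still see the complete stream.
    import itertools as _itertools

    tokens = iter(["NAME", "OP", "NUMBER"])
    first = next(tokens)                          # already consumed
    for tok in _itertools.chain([first], tokens):
        print(tok)                                # NAME, OP, NUMBER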
@@ -475,13 +474,10 @@ def tokenize(readline):
     The first token sequence will always be an ENCODING token
     which tells you which encoding was used to decode the bytes stream.
     """
-    # This import is here to avoid problems when the itertools module is not
-    # built yet and tokenize is imported.
-    from itertools import chain, repeat
     encoding, consumed = detect_encoding(readline)
-    rl_gen = iter(readline, b"")
-    empty = repeat(b"")
-    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+    empty = _itertools.repeat(b"")
+    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
+    return _tokenize(rl_gen.__next__, encoding)
 
 
 def _tokenize(readline, encoding):
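The restructured tokenize() drops the function-local import (the deleted comment about itertools possibly not being built yet is moot now that the module-level import itertools as _itertools exists) and builds a single readline source from three pieces: the lines consumed during encoding detection, the real input, and an endless tail of b"" so the tokenizer sees a stable EOF rather than a StopIteration. A minimal, runnable sketch of that behavior (names are illustrative, not from the commit):

    # How the chained readline behaves: detection leftovers replay first,
    # then real input until EOF, then b"" forever as a stable EOF marker.
    import io
    import itertools as _itertools

    source = io.BytesIO(b"x = 1\n")
    consumed = [b"# coding: utf-8\n"]   # lines read during encoding detection
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(source.readline, b""), empty)
    readline = rl_gen.__next__

    print(readline())  # b'# coding: utf-8\n'  (replayed leftover)
    print(readline())  # b'x = 1\n'            (real input)
    print(readline())  # b''                   (EOF, repeated indefinitely)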
@@ -667,7 +663,8 @@ def main():
 
     # Helper error handling routines
     def perror(message):
-        print(message, file=sys.stderr)
+        sys.stderr.write(message)
+        sys.stderr.write('\n')
 
     def error(message, filename=None, location=None):
         if location:
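The perror() change swaps print(message, file=sys.stderr) for two explicit writes, presumably in service of the diff-minimization goal stated in the commit message; the output is identical either way. A quick equivalence check (illustrative, not part of the commit):

    # print(msg, file=s) and s.write(msg) + s.write('\n') emit the same text.
    import io

    def perror_print(message, stream):
        print(message, file=stream)

    def perror_write(message, stream):
        stream.write(message)
        stream.write('\n')

    a, b = io.StringIO(), io.StringIO()
    perror_print("tokenize: bad input", a)
    perror_write("tokenize: bad input", b)
    assert a.getvalue() == b.getvalue() == "tokenize: bad input\n"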