Issue #8478: Untokenizer.compat now processes first token from iterator input.
Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
This commit is contained in: parent 7751a34400, commit 6858f00dab.
@ -627,9 +627,17 @@ class UntokenizeTest(TestCase):
|
|||
'start (1,3) precedes previous end (2,2)')
|
||||
self.assertRaises(ValueError, u.add_whitespace, (2,1))
|
||||
|
||||
__test__ = {"doctests" : doctests, 'decistmt': decistmt}
|
||||
def test_iter_compat(self):
|
||||
u = Untokenizer()
|
||||
token = (NAME, 'Hello')
|
||||
u.compat(token, iter([]))
|
||||
self.assertEqual(u.tokens, ["Hello "])
|
||||
u = Untokenizer()
|
||||
self.assertEqual(u.untokenize(iter([token])), 'Hello ')
|
||||
|
||||
|
||||
__test__ = {"doctests" : doctests, 'decistmt': decistmt}
|
||||
|
||||
def test_main():
|
||||
from test import test_tokenize
|
||||
test_support.run_doctest(test_tokenize, True)
|
||||
|
|
|
@ -26,6 +26,7 @@ __author__ = 'Ka-Ping Yee <ping@lfw.org>'
|
|||
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
|
||||
'Skip Montanaro, Raymond Hettinger')
|
||||
|
||||
from itertools import chain
|
||||
import string, re
|
||||
from token import *
|
||||
|
||||
|
@ -192,9 +193,10 @@ class Untokenizer:
|
|||
self.tokens.append(" " * col_offset)
|
||||
|
||||
def untokenize(self, iterable):
|
||||
for t in iterable:
|
||||
it = iter(iterable)
|
||||
for t in it:
|
||||
if len(t) == 2:
|
||||
self.compat(t, iterable)
|
||||
self.compat(t, it)
|
||||
break
|
||||
tok_type, token, start, end, line = t
|
||||
self.add_whitespace(start)
|
||||
|
@ -206,16 +208,12 @@ class Untokenizer:
|
|||
return "".join(self.tokens)
|
||||
|
||||
def compat(self, token, iterable):
|
||||
startline = False
|
||||
indents = []
|
||||
toks_append = self.tokens.append
|
||||
toknum, tokval = token
|
||||
if toknum in (NAME, NUMBER):
|
||||
tokval += ' '
|
||||
if toknum in (NEWLINE, NL):
|
||||
startline = True
|
||||
startline = token[0] in (NEWLINE, NL)
|
||||
prevstring = False
|
||||
for tok in iterable:
|
||||
|
||||
for tok in chain([token], iterable):
|
||||
toknum, tokval = tok[:2]
|
||||
|
||||
if toknum in (NAME, NUMBER):
|
||||
|
|
|
@ -42,6 +42,9 @@ Library
|
|||
- Issue #17671: Fixed a crash when using a non-initialized io.BufferedRWPair.
|
||||
Based on patch by Stephen Tu.
|
||||
|
||||
- Issue #8478: Untokenizer.compat processes first token from iterator input.
|
||||
Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
|
||||
|
||||
- Issue #20594: Avoid name clash with the libc function posix_close.
|
||||
|
||||
- Issue #19856: shutil.move() failed to move a directory to another directory
|
||||
|
|
Loading…
Reference in New Issue