Issue #8478: Untokenizer.compat now processes first token from iterator input.

Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
Terry Jan Reedy 2014-02-17 23:12:07 -05:00
parent 7751a34400
commit 6858f00dab
3 changed files with 19 additions and 10 deletions
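For context, here is a minimal reproduction of the bug this commit fixes, written for Python 2.7 (the branch this patch targets). Before the patch, passing an iterator of 2-tuples to untokenize() silently dropped the first token: untokenize() consumed it to detect the two-tuple "compat" mode, and compat() never emitted it. The source string below is illustrative, not taken from the patch.

    # Sketch of the pre-patch bug (assumes Python 2.7's tokenize module).
    from StringIO import StringIO
    from tokenize import generate_tokens, untokenize

    source = "if False:\n    pass\n"
    # Yield (type, string) 2-tuples so untokenize() takes the compat() path.
    tokens = ((num, val) for num, val, _, _, _ in
              generate_tokens(StringIO(source).readline))
    print untokenize(tokens)
    # Before this commit the leading 'if' was lost from the output;
    # with the fix, the whole token stream round-trips.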

Lib/test/test_tokenize.py

@@ -627,9 +627,17 @@ class UntokenizeTest(TestCase):
                          'start (1,3) precedes previous end (2,2)')
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_iter_compat(self):
+        u = Untokenizer()
+        token = (NAME, 'Hello')
+        u.compat(token, iter([]))
+        self.assertEqual(u.tokens, ["Hello "])
+        u = Untokenizer()
+        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
+
 
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
 
 def test_main():
     from test import test_tokenize
     test_support.run_doctest(test_tokenize, True)
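To run the new test in isolation against a patched build (assuming a CPython 2.7 checkout):

    ./python -m test.regrtest -v test_tokenize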

Lib/tokenize.py

@@ -26,6 +26,7 @@ __author__ = 'Ka-Ping Yee <ping@lfw.org>'
 __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
                'Skip Montanaro, Raymond Hettinger')
 
+from itertools import chain
 import string, re
 from token import *
@@ -192,9 +193,10 @@ class Untokenizer:
             self.tokens.append(" " * col_offset)
 
     def untokenize(self, iterable):
-        for t in iterable:
+        it = iter(iterable)
+        for t in it:
             if len(t) == 2:
-                self.compat(t, iterable)
+                self.compat(t, it)
                 break
             tok_type, token, start, end, line = t
             self.add_whitespace(start)
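The `it = iter(iterable)` change is the heart of the fix on this side. With a list argument, the old `self.compat(t, iterable)` call re-iterated the list from its first element, which masked the bug; with a true iterator, iteration resumed after the consumed token and the first token was lost. Holding an explicit iterator makes both kinds of input behave the same. A small illustration of the underlying Python semantics (not part of the patch):

    tokens = ['a', 'b', 'c']
    it = iter(tokens)
    first = next(it)    # consume 'a'
    print list(tokens)  # ['a', 'b', 'c'] -- looping over the list again restarts it
    print list(it)      # ['b', 'c']      -- the iterator resumes where it left off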
@@ -206,16 +208,12 @@ class Untokenizer:
         return "".join(self.tokens)
 
     def compat(self, token, iterable):
-        startline = False
         indents = []
         toks_append = self.tokens.append
-        toknum, tokval = token
-        if toknum in (NAME, NUMBER):
-            tokval += ' '
-        if toknum in (NEWLINE, NL):
-            startline = True
+        startline = token[0] in (NEWLINE, NL)
         prevstring = False
-        for tok in iterable:
+
+        for tok in chain([token], iterable):
             toknum, tokval = tok[:2]
             if toknum in (NAME, NUMBER):
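Folding the already-consumed first token back into the stream with itertools.chain lets compat() drop its special-case preamble, which had duplicated the loop's NAME/NUMBER and NEWLINE/NL handling without ever appending the token's text. A minimal sketch of the chain() idiom (illustrative values, not from the patch):

    from itertools import chain

    it = iter([(1, 'a'), (2, 'b')])
    first = next(it)                # the caller has already pulled one item off
    for tok in chain([first], it):  # ...so prepend it before looping
        print tok                   # (1, 'a') then (2, 'b')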

Misc/NEWS

@@ -42,6 +42,9 @@ Library
 - Issue #17671: Fixed a crash when use non-initialized io.BufferedRWPair.
   Based on patch by Stephen Tu.
 
+- Issue #8478: Untokenizer.compat processes first token from iterator input.
+  Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
+
 - Issue #20594: Avoid name clash with the libc function posix_close.
 
 - Issue #19856: shutil.move() failed to move a directory to other directory