mirror of https://github.com/python/cpython
#16152: merge with 3.2.
commit fafa8b7797
@@ -1109,6 +1109,10 @@ class TestTokenize(TestCase):
                                   token.NAME, token.AMPER, token.NUMBER,
                                   token.RPAR)
 
+    def test_pathological_trailing_whitespace(self):
+        # See http://bugs.python.org/issue16152
+        self.assertExactTypeEqual('@ ', token.AT)
+
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
 
 def test_main():

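For context, a minimal standalone sketch (not part of the commit) of the behaviour the new test asserts: a line consisting of '@' plus trailing whitespace and no newline should tokenize cleanly, with the '@' reported with exact type token.AT. The exact token sequence after the operator varies slightly across Python versions.

# Standalone illustration of the case covered by
# test_pathological_trailing_whitespace (requires Python 3.3+ for exact_type).
import io
import token
import tokenize

source = b'@ '   # '@' followed only by trailing whitespace, no newline
toks = list(tokenize.tokenize(io.BytesIO(source).readline))

# toks[0] is the ENCODING token; the first real token is the '@' operator.
assert toks[1].exact_type == token.AT
print([(token.tok_name[t.exact_type], t.string) for t in toks])
# e.g. on recent Pythons:
# [('ENCODING', 'utf-8'), ('AT', '@'), ('NEWLINE', ''), ('ENDMARKER', '')]
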
@@ -162,7 +162,7 @@ ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
                 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
 def _compile(expr):

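A rough illustration, not taken from the commit, of what the added r'\Z' alternative buys: PseudoToken is Whitespace followed by a group whose first alternative is PseudoExtras, so once r'\Z' is present the pattern can still match when nothing but trailing whitespace remains before end of input, yielding an empty capture. The patterns below are toy stand-ins that only mirror that structure, with '@' standing in for the operator alternatives; they are not the real PseudoToken.

import re

# Toy stand-ins mirroring the Whitespace + group(PseudoExtras, ...) shape.
Whitespace = r'[ \f\t]*'
old_pseudo = re.compile(Whitespace + r'(\\\r?\n|@)')      # before: no \Z
new_pseudo = re.compile(Whitespace + r'(\\\r?\n|\Z|@)')   # after: \Z added

line, pos = '@ ', 1                 # scanning position just past the '@'
print(old_pseudo.match(line, pos))  # None -> nothing left for the scanner to match
m = new_pseudo.match(line, pos)
print(m.span(1))                    # (2, 2) -> an empty match at end of line
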
@@ -555,6 +555,8 @@ def _tokenize(readline, encoding):
             if pseudomatch:                                # scan for tokens
                 start, end = pseudomatch.span(1)
                 spos, epos, pos = (lnum, start), (lnum, end), end
+                if start == end:
+                    continue
                 token, initial = line[start:end], line[start]
 
                 if (initial in numchars or                 # ordinary number

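Continuing the sketch, a toy scanning loop loosely patterned on the loop in _tokenize (the pattern and helper below are hypothetical stand-ins, not the real PseudoToken) shows why the new start == end guard matters: the empty end-of-input match is skipped instead of being emitted as a spurious zero-width token.

import re

PSEUDO = re.compile(r'[ \f\t]*(\\\r?\n|\Z|@|\w+)')   # toy stand-in only

def toy_scan(line):
    """Collect non-empty pseudo-matches, skipping the empty end-of-input match."""
    pos, out = 0, []
    while pos < len(line):
        m = PSEUDO.match(line, pos)
        if not m:
            break
        start, end = m.span(1)
        pos = end
        if start == end:      # empty match: only trailing whitespace was left
            continue          # skip it, as the fixed _tokenize now does
        out.append(line[start:end])
    return out

print(toy_scan('@ '))         # ['@'] -- no spurious empty token at the end
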
@@ -77,6 +77,7 @@ Ulf Bartelt
 Don Bashford
+Pior Bastida
 Nick Bastin
 Ned Batchelder
 Jeff Bauer
 Michael R Bax
 Anthony Baxter