SF bug #1224621: tokenize module does not detect inconsistent dedents
parent 8fa7eb563b
commit da99d1cbfe
Lib/test/test_tokenize.py
@@ -1,4 +1,4 @@
-from test.test_support import verbose, findfile, is_resource_enabled
+from test.test_support import verbose, findfile, is_resource_enabled, TestFailed
 import os, glob, random
 from tokenize import (tokenize, generate_tokens, untokenize,
                       NUMBER, NAME, OP, STRING)
@@ -41,6 +41,24 @@ for f in testfiles:
     test_roundtrip(f)
 
 
+###### Test detection of IndentationError ######################
+
+from cStringIO import StringIO
+
+sampleBadText = """
+def foo():
+    bar
+  baz
+"""
+
+try:
+    for tok in generate_tokens(StringIO(sampleBadText).readline):
+        pass
+except IndentationError:
+    pass
+else:
+    raise TestFailed("Did not detect IndentationError:")
+
 
 ###### Test example in the docs ###############################
 
 
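The new test drives generate_tokens() over a sample whose "baz" line dedents to column 2, a level that was never opened. For readers trying this on a modern interpreter, here is a minimal sketch of the same check against the Python 3 tokenize API, where cStringIO.StringIO becomes io.StringIO and test_support.TestFailed is replaced by a plain AssertionError (both substitutions are mine, not part of this commit):

import io
import tokenize

# "bar" opens indent level 4; "baz" then dedents to column 2,
# which matches no outer indentation level.
sample_bad_text = """
def foo():
    bar
  baz
"""

try:
    for tok in tokenize.generate_tokens(io.StringIO(sample_bad_text).readline):
        pass
except IndentationError as err:
    print("caught:", err)
else:
    raise AssertionError("did not detect IndentationError")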
Lib/tokenize.py
@@ -271,6 +271,9 @@ def generate_tokens(readline):
                 indents.append(column)
                 yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
             while column < indents[-1]:
+                if column not in indents:
+                    raise IndentationError(
+                        "unindent does not match any outer indentation level")
                 indents = indents[:-1]
                 yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
 
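To see why the three added lines suffice: generate_tokens() keeps indents, a stack of the column widths of all currently open indentation levels, and a legal dedent must land exactly on one of them. A standalone sketch of that stack discipline (pop_dedents is a hypothetical helper written for illustration, not part of tokenize.py):

# "indents" is the stack of open indentation columns, smallest at the bottom.
def pop_dedents(indents, column):
    """Pop levels until 'column' is on top; reject in-between columns."""
    while column < indents[-1]:
        if column not in indents:
            raise IndentationError(
                "unindent does not match any outer indentation level")
        indents.pop()
    return indents

try:
    pop_dedents([0, 4], 2)          # column 2 falls between levels 0 and 4
except IndentationError as err:
    print(err)

print(pop_dedents([0, 4], 0))       # dedent to an existing level: [0]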
Misc/NEWS
@@ -147,6 +147,9 @@ Extension Modules
 Library
 -------
 
+- The tokenize module now detects and reports indentation errors.
+  Bug #1224621.
+
 - The tokenize module has a new untokenize() function to support a full
   roundtrip from lexed tokens back to Python source code. In addition,
   the generate_tokens() function now accepts a callable argument that
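The untokenize() entry above refers to the round-trip added in an earlier commit. A sketch of that round-trip, written against the Python 3 tokenize API rather than the 2005 one (the behavior shown, exact reconstruction from full five-tuples, is my reading of the documented guarantee):

import io
import tokenize

source = "x = 3 * (4 + 5)\n"

# With full five-tuples, untokenize() restores the original spacing.
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
assert tokenize.untokenize(tokens) == source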