Merged revisions 76062 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/trunk

................
  r76062 | benjamin.peterson | 2009-11-02 12:12:12 -0600 (Mon, 02 Nov 2009) | 70 lines

  Merged revisions 74359,75081,75088,75213,75278,75303,75427-75428,75734-75736,75865,76059-76061 via svnmerge from
  svn+ssh://pythondev@svn.python.org/sandbox/trunk/2to3/lib2to3

  ........
    r74359 | benjamin.peterson | 2009-08-12 17:23:13 -0500 (Wed, 12 Aug 2009) | 1 line

    don't pass the deprecated print_function option
  ........
    r75081 | benjamin.peterson | 2009-09-26 22:02:57 -0500 (Sat, 26 Sep 2009) | 1 line

    let 2to3 work with extended iterable unpacking
  ........
    r75088 | benjamin.peterson | 2009-09-27 11:25:21 -0500 (Sun, 27 Sep 2009) | 1 line

    look on the type only for __call__
  ........
    r75213 | benjamin.peterson | 2009-10-03 10:09:46 -0500 (Sat, 03 Oct 2009) | 5 lines

    revert 75212; it's not correct

    People can use isinstance(x, collections.Callable) if they expect objects with
    __call__ in their instance dictionaries.
  ........
    r75278 | benjamin.peterson | 2009-10-07 16:25:56 -0500 (Wed, 07 Oct 2009) | 4 lines

    fix whitespace problems with fix_idioms #3563

    Patch by Joe Amenta.
  ........
    r75303 | benjamin.peterson | 2009-10-09 16:59:11 -0500 (Fri, 09 Oct 2009) | 1 line

    port latin-1 and utf-8 cookie improvements
  ........
    r75427 | benjamin.peterson | 2009-10-14 20:35:57 -0500 (Wed, 14 Oct 2009) | 1 line

    force floor division
  ........
    r75428 | benjamin.peterson | 2009-10-14 20:39:21 -0500 (Wed, 14 Oct 2009) | 1 line

    silence -3 warnings about __hash__
  ........
    r75734 | benjamin.peterson | 2009-10-26 16:25:53 -0500 (Mon, 26 Oct 2009) | 2 lines

    warn on map(None, ...) with more than 2 arguments #7203
  ........
    r75735 | benjamin.peterson | 2009-10-26 16:28:25 -0500 (Mon, 26 Oct 2009) | 1 line

    remove unused result
  ........
    r75736 | benjamin.peterson | 2009-10-26 16:29:02 -0500 (Mon, 26 Oct 2009) | 1 line

    using get() here is a bit pointless
  ........
    r75865 | benjamin.peterson | 2009-10-27 15:49:00 -0500 (Tue, 27 Oct 2009) | 1 line

    explain reason for warning
  ........
    r76059 | benjamin.peterson | 2009-11-02 11:43:47 -0600 (Mon, 02 Nov 2009) | 1 line

    tuples are no longer used for children
  ........
    r76060 | benjamin.peterson | 2009-11-02 11:55:40 -0600 (Mon, 02 Nov 2009) | 1 line

    revert r76059; apparently some fixers rely on Leaf using () for children
  ........
    r76061 | benjamin.peterson | 2009-11-02 12:06:17 -0600 (Mon, 02 Nov 2009) | 1 line

    make fix_tuple_params keep the tree valid #7253
  ........
................
Benjamin Peterson 2009-11-02 18:16:28 +00:00
parent 03f13445c9
commit d9af52ba7e
10 changed files with 118 additions and 21 deletions

View File

@@ -53,8 +53,9 @@ stmt: simple_stmt | compound_stmt
 simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
 small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
              import_stmt | global_stmt | exec_stmt | assert_stmt)
-expr_stmt: testlist (augassign (yield_expr|testlist) |
-                     ('=' (yield_expr|testlist))*)
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist_star_expr))*)
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
 augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
             '<<=' | '>>=' | '**=' | '//=')
 # For normal assignments, additional restrictions enforced by the interpreter
@@ -112,6 +113,7 @@ and_test: not_test ('and' not_test)*
 not_test: 'not' not_test | comparison
 comparison: expr (comp_op expr)*
 comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
 expr: xor_expr ('|' xor_expr)*
 xor_expr: and_expr ('^' and_expr)*
 and_expr: shift_expr ('&' shift_expr)*
@@ -125,14 +127,14 @@ atom: ('(' [yield_expr|testlist_gexp] ')' |
        '{' [dictsetmaker] '}' |
        '`' testlist1 '`' |
        NAME | NUMBER | STRING+ | '.' '.' '.')
-listmaker: test ( comp_for | (',' test)* [','] )
-testlist_gexp: test ( comp_for | (',' test)* [','] )
+listmaker: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_gexp: test ( comp_for | (',' (test|star_expr))* [','] )
 lambdef: 'lambda' [varargslist] ':' test
 trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
 subscriptlist: subscript (',' subscript)* [',']
 subscript: test | [test] ':' [test] [sliceop]
 sliceop: ':' [test]
-exprlist: expr (',' expr)* [',']
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
 testlist: test (',' test)* [',']
 dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
                 (test (comp_for | (',' test)* [','])) )
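For reference, the star_expr and testlist_star_expr additions above cover PEP 3132 extended iterable unpacking. A minimal sketch of the forms the grammar now accepts (runnable on Python 3):

    # Extended iterable unpacking (PEP 3132):
    a, *b, c = range(5)
    print(a, b, c)           # 0 [1, 2, 3] 4

    [*head, tail] = "abcd"
    print(head, tail)        # ['a', 'b', 'c'] d

    for *init, last in [(1, 2, 3), (4, 5)]:
        print(init, last)    # [1, 2] 3, then [4] 5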

View File

@@ -29,7 +29,7 @@ into
 # Local imports
 from .. import fixer_base
-from ..fixer_util import Call, Comma, Name, Node, syms
+from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms

 CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
 TYPE = "power< 'type' trailer< '(' x=any ')' > >"
@@ -130,5 +130,24 @@ class FixIdioms(fixer_base.BaseFix):
         else:
             raise RuntimeError("should not have reached here")
         sort_stmt.remove()
-        if next_stmt:
-            next_stmt[0].prefix = sort_stmt.prefix
+
+        btwn = sort_stmt.prefix
+        # Keep any prefix lines between the sort_stmt and the list_call and
+        # shove them right after the sorted() call.
+        if "\n" in btwn:
+            if next_stmt:
+                # The new prefix should be everything from the sort_stmt's
+                # prefix up to the last newline, then the old prefix after a new
+                # line.
+                prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix)
+                next_stmt[0].prefix = "\n".join(prefix_lines)
+            else:
+                assert list_call.parent
+                assert list_call.next_sibling is None
+                # Put a blank line after list_call and set its prefix.
+                end_line = BlankLine()
+                list_call.parent.append_child(end_line)
+                assert list_call.next_sibling is end_line
+                # The new prefix should be everything up to the first new line
+                # of sort_stmt's prefix.
+                end_line.prefix = btwn.rpartition("\n")[0]
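One way to exercise this end to end is lib2to3's RefactoringTool (a sketch: fix_idioms is an optional fixer, so explicit=True is passed to enable it, and the name "<example>" is an arbitrary label):

    from lib2to3.refactor import RefactoringTool

    tool = RefactoringTool(["lib2to3.fixes.fix_idioms"], explicit=True)
    src = (
        "try:\n"
        "    m = list(s)\n"
        "    # foo\n"
        "    m.sort()\n"
        "except: pass\n"
    )
    # The comment sits in m.sort()'s prefix; the new code above keeps it
    # next to the sorted() call instead of dropping it.
    print(tool.refactor_string(src, "<example>"))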

View File

@@ -49,8 +49,7 @@ class FixMap(fixer_base.ConditionalFix):
     >
     |
     power<
-        'map'
-        args=trailer< '(' [any] ')' >
+        'map' trailer< '(' [arglist=any] ')' >
     >
     """

@@ -66,13 +65,22 @@ class FixMap(fixer_base.ConditionalFix):
             new.prefix = ""
             new = Call(Name("list"), [new])
         elif "map_lambda" in results:
-            new = ListComp(results.get("xp").clone(),
-                           results.get("fp").clone(),
-                           results.get("it").clone())
+            new = ListComp(results["xp"].clone(),
+                           results["fp"].clone(),
+                           results["it"].clone())
         else:
             if "map_none" in results:
                 new = results["arg"].clone()
             else:
+                if "arglist" in results:
+                    args = results["arglist"]
+                    if args.type == syms.arglist and \
+                       args.children[0].type == token.NAME and \
+                       args.children[0].value == "None":
+                        self.warning(node, "cannot convert map(None, ...) "
+                                     "with multiple arguments because map() "
+                                     "now truncates to the shortest sequence")
+                        return
                 if in_special_context(node):
                     return None
                 new = node.clone()
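The warning exists because the Python 2 and Python 3 semantics genuinely diverge here; a small illustration (Python 2 results shown in comments):

    a, b = [1, 2, 3], ["x"]
    # Python 2: map(None, a, b) pads the shorter input with None:
    #   [(1, 'x'), (2, None), (3, None)]
    # Python 3: map() truncates to the shortest input, so the nearest
    # rewrite, list(zip(a, b)), silently drops items -- hence warn-only:
    print(list(zip(a, b)))   # [(1, 'x')]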

View File

@@ -96,6 +96,8 @@ class FixTupleParams(fixer_base.BaseFix):
             new_lines[0].prefix = indent
             after = start + 1

+        for line in new_lines:
+            line.parent = suite[0]
         suite[0].children[after:after] = new_lines
         for i in range(after+1, after+len(new_lines)+1):
             suite[0].children[i].prefix = indent
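For context, fix_tuple_params splices the generated unpacking statements (new_lines) into the function's suite, so each spliced node now gets its parent pointer set; issue 7253 was the orphaned-node fallout. A sketch of the transform itself (assuming lib2to3 is importable; xxx_todo_changeme is the fixer's own placeholder name):

    from lib2to3.refactor import RefactoringTool

    tool = RefactoringTool(["lib2to3.fixes.fix_tuple_params"])
    # lib2to3's grammar still parses Python 2 tuple parameters:
    print(tool.refactor_string("def f((a, b)):\n    return a + b\n", "<t>"))
    # Roughly:
    # def f(xxx_todo_changeme):
    #     (a, b) = xxx_todo_changeme
    #     return a + b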

View File

@@ -379,6 +379,8 @@ class DFAState(object):
                 return False
         return True

+    __hash__ = None # For Py3 compatibility.
+
 def generate_grammar(filename="Grammar.txt"):
     p = ParserGenerator(filename)
     return p.make_grammar()
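Background on the __hash__ = None lines here and in pytree below: Python 2's -3 mode warns about classes that define __eq__ without __hash__, because Python 3 makes such classes unhashable automatically. Assigning __hash__ = None gives Python 2 the same behavior; a short demonstration:

    class C(object):
        def __eq__(self, other):
            return isinstance(other, C)
        __hash__ = None  # as in Py3: defining __eq__ disables hashing

    try:
        hash(C())
    except TypeError as exc:
        print(exc)   # unhashable type: 'C'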

View File

@@ -231,6 +231,17 @@ class Untokenizer:

 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")

+def _get_normal_name(orig_enc):
+    """Imitates get_normal_name in tokenizer.c."""
+    # Only care about the first 12 characters.
+    enc = orig_enc[:12].lower().replace("_", "-")
+    if enc == "utf-8" or enc.startswith("utf-8-"):
+        return "utf-8"
+    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+        return "iso-8859-1"
+    return orig_enc
+
 def detect_encoding(readline):
     """
     The detect_encoding() function is used to detect the encoding that should
@@ -265,7 +276,7 @@ def detect_encoding(readline):
         matches = cookie_re.findall(line_string)
         if not matches:
             return None
-        encoding = matches[0]
+        encoding = _get_normal_name(matches[0])
         try:
             codec = lookup(encoding)
         except LookupError:
@@ -375,7 +386,7 @@ def generate_tokens(readline):
             column = 0
             while pos < max:        # measure leading whitespace
                 if line[pos] == ' ': column = column + 1
-                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+                elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
                 elif line[pos] == '\f': column = 0
                 else: break
                 pos = pos + 1
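Two notes on this file. _get_normal_name() mirrors get_normal_name() in CPython's tokenizer.c; a quick check of the normalization (assuming lib2to3 is importable):

    from lib2to3.pgen2.tokenize import _get_normal_name

    print(_get_normal_name("UTF_8"))        # utf-8
    print(_get_normal_name("Latin-1"))      # iso-8859-1
    print(_get_normal_name("iso_8859_1"))   # iso-8859-1
    print(_get_normal_name("ascii"))        # ascii (passed through)

And the generate_tokens() change is the usual true-division cleanup: under Python 3 (or -Qnew), "/" on integers returns a float, so the tab-stop arithmetic switches to "//":

    tabsize, column = 8, 3
    print((column // tabsize + 1) * tabsize)   # 8 -- next tab stop, an int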

View File

@@ -63,6 +63,8 @@ class Base(object):
             return NotImplemented
         return self._eq(other)

+    __hash__ = None # For Py3 compatibility.
+
     def __ne__(self, other):
         """
         Compare two nodes for inequality.

View File

@@ -16,8 +16,7 @@ from . import support

 class Test_all(support.TestCase):
     def setUp(self):
-        options = {"print_function" : False}
-        self.refactor = support.get_refactorer(options=options)
+        self.refactor = support.get_refactorer()

     def test_all_project_files(self):
         for filepath in support.all_project_files():

View File

@@ -339,6 +339,12 @@ class Test_reduce(FixerTestCase):
         a = "from functools import reduce\nreduce(a, b, c)"
         self.check(b, a)

+    def test_bug_7253(self):
+        # fix_tuple_params was being bad and orphaning nodes in the tree.
+        b = "def x(arg): reduce(sum, [])"
+        a = "from functools import reduce\ndef x(arg): reduce(sum, [])"
+        self.check(b, a)
+
     def test_call_with_lambda(self):
         b = "reduce(lambda x, y: x + y, seq)"
         a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
@@ -2834,6 +2840,11 @@ class Test_map(FixerTestCase):
         a = """x = list(map(f, 'abc')) # foo"""
         self.check(b, a)

+    def test_None_with_multiple_arguments(self):
+        s = """x = map(None, a, b, c)"""
+        self.warns_unchanged(s, "cannot convert map(None, ...) with "
+                             "multiple arguments")
+
     def test_map_basic(self):
         b = """x = map(f, 'abc')"""
         a = """x = list(map(f, 'abc'))"""
@@ -2847,10 +2858,6 @@ class Test_map(FixerTestCase):
         a = """x = list('abc')"""
         self.check(b, a)

-        b = """x = map(None, 'abc', 'def')"""
-        a = """x = list(map(None, 'abc', 'def'))"""
-        self.check(b, a)
-
         b = """x = map(lambda x: x+1, range(4))"""
         a = """x = [x+1 for x in range(4)]"""
         self.check(b, a)
@@ -3238,6 +3245,46 @@ class Test_idioms(FixerTestCase):
             """
         self.check(b, a)

+        b = r"""
+            try:
+                m = list(s)
+                m.sort()
+            except: pass
+            """
+
+        a = r"""
+            try:
+                m = sorted(s)
+            except: pass
+            """
+        self.check(b, a)
+
+        b = r"""
+            try:
+                m = list(s)
+                # foo
+                m.sort()
+            except: pass
+            """
+
+        a = r"""
+            try:
+                m = sorted(s)
+                # foo
+            except: pass
+            """
+        self.check(b, a)
+
+        b = r"""
+            m = list(s)
+            # more comments
+            m.sort()"""
+
+        a = r"""
+            m = sorted(s)
+            # more comments"""
+        self.check(b, a)
+
     def test_sort_simple_expr(self):
         b = """
             v = t

View File

@@ -161,6 +161,11 @@ class TestParserIdempotency(support.TestCase):
         if diff(filepath, new):
             self.fail("Idempotency failed: %s" % filepath)

+    def test_extended_unpacking(self):
+        driver.parse_string("a, *b, c = x\n")
+        driver.parse_string("[*a, b] = x\n")
+        driver.parse_string("(z, *y, w) = m\n")
+        driver.parse_string("for *z, m in d: pass\n")
+
 class TestLiterals(GrammarTest):