Run 2to3 on this library.

Martin v. Löwis 2008-03-19 05:33:36 +00:00
parent f733c60d9a
commit 8a5f8ca33b
22 changed files with 196 additions and 194 deletions

View File

@@ -108,7 +108,7 @@ class BaseFix(object):
"""
name = template
while name in self.used_names:
name = template + str(self.numbers.next())
name = template + str(next(self.numbers))
self.used_names.add(name)
return name

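The change in this hunk recurs throughout the commit: Python 3 renames the
iterator method next() to __next__(), and the builtin next() becomes the
portable way to advance an iterator. A minimal sketch, with itertools.count
standing in for self.numbers:

import itertools

numbers = itertools.count(1)              # stand-in for self.numbers
name = "template" + str(next(numbers))    # next(it) calls it.__next__()
assert name == "template1"
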
View File

@@ -10,8 +10,8 @@ Fixes:
# Local imports
from . import basefix
from .util import Name, attr_chain, any, set
import __builtin__
builtin_names = [name for name in dir(__builtin__)
import builtins
builtin_names = [name for name in dir(builtins)
if name not in ("__name__", "__doc__")]
MAPPING = {"StringIO": ("io", ["StringIO"]),
@@ -26,7 +26,7 @@ def alternates(members):
def build_pattern():
bare = set()
for old_module, (new_module, members) in MAPPING.items():
for old_module, (new_module, members) in list(MAPPING.items()):
bare.add(old_module)
bare.update(members)
members = alternates(members)

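Two more recurring rewrites appear here: the __builtin__ module is renamed
builtins, and dict.items() now returns a view, so 2to3 defensively wraps it
in list() wherever the loop body might mutate the dict. A short sketch, with
a toy mapping standing in for the fixer's MAPPING table:

import builtins

builtin_names = [name for name in dir(builtins)
                 if name not in ("__name__", "__doc__")]
assert "len" in builtin_names

mapping = {"StringIO": ("io", ["StringIO"])}          # toy stand-in
for old_module, (new_module, members) in list(mapping.items()):
    # the list() snapshot makes mutation inside the loop safe
    mapping[new_module] = (new_module, members)
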
View File

@@ -20,8 +20,8 @@ def alternates(members):
def build_pattern():
#bare = set()
for module, replace in MAPPING.items():
for old_attr, new_attr in replace.items():
for module, replace in list(MAPPING.items()):
for old_attr, new_attr in list(replace.items()):
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)

View File

@@ -323,7 +323,7 @@ def _is_import_binding(node, name, package=None):
elif node.type == syms.import_from:
# unicode(...) is used to make life easier here, because
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and unicode(node.children[1]).strip() != package:
if package and str(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find('as', n):

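Python 3 folds unicode into str, so the fixer's cheap stringification of a
parse subtree becomes str(...). A sketch with a hypothetical node-like class
in place of a real pytree node:

class Node:                          # hypothetical stand-in for a pytree node
    def __str__(self):
        return " a.b "

# str(node) replaces unicode(node); .strip() then normalizes whitespace
assert str(Node()).strip() == "a.b"
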
View File

@@ -31,7 +31,7 @@ _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = (token.NEWLINE, token.INDENT, token.DEDENT)
tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
tokens = tokenize.generate_tokens(driver.generate_lines(input).__next__)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:

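tokenize.generate_tokens() expects a readline-style callable. The Python 2
code passed the generator's bound .next method; in Python 3 that attribute
is spelled __next__. An equivalent, slightly more conventional sketch feeds
it io.StringIO.readline instead of generate_lines(input).__next__:

import io
import tokenize

source = io.StringIO("x = 1\n")
for tok in tokenize.generate_tokens(source.readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
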
View File

@@ -60,8 +60,8 @@ class Converter(grammar.Grammar):
"""
try:
f = open(filename)
except IOError, err:
print "Can't open %s: %s" % (filename, err)
except IOError as err:
print("Can't open %s: %s" % (filename, err))
return False
self.symbol2number = {}
self.number2symbol = {}
@@ -70,8 +70,8 @@ class Converter(grammar.Grammar):
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
print "%s(%s): can't parse %s" % (filename, lineno,
line.strip())
print("%s(%s): can't parse %s" % (filename, lineno,
line.strip()))
else:
symbol, number = mo.groups()
number = int(number)
@@ -111,20 +111,20 @@ class Converter(grammar.Grammar):
"""
try:
f = open(filename)
except IOError, err:
print "Can't open %s: %s" % (filename, err)
except IOError as err:
print("Can't open %s: %s" % (filename, err))
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
# Expect the two #include lines
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == '#include "pgenheaders.h"\n', (lineno, line)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == '#include "grammar.h"\n', (lineno, line)
# Parse the state definitions
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
allarcs = {}
states = []
while line.startswith("static arc "):
@@ -132,35 +132,35 @@ class Converter(grammar.Grammar):
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
line)
assert mo, (lineno, line)
n, m, k = map(int, mo.groups())
n, m, k = list(map(int, mo.groups()))
arcs = []
for _ in range(k):
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+{(\d+), (\d+)},$", line)
assert mo, (lineno, line)
i, j = map(int, mo.groups())
i, j = list(map(int, mo.groups()))
arcs.append((i, j))
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
allarcs[(n, m)] = arcs
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
assert mo, (lineno, line)
s, t = map(int, mo.groups())
s, t = list(map(int, mo.groups()))
assert s == len(states), (lineno, line)
state = []
for _ in range(t):
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
assert mo, (lineno, line)
k, n, m = map(int, mo.groups())
k, n, m = list(map(int, mo.groups()))
arcs = allarcs[n, m]
assert k == len(arcs), (lineno, line)
state.append(arcs)
states.append(state)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
self.states = states
# Parse the dfas
@@ -169,18 +169,18 @@ class Converter(grammar.Grammar):
assert mo, (lineno, line)
ndfas = int(mo.group(1))
for i in range(ndfas):
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
line)
assert mo, (lineno, line)
symbol = mo.group(2)
number, x, y, z = map(int, mo.group(1, 3, 4, 5))
number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
assert self.symbol2number[symbol] == number, (lineno, line)
assert self.number2symbol[number] == symbol, (lineno, line)
assert x == 0, (lineno, line)
state = states[z]
assert y == len(state), (lineno, line)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
assert mo, (lineno, line)
first = {}
@@ -191,18 +191,18 @@ class Converter(grammar.Grammar):
if byte & (1<<j):
first[i*8 + j] = 1
dfas[number] = (state, first)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
self.dfas = dfas
# Parse the labels
labels = []
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"static label labels\[(\d+)\] = {$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
for i in range(nlabels):
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
assert mo, (lineno, line)
x, y = mo.groups()
@@ -212,35 +212,35 @@ class Converter(grammar.Grammar):
else:
y = eval(y)
labels.append((x, y))
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
self.labels = labels
# Parse the grammar struct
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+(\d+),$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
assert ndfas == len(self.dfas)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "\tdfas,\n", (lineno, line)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+{(\d+), labels},$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
assert nlabels == len(self.labels), (lineno, line)
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
mo = re.match(r"\s+(\d+)$", line)
assert mo, (lineno, line)
start = int(mo.group(1))
assert start in self.number2symbol, (lineno, line)
self.start = start
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
assert line == "};\n", (lineno, line)
try:
lineno, line = lineno+1, f.next()
lineno, line = lineno+1, next(f)
except StopIteration:
pass
else:

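This converter collects the commonest mechanical rewrites in one place:
"except E, err" becomes "except E as err", f.next() becomes next(f) because
open files are plain iterators, and map() turns lazy, which is why 2to3
wraps it in list() (tuple unpacking of a map object would still work). A
compact sketch, using a hypothetical input file name:

try:
    f = open("graminit.c")            # hypothetical input file
except IOError as err:                # "as" replaces the comma syntax
    print("Can't open %s: %s" % ("graminit.c", err))
else:
    lineno, line = 1, next(f)         # next(f) replaces f.next()
    f.close()
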
View File

@@ -99,7 +99,7 @@ class Driver(object):
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(generate_lines(text).next)
tokens = tokenize.generate_tokens(generate_lines(text).__next__)
return self.parse_tokens(tokens, debug)

View File

@@ -100,17 +100,17 @@ class Grammar(object):
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print "s2n"
print("s2n")
pprint(self.symbol2number)
print "n2s"
print("n2s")
pprint(self.number2symbol)
print "states"
print("states")
pprint(self.states)
print "dfas"
print("dfas")
pprint(self.dfas)
print "labels"
print("labels")
pprint(self.labels)
print "start", self.start
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)

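The report() hunk is the print-statement rewrite in its purest form: print
is a function in Python 3, and multiple arguments come out space-separated
exactly as the old statement's commas did. A sketch with a toy table:

from pprint import pprint

symbol2number = {"expr": 256}         # toy grammar table
print("s2n")
pprint(symbol2number)
print("start", 256)                   # prints "start 256", as before
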
View File

@@ -53,7 +53,7 @@ def test():
s = repr(c)
e = evalString(s)
if e != c:
print i, c, s, e
print(i, c, s, e)
if __name__ == "__main__":

View File

@@ -26,7 +26,7 @@ class ParserGenerator(object):
def make_grammar(self):
c = PgenGrammar()
names = self.dfas.keys()
names = list(self.dfas.keys())
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
@@ -39,7 +39,7 @@ class ParserGenerator(object):
states = []
for state in dfa:
arcs = []
for label, next in state.arcs.iteritems():
for label, next in state.arcs.items():
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
@@ -105,7 +105,7 @@ class ParserGenerator(object):
return ilabel
def addfirstsets(self):
names = self.dfas.keys()
names = list(self.dfas.keys())
names.sort()
for name in names:
if name not in self.first:
@@ -118,7 +118,7 @@ class ParserGenerator(object):
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, next in state.arcs.iteritems():
for label, next in state.arcs.items():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
@@ -133,7 +133,7 @@ class ParserGenerator(object):
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
for label, itsfirst in overlapcheck.iteritems():
for label, itsfirst in overlapcheck.items():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
@@ -192,7 +192,7 @@ class ParserGenerator(object):
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in arcs.iteritems():
for label, nfaset in arcs.items():
for st in states:
if st.nfaset == nfaset:
break
@@ -203,10 +203,10 @@ class ParserGenerator(object):
return states # List of DFAState instances; first one is start
def dump_nfa(self, name, start, finish):
print "Dump of NFA for", name
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print " State", i, state is finish and "(final)" or ""
print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
@@ -214,16 +214,16 @@ class ParserGenerator(object):
j = len(todo)
todo.append(next)
if label is None:
print " -> %d" % j
print(" -> %d" % j)
else:
print " %s -> %d" % (label, j)
print(" %s -> %d" % (label, j))
def dump_dfa(self, name, dfa):
print "Dump of DFA for", name
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print " State", i, state.isfinal and "(final)" or ""
for label, next in state.arcs.iteritems():
print " %s -> %d" % (label, dfa.index(next))
print(" State", i, state.isfinal and "(final)" or "")
for label, next in state.arcs.items():
print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
@@ -319,9 +319,9 @@ class ParserGenerator(object):
return value
def gettoken(self):
tup = self.generator.next()
tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = self.generator.next()
tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
@@ -330,7 +330,7 @@ class ParserGenerator(object):
try:
msg = msg % args
except:
msg = " ".join([msg] + map(str, args))
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
@@ -348,7 +348,7 @@ class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(iter(nfaset).next(), NFAState)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
@@ -361,7 +361,7 @@ class DFAState(object):
self.arcs[label] = next
def unifystate(self, old, new):
for label, next in self.arcs.iteritems():
for label, next in self.arcs.items():
if next is old:
self.arcs[label] = new
@@ -374,7 +374,7 @@ class DFAState(object):
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in self.arcs.iteritems():
for label, next in self.arcs.items():
if next is not other.arcs.get(label):
return False
return True

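pgen's state machinery iterates dicts with iteritems(), which Python 3
removes; items() now returns a lazy view with the same behaviour, so no
list() wrapper is needed while the dict is only read. Sketch:

arcs = {"NAME": 1, "NUMBER": 2}           # toy label -> successor-state table
for label, next_state in arcs.items():    # .items() replaces .iteritems()
    print("%s -> %d" % (label, next_state))
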
View File

@@ -67,7 +67,7 @@ NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
for _name, _value in list(globals().items()):
if type(_value) is type(0):
tok_name[_value] = _name

View File

@@ -94,8 +94,8 @@ ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
re.compile, (Token, PseudoToken, Single3, Double3))
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
@@ -143,9 +143,11 @@ class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
print "%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token))
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
@@ -279,7 +281,7 @@ def generate_tokens(readline):
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
@@ -335,7 +337,7 @@ def generate_tokens(readline):
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:

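Python 3 drops tuple parameters from function signatures, so 2to3 hoists
the unpacking into the body under generated xxx_todo_changeme names, and
"raise E, args" becomes an ordinary constructor call. A hand-written
equivalent of the converted printtoken, with clearer names than the
generated ones:

def printtoken(type, token, start, end, line):   # was (srow, scol), (erow, ecol)
    (srow, scol) = start
    (erow, ecol) = end
    print("%d,%d-%d,%d:\t%s" % (srow, scol, erow, ecol, repr(token)))

printtoken(1, "x", (1, 0), (1, 1), "x = 1\n")
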
View File

@@ -23,7 +23,7 @@ class Symbols(object):
Creates an attribute for each grammar symbol (nonterminal),
whose value is the symbol's type (an int >= 256).
"""
for name, symbol in grammar.symbol2number.iteritems():
for name, symbol in grammar.symbol2number.items():
setattr(self, name, symbol)

View File

@@ -443,7 +443,7 @@ class LeafPattern(BasePattern):
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, basestring), repr(content)
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
@@ -491,7 +491,7 @@ class NodePattern(BasePattern):
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, basestring), repr(content)
assert not isinstance(content, str), repr(content)
content = list(content)
for i, item in enumerate(content):
assert isinstance(item, BasePattern), (i, item)
@@ -622,7 +622,7 @@ class WildcardPattern(BasePattern):
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in xrange(self.min, 1 + min(len(nodes), self.max)):
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]

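Two outright removals surface in the pattern classes: basestring is gone
(str is the only text type left, so isinstance checks test str), and xrange
is gone (range is now lazy, so it substitutes directly). Sketch:

content = "file_input"
assert isinstance(content, str)       # str replaces basestring

for count in range(0, 3):             # range replaces xrange; still lazy
    pass
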
View File

@@ -63,14 +63,14 @@ def main(args=None):
# Parse command line arguments
options, args = parser.parse_args(args)
if options.list_fixes:
print "Available transformations for the -f/--fix option:"
print("Available transformations for the -f/--fix option:")
for fixname in get_all_fix_names():
print fixname
print(fixname)
if not args:
return 0
if not args:
print >>sys.stderr, "At least one file or directory argument required."
print >>sys.stderr, "Use --help to show usage."
print("At least one file or directory argument required.", file=sys.stderr)
print("Use --help to show usage.", file=sys.stderr)
return 2
# Initialize the refactoring tool
@@ -145,7 +145,7 @@ class RefactoringTool(object):
continue
try:
fixer = fix_class(self.options, self.fixer_log)
except Exception, err:
except Exception as err:
self.log_error("Can't instantiate fixes.fix_%s.%s()",
fix_name, class_name, exc_info=True)
continue
@@ -207,7 +207,7 @@ class RefactoringTool(object):
"""Refactors a file."""
try:
f = open(filename)
except IOError, err:
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return
try:
@@ -243,7 +243,7 @@ class RefactoringTool(object):
"""
try:
tree = self.driver.parse_string(data,1)
except Exception, err:
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
@@ -331,7 +331,7 @@ class RefactoringTool(object):
if old_text is None:
try:
f = open(filename, "r")
except IOError, err:
except IOError as err:
self.log_error("Can't read %s: %s", filename, err)
return
try:
@@ -351,21 +351,21 @@ class RefactoringTool(object):
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error, err:
except os.error as err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error, err:
except os.error as err:
self.log_message("Can't rename %s to %s", filename, backup)
try:
f = open(filename, "w")
except os.error, err:
except os.error as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
try:
f.write(new_text)
except os.error, err:
except os.error as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
@@ -428,7 +428,7 @@ class RefactoringTool(object):
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception, err:
except Exception as err:
if self.options.verbose:
for line in block:
self.log_message("Source: %s", line.rstrip("\n"))
@@ -480,7 +480,7 @@ class RefactoringTool(object):
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
@@ -519,7 +519,7 @@ def diff_texts(a, b, filename):
for line in difflib.unified_diff(a, b, filename, filename,
"(original)", "(refactored)",
lineterm=""):
print line
print(line)
if __name__ == "__main__":

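refactor.py shows the stream-redirect form of the print statement:
"print >>sys.stderr, ..." becomes the file= keyword argument, alongside the
usual except-as rewrites. Sketch:

import sys

print("At least one file or directory argument required.", file=sys.stderr)
print("Use --help to show usage.", file=sys.stderr)
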
View File

@@ -23,7 +23,7 @@ from .. import refactor
###############################################################################
class Options:
def __init__(self, **kwargs):
for k, v in kwargs.items():
for k, v in list(kwargs.items()):
setattr(self, k, v)
self.verbose = False
@@ -34,7 +34,7 @@ def dummy_transform(*args, **kwargs):
### Collect list of modules to match against
###############################################################################
files = []
for mod in sys.modules.values():
for mod in list(sys.modules.values()):
if mod is None or not hasattr(mod, '__file__'):
continue
f = mod.__file__
@@ -53,6 +53,6 @@ for fixer in refactor.fixers:
t = time()
for f in files:
print "Matching", f
print("Matching", f)
refactor.refactor_file(f)
print "%d seconds to match %d files" % (time() - t, len(sys.modules))
print("%d seconds to match %d files" % (time() - t, len(sys.modules)))

View File

@@ -30,13 +30,13 @@ class TokenTests(unittest.TestCase):
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
from sys import maxint
self.assertEquals(0o377, 255)
self.assertEquals(2147483647, 0o17777777777)
from sys import maxsize
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
self.assertEquals(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0o37777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
@@ -44,8 +44,8 @@ class TokenTests(unittest.TestCase):
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
self.assert_(0o1777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
@@ -57,14 +57,14 @@ class TokenTests(unittest.TestCase):
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
x = 0
x = 0
x = 0xffffffffffffffff
x = 0xffffffffffffffff
x = 0o77777777777777777
x = 0o77777777777777777
x = 123456789012345678901234567890
x = 123456789012345678901234567890
def testFloats(self):
x = 3.14
@@ -152,27 +152,27 @@ class GrammarTests(unittest.TestCase):
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
def f4(two, xxx_todo_changeme): (compound, (argument, list)) = xxx_todo_changeme; pass
def f5(xxx_todo_changeme1, two): (compound, first) = xxx_todo_changeme1; pass
self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
self.assertEquals(f4.__code__.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
self.assertEquals(f5.__code__.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
self.assertEquals(f4.__code__.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
self.assertEquals(f5.__code__.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
def v3(a, xxx_todo_changeme2, *rest): (b, c) = xxx_todo_changeme2; return a, b, c, rest
f1()
f2(1)
@@ -201,9 +201,9 @@ class GrammarTests(unittest.TestCase):
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
self.assertEquals(v3.__code__.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3.__code__.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
@@ -277,9 +277,9 @@ class GrammarTests(unittest.TestCase):
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
def d31v(xxx_todo_changeme3): (x) = xxx_todo_changeme3; pass
d31v(1)
def d32v((x,)): pass
def d32v(xxx_todo_changeme4): (x,) = xxx_todo_changeme4; pass
d32v((1,))
def testLambdef(self):
@@ -287,7 +287,7 @@ class GrammarTests(unittest.TestCase):
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
l3 = lambda : [2 < x for x in [-1, 3, 0]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
@@ -325,36 +325,36 @@ class GrammarTests(unittest.TestCase):
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
import io
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
sys.stdout = io.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
print(1, 2, 3)
print(1, 2, 3, end=' ')
print()
print(0 or 1, 0 or 1, end=' ')
print(0 or 1)
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
print(1, 2, 3, file=sys.stdout)
print(1, 2, 3, end=' ', file=sys.stdout)
print(file=sys.stdout)
print(0 or 1, 0 or 1, end=' ', file=sys.stdout)
print(0 or 1, file=sys.stdout)
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
print(1, 2, 3, file=gulp)
print(1, 2, 3, end=' ', file=gulp)
print(file=gulp)
print(0 or 1, 0 or 1, end=' ', file=gulp)
print(0 or 1, file=gulp)
# test print >> None
def driver():
@@ -368,13 +368,13 @@ class GrammarTests(unittest.TestCase):
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
print('hello world', file=file)
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
print('goodbye universe', file=file)
driver()
@@ -461,7 +461,7 @@ hello world
continue
except:
raise
if count > 2 or big_hippo <> 1:
if count > 2 or big_hippo != 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
@@ -478,7 +478,7 @@ hello world
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
@@ -506,33 +506,33 @@ hello world
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
exec('z=1+1\n')
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
exec('z=1+1')
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec(r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
if z != 2: self.fail('exec u\'z=1+1\'')""")
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
exec('z = 1', g)
if '__builtins__' in g: del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
exec('global a; a = 1; b = 2', g, l)
if '__builtins__' in g: del g['__builtins__']
if '__builtins__' in l: del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
@@ -544,7 +544,7 @@ hello world
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
except AssertionError as e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
@@ -655,7 +655,7 @@ hello world
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 != 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
@@ -664,7 +664,7 @@ hello world
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
@@ -747,9 +747,9 @@ hello world
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = repr(x)
x = repr(1 or 2 or 3)
self.assertEqual(repr((1,2)), '(1, 2)')
x = x
x = 'x'
@@ -837,9 +837,9 @@ hello world
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
self.assertEqual(next(g), [x for x in range(10)])
try:
g.next()
next(g)
self.fail('should produce StopIteration exception')
except StopIteration:
pass
@@ -847,7 +847,7 @@ hello world
a = 1
try:
g = (a for d in a)
g.next()
next(g)
self.fail('should produce TypeError')
except TypeError:
pass
@@ -892,7 +892,7 @@ hello world
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
print(x)
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])

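The grammar tests exercise syntax that changed outright: octal literals
need a 0o prefix, long literals lose the L suffix, <> gives way to !=,
backticks become repr(), and exec is an ordinary function taking the
globals dict as an argument. A few self-checking lines:

assert 0o377 == 255                           # 0377 is a syntax error in 3.x
assert 123456789012345678901234567890 > 0     # no trailing L; ints are unbounded
assert (1 != 2) and not (1 != 1)              # != is the only spelling now
assert repr((1, 2)) == "(1, 2)"               # repr() replaces backticks
g = {}
exec("z = 1", g)                              # "exec ... in g" becomes arguments
assert g["z"] == 1
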
View File

@@ -32,7 +32,7 @@ class TokenTests(unittest.TestCase):
self.assertEquals(0o377, 255)
self.assertEquals(2147483647, 0o17777777777)
self.assertEquals(0b1001, 9)
from sys import maxint
from sys import maxsize
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -0o20000000000)
# XXX -2147483648
@@ -438,7 +438,7 @@ class GrammarTests(unittest.TestCase):
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
try: raise RuntimeError('just testing')
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass

View File

@@ -28,7 +28,7 @@ def main():
fn = "example.py"
tree = dr.parse_file(fn, debug=True)
if not diff(fn, tree):
print "No diffs."
print("No diffs.")
if not sys.argv[1:]:
return # Pass a dummy argument to run the complete test suite below
@@ -44,7 +44,7 @@ def main():
fn = fn[:-1]
if not fn.endswith(".py"):
continue
print >>sys.stderr, "Parsing", fn
print("Parsing", fn, file=sys.stderr)
tree = dr.parse_file(fn, debug=True)
if diff(fn, tree):
problems.append(fn)
@@ -55,27 +55,27 @@ def main():
names = os.listdir(dir)
except os.error:
continue
print >>sys.stderr, "Scanning", dir, "..."
print("Scanning", dir, "...", file=sys.stderr)
for name in names:
if not name.endswith(".py"):
continue
print >>sys.stderr, "Parsing", name
print("Parsing", name, file=sys.stderr)
fn = os.path.join(dir, name)
try:
tree = dr.parse_file(fn, debug=True)
except pgen2.parse.ParseError, err:
print "ParseError:", err
except pgen2.parse.ParseError as err:
print("ParseError:", err)
else:
if diff(fn, tree):
problems.append(fn)
# Show summary of problem files
if not problems:
print "No problems. Congratulations!"
print("No problems. Congratulations!")
else:
print "Problems in following files:"
print("Problems in following files:")
for fn in problems:
print "***", fn
print("***", fn)
def diff(fn, tree):
f = open("@", "w")

View File

@@ -21,7 +21,7 @@ from .. import refactor
class Options:
def __init__(self, **kwargs):
for k, v in kwargs.items():
for k, v in list(kwargs.items()):
setattr(self, k, v)
self.verbose = False
@@ -33,7 +33,7 @@ class Test_all(support.TestCase):
def test_all_project_files(self):
for filepath in support.all_project_files():
print "Fixing %s..." % filepath
print("Fixing %s..." % filepath)
self.refactor.refactor_string(open(filepath).read(), filepath)

View File

@@ -18,7 +18,7 @@ from .. import refactor
class Options:
def __init__(self, **kwargs):
for k, v in kwargs.items():
for k, v in list(kwargs.items()):
setattr(self, k, v)
self.verbose = False
@@ -1285,7 +1285,7 @@ class Test_imports(FixerTestCase):
}
def test_import_module(self):
for old, (new, members) in self.modules.items():
for old, (new, members) in list(self.modules.items()):
b = "import %s" % old
a = "import %s" % new
self.check(b, a)
@@ -1295,7 +1295,7 @@ class Test_imports(FixerTestCase):
self.check(b, a)
def test_import_from(self):
for old, (new, members) in self.modules.items():
for old, (new, members) in list(self.modules.items()):
for member in members:
b = "from %s import %s" % (old, member)
a = "from %s import %s" % (new, member)
@@ -1305,7 +1305,7 @@ class Test_imports(FixerTestCase):
self.unchanged(s)
def test_import_module_as(self):
for old, (new, members) in self.modules.items():
for old, (new, members) in list(self.modules.items()):
b = "import %s as foo_bar" % old
a = "import %s as foo_bar" % new
self.check(b, a)
@@ -1315,7 +1315,7 @@ class Test_imports(FixerTestCase):
self.check(b, a)
def test_import_from_as(self):
for old, (new, members) in self.modules.items():
for old, (new, members) in list(self.modules.items()):
for member in members:
b = "from %s import %s as foo_bar" % (old, member)
a = "from %s import %s as foo_bar" % (new, member)
@@ -1327,7 +1327,7 @@ class Test_imports(FixerTestCase):
self.warns_unchanged(s, "Cannot handle star imports")
def test_import_module_usage(self):
for old, (new, members) in self.modules.items():
for old, (new, members) in list(self.modules.items()):
for member in members:
b = """
import %s
@@ -1340,7 +1340,7 @@ class Test_imports(FixerTestCase):
self.check(b, a)
def test_from_import_usage(self):
for old, (new, members) in self.modules.items():
for old, (new, members) in list(self.modules.items()):
for member in members:
b = """
from %s import %s
@@ -2211,7 +2211,7 @@ class Test_renames(FixerTestCase):
}
def test_import_from(self):
for mod, (old, new) in self.modules.items():
for mod, (old, new) in list(self.modules.items()):
b = "from %s import %s" % (mod, old)
a = "from %s import %s" % (mod, new)
self.check(b, a)
@@ -2220,13 +2220,13 @@ class Test_renames(FixerTestCase):
self.unchanged(s)
def test_import_from_as(self):
for mod, (old, new) in self.modules.items():
for mod, (old, new) in list(self.modules.items()):
b = "from %s import %s as foo_bar" % (mod, old)
a = "from %s import %s as foo_bar" % (mod, new)
self.check(b, a)
def test_import_module_usage(self):
for mod, (old, new) in self.modules.items():
for mod, (old, new) in list(self.modules.items()):
b = """
import %s
foo(%s, %s.%s)
@@ -2239,7 +2239,7 @@ class Test_renames(FixerTestCase):
def XXX_test_from_import_usage(self):
# not implemented yet
for mod, (old, new) in self.modules.items():
for mod, (old, new) in list(self.modules.items()):
b = """
from %s import %s
foo(%s, %s)

View File

@@ -149,7 +149,7 @@ class TestParserIdempotency(support.TestCase):
def test_all_project_files(self):
for filepath in support.all_project_files():
print "Parsing %s..." % filepath
print("Parsing %s..." % filepath)
tree = driver.parse_file(filepath, debug=True)
if diff(filepath, tree):
self.fail("Idempotency failed: %s" % filepath)